body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
eae8bd6f3e93717829924dc6ec79a73643ea616df73d82259e611c2cfbf43c57
def test_create_new_superuser(self): 'Test creating a new superuser' user = get_user_model().objects.create_superuser('example@example.com', 'test123') self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)
Test creating a new superuser
app/core/tests/test_models.py
test_create_new_superuser
hl2999/recipe-app-api
1
python
def test_create_new_superuser(self): user = get_user_model().objects.create_superuser('example@example.com', 'test123') self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)
def test_create_new_superuser(self): user = get_user_model().objects.create_superuser('example@example.com', 'test123') self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)<|docstring|>Test creating a new superuser<|endoftext|>
be34f6d9e67f09bc4423f679793e2407eb9068f994bcc74997fd4dfa357a5c46
def mnist_model(image, labels, mesh): 'The model.\n\n Args:\n image: tf.Tensor with shape [batch, 28*28]\n labels: a tf.Tensor with shape [batch] and dtype tf.int32\n mesh: a mtf.Mesh\n\n Returns:\n logits: a tf.Tensor with shape [batch, 10]\n loss: a mtf.Tensor with shape []\n ' batch_dim = mtf.Dimension('batch', FLAGS.batch_size) rows_dim = mtf.Dimension('rows', 28) cols_dim = mtf.Dimension('cols', 28) classes_dim = mtf.Dimension('classes', 10) x = mtf.import_tf_tensor(mesh, tf.reshape(image, [FLAGS.batch_size, 28, 28]), [batch_dim, rows_dim, cols_dim]) y = mtf.import_tf_tensor(mesh, tf.reshape(labels, [FLAGS.batch_size]), [batch_dim]) w1 = mtf.get_variable(mesh, 'w1', [rows_dim, cols_dim, classes_dim]) b1 = mtf.get_variable(mesh, 'b1', [classes_dim]) logits = mtf.relu((mtf.einsum([x, w1], [batch_dim, classes_dim]) + b1)) if (labels is None): loss = None else: loss = mtf.layers.softmax_cross_entropy_with_logits(logits, mtf.one_hot(y, classes_dim), classes_dim) loss = mtf.reduce_mean(loss) return (logits, loss)
The model. Args: image: tf.Tensor with shape [batch, 28*28] labels: a tf.Tensor with shape [batch] and dtype tf.int32 mesh: a mtf.Mesh Returns: logits: a tf.Tensor with shape [batch, 10] loss: a mtf.Tensor with shape []
examples/mnist-simple.py
mnist_model
mzj14/mesh
0
python
def mnist_model(image, labels, mesh): 'The model.\n\n Args:\n image: tf.Tensor with shape [batch, 28*28]\n labels: a tf.Tensor with shape [batch] and dtype tf.int32\n mesh: a mtf.Mesh\n\n Returns:\n logits: a tf.Tensor with shape [batch, 10]\n loss: a mtf.Tensor with shape []\n ' batch_dim = mtf.Dimension('batch', FLAGS.batch_size) rows_dim = mtf.Dimension('rows', 28) cols_dim = mtf.Dimension('cols', 28) classes_dim = mtf.Dimension('classes', 10) x = mtf.import_tf_tensor(mesh, tf.reshape(image, [FLAGS.batch_size, 28, 28]), [batch_dim, rows_dim, cols_dim]) y = mtf.import_tf_tensor(mesh, tf.reshape(labels, [FLAGS.batch_size]), [batch_dim]) w1 = mtf.get_variable(mesh, 'w1', [rows_dim, cols_dim, classes_dim]) b1 = mtf.get_variable(mesh, 'b1', [classes_dim]) logits = mtf.relu((mtf.einsum([x, w1], [batch_dim, classes_dim]) + b1)) if (labels is None): loss = None else: loss = mtf.layers.softmax_cross_entropy_with_logits(logits, mtf.one_hot(y, classes_dim), classes_dim) loss = mtf.reduce_mean(loss) return (logits, loss)
def mnist_model(image, labels, mesh): 'The model.\n\n Args:\n image: tf.Tensor with shape [batch, 28*28]\n labels: a tf.Tensor with shape [batch] and dtype tf.int32\n mesh: a mtf.Mesh\n\n Returns:\n logits: a tf.Tensor with shape [batch, 10]\n loss: a mtf.Tensor with shape []\n ' batch_dim = mtf.Dimension('batch', FLAGS.batch_size) rows_dim = mtf.Dimension('rows', 28) cols_dim = mtf.Dimension('cols', 28) classes_dim = mtf.Dimension('classes', 10) x = mtf.import_tf_tensor(mesh, tf.reshape(image, [FLAGS.batch_size, 28, 28]), [batch_dim, rows_dim, cols_dim]) y = mtf.import_tf_tensor(mesh, tf.reshape(labels, [FLAGS.batch_size]), [batch_dim]) w1 = mtf.get_variable(mesh, 'w1', [rows_dim, cols_dim, classes_dim]) b1 = mtf.get_variable(mesh, 'b1', [classes_dim]) logits = mtf.relu((mtf.einsum([x, w1], [batch_dim, classes_dim]) + b1)) if (labels is None): loss = None else: loss = mtf.layers.softmax_cross_entropy_with_logits(logits, mtf.one_hot(y, classes_dim), classes_dim) loss = mtf.reduce_mean(loss) return (logits, loss)<|docstring|>The model. Args: image: tf.Tensor with shape [batch, 28*28] labels: a tf.Tensor with shape [batch] and dtype tf.int32 mesh: a mtf.Mesh Returns: logits: a tf.Tensor with shape [batch, 10] loss: a mtf.Tensor with shape []<|endoftext|>
8017f366e56a12e739f011721a78e7ce8e5ad87d4bd10b42e1354acc40b898bf
def model_fn(features, labels, mode, params): 'The model_fn argument for creating an Estimator.' tf.logging.info(('features = %s labels = %s mode = %s params=%s' % (features, labels, mode, params))) global_step = tf.train.get_global_step() graph = mtf.Graph() mesh = mtf.Mesh(graph, 'my_mesh') (logits, loss) = mnist_model(features, labels, mesh) mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape) layout_rules = mtf.convert_to_layout_rules(FLAGS.layout) mesh_size = mesh_shape.size print('mesh_shape.size = ', mesh_shape.size) mesh_devices = ([''] * mesh_size) mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(mesh_shape, layout_rules, mesh_devices) if (mode == tf.estimator.ModeKeys.TRAIN): var_grads = mtf.gradients([loss], [v.outputs[0] for v in graph.trainable_variables]) optimizer = mtf.optimize.AdafactorOptimizer() update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables) lowering = mtf.Lowering(graph, {mesh: mesh_impl}) restore_hook = mtf.MtfRestoreHook(lowering) tf_logits = lowering.export_to_tf_tensor(logits) if (mode != tf.estimator.ModeKeys.PREDICT): tf_loss = lowering.export_to_tf_tensor(loss) tf.summary.scalar('loss', tf_loss) if (mode == tf.estimator.ModeKeys.TRAIN): tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] tf_update_ops.append(tf.assign_add(global_step, 1)) train_op = tf.group(tf_update_ops) saver = tf.train.Saver(tf.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False, save_relative_paths=True) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) saver_listener = mtf.MtfCheckpointSaverListener(lowering) saver_hook = tf.train.CheckpointSaverHook(FLAGS.model_dir, save_steps=1000, saver=saver, listeners=[saver_listener]) accuracy = tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1)) tf.identity(tf_loss, 'cross_entropy') tf.identity(accuracy[1], name='train_accuracy') tf.summary.scalar('train_accuracy', accuracy[1]) return 
tf.estimator.EstimatorSpec(tf.estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, training_chief_hooks=[restore_hook, saver_hook]) if (mode == tf.estimator.ModeKeys.PREDICT): predictions = {'classes': tf.argmax(tf_logits, axis=1), 'probabilities': tf.nn.softmax(tf_logits)} return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions, prediction_hooks=[restore_hook], export_outputs={'classify': tf.estimator.export.PredictOutput(predictions)}) if (mode == tf.estimator.ModeKeys.EVAL): return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.EVAL, loss=tf_loss, evaluation_hooks=[restore_hook], eval_metric_ops={'accuracy': tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1))})
The model_fn argument for creating an Estimator.
examples/mnist-simple.py
model_fn
mzj14/mesh
0
python
def model_fn(features, labels, mode, params): tf.logging.info(('features = %s labels = %s mode = %s params=%s' % (features, labels, mode, params))) global_step = tf.train.get_global_step() graph = mtf.Graph() mesh = mtf.Mesh(graph, 'my_mesh') (logits, loss) = mnist_model(features, labels, mesh) mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape) layout_rules = mtf.convert_to_layout_rules(FLAGS.layout) mesh_size = mesh_shape.size print('mesh_shape.size = ', mesh_shape.size) mesh_devices = ([] * mesh_size) mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(mesh_shape, layout_rules, mesh_devices) if (mode == tf.estimator.ModeKeys.TRAIN): var_grads = mtf.gradients([loss], [v.outputs[0] for v in graph.trainable_variables]) optimizer = mtf.optimize.AdafactorOptimizer() update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables) lowering = mtf.Lowering(graph, {mesh: mesh_impl}) restore_hook = mtf.MtfRestoreHook(lowering) tf_logits = lowering.export_to_tf_tensor(logits) if (mode != tf.estimator.ModeKeys.PREDICT): tf_loss = lowering.export_to_tf_tensor(loss) tf.summary.scalar('loss', tf_loss) if (mode == tf.estimator.ModeKeys.TRAIN): tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] tf_update_ops.append(tf.assign_add(global_step, 1)) train_op = tf.group(tf_update_ops) saver = tf.train.Saver(tf.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False, save_relative_paths=True) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) saver_listener = mtf.MtfCheckpointSaverListener(lowering) saver_hook = tf.train.CheckpointSaverHook(FLAGS.model_dir, save_steps=1000, saver=saver, listeners=[saver_listener]) accuracy = tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1)) tf.identity(tf_loss, 'cross_entropy') tf.identity(accuracy[1], name='train_accuracy') tf.summary.scalar('train_accuracy', accuracy[1]) return tf.estimator.EstimatorSpec(tf.estimator.ModeKeys.TRAIN, loss=tf_loss, 
train_op=train_op, training_chief_hooks=[restore_hook, saver_hook]) if (mode == tf.estimator.ModeKeys.PREDICT): predictions = {'classes': tf.argmax(tf_logits, axis=1), 'probabilities': tf.nn.softmax(tf_logits)} return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions, prediction_hooks=[restore_hook], export_outputs={'classify': tf.estimator.export.PredictOutput(predictions)}) if (mode == tf.estimator.ModeKeys.EVAL): return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.EVAL, loss=tf_loss, evaluation_hooks=[restore_hook], eval_metric_ops={'accuracy': tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1))})
def model_fn(features, labels, mode, params): tf.logging.info(('features = %s labels = %s mode = %s params=%s' % (features, labels, mode, params))) global_step = tf.train.get_global_step() graph = mtf.Graph() mesh = mtf.Mesh(graph, 'my_mesh') (logits, loss) = mnist_model(features, labels, mesh) mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape) layout_rules = mtf.convert_to_layout_rules(FLAGS.layout) mesh_size = mesh_shape.size print('mesh_shape.size = ', mesh_shape.size) mesh_devices = ([] * mesh_size) mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(mesh_shape, layout_rules, mesh_devices) if (mode == tf.estimator.ModeKeys.TRAIN): var_grads = mtf.gradients([loss], [v.outputs[0] for v in graph.trainable_variables]) optimizer = mtf.optimize.AdafactorOptimizer() update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables) lowering = mtf.Lowering(graph, {mesh: mesh_impl}) restore_hook = mtf.MtfRestoreHook(lowering) tf_logits = lowering.export_to_tf_tensor(logits) if (mode != tf.estimator.ModeKeys.PREDICT): tf_loss = lowering.export_to_tf_tensor(loss) tf.summary.scalar('loss', tf_loss) if (mode == tf.estimator.ModeKeys.TRAIN): tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] tf_update_ops.append(tf.assign_add(global_step, 1)) train_op = tf.group(tf_update_ops) saver = tf.train.Saver(tf.global_variables(), sharded=True, max_to_keep=10, keep_checkpoint_every_n_hours=2, defer_build=False, save_relative_paths=True) tf.add_to_collection(tf.GraphKeys.SAVERS, saver) saver_listener = mtf.MtfCheckpointSaverListener(lowering) saver_hook = tf.train.CheckpointSaverHook(FLAGS.model_dir, save_steps=1000, saver=saver, listeners=[saver_listener]) accuracy = tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1)) tf.identity(tf_loss, 'cross_entropy') tf.identity(accuracy[1], name='train_accuracy') tf.summary.scalar('train_accuracy', accuracy[1]) return tf.estimator.EstimatorSpec(tf.estimator.ModeKeys.TRAIN, loss=tf_loss, 
train_op=train_op, training_chief_hooks=[restore_hook, saver_hook]) if (mode == tf.estimator.ModeKeys.PREDICT): predictions = {'classes': tf.argmax(tf_logits, axis=1), 'probabilities': tf.nn.softmax(tf_logits)} return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions, prediction_hooks=[restore_hook], export_outputs={'classify': tf.estimator.export.PredictOutput(predictions)}) if (mode == tf.estimator.ModeKeys.EVAL): return tf.estimator.EstimatorSpec(mode=tf.estimator.ModeKeys.EVAL, loss=tf_loss, evaluation_hooks=[restore_hook], eval_metric_ops={'accuracy': tf.metrics.accuracy(labels=labels, predictions=tf.argmax(tf_logits, axis=1))})<|docstring|>The model_fn argument for creating an Estimator.<|endoftext|>
d9a9d5c986ffde2f0ab6c887d19a18859a766e1d6cffaf8bd03b0a166e91c732
def run_mnist(): 'Run MNIST training and eval loop.' mnist_classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, config=tf.estimator.RunConfig(log_step_count_steps=FLAGS.log_steps)) def train_input_fn(): 'Prepare data for training.' ds = dataset.train(FLAGS.data_dir) ds_batched = ds.cache().batch(FLAGS.batch_size) ds = ds_batched.repeat(FLAGS.epochs_between_evals) return ds def eval_input_fn(): return dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size).make_one_shot_iterator().get_next() for _ in range((FLAGS.train_epochs // FLAGS.epochs_between_evals)): mnist_classifier.train(input_fn=train_input_fn, hooks=None) eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn) print(('\nEvaluation results:\n\t%s\n' % eval_results))
Run MNIST training and eval loop.
examples/mnist-simple.py
run_mnist
mzj14/mesh
0
python
def run_mnist(): mnist_classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, config=tf.estimator.RunConfig(log_step_count_steps=FLAGS.log_steps)) def train_input_fn(): 'Prepare data for training.' ds = dataset.train(FLAGS.data_dir) ds_batched = ds.cache().batch(FLAGS.batch_size) ds = ds_batched.repeat(FLAGS.epochs_between_evals) return ds def eval_input_fn(): return dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size).make_one_shot_iterator().get_next() for _ in range((FLAGS.train_epochs // FLAGS.epochs_between_evals)): mnist_classifier.train(input_fn=train_input_fn, hooks=None) eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn) print(('\nEvaluation results:\n\t%s\n' % eval_results))
def run_mnist(): mnist_classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir=FLAGS.model_dir, config=tf.estimator.RunConfig(log_step_count_steps=FLAGS.log_steps)) def train_input_fn(): 'Prepare data for training.' ds = dataset.train(FLAGS.data_dir) ds_batched = ds.cache().batch(FLAGS.batch_size) ds = ds_batched.repeat(FLAGS.epochs_between_evals) return ds def eval_input_fn(): return dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size).make_one_shot_iterator().get_next() for _ in range((FLAGS.train_epochs // FLAGS.epochs_between_evals)): mnist_classifier.train(input_fn=train_input_fn, hooks=None) eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn) print(('\nEvaluation results:\n\t%s\n' % eval_results))<|docstring|>Run MNIST training and eval loop.<|endoftext|>
dcdd96398adcf0b2f42ae1bded49f01f4b5e0434378c09433516be431f45305a
def train_input_fn(): 'Prepare data for training.' ds = dataset.train(FLAGS.data_dir) ds_batched = ds.cache().batch(FLAGS.batch_size) ds = ds_batched.repeat(FLAGS.epochs_between_evals) return ds
Prepare data for training.
examples/mnist-simple.py
train_input_fn
mzj14/mesh
0
python
def train_input_fn(): ds = dataset.train(FLAGS.data_dir) ds_batched = ds.cache().batch(FLAGS.batch_size) ds = ds_batched.repeat(FLAGS.epochs_between_evals) return ds
def train_input_fn(): ds = dataset.train(FLAGS.data_dir) ds_batched = ds.cache().batch(FLAGS.batch_size) ds = ds_batched.repeat(FLAGS.epochs_between_evals) return ds<|docstring|>Prepare data for training.<|endoftext|>
50f9d92d62384fb8204e8ba8d26e5a0fc91bbdf277c72d7c434e03303db7dd5e
def accept(self): 'An item should call this method if it can handle the event. This will prevent the event being delivered to any other items.' self.accepted = True self.acceptedItem = self.currentItem
An item should call this method if it can handle the event. This will prevent the event being delivered to any other items.
pyqtgraph/GraphicsScene/mouseEvents.py
accept
msmttchr/pyqtgraph
2,762
python
def accept(self): self.accepted = True self.acceptedItem = self.currentItem
def accept(self): self.accepted = True self.acceptedItem = self.currentItem<|docstring|>An item should call this method if it can handle the event. This will prevent the event being delivered to any other items.<|endoftext|>
4c17d16601de12987febc70b7de22f8d4d24e036cee3f9ae6661029e52ab5ba7
def ignore(self): 'An item should call this method if it cannot handle the event. This will allow the event to be delivered to other items.' self.accepted = False
An item should call this method if it cannot handle the event. This will allow the event to be delivered to other items.
pyqtgraph/GraphicsScene/mouseEvents.py
ignore
msmttchr/pyqtgraph
2,762
python
def ignore(self): self.accepted = False
def ignore(self): self.accepted = False<|docstring|>An item should call this method if it cannot handle the event. This will allow the event to be delivered to other items.<|endoftext|>
02d61c74674f5c05d62a4d830b610030d5fe86d9b0f23535d2401619759ca71f
def scenePos(self): 'Return the current scene position of the mouse.' return Point(self._scenePos)
Return the current scene position of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
scenePos
msmttchr/pyqtgraph
2,762
python
def scenePos(self): return Point(self._scenePos)
def scenePos(self): return Point(self._scenePos)<|docstring|>Return the current scene position of the mouse.<|endoftext|>
8b10c76f83b286638ee5efdda1f2280747de11ed3a3fa3d7b1b3a8b48a274ad4
def screenPos(self): 'Return the current screen position (pixels relative to widget) of the mouse.' return Point(self._screenPos)
Return the current screen position (pixels relative to widget) of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
screenPos
msmttchr/pyqtgraph
2,762
python
def screenPos(self): return Point(self._screenPos)
def screenPos(self): return Point(self._screenPos)<|docstring|>Return the current screen position (pixels relative to widget) of the mouse.<|endoftext|>
a861e53383b3c8ae20d3d9eee817a5d80cc8caefc45dc09c58474eba0272a649
def buttonDownScenePos(self, btn=None): '\n Return the scene position of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n ' if (btn is None): btn = self.button() return Point(self._buttonDownScenePos[btn])
Return the scene position of the mouse at the time *btn* was pressed. If *btn* is omitted, then the button that initiated the drag is assumed.
pyqtgraph/GraphicsScene/mouseEvents.py
buttonDownScenePos
msmttchr/pyqtgraph
2,762
python
def buttonDownScenePos(self, btn=None): '\n Return the scene position of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n ' if (btn is None): btn = self.button() return Point(self._buttonDownScenePos[btn])
def buttonDownScenePos(self, btn=None): '\n Return the scene position of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n ' if (btn is None): btn = self.button() return Point(self._buttonDownScenePos[btn])<|docstring|>Return the scene position of the mouse at the time *btn* was pressed. If *btn* is omitted, then the button that initiated the drag is assumed.<|endoftext|>
8aa422fa86c09332d820396e2eade3cf70ea0e1aa1af64b76780d19adc41770c
def buttonDownScreenPos(self, btn=None): '\n Return the screen position (pixels relative to widget) of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n ' if (btn is None): btn = self.button() return Point(self._buttonDownScreenPos[btn])
Return the screen position (pixels relative to widget) of the mouse at the time *btn* was pressed. If *btn* is omitted, then the button that initiated the drag is assumed.
pyqtgraph/GraphicsScene/mouseEvents.py
buttonDownScreenPos
msmttchr/pyqtgraph
2,762
python
def buttonDownScreenPos(self, btn=None): '\n Return the screen position (pixels relative to widget) of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n ' if (btn is None): btn = self.button() return Point(self._buttonDownScreenPos[btn])
def buttonDownScreenPos(self, btn=None): '\n Return the screen position (pixels relative to widget) of the mouse at the time *btn* was pressed.\n If *btn* is omitted, then the button that initiated the drag is assumed.\n ' if (btn is None): btn = self.button() return Point(self._buttonDownScreenPos[btn])<|docstring|>Return the screen position (pixels relative to widget) of the mouse at the time *btn* was pressed. If *btn* is omitted, then the button that initiated the drag is assumed.<|endoftext|>
9fc90e43e0b185fd3ddd542f44fff9794bec9fe5c7a9d0857cefec967574315a
def lastScenePos(self): '\n Return the scene position of the mouse immediately prior to this event.\n ' return Point(self._lastScenePos)
Return the scene position of the mouse immediately prior to this event.
pyqtgraph/GraphicsScene/mouseEvents.py
lastScenePos
msmttchr/pyqtgraph
2,762
python
def lastScenePos(self): '\n \n ' return Point(self._lastScenePos)
def lastScenePos(self): '\n \n ' return Point(self._lastScenePos)<|docstring|>Return the scene position of the mouse immediately prior to this event.<|endoftext|>
045654485ae69b6f70c567b5e6fc9cdcad35cea27184efdbcdc0418e52f145e5
def lastScreenPos(self): '\n Return the screen position of the mouse immediately prior to this event.\n ' return Point(self._lastScreenPos)
Return the screen position of the mouse immediately prior to this event.
pyqtgraph/GraphicsScene/mouseEvents.py
lastScreenPos
msmttchr/pyqtgraph
2,762
python
def lastScreenPos(self): '\n \n ' return Point(self._lastScreenPos)
def lastScreenPos(self): '\n \n ' return Point(self._lastScreenPos)<|docstring|>Return the screen position of the mouse immediately prior to this event.<|endoftext|>
cdb4eba20c2acd86a06d2130e3493072dec641cc96383e3e7f58f298cd829872
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons
Return the buttons currently pressed on the mouse. (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
buttons
msmttchr/pyqtgraph
2,762
python
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons<|docstring|>Return the buttons currently pressed on the mouse. (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)<|endoftext|>
9aa4427434734ce250e59ec864acb4c5a92372e70b7d10f466f58f2f5b87942d
def button(self): 'Return the button that initiated the drag (may be different from the buttons currently pressed)\n (see QGraphicsSceneMouseEvent::button in the Qt documentation)\n \n ' return self._button
Return the button that initiated the drag (may be different from the buttons currently pressed) (see QGraphicsSceneMouseEvent::button in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
button
msmttchr/pyqtgraph
2,762
python
def button(self): 'Return the button that initiated the drag (may be different from the buttons currently pressed)\n (see QGraphicsSceneMouseEvent::button in the Qt documentation)\n \n ' return self._button
def button(self): 'Return the button that initiated the drag (may be different from the buttons currently pressed)\n (see QGraphicsSceneMouseEvent::button in the Qt documentation)\n \n ' return self._button<|docstring|>Return the button that initiated the drag (may be different from the buttons currently pressed) (see QGraphicsSceneMouseEvent::button in the Qt documentation)<|endoftext|>
20949bad1e6f2ee66f480414b348fbe00e5d23626a6512946421795d0fc6325e
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))
Return the current position of the mouse in the coordinate system of the item that the event was delivered to.
pyqtgraph/GraphicsScene/mouseEvents.py
pos
msmttchr/pyqtgraph
2,762
python
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))<|docstring|>Return the current position of the mouse in the coordinate system of the item that the event was delivered to.<|endoftext|>
95326be536bf7c85ec7e82e17c7c0ab0332b4e0e6fca565b33c8490b54cd95ed
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))
Return the previous position of the mouse in the coordinate system of the item that the event was delivered to.
pyqtgraph/GraphicsScene/mouseEvents.py
lastPos
msmttchr/pyqtgraph
2,762
python
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))<|docstring|>Return the previous position of the mouse in the coordinate system of the item that the event was delivered to.<|endoftext|>
07532be218d95b049f497bdce2c129a5a3c3cadcb855d057c1b503641b416b40
def buttonDownPos(self, btn=None): '\n Return the position of the mouse at the time the drag was initiated\n in the coordinate system of the item that the event was delivered to.\n ' if (btn is None): btn = self.button() return Point(self.currentItem.mapFromScene(self._buttonDownScenePos[btn]))
Return the position of the mouse at the time the drag was initiated in the coordinate system of the item that the event was delivered to.
pyqtgraph/GraphicsScene/mouseEvents.py
buttonDownPos
msmttchr/pyqtgraph
2,762
python
def buttonDownPos(self, btn=None): '\n Return the position of the mouse at the time the drag was initiated\n in the coordinate system of the item that the event was delivered to.\n ' if (btn is None): btn = self.button() return Point(self.currentItem.mapFromScene(self._buttonDownScenePos[btn]))
def buttonDownPos(self, btn=None): '\n Return the position of the mouse at the time the drag was initiated\n in the coordinate system of the item that the event was delivered to.\n ' if (btn is None): btn = self.button() return Point(self.currentItem.mapFromScene(self._buttonDownScenePos[btn]))<|docstring|>Return the position of the mouse at the time the drag was initiated in the coordinate system of the item that the event was delivered to.<|endoftext|>
0c96b58971670aad9ecfa363df5de7ff361e1ed193a6e01b97b1d74d1c6e52e9
def isStart(self): 'Returns True if this event is the first since a drag was initiated.' return self.start
Returns True if this event is the first since a drag was initiated.
pyqtgraph/GraphicsScene/mouseEvents.py
isStart
msmttchr/pyqtgraph
2,762
python
def isStart(self): return self.start
def isStart(self): return self.start<|docstring|>Returns True if this event is the first since a drag was initiated.<|endoftext|>
b682973dccbac790efc053f33ac8aa26926755e7c8696aa565e20f75fd1a74da
def isFinish(self): 'Returns False if this is the last event in a drag. Note that this\n event will have the same position as the previous one.' return self.finish
Returns False if this is the last event in a drag. Note that this event will have the same position as the previous one.
pyqtgraph/GraphicsScene/mouseEvents.py
isFinish
msmttchr/pyqtgraph
2,762
python
def isFinish(self): 'Returns False if this is the last event in a drag. Note that this\n event will have the same position as the previous one.' return self.finish
def isFinish(self): 'Returns False if this is the last event in a drag. Note that this\n event will have the same position as the previous one.' return self.finish<|docstring|>Returns False if this is the last event in a drag. Note that this event will have the same position as the previous one.<|endoftext|>
f3f371b9916943415c99a787b621e175ca80ae28ad798ba708927769c20a3a5e
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)\n \n ' return self._modifiers
Return any keyboard modifiers currently pressed. (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
modifiers
msmttchr/pyqtgraph
2,762
python
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)\n \n ' return self._modifiers
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)\n \n ' return self._modifiers<|docstring|>Return any keyboard modifiers currently pressed. (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)<|endoftext|>
50f9d92d62384fb8204e8ba8d26e5a0fc91bbdf277c72d7c434e03303db7dd5e
def accept(self): 'An item should call this method if it can handle the event. This will prevent the event being delivered to any other items.' self.accepted = True self.acceptedItem = self.currentItem
An item should call this method if it can handle the event. This will prevent the event being delivered to any other items.
pyqtgraph/GraphicsScene/mouseEvents.py
accept
msmttchr/pyqtgraph
2,762
python
def accept(self): self.accepted = True self.acceptedItem = self.currentItem
def accept(self): self.accepted = True self.acceptedItem = self.currentItem<|docstring|>An item should call this method if it can handle the event. This will prevent the event being delivered to any other items.<|endoftext|>
4c17d16601de12987febc70b7de22f8d4d24e036cee3f9ae6661029e52ab5ba7
def ignore(self): 'An item should call this method if it cannot handle the event. This will allow the event to be delivered to other items.' self.accepted = False
An item should call this method if it cannot handle the event. This will allow the event to be delivered to other items.
pyqtgraph/GraphicsScene/mouseEvents.py
ignore
msmttchr/pyqtgraph
2,762
python
def ignore(self): self.accepted = False
def ignore(self): self.accepted = False<|docstring|>An item should call this method if it cannot handle the event. This will allow the event to be delivered to other items.<|endoftext|>
02d61c74674f5c05d62a4d830b610030d5fe86d9b0f23535d2401619759ca71f
def scenePos(self): 'Return the current scene position of the mouse.' return Point(self._scenePos)
Return the current scene position of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
scenePos
msmttchr/pyqtgraph
2,762
python
def scenePos(self): return Point(self._scenePos)
def scenePos(self): return Point(self._scenePos)<|docstring|>Return the current scene position of the mouse.<|endoftext|>
8b10c76f83b286638ee5efdda1f2280747de11ed3a3fa3d7b1b3a8b48a274ad4
def screenPos(self): 'Return the current screen position (pixels relative to widget) of the mouse.' return Point(self._screenPos)
Return the current screen position (pixels relative to widget) of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
screenPos
msmttchr/pyqtgraph
2,762
python
def screenPos(self): return Point(self._screenPos)
def screenPos(self): return Point(self._screenPos)<|docstring|>Return the current screen position (pixels relative to widget) of the mouse.<|endoftext|>
cdb4eba20c2acd86a06d2130e3493072dec641cc96383e3e7f58f298cd829872
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons
Return the buttons currently pressed on the mouse. (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
buttons
msmttchr/pyqtgraph
2,762
python
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons<|docstring|>Return the buttons currently pressed on the mouse. (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)<|endoftext|>
265d64a9339491eb4c0ed8c0f509bbe95cfbdb8fcad4d55e75cb9ac2acdc88bd
def button(self): 'Return the mouse button that generated the click event.\n (see QGraphicsSceneMouseEvent::button in the Qt documentation)\n ' return self._button
Return the mouse button that generated the click event. (see QGraphicsSceneMouseEvent::button in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
button
msmttchr/pyqtgraph
2,762
python
def button(self): 'Return the mouse button that generated the click event.\n (see QGraphicsSceneMouseEvent::button in the Qt documentation)\n ' return self._button
def button(self): 'Return the mouse button that generated the click event.\n (see QGraphicsSceneMouseEvent::button in the Qt documentation)\n ' return self._button<|docstring|>Return the mouse button that generated the click event. (see QGraphicsSceneMouseEvent::button in the Qt documentation)<|endoftext|>
a79024a67922e866aba5fa7553d4f8f838200c6be84b0395928c37186a160b0d
def double(self): 'Return True if this is a double-click.' return self._double
Return True if this is a double-click.
pyqtgraph/GraphicsScene/mouseEvents.py
double
msmttchr/pyqtgraph
2,762
python
def double(self): return self._double
def double(self): return self._double<|docstring|>Return True if this is a double-click.<|endoftext|>
20949bad1e6f2ee66f480414b348fbe00e5d23626a6512946421795d0fc6325e
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))
Return the current position of the mouse in the coordinate system of the item that the event was delivered to.
pyqtgraph/GraphicsScene/mouseEvents.py
pos
msmttchr/pyqtgraph
2,762
python
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))<|docstring|>Return the current position of the mouse in the coordinate system of the item that the event was delivered to.<|endoftext|>
95326be536bf7c85ec7e82e17c7c0ab0332b4e0e6fca565b33c8490b54cd95ed
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))
Return the previous position of the mouse in the coordinate system of the item that the event was delivered to.
pyqtgraph/GraphicsScene/mouseEvents.py
lastPos
msmttchr/pyqtgraph
2,762
python
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))<|docstring|>Return the previous position of the mouse in the coordinate system of the item that the event was delivered to.<|endoftext|>
001f952aab6f59a6a55ec9a4e001ad99fb8e8aa45249c84a1c3807a0a1798bd9
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation) \n ' return self._modifiers
Return any keyboard modifiers currently pressed. (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
modifiers
msmttchr/pyqtgraph
2,762
python
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation) \n ' return self._modifiers
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation) \n ' return self._modifiers<|docstring|>Return any keyboard modifiers currently pressed. (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)<|endoftext|>
6b5806019e76a2c4133edc6434dadbbc3496681e1b68a4bea730762947627b07
def isEnter(self): "Returns True if the mouse has just entered the item's shape" return self.enter
Returns True if the mouse has just entered the item's shape
pyqtgraph/GraphicsScene/mouseEvents.py
isEnter
msmttchr/pyqtgraph
2,762
python
def isEnter(self): return self.enter
def isEnter(self): return self.enter<|docstring|>Returns True if the mouse has just entered the item's shape<|endoftext|>
71936f7f22776bd5c74f18c03826738829176b4a3a96d8cd2e1265fe7ceaaea4
def isExit(self): "Returns True if the mouse has just exited the item's shape" return self.exit
Returns True if the mouse has just exited the item's shape
pyqtgraph/GraphicsScene/mouseEvents.py
isExit
msmttchr/pyqtgraph
2,762
python
def isExit(self): return self.exit
def isExit(self): return self.exit<|docstring|>Returns True if the mouse has just exited the item's shape<|endoftext|>
85f24d6df3789c0c182a3af1df9b1d9c98f103c13da0201946a4cf7c99ab1adb
def acceptClicks(self, button): 'Inform the scene that the item (that the event was delivered to)\n would accept a mouse click event if the user were to click before\n moving the mouse again.\n \n Returns True if the request is successful, otherwise returns False (indicating\n that some other item would receive an incoming click).\n ' if (not self.acceptable): return False if (button not in self.__clickItems): self.__clickItems[button] = self.currentItem return True return False
Inform the scene that the item (that the event was delivered to) would accept a mouse click event if the user were to click before moving the mouse again. Returns True if the request is successful, otherwise returns False (indicating that some other item would receive an incoming click).
pyqtgraph/GraphicsScene/mouseEvents.py
acceptClicks
msmttchr/pyqtgraph
2,762
python
def acceptClicks(self, button): 'Inform the scene that the item (that the event was delivered to)\n would accept a mouse click event if the user were to click before\n moving the mouse again.\n \n Returns True if the request is successful, otherwise returns False (indicating\n that some other item would receive an incoming click).\n ' if (not self.acceptable): return False if (button not in self.__clickItems): self.__clickItems[button] = self.currentItem return True return False
def acceptClicks(self, button): 'Inform the scene that the item (that the event was delivered to)\n would accept a mouse click event if the user were to click before\n moving the mouse again.\n \n Returns True if the request is successful, otherwise returns False (indicating\n that some other item would receive an incoming click).\n ' if (not self.acceptable): return False if (button not in self.__clickItems): self.__clickItems[button] = self.currentItem return True return False<|docstring|>Inform the scene that the item (that the event was delivered to) would accept a mouse click event if the user were to click before moving the mouse again. Returns True if the request is successful, otherwise returns False (indicating that some other item would receive an incoming click).<|endoftext|>
005db0ff96c35c1b25fbedf6ee7b5fb660db1bc544a69a2e96b6f4c8e663119c
def acceptDrags(self, button): 'Inform the scene that the item (that the event was delivered to)\n would accept a mouse drag event if the user were to drag before\n the next hover event.\n \n Returns True if the request is successful, otherwise returns False (indicating\n that some other item would receive an incoming drag event).\n ' if (not self.acceptable): return False if (button not in self.__dragItems): self.__dragItems[button] = self.currentItem return True return False
Inform the scene that the item (that the event was delivered to) would accept a mouse drag event if the user were to drag before the next hover event. Returns True if the request is successful, otherwise returns False (indicating that some other item would receive an incoming drag event).
pyqtgraph/GraphicsScene/mouseEvents.py
acceptDrags
msmttchr/pyqtgraph
2,762
python
def acceptDrags(self, button): 'Inform the scene that the item (that the event was delivered to)\n would accept a mouse drag event if the user were to drag before\n the next hover event.\n \n Returns True if the request is successful, otherwise returns False (indicating\n that some other item would receive an incoming drag event).\n ' if (not self.acceptable): return False if (button not in self.__dragItems): self.__dragItems[button] = self.currentItem return True return False
def acceptDrags(self, button): 'Inform the scene that the item (that the event was delivered to)\n would accept a mouse drag event if the user were to drag before\n the next hover event.\n \n Returns True if the request is successful, otherwise returns False (indicating\n that some other item would receive an incoming drag event).\n ' if (not self.acceptable): return False if (button not in self.__dragItems): self.__dragItems[button] = self.currentItem return True return False<|docstring|>Inform the scene that the item (that the event was delivered to) would accept a mouse drag event if the user were to drag before the next hover event. Returns True if the request is successful, otherwise returns False (indicating that some other item would receive an incoming drag event).<|endoftext|>
02d61c74674f5c05d62a4d830b610030d5fe86d9b0f23535d2401619759ca71f
def scenePos(self): 'Return the current scene position of the mouse.' return Point(self._scenePos)
Return the current scene position of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
scenePos
msmttchr/pyqtgraph
2,762
python
def scenePos(self): return Point(self._scenePos)
def scenePos(self): return Point(self._scenePos)<|docstring|>Return the current scene position of the mouse.<|endoftext|>
e0c24e920c763cf1773f3ff9756451cb1163ed486fe5d7b062adeaf0205767db
def screenPos(self): 'Return the current screen position of the mouse.' return Point(self._screenPos)
Return the current screen position of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
screenPos
msmttchr/pyqtgraph
2,762
python
def screenPos(self): return Point(self._screenPos)
def screenPos(self): return Point(self._screenPos)<|docstring|>Return the current screen position of the mouse.<|endoftext|>
6eedc65c54bbabf41076907789e1bbcfcfde4386e0b3d5394f289338490164b5
def lastScenePos(self): 'Return the previous scene position of the mouse.' return Point(self._lastScenePos)
Return the previous scene position of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
lastScenePos
msmttchr/pyqtgraph
2,762
python
def lastScenePos(self): return Point(self._lastScenePos)
def lastScenePos(self): return Point(self._lastScenePos)<|docstring|>Return the previous scene position of the mouse.<|endoftext|>
54f15cb33d2d819095c0b9c6b1787ffab958a234e55f647e424cac2a7b97bf0f
def lastScreenPos(self): 'Return the previous screen position of the mouse.' return Point(self._lastScreenPos)
Return the previous screen position of the mouse.
pyqtgraph/GraphicsScene/mouseEvents.py
lastScreenPos
msmttchr/pyqtgraph
2,762
python
def lastScreenPos(self): return Point(self._lastScreenPos)
def lastScreenPos(self): return Point(self._lastScreenPos)<|docstring|>Return the previous screen position of the mouse.<|endoftext|>
cdb4eba20c2acd86a06d2130e3493072dec641cc96383e3e7f58f298cd829872
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons
Return the buttons currently pressed on the mouse. (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
buttons
msmttchr/pyqtgraph
2,762
python
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons
def buttons(self): '\n Return the buttons currently pressed on the mouse.\n (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)\n ' return self._buttons<|docstring|>Return the buttons currently pressed on the mouse. (see QGraphicsSceneMouseEvent::buttons in the Qt documentation)<|endoftext|>
20949bad1e6f2ee66f480414b348fbe00e5d23626a6512946421795d0fc6325e
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))
Return the current position of the mouse in the coordinate system of the item that the event was delivered to.
pyqtgraph/GraphicsScene/mouseEvents.py
pos
msmttchr/pyqtgraph
2,762
python
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))
def pos(self): '\n Return the current position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._scenePos))<|docstring|>Return the current position of the mouse in the coordinate system of the item that the event was delivered to.<|endoftext|>
95326be536bf7c85ec7e82e17c7c0ab0332b4e0e6fca565b33c8490b54cd95ed
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))
Return the previous position of the mouse in the coordinate system of the item that the event was delivered to.
pyqtgraph/GraphicsScene/mouseEvents.py
lastPos
msmttchr/pyqtgraph
2,762
python
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))
def lastPos(self): '\n Return the previous position of the mouse in the coordinate system of the item\n that the event was delivered to.\n ' return Point(self.currentItem.mapFromScene(self._lastScenePos))<|docstring|>Return the previous position of the mouse in the coordinate system of the item that the event was delivered to.<|endoftext|>
001f952aab6f59a6a55ec9a4e001ad99fb8e8aa45249c84a1c3807a0a1798bd9
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation) \n ' return self._modifiers
Return any keyboard modifiers currently pressed. (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)
pyqtgraph/GraphicsScene/mouseEvents.py
modifiers
msmttchr/pyqtgraph
2,762
python
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation) \n ' return self._modifiers
def modifiers(self): 'Return any keyboard modifiers currently pressed.\n (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation) \n ' return self._modifiers<|docstring|>Return any keyboard modifiers currently pressed. (see QGraphicsSceneMouseEvent::modifiers in the Qt documentation)<|endoftext|>
d08719fecebcc11bcad645ed085087d15854210bbf65a3bf02bf415695f3ee84
def get_all_orders(self): ' Returns all open orders for the current pair\n ' final_bids = list(map((lambda x: [x[PRICE], x[AMOUNT]]), self.orderbook.bids)) final_asks = list(map((lambda x: [x[PRICE], x[AMOUNT]]), self.orderbook.asks)) return {'asks': final_asks, 'bids': final_bids}
Returns all open orders for the current pair
merkato/exchanges/test_exchange/exchange.py
get_all_orders
esperantomerkato/tuner
0
python
def get_all_orders(self): ' \n ' final_bids = list(map((lambda x: [x[PRICE], x[AMOUNT]]), self.orderbook.bids)) final_asks = list(map((lambda x: [x[PRICE], x[AMOUNT]]), self.orderbook.asks)) return {'asks': final_asks, 'bids': final_bids}
def get_all_orders(self): ' \n ' final_bids = list(map((lambda x: [x[PRICE], x[AMOUNT]]), self.orderbook.bids)) final_asks = list(map((lambda x: [x[PRICE], x[AMOUNT]]), self.orderbook.asks)) return {'asks': final_asks, 'bids': final_bids}<|docstring|>Returns all open orders for the current pair<|endoftext|>
a163c89f2aae53baaaa48cd5068ae16661f5c48bbb5c385cc4c178099babb091
def get_my_open_orders(self, context_formatted=True): ' Returns all open orders for the authenticated user ' my_filtered_bids = list(filter((lambda order: (order[USER_ID] == self.user_id)), self.orderbook.bids)) my_filtered_asks = list(filter((lambda order: (order[USER_ID] == self.user_id)), self.orderbook.asks)) combined_orders = [] combined_orders.extend(my_filtered_asks) combined_orders.extend(my_filtered_bids) my_open_orders = {} for order in combined_orders: order_id = order['id'] my_open_orders[order_id] = order return my_open_orders
Returns all open orders for the authenticated user
merkato/exchanges/test_exchange/exchange.py
get_my_open_orders
esperantomerkato/tuner
0
python
def get_my_open_orders(self, context_formatted=True): ' ' my_filtered_bids = list(filter((lambda order: (order[USER_ID] == self.user_id)), self.orderbook.bids)) my_filtered_asks = list(filter((lambda order: (order[USER_ID] == self.user_id)), self.orderbook.asks)) combined_orders = [] combined_orders.extend(my_filtered_asks) combined_orders.extend(my_filtered_bids) my_open_orders = {} for order in combined_orders: order_id = order['id'] my_open_orders[order_id] = order return my_open_orders
def get_my_open_orders(self, context_formatted=True): ' ' my_filtered_bids = list(filter((lambda order: (order[USER_ID] == self.user_id)), self.orderbook.bids)) my_filtered_asks = list(filter((lambda order: (order[USER_ID] == self.user_id)), self.orderbook.asks)) combined_orders = [] combined_orders.extend(my_filtered_asks) combined_orders.extend(my_filtered_bids) my_open_orders = {} for order in combined_orders: order_id = order['id'] my_open_orders[order_id] = order return my_open_orders<|docstring|>Returns all open orders for the authenticated user<|endoftext|>
982f0ad461598855b278668405c47d313cc5254bc84f0d68773e134d1c7860a6
def cancel_order(self, order_id): ' Cancels the order with the specified order ID\n :param order_id: string\n ' return ''
Cancels the order with the specified order ID :param order_id: string
merkato/exchanges/test_exchange/exchange.py
cancel_order
esperantomerkato/tuner
0
python
def cancel_order(self, order_id): ' Cancels the order with the specified order ID\n :param order_id: string\n ' return
def cancel_order(self, order_id): ' Cancels the order with the specified order ID\n :param order_id: string\n ' return <|docstring|>Cancels the order with the specified order ID :param order_id: string<|endoftext|>
03731db3dae8bbef8730aa82f4b64edabd472590b4e75f65d06a6743a9121a95
def get_ticker(self): ' Returns the current ticker data for the target coin.\n ' return ''
Returns the current ticker data for the target coin.
merkato/exchanges/test_exchange/exchange.py
get_ticker
esperantomerkato/tuner
0
python
def get_ticker(self): ' \n ' return
def get_ticker(self): ' \n ' return <|docstring|>Returns the current ticker data for the target coin.<|endoftext|>
0dace835b7af350c9849dcdc9d9562ceea494c2e033482a835f70aa57fe99c45
def get_24h_volume(self): ' Returns the 24 hour volume for the target coin.\n ' return ''
Returns the 24 hour volume for the target coin.
merkato/exchanges/test_exchange/exchange.py
get_24h_volume
esperantomerkato/tuner
0
python
def get_24h_volume(self): ' \n ' return
def get_24h_volume(self): ' \n ' return <|docstring|>Returns the 24 hour volume for the target coin.<|endoftext|>
0216c0ed2dcf434f629da56edd757586aee2edd92dff3e59481163e5fdea5a83
def test_replace_parts_doctest() -> None: "\n\n # check Path like\n >>> pathlib.PurePath('test/test/test').replace_parts('test','testnew/testnew', 1)\n Pure...Path('testnew/testnew/test/test')\n\n >>> new = pathlib.PurePath('new1/new2/new3/new4')\n\n >>> # Test Source Path = relative PurePath\n >>> source_path = pathlib.PurePath('./test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n Pure...Path('test/new1/new2/new3/new4/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test/test'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> # Test Source Path = absolute PosixPath\n >>> source_path = pathlib.PurePosixPath('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n Pure...Path('/test/new1/new2/new3/new4/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test/test'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n Pure...Path('new1/new2/new3/new4/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> # Test Source Path = absolute WindowsPath\n >>> source_path = pathlib.PureWindowsPath(r'C:\\test\\Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n PureWindowsPath('C:/test/new1/new2/new3/new4/test3')\n\n >>> # this will be replaced because of windows case folding, it is correct !\n >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new)\n PureWindowsPath('C:/test/new1/new2/new3/new4/test3')\n\n >>> 
source_path.replace_parts(pathlib.PurePath('test/test'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n # this might be unexpected but correct - we make a relative path out of an absolute path\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\\test\\Test1'), new)\n PureWindowsPath('new1/new2/new3/new4/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\\test\\Test1'), pathlib.PureWindowsPath(r'd:\\new'))\n PureWindowsPath('d:/new/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\\test\\test1'), pathlib.PureWindowsPath(r'D:\\new'))\n PureWindowsPath('D:/new/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\\test\\test'), pathlib.PureWindowsPath(r'D:\\new'))\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> # check count\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 3)\n Pure...Path('testnew/testnew/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 0)\n Pure...Path('test/test/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew/test/test')\n >>> pathlib.PurePath('test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew')\n\n " pass
# check Path like >>> pathlib.PurePath('test/test/test').replace_parts('test','testnew/testnew', 1) Pure...Path('testnew/testnew/test/test') >>> new = pathlib.PurePath('new1/new2/new3/new4') >>> # Test Source Path = relative PurePath >>> source_path = pathlib.PurePath('./test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new) Pure...Path('test/new1/new2/new3/new4/test3') >>> source_path.replace_parts(pathlib.PurePath('test/test'), new) Pure...Path('test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new) Pure...Path('test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new) Pure...Path('test/Test1/test2/test3') >>> # Test Source Path = absolute PosixPath >>> source_path = pathlib.PurePosixPath('/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new) Pure...Path('/test/new1/new2/new3/new4/test3') >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new) Pure...Path('/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('test/test'), new) Pure...Path('/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new) Pure...Path('new1/new2/new3/new4/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new) Pure...Path('/test/Test1/test2/test3') >>> # Test Source Path = absolute WindowsPath >>> source_path = pathlib.PureWindowsPath(r'C:\test\Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new) PureWindowsPath('C:/test/new1/new2/new3/new4/test3') >>> # this will be replaced because of windows case folding, it is correct ! 
>>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new) PureWindowsPath('C:/test/new1/new2/new3/new4/test3') >>> source_path.replace_parts(pathlib.PurePath('test/test'), new) PureWindowsPath('C:/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new) PureWindowsPath('C:/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new) PureWindowsPath('C:/test/Test1/test2/test3') # this might be unexpected but correct - we make a relative path out of an absolute path >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\test\Test1'), new) PureWindowsPath('new1/new2/new3/new4/test2/test3') >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\test\Test1'), pathlib.PureWindowsPath(r'd:\new')) PureWindowsPath('d:/new/test2/test3') >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\test\test1'), pathlib.PureWindowsPath(r'D:\new')) PureWindowsPath('D:/new/test2/test3') >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\test\test'), pathlib.PureWindowsPath(r'D:\new')) PureWindowsPath('C:/test/Test1/test2/test3') >>> # check count >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 3) Pure...Path('testnew/testnew/test') >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 1) Pure...Path('testnew/testnew/test') >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 0) Pure...Path('test/test/test') >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1) Pure...Path('testnew/testnew/test/test') >>> pathlib.PurePath('test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1) Pure...Path('testnew/testnew')
tests/test_pathlib.py
test_replace_parts_doctest
bitranox/pathlib3x
7
python
def test_replace_parts_doctest() -> None: "\n\n # check Path like\n >>> pathlib.PurePath('test/test/test').replace_parts('test','testnew/testnew', 1)\n Pure...Path('testnew/testnew/test/test')\n\n >>> new = pathlib.PurePath('new1/new2/new3/new4')\n\n >>> # Test Source Path = relative PurePath\n >>> source_path = pathlib.PurePath('./test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n Pure...Path('test/new1/new2/new3/new4/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test/test'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> # Test Source Path = absolute PosixPath\n >>> source_path = pathlib.PurePosixPath('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n Pure...Path('/test/new1/new2/new3/new4/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test/test'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n Pure...Path('new1/new2/new3/new4/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> # Test Source Path = absolute WindowsPath\n >>> source_path = pathlib.PureWindowsPath(r'C:\\test\\Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n PureWindowsPath('C:/test/new1/new2/new3/new4/test3')\n\n >>> # this will be replaced because of windows case folding, it is correct !\n >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new)\n PureWindowsPath('C:/test/new1/new2/new3/new4/test3')\n\n >>> 
source_path.replace_parts(pathlib.PurePath('test/test'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n # this might be unexpected but correct - we make a relative path out of an absolute path\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\\test\\Test1'), new)\n PureWindowsPath('new1/new2/new3/new4/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\\test\\Test1'), pathlib.PureWindowsPath(r'd:\\new'))\n PureWindowsPath('d:/new/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\\test\\test1'), pathlib.PureWindowsPath(r'D:\\new'))\n PureWindowsPath('D:/new/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\\test\\test'), pathlib.PureWindowsPath(r'D:\\new'))\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> # check count\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 3)\n Pure...Path('testnew/testnew/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 0)\n Pure...Path('test/test/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew/test/test')\n >>> pathlib.PurePath('test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew')\n\n " pass
def test_replace_parts_doctest() -> None: "\n\n # check Path like\n >>> pathlib.PurePath('test/test/test').replace_parts('test','testnew/testnew', 1)\n Pure...Path('testnew/testnew/test/test')\n\n >>> new = pathlib.PurePath('new1/new2/new3/new4')\n\n >>> # Test Source Path = relative PurePath\n >>> source_path = pathlib.PurePath('./test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n Pure...Path('test/new1/new2/new3/new4/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test/test'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n Pure...Path('test/Test1/test2/test3')\n\n >>> # Test Source Path = absolute PosixPath\n >>> source_path = pathlib.PurePosixPath('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n Pure...Path('/test/new1/new2/new3/new4/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('test/test'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n Pure...Path('new1/new2/new3/new4/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n Pure...Path('/test/Test1/test2/test3')\n\n >>> # Test Source Path = absolute WindowsPath\n >>> source_path = pathlib.PureWindowsPath(r'C:\\test\\Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new)\n PureWindowsPath('C:/test/new1/new2/new3/new4/test3')\n\n >>> # this will be replaced because of windows case folding, it is correct !\n >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new)\n PureWindowsPath('C:/test/new1/new2/new3/new4/test3')\n\n >>> 
source_path.replace_parts(pathlib.PurePath('test/test'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new)\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n # this might be unexpected but correct - we make a relative path out of an absolute path\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\\test\\Test1'), new)\n PureWindowsPath('new1/new2/new3/new4/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\\test\\Test1'), pathlib.PureWindowsPath(r'd:\\new'))\n PureWindowsPath('d:/new/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\\test\\test1'), pathlib.PureWindowsPath(r'D:\\new'))\n PureWindowsPath('D:/new/test2/test3')\n\n >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\\test\\test'), pathlib.PureWindowsPath(r'D:\\new'))\n PureWindowsPath('C:/test/Test1/test2/test3')\n\n >>> # check count\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 3)\n Pure...Path('testnew/testnew/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 0)\n Pure...Path('test/test/test')\n >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew/test/test')\n >>> pathlib.PurePath('test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1)\n Pure...Path('testnew/testnew')\n\n " pass<|docstring|># check Path like >>> pathlib.PurePath('test/test/test').replace_parts('test','testnew/testnew', 1) 
Pure...Path('testnew/testnew/test/test') >>> new = pathlib.PurePath('new1/new2/new3/new4') >>> # Test Source Path = relative PurePath >>> source_path = pathlib.PurePath('./test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new) Pure...Path('test/new1/new2/new3/new4/test3') >>> source_path.replace_parts(pathlib.PurePath('test/test'), new) Pure...Path('test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new) Pure...Path('test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new) Pure...Path('test/Test1/test2/test3') >>> # Test Source Path = absolute PosixPath >>> source_path = pathlib.PurePosixPath('/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new) Pure...Path('/test/new1/new2/new3/new4/test3') >>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new) Pure...Path('/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('test/test'), new) Pure...Path('/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new) Pure...Path('new1/new2/new3/new4/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new) Pure...Path('/test/Test1/test2/test3') >>> # Test Source Path = absolute WindowsPath >>> source_path = pathlib.PureWindowsPath(r'C:\test\Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePath('Test1/test2'), new) PureWindowsPath('C:/test/new1/new2/new3/new4/test3') >>> # this will be replaced because of windows case folding, it is correct ! 
>>> source_path.replace_parts(pathlib.PurePath('test1/test2'), new) PureWindowsPath('C:/test/new1/new2/new3/new4/test3') >>> source_path.replace_parts(pathlib.PurePath('test/test'), new) PureWindowsPath('C:/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/Test1'), new) PureWindowsPath('C:/test/Test1/test2/test3') >>> source_path.replace_parts(pathlib.PurePosixPath('/test/test1'), new) PureWindowsPath('C:/test/Test1/test2/test3') # this might be unexpected but correct - we make a relative path out of an absolute path >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\test\Test1'), new) PureWindowsPath('new1/new2/new3/new4/test2/test3') >>> source_path.replace_parts(pathlib.PureWindowsPath(r'C:\test\Test1'), pathlib.PureWindowsPath(r'd:\new')) PureWindowsPath('d:/new/test2/test3') >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\test\test1'), pathlib.PureWindowsPath(r'D:\new')) PureWindowsPath('D:/new/test2/test3') >>> source_path.replace_parts(pathlib.PureWindowsPath(r'c:\test\test'), pathlib.PureWindowsPath(r'D:\new')) PureWindowsPath('C:/test/Test1/test2/test3') >>> # check count >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 3) Pure...Path('testnew/testnew/test') >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 1) Pure...Path('testnew/testnew/test') >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test/test'), pathlib.PurePath('testnew/testnew'), 0) Pure...Path('test/test/test') >>> pathlib.PurePath('test/test/test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1) Pure...Path('testnew/testnew/test/test') >>> pathlib.PurePath('test').replace_parts(pathlib.PurePath('test'), pathlib.PurePath('testnew/testnew'), 1) Pure...Path('testnew/testnew')<|endoftext|>
d3964986c25b9f4162127214114f3c2543ce5416165631425b6ebd514c7c397c
def test_shutil_wrappers() -> None: ' test the shutil wrappers ' path_test_dir = pathlib.Path(__file__).parent.resolve() path_test_file = (path_test_dir / 'test.txt') path_target_file = (path_test_dir / 'test_target.txt') path_test_tree = (path_test_dir / 'test_treecopy') path_test_tree_target = (path_test_dir / 'test_treecopy_target') path_test_file.copy(path_target_file) path_test_file.copy2(path_target_file) path_test_file.copyfile(path_target_file) path_test_file.copymode(path_target_file) path_test_file.copystat(path_target_file) path_test_tree.copytree(path_test_tree_target) path_test_tree_target.rmtree() path_target_file.unlink()
test the shutil wrappers
tests/test_pathlib.py
test_shutil_wrappers
bitranox/pathlib3x
7
python
def test_shutil_wrappers() -> None: ' ' path_test_dir = pathlib.Path(__file__).parent.resolve() path_test_file = (path_test_dir / 'test.txt') path_target_file = (path_test_dir / 'test_target.txt') path_test_tree = (path_test_dir / 'test_treecopy') path_test_tree_target = (path_test_dir / 'test_treecopy_target') path_test_file.copy(path_target_file) path_test_file.copy2(path_target_file) path_test_file.copyfile(path_target_file) path_test_file.copymode(path_target_file) path_test_file.copystat(path_target_file) path_test_tree.copytree(path_test_tree_target) path_test_tree_target.rmtree() path_target_file.unlink()
def test_shutil_wrappers() -> None: ' ' path_test_dir = pathlib.Path(__file__).parent.resolve() path_test_file = (path_test_dir / 'test.txt') path_target_file = (path_test_dir / 'test_target.txt') path_test_tree = (path_test_dir / 'test_treecopy') path_test_tree_target = (path_test_dir / 'test_treecopy_target') path_test_file.copy(path_target_file) path_test_file.copy2(path_target_file) path_test_file.copyfile(path_target_file) path_test_file.copymode(path_target_file) path_test_file.copystat(path_target_file) path_test_tree.copytree(path_test_tree_target) path_test_tree_target.rmtree() path_target_file.unlink()<|docstring|>test the shutil wrappers<|endoftext|>
577cef0ee77b2d73ec4957bc72f2c61a53dac33fec3e8b76716a816be4840893
def train_student(self, epochs=10, plot_losses=True, save_model=True, save_model_path='./models/student.pt', use_scheduler=False, smooth_teacher=True): '\n Function that will be training the student\n\n :param epochs (int): Number of epochs you want to train the teacher\n :param plot_losses (bool): True if you want to plot the losses\n :param save_model (bool): True if you want to save the student model\n :param save_model_pth (str): Path where you want to save the student model\n :param use_scheduler (bool): True to use OneCycleLR during training\n :param smooth_teacher (bool): True to apply temperature smoothing and Softmax to virtual teacher\n ' self.student_model.train() loss_arr = [] length_of_dataset = len(self.train_loader.dataset) best_acc = 0.0 self.best_student_model_weights = deepcopy(self.student_model.state_dict()) if use_scheduler: optim_lr = self.optimizer_student.param_groups[0]['lr'] scheduler_student = torch.optim.lr_scheduler.OneCycleLR(self.optimizer_student, max_lr=optim_lr, epochs=epochs, steps_per_epoch=len(self.train_loader), pct_start=0.1) save_dir = os.path.dirname(save_model_path) if (not os.path.exists(save_dir)): os.makedirs(save_dir) print('\nTraining student...') for ep in tqdm(range(epochs), position=0): epoch_loss = 0.0 correct = 0 student_ce_loss = [] student_divergence = [] student_entropy = [] student_calibration = [] epoch_len = int((length_of_dataset / self.train_loader.batch_size)) for (data, label) in self.train_loader: data = data.to(self.device) label = label.to(self.device) student_out = self.student_model(data) loss = self.calculate_kd_loss(student_out, label, smooth_teacher=smooth_teacher) if isinstance(loss, tuple): (loss, ce_loss, divergence) = loss student_ce_loss.append(ce_loss.item()) student_divergence.append(divergence.item()) if isinstance(student_out, tuple): student_out = student_out[0] student_calibration.append(self.ece_loss(student_out, label).item()) out_dist = Categorical(logits=student_out) entropy = 
out_dist.entropy().mean(dim=0) student_entropy.append(entropy.item()) pred = student_out.argmax(dim=1, keepdim=True) correct += pred.eq(label.view_as(pred)).sum().item() self.optimizer_student.zero_grad() loss.backward() self.optimizer_student.step() if use_scheduler: scheduler_student.step() epoch_loss += loss epoch_acc = (correct / length_of_dataset) epoch_val_acc = self.evaluate(verbose=False) if (epoch_val_acc > best_acc): best_acc = epoch_val_acc self.best_student_model_weights = deepcopy(self.student_model.state_dict()) if self.log: self.writer.add_scalar('Loss/Train student', epoch_loss, ep) self.writer.add_scalar('Accuracy/Train student', epoch_acc, ep) self.writer.add_scalar('Accuracy/Validation student', epoch_val_acc, ep) self.writer.add_scalar('Loss/Cross-entropy student', s.mean(student_ce_loss), ep) self.writer.add_scalar('Loss/Divergence student', s.mean(student_divergence), ep) self.writer.add_scalar('Loss/Entropy student', s.mean(student_entropy), ep) self.writer.add_scalar('Loss/Calibration student', s.mean(student_calibration), ep) if use_scheduler: self.writer.add_scalar('Optimizer/lr student', scheduler_student.get_last_lr()[0], ep) loss_arr.append(epoch_loss) self.student_model.load_state_dict(self.best_student_model_weights) if save_model: torch.save(self.student_model.state_dict(), os.path.join(save_model_path, 'student.pt')) if plot_losses: plt.plot(loss_arr)
Function that will be training the student :param epochs (int): Number of epochs you want to train the teacher :param plot_losses (bool): True if you want to plot the losses :param save_model (bool): True if you want to save the student model :param save_model_pth (str): Path where you want to save the student model :param use_scheduler (bool): True to use OneCycleLR during training :param smooth_teacher (bool): True to apply temperature smoothing and Softmax to virtual teacher
KD_Lib/KD/vision/teacher_free/virtual_teacher.py
train_student
PiaCuk/KD_Lib
0
python
def train_student(self, epochs=10, plot_losses=True, save_model=True, save_model_path='./models/student.pt', use_scheduler=False, smooth_teacher=True): '\n Function that will be training the student\n\n :param epochs (int): Number of epochs you want to train the teacher\n :param plot_losses (bool): True if you want to plot the losses\n :param save_model (bool): True if you want to save the student model\n :param save_model_pth (str): Path where you want to save the student model\n :param use_scheduler (bool): True to use OneCycleLR during training\n :param smooth_teacher (bool): True to apply temperature smoothing and Softmax to virtual teacher\n ' self.student_model.train() loss_arr = [] length_of_dataset = len(self.train_loader.dataset) best_acc = 0.0 self.best_student_model_weights = deepcopy(self.student_model.state_dict()) if use_scheduler: optim_lr = self.optimizer_student.param_groups[0]['lr'] scheduler_student = torch.optim.lr_scheduler.OneCycleLR(self.optimizer_student, max_lr=optim_lr, epochs=epochs, steps_per_epoch=len(self.train_loader), pct_start=0.1) save_dir = os.path.dirname(save_model_path) if (not os.path.exists(save_dir)): os.makedirs(save_dir) print('\nTraining student...') for ep in tqdm(range(epochs), position=0): epoch_loss = 0.0 correct = 0 student_ce_loss = [] student_divergence = [] student_entropy = [] student_calibration = [] epoch_len = int((length_of_dataset / self.train_loader.batch_size)) for (data, label) in self.train_loader: data = data.to(self.device) label = label.to(self.device) student_out = self.student_model(data) loss = self.calculate_kd_loss(student_out, label, smooth_teacher=smooth_teacher) if isinstance(loss, tuple): (loss, ce_loss, divergence) = loss student_ce_loss.append(ce_loss.item()) student_divergence.append(divergence.item()) if isinstance(student_out, tuple): student_out = student_out[0] student_calibration.append(self.ece_loss(student_out, label).item()) out_dist = Categorical(logits=student_out) entropy = 
out_dist.entropy().mean(dim=0) student_entropy.append(entropy.item()) pred = student_out.argmax(dim=1, keepdim=True) correct += pred.eq(label.view_as(pred)).sum().item() self.optimizer_student.zero_grad() loss.backward() self.optimizer_student.step() if use_scheduler: scheduler_student.step() epoch_loss += loss epoch_acc = (correct / length_of_dataset) epoch_val_acc = self.evaluate(verbose=False) if (epoch_val_acc > best_acc): best_acc = epoch_val_acc self.best_student_model_weights = deepcopy(self.student_model.state_dict()) if self.log: self.writer.add_scalar('Loss/Train student', epoch_loss, ep) self.writer.add_scalar('Accuracy/Train student', epoch_acc, ep) self.writer.add_scalar('Accuracy/Validation student', epoch_val_acc, ep) self.writer.add_scalar('Loss/Cross-entropy student', s.mean(student_ce_loss), ep) self.writer.add_scalar('Loss/Divergence student', s.mean(student_divergence), ep) self.writer.add_scalar('Loss/Entropy student', s.mean(student_entropy), ep) self.writer.add_scalar('Loss/Calibration student', s.mean(student_calibration), ep) if use_scheduler: self.writer.add_scalar('Optimizer/lr student', scheduler_student.get_last_lr()[0], ep) loss_arr.append(epoch_loss) self.student_model.load_state_dict(self.best_student_model_weights) if save_model: torch.save(self.student_model.state_dict(), os.path.join(save_model_path, 'student.pt')) if plot_losses: plt.plot(loss_arr)
def train_student(self, epochs=10, plot_losses=True, save_model=True, save_model_path='./models/student.pt', use_scheduler=False, smooth_teacher=True): '\n Function that will be training the student\n\n :param epochs (int): Number of epochs you want to train the teacher\n :param plot_losses (bool): True if you want to plot the losses\n :param save_model (bool): True if you want to save the student model\n :param save_model_pth (str): Path where you want to save the student model\n :param use_scheduler (bool): True to use OneCycleLR during training\n :param smooth_teacher (bool): True to apply temperature smoothing and Softmax to virtual teacher\n ' self.student_model.train() loss_arr = [] length_of_dataset = len(self.train_loader.dataset) best_acc = 0.0 self.best_student_model_weights = deepcopy(self.student_model.state_dict()) if use_scheduler: optim_lr = self.optimizer_student.param_groups[0]['lr'] scheduler_student = torch.optim.lr_scheduler.OneCycleLR(self.optimizer_student, max_lr=optim_lr, epochs=epochs, steps_per_epoch=len(self.train_loader), pct_start=0.1) save_dir = os.path.dirname(save_model_path) if (not os.path.exists(save_dir)): os.makedirs(save_dir) print('\nTraining student...') for ep in tqdm(range(epochs), position=0): epoch_loss = 0.0 correct = 0 student_ce_loss = [] student_divergence = [] student_entropy = [] student_calibration = [] epoch_len = int((length_of_dataset / self.train_loader.batch_size)) for (data, label) in self.train_loader: data = data.to(self.device) label = label.to(self.device) student_out = self.student_model(data) loss = self.calculate_kd_loss(student_out, label, smooth_teacher=smooth_teacher) if isinstance(loss, tuple): (loss, ce_loss, divergence) = loss student_ce_loss.append(ce_loss.item()) student_divergence.append(divergence.item()) if isinstance(student_out, tuple): student_out = student_out[0] student_calibration.append(self.ece_loss(student_out, label).item()) out_dist = Categorical(logits=student_out) entropy = 
out_dist.entropy().mean(dim=0) student_entropy.append(entropy.item()) pred = student_out.argmax(dim=1, keepdim=True) correct += pred.eq(label.view_as(pred)).sum().item() self.optimizer_student.zero_grad() loss.backward() self.optimizer_student.step() if use_scheduler: scheduler_student.step() epoch_loss += loss epoch_acc = (correct / length_of_dataset) epoch_val_acc = self.evaluate(verbose=False) if (epoch_val_acc > best_acc): best_acc = epoch_val_acc self.best_student_model_weights = deepcopy(self.student_model.state_dict()) if self.log: self.writer.add_scalar('Loss/Train student', epoch_loss, ep) self.writer.add_scalar('Accuracy/Train student', epoch_acc, ep) self.writer.add_scalar('Accuracy/Validation student', epoch_val_acc, ep) self.writer.add_scalar('Loss/Cross-entropy student', s.mean(student_ce_loss), ep) self.writer.add_scalar('Loss/Divergence student', s.mean(student_divergence), ep) self.writer.add_scalar('Loss/Entropy student', s.mean(student_entropy), ep) self.writer.add_scalar('Loss/Calibration student', s.mean(student_calibration), ep) if use_scheduler: self.writer.add_scalar('Optimizer/lr student', scheduler_student.get_last_lr()[0], ep) loss_arr.append(epoch_loss) self.student_model.load_state_dict(self.best_student_model_weights) if save_model: torch.save(self.student_model.state_dict(), os.path.join(save_model_path, 'student.pt')) if plot_losses: plt.plot(loss_arr)<|docstring|>Function that will be training the student :param epochs (int): Number of epochs you want to train the teacher :param plot_losses (bool): True if you want to plot the losses :param save_model (bool): True if you want to save the student model :param save_model_pth (str): Path where you want to save the student model :param use_scheduler (bool): True to use OneCycleLR during training :param smooth_teacher (bool): True to apply temperature smoothing and Softmax to virtual teacher<|endoftext|>
3aa8b4366b0b5f51774075919da81ee624afe8fb6e4e389a6c34406af5d16cd6
def calculate_kd_loss(self, y_pred_student, y_true, smooth_teacher=True): '\n Function used for calculating the KD loss during distillation\n\n :param y_pred_student (torch.FloatTensor): Prediction made by the student model\n :param y_true (torch.FloatTensor): Original label\n ' num_classes = y_pred_student.shape[1] virtual_teacher = torch.ones_like(y_pred_student, device=self.device) virtual_teacher = ((virtual_teacher * (1 - self.correct_prob)) / (num_classes - 1)) for i in range(y_pred_student.shape[0]): virtual_teacher[(i, y_true[i])] = self.correct_prob teacher_out = (F.softmax((virtual_teacher / self.temp), dim=1) if smooth_teacher else virtual_teacher) soft_student_out = F.log_softmax((y_pred_student / self.temp), dim=1) supervised = F.cross_entropy(y_pred_student, y_true) distillation = ((self.temp ** 2) * F.kl_div(input=soft_student_out, target=teacher_out, reduction='batchmean', log_target=False)) loss = (((1 - self.distil_weight) * supervised) + (self.distil_weight * distillation)) return (loss, supervised, distillation)
Function used for calculating the KD loss during distillation :param y_pred_student (torch.FloatTensor): Prediction made by the student model :param y_true (torch.FloatTensor): Original label
KD_Lib/KD/vision/teacher_free/virtual_teacher.py
calculate_kd_loss
PiaCuk/KD_Lib
0
python
def calculate_kd_loss(self, y_pred_student, y_true, smooth_teacher=True): '\n Function used for calculating the KD loss during distillation\n\n :param y_pred_student (torch.FloatTensor): Prediction made by the student model\n :param y_true (torch.FloatTensor): Original label\n ' num_classes = y_pred_student.shape[1] virtual_teacher = torch.ones_like(y_pred_student, device=self.device) virtual_teacher = ((virtual_teacher * (1 - self.correct_prob)) / (num_classes - 1)) for i in range(y_pred_student.shape[0]): virtual_teacher[(i, y_true[i])] = self.correct_prob teacher_out = (F.softmax((virtual_teacher / self.temp), dim=1) if smooth_teacher else virtual_teacher) soft_student_out = F.log_softmax((y_pred_student / self.temp), dim=1) supervised = F.cross_entropy(y_pred_student, y_true) distillation = ((self.temp ** 2) * F.kl_div(input=soft_student_out, target=teacher_out, reduction='batchmean', log_target=False)) loss = (((1 - self.distil_weight) * supervised) + (self.distil_weight * distillation)) return (loss, supervised, distillation)
def calculate_kd_loss(self, y_pred_student, y_true, smooth_teacher=True): '\n Function used for calculating the KD loss during distillation\n\n :param y_pred_student (torch.FloatTensor): Prediction made by the student model\n :param y_true (torch.FloatTensor): Original label\n ' num_classes = y_pred_student.shape[1] virtual_teacher = torch.ones_like(y_pred_student, device=self.device) virtual_teacher = ((virtual_teacher * (1 - self.correct_prob)) / (num_classes - 1)) for i in range(y_pred_student.shape[0]): virtual_teacher[(i, y_true[i])] = self.correct_prob teacher_out = (F.softmax((virtual_teacher / self.temp), dim=1) if smooth_teacher else virtual_teacher) soft_student_out = F.log_softmax((y_pred_student / self.temp), dim=1) supervised = F.cross_entropy(y_pred_student, y_true) distillation = ((self.temp ** 2) * F.kl_div(input=soft_student_out, target=teacher_out, reduction='batchmean', log_target=False)) loss = (((1 - self.distil_weight) * supervised) + (self.distil_weight * distillation)) return (loss, supervised, distillation)<|docstring|>Function used for calculating the KD loss during distillation :param y_pred_student (torch.FloatTensor): Prediction made by the student model :param y_true (torch.FloatTensor): Original label<|endoftext|>
b761255d9c737f06897baf714f44b4e46d313242592f832ff2a1b8ea4312903e
def evaluate(self, verbose=True): '\n Evaluate method for printing accuracies of the trained network\n\n ' model = deepcopy(self.student_model) model.eval() length_of_dataset = len(self.val_loader.dataset) correct = 0 with torch.no_grad(): for (data, target) in self.val_loader: data = data.to(self.device) target = target.to(self.device) output = model(data) if isinstance(output, tuple): output = output[0] pred = output.argmax(dim=1, keepdim=True) correct += pred.eq(target.view_as(pred)).sum().item() accuracy = (correct / length_of_dataset) if verbose: print(('-' * 80)) print(f'Accuracy: {accuracy}') return accuracy
Evaluate method for printing accuracies of the trained network
KD_Lib/KD/vision/teacher_free/virtual_teacher.py
evaluate
PiaCuk/KD_Lib
0
python
def evaluate(self, verbose=True): '\n \n\n ' model = deepcopy(self.student_model) model.eval() length_of_dataset = len(self.val_loader.dataset) correct = 0 with torch.no_grad(): for (data, target) in self.val_loader: data = data.to(self.device) target = target.to(self.device) output = model(data) if isinstance(output, tuple): output = output[0] pred = output.argmax(dim=1, keepdim=True) correct += pred.eq(target.view_as(pred)).sum().item() accuracy = (correct / length_of_dataset) if verbose: print(('-' * 80)) print(f'Accuracy: {accuracy}') return accuracy
def evaluate(self, verbose=True): '\n \n\n ' model = deepcopy(self.student_model) model.eval() length_of_dataset = len(self.val_loader.dataset) correct = 0 with torch.no_grad(): for (data, target) in self.val_loader: data = data.to(self.device) target = target.to(self.device) output = model(data) if isinstance(output, tuple): output = output[0] pred = output.argmax(dim=1, keepdim=True) correct += pred.eq(target.view_as(pred)).sum().item() accuracy = (correct / length_of_dataset) if verbose: print(('-' * 80)) print(f'Accuracy: {accuracy}') return accuracy<|docstring|>Evaluate method for printing accuracies of the trained network<|endoftext|>
b129fe547498ef75fd09d38aafeac293a544b583329db248a7e5c2cd7baad856
def get_parameters(self): '\n Get the number of parameters for the student network\n ' student_params = sum((p.numel() for p in self.student_model.parameters())) print(('-' * 80)) print(f'Total parameters for the student network are: {student_params}')
Get the number of parameters for the student network
KD_Lib/KD/vision/teacher_free/virtual_teacher.py
get_parameters
PiaCuk/KD_Lib
0
python
def get_parameters(self): '\n \n ' student_params = sum((p.numel() for p in self.student_model.parameters())) print(('-' * 80)) print(f'Total parameters for the student network are: {student_params}')
def get_parameters(self): '\n \n ' student_params = sum((p.numel() for p in self.student_model.parameters())) print(('-' * 80)) print(f'Total parameters for the student network are: {student_params}')<|docstring|>Get the number of parameters for the student network<|endoftext|>
4d8efc2019ddc6c267faafad8b7da506ddadb5e7ea6cad9851d3a88f6812bd78
def __init__(self, target_playlist_id, num_seed, k, len_threshold): "\n Parameters\n ----------\n target_playlist_id : int\n Unique identifier of a playlist for which we're trying to suggest songs\n num_seed : int\n Number of seed tracks used to create recommendations\n k : int\n The parameter for kNN algorithm in order to find top k most similar songs to the seed track\n " self.target_playlist_id = target_playlist_id self.num_seed = num_seed self.k = k self.len_threshold = len_threshold
Parameters ---------- target_playlist_id : int Unique identifier of a playlist for which we're trying to suggest songs num_seed : int Number of seed tracks used to create recommendations k : int The parameter for kNN algorithm in order to find top k most similar songs to the seed track
recommender/Individual/ItemCF.py
__init__
utkuarslan5/the_recommenders
1
python
def __init__(self, target_playlist_id, num_seed, k, len_threshold): "\n Parameters\n ----------\n target_playlist_id : int\n Unique identifier of a playlist for which we're trying to suggest songs\n num_seed : int\n Number of seed tracks used to create recommendations\n k : int\n The parameter for kNN algorithm in order to find top k most similar songs to the seed track\n " self.target_playlist_id = target_playlist_id self.num_seed = num_seed self.k = k self.len_threshold = len_threshold
def __init__(self, target_playlist_id, num_seed, k, len_threshold): "\n Parameters\n ----------\n target_playlist_id : int\n Unique identifier of a playlist for which we're trying to suggest songs\n num_seed : int\n Number of seed tracks used to create recommendations\n k : int\n The parameter for kNN algorithm in order to find top k most similar songs to the seed track\n " self.target_playlist_id = target_playlist_id self.num_seed = num_seed self.k = k self.len_threshold = len_threshold<|docstring|>Parameters ---------- target_playlist_id : int Unique identifier of a playlist for which we're trying to suggest songs num_seed : int Number of seed tracks used to create recommendations k : int The parameter for kNN algorithm in order to find top k most similar songs to the seed track<|endoftext|>
e54b94bdf139dff9656de7de9367f4befdec316c0165d777f60a1998010140e3
def get_seed_tracks(self, df): 'Randomly picks num_seed songs from the target playlist to serve as seed tracks. Song recommendations will be\n created based on the similarity to seed tracks.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n\n Returns\n -------\n seed_track_ids\n Unique song identifiers of seed tracks\n ' seed_track_ids = df.loc[(df.pid == self.target_playlist_id)].sample(n=self.num_seed).tid.tolist() return seed_track_ids
Randomly picks num_seed songs from the target playlist to serve as seed tracks. Song recommendations will be created based on the similarity to seed tracks. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs Returns ------- seed_track_ids Unique song identifiers of seed tracks
recommender/Individual/ItemCF.py
get_seed_tracks
utkuarslan5/the_recommenders
1
python
def get_seed_tracks(self, df): 'Randomly picks num_seed songs from the target playlist to serve as seed tracks. Song recommendations will be\n created based on the similarity to seed tracks.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n\n Returns\n -------\n seed_track_ids\n Unique song identifiers of seed tracks\n ' seed_track_ids = df.loc[(df.pid == self.target_playlist_id)].sample(n=self.num_seed).tid.tolist() return seed_track_ids
def get_seed_tracks(self, df): 'Randomly picks num_seed songs from the target playlist to serve as seed tracks. Song recommendations will be\n created based on the similarity to seed tracks.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n\n Returns\n -------\n seed_track_ids\n Unique song identifiers of seed tracks\n ' seed_track_ids = df.loc[(df.pid == self.target_playlist_id)].sample(n=self.num_seed).tid.tolist() return seed_track_ids<|docstring|>Randomly picks num_seed songs from the target playlist to serve as seed tracks. Song recommendations will be created based on the similarity to seed tracks. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs Returns ------- seed_track_ids Unique song identifiers of seed tracks<|endoftext|>
aa0dce26af16fc89a79579e82c532edb9b5f6c39dca1cf3e12f96b93629c8327
def filter_by_playlist_length(self, df):
    """Keep only playlists containing more than ``len_threshold`` songs.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing full information about playlists and songs.

    Returns
    -------
    DataFrame
        Copy of ``df`` restricted to playlists longer than ``len_threshold``,
        in the original row order.
    """
    # Per-row group size aligned with df's index; equivalent to
    # GroupBy.filter(lambda x: len(x) > threshold) but without the
    # per-group Python callback.
    playlist_sizes = df.groupby('pid')['pid'].transform('size')
    return df[playlist_sizes > self.len_threshold].copy()
Filters a given dataframe to contain only playlist with more than len_threshold songs. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs len_threshold : int Threshold used to filter a dataframe based on playlist length Returns ------- filtered_df Filtered dataframe containing only playlists longer than len_threshold
recommender/Individual/ItemCF.py
filter_by_playlist_length
utkuarslan5/the_recommenders
1
python
def filter_by_playlist_length(self, df): 'Filters a given dataframe to contain only playlist with more than len_threshold songs.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n len_threshold : int\n Threshold used to filter a dataframe based on playlist length\n\n Returns\n -------\n filtered_df\n Filtered dataframe containing only playlists longer than len_threshold\n ' filtered_df = df.groupby('pid').filter((lambda x: (len(x) > self.len_threshold))).copy() return filtered_df
def filter_by_playlist_length(self, df): 'Filters a given dataframe to contain only playlist with more than len_threshold songs.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n len_threshold : int\n Threshold used to filter a dataframe based on playlist length\n\n Returns\n -------\n filtered_df\n Filtered dataframe containing only playlists longer than len_threshold\n ' filtered_df = df.groupby('pid').filter((lambda x: (len(x) > self.len_threshold))).copy() return filtered_df<|docstring|>Filters a given dataframe to contain only playlist with more than len_threshold songs. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs len_threshold : int Threshold used to filter a dataframe based on playlist length Returns ------- filtered_df Filtered dataframe containing only playlists longer than len_threshold<|endoftext|>
f53fe8e747c4a412ef37e64f00e445dde7167657f20b00e852dd6020ac1f7f78
def create_playlist_song_matrix(self, df):
    """Build a song-by-playlist count matrix from the long-format dataframe.

    Rows are unique song identifiers (``tid``) and columns are unique
    playlist identifiers (``pid``); an entry holds the ``count`` value for
    that song/playlist pair, and 0 when the song is absent from the playlist.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing full information about playlists and songs.

    Returns
    -------
    DataFrame
        Dense dataframe backing the playlist-song matrix.
    """
    # Freeze category order so row/column positions are reproducible for
    # the same input dataframe.
    pid_cat = CategoricalDtype(df['pid'].unique(), ordered=True)
    tid_cat = CategoricalDtype(df['tid'].unique(), ordered=True)
    # Category codes serve as sparse-matrix coordinates.
    row_idx = df['tid'].astype(tid_cat).cat.codes
    col_idx = df['pid'].astype(pid_cat).cat.codes
    shape = (tid_cat.categories.size, pid_cat.categories.size)
    counts = csr_matrix((df['count'], (row_idx, col_idx)), shape=shape)
    return pd.DataFrame(counts.todense(), index=tid_cat.categories, columns=pid_cat.categories)
Creates a playlist-song matrix with unique song identifiers as rows and unique playlist identifiers as columns. An entry of the matrix holds the count value for that song/playlist pair, and 0 when the song is not in the playlist. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs Returns ------- pt_df Dataframe which stores a playlist-song matrix
recommender/Individual/ItemCF.py
create_playlist_song_matrix
utkuarslan5/the_recommenders
1
python
def create_playlist_song_matrix(self, df): 'Creates a binary playlist-song matrix with unique playlist identifiers as rows and unique song identifiers\n as columns. An entry of a matrix is 1 if a song is contained in the playlist, 0 otherwise.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n\n Returns\n -------\n pt_df\n Dataframe which stores a binary playlist-song matrix\n ' playlist_type = CategoricalDtype(df['pid'].unique(), ordered=True) song_type = CategoricalDtype(df['tid'].unique(), ordered=True) row = df['tid'].astype(song_type).cat.codes col = df['pid'].astype(playlist_type).cat.codes sparse_matrix = csr_matrix((df['count'], (row, col)), shape=(song_type.categories.size, playlist_type.categories.size)) pt_df = pd.DataFrame(sparse_matrix.todense(), index=song_type.categories, columns=playlist_type.categories) return pt_df
def create_playlist_song_matrix(self, df): 'Creates a binary playlist-song matrix with unique playlist identifiers as rows and unique song identifiers\n as columns. An entry of a matrix is 1 if a song is contained in the playlist, 0 otherwise.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n\n Returns\n -------\n pt_df\n Dataframe which stores a binary playlist-song matrix\n ' playlist_type = CategoricalDtype(df['pid'].unique(), ordered=True) song_type = CategoricalDtype(df['tid'].unique(), ordered=True) row = df['tid'].astype(song_type).cat.codes col = df['pid'].astype(playlist_type).cat.codes sparse_matrix = csr_matrix((df['count'], (row, col)), shape=(song_type.categories.size, playlist_type.categories.size)) pt_df = pd.DataFrame(sparse_matrix.todense(), index=song_type.categories, columns=playlist_type.categories) return pt_df<|docstring|>Creates a binary playlist-song matrix with unique playlist identifiers as rows and unique song identifiers as columns. An entry of a matrix is 1 if a song is contained in the playlist, 0 otherwise. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs Returns ------- pt_df Dataframe which stores a binary playlist-song matrix<|endoftext|>
814bd5f7ebc6babcbd36b1b28015f848e1301eb50efd8c5c5d58c27384061897
def exclude_target_playlist_songs(self, df, seed_track_id):
    """Drop the target playlist's songs from the matrix, keeping the seed.

    Recommendations must come from outside the playlist, so every song the
    target playlist already contains is removed — except the seed track
    itself, which is kept so similarities to it can still be computed.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing a binary playlist-song matrix.
    seed_track_id : int
        Unique identifier of a seed track (expected to be in the playlist).

    Returns
    -------
    DataFrame
        Copy of ``df`` without the target playlist's songs (seed kept).
    """
    in_playlist = df.index[df[self.target_playlist_id] == 1].tolist()
    # Raises ValueError if the seed is not actually in the target playlist.
    in_playlist.remove(seed_track_id)
    keep_mask = ~df.index.isin(in_playlist)
    return df[keep_mask].copy()
Creates a slice of a dataframe containing a playlist-song matrix which excludes the songs which are already in the target playlist. This is necessary because the goal is to recommend songs which are not present in the playlist. The seed track is kept in order to compute the similarities with other songs. Parameters ---------- df : DataFrame Pandas dataframe containing a binary playlist-song matrix seed_track_id : int Unique identifier of a seed track Returns ------- slice_df Dataframe which stores a binary playlist-song matrix excluding the songs already present in the playlist
recommender/Individual/ItemCF.py
exclude_target_playlist_songs
utkuarslan5/the_recommenders
1
python
def exclude_target_playlist_songs(self, df, seed_track_id): 'Creates a slice of a dataframe containing a playlist-song matrix which excludes the songs which are already\n in the target playlist. This is necessary because the goal is to recommend songs which are not present in the playlist.\n The seed track is kept in order to compute the similarities with other songs.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing a binary playlist-song matrix\n\n seed_track_id : int\n Unique identifier of a seed track\n\n Returns\n -------\n slice_df\n Dataframe which stores a binary playlist-song matrix excluding the songs already present in the playlist\n ' song_ids = df.loc[(df[self.target_playlist_id] == 1)].index.tolist() song_ids.remove(seed_track_id) slice_df = df[(~ df.index.isin(song_ids))].copy() return slice_df
def exclude_target_playlist_songs(self, df, seed_track_id): 'Creates a slice of a dataframe containing a playlist-song matrix which excludes the songs which are already\n in the target playlist. This is necessary because the goal is to recommend songs which are not present in the playlist.\n The seed track is kept in order to compute the similarities with other songs.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing a binary playlist-song matrix\n\n seed_track_id : int\n Unique identifier of a seed track\n\n Returns\n -------\n slice_df\n Dataframe which stores a binary playlist-song matrix excluding the songs already present in the playlist\n ' song_ids = df.loc[(df[self.target_playlist_id] == 1)].index.tolist() song_ids.remove(seed_track_id) slice_df = df[(~ df.index.isin(song_ids))].copy() return slice_df<|docstring|>Creates a slice of a dataframe containing a playlist-song matrix which excludes the songs which are already in the target playlist. This is necessary because the goal is to recommend songs which are not present in the playlist. The seed track is kept in order to compute the similarities with other songs. Parameters ---------- df : DataFrame Pandas dataframe containing a binary playlist-song matrix seed_track_id : int Unique identifier of a seed track Returns ------- slice_df Dataframe which stores a binary playlist-song matrix excluding the songs already present in the playlist<|endoftext|>
d2c413daba307dbd43f0f30e6952bf0340188c9c96ce07b7bc305984bfd49712
def find_k_most_similar_songs(self, df, seed_track_id):
    """Find the top ``k`` songs most similar to the seed track.

    Similarity is cosine similarity between song rows of the playlist-song
    matrix, so songs that co-occur across many playlists score as more
    similar. Songs already in the target playlist (other than the seed) are
    excluded before fitting kNN.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing a binary playlist-song matrix.
    seed_track_id : int
        Unique identifier of a seed track.

    Returns
    -------
    sim_tracks_id : list
        Unique song identifiers of the top ``k`` most similar songs.
    track_distances : list
        Cosine *similarities* (1 - cosine distance) between the seed track
        and each returned song — despite the "distances" name.
    """
    slice_df = self.exclude_target_playlist_songs(df, seed_track_id)
    knn = NearestNeighbors(metric='cosine', algorithm='brute')
    knn.fit(csr_matrix(slice_df.values))
    # Ask for k+1 neighbours: the nearest neighbour of each song is the
    # song itself (distance 0); it is popped off below.
    (distances, indices) = knn.kneighbors(csr_matrix(slice_df.values), n_neighbors=(self.k + 1))
    # Convert cosine distances into similarities.
    distances = (1 - distances)
    seed_track_loc = slice_df.index.get_loc(seed_track_id)
    track_distances = distances[seed_track_loc].tolist()
    sim_tracks_loc = indices[seed_track_loc].tolist()
    # Map positional neighbour indices back to song identifiers.
    sim_tracks_id = slice_df.iloc[(sim_tracks_loc, :)].index.tolist()
    # Drop the seed track from its own neighbour list.
    # NOTE(review): assumes the seed is always first in the neighbour list;
    # ties at distance 0 (duplicate rows) could break this — verify.
    track_distances.pop(0)
    sim_tracks_id.pop(0)
    return (sim_tracks_id, track_distances)
Finds k most similar songs to the seed track using a kNN algorithm with a cosine similarity as a similarity measure. The songs are considered more similar if they co-occur more often across different playlists. Parameters ---------- df : DataFrame Pandas dataframe containing a binary playlist-song matrix excluding the songs which are already in the playlist seed_track_id : int Unique identifier of a seed track Returns ------- sim_tracks_id List of unique song identifiers for the top k most similar songs to the seed track track_distances List of distances between a seed track and top k most similar songs to it
recommender/Individual/ItemCF.py
find_k_most_similar_songs
utkuarslan5/the_recommenders
1
python
def find_k_most_similar_songs(self, df, seed_track_id): 'Finds k most similar songs to the seed track using a kNN algorithm with a cosine similarity as a similarity measure.\n The songs are considered more similar if they co-occur more often across different playlists.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing a binary playlist-song matrix excluding the songs which are already in the playlist\n\n seed_track_id : int\n Unique identifier of a seed track\n\n Returns\n -------\n sim_tracks_id\n List of unique song identifiers for the top k most similar songs to the seed track\n\n track_distances\n List of distances between a seed track and top k most similar songs to it\n ' slice_df = self.exclude_target_playlist_songs(df, seed_track_id) knn = NearestNeighbors(metric='cosine', algorithm='brute') knn.fit(csr_matrix(slice_df.values)) (distances, indices) = knn.kneighbors(csr_matrix(slice_df.values), n_neighbors=(self.k + 1)) distances = (1 - distances) seed_track_loc = slice_df.index.get_loc(seed_track_id) track_distances = distances[seed_track_loc].tolist() sim_tracks_loc = indices[seed_track_loc].tolist() sim_tracks_id = slice_df.iloc[(sim_tracks_loc, :)].index.tolist() track_distances.pop(0) sim_tracks_id.pop(0) return (sim_tracks_id, track_distances)
def find_k_most_similar_songs(self, df, seed_track_id): 'Finds k most similar songs to the seed track using a kNN algorithm with a cosine similarity as a similarity measure.\n The songs are considered more similar if they co-occur more often across different playlists.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing a binary playlist-song matrix excluding the songs which are already in the playlist\n\n seed_track_id : int\n Unique identifier of a seed track\n\n Returns\n -------\n sim_tracks_id\n List of unique song identifiers for the top k most similar songs to the seed track\n\n track_distances\n List of distances between a seed track and top k most similar songs to it\n ' slice_df = self.exclude_target_playlist_songs(df, seed_track_id) knn = NearestNeighbors(metric='cosine', algorithm='brute') knn.fit(csr_matrix(slice_df.values)) (distances, indices) = knn.kneighbors(csr_matrix(slice_df.values), n_neighbors=(self.k + 1)) distances = (1 - distances) seed_track_loc = slice_df.index.get_loc(seed_track_id) track_distances = distances[seed_track_loc].tolist() sim_tracks_loc = indices[seed_track_loc].tolist() sim_tracks_id = slice_df.iloc[(sim_tracks_loc, :)].index.tolist() track_distances.pop(0) sim_tracks_id.pop(0) return (sim_tracks_id, track_distances)<|docstring|>Finds k most similar songs to the seed track using a kNN algorithm with a cosine similarity as a similarity measure. The songs are considered more similar if they co-occur more often across different playlists. Parameters ---------- df : DataFrame Pandas dataframe containing a binary playlist-song matrix excluding the songs which are already in the playlist seed_track_id : int Unique identifier of a seed track Returns ------- sim_tracks_id List of unique song identifiers for the top k most similar songs to the seed track track_distances List of distances between a seed track and top k most similar songs to it<|endoftext|>
f566937724693c6717efb4217a1e0fa56ea79ecca8ee8f33c9eb18caca36bd41
def get_song_and_artist_name(self, df, song_id):
    """Return the song name and artist name for a unique song identifier.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing full information about playlists and songs.
    song_id : int
        Unique id of the song to look up.

    Returns
    -------
    song_name : str
        Name of the song.
    artist_name : str
        Name of the artist performing the song.
    """
    # A tid identifies a single track, so the first matching row suffices;
    # this avoids the original's sort + de-duplication of the entire
    # dataframe (O(n log n)) on every single-row lookup.
    match = df.loc[df.tid == song_id].iloc[0]
    return (match['track_name'], match['artist_name'])
Given a unique song identifier, returns a song and artist name. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs song_id : int Unique id of a song for which we need a name and an artist Returns ------- song_name Name of a song artist_name Name of an artist performing a song
recommender/Individual/ItemCF.py
get_song_and_artist_name
utkuarslan5/the_recommenders
1
python
def get_song_and_artist_name(self, df, song_id): 'Given a unique song identifier, returns a song and artist name.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n song_id : int\n Unique id of a song for which we need a name and an artist\n\n Returns\n -------\n song_name\n Name of a song\n artist_name\n Name of an artist performing a song\n ' song_df = df.drop(['pid', 'playlist'], axis=1).sort_values(by=['tid']).copy() song_df.drop_duplicates(subset=['tid'], inplace=True) song_name = song_df.loc[((song_df.tid == song_id), 'track_name')].item() artist_name = song_df.loc[((song_df.tid == song_id), 'artist_name')].item() return (song_name, artist_name)
def get_song_and_artist_name(self, df, song_id): 'Given a unique song identifier, returns a song and artist name.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n song_id : int\n Unique id of a song for which we need a name and an artist\n\n Returns\n -------\n song_name\n Name of a song\n artist_name\n Name of an artist performing a song\n ' song_df = df.drop(['pid', 'playlist'], axis=1).sort_values(by=['tid']).copy() song_df.drop_duplicates(subset=['tid'], inplace=True) song_name = song_df.loc[((song_df.tid == song_id), 'track_name')].item() artist_name = song_df.loc[((song_df.tid == song_id), 'artist_name')].item() return (song_name, artist_name)<|docstring|>Given a unique song identifier, returns a song and artist name. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs song_id : int Unique id of a song for which we need a name and an artist Returns ------- song_name Name of a song artist_name Name of an artist performing a song<|endoftext|>
613cc207e668ab16b3eb909d171018dddd4e8e784048ec85ac7b07dabdf0f78c
def get_playlist_name(self, df, playlist_id):
    """Return the playlist name for a unique playlist identifier.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing full information about playlists and songs.
    playlist_id : int
        Unique id of the playlist to look up.

    Returns
    -------
    str
        Name of the playlist.
    """
    # A pid identifies a single playlist, so the first matching row
    # suffices; this avoids the original's column-drop, sort and
    # de-duplication of the entire dataframe on every lookup.
    return df.loc[df.pid == playlist_id, 'playlist'].iloc[0]
Given a unique playlist identifier, returns a playlist name. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs playlist_id : int Unique id of a playlist for which we need a name Returns ------- playlist Name of a playlist
recommender/Individual/ItemCF.py
get_playlist_name
utkuarslan5/the_recommenders
1
python
def get_playlist_name(self, df, playlist_id): 'Given a unique playlist identifier, returns a playlist name.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n playlist_id : int\n Unique id of a playlist for which we need a name\n\n Returns\n -------\n playlist\n Name of a playlist\n ' playlist_df = df.drop(['track_name', 'artist_name', 'album_name', 'tid'], axis=1).sort_values(by=['pid']).copy() playlist_df.drop_duplicates(subset=['pid'], inplace=True) playlist = playlist_df.loc[((playlist_df.pid == playlist_id), 'playlist')].item() return playlist
def get_playlist_name(self, df, playlist_id): 'Given a unique playlist identifier, returns a playlist name.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n playlist_id : int\n Unique id of a playlist for which we need a name\n\n Returns\n -------\n playlist\n Name of a playlist\n ' playlist_df = df.drop(['track_name', 'artist_name', 'album_name', 'tid'], axis=1).sort_values(by=['pid']).copy() playlist_df.drop_duplicates(subset=['pid'], inplace=True) playlist = playlist_df.loc[((playlist_df.pid == playlist_id), 'playlist')].item() return playlist<|docstring|>Given a unique playlist identifier, returns a playlist name. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs playlist_id : int Unique id of a playlist for which we need a name Returns ------- playlist Name of a playlist<|endoftext|>
3550cef252b611d314a5a3df523e2ebe0b19771f9915fbe1db5a19672c24e734
def get_song_recommendation(self, df, seed_track_id, similar_song_id):
    """Print one song recommendation, justified by its seed track.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing full information about playlists and songs.
    seed_track_id : int
        Unique id of the seed track the recommendation is based on.
    similar_song_id : int
        Unique id of the song being recommended for its similarity to the seed.
    """
    seed_name, seed_artist = self.get_song_and_artist_name(df, seed_track_id)
    rec_name, rec_artist = self.get_song_and_artist_name(df, similar_song_id)
    playlist_name = self.get_playlist_name(df, self.target_playlist_id)
    print(f'Suggested song for your playlist {playlist_name} is {rec_name}'
          f' by {rec_artist} based on its similarity to {seed_name}'
          f' by {seed_artist}.')
    print()
Given a dataframe, seed track identifier and an identifier of a similar song, it prints out a recommendation of a similar song based on its similarity to the seed track. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs seed_track_id : int Unique id of the seed track on which the recommendation is based similar_song_id : int Unique id of a song to be recommended based on its similarity to the seed track
recommender/Individual/ItemCF.py
get_song_recommendation
utkuarslan5/the_recommenders
1
python
def get_song_recommendation(self, df, seed_track_id, similar_song_id): 'Given a dataframe, seed track identifier and an identifier of a similar song, it prints out a recommendation of\n a similar song based on its similarity to the seed track.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n seed_track_id : int\n Unique id of a playlist for which we need a name\n similar_song_id : int\n Unique id of a song to be recommended based on its similarity to the seed track\n ' (seed_track_name, seed_track_artist) = self.get_song_and_artist_name(df, seed_track_id) (song_name, artist_name) = self.get_song_and_artist_name(df, similar_song_id) print((((((((((('Suggested song for your playlist ' + self.get_playlist_name(df, self.target_playlist_id)) + ' is ') + song_name) + ' by ') + artist_name) + ' based on its similarity to ') + seed_track_name) + ' by ') + seed_track_artist) + '.')) print()
def get_song_recommendation(self, df, seed_track_id, similar_song_id): 'Given a dataframe, seed track identifier and an identifier of a similar song, it prints out a recommendation of\n a similar song based on its similarity to the seed track.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about playlists and songs\n seed_track_id : int\n Unique id of a playlist for which we need a name\n similar_song_id : int\n Unique id of a song to be recommended based on its similarity to the seed track\n ' (seed_track_name, seed_track_artist) = self.get_song_and_artist_name(df, seed_track_id) (song_name, artist_name) = self.get_song_and_artist_name(df, similar_song_id) print((((((((((('Suggested song for your playlist ' + self.get_playlist_name(df, self.target_playlist_id)) + ' is ') + song_name) + ' by ') + artist_name) + ' based on its similarity to ') + seed_track_name) + ' by ') + seed_track_artist) + '.')) print()<|docstring|>Given a dataframe, seed track identifier and an identifier of a similar song, it prints out a recommendation of a similar song based on its similarity to the seed track. Parameters ---------- df : DataFrame Pandas dataframe containing full information about playlists and songs seed_track_id : int Unique id of a playlist for which we need a name similar_song_id : int Unique id of a song to be recommended based on its similarity to the seed track<|endoftext|>
c6e2925faeee306bb863ef77120f9bae397509fcb1b5da5d4ed4ed4a45bba7af
def calculate_song_idf(self, df, song_id):
    """Inverse document frequency of a song over playlists.

    Computed as log10(|P| / |Pt|), where P is the set of all playlists and
    Pt is the set of playlists containing the given song.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing a binary playlist-song matrix.
    song_id : int
        Unique id of the song whose idf is computed.

    Returns
    -------
    float
        Inverse document frequency of the song.
    """
    total_playlists = len(df.columns)
    # Row sum of the song's row = number of playlists containing the song.
    playlists_with_song = df.loc[df.index == song_id].sum(axis=1).item()
    return np.log10(total_playlists / playlists_with_song)
Given a unique song identifier, calculate inverse document frequency using a formula log10(|P|/|Pt|) where P is the set of all playlists and Pt is the set of all playlists containing a given song. Parameters ---------- df : DataFrame Pandas dataframe containing a binary playlist-song matrix song_id : int Unique id of a song for which we need to calculate idf Returns ------- idf Inverse document frequency of a given song
recommender/Individual/ItemCF.py
calculate_song_idf
utkuarslan5/the_recommenders
1
python
def calculate_song_idf(self, df, song_id): 'Given a unique song identifier, calculate inverse document frequency using a formula\n log10(|P|/|Pt|) where P is the set of all playlists and Pt is the set of all playlists containing\n a given song.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing a binary playlist-song matrix\n playlist_id : int\n Unique id of a song for which we need to calculate idf\n\n Returns\n -------\n idf\n Inverse document frequency of a given song\n ' idf = np.log10((len(df.columns) / df.loc[(df.index == song_id)].sum(axis=1).item())) return idf
def calculate_song_idf(self, df, song_id): 'Given a unique song identifier, calculate inverse document frequency using a formula\n log10(|P|/|Pt|) where P is the set of all playlists and Pt is the set of all playlists containing\n a given song.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing a binary playlist-song matrix\n playlist_id : int\n Unique id of a song for which we need to calculate idf\n\n Returns\n -------\n idf\n Inverse document frequency of a given song\n ' idf = np.log10((len(df.columns) / df.loc[(df.index == song_id)].sum(axis=1).item())) return idf<|docstring|>Given a unique song identifier, calculate inverse document frequency using a formula log10(|P|/|Pt|) where P is the set of all playlists and Pt is the set of all playlists containing a given song. Parameters ---------- df : DataFrame Pandas dataframe containing a binary playlist-song matrix playlist_id : int Unique id of a song for which we need to calculate idf Returns ------- idf Inverse document frequency of a given song<|endoftext|>
eccda200d070f0c5bfac56d5c9347ae49e19e887f0ecc0d37af097143e9819a6
def execute(self, df):
    """Run item-based collaborative filtering for the target playlist.

    Picks seed tracks, finds the k nearest songs to each seed, ranks all
    candidates by idf-weighted similarity, and prints one recommendation
    per distinct candidate, attributed to the seed that produced it.

    Parameters
    ----------
    df : DataFrame
        Pandas dataframe containing full information about songs and playlists.
    """
    filtered_df = self.filter_by_playlist_length(df)
    pt_df = self.create_playlist_song_matrix(filtered_df)
    seed_ids = self.get_seed_tracks(df)
    all_recs_id, all_distances = [], []
    # Remember which seed produced each candidate: previously the loop
    # variable leaked, so every recommendation was attributed to the LAST
    # seed track of the loop.
    seed_of = {}
    for seed_track_id in seed_ids:
        sim_tracks_id, track_distances = self.find_k_most_similar_songs(pt_df, seed_track_id)
        all_recs_id.extend(sim_tracks_id)
        all_distances.extend(track_distances)
        for rec_id in sim_tracks_id:
            seed_of.setdefault(rec_id, seed_track_id)
    # Weight each similarity by the candidate's idf so ubiquitous songs
    # are down-ranked.
    final_sim_scores = [sim * self.calculate_song_idf(pt_df, rec_id)
                        for sim, rec_id in zip(all_distances, all_recs_id)]
    ranked = [rec_id for _, rec_id in sorted(zip(final_sim_scores, all_recs_id), reverse=True)]
    # De-duplicate while PRESERVING rank order; the original list(set(...))
    # discarded the ordering it had just computed.
    final_recs = list(dict.fromkeys(ranked))
    for similar_song_id in final_recs:
        self.get_song_recommendation(filtered_df, seed_of[similar_song_id], similar_song_id)
Given a dataframe with information about songs and playlists, it executes an item-based collaborative filtering algorithm in order to create song recommendations for a target playlist. Parameters ---------- df : DataFrame Pandas dataframe containing full information about songs and playlists
recommender/Individual/ItemCF.py
execute
utkuarslan5/the_recommenders
1
python
def execute(self, df): 'Given a dataframe with information about songs and playlists, it executes an item-based collaborative filtering\n algorithm in order to create song recommendations for a target playlist.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about songs and playlists\n ' filtered_df = self.filter_by_playlist_length(df) pt_df = self.create_playlist_song_matrix(filtered_df) seed_ids = self.get_seed_tracks(df) (all_recs_id, all_distances) = ([], []) for seed_track_id in seed_ids: (sim_tracks_id, track_distances) = self.find_k_most_similar_songs(pt_df, seed_track_id) all_recs_id.extend(sim_tracks_id) all_distances.extend(track_distances) final_sim_scores = [(all_distances[i] * self.calculate_song_idf(pt_df, all_recs_id[i])) for i in range(0, len(all_recs_id))] final_recs = [all_recs_id for (_, all_recs_id) in sorted(zip(final_sim_scores, all_recs_id), reverse=True)] final_recs = list(set(final_recs)) for similar_song_id in final_recs: self.get_song_recommendation(filtered_df, seed_track_id, similar_song_id)
def execute(self, df): 'Given a dataframe with information about songs and playlists, it executes an item-based collaborative filtering\n algorithm in order to create song recommendations for a target playlist.\n\n Parameters\n ----------\n df : DataFrame\n Pandas dataframe containing full information about songs and playlists\n ' filtered_df = self.filter_by_playlist_length(df) pt_df = self.create_playlist_song_matrix(filtered_df) seed_ids = self.get_seed_tracks(df) (all_recs_id, all_distances) = ([], []) for seed_track_id in seed_ids: (sim_tracks_id, track_distances) = self.find_k_most_similar_songs(pt_df, seed_track_id) all_recs_id.extend(sim_tracks_id) all_distances.extend(track_distances) final_sim_scores = [(all_distances[i] * self.calculate_song_idf(pt_df, all_recs_id[i])) for i in range(0, len(all_recs_id))] final_recs = [all_recs_id for (_, all_recs_id) in sorted(zip(final_sim_scores, all_recs_id), reverse=True)] final_recs = list(set(final_recs)) for similar_song_id in final_recs: self.get_song_recommendation(filtered_df, seed_track_id, similar_song_id)<|docstring|>Given a dataframe with information about songs and playlists, it executes an item-based collaborative filtering algorithm in order to create song recommendations for a target playlist. Parameters ---------- df : DataFrame Pandas dataframe containing full information about songs and playlists<|endoftext|>
7029ef9d02d15c8da5bd6c49054fb0102418357fb67fcfed5ac8267e3938870d
def compile(self): 'Returns the name of a local temporary if it created one to\n store its most recent result. Otherwise returns None.\n\n ' self.emit(PRELUDE) main = self.program.find_production(Prodref('main', 'main')) if (not main): raise ValueError("no 'main:main' production defined") for module in self.program.modlist: mod_name = module.name for prod in module.prodlist: self.emit(('void prod_%s_%s(%s);' % (mod_name, prod.name, ', '.join(['const struct term *' for f in prod.branches[0].formals])))) self.emit('') for module in self.program.modlist: self.currmod = module mod_name = module.name for prod in module.prodlist: self.current_prod = prod self.compile_r(prod) self.current_prod = None self.currmod = None self.emit(POSTLUDE)
Returns the name of a local temporary if it created one to store its most recent result. Otherwise returns None.
src/tamsin/compiler.py
compile
catseye/Tamsin
11
python
def compile(self): 'Returns the name of a local temporary if it created one to\n store its most recent result. Otherwise returns None.\n\n ' self.emit(PRELUDE) main = self.program.find_production(Prodref('main', 'main')) if (not main): raise ValueError("no 'main:main' production defined") for module in self.program.modlist: mod_name = module.name for prod in module.prodlist: self.emit(('void prod_%s_%s(%s);' % (mod_name, prod.name, ', '.join(['const struct term *' for f in prod.branches[0].formals])))) self.emit() for module in self.program.modlist: self.currmod = module mod_name = module.name for prod in module.prodlist: self.current_prod = prod self.compile_r(prod) self.current_prod = None self.currmod = None self.emit(POSTLUDE)
def compile(self): 'Returns the name of a local temporary if it created one to\n store its most recent result. Otherwise returns None.\n\n ' self.emit(PRELUDE) main = self.program.find_production(Prodref('main', 'main')) if (not main): raise ValueError("no 'main:main' production defined") for module in self.program.modlist: mod_name = module.name for prod in module.prodlist: self.emit(('void prod_%s_%s(%s);' % (mod_name, prod.name, ', '.join(['const struct term *' for f in prod.branches[0].formals])))) self.emit() for module in self.program.modlist: self.currmod = module mod_name = module.name for prod in module.prodlist: self.current_prod = prod self.compile_r(prod) self.current_prod = None self.currmod = None self.emit(POSTLUDE)<|docstring|>Returns the name of a local temporary if it created one to store its most recent result. Otherwise returns None.<|endoftext|>
07213269dccbf41c2c4b95b993b604d3a2b3969ca277d8f52d13aa9c305f2935
def emit_lvalue(self, ast): 'Does not actually emit anything. (Yet.)' if isinstance(ast, TermNode): return self.emit_lvalue(ast.to_term()) elif isinstance(ast, Variable): return ast.name else: raise NotImplementedError(repr(ast))
Does not actually emit anything. (Yet.)
src/tamsin/compiler.py
emit_lvalue
catseye/Tamsin
11
python
def emit_lvalue(self, ast): if isinstance(ast, TermNode): return self.emit_lvalue(ast.to_term()) elif isinstance(ast, Variable): return ast.name else: raise NotImplementedError(repr(ast))
def emit_lvalue(self, ast): if isinstance(ast, TermNode): return self.emit_lvalue(ast.to_term()) elif isinstance(ast, Variable): return ast.name else: raise NotImplementedError(repr(ast))<|docstring|>Does not actually emit anything. (Yet.)<|endoftext|>
99624974d191c3b9045e74e9a98c4975d0bba54d9b297153e49fc0b7d370ed75
def model_predict(img_path): '\n model_predict will return the preprocessed image\n ' img = open_image(img_path) (pred_class, pred_idx, outputs) = learn.predict(img) return pred_class
model_predict will return the preprocessed image
app.py
model_predict
jamesdiplomat/Water-classifier-fastai
0
python
def model_predict(img_path): '\n \n ' img = open_image(img_path) (pred_class, pred_idx, outputs) = learn.predict(img) return pred_class
def model_predict(img_path): '\n \n ' img = open_image(img_path) (pred_class, pred_idx, outputs) = learn.predict(img) return pred_class<|docstring|>model_predict will return the preprocessed image<|endoftext|>
0d8c42d6151650c5d0dbaa1d87e121e346281379ffbc39395a76d626ecec16a5
def composite(vol, cmr): " Ranks securities in a composite fashion.\n\n Parameters:\n - `vol` : :class:`dict` volatility portfolio.\n - `cmr` : :class:`dict` momentum portfolio.\n\n .. note::\n at this point, the same tickers are present in both portfolios. Their\n ranking only is different.\n\n The function builds a :class:`dict` with the tickers and set their score\n to zero; sample {'ticker': 0}. Then it adds to the ticker score their index\n in volatility and momentum portfolio.\n\n The tickers are then sorted ascendingly, after having been transformed into\n a :class:`tuple`.\n\n Returns a :class:`dict` containing tickers and their score.\n\n " vector = {} v_sort = [] composite = {} for item in vol.keys(): vector[item] = 0 for (i, j) in enumerate(vol.keys()): vector[j] += i for (i, j) in enumerate(cmr.keys()): vector[j] += i for item in vector.keys(): v_sort.append((item, vector[item])) v_sort.sort(key=(lambda x: x[1])) for item in v_sort: composite[item[0]] = item[1] return composite
Ranks securities in a composite fashion. Parameters: - `vol` : :class:`dict` volatility portfolio. - `cmr` : :class:`dict` momentum portfolio. .. note:: at this point, the same tickers are present in both portfolios. Their ranking only is different. The function builds a :class:`dict` with the tickers and set their score to zero; sample {'ticker': 0}. Then it adds to the ticker score their index in volatility and momentum portfolio. The tickers are then sorted ascendingly, after having been transformed into a :class:`tuple`. Returns a :class:`dict` containing tickers and their score.
smartbetas/composite.py
composite
epfeff/smartbetas
3
python
def composite(vol, cmr): " Ranks securities in a composite fashion.\n\n Parameters:\n - `vol` : :class:`dict` volatility portfolio.\n - `cmr` : :class:`dict` momentum portfolio.\n\n .. note::\n at this point, the same tickers are present in both portfolios. Their\n ranking only is different.\n\n The function builds a :class:`dict` with the tickers and set their score\n to zero; sample {'ticker': 0}. Then it adds to the ticker score their index\n in volatility and momentum portfolio.\n\n The tickers are then sorted ascendingly, after having been transformed into\n a :class:`tuple`.\n\n Returns a :class:`dict` containing tickers and their score.\n\n " vector = {} v_sort = [] composite = {} for item in vol.keys(): vector[item] = 0 for (i, j) in enumerate(vol.keys()): vector[j] += i for (i, j) in enumerate(cmr.keys()): vector[j] += i for item in vector.keys(): v_sort.append((item, vector[item])) v_sort.sort(key=(lambda x: x[1])) for item in v_sort: composite[item[0]] = item[1] return composite
def composite(vol, cmr): " Ranks securities in a composite fashion.\n\n Parameters:\n - `vol` : :class:`dict` volatility portfolio.\n - `cmr` : :class:`dict` momentum portfolio.\n\n .. note::\n at this point, the same tickers are present in both portfolios. Their\n ranking only is different.\n\n The function builds a :class:`dict` with the tickers and set their score\n to zero; sample {'ticker': 0}. Then it adds to the ticker score their index\n in volatility and momentum portfolio.\n\n The tickers are then sorted ascendingly, after having been transformed into\n a :class:`tuple`.\n\n Returns a :class:`dict` containing tickers and their score.\n\n " vector = {} v_sort = [] composite = {} for item in vol.keys(): vector[item] = 0 for (i, j) in enumerate(vol.keys()): vector[j] += i for (i, j) in enumerate(cmr.keys()): vector[j] += i for item in vector.keys(): v_sort.append((item, vector[item])) v_sort.sort(key=(lambda x: x[1])) for item in v_sort: composite[item[0]] = item[1] return composite<|docstring|>Ranks securities in a composite fashion. Parameters: - `vol` : :class:`dict` volatility portfolio. - `cmr` : :class:`dict` momentum portfolio. .. note:: at this point, the same tickers are present in both portfolios. Their ranking only is different. The function builds a :class:`dict` with the tickers and set their score to zero; sample {'ticker': 0}. Then it adds to the ticker score their index in volatility and momentum portfolio. The tickers are then sorted ascendingly, after having been transformed into a :class:`tuple`. Returns a :class:`dict` containing tickers and their score.<|endoftext|>
06d567fa9393db6636e416f1e9f52fb409c73f2d3fdcff8c06758b241b700720
def __init__(self, desc: Callable=None, log_interval: Optional[int]=10): '\n Creates a smart progress bar (tqdm if in notebooks, text if in terminal).\n Use `alr.training.progress_bar.use_tqdm = True/False` to force TQDM (or force disable it).\n\n Args:\n desc (Callable, optional): takes an engine as input and returns a string\n log_interval (int, optional): log every `log_interval` iterations\n ' self.log_interval = log_interval self.desc = desc self.progress_bar = None
Creates a smart progress bar (tqdm if in notebooks, text if in terminal). Use `alr.training.progress_bar.use_tqdm = True/False` to force TQDM (or force disable it). Args: desc (Callable, optional): takes an engine as input and returns a string log_interval (int, optional): log every `log_interval` iterations
alr/training/progress_bar/ignite_progress_bar.py
__init__
jiahfong/alr
2
python
def __init__(self, desc: Callable=None, log_interval: Optional[int]=10): '\n Creates a smart progress bar (tqdm if in notebooks, text if in terminal).\n Use `alr.training.progress_bar.use_tqdm = True/False` to force TQDM (or force disable it).\n\n Args:\n desc (Callable, optional): takes an engine as input and returns a string\n log_interval (int, optional): log every `log_interval` iterations\n ' self.log_interval = log_interval self.desc = desc self.progress_bar = None
def __init__(self, desc: Callable=None, log_interval: Optional[int]=10): '\n Creates a smart progress bar (tqdm if in notebooks, text if in terminal).\n Use `alr.training.progress_bar.use_tqdm = True/False` to force TQDM (or force disable it).\n\n Args:\n desc (Callable, optional): takes an engine as input and returns a string\n log_interval (int, optional): log every `log_interval` iterations\n ' self.log_interval = log_interval self.desc = desc self.progress_bar = None<|docstring|>Creates a smart progress bar (tqdm if in notebooks, text if in terminal). Use `alr.training.progress_bar.use_tqdm = True/False` to force TQDM (or force disable it). Args: desc (Callable, optional): takes an engine as input and returns a string log_interval (int, optional): log every `log_interval` iterations<|endoftext|>
adcfe21d31ed8379f5feecb5da82fb4aa3f87fd48266d4c679a1c0c64eb8faa7
def __unicode__(): 'Returns the URL as a unicode string.'
Returns the URL as a unicode string.
venv/lib/python3.7/site-packages/zope/traversing/browser/interfaces.py
__unicode__
leanhvu86/matrix-server
0
python
def __unicode__():
def __unicode__(): <|docstring|>Returns the URL as a unicode string.<|endoftext|>
8ad24545948c8b027fe0ecda45e748af65649f0999606eccab3aef8282986c0a
def __str__(): 'Returns an ASCII string with all unicode characters url quoted.'
Returns an ASCII string with all unicode characters url quoted.
venv/lib/python3.7/site-packages/zope/traversing/browser/interfaces.py
__str__
leanhvu86/matrix-server
0
python
def __str__():
def __str__(): <|docstring|>Returns an ASCII string with all unicode characters url quoted.<|endoftext|>
6bfdb02eb912aa955ff78331978615dac7cb1dd5176ff9d3318e0bf6f66c3608
def __repr__(): 'Get a string representation '
Get a string representation
venv/lib/python3.7/site-packages/zope/traversing/browser/interfaces.py
__repr__
leanhvu86/matrix-server
0
python
def __repr__(): ' '
def __repr__(): ' '<|docstring|>Get a string representation<|endoftext|>
7392c2e047308f3d6a05cc6b6e7515d8e642bfaea63176091e272972e0f44696
def __call__(): 'Returns an ASCII string with all unicode characters url quoted.'
Returns an ASCII string with all unicode characters url quoted.
venv/lib/python3.7/site-packages/zope/traversing/browser/interfaces.py
__call__
leanhvu86/matrix-server
0
python
def __call__():
def __call__(): <|docstring|>Returns an ASCII string with all unicode characters url quoted.<|endoftext|>
c3822c4c513c36785f1c8e6ef789d1ce1d93d972e6589ddfaf292313cca1039d
def breadcrumbs(): "Returns a tuple like ({'name':name, 'url':url}, ...)\n\n Name is the name to display for that segment of the breadcrumbs.\n URL is the link for that segment of the breadcrumbs.\n "
Returns a tuple like ({'name':name, 'url':url}, ...) Name is the name to display for that segment of the breadcrumbs. URL is the link for that segment of the breadcrumbs.
venv/lib/python3.7/site-packages/zope/traversing/browser/interfaces.py
breadcrumbs
leanhvu86/matrix-server
0
python
def breadcrumbs(): "Returns a tuple like ({'name':name, 'url':url}, ...)\n\n Name is the name to display for that segment of the breadcrumbs.\n URL is the link for that segment of the breadcrumbs.\n "
def breadcrumbs(): "Returns a tuple like ({'name':name, 'url':url}, ...)\n\n Name is the name to display for that segment of the breadcrumbs.\n URL is the link for that segment of the breadcrumbs.\n "<|docstring|>Returns a tuple like ({'name':name, 'url':url}, ...) Name is the name to display for that segment of the breadcrumbs. URL is the link for that segment of the breadcrumbs.<|endoftext|>
018e8147b5886eb963fa0f3022b22fd3305b32b8b508c9045df90fba377d672f
def absoluteURL(ob, request): '\n Compute the absolute URL of an object.\n\n This should return an ASCII string by looking up an adapter\n from `(ob, request)` to :class:`IAbsoluteURL` and then calling it.\n '
Compute the absolute URL of an object. This should return an ASCII string by looking up an adapter from `(ob, request)` to :class:`IAbsoluteURL` and then calling it.
venv/lib/python3.7/site-packages/zope/traversing/browser/interfaces.py
absoluteURL
leanhvu86/matrix-server
0
python
def absoluteURL(ob, request): '\n Compute the absolute URL of an object.\n\n This should return an ASCII string by looking up an adapter\n from `(ob, request)` to :class:`IAbsoluteURL` and then calling it.\n '
def absoluteURL(ob, request): '\n Compute the absolute URL of an object.\n\n This should return an ASCII string by looking up an adapter\n from `(ob, request)` to :class:`IAbsoluteURL` and then calling it.\n '<|docstring|>Compute the absolute URL of an object. This should return an ASCII string by looking up an adapter from `(ob, request)` to :class:`IAbsoluteURL` and then calling it.<|endoftext|>
734c3f682e7d4ac89b972631dd225e4de277515ebfdf9665ccb3a2cf4b6af9d2
def convert_to_excel_file(csv_content, goodgrids_api_url): '\n Converts a CSV file into an Excel file\n :param csv_content: a string containing the contents of a CSV file\n :param goodgrids_api_url: the GoodGrids API URL for the Excel export configuration to use\n :return: an Excel file, as bytes\n ' verify_ssl = True if (settings.DEBUG or getattr(settings, 'TEST', False)): verify_ssl = False response = requests.post(url=goodgrids_api_url, files={'file': BytesIO(csv_content)}, verify=verify_ssl) if (response.status_code != 200): raise RuntimeError('Could not create Excel file from CSV file') return response.content
Converts a CSV file into an Excel file :param csv_content: a string containing the contents of a CSV file :param goodgrids_api_url: the GoodGrids API URL for the Excel export configuration to use :return: an Excel file, as bytes
django_goodgrids/goodgrids.py
convert_to_excel_file
GoodGrids/django-goodgrids
1
python
def convert_to_excel_file(csv_content, goodgrids_api_url): '\n Converts a CSV file into an Excel file\n :param csv_content: a string containing the contents of a CSV file\n :param goodgrids_api_url: the GoodGrids API URL for the Excel export configuration to use\n :return: an Excel file, as bytes\n ' verify_ssl = True if (settings.DEBUG or getattr(settings, 'TEST', False)): verify_ssl = False response = requests.post(url=goodgrids_api_url, files={'file': BytesIO(csv_content)}, verify=verify_ssl) if (response.status_code != 200): raise RuntimeError('Could not create Excel file from CSV file') return response.content
def convert_to_excel_file(csv_content, goodgrids_api_url): '\n Converts a CSV file into an Excel file\n :param csv_content: a string containing the contents of a CSV file\n :param goodgrids_api_url: the GoodGrids API URL for the Excel export configuration to use\n :return: an Excel file, as bytes\n ' verify_ssl = True if (settings.DEBUG or getattr(settings, 'TEST', False)): verify_ssl = False response = requests.post(url=goodgrids_api_url, files={'file': BytesIO(csv_content)}, verify=verify_ssl) if (response.status_code != 200): raise RuntimeError('Could not create Excel file from CSV file') return response.content<|docstring|>Converts a CSV file into an Excel file :param csv_content: a string containing the contents of a CSV file :param goodgrids_api_url: the GoodGrids API URL for the Excel export configuration to use :return: an Excel file, as bytes<|endoftext|>
61d177175a87e19fe94c9ba8f1f976fd145a79cf23b50a02cc9d3aae7dd83c6d
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)], output_reduction: Dict[(ChordType, ChordType)], use_input_inversions: bool, use_output_inversions: bool, learning_rate: float): '\n Create a new base KeySequenceModel with the given output and input data types.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n output_reduction : Dict[ChordType, ChordType]\n The reduction used for output vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. False to ignore them.\n use_output_inversions : bool\n True to include inversions of chords in the output of this model.\n False to ignore them.\n learning_rate : float\n The learning rate.\n ' super().__init__() self.INPUT_CHORD_PITCH_TYPE = input_chord_pitch_type self.INPUT_KEY_PITCH_TYPE = input_key_pitch_type self.OUTPUT_PITCH_TYPE = output_pitch_type self.input_reduction = input_reduction self.output_reduction = output_reduction self.use_input_inversions = use_input_inversions self.use_output_inversions = use_output_inversions self.lr = learning_rate
Create a new base KeySequenceModel with the given output and input data types. Parameters ---------- input_chord_pitch_type : PitchType The type of pitch representation for the input chord root pitches. input_key_pitch_type : PitchType The type of pitch representation for the input key change vectors. output_pitch_type : PitchType The type of pitch representation for the target chord root pitches. input_reduction : Dict[ChordType, ChordType] The reduction used for input vector chord types. output_reduction : Dict[ChordType, ChordType] The reduction used for output vector chord types. use_input_inversions : bool True to take inversions into account for the input. False to ignore them. use_output_inversions : bool True to include inversions of chords in the output of this model. False to ignore them. learning_rate : float The learning rate.
harmonic_inference/models/chord_sequence_models.py
__init__
apmcleod/harmonic-inference
0
python
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)], output_reduction: Dict[(ChordType, ChordType)], use_input_inversions: bool, use_output_inversions: bool, learning_rate: float): '\n Create a new base KeySequenceModel with the given output and input data types.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n output_reduction : Dict[ChordType, ChordType]\n The reduction used for output vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. False to ignore them.\n use_output_inversions : bool\n True to include inversions of chords in the output of this model.\n False to ignore them.\n learning_rate : float\n The learning rate.\n ' super().__init__() self.INPUT_CHORD_PITCH_TYPE = input_chord_pitch_type self.INPUT_KEY_PITCH_TYPE = input_key_pitch_type self.OUTPUT_PITCH_TYPE = output_pitch_type self.input_reduction = input_reduction self.output_reduction = output_reduction self.use_input_inversions = use_input_inversions self.use_output_inversions = use_output_inversions self.lr = learning_rate
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)], output_reduction: Dict[(ChordType, ChordType)], use_input_inversions: bool, use_output_inversions: bool, learning_rate: float): '\n Create a new base KeySequenceModel with the given output and input data types.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n output_reduction : Dict[ChordType, ChordType]\n The reduction used for output vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. False to ignore them.\n use_output_inversions : bool\n True to include inversions of chords in the output of this model.\n False to ignore them.\n learning_rate : float\n The learning rate.\n ' super().__init__() self.INPUT_CHORD_PITCH_TYPE = input_chord_pitch_type self.INPUT_KEY_PITCH_TYPE = input_key_pitch_type self.OUTPUT_PITCH_TYPE = output_pitch_type self.input_reduction = input_reduction self.output_reduction = output_reduction self.use_input_inversions = use_input_inversions self.use_output_inversions = use_output_inversions self.lr = learning_rate<|docstring|>Create a new base KeySequenceModel with the given output and input data types. Parameters ---------- input_chord_pitch_type : PitchType The type of pitch representation for the input chord root pitches. input_key_pitch_type : PitchType The type of pitch representation for the input key change vectors. output_pitch_type : PitchType The type of pitch representation for the target chord root pitches. 
input_reduction : Dict[ChordType, ChordType] The reduction used for input vector chord types. output_reduction : Dict[ChordType, ChordType] The reduction used for output vector chord types. use_input_inversions : bool True to take inversions into account for the input. False to ignore them. use_output_inversions : bool True to include inversions of chords in the output of this model. False to ignore them. learning_rate : float The learning rate.<|endoftext|>
c2de4d4489684a07723e1321fcc1d341345fcbb9b24ed0759dd68eeec9ae5b2d
def get_dataset_kwargs(self) -> Dict[(str, Any)]: '\n Get a kwargs dict that can be used to create a dataset for this model with\n the correct parameters.\n\n Returns\n -------\n dataset_kwargs : Dict[str, Any]\n A keyword args dict that can be used to create a dataset for this model with\n the correct parameters.\n ' return {'input_reduction': self.input_reduction, 'output_reduction': self.output_reduction, 'use_inversions_input': self.use_input_inversions, 'use_inversions_output': self.use_output_inversions, 'pitch_based': False}
Get a kwargs dict that can be used to create a dataset for this model with the correct parameters. Returns ------- dataset_kwargs : Dict[str, Any] A keyword args dict that can be used to create a dataset for this model with the correct parameters.
harmonic_inference/models/chord_sequence_models.py
get_dataset_kwargs
apmcleod/harmonic-inference
0
python
def get_dataset_kwargs(self) -> Dict[(str, Any)]: '\n Get a kwargs dict that can be used to create a dataset for this model with\n the correct parameters.\n\n Returns\n -------\n dataset_kwargs : Dict[str, Any]\n A keyword args dict that can be used to create a dataset for this model with\n the correct parameters.\n ' return {'input_reduction': self.input_reduction, 'output_reduction': self.output_reduction, 'use_inversions_input': self.use_input_inversions, 'use_inversions_output': self.use_output_inversions, 'pitch_based': False}
def get_dataset_kwargs(self) -> Dict[(str, Any)]: '\n Get a kwargs dict that can be used to create a dataset for this model with\n the correct parameters.\n\n Returns\n -------\n dataset_kwargs : Dict[str, Any]\n A keyword args dict that can be used to create a dataset for this model with\n the correct parameters.\n ' return {'input_reduction': self.input_reduction, 'output_reduction': self.output_reduction, 'use_inversions_input': self.use_input_inversions, 'use_inversions_output': self.use_output_inversions, 'pitch_based': False}<|docstring|>Get a kwargs dict that can be used to create a dataset for this model with the correct parameters. Returns ------- dataset_kwargs : Dict[str, Any] A keyword args dict that can be used to create a dataset for this model with the correct parameters.<|endoftext|>
cda38b2381197d55ff87b0ce07e29497d5363275c9d8261a5bef00e0e4128b86
@abstractmethod def init_hidden(self, batch_size: int) -> Tuple[(Variable, ...)]: '\n Get initial hidden layers for this model.\n\n Parameters\n ----------\n batch_size : int\n The batch size to initialize hidden layers for.\n\n Returns\n -------\n hidden : Tuple[Variable, ...]\n A tuple of initialized hidden layers.\n ' raise NotImplementedError()
Get initial hidden layers for this model. Parameters ---------- batch_size : int The batch size to initialize hidden layers for. Returns ------- hidden : Tuple[Variable, ...] A tuple of initialized hidden layers.
harmonic_inference/models/chord_sequence_models.py
init_hidden
apmcleod/harmonic-inference
0
python
@abstractmethod def init_hidden(self, batch_size: int) -> Tuple[(Variable, ...)]: '\n Get initial hidden layers for this model.\n\n Parameters\n ----------\n batch_size : int\n The batch size to initialize hidden layers for.\n\n Returns\n -------\n hidden : Tuple[Variable, ...]\n A tuple of initialized hidden layers.\n ' raise NotImplementedError()
@abstractmethod def init_hidden(self, batch_size: int) -> Tuple[(Variable, ...)]: '\n Get initial hidden layers for this model.\n\n Parameters\n ----------\n batch_size : int\n The batch size to initialize hidden layers for.\n\n Returns\n -------\n hidden : Tuple[Variable, ...]\n A tuple of initialized hidden layers.\n ' raise NotImplementedError()<|docstring|>Get initial hidden layers for this model. Parameters ---------- batch_size : int The batch size to initialize hidden layers for. Returns ------- hidden : Tuple[Variable, ...] A tuple of initialized hidden layers.<|endoftext|>
cd991622606c06e155197e6d7d6195ee8a8c423c26cef93f1d45415ea596da3d
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)]=None, output_reduction: Dict[(ChordType, ChordType)]=None, use_input_inversions: bool=True, use_output_inversions: bool=True, embed_dim: int=64, lstm_layers: int=1, lstm_hidden_dim: int=128, hidden_dim: int=256, dropout: float=0.0, learning_rate: float=0.001): "\n Create a new simple chord sequence model.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n output_reduction : Dict[ChordType, ChordType]\n The reduction used for output vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. 
False to ignore them.\n use_output_inversions : bool\n True to include inversions of chords in the output of this model.\n False to ignore them.\n embed_dim : int\n The size of the input embedding.\n lstm_layers : int\n The number of bi-directional LSTM layers.\n lstm_hidden_dim : int\n The size of the LSTM's hidden dimension.\n hidden_dim : int\n The size of the hidden dimension between the 2 consecutive linear layers.\n dropout : float\n The dropout proportion.\n learning_rate : float\n The learning rate.\n " super().__init__(input_chord_pitch_type, input_key_pitch_type, output_pitch_type, input_reduction, output_reduction, use_input_inversions, use_output_inversions, learning_rate) self.save_hyperparameters() self.input_dim = ((get_chord_vector_length(input_chord_pitch_type, one_hot=False, relative=True, use_inversions=use_input_inversions, pad=False, reduction=input_reduction) + get_key_change_vector_length(input_key_pitch_type, one_hot=False)) + 1) self.output_dim = get_chord_vector_length(output_pitch_type, one_hot=True, relative=True, use_inversions=use_output_inversions, pad=False, reduction=output_reduction) self.embed_dim = embed_dim self.embed = nn.Linear(self.input_dim, self.embed_dim) self.lstm_hidden_dim = lstm_hidden_dim self.lstm_layers = lstm_layers self.lstm = nn.LSTM(self.embed_dim, self.lstm_hidden_dim, num_layers=self.lstm_layers, bidirectional=False, batch_first=True) self.hidden_dim = hidden_dim self.dropout = dropout self.fc1 = nn.Linear(self.lstm_hidden_dim, self.hidden_dim) self.fc2 = nn.Linear(self.hidden_dim, self.output_dim) self.dropout1 = nn.Dropout(self.dropout)
Create a new simple chord sequence model. Parameters ---------- input_chord_pitch_type : PitchType The type of pitch representation for the input chord root pitches. input_key_pitch_type : PitchType The type of pitch representation for the input key change vectors. output_pitch_type : PitchType The type of pitch representation for the target chord root pitches. input_reduction : Dict[ChordType, ChordType] The reduction used for input vector chord types. output_reduction : Dict[ChordType, ChordType] The reduction used for output vector chord types. use_input_inversions : bool True to take inversions into account for the input. False to ignore them. use_output_inversions : bool True to include inversions of chords in the output of this model. False to ignore them. embed_dim : int The size of the input embedding. lstm_layers : int The number of bi-directional LSTM layers. lstm_hidden_dim : int The size of the LSTM's hidden dimension. hidden_dim : int The size of the hidden dimension between the 2 consecutive linear layers. dropout : float The dropout proportion. learning_rate : float The learning rate.
harmonic_inference/models/chord_sequence_models.py
__init__
apmcleod/harmonic-inference
0
python
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)]=None, output_reduction: Dict[(ChordType, ChordType)]=None, use_input_inversions: bool=True, use_output_inversions: bool=True, embed_dim: int=64, lstm_layers: int=1, lstm_hidden_dim: int=128, hidden_dim: int=256, dropout: float=0.0, learning_rate: float=0.001): "\n Create a new simple chord sequence model.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n output_reduction : Dict[ChordType, ChordType]\n The reduction used for output vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. 
False to ignore them.\n use_output_inversions : bool\n True to include inversions of chords in the output of this model.\n False to ignore them.\n embed_dim : int\n The size of the input embedding.\n lstm_layers : int\n The number of bi-directional LSTM layers.\n lstm_hidden_dim : int\n The size of the LSTM's hidden dimension.\n hidden_dim : int\n The size of the hidden dimension between the 2 consecutive linear layers.\n dropout : float\n The dropout proportion.\n learning_rate : float\n The learning rate.\n " super().__init__(input_chord_pitch_type, input_key_pitch_type, output_pitch_type, input_reduction, output_reduction, use_input_inversions, use_output_inversions, learning_rate) self.save_hyperparameters() self.input_dim = ((get_chord_vector_length(input_chord_pitch_type, one_hot=False, relative=True, use_inversions=use_input_inversions, pad=False, reduction=input_reduction) + get_key_change_vector_length(input_key_pitch_type, one_hot=False)) + 1) self.output_dim = get_chord_vector_length(output_pitch_type, one_hot=True, relative=True, use_inversions=use_output_inversions, pad=False, reduction=output_reduction) self.embed_dim = embed_dim self.embed = nn.Linear(self.input_dim, self.embed_dim) self.lstm_hidden_dim = lstm_hidden_dim self.lstm_layers = lstm_layers self.lstm = nn.LSTM(self.embed_dim, self.lstm_hidden_dim, num_layers=self.lstm_layers, bidirectional=False, batch_first=True) self.hidden_dim = hidden_dim self.dropout = dropout self.fc1 = nn.Linear(self.lstm_hidden_dim, self.hidden_dim) self.fc2 = nn.Linear(self.hidden_dim, self.output_dim) self.dropout1 = nn.Dropout(self.dropout)
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)]=None, output_reduction: Dict[(ChordType, ChordType)]=None, use_input_inversions: bool=True, use_output_inversions: bool=True, embed_dim: int=64, lstm_layers: int=1, lstm_hidden_dim: int=128, hidden_dim: int=256, dropout: float=0.0, learning_rate: float=0.001): "\n Create a new simple chord sequence model.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n output_reduction : Dict[ChordType, ChordType]\n The reduction used for output vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. 
False to ignore them.\n use_output_inversions : bool\n True to include inversions of chords in the output of this model.\n False to ignore them.\n embed_dim : int\n The size of the input embedding.\n lstm_layers : int\n The number of bi-directional LSTM layers.\n lstm_hidden_dim : int\n The size of the LSTM's hidden dimension.\n hidden_dim : int\n The size of the hidden dimension between the 2 consecutive linear layers.\n dropout : float\n The dropout proportion.\n learning_rate : float\n The learning rate.\n " super().__init__(input_chord_pitch_type, input_key_pitch_type, output_pitch_type, input_reduction, output_reduction, use_input_inversions, use_output_inversions, learning_rate) self.save_hyperparameters() self.input_dim = ((get_chord_vector_length(input_chord_pitch_type, one_hot=False, relative=True, use_inversions=use_input_inversions, pad=False, reduction=input_reduction) + get_key_change_vector_length(input_key_pitch_type, one_hot=False)) + 1) self.output_dim = get_chord_vector_length(output_pitch_type, one_hot=True, relative=True, use_inversions=use_output_inversions, pad=False, reduction=output_reduction) self.embed_dim = embed_dim self.embed = nn.Linear(self.input_dim, self.embed_dim) self.lstm_hidden_dim = lstm_hidden_dim self.lstm_layers = lstm_layers self.lstm = nn.LSTM(self.embed_dim, self.lstm_hidden_dim, num_layers=self.lstm_layers, bidirectional=False, batch_first=True) self.hidden_dim = hidden_dim self.dropout = dropout self.fc1 = nn.Linear(self.lstm_hidden_dim, self.hidden_dim) self.fc2 = nn.Linear(self.hidden_dim, self.output_dim) self.dropout1 = nn.Dropout(self.dropout)<|docstring|>Create a new simple chord sequence model. Parameters ---------- input_chord_pitch_type : PitchType The type of pitch representation for the input chord root pitches. input_key_pitch_type : PitchType The type of pitch representation for the input key change vectors. output_pitch_type : PitchType The type of pitch representation for the target chord root pitches. 
input_reduction : Dict[ChordType, ChordType] The reduction used for input vector chord types. output_reduction : Dict[ChordType, ChordType] The reduction used for output vector chord types. use_input_inversions : bool True to take inversions into account for the input. False to ignore them. use_output_inversions : bool True to include inversions of chords in the output of this model. False to ignore them. embed_dim : int The size of the input embedding. lstm_layers : int The number of bi-directional LSTM layers. lstm_hidden_dim : int The size of the LSTM's hidden dimension. hidden_dim : int The size of the hidden dimension between the 2 consecutive linear layers. dropout : float The dropout proportion. learning_rate : float The learning rate.<|endoftext|>
61b6bd3668985ad983bd75a653a4191d76b6e548784f4b423fb705c2a443c6a2
def init_hidden(self, batch_size: int) -> Tuple[(Variable, Variable)]: "\n Initialize the LSTM's hidden layer for a given batch size.\n\n Parameters\n ----------\n batch_size : int\n The batch size.\n " return (Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)), Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)))
Initialize the LSTM's hidden layer for a given batch size. Parameters ---------- batch_size : int The batch size.
harmonic_inference/models/chord_sequence_models.py
init_hidden
apmcleod/harmonic-inference
0
python
def init_hidden(self, batch_size: int) -> Tuple[(Variable, Variable)]: "\n Initialize the LSTM's hidden layer for a given batch size.\n\n Parameters\n ----------\n batch_size : int\n The batch size.\n " return (Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)), Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)))
def init_hidden(self, batch_size: int) -> Tuple[(Variable, Variable)]: "\n Initialize the LSTM's hidden layer for a given batch size.\n\n Parameters\n ----------\n batch_size : int\n The batch size.\n " return (Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)), Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)))<|docstring|>Initialize the LSTM's hidden layer for a given batch size. Parameters ---------- batch_size : int The batch size.<|endoftext|>
10174da7d362768d76842eb8cc5619cbc9ca6fdc37a5623cbf34c2b1a204ef2b
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)]=None, use_input_inversions: bool=True, embed_dim: int=64, lstm_layers: int=1, lstm_hidden_dim: int=128, hidden_dim: int=256, dropout: float=0.0, learning_rate: float=0.001): "\n Create a new simple chord sequence model.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. False to ignore them.\n embed_dim : int\n The size of the input embedding.\n lstm_layers : int\n The number of bi-directional LSTM layers.\n lstm_hidden_dim : int\n The size of the LSTM's hidden dimension.\n hidden_dim : int\n The size of the hidden dimension between the 2 consecutive linear layers.\n dropout : float\n The dropout proportion.\n learning_rate : float\n The learning rate.\n " super().__init__(input_chord_pitch_type, input_key_pitch_type, output_pitch_type, input_reduction, None, use_input_inversions, True, learning_rate) self.save_hyperparameters() self.input_dim = ((get_chord_vector_length(input_chord_pitch_type, one_hot=False, relative=True, use_inversions=use_input_inversions, pad=False, reduction=input_reduction) + get_key_change_vector_length(input_key_pitch_type, one_hot=False)) + 1) self.output_dim = (MAX_RELATIVE_TPC - MIN_RELATIVE_TPC) self.embed_dim = embed_dim self.embed = nn.Linear(self.input_dim, self.embed_dim) self.lstm_hidden_dim = lstm_hidden_dim self.lstm_layers = lstm_layers self.lstm = nn.LSTM(self.embed_dim, self.lstm_hidden_dim, 
num_layers=self.lstm_layers, bidirectional=False, batch_first=True) self.hidden_dim = hidden_dim self.dropout = dropout self.fc1 = nn.Linear(self.lstm_hidden_dim, self.hidden_dim) self.fc2 = nn.Linear(self.hidden_dim, self.output_dim) self.dropout1 = nn.Dropout(self.dropout)
Create a new simple chord sequence model. Parameters ---------- input_chord_pitch_type : PitchType The type of pitch representation for the input chord root pitches. input_key_pitch_type : PitchType The type of pitch representation for the input key change vectors. output_pitch_type : PitchType The type of pitch representation for the target chord root pitches. input_reduction : Dict[ChordType, ChordType] The reduction used for input vector chord types. use_input_inversions : bool True to take inversions into account for the input. False to ignore them. embed_dim : int The size of the input embedding. lstm_layers : int The number of bi-directional LSTM layers. lstm_hidden_dim : int The size of the LSTM's hidden dimension. hidden_dim : int The size of the hidden dimension between the 2 consecutive linear layers. dropout : float The dropout proportion. learning_rate : float The learning rate.
harmonic_inference/models/chord_sequence_models.py
__init__
apmcleod/harmonic-inference
0
python
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)]=None, use_input_inversions: bool=True, embed_dim: int=64, lstm_layers: int=1, lstm_hidden_dim: int=128, hidden_dim: int=256, dropout: float=0.0, learning_rate: float=0.001): "\n Create a new simple chord sequence model.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. False to ignore them.\n embed_dim : int\n The size of the input embedding.\n lstm_layers : int\n The number of bi-directional LSTM layers.\n lstm_hidden_dim : int\n The size of the LSTM's hidden dimension.\n hidden_dim : int\n The size of the hidden dimension between the 2 consecutive linear layers.\n dropout : float\n The dropout proportion.\n learning_rate : float\n The learning rate.\n " super().__init__(input_chord_pitch_type, input_key_pitch_type, output_pitch_type, input_reduction, None, use_input_inversions, True, learning_rate) self.save_hyperparameters() self.input_dim = ((get_chord_vector_length(input_chord_pitch_type, one_hot=False, relative=True, use_inversions=use_input_inversions, pad=False, reduction=input_reduction) + get_key_change_vector_length(input_key_pitch_type, one_hot=False)) + 1) self.output_dim = (MAX_RELATIVE_TPC - MIN_RELATIVE_TPC) self.embed_dim = embed_dim self.embed = nn.Linear(self.input_dim, self.embed_dim) self.lstm_hidden_dim = lstm_hidden_dim self.lstm_layers = lstm_layers self.lstm = nn.LSTM(self.embed_dim, self.lstm_hidden_dim, 
num_layers=self.lstm_layers, bidirectional=False, batch_first=True) self.hidden_dim = hidden_dim self.dropout = dropout self.fc1 = nn.Linear(self.lstm_hidden_dim, self.hidden_dim) self.fc2 = nn.Linear(self.hidden_dim, self.output_dim) self.dropout1 = nn.Dropout(self.dropout)
def __init__(self, input_chord_pitch_type: PitchType, input_key_pitch_type: PitchType, output_pitch_type: PitchType, input_reduction: Dict[(ChordType, ChordType)]=None, use_input_inversions: bool=True, embed_dim: int=64, lstm_layers: int=1, lstm_hidden_dim: int=128, hidden_dim: int=256, dropout: float=0.0, learning_rate: float=0.001): "\n Create a new simple chord sequence model.\n\n Parameters\n ----------\n input_chord_pitch_type : PitchType\n The type of pitch representation for the input chord root pitches.\n input_key_pitch_type : PitchType\n The type of pitch representation for the input key change vectors.\n output_pitch_type : PitchType\n The type of pitch representation for the target chord root pitches.\n input_reduction : Dict[ChordType, ChordType]\n The reduction used for input vector chord types.\n use_input_inversions : bool\n True to take inversions into account for the input. False to ignore them.\n embed_dim : int\n The size of the input embedding.\n lstm_layers : int\n The number of bi-directional LSTM layers.\n lstm_hidden_dim : int\n The size of the LSTM's hidden dimension.\n hidden_dim : int\n The size of the hidden dimension between the 2 consecutive linear layers.\n dropout : float\n The dropout proportion.\n learning_rate : float\n The learning rate.\n " super().__init__(input_chord_pitch_type, input_key_pitch_type, output_pitch_type, input_reduction, None, use_input_inversions, True, learning_rate) self.save_hyperparameters() self.input_dim = ((get_chord_vector_length(input_chord_pitch_type, one_hot=False, relative=True, use_inversions=use_input_inversions, pad=False, reduction=input_reduction) + get_key_change_vector_length(input_key_pitch_type, one_hot=False)) + 1) self.output_dim = (MAX_RELATIVE_TPC - MIN_RELATIVE_TPC) self.embed_dim = embed_dim self.embed = nn.Linear(self.input_dim, self.embed_dim) self.lstm_hidden_dim = lstm_hidden_dim self.lstm_layers = lstm_layers self.lstm = nn.LSTM(self.embed_dim, self.lstm_hidden_dim, 
num_layers=self.lstm_layers, bidirectional=False, batch_first=True) self.hidden_dim = hidden_dim self.dropout = dropout self.fc1 = nn.Linear(self.lstm_hidden_dim, self.hidden_dim) self.fc2 = nn.Linear(self.hidden_dim, self.output_dim) self.dropout1 = nn.Dropout(self.dropout)<|docstring|>Create a new simple chord sequence model. Parameters ---------- input_chord_pitch_type : PitchType The type of pitch representation for the input chord root pitches. input_key_pitch_type : PitchType The type of pitch representation for the input key change vectors. output_pitch_type : PitchType The type of pitch representation for the target chord root pitches. input_reduction : Dict[ChordType, ChordType] The reduction used for input vector chord types. use_input_inversions : bool True to take inversions into account for the input. False to ignore them. embed_dim : int The size of the input embedding. lstm_layers : int The number of bi-directional LSTM layers. lstm_hidden_dim : int The size of the LSTM's hidden dimension. hidden_dim : int The size of the hidden dimension between the 2 consecutive linear layers. dropout : float The dropout proportion. learning_rate : float The learning rate.<|endoftext|>
61b6bd3668985ad983bd75a653a4191d76b6e548784f4b423fb705c2a443c6a2
def init_hidden(self, batch_size: int) -> Tuple[(Variable, Variable)]: "\n Initialize the LSTM's hidden layer for a given batch size.\n\n Parameters\n ----------\n batch_size : int\n The batch size.\n " return (Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)), Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)))
Initialize the LSTM's hidden layer for a given batch size. Parameters ---------- batch_size : int The batch size.
harmonic_inference/models/chord_sequence_models.py
init_hidden
apmcleod/harmonic-inference
0
python
def init_hidden(self, batch_size: int) -> Tuple[(Variable, Variable)]: "\n Initialize the LSTM's hidden layer for a given batch size.\n\n Parameters\n ----------\n batch_size : int\n The batch size.\n " return (Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)), Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)))
def init_hidden(self, batch_size: int) -> Tuple[(Variable, Variable)]: "\n Initialize the LSTM's hidden layer for a given batch size.\n\n Parameters\n ----------\n batch_size : int\n The batch size.\n " return (Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)), Variable(torch.zeros(self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device)))<|docstring|>Initialize the LSTM's hidden layer for a given batch size. Parameters ---------- batch_size : int The batch size.<|endoftext|>
bfbf4671cb2f09c05764ced60fcedd22520b1ae8c62a24ee712fd687213a4e6d
def _to_complex(value: Union[(List[float], complex)]) -> complex: 'Convert the input value to type ``complex``.\n Args:\n value: Value to be converted.\n Returns:\n Input value in ``complex``.\n Raises:\n TypeError: If the input value is not in the expected format.\n ' if (isinstance(value, list) and (len(value) == 2)): return complex(value[0], value[1]) elif isinstance(value, complex): return value raise TypeError('{} is not in a valid complex number format.'.format(value))
Convert the input value to type ``complex``. Args: value: Value to be converted. Returns: Input value in ``complex``. Raises: TypeError: If the input value is not in the expected format.
qiskit/qobj/pulse_qobj.py
_to_complex
areeq-hasan/qiskit-terra
11
python
def _to_complex(value: Union[(List[float], complex)]) -> complex: 'Convert the input value to type ``complex``.\n Args:\n value: Value to be converted.\n Returns:\n Input value in ``complex``.\n Raises:\n TypeError: If the input value is not in the expected format.\n ' if (isinstance(value, list) and (len(value) == 2)): return complex(value[0], value[1]) elif isinstance(value, complex): return value raise TypeError('{} is not in a valid complex number format.'.format(value))
def _to_complex(value: Union[(List[float], complex)]) -> complex: 'Convert the input value to type ``complex``.\n Args:\n value: Value to be converted.\n Returns:\n Input value in ``complex``.\n Raises:\n TypeError: If the input value is not in the expected format.\n ' if (isinstance(value, list) and (len(value) == 2)): return complex(value[0], value[1]) elif isinstance(value, complex): return value raise TypeError('{} is not in a valid complex number format.'.format(value))<|docstring|>Convert the input value to type ``complex``. Args: value: Value to be converted. Returns: Input value in ``complex``. Raises: TypeError: If the input value is not in the expected format.<|endoftext|>
1226ae48c526087bcc659abf01066ddbf6af8cb4fa51c845fba44e5ae8ed6f7c
def __init__(self, name, params=None): 'Instantiate a new QobjMeasurementOption object.\n\n Args:\n name (str): The name of the measurement option\n params (list): The parameters of the measurement option.\n ' self.name = name if (params is not None): self.params = params
Instantiate a new QobjMeasurementOption object. Args: name (str): The name of the measurement option params (list): The parameters of the measurement option.
qiskit/qobj/pulse_qobj.py
__init__
areeq-hasan/qiskit-terra
11
python
def __init__(self, name, params=None): 'Instantiate a new QobjMeasurementOption object.\n\n Args:\n name (str): The name of the measurement option\n params (list): The parameters of the measurement option.\n ' self.name = name if (params is not None): self.params = params
def __init__(self, name, params=None): 'Instantiate a new QobjMeasurementOption object.\n\n Args:\n name (str): The name of the measurement option\n params (list): The parameters of the measurement option.\n ' self.name = name if (params is not None): self.params = params<|docstring|>Instantiate a new QobjMeasurementOption object. Args: name (str): The name of the measurement option params (list): The parameters of the measurement option.<|endoftext|>
6d942d4ca8e7be652cd404622d05900e3148ffc31f770693539042a528c4ed4a
def to_dict(self): 'Return a dict format representation of the QobjMeasurementOption.\n\n Returns:\n dict: The dictionary form of the QasmMeasurementOption.\n ' out_dict = {'name': self.name} if hasattr(self, 'params'): out_dict['params'] = self.params return out_dict
Return a dict format representation of the QobjMeasurementOption. Returns: dict: The dictionary form of the QasmMeasurementOption.
qiskit/qobj/pulse_qobj.py
to_dict
areeq-hasan/qiskit-terra
11
python
def to_dict(self): 'Return a dict format representation of the QobjMeasurementOption.\n\n Returns:\n dict: The dictionary form of the QasmMeasurementOption.\n ' out_dict = {'name': self.name} if hasattr(self, 'params'): out_dict['params'] = self.params return out_dict
def to_dict(self): 'Return a dict format representation of the QobjMeasurementOption.\n\n Returns:\n dict: The dictionary form of the QasmMeasurementOption.\n ' out_dict = {'name': self.name} if hasattr(self, 'params'): out_dict['params'] = self.params return out_dict<|docstring|>Return a dict format representation of the QobjMeasurementOption. Returns: dict: The dictionary form of the QasmMeasurementOption.<|endoftext|>
9b7a09b80067c37e9bbd856363bd254e62def0b8ac40bf9381892bafe80f03b1
@classmethod def from_dict(cls, data): 'Create a new QobjMeasurementOption object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n QobjMeasurementOption: The object from the input dictionary.\n ' name = data.pop('name') return cls(name, **data)
Create a new QobjMeasurementOption object from a dictionary. Args: data (dict): A dictionary for the experiment config Returns: QobjMeasurementOption: The object from the input dictionary.
qiskit/qobj/pulse_qobj.py
from_dict
areeq-hasan/qiskit-terra
11
python
@classmethod def from_dict(cls, data): 'Create a new QobjMeasurementOption object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n QobjMeasurementOption: The object from the input dictionary.\n ' name = data.pop('name') return cls(name, **data)
@classmethod def from_dict(cls, data): 'Create a new QobjMeasurementOption object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n QobjMeasurementOption: The object from the input dictionary.\n ' name = data.pop('name') return cls(name, **data)<|docstring|>Create a new QobjMeasurementOption object from a dictionary. Args: data (dict): A dictionary for the experiment config Returns: QobjMeasurementOption: The object from the input dictionary.<|endoftext|>
38fd9cc20a9c260733d5e6e46c7211716317ec95cff22079a4306d418731d2e4
def __init__(self, name, t0, ch=None, conditional=None, val=None, phase=None, duration=None, qubits=None, memory_slot=None, register_slot=None, kernels=None, discriminators=None, label=None, type=None, pulse_shape=None, parameters=None, frequency=None): 'Instantiate a new PulseQobjInstruction object.\n\n Args:\n name (str): The name of the instruction\n t0 (int): Pulse start time in integer **dt** units.\n ch (str): The channel to apply the pulse instruction.\n conditional (int): The register to use for a conditional for this\n instruction\n val (complex): Complex value to apply, bounded by an absolute value\n of 1.\n phase (float): if a ``fc`` instruction, the frame change phase in\n radians.\n frequency (float): if a ``sf`` instruction, the frequency in Hz.\n duration (int): The duration of the pulse in **dt** units.\n qubits (list): A list of ``int`` representing the qubits the\n instruction operates on\n memory_slot (list): If a ``measure`` instruction this is a list\n of ``int`` containing the list of memory slots to store the\n measurement results in (must be the same length as qubits).\n If a ``bfunc`` instruction this is a single ``int`` of the\n memory slot to store the boolean function result in.\n register_slot (list): If a ``measure`` instruction this is a list\n of ``int`` containing the list of register slots in which to\n store the measurement results (must be the same length as\n qubits). If a ``bfunc`` instruction this is a single ``int``\n of the register slot in which to store the result.\n kernels (list): List of :class:`QobjMeasurementOption` objects\n defining the measurement kernels and set of parameters if the\n measurement level is 1 or 2. Only used for ``acquire``\n instructions.\n discriminators (list): A list of :class:`QobjMeasurementOption`\n used to set the discriminators to be used if the measurement\n level is 2. 
Only used for ``acquire`` instructions.\n label (str): Label of instruction\n type (str): Type of instruction\n pulse_shape (str): The shape of the parametric pulse\n parameters (dict): The parameters for a parametric pulse\n ' self.name = name self.t0 = t0 if (ch is not None): self.ch = ch if (conditional is not None): self.conditional = conditional if (val is not None): self.val = val if (phase is not None): self.phase = phase if (frequency is not None): self.frequency = frequency if (duration is not None): self.duration = duration if (qubits is not None): self.qubits = qubits if (memory_slot is not None): self.memory_slot = memory_slot if (register_slot is not None): self.register_slot = register_slot if (kernels is not None): self.kernels = kernels if (discriminators is not None): self.discriminators = discriminators if (label is not None): self.label = label if (type is not None): self.type = type if (pulse_shape is not None): self.pulse_shape = pulse_shape if (parameters is not None): self.parameters = parameters
Instantiate a new PulseQobjInstruction object. Args: name (str): The name of the instruction t0 (int): Pulse start time in integer **dt** units. ch (str): The channel to apply the pulse instruction. conditional (int): The register to use for a conditional for this instruction val (complex): Complex value to apply, bounded by an absolute value of 1. phase (float): if a ``fc`` instruction, the frame change phase in radians. frequency (float): if a ``sf`` instruction, the frequency in Hz. duration (int): The duration of the pulse in **dt** units. qubits (list): A list of ``int`` representing the qubits the instruction operates on memory_slot (list): If a ``measure`` instruction this is a list of ``int`` containing the list of memory slots to store the measurement results in (must be the same length as qubits). If a ``bfunc`` instruction this is a single ``int`` of the memory slot to store the boolean function result in. register_slot (list): If a ``measure`` instruction this is a list of ``int`` containing the list of register slots in which to store the measurement results (must be the same length as qubits). If a ``bfunc`` instruction this is a single ``int`` of the register slot in which to store the result. kernels (list): List of :class:`QobjMeasurementOption` objects defining the measurement kernels and set of parameters if the measurement level is 1 or 2. Only used for ``acquire`` instructions. discriminators (list): A list of :class:`QobjMeasurementOption` used to set the discriminators to be used if the measurement level is 2. Only used for ``acquire`` instructions. label (str): Label of instruction type (str): Type of instruction pulse_shape (str): The shape of the parametric pulse parameters (dict): The parameters for a parametric pulse
qiskit/qobj/pulse_qobj.py
__init__
areeq-hasan/qiskit-terra
11
python
def __init__(self, name, t0, ch=None, conditional=None, val=None, phase=None, duration=None, qubits=None, memory_slot=None, register_slot=None, kernels=None, discriminators=None, label=None, type=None, pulse_shape=None, parameters=None, frequency=None): 'Instantiate a new PulseQobjInstruction object.\n\n Args:\n name (str): The name of the instruction\n t0 (int): Pulse start time in integer **dt** units.\n ch (str): The channel to apply the pulse instruction.\n conditional (int): The register to use for a conditional for this\n instruction\n val (complex): Complex value to apply, bounded by an absolute value\n of 1.\n phase (float): if a ``fc`` instruction, the frame change phase in\n radians.\n frequency (float): if a ``sf`` instruction, the frequency in Hz.\n duration (int): The duration of the pulse in **dt** units.\n qubits (list): A list of ``int`` representing the qubits the\n instruction operates on\n memory_slot (list): If a ``measure`` instruction this is a list\n of ``int`` containing the list of memory slots to store the\n measurement results in (must be the same length as qubits).\n If a ``bfunc`` instruction this is a single ``int`` of the\n memory slot to store the boolean function result in.\n register_slot (list): If a ``measure`` instruction this is a list\n of ``int`` containing the list of register slots in which to\n store the measurement results (must be the same length as\n qubits). If a ``bfunc`` instruction this is a single ``int``\n of the register slot in which to store the result.\n kernels (list): List of :class:`QobjMeasurementOption` objects\n defining the measurement kernels and set of parameters if the\n measurement level is 1 or 2. Only used for ``acquire``\n instructions.\n discriminators (list): A list of :class:`QobjMeasurementOption`\n used to set the discriminators to be used if the measurement\n level is 2. 
Only used for ``acquire`` instructions.\n label (str): Label of instruction\n type (str): Type of instruction\n pulse_shape (str): The shape of the parametric pulse\n parameters (dict): The parameters for a parametric pulse\n ' self.name = name self.t0 = t0 if (ch is not None): self.ch = ch if (conditional is not None): self.conditional = conditional if (val is not None): self.val = val if (phase is not None): self.phase = phase if (frequency is not None): self.frequency = frequency if (duration is not None): self.duration = duration if (qubits is not None): self.qubits = qubits if (memory_slot is not None): self.memory_slot = memory_slot if (register_slot is not None): self.register_slot = register_slot if (kernels is not None): self.kernels = kernels if (discriminators is not None): self.discriminators = discriminators if (label is not None): self.label = label if (type is not None): self.type = type if (pulse_shape is not None): self.pulse_shape = pulse_shape if (parameters is not None): self.parameters = parameters
def __init__(self, name, t0, ch=None, conditional=None, val=None, phase=None, duration=None, qubits=None, memory_slot=None, register_slot=None, kernels=None, discriminators=None, label=None, type=None, pulse_shape=None, parameters=None, frequency=None): 'Instantiate a new PulseQobjInstruction object.\n\n Args:\n name (str): The name of the instruction\n t0 (int): Pulse start time in integer **dt** units.\n ch (str): The channel to apply the pulse instruction.\n conditional (int): The register to use for a conditional for this\n instruction\n val (complex): Complex value to apply, bounded by an absolute value\n of 1.\n phase (float): if a ``fc`` instruction, the frame change phase in\n radians.\n frequency (float): if a ``sf`` instruction, the frequency in Hz.\n duration (int): The duration of the pulse in **dt** units.\n qubits (list): A list of ``int`` representing the qubits the\n instruction operates on\n memory_slot (list): If a ``measure`` instruction this is a list\n of ``int`` containing the list of memory slots to store the\n measurement results in (must be the same length as qubits).\n If a ``bfunc`` instruction this is a single ``int`` of the\n memory slot to store the boolean function result in.\n register_slot (list): If a ``measure`` instruction this is a list\n of ``int`` containing the list of register slots in which to\n store the measurement results (must be the same length as\n qubits). If a ``bfunc`` instruction this is a single ``int``\n of the register slot in which to store the result.\n kernels (list): List of :class:`QobjMeasurementOption` objects\n defining the measurement kernels and set of parameters if the\n measurement level is 1 or 2. Only used for ``acquire``\n instructions.\n discriminators (list): A list of :class:`QobjMeasurementOption`\n used to set the discriminators to be used if the measurement\n level is 2. 
Only used for ``acquire`` instructions.\n label (str): Label of instruction\n type (str): Type of instruction\n pulse_shape (str): The shape of the parametric pulse\n parameters (dict): The parameters for a parametric pulse\n ' self.name = name self.t0 = t0 if (ch is not None): self.ch = ch if (conditional is not None): self.conditional = conditional if (val is not None): self.val = val if (phase is not None): self.phase = phase if (frequency is not None): self.frequency = frequency if (duration is not None): self.duration = duration if (qubits is not None): self.qubits = qubits if (memory_slot is not None): self.memory_slot = memory_slot if (register_slot is not None): self.register_slot = register_slot if (kernels is not None): self.kernels = kernels if (discriminators is not None): self.discriminators = discriminators if (label is not None): self.label = label if (type is not None): self.type = type if (pulse_shape is not None): self.pulse_shape = pulse_shape if (parameters is not None): self.parameters = parameters<|docstring|>Instantiate a new PulseQobjInstruction object. Args: name (str): The name of the instruction t0 (int): Pulse start time in integer **dt** units. ch (str): The channel to apply the pulse instruction. conditional (int): The register to use for a conditional for this instruction val (complex): Complex value to apply, bounded by an absolute value of 1. phase (float): if a ``fc`` instruction, the frame change phase in radians. frequency (float): if a ``sf`` instruction, the frequency in Hz. duration (int): The duration of the pulse in **dt** units. qubits (list): A list of ``int`` representing the qubits the instruction operates on memory_slot (list): If a ``measure`` instruction this is a list of ``int`` containing the list of memory slots to store the measurement results in (must be the same length as qubits). If a ``bfunc`` instruction this is a single ``int`` of the memory slot to store the boolean function result in. 
register_slot (list): If a ``measure`` instruction this is a list of ``int`` containing the list of register slots in which to store the measurement results (must be the same length as qubits). If a ``bfunc`` instruction this is a single ``int`` of the register slot in which to store the result. kernels (list): List of :class:`QobjMeasurementOption` objects defining the measurement kernels and set of parameters if the measurement level is 1 or 2. Only used for ``acquire`` instructions. discriminators (list): A list of :class:`QobjMeasurementOption` used to set the discriminators to be used if the measurement level is 2. Only used for ``acquire`` instructions. label (str): Label of instruction type (str): Type of instruction pulse_shape (str): The shape of the parametric pulse parameters (dict): The parameters for a parametric pulse<|endoftext|>
3be0b13bad1ce799d1a312f22cb18b37d269eb3e5cda55709030ac78f15f702e
def to_dict(self): 'Return a dictionary format representation of the Instruction.\n\n Returns:\n dict: The dictionary form of the PulseQobjInstruction.\n ' out_dict = {'name': self.name, 't0': self.t0} for attr in self._COMMON_ATTRS: if hasattr(self, attr): out_dict[attr] = getattr(self, attr) if hasattr(self, 'kernels'): out_dict['kernels'] = [x.to_dict() for x in self.kernels] if hasattr(self, 'discriminators'): out_dict['discriminators'] = [x.to_dict() for x in self.discriminators] return out_dict
Return a dictionary format representation of the Instruction. Returns: dict: The dictionary form of the PulseQobjInstruction.
qiskit/qobj/pulse_qobj.py
to_dict
areeq-hasan/qiskit-terra
11
python
def to_dict(self): 'Return a dictionary format representation of the Instruction.\n\n Returns:\n dict: The dictionary form of the PulseQobjInstruction.\n ' out_dict = {'name': self.name, 't0': self.t0} for attr in self._COMMON_ATTRS: if hasattr(self, attr): out_dict[attr] = getattr(self, attr) if hasattr(self, 'kernels'): out_dict['kernels'] = [x.to_dict() for x in self.kernels] if hasattr(self, 'discriminators'): out_dict['discriminators'] = [x.to_dict() for x in self.discriminators] return out_dict
def to_dict(self): 'Return a dictionary format representation of the Instruction.\n\n Returns:\n dict: The dictionary form of the PulseQobjInstruction.\n ' out_dict = {'name': self.name, 't0': self.t0} for attr in self._COMMON_ATTRS: if hasattr(self, attr): out_dict[attr] = getattr(self, attr) if hasattr(self, 'kernels'): out_dict['kernels'] = [x.to_dict() for x in self.kernels] if hasattr(self, 'discriminators'): out_dict['discriminators'] = [x.to_dict() for x in self.discriminators] return out_dict<|docstring|>Return a dictionary format representation of the Instruction. Returns: dict: The dictionary form of the PulseQobjInstruction.<|endoftext|>
2d912f84a75c89973c7942e63bdabbd6797f7cc96e25c5e40dfb85f34abfc4c7
@classmethod def from_dict(cls, data): 'Create a new PulseQobjExperimentConfig object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n PulseQobjInstruction: The object from the input dictionary.\n ' t0 = data.pop('t0') name = data.pop('name') if ('kernels' in data): kernels = data.pop('kernels') kernel_obj = [QobjMeasurementOption.from_dict(x) for x in kernels] data['kernels'] = kernel_obj if ('discriminators' in data): discriminators = data.pop('discriminators') discriminators_obj = [QobjMeasurementOption.from_dict(x) for x in discriminators] data['discriminators'] = discriminators_obj if (('parameters' in data) and ('amp' in data['parameters'])): data['parameters']['amp'] = _to_complex(data['parameters']['amp']) return cls(name, t0, **data)
Create a new PulseQobjExperimentConfig object from a dictionary. Args: data (dict): A dictionary for the experiment config Returns: PulseQobjInstruction: The object from the input dictionary.
qiskit/qobj/pulse_qobj.py
from_dict
areeq-hasan/qiskit-terra
11
python
@classmethod def from_dict(cls, data): 'Create a new PulseQobjExperimentConfig object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n PulseQobjInstruction: The object from the input dictionary.\n ' t0 = data.pop('t0') name = data.pop('name') if ('kernels' in data): kernels = data.pop('kernels') kernel_obj = [QobjMeasurementOption.from_dict(x) for x in kernels] data['kernels'] = kernel_obj if ('discriminators' in data): discriminators = data.pop('discriminators') discriminators_obj = [QobjMeasurementOption.from_dict(x) for x in discriminators] data['discriminators'] = discriminators_obj if (('parameters' in data) and ('amp' in data['parameters'])): data['parameters']['amp'] = _to_complex(data['parameters']['amp']) return cls(name, t0, **data)
@classmethod def from_dict(cls, data): 'Create a new PulseQobjExperimentConfig object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n PulseQobjInstruction: The object from the input dictionary.\n ' t0 = data.pop('t0') name = data.pop('name') if ('kernels' in data): kernels = data.pop('kernels') kernel_obj = [QobjMeasurementOption.from_dict(x) for x in kernels] data['kernels'] = kernel_obj if ('discriminators' in data): discriminators = data.pop('discriminators') discriminators_obj = [QobjMeasurementOption.from_dict(x) for x in discriminators] data['discriminators'] = discriminators_obj if (('parameters' in data) and ('amp' in data['parameters'])): data['parameters']['amp'] = _to_complex(data['parameters']['amp']) return cls(name, t0, **data)<|docstring|>Create a new PulseQobjExperimentConfig object from a dictionary. Args: data (dict): A dictionary for the experiment config Returns: PulseQobjInstruction: The object from the input dictionary.<|endoftext|>
b7ebfce198775c64cd0e1b57d79a8e1f985c1edf76bd3502f8d349378f4486c9
def __init__(self, meas_level, meas_return, pulse_library, qubit_lo_freq, meas_lo_freq, memory_slot_size=None, rep_time=None, rep_delay=None, shots=None, max_credits=None, seed_simulator=None, memory_slots=None, **kwargs): "Instantiate a PulseQobjConfig object.\n\n Args:\n meas_level (int): The measurement level to use.\n meas_return (int): The level of measurement information to return.\n pulse_library (list): A list of :class:`PulseLibraryItem` objects\n which define the set of primative pulses\n qubit_lo_freq (list): List of frequencies (as floats) for the qubit\n driver LO's in GHz.\n meas_lo_freq (list): List of frequencies (as floats) for the'\n measurement driver LO's in GHz.\n memory_slot_size (int): Size of each memory slot if the output is\n Level 0.\n rep_time (int): Time per program execution in sec. Must be from the list provided\n by the backend (``backend.configuration().rep_times``). Defaults to the first entry\n in ``backend.configuration().rep_times``.\n rep_delay (float): Delay between programs in sec. Only supported on certain\n backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported,\n ``rep_delay`` will be used instead of ``rep_time`` and must be from the range\n supplied by the backend (``backend.configuration().rep_delay_range``). 
Default is\n ``backend.configuration().default_rep_delay``.\n shots (int): The number of shots\n max_credits (int): the max_credits to use on the IBMQ public devices.\n seed_simulator (int): the seed to use in the simulator\n memory_slots (list): The number of memory slots on the device\n kwargs: Additional free form key value fields to add to the\n configuration\n " self.meas_level = meas_level self.meas_return = meas_return self.pulse_library = pulse_library self.qubit_lo_freq = qubit_lo_freq self.meas_lo_freq = meas_lo_freq if (memory_slot_size is not None): self.memory_slot_size = memory_slot_size if (rep_time is not None): self.rep_time = rep_time if (rep_delay is not None): self.rep_delay = rep_delay if (shots is not None): self.shots = int(shots) if (max_credits is not None): self.max_credits = int(max_credits) if (seed_simulator is not None): self.seed_simulator = int(seed_simulator) if (memory_slots is not None): self.memory_slots = int(memory_slots) if kwargs: self.__dict__.update(kwargs)
Instantiate a PulseQobjConfig object. Args: meas_level (int): The measurement level to use. meas_return (int): The level of measurement information to return. pulse_library (list): A list of :class:`PulseLibraryItem` objects which define the set of primative pulses qubit_lo_freq (list): List of frequencies (as floats) for the qubit driver LO's in GHz. meas_lo_freq (list): List of frequencies (as floats) for the' measurement driver LO's in GHz. memory_slot_size (int): Size of each memory slot if the output is Level 0. rep_time (int): Time per program execution in sec. Must be from the list provided by the backend (``backend.configuration().rep_times``). Defaults to the first entry in ``backend.configuration().rep_times``. rep_delay (float): Delay between programs in sec. Only supported on certain backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported, ``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied by the backend (``backend.configuration().rep_delay_range``). Default is ``backend.configuration().default_rep_delay``. shots (int): The number of shots max_credits (int): the max_credits to use on the IBMQ public devices. seed_simulator (int): the seed to use in the simulator memory_slots (list): The number of memory slots on the device kwargs: Additional free form key value fields to add to the configuration
qiskit/qobj/pulse_qobj.py
__init__
areeq-hasan/qiskit-terra
11
python
def __init__(self, meas_level, meas_return, pulse_library, qubit_lo_freq, meas_lo_freq, memory_slot_size=None, rep_time=None, rep_delay=None, shots=None, max_credits=None, seed_simulator=None, memory_slots=None, **kwargs): "Instantiate a PulseQobjConfig object.\n\n Args:\n meas_level (int): The measurement level to use.\n meas_return (int): The level of measurement information to return.\n pulse_library (list): A list of :class:`PulseLibraryItem` objects\n which define the set of primative pulses\n qubit_lo_freq (list): List of frequencies (as floats) for the qubit\n driver LO's in GHz.\n meas_lo_freq (list): List of frequencies (as floats) for the'\n measurement driver LO's in GHz.\n memory_slot_size (int): Size of each memory slot if the output is\n Level 0.\n rep_time (int): Time per program execution in sec. Must be from the list provided\n by the backend (``backend.configuration().rep_times``). Defaults to the first entry\n in ``backend.configuration().rep_times``.\n rep_delay (float): Delay between programs in sec. Only supported on certain\n backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported,\n ``rep_delay`` will be used instead of ``rep_time`` and must be from the range\n supplied by the backend (``backend.configuration().rep_delay_range``). 
Default is\n ``backend.configuration().default_rep_delay``.\n shots (int): The number of shots\n max_credits (int): the max_credits to use on the IBMQ public devices.\n seed_simulator (int): the seed to use in the simulator\n memory_slots (list): The number of memory slots on the device\n kwargs: Additional free form key value fields to add to the\n configuration\n " self.meas_level = meas_level self.meas_return = meas_return self.pulse_library = pulse_library self.qubit_lo_freq = qubit_lo_freq self.meas_lo_freq = meas_lo_freq if (memory_slot_size is not None): self.memory_slot_size = memory_slot_size if (rep_time is not None): self.rep_time = rep_time if (rep_delay is not None): self.rep_delay = rep_delay if (shots is not None): self.shots = int(shots) if (max_credits is not None): self.max_credits = int(max_credits) if (seed_simulator is not None): self.seed_simulator = int(seed_simulator) if (memory_slots is not None): self.memory_slots = int(memory_slots) if kwargs: self.__dict__.update(kwargs)
def __init__(self, meas_level, meas_return, pulse_library, qubit_lo_freq, meas_lo_freq, memory_slot_size=None, rep_time=None, rep_delay=None, shots=None, max_credits=None, seed_simulator=None, memory_slots=None, **kwargs): "Instantiate a PulseQobjConfig object.\n\n Args:\n meas_level (int): The measurement level to use.\n meas_return (int): The level of measurement information to return.\n pulse_library (list): A list of :class:`PulseLibraryItem` objects\n which define the set of primative pulses\n qubit_lo_freq (list): List of frequencies (as floats) for the qubit\n driver LO's in GHz.\n meas_lo_freq (list): List of frequencies (as floats) for the'\n measurement driver LO's in GHz.\n memory_slot_size (int): Size of each memory slot if the output is\n Level 0.\n rep_time (int): Time per program execution in sec. Must be from the list provided\n by the backend (``backend.configuration().rep_times``). Defaults to the first entry\n in ``backend.configuration().rep_times``.\n rep_delay (float): Delay between programs in sec. Only supported on certain\n backends (``backend.configuration().dynamic_reprate_enabled`` ). If supported,\n ``rep_delay`` will be used instead of ``rep_time`` and must be from the range\n supplied by the backend (``backend.configuration().rep_delay_range``). 
Default is\n ``backend.configuration().default_rep_delay``.\n shots (int): The number of shots\n max_credits (int): the max_credits to use on the IBMQ public devices.\n seed_simulator (int): the seed to use in the simulator\n memory_slots (list): The number of memory slots on the device\n kwargs: Additional free form key value fields to add to the\n configuration\n " self.meas_level = meas_level self.meas_return = meas_return self.pulse_library = pulse_library self.qubit_lo_freq = qubit_lo_freq self.meas_lo_freq = meas_lo_freq if (memory_slot_size is not None): self.memory_slot_size = memory_slot_size if (rep_time is not None): self.rep_time = rep_time if (rep_delay is not None): self.rep_delay = rep_delay if (shots is not None): self.shots = int(shots) if (max_credits is not None): self.max_credits = int(max_credits) if (seed_simulator is not None): self.seed_simulator = int(seed_simulator) if (memory_slots is not None): self.memory_slots = int(memory_slots) if kwargs: self.__dict__.update(kwargs)<|docstring|>Instantiate a PulseQobjConfig object. Args: meas_level (int): The measurement level to use. meas_return (int): The level of measurement information to return. pulse_library (list): A list of :class:`PulseLibraryItem` objects which define the set of primative pulses qubit_lo_freq (list): List of frequencies (as floats) for the qubit driver LO's in GHz. meas_lo_freq (list): List of frequencies (as floats) for the' measurement driver LO's in GHz. memory_slot_size (int): Size of each memory slot if the output is Level 0. rep_time (int): Time per program execution in sec. Must be from the list provided by the backend (``backend.configuration().rep_times``). Defaults to the first entry in ``backend.configuration().rep_times``. rep_delay (float): Delay between programs in sec. Only supported on certain backends (``backend.configuration().dynamic_reprate_enabled`` ). 
If supported, ``rep_delay`` will be used instead of ``rep_time`` and must be from the range supplied by the backend (``backend.configuration().rep_delay_range``). Default is ``backend.configuration().default_rep_delay``. shots (int): The number of shots max_credits (int): the max_credits to use on the IBMQ public devices. seed_simulator (int): the seed to use in the simulator memory_slots (list): The number of memory slots on the device kwargs: Additional free form key value fields to add to the configuration<|endoftext|>
4ff84dd287a4c3343da65ec52de0bd66c7ec506adb720479d3dc693ddeb7b280
def to_dict(self): 'Return a dictionary format representation of the Pulse Qobj config.\n\n Returns:\n dict: The dictionary form of the PulseQobjConfig.\n ' out_dict = copy.copy(self.__dict__) if hasattr(self, 'pulse_library'): out_dict['pulse_library'] = [x.to_dict() for x in self.pulse_library] return out_dict
Return a dictionary format representation of the Pulse Qobj config. Returns: dict: The dictionary form of the PulseQobjConfig.
qiskit/qobj/pulse_qobj.py
to_dict
areeq-hasan/qiskit-terra
11
python
def to_dict(self): 'Return a dictionary format representation of the Pulse Qobj config.\n\n Returns:\n dict: The dictionary form of the PulseQobjConfig.\n ' out_dict = copy.copy(self.__dict__) if hasattr(self, 'pulse_library'): out_dict['pulse_library'] = [x.to_dict() for x in self.pulse_library] return out_dict
def to_dict(self): 'Return a dictionary format representation of the Pulse Qobj config.\n\n Returns:\n dict: The dictionary form of the PulseQobjConfig.\n ' out_dict = copy.copy(self.__dict__) if hasattr(self, 'pulse_library'): out_dict['pulse_library'] = [x.to_dict() for x in self.pulse_library] return out_dict<|docstring|>Return a dictionary format representation of the Pulse Qobj config. Returns: dict: The dictionary form of the PulseQobjConfig.<|endoftext|>
d800cb2735bcb339f3e1d19ec8666b865996a8684ca01e7ff0332282ec848d49
@classmethod def from_dict(cls, data): 'Create a new PulseQobjConfig object from a dictionary.\n\n Args:\n data (dict): A dictionary for the config\n\n Returns:\n PulseQobjConfig: The object from the input dictionary.\n ' if ('pulse_library' in data): pulse_lib = data.pop('pulse_library') pulse_lib_obj = [PulseLibraryItem.from_dict(x) for x in pulse_lib] data['pulse_library'] = pulse_lib_obj return cls(**data)
Create a new PulseQobjConfig object from a dictionary. Args: data (dict): A dictionary for the config Returns: PulseQobjConfig: The object from the input dictionary.
qiskit/qobj/pulse_qobj.py
from_dict
areeq-hasan/qiskit-terra
11
python
@classmethod def from_dict(cls, data): 'Create a new PulseQobjConfig object from a dictionary.\n\n Args:\n data (dict): A dictionary for the config\n\n Returns:\n PulseQobjConfig: The object from the input dictionary.\n ' if ('pulse_library' in data): pulse_lib = data.pop('pulse_library') pulse_lib_obj = [PulseLibraryItem.from_dict(x) for x in pulse_lib] data['pulse_library'] = pulse_lib_obj return cls(**data)
@classmethod def from_dict(cls, data): 'Create a new PulseQobjConfig object from a dictionary.\n\n Args:\n data (dict): A dictionary for the config\n\n Returns:\n PulseQobjConfig: The object from the input dictionary.\n ' if ('pulse_library' in data): pulse_lib = data.pop('pulse_library') pulse_lib_obj = [PulseLibraryItem.from_dict(x) for x in pulse_lib] data['pulse_library'] = pulse_lib_obj return cls(**data)<|docstring|>Create a new PulseQobjConfig object from a dictionary. Args: data (dict): A dictionary for the config Returns: PulseQobjConfig: The object from the input dictionary.<|endoftext|>
ab2df932151b5bdc208db813122c9698c56694c285dc6df0dff9d7585fdca70f
def __init__(self, instructions, config=None, header=None): 'Instantiate a PulseQobjExperiment.\n\n Args:\n config (PulseQobjExperimentConfig): A config object for the experiment\n header (PulseQobjExperimentHeader): A header object for the experiment\n instructions (list): A list of :class:`PulseQobjInstruction` objects\n ' if (config is not None): self.config = config if (header is not None): self.header = header self.instructions = instructions
Instantiate a PulseQobjExperiment. Args: config (PulseQobjExperimentConfig): A config object for the experiment header (PulseQobjExperimentHeader): A header object for the experiment instructions (list): A list of :class:`PulseQobjInstruction` objects
qiskit/qobj/pulse_qobj.py
__init__
areeq-hasan/qiskit-terra
11
python
def __init__(self, instructions, config=None, header=None): 'Instantiate a PulseQobjExperiment.\n\n Args:\n config (PulseQobjExperimentConfig): A config object for the experiment\n header (PulseQobjExperimentHeader): A header object for the experiment\n instructions (list): A list of :class:`PulseQobjInstruction` objects\n ' if (config is not None): self.config = config if (header is not None): self.header = header self.instructions = instructions
def __init__(self, instructions, config=None, header=None): 'Instantiate a PulseQobjExperiment.\n\n Args:\n config (PulseQobjExperimentConfig): A config object for the experiment\n header (PulseQobjExperimentHeader): A header object for the experiment\n instructions (list): A list of :class:`PulseQobjInstruction` objects\n ' if (config is not None): self.config = config if (header is not None): self.header = header self.instructions = instructions<|docstring|>Instantiate a PulseQobjExperiment. Args: config (PulseQobjExperimentConfig): A config object for the experiment header (PulseQobjExperimentHeader): A header object for the experiment instructions (list): A list of :class:`PulseQobjInstruction` objects<|endoftext|>
606d7b23f2f7a03b98aba7699a531d46eb4abd728daa27c8979d4487639b4936
def to_dict(self): 'Return a dictionary format representation of the Experiment.\n\n Returns:\n dict: The dictionary form of the PulseQobjExperiment.\n ' out_dict = {'instructions': [x.to_dict() for x in self.instructions]} if hasattr(self, 'config'): out_dict['config'] = self.config.to_dict() if hasattr(self, 'header'): out_dict['header'] = self.header.to_dict() return out_dict
Return a dictionary format representation of the Experiment. Returns: dict: The dictionary form of the PulseQobjExperiment.
qiskit/qobj/pulse_qobj.py
to_dict
areeq-hasan/qiskit-terra
11
python
def to_dict(self): 'Return a dictionary format representation of the Experiment.\n\n Returns:\n dict: The dictionary form of the PulseQobjExperiment.\n ' out_dict = {'instructions': [x.to_dict() for x in self.instructions]} if hasattr(self, 'config'): out_dict['config'] = self.config.to_dict() if hasattr(self, 'header'): out_dict['header'] = self.header.to_dict() return out_dict
def to_dict(self): 'Return a dictionary format representation of the Experiment.\n\n Returns:\n dict: The dictionary form of the PulseQobjExperiment.\n ' out_dict = {'instructions': [x.to_dict() for x in self.instructions]} if hasattr(self, 'config'): out_dict['config'] = self.config.to_dict() if hasattr(self, 'header'): out_dict['header'] = self.header.to_dict() return out_dict<|docstring|>Return a dictionary format representation of the Experiment. Returns: dict: The dictionary form of the PulseQobjExperiment.<|endoftext|>
44fd8445029b004386307d591efc032e571670e9d92efcf230d365f59808dd8a
@classmethod def from_dict(cls, data): 'Create a new PulseQobjExperiment object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n PulseQobjExperiment: The object from the input dictionary.\n ' config = None if ('config' in data): config = PulseQobjExperimentConfig.from_dict(data.pop('config')) header = None if ('header' in data): header = QobjExperimentHeader.from_dict(data.pop('header')) instructions = None if ('instructions' in data): instructions = [PulseQobjInstruction.from_dict(inst) for inst in data.pop('instructions')] return cls(instructions, config, header)
Create a new PulseQobjExperiment object from a dictionary. Args: data (dict): A dictionary for the experiment config Returns: PulseQobjExperiment: The object from the input dictionary.
qiskit/qobj/pulse_qobj.py
from_dict
areeq-hasan/qiskit-terra
11
python
@classmethod def from_dict(cls, data): 'Create a new PulseQobjExperiment object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n PulseQobjExperiment: The object from the input dictionary.\n ' config = None if ('config' in data): config = PulseQobjExperimentConfig.from_dict(data.pop('config')) header = None if ('header' in data): header = QobjExperimentHeader.from_dict(data.pop('header')) instructions = None if ('instructions' in data): instructions = [PulseQobjInstruction.from_dict(inst) for inst in data.pop('instructions')] return cls(instructions, config, header)
@classmethod def from_dict(cls, data): 'Create a new PulseQobjExperiment object from a dictionary.\n\n Args:\n data (dict): A dictionary for the experiment config\n\n Returns:\n PulseQobjExperiment: The object from the input dictionary.\n ' config = None if ('config' in data): config = PulseQobjExperimentConfig.from_dict(data.pop('config')) header = None if ('header' in data): header = QobjExperimentHeader.from_dict(data.pop('header')) instructions = None if ('instructions' in data): instructions = [PulseQobjInstruction.from_dict(inst) for inst in data.pop('instructions')] return cls(instructions, config, header)<|docstring|>Create a new PulseQobjExperiment object from a dictionary. Args: data (dict): A dictionary for the experiment config Returns: PulseQobjExperiment: The object from the input dictionary.<|endoftext|>
103cb0b9282a82065c1941f6c5305f5766b691b5a23b23fcf1df5a3f02c7018b
def __init__(self, qubit_lo_freq=None, meas_lo_freq=None, **kwargs): "Instantiate a PulseQobjExperimentConfig object.\n\n Args:\n qubit_lo_freq (list): List of frequencies (as floats) for the qubit\n driver LO's in GHz.\n meas_lo_freq (list): List of frequencies (as floats) for the'\n measurement driver LO's in GHz.\n kwargs: Additional free form key value fields to add to the\n configuration\n " if (qubit_lo_freq is not None): self.qubit_lo_freq = qubit_lo_freq if (meas_lo_freq is not None): self.meas_lo_freq = meas_lo_freq if kwargs: self.__dict__.update(kwargs)
Instantiate a PulseQobjExperimentConfig object. Args: qubit_lo_freq (list): List of frequencies (as floats) for the qubit driver LO's in GHz. meas_lo_freq (list): List of frequencies (as floats) for the' measurement driver LO's in GHz. kwargs: Additional free form key value fields to add to the configuration
qiskit/qobj/pulse_qobj.py
__init__
areeq-hasan/qiskit-terra
11
python
def __init__(self, qubit_lo_freq=None, meas_lo_freq=None, **kwargs): "Instantiate a PulseQobjExperimentConfig object.\n\n Args:\n qubit_lo_freq (list): List of frequencies (as floats) for the qubit\n driver LO's in GHz.\n meas_lo_freq (list): List of frequencies (as floats) for the'\n measurement driver LO's in GHz.\n kwargs: Additional free form key value fields to add to the\n configuration\n " if (qubit_lo_freq is not None): self.qubit_lo_freq = qubit_lo_freq if (meas_lo_freq is not None): self.meas_lo_freq = meas_lo_freq if kwargs: self.__dict__.update(kwargs)
def __init__(self, qubit_lo_freq=None, meas_lo_freq=None, **kwargs): "Instantiate a PulseQobjExperimentConfig object.\n\n Args:\n qubit_lo_freq (list): List of frequencies (as floats) for the qubit\n driver LO's in GHz.\n meas_lo_freq (list): List of frequencies (as floats) for the'\n measurement driver LO's in GHz.\n kwargs: Additional free form key value fields to add to the\n configuration\n " if (qubit_lo_freq is not None): self.qubit_lo_freq = qubit_lo_freq if (meas_lo_freq is not None): self.meas_lo_freq = meas_lo_freq if kwargs: self.__dict__.update(kwargs)<|docstring|>Instantiate a PulseQobjExperimentConfig object. Args: qubit_lo_freq (list): List of frequencies (as floats) for the qubit driver LO's in GHz. meas_lo_freq (list): List of frequencies (as floats) for the' measurement driver LO's in GHz. kwargs: Additional free form key value fields to add to the configuration<|endoftext|>
2af24ec659c3d21bc85aed21d8550c812a6e7187316cf6a8f7cd63c7b9b55f4d
def __init__(self, name, samples): 'Instantiate a pulse library item.\n\n Args:\n name (str): A name for the pulse.\n samples (list[complex]): A list of complex values defining pulse\n shape.\n ' self.name = name if isinstance(samples[0], list): self.samples = numpy.array([complex(sample[0], sample[1]) for sample in samples]) else: self.samples = samples
Instantiate a pulse library item. Args: name (str): A name for the pulse. samples (list[complex]): A list of complex values defining pulse shape.
qiskit/qobj/pulse_qobj.py
__init__
areeq-hasan/qiskit-terra
11
python
def __init__(self, name, samples): 'Instantiate a pulse library item.\n\n Args:\n name (str): A name for the pulse.\n samples (list[complex]): A list of complex values defining pulse\n shape.\n ' self.name = name if isinstance(samples[0], list): self.samples = numpy.array([complex(sample[0], sample[1]) for sample in samples]) else: self.samples = samples
def __init__(self, name, samples): 'Instantiate a pulse library item.\n\n Args:\n name (str): A name for the pulse.\n samples (list[complex]): A list of complex values defining pulse\n shape.\n ' self.name = name if isinstance(samples[0], list): self.samples = numpy.array([complex(sample[0], sample[1]) for sample in samples]) else: self.samples = samples<|docstring|>Instantiate a pulse library item. Args: name (str): A name for the pulse. samples (list[complex]): A list of complex values defining pulse shape.<|endoftext|>
87abad27a692ee1f97c5e603a0094a13a2cc2f2c65ff0bf62a5d446b255b294c
def to_dict(self): 'Return a dictionary format representation of the pulse library item.\n\n Returns:\n dict: The dictionary form of the PulseLibraryItem.\n ' return {'name': self.name, 'samples': self.samples}
Return a dictionary format representation of the pulse library item. Returns: dict: The dictionary form of the PulseLibraryItem.
qiskit/qobj/pulse_qobj.py
to_dict
areeq-hasan/qiskit-terra
11
python
def to_dict(self): 'Return a dictionary format representation of the pulse library item.\n\n Returns:\n dict: The dictionary form of the PulseLibraryItem.\n ' return {'name': self.name, 'samples': self.samples}
def to_dict(self): 'Return a dictionary format representation of the pulse library item.\n\n Returns:\n dict: The dictionary form of the PulseLibraryItem.\n ' return {'name': self.name, 'samples': self.samples}<|docstring|>Return a dictionary format representation of the pulse library item. Returns: dict: The dictionary form of the PulseLibraryItem.<|endoftext|>