Dataset schema (one file-level record per row):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 - 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 - 245 |
| max_stars_repo_name | string | length 6 - 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 - 10 |
| max_stars_count | int64 | 1 - 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 - 245 |
| max_issues_repo_name | string | length 6 - 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 - 10 |
| max_issues_count | int64 | 1 - 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 - 245 |
| max_forks_repo_name | string | length 6 - 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 - 10 |
| max_forks_count | int64 | 1 - 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 2 - 1.02M |
| avg_line_length | float64 | 1 - 417k |
| max_line_length | int64 | 1 - 987k |
| alphanum_fraction | float64 | 0 - 1 |
| content_no_comment | string | length 0 - 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
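The schema above describes one file-level record per row. As an illustrative sketch only (the dataset's name and storage location are not given here, so the local Parquet path below is hypothetical), rows with these columns can be loaded and filtered like this:

import pandas as pd

# Hypothetical local export of the rows described by the schema above.
df = pd.read_parquet("code_rows.parquet")

# Keep small, permissively licensed Python files.
subset = df[
    (df["ext"] == "py")
    & (df["size"] < 10_000)
    & df["max_stars_repo_licenses"].apply(
        lambda licenses: any(l in ("Apache-2.0", "MIT") for l in licenses)
    )
]
print(subset[["max_stars_repo_path", "size", "alphanum_fraction"]].head())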
hexsha: f70cae23221e6762c36e4c65e0fba61ecbd51fa9 | size: 7,705 | ext: py | lang: Python
repo: zhangxu999/magenta @ 60b85828cc69cff855fabce78b51ddaddc873a5d | licenses: ["Apache-2.0"]
path: magenta/models/arbitrary_image_stylization/arbitrary_image_stylization_distill_mobilenet.py
max_stars_count: 17 (2017-06-29T18:32:25.000Z to 2021-10-03T12:30:49.000Z)
max_issues_count: 12 (2021-02-15T07:42:08.000Z to 2022-02-08T02:05:27.000Z)
max_forks_count: 6 (2017-07-06T06:12:36.000Z to 2021-07-06T13:07:32.000Z)
content:
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distills a trained style prediction network using a MobileNetV2.
"""
import ast
import os
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_mobilenet_model as build_mobilenet_model
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import tensorflow.compat.v1 as tf
import tf_slim as slim
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 0.5e-3, "vgg_16/conv2": 0.5e-3,'
' "vgg_16/conv3": 0.5e-3, "vgg_16/conv4": 0.5e-3}')
flags = tf.app.flags
flags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')
flags.DEFINE_float('learning_rate', 1e-5, 'Learning rate')
flags.DEFINE_float('total_variation_weight', 1e4, 'Total variation weight')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
'Content weights')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')
flags.DEFINE_integer('batch_size', 8, 'Batch size.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_boolean('random_style_image_size', True,
'Whether to resize the style images '
'to a random size or not.')
flags.DEFINE_boolean(
'augment_style_images', True,
'Whether to augment style images or not.')
flags.DEFINE_boolean('center_crop', False,
'Whether to center crop the style images.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter servers. If 0, parameters '
'are handled locally by the worker.')
flags.DEFINE_integer('save_summaries_secs', 15,
'Frequency at which summaries are saved, in seconds.')
flags.DEFINE_integer('save_interval_secs', 15,
'Frequency at which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0, 'Task ID. Used when training with multiple '
'workers to identify each worker.')
flags.DEFINE_integer('train_steps', 8000000, 'Number of training steps.')
flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('train_dir', None,
'Directory for checkpoints and summaries.')
flags.DEFINE_string('initial_checkpoint', None,
'Path to the pre-trained arbitrary_image_stylization '
'checkpoint')
flags.DEFINE_string('mobilenet_checkpoint', 'mobilenet_v2_1.0_224.ckpt',
'Path to the pre-trained mobilenet checkpoint')
flags.DEFINE_boolean('use_true_loss', False,
'Add true style loss term based on VGG.')
flags.DEFINE_float('true_loss_weight', 1e-9,
'Scale factor for real loss')
FLAGS = flags.FLAGS
def main(unused_argv=None):
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
# Forces all input processing onto CPU in order to reserve the GPU for the
# forward inference and back-propagation.
device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
with tf.device(
tf.train.replica_device_setter(FLAGS.ps_tasks, worker_device=device)):
# Load content images
content_inputs_, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
FLAGS.image_size)
# Loads style images.
[style_inputs_, _,
style_inputs_orig_] = image_utils.arbitrary_style_image_inputs(
FLAGS.style_dataset_file,
batch_size=FLAGS.batch_size,
image_size=FLAGS.image_size,
shuffle=True,
center_crop=FLAGS.center_crop,
augment_style_images=FLAGS.augment_style_images,
random_style_image_size=FLAGS.random_style_image_size)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
# Process style and content weight flags.
content_weights = ast.literal_eval(FLAGS.content_weights)
style_weights = ast.literal_eval(FLAGS.style_weights)
# Define the model
stylized_images, \
true_loss, \
_, \
bottleneck_feat = build_mobilenet_model.build_mobilenet_model(
content_inputs_,
style_inputs_,
mobilenet_trainable=True,
style_params_trainable=False,
style_prediction_bottleneck=100,
adds_losses=True,
content_weights=content_weights,
style_weights=style_weights,
total_variation_weight=FLAGS.total_variation_weight,
)
_, inception_bottleneck_feat = build_model.style_prediction(
style_inputs_,
[],
[],
is_training=False,
trainable=False,
inception_end_point='Mixed_6e',
style_prediction_bottleneck=100,
reuse=None,
)
print('PRINTING TRAINABLE VARIABLES')
for x in tf.trainable_variables():
print(x)
mse_loss = tf.losses.mean_squared_error(
inception_bottleneck_feat, bottleneck_feat)
total_loss = mse_loss
if FLAGS.use_true_loss:
true_loss = FLAGS.true_loss_weight*true_loss
total_loss += true_loss
if FLAGS.use_true_loss:
tf.summary.scalar('mse', mse_loss)
tf.summary.scalar('true_loss', true_loss)
tf.summary.scalar('total_loss', total_loss)
tf.summary.image('image/0_content_inputs', content_inputs_, 3)
tf.summary.image('image/1_style_inputs_orig', style_inputs_orig_, 3)
tf.summary.image('image/2_style_inputs_aug', style_inputs_, 3)
tf.summary.image('image/3_stylized_images', stylized_images, 3)
mobilenet_variables_to_restore = slim.get_variables_to_restore(
include=['MobilenetV2'],
exclude=['global_step'])
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=FLAGS.clip_gradient_norm,
summarize_gradients=False
)
init_fn = slim.assign_from_checkpoint_fn(
FLAGS.initial_checkpoint,
slim.get_variables_to_restore(
exclude=['MobilenetV2', 'mobilenet_conv', 'global_step']))
init_pretrained_mobilenet = slim.assign_from_checkpoint_fn(
FLAGS.mobilenet_checkpoint, mobilenet_variables_to_restore)
def init_sub_networks(session):
init_fn(session)
init_pretrained_mobilenet(session)
slim.learning.train(
train_op=train_op,
logdir=os.path.expanduser(FLAGS.train_dir),
master=FLAGS.master,
is_chief=FLAGS.task == 0,
number_of_steps=FLAGS.train_steps,
init_fn=init_sub_networks,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
avg_line_length: 40.552632 | max_line_length: 129 | alphanum_fraction: 0.688254 | is_comment_constant_removed: true | is_sharp_comment_removed: true
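A small sketch, reusing only names defined in the script above, of how the content_weights and style_weights flag strings are parsed into dictionaries via ast.literal_eval:

import ast

DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 0.5e-3, "vgg_16/conv2": 0.5e-3,'
                         ' "vgg_16/conv3": 0.5e-3, "vgg_16/conv4": 0.5e-3}')

content_weights = ast.literal_eval(DEFAULT_CONTENT_WEIGHTS)
style_weights = ast.literal_eval(DEFAULT_STYLE_WEIGHTS)
print(content_weights)                # {'vgg_16/conv3': 1}
print(style_weights["vgg_16/conv4"])  # 0.0005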
hexsha: f70caeff660f0ae67d2791ca0f3bf2000ed3e058 | size: 3,092 | ext: py | lang: Python
repo: ianshmean/julia-buildbot @ 76f9c423d0c201b98707020d09045c9a4af4aac1 | licenses: ["MIT"]
path: master/llvmpasses.py
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
julia_llvmpasses_factory = util.BuildFactory()
julia_llvmpasses_factory.useProgress = True
julia_llvmpasses_factory.addSteps([
# Fetch first (allowing failure if no existing clone is present)
steps.ShellCommand(
name="git fetch",
command=["git", "fetch", "--tags", "--all", "--force"],
flunkOnFailure=False
),
# Clone julia
steps.Git(
name="Julia checkout",
repourl=util.Property('repository', default='git://github.com/JuliaLang/julia.git'),
mode='full',
method='fresh',
submodules=True,
clobberOnFailure=True,
progress=True,
retryFetch=True,
getDescription={'--tags': True},
),
    # Make release build
steps.ShellCommand(
name="make release",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -j%(prop:nthreads)s %(prop:flags)s %(prop:extra_make_flags)s release")],
haltOnFailure = True,
# Fail out if 60 minutes have gone by with nothing printed to stdout
timeout=60*60,
# Kill everything if the overall job has taken more than 2 hours
maxTime=60*60*2,
# Give the process 10 seconds to print out the current backtraces when being killed
sigtermTime=10,
),
steps.ShellCommand(
name="make test/llvmpasses",
command=["/bin/sh", "-c", util.Interpolate("%(prop:make_cmd)s -C test/llvmpasses -j%(prop:nthreads)s %(prop:flags)s %(prop:extra_make_flags)s")],
haltOnFailure = True,
# Fail out if 60 minutes have gone by with nothing printed to stdout
timeout=60*60,
# Kill everything if the overall job has taken more than 2 hours
maxTime=60*60*2,
# Give the process 10 seconds to print out the current backtraces when being killed
sigtermTime=10,
),
])
c['schedulers'].append(schedulers.AnyBranchScheduler(
name="Julia test llvmpasses",
change_filter=util.ChangeFilter(filter_fn=julia_ci_filter),
builderNames=["llvmpasses_linux64"],
treeStableTimer=1,
))
# Add workers for these jobs
c['builders'].append(util.BuilderConfig(
name="llvmpasses_linux64",
workernames=builder_mapping["linux64"],
collapseRequests=False,
tags=["Packaging"],
factory=julia_llvmpasses_factory,
))
# Add a scheduler for building release candidates/triggering builds manually
c['schedulers'].append(schedulers.ForceScheduler(
name="llvmpasses",
label="Force llvmpasses",
builderNames=["llvmpasses_linux64"],
reason=util.FixedParameter(name="reason", default=""),
codebases=[
util.CodebaseParameter(
"",
name="",
branch=util.FixedParameter(name="branch", default=""),
repository=util.FixedParameter(name="repository", default=""),
project=util.FixedParameter(name="project", default="Packaging"),
)
],
properties=[
util.StringParameter(
name="extra_make_flags",
label="Extra Make Flags",
size=30,
default="",
),
],
))
avg_line_length: 34.355556 | max_line_length: 153 | alphanum_fraction: 0.639715 | is_comment_constant_removed: true | is_sharp_comment_removed: true
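For reference, a sketch of what the interpolated make command in the config above resolves to. Buildbot's util.Interpolate substitutes the %(prop:...)s placeholders from build properties at build time; the property values below are purely illustrative and the substitution is simulated with plain %-formatting:

# Simulated substitution; in the real config this is done by util.Interpolate.
props = {
    "prop:make_cmd": "make",
    "prop:nthreads": 8,
    "prop:flags": "VERBOSE=1",
    "prop:extra_make_flags": "",
}
template = "%(prop:make_cmd)s -j%(prop:nthreads)s %(prop:flags)s %(prop:extra_make_flags)s release"
print(template % props)  # -> make -j8 VERBOSE=1  release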
hexsha: f70caf336c711737e2bc6b173a4fdab41dc8a3b0 | size: 3,267 | ext: py | lang: Python
repo: UKPLab/curriculum-annotation @ 1d6ca490ea180019bb09d1d3818874f4321d4d0f | licenses: ["Apache-2.0"]
path: experiments/ca/ext.py
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from typing import List, Tuple
import pandas as pd
@pd.api.extensions.register_dataframe_accessor("tag")
class CaTaggingAccessor:
def __init__(self, df: pd.DataFrame):
self._df = df
def group_by_sentences(self):
yield from (x[1] for x in self._df.groupby("sentence_id"))
def group_by_documents(self):
yield from (x[1] for x in self._df.groupby("document_id"))
def number_of_sentences(self):
return len(self._df.groupby("sentence_id"))
def number_of_documents(self):
return len(self._df.groupby("document_id"))
def split_x_y_sentencewise(self) -> Tuple[List[List[str]], List[List[str]]]:
X = []
y = []
for sent in self._df.tag.group_by_sentences():
words = list(sent["word"])
labels = list(sent["label"])
X.append(words)
y.append(labels)
return X, y
def get_times_per_document(self) -> List[int]:
t = []
# Right now, we assume that the time per token is the same
# for a sentence. This might be an invalid assumption
for df in self._df.tag.group_by_sentences():
t.append(df["t"].values[0])
return t
def group_by_documents_x_y(self) -> Tuple[List[List[List[str]]], List[List[List[str]]]]:
"""Returns a list of documents that each contain a list of sentences and
their respective labels grouped the same way.
"""
X = []
y = []
for doc in self._df.tag.group_by_documents():
X_doc = []
y_doc = []
for sent in doc.tag.group_by_sentences():
words = list(sent["word"])
labels = list(sent["label"])
X_doc.append(words)
y_doc.append(labels)
X.append(X_doc)
y.append(y_doc)
return X, y
def group_by_sentences_x_y(self) -> Tuple[List[List[str]], List[List[str]]]:
"""Returns a list of sentences and their respective labels grouped the same way."""
X = []
y = []
for sent in self._df.tag.group_by_sentences():
words = list(sent["word"])
labels = list(sent["label"])
assert len(words) == len(labels)
X.append(words)
y.append(labels)
return X, y
@pd.api.extensions.register_dataframe_accessor("dclass")
class CaDocumentClassificationAccessor:
def __init__(self, df: pd.DataFrame):
self._df = df
def split_x_y(self) -> Tuple[List[str], List[str]]:
X = self._df["sentence"]
y = self._df["label"]
return X.values.tolist(), y.values.tolist()
def get_time_per_sentence(self) -> List[int]:
return self._df["t"].values.tolist()
@pd.api.extensions.register_dataframe_accessor("pair")
class CaPairAccessor:
def __init__(self, df: pd.DataFrame):
self._df = df
def split_args_y(self) -> Tuple[List[str], List[str], List[str]]:
args1 = self._df["arg1"].values.tolist()
args2 = self._df["arg2"].values.tolist()
label = self._df["label"].values.tolist()
return args1, args2, label
def get_time_per_sentence(self) -> List[int]:
return self._df["t"].values.tolist()
avg_line_length: 29.169643 | max_line_length: 92 | alphanum_fraction: 0.590756 | is_comment_constant_removed: true | is_sharp_comment_removed: true
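A minimal usage sketch for the tagging accessor above, assuming the module has been imported so that the "tag" accessor is registered with pandas; the toy rows below are made up:

import pandas as pd

# Importing the module above registers the "tag" accessor on DataFrames.
df = pd.DataFrame({
    "document_id": [0, 0, 0, 0],
    "sentence_id": [0, 0, 1, 1],
    "word": ["Alice", "runs", "Bob", "sleeps"],
    "label": ["B-PER", "O", "B-PER", "O"],
    "t": [3, 3, 2, 2],
})
X, y = df.tag.group_by_sentences_x_y()
print(X)  # [['Alice', 'runs'], ['Bob', 'sleeps']]
print(y)  # [['B-PER', 'O'], ['B-PER', 'O']]
print(df.tag.number_of_sentences())  # 2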
hexsha: f70cafe61bd60c467537a3027e0789791601ad09 | size: 8,937 | ext: py | lang: Python
repo: tcl326/forte @ d0d7b8b97da5e1d507dfa7cd4ec51d96067770b8 | licenses: ["Apache-2.0"]
path: forte/data/ontology/code_generation_util.py
max_stars_count: null
max_issues_count: 13 (2019-12-01T04:51:38.000Z to 2020-02-11T23:55:11.000Z)
max_forks_count: null
content:
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
from typing import Optional, Any, List
class Config:
indent: int = 4
line_break: str = os.linesep
def indent(level: int) -> str:
return ' ' * Config.indent * level
def indent_line(line: str, level: int) -> str:
return f"{indent(level)}{line}" if line else ''
def indent_code(code_lines: List[str], level: int = 0) -> str:
lines = []
for code in code_lines:
lines.extend(code.split(Config.line_break) if code is not None else [])
return Config.line_break.join([indent_line(line, level) for line in lines])
def empty_lines(num: int):
return ''.join([Config.line_break] * num)
class Item:
def __init__(self, name: str, description: Optional[str]):
self.name: str = name
self.description: Optional[str] = description
def to_description(self, level: int) -> Optional[str]:
if self.description is not None:
return indent_code([self.description], level)
return None
def to_code(self, level: int) -> str:
raise NotImplementedError
class Property(Item):
def __init__(self,
name: str,
type_str: str,
description: Optional[str] = None,
default: Any = None):
super().__init__(name, description)
self.type_str = type_str
self.default = default
def to_getter_setter_code(self, level) -> str:
"""
Returns: getter and setter functions generated by a property.
"""
name = self.name
lines = [("@property", 0),
(f"def {name}(self):", 0),
(f"return self._{name}", 1),
(empty_lines(0), 0),
(f"def set_{name}(self, {name}: {self.to_code(0)}):", 0),
(f"self.set_fields(_{name}={self.to_field_value()})", 1),
(empty_lines(0), 0)]
return indent_code([indent_line(*line) for line in lines], level)
def to_init_code(self, level: int) -> str:
return indent_line(f"self._{self.name}: {self.to_code(0)} = "
f"{repr(self.default)}", level)
def to_description(self, level: int) -> Optional[str]:
if self.description is not None and self.description.strip() != '':
type_str = f'{self.to_code(0)}'
type_str = f' ({type_str})' if type_str.strip() != '' else type_str
return indent_line(f"{self.name}{type_str}: "
f"{self.description}", level)
return None
def to_field_value(self):
raise NotImplementedError
class ClassAttributeItem(Property):
def to_code(self, level: int = 0) -> str:
return self.type_str
def to_init_code(self, level: int) -> str:
type_code = f'{self.to_code(0)}'
type_ = f': {type_code}' if type_code.strip() != '' else ''
return indent_line(f"{self.name}{type_} = {self.default}", level)
def to_field_value(self):
pass
class BasicItem(Property):
TYPES = {'int', 'float', 'str', 'bool'}
def to_code(self, level: int = 0) -> str:
return f"typing.Optional[{self.type_str}]"
def to_field_value(self):
if self.type_str in self.TYPES:
return self.name
return f"{self.name}.tid"
class CompositeItem(Property):
TYPES = {'List'}
def __init__(self,
name: str,
type_str: str,
item_type: str,
description: Optional[str] = None,
default: Any = None):
super().__init__(name, type_str, description, default)
self.item_type = item_type
def to_code(self, level: int = 0) -> str:
# TODO: Assumes only one type of elements are allowed in the list,
# allow multiple types
# items = list(OrderedDict([(item, None)
# for item in self.items]).keys())
# item_type_str = f"{', '.join(self.item_type)}"
# if len(self.items) > 1:
# item_type_str = f"typing.Union[{item_type_str}]"
return f"typing.Optional[{self.type_str}[{self.item_type}]]"
def to_field_value(self):
item_value_str = BasicItem('item', self.item_type).to_field_value()
return f"[{item_value_str} for item in {self.name}]"
class DefinitionItem(Item):
def __init__(self, name: str,
class_type: str,
init_args: Optional[str] = None,
properties: Optional[List[Property]] = None,
class_attributes: Optional[List[Property]] = None,
description: Optional[str] = None):
super().__init__(name, description)
self.class_type = class_type
self.properties: List[Property] = \
[] if properties is None else properties
self.class_attributes = [] if class_attributes is None \
else class_attributes
self.description = description if description else None
self.init_args = init_args if init_args is not None else ''
self.init_args = self.init_args.replace('=', ' = ')
def to_init_code(self, level: int) -> str:
return indent_line(f"def __init__(self, {self.init_args}):", level)
def to_code(self, level: int) -> str:
super_args = ', '.join([item.split(':')[0].strip()
for item in self.init_args.split(',')])
raw_desc = self.to_description(1)
desc: str = '' if raw_desc is None else raw_desc
lines = [
empty_lines(1),
f"__all__.extend('{self.name}')",
empty_lines(1),
f"class {self.name}({self.class_type}):",
]
lines += [desc] if desc.strip() else []
lines += [item.to_init_code(1) for item in self.class_attributes]
lines += [empty_lines(0)]
lines += [self.to_init_code(1),
indent_line(f"super().__init__({super_args})", 2)]
lines += [item.to_init_code(2) for item in self.properties]
lines += [empty_lines(0)]
lines += [item.to_getter_setter_code(1) for item in self.properties]
return indent_code(lines, level)
@staticmethod
def to_item_descs(items, title):
item_descs = [item.to_description(0) for item in items]
item_descs = [item for item in item_descs if item is not None]
if len(item_descs) > 0:
item_descs = [indent_line(title, 1)] + \
[indent_line(desc, 2) for desc in item_descs]
return item_descs
def to_description(self, level: int) -> Optional[str]:
class_desc = [] if self.description is None else [self.description]
item_descs = self.to_item_descs(self.properties, 'Args:')
att_descs = self.to_item_descs(self.class_attributes, 'Attr:')
descs = class_desc + item_descs + att_descs
if len(descs) == 0:
return ""
quotes = indent_line('"""', 0)
return indent_code([quotes] + descs + [quotes], level)
class FileItem:
def __init__(self,
entry_item: DefinitionItem,
entry_file: str,
ignore_errors: Optional[List[str]],
description: Optional[str],
imports: Optional[List[str]]):
self.description = description
self.ignore_errors = [] if not ignore_errors else ignore_errors
self.imports = [] if not imports else list(set(imports))
self.entry_item = entry_item
self.entry_file_exists = os.path.exists(entry_file)
def to_code(self, level: int) -> str:
lines: List[str] = []
if not self.entry_file_exists:
lines = [self.to_description(0),
self.to_import_code(0),
empty_lines(1), '__all__ = []']
lines.append(self.entry_item.to_code(0))
return indent_code(lines, level)
def to_description(self, level):
quotes = '"""'
lines = self.ignore_errors + [quotes, self.description, quotes]
return indent_code(lines, level)
def to_import_code(self, level):
imports_set: OrderedDict[str] = {}
for import_ in sorted(self.imports):
imports_set[f"import {import_}"] = None
return indent_code(list(imports_set), level)
avg_line_length: 36.329268 | max_line_length: 79 | alphanum_fraction: 0.596397 | is_comment_constant_removed: true | is_sharp_comment_removed: true
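A minimal sketch of generating a class definition with the helpers above; the entry name, base class, and property are invented for illustration, and the module is assumed importable from forte.data.ontology.code_generation_util:

# Invented example entry: a "Token" with a single optional "begin" property.
begin = BasicItem(name="begin", type_str="int", description="Start offset.")
token = DefinitionItem(
    name="Token",
    class_type="Annotation",
    init_args="begin: int",
    properties=[begin],
    description="A span-based annotation.",
)
# Prints the generated __all__ update, class header, docstring, __init__, and
# the property's getter/setter methods.
print(token.to_code(0))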
hexsha: f70cb0ad2ce41c37d7ca1d87ed20ebab11b77969 | size: 4,092 | ext: py | lang: Python
repo: carbonplan/data @ 9eb84991650ac9a75ae622d3aac754794ada9405 | licenses: ["MIT"]
path: carbonplan_data/tests/__init__.py
max_stars_count: 8 (2020-06-29T18:15:00.000Z to 2022-02-16T11:10:23.000Z)
max_issues_count: 159 (2020-06-05T04:54:16.000Z to 2022-03-29T04:09:30.000Z)
max_forks_count: 4 (2021-01-26T22:02:07.000Z to 2021-11-23T22:26:20.000Z)
content:
# Based on scikit-learn/sklearn/utils/estimator_checks.py
import itertools
from functools import partial
def get_entry_params(entry):
user_parameters = entry.describe()["user_parameters"]
if not user_parameters:
return []
keys = [p["name"] for p in user_parameters]
try:
values = [p["allowed"] for p in user_parameters]
except KeyError:
return []
params = [None]
params.extend([dict(zip(keys, p)) for p in itertools.product(*values)])
return params
def _set_check_ids(obj):
"""Create pytest ids for checks.
When `obj` is an intake entry, this returns the pprint version of the
intake entry. When `obj` is a function, the name of the function is
returned with its keyworld arguments.
Parameters
----------
obj : intake entry or function
Items generated by `check_entry`
Returns
-------
id : string or None
See also
--------
check_entry
"""
if hasattr(obj, "container"):
c = getattr(obj, "_catalog", None)
if c:
name = f"{c.name}.{obj.name}"
else:
name = f"{obj.name}"
return name
if callable(obj):
if not isinstance(obj, partial):
return obj.__name__
if not obj.keywords:
return obj.func.__name__
kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
def parametrize_with_checks(catalog):
"""Pytest specific decorator for parametrizing catalog checks.
The `id` of each check is set to be a pprint version of the catalog
and the name of the check with its keyword arguments.
This allows to use `pytest -k` to specify which tests to run::
pytest test_check_catalogs.py -k check_catalog_metadata
Parameters
----------
catalog : Intake Catalog
Catalog to generated checks for.
Returns
-------
decorator : `pytest.mark.parametrize`
Examples
--------
>>> from carbonplan.data.tests import parametrize_with_checks
>>> from carbonplan.data import cat
>>> @parametrize_with_checks(cat)
... def test_catalog(entry, check):
... check(entry)
"""
import pytest
checks_generator = itertools.chain.from_iterable(
check_entry(name, entry) for name, entry in dict(catalog.walk(depth=10)).items()
)
checks_with_marks = list(
_mark_xfail_checks(estimator, check, pytest) for estimator, check in checks_generator
)
return pytest.mark.parametrize("entry, check", checks_with_marks, ids=_set_check_ids)
def _mark_xfail_checks(entry, check, pytest):
# TODO
return entry, check
def _yield_all_checks(name, entry):
yield check_entry_metadata
for params in get_entry_params(entry):
yield partial(check_get_entry_data, params=params)
def check_entry(name, entry):
yield from ((entry, partial(check, name)) for check in _yield_all_checks(name, entry))
def check_get_entry_data(name, entry, params=None):
import pytest
if params is not None:
entry = entry(**params)
else:
entry = entry()
if entry.container == "catalog":
entry.reload()
elif entry.container in ["xarray", "dataframe"]:
if entry.metadata.get("ci", None) == "skip":
pytest.skip("dataset marked as ci: skip") # TODO: move to _mark_xfail_checks
elif entry.metadata.get("ci", None) == "xfail":
pytest.xfail("dataset marked as ci: xfail") # TODO: move to _mark_xfail_checks
try:
_ = entry.to_dask()
except NotImplementedError:
_ = entry.read()
def check_entry_metadata(name, entry):
import pytest
expected_keys = ["title", "summary", "description", "tags", "license", "providers"]
if entry.container == "catalog":
pytest.skip(
"not checking metadata in top level catalog objects."
) # TODO: move to _mark_xfail_checks
for key in expected_keys:
assert key in entry().metadata
avg_line_length: 28.615385 | max_line_length: 93 | alphanum_fraction: 0.637341 | is_comment_constant_removed: true | is_sharp_comment_removed: true
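To illustrate what get_entry_params produces, here is a sketch using a stand-in object in place of a real intake entry; only the describe() structure matters:

class FakeEntry:
    # Stand-in for an intake catalog entry; only describe() is used by
    # get_entry_params.
    def describe(self):
        return {"user_parameters": [{"name": "year", "allowed": [2019, 2020]}]}

print(get_entry_params(FakeEntry()))
# [None, {'year': 2019}, {'year': 2020}]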
hexsha: f70cb0df208ce61fa914f50e3672edb1081c3638 | size: 272,527 | ext: py | lang: Python
repo: gmittal/jax @ 281816221dea03c64f6d8b61253397c719c55feb | licenses: ["ECL-2.0", "Apache-2.0"]
path: jax/_src/lax/lax.py
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Pytype is too slow to check this file.
# pytype: skip-file
import builtins
import functools
import itertools
import operator
from typing import (Any, Callable, List, NamedTuple, Optional, Sequence, Union, Tuple)
import warnings
import numpy as np
import jax
from jax import core
from jax import ad_util
from jax import api
from jax import api_util
from jax import linear_util as lu
from jax import dtypes
from jax import lazy
from jax import tree_util
from jax.config import flags, config
from jax.core import (Primitive, _canonicalize_dimension, UnshapedArray,
ShapedArray, ConcreteArray, raise_to_shaped,
abstract_token, canonicalize_shape)
from jax.abstract_arrays import array_types
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters import ad
from jax.interpreters import invertible_ad as iad
from jax.interpreters import batching
from jax.interpreters import masking
from jax.util import (cache, safe_zip, partial, prod, safe_map, canonicalize_axis,
split_list)
from jax.tree_util import tree_map
from jax.lib import pytree
from jax.lib import xla_bridge
from jax.lib import xla_client
xb = xla_bridge
xc = xla_client
xops = xla_client.ops
FLAGS = flags.FLAGS
_max = builtins.max
_min = builtins.min
_reduce = functools.reduce
Array = Any
DType = Any
Shape = Sequence[int]
def _try_broadcast_shapes(shapes):
assert shapes
if len(shapes) == 1: return shapes[0]
rank, *others = {len(shape) for shape in shapes}
if others: return None # must have consistent rank
if not rank: return () # scalar case
result_shape = [None] * rank
for i, sizes in enumerate(zip(*shapes)):
if sizes[:-1] == sizes[1:]:
result_shape[i] = sizes[0] # all equal sizes for this dimension
else:
sizes = [d for d in sizes if d != 1]
if sizes[:-1] != sizes[1:]:
return None # must have equal sizes other than 1-sized axes
result_shape[i] = sizes[0] if sizes else 1
return tuple(result_shape)
@cache()
def broadcast_shapes(*shapes):
"""Returns the shape that results from NumPy broadcasting of `shapes`."""
if len(shapes) == 1:
return shapes[0]
ndim = _max(len(shape) for shape in shapes)
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
result_shape = _try_broadcast_shapes(shapes)
if result_shape is None:
raise ValueError("Incompatible shapes for broadcasting: {}"
.format(tuple(map(tuple, shapes))))
return result_shape
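# Illustrative examples (not part of the original source): broadcasting pads
# missing leading dimensions with 1 and requires the remaining sizes to match
# or be 1, so:
#   broadcast_shapes((3, 1), (1, 4))  ->  (3, 4)
#   broadcast_shapes((2, 3), (4,))    ->  ValueError (3 and 4 are incompatible)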
def _identity(x): return x
### traceables
def neg(x: Array) -> Array:
r"""Elementwise negation: :math:`-x`."""
return neg_p.bind(x)
def sign(x: Array) -> Array:
r"""Elementwise sign.
For floating-point inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
-0 & x = -0\\
\mathit{NaN} & x = \mathit{NaN}\\
+0 & x = +0\\
1 & x > 0
\end{cases}`
For signed integer inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
0 & x = 0\\
1 & x > 0
\end{cases}`
For complex inputs, returns the complex phase, i.e.
:math:`\mathrm{sign}(x) = \frac{x}{|x|}`.
"""
return sign_p.bind(x)
def nextafter(x1: Array, x2: Array) -> Array:
r"""Returns the next representable value after `x1` in the direction of `x2`.
Note that in some environments flush-denormal-to-zero semantics is used.
This means that, around zero, this function returns strictly non-zero
values which appear as zero in any operations. Consider this example::
>>> jnp.nextafter(0, 1) # denormal numbers are representable
DeviceArray(1.e-45, dtype=float32)
>>> jnp.nextafter(0, 1) * 1 # but are flushed to zero
DeviceArray(0., dtype=float32)
For the smallest usable (i.e. normal) float, use ``tiny`` of ``jnp.finfo``.
"""
return nextafter_p.bind(_brcast(x1, x2), _brcast(x2, x1))
def floor(x: Array) -> Array:
r"""Elementwise floor: :math:`\left\lfloor x \right\rfloor`."""
return floor_p.bind(x)
def ceil(x: Array) -> Array:
r"""Elementwise ceiling: :math:`\left\lceil x \right\rceil`."""
return ceil_p.bind(x)
def round(x: Array) -> Array:
r"""Elementwise round.
Rounds values to the nearest integer. Halfway values (e.g., `0.5`) are rounded
away from zero."""
return round_p.bind(x)
def is_finite(x: Array) -> Array:
r"""Elementwise :math:`\mathrm{isfinite}`.
For each element x returns `True` if and only if x is not :math:`\pm\infty` or
:math:`\mathit{NaN}`.
"""
return is_finite_p.bind(x)
def exp(x: Array) -> Array:
r"""Elementwise exponential: :math:`e^x`."""
return exp_p.bind(x)
def expm1(x: Array) -> Array:
r"""Elementwise :math:`e^{x} - 1`."""
return expm1_p.bind(x)
def log(x: Array) -> Array:
r"""Elementwise natural logarithm: :math:`\mathrm{log}(x)`."""
return log_p.bind(x)
def log1p(x: Array) -> Array:
r"""Elementwise :math:`\mathrm{log}(1 + x)`."""
return log1p_p.bind(x)
def tanh(x: Array) -> Array:
r"""Elementwise hyperbolic tangent: :math:`\mathrm{tanh}(x)`."""
return tanh_p.bind(x)
def sin(x: Array) -> Array:
r"""Elementwise sine: :math:`\mathrm{sin}(x)`."""
return sin_p.bind(x)
def cos(x: Array) -> Array:
r"""Elementwise cosine: :math:`\mathrm{cos}(x)`."""
return cos_p.bind(x)
def atan2(x: Array, y: Array) -> Array:
r"""Elementwise arc tangent of two variables:
:math:`\mathrm{atan}({x \over y})`."""
return atan2_p.bind(x, y)
def betainc(a: Array, b: Array, x: Array) -> Array:
r"""Elementwise regularized incomplete beta integral."""
return regularized_incomplete_beta_p.bind(a, b, x)
def lgamma(x: Array) -> Array:
r"""Elementwise log gamma: :math:`\mathrm{log}(\Gamma(x))`."""
return lgamma_p.bind(x)
def digamma(x: Array) -> Array:
r"""Elementwise digamma: :math:`\psi(x)`."""
return digamma_p.bind(x)
def igamma(a: Array, x: Array) -> Array:
r"""Elementwise regularized incomplete gamma function."""
return igamma_p.bind(a, x)
def igammac(a: Array, x: Array) -> Array:
r"""Elementwise complementary regularized incomplete gamma function."""
return igammac_p.bind(a, x)
def igamma_grad_a(a: Array, x: Array) -> Array:
r"""Elementwise derivative of the regularized incomplete gamma function."""
return igamma_grad_a_p.bind(a, x)
def random_gamma_grad(a: Array, x: Array) -> Array:
r"""Elementwise derivative of samples from `Gamma(a, 1)`."""
return random_gamma_grad_p.bind(a, x)
def bessel_i0e(x: Array) -> Array:
r"""Exponentially scaled modified Bessel function of order 0:
:math:`\mathrm{i0e}(x) = e^{-|x|} \mathrm{i0}(x)`
"""
return bessel_i0e_p.bind(x)
def bessel_i1e(x: Array) -> Array:
r"""Exponentially scaled modified Bessel function of order 1:
:math:`\mathrm{i1e}(x) = e^{-|x|} \mathrm{i1}(x)`
"""
return bessel_i1e_p.bind(x)
def erf(x: Array) -> Array:
r"""Elementwise error function: :math:`\mathrm{erf}(x)`."""
return erf_p.bind(x)
def erfc(x: Array) -> Array:
r"""Elementwise complementary error function:
:math:`\mathrm{erfc}(x) = 1 - \mathrm{erf}(x)`."""
return erfc_p.bind(x)
def erf_inv(x: Array) -> Array:
r"""Elementwise inverse error function: :math:`\mathrm{erf}^{-1}(x)`."""
return erf_inv_p.bind(x)
def real(x: Array) -> Array:
r"""Elementwise extract real part: :math:`\mathrm{Re}(x)`.
Returns the real part of a complex number.
"""
return real_p.bind(x)
def imag(x: Array) -> Array:
r"""Elementwise extract imaginary part: :math:`\mathrm{Im}(x)`.
Returns the imaginary part of a complex number.
"""
return imag_p.bind(x)
def complex(x: Array, y: Array) -> Array:
r"""Elementwise make complex number: :math:`x + jy`.
Builds a complex number from real and imaginary parts.
"""
return complex_p.bind(_brcast(x, y), _brcast(y, x))
def conj(x: Array) -> Array:
r"""Elementwise complex conjugate function: :math:`\overline{x}`."""
return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x: Array) -> Array:
r"""Elementwise absolute value: :math:`|x|`."""
return abs_p.bind(x)
def pow(x: Array, y: Array) -> Array:
r"""Elementwise power: :math:`x^y`."""
return pow_p.bind(x, y)
def integer_pow(x: Array, y: int) -> Array:
r"""Elementwise power: :math:`x^y`, where :math:`y` is a fixed integer."""
if y == 0:
return _ones(x)
elif y == 1:
return x
else:
return integer_pow_p.bind(x, y=y)
def sqrt(x: Array) -> Array:
r"""Elementwise square root: :math:`\sqrt{x}`."""
return sqrt_p.bind(x)
def rsqrt(x: Array) -> Array:
r"""Elementwise reciprocal square root: :math:`1 \over \sqrt{x}."""
return rsqrt_p.bind(x)
def bitwise_not(x: Array) -> Array:
r"""Elementwise NOT: :math:`\neg x`."""
return not_p.bind(x)
def bitwise_and(x: Array, y: Array) -> Array:
r"""Elementwise AND: :math:`x \wedge y`."""
return and_p.bind(x, y)
def bitwise_or(x: Array, y: Array) -> Array:
r"""Elementwise OR: :math:`x \vee y`."""
return or_p.bind(x, y)
def bitwise_xor(x: Array, y: Array) -> Array:
r"""Elementwise exclusive OR: :math:`x \oplus y`."""
return xor_p.bind(x, y)
def population_count(x: Array) -> Array:
r"""Elementwise popcount, count the number of set bits in each element."""
return population_count_p.bind(x)
def add(x: Array, y: Array) -> Array:
r"""Elementwise addition: :math:`x + y`."""
return add_p.bind(x, y)
def sub(x: Array, y: Array) -> Array:
r"""Elementwise subtraction: :math:`x - y`."""
return sub_p.bind(x, y)
def mul(x: Array, y: Array) -> Array:
r"""Elementwise multiplication: :math:`x \times y`."""
return mul_p.bind(x, y)
def div(x: Array, y: Array) -> Array:
r"""Elementwise division: :math:`x \over y`."""
return div_p.bind(x, y)
def rem(x: Array, y: Array) -> Array:
r"""Elementwise remainder: :math:`x \bmod y`."""
return rem_p.bind(x, y)
def max(x: Array, y: Array) -> Array:
r"""Elementwise maximum: :math:`\mathrm{max}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return max_p.bind(x, y)
def min(x: Array, y: Array) -> Array:
r"""Elementwise minimum: :math:`\mathrm{min}(x, y)`
For complex numbers, uses a lexicographic comparison on the
`(real, imaginary)` pairs."""
return min_p.bind(x, y)
def shift_left(x: Array, y: Array) -> Array:
r"""Elementwise left shift: :math:`x \ll y`."""
return shift_left_p.bind(x, y)
def shift_right_arithmetic(x: Array, y: Array) -> Array:
r"""Elementwise arithmetic right shift: :math:`x \gg y`."""
return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x: Array, y: Array) -> Array:
r"""Elementwise logical right shift: :math:`x \gg y`."""
return shift_right_logical_p.bind(x, y)
def eq(x: Array, y: Array) -> Array:
r"""Elementwise equals: :math:`x = y`."""
return eq_p.bind(x, y)
def ne(x: Array, y: Array) -> Array:
r"""Elementwise not-equals: :math:`x \neq y`."""
return ne_p.bind(x, y)
def ge(x: Array, y: Array) -> Array:
r"""Elementwise greater-than-or-equals: :math:`x \geq y`."""
return ge_p.bind(x, y)
def gt(x: Array, y: Array) -> Array:
r"""Elementwise greater-than: :math:`x > y`."""
return gt_p.bind(x, y)
def le(x: Array, y: Array) -> Array:
r"""Elementwise less-than-or-equals: :math:`x \leq y`."""
return le_p.bind(x, y)
def lt(x: Array, y: Array) -> Array:
r"""Elementwise less-than: :math:`x < y`."""
return lt_p.bind(x, y)
def convert_element_type(operand: Array, new_dtype: DType) -> Array:
"""Elementwise cast.
Wraps XLA's `ConvertElementType
<https://www.tensorflow.org/xla/operation_semantics#convertelementtype>`_
operator, which performs an elementwise conversion from one type to another.
Similar to a C++ `static_cast`.
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
Returns:
An array with the same shape as `operand`, cast elementwise to `new_dtype`.
"""
new_dtype = dtypes.canonicalize_dtype(new_dtype)
# Avoids dropping precision by casting Python scalars to the default Jax
# type. If we passed a Python scalar directly to the bind call below, it is
# cast to the default type as part of the calling convention.
if type(operand) in dtypes.python_scalar_dtypes:
operand = np.asarray(operand, new_dtype)
old_dtype = dtypes.canonicalize_dtype(_dtype(operand))
if old_dtype == new_dtype:
return operand
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
msg = "Casting complex values to real discards the imaginary part"
warnings.warn(msg, np.ComplexWarning, stacklevel=2)
return convert_element_type_p.bind(
operand, new_dtype=new_dtype, old_dtype=old_dtype)
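# Illustrative sketch (hypothetical helper, not part of the public API): a
# round trip through a lower-precision dtype, e.g. to emulate bfloat16 storage.
# Information beyond bfloat16 precision is lost in the round trip.
def _example_round_trip_cast(x):
  low = convert_element_type(x, dtypes.bfloat16)
  return convert_element_type(low, _dtype(x))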
def bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:
"""Elementwise bitcast.
Wraps XLA's `BitcastConvertType
<https://www.tensorflow.org/xla/operation_semantics#bitcastconverttype>`_
operator, which performs a bit cast from one type to another. The bitwidth
of the source and destination types must match.
Args:
operand: an array or scalar value to be cast
new_dtype: the new type. Should be a NumPy type.
Returns:
An array with the same shape as `operand`, bitcast elementwise to
`new_dtype`.
"""
new_dtype = dtypes.canonicalize_dtype(new_dtype)
old_dtype = _dtype(operand)
if old_dtype != new_dtype:
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
else:
return operand
def clamp(min: Array, x: Array, max: Array) -> Array:
r"""Elementwise clamp.
Returns :math:`\mathrm{clamp}(x) = \begin{cases}
\mathit{min} & \text{if } x < \mathit{min},\\
\mathit{max} & \text{if } x > \mathit{max},\\
x & \text{otherwise}
\end{cases}`.
"""
return clamp_p.bind(min, x, max)
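# Illustrative reference (hypothetical helper, not part of the public API):
# `clamp(lo, x, hi)` is equivalent to composing the elementwise `max` and `min`
# defined above.
def _example_clamp_reference(lo, x, hi):
  return min(max(x, lo), hi)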
def concatenate(operands: Sequence[Array], dimension: int) -> Array:
"""Concatenates a sequence of arrays along `dimension`.
Wraps XLA's `Concatenate
<https://www.tensorflow.org/xla/operation_semantics#concatenate>`_
operator.
Args:
operands: a sequence of arrays to concatenate. The arrays must have equal
shapes, except in the `dimension` axis.
dimension: the dimension along which to concatenate the arrays.
Returns:
An array containing the concatenation.
"""
return concatenate_p.bind(*operands, dimension=dimension)
Precision = xla_client.PrecisionConfig.Precision
Precision.__str__ = lambda precision: precision.name
PrecisionType = Any
PrecisionLike = Union[None, PrecisionType, Tuple[PrecisionType, PrecisionType]]
class ConvDimensionNumbers(NamedTuple):
"""Describes batch, spatial, and feature dimensions of a convolution.
Args:
lhs_spec: a tuple of nonnegative integer dimension numbers containing
`(batch dimension, feature dimension, spatial dimensions...)`.
rhs_spec: a tuple of nonnegative integer dimension numbers containing
`(out feature dimension, in feature dimension, spatial dimensions...)`.
out_spec: a tuple of nonnegative integer dimension numbers containing
`(batch dimension, feature dimension, spatial dimensions...)`.
"""
lhs_spec: Sequence[int]
rhs_spec: Sequence[int]
out_spec: Sequence[int]
ConvGeneralDilatedDimensionNumbers = Union[
None, ConvDimensionNumbers, Tuple[str, str, str]]
def conv_general_dilated(
lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]] = None,
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
feature_group_count: int = 1, batch_group_count: int = 1,
precision: PrecisionLike = None) -> Array:
"""General n-dimensional convolution operator, with optional dilation.
Wraps XLA's `Conv
<https://www.tensorflow.org/xla/operation_semantics#conv_convolution>`_
operator.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
is also known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
dimension_numbers: either `None`, a `ConvDimensionNumbers` object, or
a 3-tuple `(lhs_spec, rhs_spec, out_spec)`, where each element is a string
of length `n+2`.
feature_group_count: integer, default 1. See XLA HLO docs.
batch_group_count: integer, default 1. See XLA HLO docs.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      ``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
An array containing the convolution result.
In the string case of `dimension_numbers`, each character identifies by
position:
- the batch dimensions in `lhs`, `rhs`, and the output with the character
'N',
- the feature dimensions in `lhs` and the output with the character 'C',
- the input and output feature dimensions in rhs with the characters 'I'
and 'O' respectively, and
- spatial dimension correspondences between lhs, rhs, and the output using
any distinct characters.
For example, to indicate dimension numbers consistent with the `conv` function
with two spatial dimensions, one could use `('NCHW', 'OIHW', 'NCHW')`. As
another example, to indicate dimension numbers consistent with the TensorFlow
Conv2D operation, one could use `('NHWC', 'HWIO', 'NHWC')`. When using the
latter form of convolution dimension specification, window strides are
associated with spatial dimension character labels according to the order in
which the labels appear in the `rhs_spec` string, so that `window_strides[0]`
is matched with the dimension corresponding to the first character
appearing in rhs_spec that is not `'I'` or `'O'`.
If `dimension_numbers` is `None`, the default is `('NCHW', 'OIHW', 'NCHW')`
(for a 2D convolution).
"""
dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
if lhs_dilation is None:
lhs_dilation = (1,) * (lhs.ndim - 2)
elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):
raise ValueError(
"String padding is not implemented for transposed convolution "
"using this op. Please either exactly specify the required padding or "
"use conv_transpose.")
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
if isinstance(padding, str):
lhs_perm, rhs_perm, _ = dnums
rhs_shape = np.take(rhs.shape, rhs_perm)[2:]
effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]
padding = padtype_to_pads(
np.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,
window_strides, padding)
return conv_general_dilated_p.bind(
lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
dimension_numbers=dnums,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
lhs_shape=lhs.shape, rhs_shape=rhs.shape,
precision=_canonicalize_precision(precision))
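# Illustrative sketch (hypothetical helper, not part of the public API): a
# plain 2D 'SAME' convolution in NHWC/HWIO layout expressed via
# `conv_general_dilated`, with dilations and group counts left at their
# defaults.
def _example_conv2d_nhwc(images, kernel, strides=(1, 1)):
  # images: (batch, height, width, in_channels)
  # kernel: (kernel_h, kernel_w, in_channels, out_channels)
  return conv_general_dilated(
      images, kernel, window_strides=strides, padding='SAME',
      dimension_numbers=('NHWC', 'HWIO', 'NHWC'))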
def dot(lhs: Array, rhs: Array, precision: PrecisionLike = None) -> Array:
"""Vector/vector, matrix/vector, and matrix/matrix multiplication.
Wraps XLA's `Dot
<https://www.tensorflow.org/xla/operation_semantics#dot>`_
operator.
For more general contraction, see the `dot_general` operator.
Args:
lhs: an array of rank 1 or 2.
rhs: an array of rank 1 or 2.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      ``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
An array containing the product.
"""
if 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2 and lhs.shape[-1] == rhs.shape[0]:
return dot_general(lhs, rhs, (((lhs.ndim - 1,), (0,)), ((), ())),
precision=precision)
else:
raise TypeError("Incompatible shapes for dot: got {} and {}.".format(
lhs.shape, rhs.shape))
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
precision: PrecisionLike = None) -> Array:
"""More general contraction operator.
Wraps XLA's `DotGeneral
<https://www.tensorflow.org/xla/operation_semantics#dotgeneral>`_
operator.
Args:
lhs: an array
rhs: an array
dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims),
(lhs_batch_dims, rhs_batch_dims))`
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      ``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
An array containing the result.
"""
contract_dims_seq, batch_dims_seq = dimension_numbers
contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))
batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))
return dot_general_p.bind(lhs, rhs,
dimension_numbers=(contract_dims, batch_dims),
precision=_canonicalize_precision(precision))
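# Illustrative sketch (hypothetical helper, not part of the public API):
# batched matrix multiplication written directly as a `dot_general`
# contraction: axis 2 of `lhs` contracts with axis 1 of `rhs`, and axis 0 of
# both operands is a batch dimension.
def _example_batched_matmul(lhs, rhs):
  # lhs: (b, m, k), rhs: (b, k, n) -> result: (b, m, n)
  return dot_general(lhs, rhs, (((2,), (1,)), ((0,), (0,))))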
def broadcast(operand: Array, sizes: Sequence[int]) -> Array:
"""Broadcasts an array, adding new major dimensions.
Wraps XLA's `Broadcast
<https://www.tensorflow.org/xla/operation_semantics#broadcast>`_
operator.
Args:
operand: an array
sizes: a sequence of integers, giving the sizes of new major dimensions
to add.
Returns:
An array containing the result.
"""
dims = tuple(range(len(sizes), len(sizes) + np.ndim(operand)))
return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims)
def broadcast_in_dim(operand: Array, shape: Shape,
broadcast_dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `BroadcastInDim
<https://www.tensorflow.org/xla/operation_semantics#broadcastindim>`_
operator.
"""
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
if (np.ndim(operand) == len(shape) and not len(broadcast_dimensions)
and isinstance(operand, (xla.DeviceArray, core.Tracer))):
return operand
return broadcast_in_dim_p.bind(
operand, shape=tuple(shape),
broadcast_dimensions=tuple(broadcast_dimensions))
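# Illustrative sketch (hypothetical helper, not part of the public API):
# `broadcast_in_dim` places dimension i of `operand` at output position
# `broadcast_dimensions[i]` and broadcasts along the remaining output
# dimensions. Here a length-n vector is tiled across the rows of a
# (num_rows, n) result.
def _example_broadcast_vector_to_rows(v, num_rows):
  return broadcast_in_dim(v, (num_rows, v.shape[0]), (1,))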
def broadcast_to_rank(x: Array, rank: int) -> Array:
"""Adds leading dimensions of ``1`` to give ``x`` rank ``rank``."""
return broadcast(x, (1,) * (rank - x.ndim))
def reshape(operand: Array, new_sizes: Shape,
dimensions: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `Reshape
<https://www.tensorflow.org/xla/operation_semantics#reshape>`_
operator.
For inserting/removing dimensions of size 1, prefer using ``lax.squeeze`` /
``lax.expand_dims``. These preserve information about axis identity that may
be useful for advanced transformation rules.
"""
new_sizes = canonicalize_shape(new_sizes) # TODO
new_sizes = tuple(new_sizes)
same_shape = np.shape(operand) == new_sizes
same_dims = dimensions is None or tuple(dimensions) == tuple(range(np.ndim(operand)))
if np.shape(operand) and same_shape and same_dims:
return operand
else:
return reshape_p.bind(
operand, new_sizes=new_sizes,
dimensions=None if dimensions is None or same_dims else tuple(dimensions))
def pad(operand: Array, padding_value: Array,
padding_config: Sequence[Tuple[int, int, int]]) -> Array:
"""Applies low, high, and/or interior padding to an array.
Wraps XLA's `Pad
<https://www.tensorflow.org/xla/operation_semantics#pad>`_
operator.
Args:
operand: an array to be padded.
padding_value: the value to be inserted as padding. Must have the same dtype
as ``operand``.
padding_config: a sequence of ``(low, high, interior)`` tuples of integers,
giving the amount of low, high, and interior (dilation) padding to insert
in each dimension.
Returns:
The ``operand`` array with padding value ``padding_value`` inserted in each
dimension according to the ``padding_config``.
"""
return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
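# Illustrative sketch (hypothetical helper, not part of the public API): each
# `padding_config` entry is `(low, high, interior)`; interior padding inserts
# values *between* existing elements, which is how dilation is expressed. For a
# length-n input this produces a length 2 * n + 2 output.
def _example_pad_1d(x):
  return pad(x, _const(x, 0), [(1, 2, 1)])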
def rev(operand: Array, dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `Rev
<https://www.tensorflow.org/xla/operation_semantics#rev_reverse>`_
operator.
"""
return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred: Array, on_true: Array, on_false: Array) -> Array:
"""Wraps XLA's `Select
<https://www.tensorflow.org/xla/operation_semantics#select>`_
operator.
"""
return select_p.bind(pred, on_true, on_false)
def slice(operand: Array, start_indices: Sequence[int],
limit_indices: Sequence[int],
strides: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `Slice
<https://www.tensorflow.org/xla/operation_semantics#slice>`_
operator.
"""
return slice_p.bind(operand, start_indices=tuple(start_indices),
limit_indices=tuple(limit_indices),
strides=None if strides is None else tuple(strides))
def dynamic_slice(operand: Array, start_indices: Sequence[Array],
slice_sizes: Shape) -> Array:
"""Wraps XLA's `DynamicSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicslice>`_
operator.
Args:
operand: an array to slice.
start_indices: a list of scalar indices, one per dimension. These values
may be dynamic.
slice_sizes: the size of the slice. Must be a sequence of non-negative
integers with length equal to `ndim(operand)`. Inside a JIT compiled
function, only static values are supported (all JAX arrays inside JIT
must have statically known size).
Returns:
An array containing the slice.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_slice_p.bind(operand, *start_indices,
slice_sizes=tuple(slice_sizes))
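# Illustrative sketch (hypothetical helper, not part of the public API):
# extracting a fixed-height window of rows whose start position is a traced
# (dynamic) value. The window height must be static.
def _example_dynamic_row_window(x, start_row, window_rows):
  # x: (rows, cols); start_row may be dynamic, window_rows must be a Python int.
  return dynamic_slice(x, (start_row, _zero(start_row)),
                       (window_rows, x.shape[1]))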
def dynamic_update_slice(operand: Array, update: Array,
start_indices: Array) -> Array:
"""Wraps XLA's `DynamicUpdateSlice
<https://www.tensorflow.org/xla/operation_semantics#dynamicupdateslice>`_
operator.
Args:
operand: an array to slice.
update: an array containing the new values to write onto `operand`.
start_indices: a list of scalar indices, one per dimension.
Returns:
An array containing the slice.
"""
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_update_slice_p.bind(operand, update, *start_indices)
class GatherDimensionNumbers(NamedTuple):
"""
  Describes the dimension number arguments to `XLA's Gather operator
<https://www.tensorflow.org/xla/operation_semantics#gather>`_. See the XLA
documentation for more details of what the dimension numbers mean.
Args:
offset_dims: the set of dimensions in the `gather` output that offset into
an array sliced from `operand`. Must be a tuple of integers in ascending
order, each representing a dimension number of the output.
collapsed_slice_dims: the set of dimensions `i` in `operand` that have
`slice_sizes[i] == 1` and that should not have a corresponding dimension
in the output of the gather. Must be a tuple of integers in ascending
order.
start_index_map: for each dimension in `start_indices`, gives the
corresponding dimension in `operand` that is to be sliced. Must be a
tuple of integers with size equal to `start_indices.shape[-1]`.
Unlike XLA's `GatherDimensionNumbers` structure, `index_vector_dim` is
implicit; there is always an index vector dimension and it must always be the
last dimension. To gather scalar indices, add a trailing dimension of size 1.
"""
offset_dims: Sequence[int]
collapsed_slice_dims: Sequence[int]
start_index_map: Sequence[int]
def gather(operand: Array, start_indices: Array,
dimension_numbers: GatherDimensionNumbers,
slice_sizes: Shape) -> Array:
"""Gather operator.
Wraps `XLA's Gather operator
<https://www.tensorflow.org/xla/operation_semantics#gather>`_.
The semantics of gather are complicated, and its API might change in the
future. For most use cases, you should prefer `Numpy-style indexing
<https://docs.scipy.org/doc/numpy-1.16.0/reference/arrays.indexing.html>`_
(e.g., `x[:, (1,4,7), ...]`), rather than using `gather` directly.
Args:
operand: an array from which slices should be taken
start_indices: the indices at which slices should be taken
dimension_numbers: a `lax.GatherDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices` and the output relate.
slice_sizes: the size of each slice. Must be a sequence of non-negative
integers with length equal to `ndim(operand)`.
Returns:
An array containing the gather output.
"""
return gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=canonicalize_shape(slice_sizes))
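# Illustrative sketch (hypothetical helper, not part of the public API):
# gathering whole rows of a matrix by index, an explicit spelling of
# `x[idx[:, 0], :]`. Per the note above, `idx` carries a trailing index-vector
# dimension of size 1.
def _example_gather_rows(x, idx):
  # x: (rows, cols); idx: (num_indices, 1), integer dtype.
  dnums = GatherDimensionNumbers(
      offset_dims=(1,),           # output dim 1 ranges over each row slice
      collapsed_slice_dims=(0,),  # the size-1 sliced row dimension is dropped
      start_index_map=(0,))       # index values select along dim 0 of x
  return gather(x, idx, dimension_numbers=dnums, slice_sizes=(1, x.shape[1]))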
class ScatterDimensionNumbers(NamedTuple):
"""
  Describes the dimension number arguments to `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_. See the XLA
documentation for more details of what the dimension numbers mean.
Args:
update_window_dims: the set of dimensions in the `updates` that are window
dimensions. Must be a tuple of integers in ascending
order, each representing a dimension number.
inserted_window_dims: the set of size 1 window dimensions that must be inserted
into the shape of `updates`. Must be a tuple of integers in ascending
order, each representing a dimension number of the output. These are the
mirror image of `collapsed_slice_dims` in the case of `gather`.
scatter_dims_to_operand_dims: for each dimension in `scatter_indices`, gives
the corresponding dimension in `operand`. Must be a sequence of integers
with size equal to indices.shape[-1].
Unlike XLA's `ScatterDimensionNumbers` structure, `index_vector_dim` is
implicit; there is always an index vector dimension and it must always be the
last dimension. To scatter scalar indices, add a trailing dimension of size 1.
"""
update_window_dims: Sequence[int]
inserted_window_dims: Sequence[int]
scatter_dims_to_operand_dims: Sequence[int]
def scatter_add(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-add operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
addition is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
An array containing the sum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(add, _abstractify(_const(operand, 0)))
return scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
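# Illustrative sketch (hypothetical helper, not part of the public API):
# accumulating rows of `updates` into the rows of `operand` selected by
# `row_indices`. Duplicate indices are summed, which is the reason to use
# `scatter_add` rather than plain `scatter`.
def _example_scatter_add_rows(operand, row_indices, updates):
  # operand: (rows, cols); row_indices: (num_updates, 1); updates: (num_updates, cols)
  dnums = ScatterDimensionNumbers(
      update_window_dims=(1,),
      inserted_window_dims=(0,),
      scatter_dims_to_operand_dims=(0,))
  return scatter_add(operand, row_indices, updates, dnums)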
def scatter_mul(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-multiply operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
multiplication is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
    An array containing the product of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(_const(operand, 1)))
return scatter_mul_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def scatter_min(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-min operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `min` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
    An array containing the minimum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(min, _abstractify(_const(operand, 0)))
return scatter_min_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def scatter_max(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-max operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where
the `max` function is used to combine updates and values from `operand`.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
    An array containing the maximum of `operand` and the scattered updates.
"""
jaxpr, consts = _reduction_jaxpr(max, _abstractify(_const(operand, 0)))
return scatter_max_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
# Define this outside of scatter to ensure cache hits.
_scatter_reduction_computation = lambda x, y: y
def scatter(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
"""Scatter-update operator.
Wraps `XLA's Scatter operator
<https://www.tensorflow.org/xla/operation_semantics#scatter>`_, where updates
replace values from `operand`.
If multiple updates are performed to the same index of operand, they may be
applied in any order.
The semantics of scatter are complicated and its API is subject to change.
Args:
operand: an array to which the scatter should be applied
scatter_indices: an array that gives the indices in `operand` to which each
update in `updates` should be applied.
updates: the updates that should be scattered onto `operand`.
dimension_numbers: a `lax.ScatterDimensionNumbers` object that describes
how dimensions of `operand`, `start_indices`, `updates` and the output
relate.
indices_are_sorted: whether `scatter_indices` is known to be sorted. If
true, may improve performance on some backends.
unique_indices: whether the indices to be updated in ``operand`` are
guaranteed to not overlap with each other. If true, may improve performance on
some backends.
Returns:
    An array containing the values of `operand` with the scattered updates
    applied.
"""
jaxpr, consts = _reduction_jaxpr(_scatter_reduction_computation,
_abstractify(_const(operand, 0)))
return scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:
indices = concatenate([expand_dims(i, (1,)) for i in idxs], 1)
indices = indices % np.array([src.shape[ax] for ax in axes])
slice_sizes = list(src.shape)
for ax in axes:
slice_sizes[ax] = 1
offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=axes,
start_index_map=axes)
return gather(src, indices, dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
def transpose(operand: Array, permutation: Sequence[int]) -> Array:
"""Wraps XLA's `Transpose
<https://www.tensorflow.org/xla/operation_semantics#transpose>`_
operator.
"""
permutation = tuple(permutation)
if permutation == tuple(range(len(permutation))):
return operand
else:
return transpose_p.bind(operand, permutation=permutation)
def argmin(operand: Array, axis: int,
index_dtype: DType) -> Tuple[Array, Array]:
"""Computes the index of the minimum element along ``axis``."""
return argmin_p.bind(operand, axes=(axis,),
index_dtype=dtypes.canonicalize_dtype(index_dtype))
def argmax(operand: Array, axis: int,
index_dtype: DType) -> Tuple[Array, Array]:
"""Computes the index of the maximum element along ``axis``."""
return argmax_p.bind(operand, axes=(axis,),
index_dtype=dtypes.canonicalize_dtype(index_dtype))
def reduce(operands: Array, init_values: Array, computation: Callable,
dimensions: Sequence[int]) -> Array:
"""Wraps XLA's `Reduce
<https://www.tensorflow.org/xla/operation_semantics#reduce>`_
operator.
"""
flat_operands, operand_tree = tree_util.tree_flatten(operands)
flat_init_values, init_value_tree = tree_util.tree_flatten(init_values)
if operand_tree != init_value_tree:
raise ValueError('Operands must have the same tree structure as init_values:'
f' {operand_tree} vs. {init_value_tree}')
if len(flat_operands) != len(flat_init_values):
raise ValueError('Must have same total number of operands as init_values: '
f' {len(flat_operands)} vs. {len(flat_init_values)}')
monoid_reducer = _get_monoid_reducer(computation, flat_init_values)
if monoid_reducer:
return monoid_reducer(*flat_operands, dimensions)
else:
flat_init_avals = safe_map(_abstractify, flat_init_values)
jaxpr, consts, out_tree = _variadic_reduction_jaxpr(
computation, tuple(flat_init_avals), init_value_tree)
out = reduce_p.bind(*(flat_operands + flat_init_values), computation=computation,
jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
return tree_util.tree_unflatten(out_tree, out)
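# Illustrative sketch (hypothetical helper, not part of the public API): a
# reduction whose combining function has no specialised primitive (elementwise
# XOR over axis 0), so it exercises the generic `reduce` path rather than a
# monoid reducer.
def _example_reduce_xor(x):
  # x: an integer array; 0 is the identity for XOR.
  return reduce(x, _const(x, 0), bitwise_xor, dimensions=(0,))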
@cache()
def _reduction_jaxpr(computation, aval):
pval = pe.PartialVal.unknown(aval)
comp = lu.wrap_init(lambda x, y: (computation(x, y),))
jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)
return jaxpr, consts
@cache()
def _variadic_reduction_jaxpr(computation, flat_avals, aval_tree):
avals = tree_util.tree_unflatten(aval_tree, flat_avals)
flat_in_avals, in_tree = tree_util.tree_flatten((avals, avals))
pvals = safe_map(pe.PartialVal.unknown, flat_in_avals)
comp = lu.wrap_init(computation)
flat_comp, out_tree = api_util.flatten_fun_nokwargs(comp, in_tree)
jaxpr, _, consts = pe.trace_to_jaxpr(flat_comp, tuple(pvals),
instantiate=False)
return jaxpr, consts, out_tree()
def _get_monoid_reducer(monoid_op: Callable, xs: Array) -> Optional[Callable]:
if len(xs) != 1:
return None
x, = xs
aval = core.get_aval(x)
dtype = _dtype(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
    if monoid_op is add:
      return np.equal(aval.val, 0) and _reduce_sum
    elif monoid_op is mul:
      return np.equal(aval.val, 1) and _reduce_prod
    elif monoid_op is bitwise_or and dtype == np.bool_:
      return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_or
    elif monoid_op is bitwise_and and dtype == np.bool_:
      return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_and
    elif monoid_op is max:
      return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_max
    elif monoid_op is min:
      return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_min
return None
def _get_max_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, np.inexact):
return np.array(-np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(False, np.bool_)
def _get_min_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, np.inexact):
return np.array(np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(True, np.bool_)
def _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:
return reduce_sum_p.bind(operand, axes=tuple(axes))
def _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:
return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand: Array, axes: Sequence[int]) -> Array:
return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand: Array, axes: Sequence[int]) -> Array:
return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand: Array, axes: Sequence[int]) -> Array:
return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand: Array, init_value: Array, computation: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
"""Wraps XLA's `ReduceWindowWithGeneralPadding
<https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_
operator.
"""
if isinstance(padding, str):
dilated_window_dims = (window_dimensions if window_dilation is None else
_dilate_shape(window_dimensions, window_dilation))
padding = tuple(padtype_to_pads(operand.shape, dilated_window_dims,
window_strides, padding))
else:
padding = tuple(padding)
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
monoid_reducer = _get_monoid_window_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation)
else:
jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding,
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
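# Illustrative sketch (hypothetical helper, not part of the public API): 2x2
# max pooling with stride 2 over an NHWC batch of floating-point images; the
# window moves only over the two spatial dimensions.
def _example_max_pool_2x2(images):
  # images: (batch, height, width, channels), floating point.
  return reduce_window(images, _const(images, -np.inf), max,
                       window_dimensions=(1, 2, 2, 1),
                       window_strides=(1, 2, 2, 1),
                       padding='VALID')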
def _get_monoid_window_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
aval = core.get_aval(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_window_sum
elif monoid_op is max:
return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
elif monoid_op is min:
return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
return None
def _reduce_window_sum(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_sum_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_prod(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
init_value = _const(operand, 1)
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(init_value))
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_max(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_max_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_min(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_min_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _select_and_scatter(operand: Array, select: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]], source: Array,
init_value: Array, scatter: Callable,
base_dilation: Sequence[int],
window_dilation: Sequence[int]) -> Array:
select_jaxpr, select_consts = _reduction_jaxpr(select, _abstractify(init_value))
scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, _abstractify(init_value))
return select_and_scatter_p.bind(
operand, source, init_value, select_jaxpr=select_jaxpr,
select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _select_and_scatter_add(source: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]]) -> Array:
return select_and_scatter_add_p.bind(
source, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding))
def _select_and_gather_add(tangents: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Sequence[int],
window_dilation: Sequence[int]) -> Array:
"""Extracts the tangent corresponding to the minimum or maximum element in each
window of the `operand` array.
Wraps XLA's `ReduceWindow
<https://www.tensorflow.org/xla/operation_semantics#reducewindow>`_
operator, which applies a reduction function to all elements in each window of the
input multi-dimensional array. In this case, the input multi-dimensional array is
built by packing each element in the `operand` array with its corresponding
element in the `tangents` array.
Args:
tangents: an array
operand: an array with the same shape as `tangents`
select_prim: a reduction function (restricted to `ge_p` and `le_p`)
window_dimensions: an array of integers for window dimension values
    window_strides: an array of integers for window stride values
    padding: an array of `(low, high)` integer pairs giving the padding to
      apply in each dimension
    base_dilation: an array of integers for base dilation values
    window_dilation: an array of integers for window dilation values
Returns:
An array containing the elements in `tangents` corresponding to the output of the
    reduction of `operand` in each window.
"""
return select_and_gather_add_p.bind(
tangents, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def sort(operand: Union[Array, Sequence[Array]], dimension: int = -1,
is_stable: bool = True, num_keys: int = 1) -> Union[Array, Tuple[Array, ...]]:
"""Wraps XLA's `Sort
<https://www.tensorflow.org/xla/operation_semantics#sort>`_
operator.
Args:
operand : Array or sequence of arrays
dimension : integer dimension along which to sort. Default: -1.
is_stable : boolean specifying whether to use a stable sort. Default: True.
num_keys : number of operands to treat as sort keys. Default: 1.
For num_keys > 1, the sort order will be determined lexicographically using
the first `num_keys` arrays, with the first key being primary.
The remaining operands will be returned with the same permutation.
Returns:
operand : sorted version of the input or inputs.
"""
if isinstance(operand, Sequence):
if len(operand) == 0:
raise TypeError("Sort requires at least one operand")
if not (1 <= num_keys <= len(operand)):
raise ValueError(f"num_keys={num_keys} must be between 1 and len(operand)={len(operand)}")
dimension = canonicalize_axis(dimension, len(operand[0].shape))
return tuple(sort_p.bind(*operand, dimension=dimension,
is_stable=is_stable,
num_keys=num_keys))
else:
if num_keys != 1:
raise ValueError(f"num_keys={num_keys} must equal 1 for a single operand.")
dimension = canonicalize_axis(dimension, len(operand.shape))
return sort_p.bind(operand, dimension=dimension, is_stable=is_stable, num_keys=1)[0]
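# Illustrative sketch (hypothetical helper, not part of the public API): a
# lexicographic sort using the first two operands as primary and secondary
# keys; the payload is permuted along with the keys but does not affect the
# order.
def _example_lexsort(primary, secondary, payload):
  return sort((primary, secondary, payload), dimension=-1, num_keys=2)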
def sort_key_val(keys: Array, values: Array, dimension: int = -1,
is_stable: bool = True) -> Tuple[Array, Array]:
"""Sorts ``keys`` along ``dimension`` and applies same permutation to ``values``."""
dimension = canonicalize_axis(dimension, len(keys.shape))
k, v = sort_p.bind(keys, values, dimension=dimension, is_stable=is_stable, num_keys=1)
return k, v
def top_k(operand: Array, k: int) -> Tuple[Array, Array]:
"""Returns top ``k`` values and their indices along the last axis of ``operand``."""
k = int(k)
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
return top_k_p.bind(operand, k=k)
def tie_in(x: Array, y: Array) -> Array:
"""Deprecated. Ignores ``x`` and returns ``y``."""
return y
def full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:
"""Returns an array of `shape` filled with `fill_value`.
Arguments:
shape: sequence of integers, describing the shape of the output array.
fill_value: the value to fill the new array with.
dtype: the type of the output array, or `None`. If not `None`, `fill_value`
will be cast to `dtype`.
"""
shape = canonicalize_shape(shape)
if np.shape(fill_value):
msg = "full must be called with scalar fill_value, got fill_value.shape {}."
raise TypeError(msg.format(np.shape(fill_value)))
dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))
fill_value = convert_element_type(fill_value, dtype)
return broadcast(fill_value, shape)
def _device_put_raw(x):
if isinstance(x, xla.DeviceArray):
return x
else:
aval = raise_to_shaped(core.get_aval(x))
return xla.array_result_handler(None, aval)(*xla.device_put(x))
def iota(dtype: DType, size: int) -> Array:
"""Wraps XLA's `Iota
<https://www.tensorflow.org/xla/operation_semantics#iota>`_
operator.
"""
if config.omnistaging_enabled:
dtype = dtypes.canonicalize_dtype(dtype)
size = core.concrete_or_error(int, size, "size argument of lax.iota")
return iota_p.bind(dtype=dtype, shape=(size,), dimension=0)
else:
size = size if type(size) is masking.Poly else int(size)
shape = canonicalize_shape((size,))
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.iota(dtype, shape[0])
aval = ShapedArray(shape, dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:
"""Convenience wrapper around ``iota``."""
dtype = dtypes.canonicalize_dtype(dtype)
shape = canonicalize_shape(shape)
dimension = core.concrete_or_error(
int, dimension, "dimension argument of lax.broadcasted_iota")
return iota_p.bind(dtype=dtype, shape=shape, dimension=dimension)
def _eye(dtype: DType, shape: Shape, offset: int) -> Array:
"""Like numpy.eye, create a 2D array with ones on a diagonal."""
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
if config.omnistaging_enabled:
bool_eye = eq(add(broadcasted_iota(np.int32, (N, M), 0), np.int32(offset)),
broadcasted_iota(np.int32, (N, M), 1))
return convert_element_type_p.bind(bool_eye, new_dtype=dtype,
old_dtype=np.bool_)
else:
lazy_expr = lazy.eye(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:
"""This utility function exists for creating Kronecker delta arrays."""
shape = tuple(map(int, shape))
axes = tuple(map(int, axes))
dtype = dtypes.canonicalize_dtype(dtype)
base_shape = tuple(np.take(shape, axes))
if config.omnistaging_enabled:
iotas = [broadcasted_iota(np.uint32, base_shape, i)
for i in range(len(base_shape))]
eyes = [eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])]
result = convert_element_type_p.bind(_reduce(operator.and_, eyes),
new_dtype=dtype, old_dtype=np.bool_)
return broadcast_in_dim(result, shape, axes)
else:
lazy_expr = lazy.broadcast(lazy.delta(dtype, base_shape), shape, axes)
aval = ShapedArray(shape, dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def _tri(dtype: DType, shape: Shape, offset: int) -> Array:
"""Like numpy.tri, create a 2D array with ones below a diagonal."""
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
if config.omnistaging_enabled:
bool_tri = ge(add(broadcasted_iota(np.int32, (N, M), 0), np.int32(offset)),
broadcasted_iota(np.int32, (N, M), 1))
    return convert_element_type_p.bind(bool_tri, new_dtype=dtype,
                                       old_dtype=np.bool_)
else:
lazy_expr = lazy.tri(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def stop_gradient(x):
"""Stops gradient computation.
Operationally ``stop_gradient`` is the identity function, that is, it returns
argument `x` unchanged. However, ``stop_gradient`` prevents the flow of
gradients during forward or reverse-mode automatic differentiation. If there
are multiple nested gradient computations, ``stop_gradient`` stops gradients
for all of them.
For example:
>>> jax.grad(lambda x: x**2)(3.)
array(6., dtype=float32)
>>> jax.grad(lambda x: jax.lax.stop_gradient(x)**2)(3.)
array(0., dtype=float32)
>>> jax.grad(jax.grad(lambda x: x**2))(3.)
array(2., dtype=float32)
>>> jax.grad(jax.grad(lambda x: jax.lax.stop_gradient(x)**2))(3.)
array(0., dtype=float32)
"""
def stop(x):
if (dtypes.issubdtype(_dtype(x), np.floating) or
dtypes.issubdtype(_dtype(x), np.complexfloating)):
return ad_util.stop_gradient_p.bind(x)
else:
return x # only bind primitive on inexact dtypes, to avoid some staging
return tree_map(stop, x)
### convenience wrappers around traceables
def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: str, precision: PrecisionLike = None) -> Array:
"""Convenience wrapper around `conv_general_dilated`.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
    padding: either the string `'SAME'` or the string `'VALID'`.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      ``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
An array containing the convolution result.
"""
return conv_general_dilated(lhs, rhs, window_strides, padding,
precision=precision)
def conv_with_general_padding(lhs: Array, rhs: Array,
window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]],
rhs_dilation: Optional[Sequence[int]],
precision: PrecisionLike = None) -> Array:
"""Convenience wrapper around `conv_general_dilated`.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
window_strides: a sequence of `n` integers, representing the inter-window
strides.
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension.
lhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `lhs`. LHS dilation
is also known as transposed convolution.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      ``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
An array containing the convolution result.
"""
return conv_general_dilated(
lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation, precision=precision)
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(np.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
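# Worked example of the rule above: for kernel size k = 3 and stride s = 2
# under 'SAME' padding, pad_len = 3 + 2 - 2 = 3; since s <= k - 1,
# pad_a = ceil(3 / 2) = 2 and pad_b = 3 - 2 = 1, so that spatial dimension of
# the transposed convolution is padded with (2, 1).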
def _flip_axes(x, axes):
"""Flip ndarray 'x' along each axis specified in axes tuple."""
for axis in axes:
x = np.flip(x, axis)
return x
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
transpose_kernel: bool = False,
precision: PrecisionLike = None) -> Array:
"""Convenience wrapper for calculating the N-d convolution "transpose".
This function directly calculates a fractionally strided conv rather than
indirectly calculating the gradient (transpose) of a forward convolution.
Args:
lhs: a rank `n+2` dimensional input array.
rhs: a rank `n+2` dimensional array of kernel weights.
strides: sequence of `n` integers, sets fractional stride.
padding: 'SAME', 'VALID' will set as transpose of corresponding forward
conv, or a sequence of `n` integer 2-tuples describing before-and-after
padding for each `n` spatial dimension.
rhs_dilation: `None`, or a sequence of `n` integers, giving the
dilation factor to apply in each spatial dimension of `rhs`. RHS dilation
is also known as atrous convolution.
dimension_numbers: tuple of dimension descriptors as in
lax.conv_general_dilated. Defaults to tensorflow convention.
transpose_kernel: if True flips spatial axes and swaps the input/output
channel axes of the kernel. This makes the output of this function identical
to the gradient-derived functions like keras.layers.Conv2DTranspose
applied to the same kernel. For typical use in neural nets this is completely
pointless and just makes input/output channel specification confusing.
precision: Optional. Either ``None``, which means the default precision for
the backend, a ``lax.Precision`` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
      ``lax.Precision`` enums indicating precision of ``lhs`` and ``rhs``.
Returns:
Transposed N-d convolution, with output padding following the conventions of
keras.layers.Conv2DTranspose.
"""
assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) >= 2
ndims = len(lhs.shape)
one = (1,) * (ndims - 2)
# Set dimensional layout defaults if not specified.
if dimension_numbers is None:
if ndims == 2:
dimension_numbers = ('NC', 'IO', 'NC')
elif ndims == 3:
dimension_numbers = ('NHC', 'HIO', 'NHC')
elif ndims == 4:
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
elif ndims == 5:
dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
else:
      raise ValueError(
          'No default dimension_numbers for arrays with more than 5 dimensions.')
dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
k_shape = np.take(rhs.shape, dn.rhs_spec)
k_sdims = k_shape[2:]
# Calculate correct output shape given padding and strides.
pads: Union[str, Sequence[Tuple[int, int]]]
if padding in {'SAME', 'VALID'}:
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)
pads = [_conv_transpose_padding(k, s, padding)
for k,s in zip(effective_k_size, strides)]
else:
pads = padding
if transpose_kernel:
# flip spatial dims and swap input / output channel axes
rhs = _flip_axes(rhs, np.array(dn.rhs_spec)[2:])
rhs = np.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
precision=precision)
def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
shape: Optional[Shape] = None) -> Array:
"""Create a full array like np.full based on the example array `x`.
Args:
x: example array-like, used for shape and dtype information.
fill_value: a scalar value to fill the entries of the output array.
dtype: optional, a dtype parameter for the output ndarray.
shape: optional, a shape parameter for the output ndarray.
Returns:
An ndarray with the same shape as `x` with its entries set equal to
`fill_value`, similar to the output of np.full.
"""
fill_shape = np.shape(x) if shape is None else canonicalize_shape(shape)
if not config.omnistaging_enabled:
fill_value = tie_in(x, fill_value)
return full(fill_shape, fill_value, dtype or _dtype(x))
def collapse(operand: Array, start_dimension: int,
stop_dimension: int) -> Array:
"""Collapses dimensions of an array into a single dimension.
For example, if ``operand`` is an array with shape ``[2, 3, 4]``,
``collapse(operand, 0, 2).shape == [6, 4]``. The elements of the collapsed
dimension are laid out major-to-minor, i.e., with the lowest-numbered
dimension as the slowest varying dimension.
Args:
operand: an input array.
start_dimension: the start of the dimensions to collapse (inclusive).
stop_dimension: the end of the dimensions to collapse (exclusive).
Returns:
An array where dimensions ``[start_dimension, stop_dimension)`` have been
collapsed (raveled) into a single dimension.
"""
lo, hi = start_dimension, stop_dimension
size = prod(operand.shape[lo:hi])
new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:]
return reshape(operand, new_shape)
def slice_in_dim(operand: Array, start_index: Optional[int],
limit_index: Optional[int],
stride: int = 1, axis: int = 0)-> Array:
"""Convenience wrapper around slice applying to only one dimension."""
start_indices = [0] * operand.ndim
limit_indices = list(operand.shape)
strides = [1] * operand.ndim
# translate `None`
len_axis = operand.shape[axis]
start_index_int = _canonicalize_dimension(start_index) if start_index is not None else 0
limit_index_int = _canonicalize_dimension(limit_index) if limit_index is not None else len_axis
# translate negative indices
if start_index_int < 0:
start_index_int = start_index_int + len_axis
if limit_index_int < 0:
limit_index_int = limit_index_int + len_axis
axis = int(axis)
start_indices[axis] = start_index_int
limit_indices[axis] = limit_index_int
strides[axis] = int(stride)
return slice(operand, start_indices, limit_indices, strides)
def index_in_dim(operand: Array, index: int, axis: int = 0,
keepdims: bool = True) -> Array:
"""Convenience wrapper around slice to perform int indexing."""
index, axis = int(index), int(axis)
axis_size = operand.shape[axis]
wrapped_index = index + axis_size if index < 0 else index
if not 0 <= wrapped_index < axis_size:
msg = 'index {} is out of bounds for axis {} with size {}'
raise IndexError(msg.format(index, axis, axis_size))
result = slice_in_dim(operand, wrapped_index, wrapped_index + 1, 1, axis)
if keepdims:
return result
else:
return squeeze(result, (axis,))
def dynamic_slice_in_dim(operand: Array, start_index: Array,
slice_size: int, axis: int = 0) -> Array:
"""Convenience wrapper around dynamic_slice applying to one dimension."""
start_indices = [_zero(start_index)] * operand.ndim
slice_sizes = list(operand.shape)
axis = int(axis)
start_indices[axis] = start_index
slice_sizes[axis] = int(slice_size)
return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand: Array, index: Array, axis: int = 0,
keepdims: bool = True) -> Array:
"""Convenience wrapper around dynamic_slice to perform int indexing."""
result = dynamic_slice_in_dim(operand, index, 1, axis)
if keepdims:
return result
else:
return squeeze(result, (axis,))
def dynamic_update_slice_in_dim(operand: Array, update: Array,
start_index: Array, axis: int) -> Array:
"""Convenience wrapper around :func:`dynamic_update_slice` to update a slice
in a single ``axis``.
"""
axis = int(axis)
start_indices = [_zero(start_index)] * _ndim(operand)
start_indices[axis] = start_index
return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand: Array, update: Array, index: Array,
axis: int) -> Array:
"""Convenience wrapper around :func:`dynamic_update_slice` to update a slice
of size 1 in a single ``axis``.
"""
axis = int(axis)
if _ndim(update) != _ndim(operand):
assert _ndim(update) + 1 == _ndim(operand)
update = expand_dims(update, (axis,))
return dynamic_update_slice_in_dim(operand, update, index, axis)
def batch_matmul(lhs: Array, rhs: Array,
precision: PrecisionLike = None) -> Array:
"""Batch matrix multiplication."""
if _min(lhs.ndim, rhs.ndim) < 2:
raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
.format(lhs.ndim, rhs.ndim))
if lhs.ndim != rhs.ndim:
raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
.format(lhs.ndim, rhs.ndim))
lhs_contract = (lhs.ndim - 1,)
rhs_contract = (rhs.ndim - 2,)
batch = tuple(range(lhs.ndim - 2))
return dot_general(lhs, rhs, ((lhs_contract, rhs_contract), (batch, batch)),
precision=precision)
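# For example, with lhs of shape (8, 3, 4) and rhs of shape (8, 4, 5), the
# leading axis is treated as a batch dimension and the result has shape
# (8, 3, 5); this is just dot_general with the last axis of lhs contracted
# against the second-to-last axis of rhs.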
# These functions also exist in the XLA client library, but we treat them
# as non-primitive to maintain a smaller set of autodiff primitives.
def square(x: Array) -> Array:
r"""Elementwise square: :math:`x^2`."""
return integer_pow(x, 2)
def reciprocal(x: Array) -> Array:
r"""Elementwise reciprocal: :math:`1 \over x`."""
return integer_pow(x, -1)
def _upcast_fp16_for_computation(f):
@functools.wraps(f)
def f_wrapped(x):
dtype = _dtype(x)
if dtype == np.float16 or dtype == dtypes.bfloat16:
return convert_element_type(
f(convert_element_type(x, np.float32)), dtype)
return f(x)
return f_wrapped
@api.jit
@_upcast_fp16_for_computation
def tan(x: Array) -> Array:
r"""Elementwise tangent: :math:`\mathrm{tan}(x)`."""
return div(sin(x), cos(x))
@api.jit
def asin(x: Array) -> Array:
r"""Elementwise arc sine: :math:`\mathrm{asin}(x)`."""
if dtypes.issubdtype(_dtype(x), np.complexfloating):
return mul(_const(x, -1j), asinh(mul(_const(x, 1j), x)))
else:
return mul(_const(x, 2),
atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
@api.jit
def acos(x: Array) -> Array:
r"""Elementwise arc cosine: :math:`\mathrm{acos}(x)`."""
if dtypes.issubdtype(_dtype(x), np.complexfloating):
result = mul(_const(x, 1j), acosh(x))
# By convention, numpy chooses the branch with positive real part.
rpart = real(result)
return select(
gt(rpart, _const(rpart, 0)),
result,
neg(result)
)
else:
return select(
ne(x, _const(x, -1.0)),
mul(_const(x, 2),
atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),
full_like(x, np.pi))
def atan(x: Array) -> Array:
r"""Elementwise arc tangent: :math:`\mathrm{atan}(x)`."""
if dtypes.issubdtype(_dtype(x), np.complexfloating):
return mul(_const(x, -1j), atanh(mul(_const(x, 1j), x)))
else:
return atan2(x, _const(x, 1))
def sinh(x: Array) -> Array:
r"""Elementwise hyperbolic sine: :math:`\mathrm{sinh}(x)`."""
return sinh_p.bind(x)
def cosh(x: Array) -> Array:
r"""Elementwise hyperbolic cosine: :math:`\mathrm{cosh}(x)`."""
return cosh_p.bind(x)
def asinh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic sine: :math:`\mathrm{asinh}(x)`."""
return asinh_p.bind(x)
def acosh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic cosine: :math:`\mathrm{acosh}(x)`."""
return acosh_p.bind(x)
def atanh(x: Array) -> Array:
r"""Elementwise inverse hyperbolic tangent: :math:`\mathrm{atanh}(x)`."""
return atanh_p.bind(x)
# Add some methods to ShapedArray that rely on lax primitives
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose) # clobbered by lax_numpy
ShapedArray.reshape = core.aval_method(reshape) # clobbered by lax_numpy
def _iter(tracer):
if tracer.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
n = int(tracer.shape[0])
# return (index_in_dim(tracer, i, keepdims=False) for i in range(n))
return iter([index_in_dim(tracer, i, keepdims=False) for i in range(n)])
ShapedArray._iter = staticmethod(_iter)
# Add some ad handlers that use (or could use) lax primitives
def zeros_like_array(x):
return full_like(x, 0)
for t in itertools.chain(dtypes.python_scalar_dtypes.keys(), array_types,
[xla.DeviceArray, pxla.ShardedDeviceArray]):
ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[xla.DeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array
### primitives
_input_dtype = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)
_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: np.abs(np.zeros((), dtype)).dtype
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None,
multiple_results=False):
prim = Primitive(name)
prim.multiple_results = multiple_results
prim.def_impl(partial(xla.apply_primitive, prim))
prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule))
xla.translations[prim] = translation_rule or partial(standard_translate, name)
return prim
def standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs):
assert all(isinstance(arg, UnshapedArray) for arg in args), args
least_specialized = _max(
map(type, args), key=operator.attrgetter('array_abstraction_level'))
if least_specialized is ConcreteArray:
out_vals = prim.impl(*[x.val for x in args], **kwargs)
if not prim.multiple_results:
out_vals = [out_vals]
out_avals = safe_map(ConcreteArray, out_vals)
elif least_specialized is ShapedArray:
shapes, dtypes = shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)
if not prim.multiple_results:
shapes, dtypes = [shapes], [dtypes]
out_avals = safe_map(ShapedArray, shapes, dtypes)
elif least_specialized is UnshapedArray:
dtypes = dtype_rule(*args, **kwargs)
if not prim.multiple_results:
dtypes = [dtypes]
out_avals = safe_map(UnshapedArray, dtypes)
else:
raise TypeError(args, least_specialized)
if not prim.multiple_results:
return out_avals[0]
return out_avals
def standard_translate(name, c, *args, **kwargs):
xla_opname = ''.join(term.capitalize() for term in name.split('_'))
return getattr(xops, xla_opname)(*args, **kwargs)
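# As a concrete illustration of the naming convention used by
# standard_translate: a primitive named 'shift_right_logical' is translated by
# calling xops.ShiftRightLogical, i.e. the snake_case primitive name is mapped
# to the CamelCase XLA op of the same name.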
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
if not any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes):
msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
typename = str(np.dtype(aval.dtype).name)
accepted_typenames = (t.__name__ for t in accepted_dtypes)
raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
return result_dtype(aval.dtype)
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,
translation_rule=translation_rule)
batching.defvectorized(prim)
masking.defvectorized(prim)
return prim
standard_unop = partial(unop, _identity)
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
aval_dtypes = [aval.dtype for aval in avals]
for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
if not any(dtypes.issubdtype(aval_dtype, t) for t in types):
if aval_dtype is dtypes.float0:
raise TypeError(
f"Called {name} with a float0 at position {i}. "
"float0s do not support any operations by design, because they "
"are not compatible with non-trivial vector spaces. No implicit dtype "
"conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
"to cast a float0 array to a regular zeros array. \n"
"If you didn't expect to get a float0 you might have accidentally "
"taken a gradient with respect to an integer argument.")
else:
msg = ('{} does not accept dtype {} at position {}. '
'Accepted dtypes at position {} are subtypes of {}.')
typename = str(np.dtype(aval_dtype).name)
typenames = ', '.join(t.__name__ for t in types)
raise TypeError(msg.format(name, typename, i, i, typenames))
_check_same_dtypes(name, False, *aval_dtypes)
return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
shapes = [aval.shape for aval in avals if aval.shape]
if not shapes:
return ()
if len({len(shape) for shape in shapes}) != 1:
msg = '{} got arrays of different rank: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
result_shape = _try_broadcast_shapes(shapes)
if result_shape is None:
msg = '{} got incompatible shapes for broadcasting: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
return result_shape
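# For example, operand shapes (3, 1, 5) and (3, 4, 5) broadcast to (3, 4, 5);
# scalar operands (shape ()) are ignored by the filter above, and operands of
# differing nonzero rank are rejected rather than implicitly rank-promoted.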
def naryop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)
shape_rule = partial(_broadcasting_shape_rule, name)
prim = standard_primitive(shape_rule, dtype_rule, name,
translation_rule=translation_rule)
batching.defbroadcasting(prim)
masking.defnaryop(prim)
return prim
standard_naryop = partial(naryop, _input_dtype)
def _broadcast_translate(translate: Callable):
# Decorator for translation rules which adds explicit broadcasting of
# positional arguments. This is necessary only for a handful of primitives
# whose XLA implementations do not support broadcasting.
def _broadcast_array(array, array_shape, result_shape):
if array_shape == result_shape:
return array
bcast_dims = tuple(range(len(result_shape) - len(array_shape),
len(result_shape)))
result = xops.BroadcastInDim(array, result_shape, bcast_dims)
return result
def _broadcasted_translation_rule(c, *args, **kwargs):
shapes = [c.get_shape(arg).dimensions() for arg in args]
result_shape = broadcast_shapes(*shapes)
args = [_broadcast_array(arg, arg_shape, result_shape)
for arg, arg_shape in zip(args, shapes)]
return translate(c, *args, **kwargs)
return _broadcasted_translation_rule
# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs
# get two extra ops in them: a reshape and a broadcast_in_dim (or sometimes just
# a broadcast). but saving the shape info with the primitives isn't great either
# because then we can't trace these ops without shape data.
def _brcast(x, *others):
  # Used in JVP rules to make naryop broadcasting explicit for transposability.
# Requires shape info during jvp tracing, which isn't strictly necessary.
# We don't need full numpy broadcasting, but otherwise the logic is the same
# so we reuse the broadcast_shapes function after filtering out scalars.
shapes = tuple(filter(None, map(np.shape, (x,) + others)))
shape = shapes and broadcast_shapes(*shapes)
if np.shape(x) != shape:
return _brcast_to(x, shape)
else:
return x
def _brcast_to(x, shape):
x_shape = np.shape(x)
assert x_shape != shape
if x_shape:
assert len(x_shape) == len(shape)
broadcast_dimensions, = np.where(np.equal(x_shape, shape))
squeezed_dimensions, = np.where(np.not_equal(x_shape, shape))
squeezed = squeeze(x, squeezed_dimensions)
return broadcast_in_dim(squeezed, shape, broadcast_dimensions)
else:
return broadcast(x, shape)
_float = {np.floating}
_complex = {np.complexfloating}
_complex_elem_types = {np.float32, np.float64}
_int = {np.integer}
_bool = {np.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
_bool_or_int = _int | _bool
neg_p = standard_unop(_num, 'neg')
ad.deflinear(neg_p, lambda t: [neg(t)])
def _sign_translation_rule(c, x):
shape = c.get_shape(x)
dtype = shape.numpy_dtype()
if dtypes.issubdtype(dtype, np.unsignedinteger):
zero = xb.constant(c, np.array(0, dtype=dtype))
dims = c.get_shape(x).dimensions()
return xops.Select(xops.Eq(x, zero), xops.Broadcast(zero, dims),
xops.Broadcast(xb.constant(c, np.array(1, dtype=dtype)),
dims))
return xops.Sign(x)
sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)
ad.defjvp_zero(sign_p)
nextafter_p = standard_naryop(
[_float, _float], 'nextafter',
translation_rule=lambda c, x1, x2: xops.NextAfter(x1, x2))
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
round_p = standard_unop(_float, 'round')
ad.defjvp_zero(round_p)
is_finite_p = unop(_fixed_dtype(np.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))
iad.definverse(exp_p, lambda r, x: log(r))
# For exp_p it is more efficient to use the reconstructed output for the vjp
# rule instead of computing it again from the input.
iad.primitive_ivjps[exp_p] = lambda x, y, ct: [[log(y[0])], [ct[0] * y[0]]]
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
iad.definverse(log_p, lambda r, x: exp(r))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans))))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
atan2_p = standard_naryop([_float, _float], 'atan2')
ad.defjvp(atan2_p,
lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),
lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))
sinh_p = standard_unop(_float | _complex, 'sinh')
ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))
cosh_p = standard_unop(_float | _complex, 'cosh')
ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))
asinh_p = standard_unop(_float | _complex, 'asinh')
ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))
acosh_p = standard_unop(_float | _complex, 'acosh')
ad.defjvp(acosh_p,
lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))
atanh_p = standard_unop(_float | _complex, 'atanh')
ad.defjvp(atanh_p,
lambda g, x: mul(g, reciprocal((_one(x) - x) * (_one(x) + x))))
regularized_incomplete_beta_p = standard_naryop(
[_float, _float, _float], 'regularized_incomplete_beta',
translation_rule=_broadcast_translate(
partial(standard_translate, 'regularized_incomplete_beta')))
def betainc_gradx(g, a, b, x):
lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
partial_x = exp((b - 1) * log1p(-x) +
(a - 1) * log(x) - lbeta)
return partial_x * g
def betainc_grad_not_implemented(g, a, b, x):
raise ValueError("Betainc gradient with respect to a and b not supported.")
ad.defjvp(regularized_incomplete_beta_p,
betainc_grad_not_implemented,
betainc_grad_not_implemented,
betainc_gradx)
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))
digamma_p = standard_unop(_float, 'digamma')
igamma_p = standard_naryop(
[_float, _float], 'igamma',
translation_rule=_broadcast_translate(partial(standard_translate, 'igamma')))
igamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',
translation_rule=_broadcast_translate(partial(standard_translate,
'igamma_grad_a')))
def igamma_gradx(g, a, x):
return _brcast(g, a, x) * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))
def igamma_grada(g, a, x):
return _brcast(g, a, x) * igamma_grad_a(a, x)
ad.defjvp(igamma_p, igamma_grada, igamma_gradx)
igammac_p = standard_naryop(
[_float, _float], 'igammac',
translation_rule=_broadcast_translate(partial(standard_translate, 'igammac')))
def igammac_gradx(g, a, x):
return -igamma_gradx(g, a, x)
def igammac_grada(g, a, x):
return -igamma_grada(g, a, x)
ad.defjvp(igammac_p, igammac_grada, igammac_gradx)
random_gamma_grad_p = standard_naryop([_float, _float], 'random_gamma_grad',
translation_rule=_broadcast_translate(partial(standard_translate,
'random_gamma_grad')))
bessel_i0e_p = standard_unop(_float, 'bessel_i0e')
ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))
bessel_i1e_p = standard_unop(_float, 'bessel_i1e')
def _bessel_i1e_jvp(g, y, x):
eps = dtypes.finfo(_dtype(x)).eps
x_is_not_tiny = abs(x) > eps
safe_x = select(x_is_not_tiny, x, full_like(x, eps))
dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))
dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))
return g * dy_dx
ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / np.sqrt(np.pi)),
mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / np.sqrt(np.pi)),
mul(neg(g), exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, np.sqrt(np.pi) / 2.),
mul(g, exp(square(ans)))))
real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, np.zeros((), _dtype(t)))])
imag_p = unop(_complex_basetype, _complex, 'imag')
ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g)))
_complex_dtype = lambda dtype, *args: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))])
conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')
def _conj_transpose_rule(t, x, *, input_dtype):
assert ad.is_undefined_primal(x)
if dtypes.issubdtype(input_dtype, np.complexfloating):
return [conj(t)]
else:
return [real(t)]
xla.translations[conj_p] = lambda c, x, **kwargs: xops.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
abs_p = unop(_complex_basetype, _num, 'abs')
def _abs_jvp_rule(g, ans, x):
if _iscomplex(x):
return _maybe_real(mul(g, div(_maybe_conj(x),
_replace_zero(convert_element_type(ans, _dtype(x))))))
else:
return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))
rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
lambda g, ans, x:
mul(g, mul(_const(x, -0.5), pow(x, _const(x, -1.5)))))
pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')
def _pow_jvp_lhs(g, ans, x, y):
jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
return mul(_brcast(g, y), jac)
def _pow_jvp_rhs(g, ans, x, y):
return mul(_brcast(g, x), mul(log(_replace_zero(x)), ans))
ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)
def _integer_pow_dtype_rule(x, *, y):
dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
if y < 0 and dtypes.issubdtype(dtype, np.integer):
raise TypeError("Integers cannot be raised to negative powers, got "
f"integer_pow({x}, {y})")
return dtype
def _integer_pow_translation_rule(c, x, *, y):
if y == 0:
shape = c.get_shape(x)
return xb.constant(c, np.array(1, dtype=shape.numpy_dtype()))
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
if y & 1:
acc = x if acc is None else xops.Mul(acc, x)
y >>= 1
if y > 0:
x = xops.Mul(x, x)
return xops.Reciprocal(acc) if is_reciprocal else acc
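# The loop above is binary (square-and-multiply) exponentiation. For example,
# for y = 5 (binary 101) it sets acc = x, squares x twice (x**2 then x**4),
# and finally computes acc = x * x**4 = x**5, using three multiplications
# instead of the four needed by repeated multiplication.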
def _integer_pow_jvp(g, x, *, y):
return g if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))
integer_pow_p = standard_primitive(
_attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
not_p = standard_unop(_bool_or_int, 'not')
ad.defjvp_zero(not_p)
and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)
or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)
population_count_p = standard_unop(_int, 'population_count')
def _add_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases we
# instantiate zeros for convenience, it doesn't always hold.
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
return [t, t]
add_p = standard_naryop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose
def _add_inverse(r, x, y):
xr = r - y
yr = r - x
return xr, yr
iad.definverse(add_p, _add_inverse)
def _sub_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases
# we instantiate zeros for convenience, it doesn't always hold.
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
return [t, neg(t) if type(t) is not ad_util.Zero else ad_util.Zero]
sub_p = standard_naryop([_num, _num], 'sub')
ad.defjvp(sub_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose
mul_p = standard_naryop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)
def _mul_inverse(r, x, y):
xr = r / y
yr = r / x
return xr, yr
iad.definverse(mul_p, _mul_inverse)
def _div_transpose_rule(cotangent, x, y):
assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
res = ad_util.Zero if type(cotangent) is ad_util.Zero else div(cotangent, y)
return res, None
div_p = standard_naryop([_num, _num], 'div')
ad.defjvp(div_p,
lambda g, x, y: div(_brcast(g, y), y),
lambda g, x, y: mul(mul(neg(_brcast(g, x)), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule
rem_p = standard_naryop([_num, _num], 'rem')
ad.defjvp(rem_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y))))
def _broadcasting_select(c, which, x, y):
"""Wrapper around XLA `Select` that broadcasts its arguments."""
which_shape, x_shape, y_shape = (
c.get_shape(t).dimensions() for t in (which, x, y))
out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
len(out_shape)))
which = xops.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
x = xops.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
y = xops.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
return xops.Select(which, x, y)
def _minmax_translation_rule(c, x, y, *, minmax=None, cmp=None):
dtype = c.get_shape(x).numpy_dtype()
if dtypes.issubdtype(dtype, np.complexfloating):
rx = xops.Real(x)
ry = xops.Real(y)
return _broadcasting_select(
c, xops.Select(xops.Eq(rx, ry), cmp(xops.Imag(x), xops.Imag(y)),
cmp(rx, ry)),
x, y)
return minmax(x, y)
max_p: core.Primitive = standard_naryop(
[_any, _any], 'max', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Max, cmp=xops.Gt))
ad.defjvp2(max_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p: core.Primitive = standard_naryop(
[_any, _any], 'min', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Min, cmp=xops.Lt))
ad.defjvp2(min_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
eq_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def _convert_element_type_shape_rule(operand, *, new_dtype, old_dtype):
return operand.shape
def _convert_element_type_dtype_rule(operand, *, new_dtype, old_dtype):
return new_dtype
def _convert_element_type_translation_rule(c, operand, *, new_dtype, old_dtype):
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = xops.Real(operand)
new_etype = xla_client.dtype_to_etype(new_dtype)
return xops.ConvertElementType(operand, new_element_type=new_etype)
def _convert_element_type_transpose_rule(ct, operand, *, new_dtype, old_dtype):
if type(ct) is ad_util.Zero:
return [ad_util.Zero(operand.aval)]
elif core.primal_dtype_to_tangent_dtype(old_dtype) is dtypes.float0:
return [ad_util.Zero(ShapedArray(operand.aval.shape, dtype=dtypes.float0))]
else:
return [convert_element_type_p.bind(ct, new_dtype=old_dtype,
old_dtype=new_dtype)]
def _convert_element_type_jvp_rule(tangent, operand, *, new_dtype, old_dtype):
if core.primal_dtype_to_tangent_dtype(new_dtype) is dtypes.float0:
return ad_util.Zero(ShapedArray(tangent.shape, dtype=dtypes.float0))
else:
return convert_element_type_p.bind(tangent, new_dtype=new_dtype,
old_dtype=old_dtype)
convert_element_type_p = standard_primitive(
_convert_element_type_shape_rule, _convert_element_type_dtype_rule,
'convert_element_type', _convert_element_type_translation_rule)
ad.defjvp(convert_element_type_p, _convert_element_type_jvp_rule)
ad.primitive_transposes[convert_element_type_p] = _convert_element_type_transpose_rule
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
return operand.shape
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
return new_dtype
def _bitcast_convert_type_translation_rule(c, operand, *, new_dtype):
new_etype = xla_bridge.dtype_to_etype(new_dtype)
return xops.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
_bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
'bitcast_convert_type', _bitcast_convert_type_translation_rule)
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
def _conv_general_dilated_shape_rule(
lhs: ShapedArray, rhs: ShapedArray, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, **unused_kwargs) -> Tuple[int, ...]:
assert type(dimension_numbers) is ConvDimensionNumbers
if len(lhs.shape) != len(rhs.shape):
msg = ("conv_general_dilated lhs and rhs must have the same number of "
"dimensions, but got {} and {}.")
raise ValueError(msg.format(lhs.shape, rhs.shape))
if not feature_group_count > 0:
msg = ("conv_general_dilated feature_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(feature_group_count))
lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
quot, rem = divmod(lhs_feature_count, feature_group_count)
if rem:
msg = ("conv_general_dilated feature_group_count must divide lhs feature "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(feature_group_count, lhs_feature_count))
if quot != rhs.shape[dimension_numbers.rhs_spec[1]]:
msg = ("conv_general_dilated lhs feature dimension size divided by "
"feature_group_count must equal the rhs input feature dimension "
"size, but {} // {} != {}.")
raise ValueError(msg.format(lhs_feature_count, feature_group_count,
rhs.shape[dimension_numbers.rhs_spec[1]]))
if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
msg = ("conv_general_dilated rhs output feature dimension size must be a "
"multiple of feature_group_count, but {} is not a multiple of {}.")
raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
feature_group_count))
if not batch_group_count > 0:
msg = ("conv_general_dilated batch_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(batch_group_count))
lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]
if lhs_batch_count % batch_group_count != 0:
msg = ("conv_general_dilated batch_group_count must divide lhs batch "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(batch_group_count, lhs_batch_count))
if rhs.shape[dimension_numbers.rhs_spec[0]] % batch_group_count:
msg = ("conv_general_dilated rhs output feature dimension size must be a "
"multiple of batch_group_count, but {} is not a multiple of {}.")
raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
batch_group_count))
if batch_group_count > 1 and feature_group_count > 1:
msg = ("At most one of batch_group_count and feature_group_count may be > "
"1, got batch_group_count={} and feature_group_count={}")
raise ValueError(msg.format(batch_group_count, feature_group_count))
lhs_perm, rhs_perm, out_perm = dimension_numbers
lhs_trans = _dilate_shape(np.take(lhs.shape, lhs_perm), lhs_dilation)
rhs_trans = _dilate_shape(np.take(rhs.shape, rhs_perm), rhs_dilation)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,
batch_group_count)
return tuple(np.take(out_trans, np.argsort(out_perm)))
def _conv_general_dilated_dtype_rule(
lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, **unused_kwargs):
return naryop_dtype_rule(_input_dtype, [_float | _complex, _float | _complex],
'conv_general_dilated', lhs, rhs)
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
# Understanding the convolution transpose rules:
# Ignoring the spatial dimensions, let m = batch, j = input feature,
# k = output feature.
#
# Convolution computes the following contraction:
# Forward: [m, j] [j, k] -> [m, k]
#
# The transposes are similar to the rules for transposing a matmul:
# LHS transpose: [m, k] [k, j] -> [m, j]
# RHS transpose: [j, m] [m, k] -> [j, k]
#
# With feature grouping, we have the following signatures:
# Forward: [m, gj] [j, gk] -> [m, gk]
# LHS transpose: [m, gk] [k, gj] -> [m, gj]
# --> implemented as feature grouping after transposing the group from the
# kernel input features to the kernel output features.
# RHS transpose: [gj, m] [m, gk] -> [j, gk]
# --> which is batch grouping.
#
# With batch grouping, we have the following signatures:
# Forward: [gm,j] [j,gk]->[m,gk]
# LHS transpose: [m, gk][gk, j] -> [gm, j]
# --> implemented as feature grouping with transposing the group on the kernel
# and the output.
# RHS transpose: [j, gm][m, gk] -> [j, gk]
# --> which is feature grouping.
def _conv_general_dilated_transpose_lhs(
g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
assert batch_group_count == 1 or feature_group_count == 1
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
t_rhs_spec = _conv_spec_transpose(rhs_spec)
if feature_group_count > 1:
# in addition to switching the dims in the spec, need to move the feature
# group axis into the transposed rhs's output feature dim
rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
elif batch_group_count > 1:
rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
feature_group_count = batch_group_count
trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
padding = _conv_general_vjp_lhs_padding(
np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
revd_weights = rev(rhs, rhs_sdims)
out = conv_general_dilated(
g, revd_weights, window_strides=lhs_dilation, padding=padding,
lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=1, precision=precision)
if batch_group_count > 1:
out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)
out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)
return out
def _conv_general_dilated_transpose_rhs(
g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers: ConvDimensionNumbers, feature_group_count: int,
batch_group_count: int, lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
if np.size(g) == 0:
# Avoids forming degenerate convolutions where the RHS has spatial size 0.
return ad_util.Zero
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
assert batch_group_count == 1 or feature_group_count == 1
if batch_group_count > 1:
feature_group_count = batch_group_count
batch_group_count = 1
elif feature_group_count > 1:
batch_group_count = feature_group_count
feature_group_count = 1
trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
padding = _conv_general_vjp_rhs_padding(
np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
return conv_general_dilated(
lhs, g, window_strides=rhs_dilation, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count, precision=precision)
def _conv_general_dilated_translation_rule(
c, lhs, rhs, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, precision, expand_complex_convolutions, **unused_kwargs):
assert type(dimension_numbers) is ConvDimensionNumbers
dimension_numbers = _conv_general_proto(dimension_numbers)
precision_config = _precision_config(precision)
dtype = c.get_shape(lhs).numpy_dtype()
conv = lambda x, y: xops.ConvGeneralDilated(
x, y, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config=precision_config)
if expand_complex_convolutions and np.issubdtype(dtype, np.complexfloating):
    # We use a trick for complex multiplication due to Gauss, which uses three
    # multiplications and five additions instead of the naive method's four
    # multiplications and two additions.
    # https://en.wikipedia.org/wiki/Multiplication_algorithm#Complex_multiplication_algorithm
    #
    # This performance win comes with a trade-off in accuracy, especially in
    # cases where the real and imaginary parts differ hugely in magnitude. The
    # relative error bound (e.g. 2^-24 for float32) is relative to the maximum
    # of the real and imaginary parts of the result, rather than being
    # satisfied by the real and imaginary parts independently.
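    # Concretely, writing lhs = a + bi and rhs = c + di:
    #   k1 = (a + b) * c,  k2 = a * (d - c),  k3 = b * (c + d)
    #   k1 - k3 = a*c - b*d  (real part),  k1 + k2 = a*d + b*c  (imaginary part)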
lhs_real, lhs_imag = xops.Real(lhs), xops.Imag(lhs)
rhs_real, rhs_imag = xops.Real(rhs), xops.Imag(rhs)
k1 = conv(xops.Add(lhs_real, lhs_imag), rhs_real)
k2 = conv(lhs_real, xops.Sub(rhs_imag, rhs_real))
k3 = conv(lhs_imag, xops.Add(rhs_real, rhs_imag))
return xops.Complex(xops.Sub(k1, k3), xops.Add(k1, k2))
return conv(lhs, rhs)
def _conv_general_dilated_batch_rule(
batched_args, batch_dims, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count, precision, **unused_kwargs):
assert batch_group_count == 1 or feature_group_count == 1
lhs, rhs = batched_args
lhs_bdim, rhs_bdim = batch_dims
lhs_spec, rhs_spec, out_spec = dimension_numbers
if lhs_bdim is not None and rhs_bdim is not None:
assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
if batch_group_count > 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
batch_group_count *= lhs.shape[lhs_bdim]
else:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
feature_group_count *= lhs.shape[lhs_bdim]
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(
new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
return out, out_spec[1]
elif lhs_bdim is not None:
if batch_group_count == 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
else:
new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),
batch_group_count, lhs)
new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),
lhs_spec[0] + 1,
new_lhs)
new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
elif rhs_bdim is not None:
if feature_group_count == 1 and batch_group_count == 1:
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
return out, out_spec[1]
else:
# groups need to be outermost, so we need to factor them out of the
# rhs output feature dim, then factor the batch dim into the remaining rhs
# output feature dim, then put groups back in. We do something
# similar on the output. An alternative which would require more FLOPs but
# fewer reshapes would be to broadcast lhs.
group_count = (feature_group_count if feature_group_count > 1
else batch_group_count)
new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
group_count, rhs)
new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
rhs_spec[0] + 1,
new_rhs)
new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], group_count, out)
out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
return out, out_spec[1]
def _masked(padded_value, logical_shape, dimensions, value=0):
"""
Sets all padding to the given value (default is 0) in the given dimensions.
All values outside the logical shape are considered padding.
"""
if len(dimensions) == 0:
return padded_value
masks = [broadcasted_iota(np.int32, padded_value.shape, d) < logical_shape[d]
for d in dimensions]
mask_intersection = masks[0]
for mask in masks[1:]:
mask_intersection &= mask
return select(mask_intersection, padded_value, full_like(padded_value, value))
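# For example (purely illustrative shapes): with a padded value of shape (5,),
# a logical shape of (3,), and dimensions=(0,), the entries at indices 3 and 4
# are replaced by `value` while the first three entries are kept.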
def _conv_general_dilated_masking_rule(
padded_vals, logical_shapes, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
lhs_shape, rhs_shape, precision):
lhs, rhs = padded_vals
logical_lhs_shape, logical_rhs_shape = logical_shapes
o, i, *window_dimensions = dimension_numbers.rhs_spec
assert (np.all(np.take(rhs.shape, window_dimensions)
== np.take(logical_rhs_shape, window_dimensions))), \
"Conv filter masking not yet implemented."
n, c, *padded_dimensions = dimension_numbers.lhs_spec
return conv_general_dilated(
_masked(lhs, logical_lhs_shape, padded_dimensions),
_masked(rhs, logical_rhs_shape, (i,)),
window_strides=window_strides, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
conv_general_dilated_p = standard_primitive(
_conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
'conv_general_dilated', partial(_conv_general_dilated_translation_rule,
expand_complex_convolutions=False))
# TODO(b/161124619, b/161126248): XLA does not support complex convolution on
# CPU or GPU; on these backends, lower complex convolutions away.
xla.backend_specific_translations['cpu'][conv_general_dilated_p] = partial(
_conv_general_dilated_translation_rule, expand_complex_convolutions=True)
xla.backend_specific_translations['gpu'][conv_general_dilated_p] = partial(
_conv_general_dilated_translation_rule, expand_complex_convolutions=True)
ad.defbilinear(conv_general_dilated_p,
_conv_general_dilated_transpose_lhs,
_conv_general_dilated_transpose_rhs)
batching.primitive_batchers[conv_general_dilated_p] = \
_conv_general_dilated_batch_rule
masking.masking_rules[conv_general_dilated_p] = \
_conv_general_dilated_masking_rule
def _reshape_axis_into(src, dst, x):
perm = [i for i in range(x.ndim) if i != src]
perm.insert(dst, src)
new_shape = list(np.delete(x.shape, src))
new_shape[dst] *= x.shape[src]
return reshape(x, new_shape, perm)
def _reshape_axis_out_of(src, size1, x):
shape = list(x.shape)
size2, ragged = divmod(shape[src], size1)
assert not ragged
shape[src:src+1] = [size1, size2]
return reshape(x, shape)
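# For example (illustrative shapes): for x of shape (2, 3, 4),
# _reshape_axis_into(0, 1, x) folds axis 0 into axis 1, giving shape (3, 8);
# _reshape_axis_out_of(1, 2, y) on a y of shape (3, 8) splits axis 1 back out,
# giving shape (3, 2, 4). These are the reshapes used by the grouped
# convolution rules above.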
def _precision_config(precision):
if precision is not None:
config = xla_client.PrecisionConfig()
if isinstance(precision, tuple):
config.operand_precision.extend(precision)
else:
config.operand_precision.extend((precision, precision))
return config
return None
def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, lhs.ndim))
for d in (lhs_contracting, lhs_batch)):
msg = ("dot_general requires lhs dimension numbers to be nonnegative and "
"less than the number of axes of the lhs value, got "
f"lhs_batch of {lhs_batch} and lhs_contracting of {lhs_contracting} "
f"for lhs of rank {lhs.ndim}")
raise TypeError(msg)
if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, rhs.ndim))
for d in (rhs_contracting, rhs_batch)):
msg = ("dot_general requires rhs dimension numbers to be nonnegative and "
"less than the number of axes of the rhs value, got "
f"rhs_batch of {rhs_batch} and rhs_contracting of {rhs_contracting} "
f"for rhs of rank {rhs.ndim}")
raise TypeError(msg)
if len(lhs_batch) != len(rhs_batch):
msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_contracting_set, lhs_batch_set = set(lhs_contracting), set(lhs_batch)
rhs_contracting_set, rhs_batch_set = set(rhs_contracting), set(rhs_batch)
if len(lhs_batch_set) != len(lhs_batch):
msg = ("dot_general requires lhs batch dimensions to be distinct, got "
f"lhs_batch {lhs_batch}.")
raise TypeError(msg)
if len(rhs_batch_set) != len(rhs_batch):
msg = ("dot_general requires rhs batch dimensions to be distinct, got "
f"rhs_batch {rhs_batch}.")
raise TypeError(msg)
if len(lhs_contracting_set) != len(lhs_contracting):
msg = ("dot_general requires lhs contracting dimensions to be distinct, "
f"got lhs_contracting {lhs_contracting}.")
raise TypeError(msg)
if len(rhs_contracting_set) != len(rhs_contracting):
msg = ("dot_general requires rhs contracting dimensions to be distinct, "
f"got rhs_contracting {rhs_contracting}.")
raise TypeError(msg)
if lhs_contracting_set & lhs_batch_set:
msg = ("dot_general requires lhs batch dimensions to be disjoint from "
"contracting dimensions, got lhs_batch {} and lhs_contracting {}.")
raise TypeError(msg.format(lhs_batch, lhs_contracting))
if rhs_contracting_set & rhs_batch_set:
msg = ("dot_general requires rhs batch dimensions to be disjoint from "
"contracting dimensions, got rhs_batch {} and rhs_contracting {}.")
raise TypeError(msg.format(rhs_batch, rhs_contracting))
lhs_batch_shape = np.take(lhs.shape, lhs_batch)
rhs_batch_shape = np.take(rhs.shape, rhs_batch)
if not np.all(np.equal(lhs_batch_shape, rhs_batch_shape)):
msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}.")
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
lhs_contracting_shape = np.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = np.take(rhs.shape, rhs_contracting)
if not np.all(np.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = ("dot_general requires contracting dimensions to have the same "
"shape, got {} and {}.")
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
batch_shape = tuple(lhs_batch_shape)
lhs_contract_or_batch = tuple(sorted(tuple(lhs_contracting) + tuple(lhs_batch)))
lhs_tensored_shape = tuple(np.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(sorted(tuple(rhs_contracting) + tuple(rhs_batch)))
rhs_tensored_shape = tuple(np.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
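# For example (illustrative shapes): with lhs of shape (2, 3, 4), rhs of shape
# (4, 5, 6), and dimension_numbers (((2,), (0,)), ((), ())), the contracted
# size-4 axes disappear and the result shape is batch () + lhs tensored (2, 3)
# + rhs tensored (5, 6) = (2, 3, 5, 6).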
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision):
return naryop_dtype_rule(_input_dtype, [_any, _any], 'dot_general', lhs, rhs)
def _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,
swap_ans=False):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
x_kept = remaining(range(x_ndim), x_contract, x_batch)
y_kept = remaining(range(y.ndim), y_contract, y_batch)
if swap_ans:
ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
else:
ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
dims = ((ans_y, y_kept), (ans_batch, y_batch))
x_contract_sorted_by_y = list(np.take(x_contract, np.argsort(y_contract)))
out_axes = np.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
return transpose(dot_general(g, y, dims, precision=precision),
tuple(out_axes))
def _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))
return _dot_general_transpose_lhs(
g, x, dimension_numbers=swapped_dimension_numbers, precision=precision,
swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
precision):
# there are three kinds of dimensions in a dot_general:
# - contraction dimensions appear in lhs and rhs but not the result
# - batch dimensions appear in lhs, rhs, and result
# - tensor product dimensions appear in the result and one of lhs or rhs
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
lhs, rhs = batched_args
lbd, rbd = batch_dims
assert lbd is not None or rbd is not None
def bump_dims(dims, b):
return tuple(np.add(dims, np.greater_equal(dims, b)))
if lbd is not None and rbd is not None:
# adding a batch dimension
lhs_batch = (lbd,) + bump_dims(lhs_batch, lbd)
rhs_batch = (rbd,) + bump_dims(rhs_batch, rbd)
lhs_contract = bump_dims(lhs_contract, lbd)
rhs_contract = bump_dims(rhs_contract, rbd)
result_batch_dim = 0
else:
# adding a tensor product dimension
if lbd is not None:
other = tuple(d for d in range(lhs.ndim)
if d not in lhs_batch and d not in lhs_contract)
result_batch_dim = (len(lhs_batch) + sum(np.less(other, lbd)))
lhs_batch = bump_dims(lhs_batch, lbd)
lhs_contract = bump_dims(lhs_contract, lbd)
else:
other = tuple(d for d in range(rhs.ndim)
if d not in rhs_batch and d not in rhs_contract)
result_batch_dim = (lhs.ndim - len(lhs_contract) +
sum(np.less(other, rbd)))
rhs_batch = bump_dims(rhs_batch, rbd)
rhs_contract = bump_dims(rhs_contract, rbd)
new_dimension_numbers = ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))
batched_out = dot_general(lhs, rhs, new_dimension_numbers,
precision=precision)
return batched_out, int(result_batch_dim)
def _dot_using_sum_of_products(lhs, rhs, *, dimension_numbers):
contract_dims, batch_dims = dimension_numbers
lhs_contract_dims, rhs_contract_dims = contract_dims
lhs_batch_dims, rhs_batch_dims = batch_dims
lhs_noncontract_dims = tuple(sorted(
set(range(np.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))
rhs_noncontract_dims = tuple(sorted(
set(range(np.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))
lhs = transpose(lhs,
lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)
rhs = transpose(rhs,
rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)
lhs_start_expand = len(lhs_batch_dims) + len(lhs_noncontract_dims)
lhs_end_expand = lhs_start_expand + len(rhs_noncontract_dims)
lhs = expand_dims(lhs, tuple(range(lhs_start_expand, lhs_end_expand)))
rhs_start_expand = len(lhs_batch_dims)
rhs_end_expand = rhs_start_expand + len(lhs_noncontract_dims)
rhs = expand_dims(rhs, tuple(range(rhs_start_expand, rhs_end_expand)))
out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +
len(rhs_noncontract_dims))
op_product = bitwise_and if lhs.dtype == np.bool_ else mul
op_sum = bitwise_or if lhs.dtype == np.bool_ else add
return reduce(op_product(lhs, rhs), _zero(lhs), op_sum,
tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))
def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision):
dtype = c.get_shape(lhs).numpy_dtype()
if dtypes.issubdtype(dtype, np.inexact):
return xops.DotGeneral(lhs, rhs,
xc.make_dot_dimension_numbers(dimension_numbers),
precision_config=_precision_config(precision))
else:
# TODO(b/134526360): XLA doesn't support bool or integer dots, so we emit a
# sum of products instead.
translation = xla.lower_fun(_dot_using_sum_of_products,
multiple_results=False)
return translation(c, lhs, rhs, dimension_numbers=dimension_numbers)
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
precision):
lhs, rhs = padded_vals
# Only need to mask off contraction dims of one side - we mask the lhs here
# but this is arbitrary. Could check the sizes of lhs and rhs and mask
# whichever is smallest.
lhs_shape, _ = logical_shapes
(lhs_contract, _), _ = dimension_numbers
return dot_general(_masked(lhs, lhs_shape, lhs_contract),
rhs, dimension_numbers, precision=precision)
dot_general_p = standard_primitive(_dot_general_shape_rule,
_dot_general_dtype_rule, 'dot_general',
_dot_general_translation_rule)
ad.defbilinear(dot_general_p,
_dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
def _broadcast_shape_rule(operand, sizes):
_check_shapelike('broadcast', 'sizes', sizes)
return tuple(sizes) + operand.shape
def _broadcast_batch_rule(batched_args, batch_dims, *, sizes):
operand, = batched_args
bdim, = batch_dims
new_bdim = None if bdim is None else bdim + len(sizes)
return broadcast(operand, sizes), new_bdim
broadcast_p = standard_primitive(
_broadcast_shape_rule, _input_dtype, 'broadcast')
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule
def _broadcast_in_dim_impl(operand, *, shape, broadcast_dimensions):
if type(operand) is np.ndarray:
operand = _device_put_raw(operand)
if xla.type_is_device_array(operand) and np.all(
np.equal(operand.shape, np.take(shape, broadcast_dimensions))):
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
aval = ShapedArray(shape, _dtype(operand))
if operand._lazy_expr is None:
lazy_expr = lazy.broadcast(lazy.array(operand), shape, broadcast_dimensions)
else:
lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions)
return xla.make_device_array(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(broadcast_in_dim_p, operand, shape=shape,
broadcast_dimensions=broadcast_dimensions)
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
_check_shapelike('broadcast_in_dim', 'shape', shape)
_check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
broadcast_dimensions)
operand_ndim = np.ndim(operand)
if operand_ndim != len(broadcast_dimensions):
msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim))
if len(shape) < operand_ndim:
    msg = ('broadcast_in_dim target broadcast shape must have equal or higher rank '
           'than the operand shape; got operand ndim {} and target broadcast ndim {}.')
raise TypeError(msg.format(operand_ndim, len(shape)))
if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
'dimensions, got {} for operand ndim {} and shape {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))
if any(operand.shape[i] != shape[broadcast_dimensions[i]] and
operand.shape[i] != 1 for i in range(operand_ndim)):
msg = (
"broadcast_in_dim operand dimension sizes must either be 1, or be "
"equal to their corresponding dimensions in the target broadcast "
"shape; got operand of shape {}, target broadcast shape {}, "
"broadcast_dimensions {} ")
raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))
if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or
tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):
msg = ("broadcast_in_dim broadcast_dimensions must be strictly increasing; "
"got broadcast_dimensions {}")
raise TypeError(msg.format(broadcast_dimensions))
return shape
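# For example (illustrative shapes): broadcasting an operand of shape (3, 1)
# to shape (2, 3, 4) with broadcast_dimensions (1, 2) is valid: axis 0 of the
# operand maps to output axis 1 (sizes 3 == 3) and axis 1 maps to output axis
# 2 (size 1 broadcasts to 4).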
def _broadcast_in_dim_transpose_rule(t, *, shape, broadcast_dimensions):
axes = tuple(np.delete(range(len(shape)), broadcast_dimensions))
return [_reduce_sum(t, axes)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,
broadcast_dimensions):
operand, = batched_args
bdim, = batch_dims
new_operand = batching.moveaxis(operand, bdim, 0)
new_shape = (operand.shape[bdim],) + shape
new_broadcast_dimensions = (0,) + tuple(np.add(1, broadcast_dimensions))
return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0
broadcast_in_dim_p = standard_primitive(
_broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
broadcast_in_dim_p.def_impl(_broadcast_in_dim_impl)
ad.deflinear(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
m = "clamp requires min.shape == operand.shape or min.shape == (), got {}."
raise TypeError(m.format(min.shape))
if max.shape and max.shape != operand.shape:
m = "clamp requires max.shape == operand.shape or max.shape == (), got {}."
raise TypeError(m.format(max.shape))
return operand.shape
_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],
'clamp')
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
ad.defjvp(clamp_p,
lambda g, min, operand, max:
select(bitwise_and(gt(min, operand), lt(min, max)),
_brcast(g, operand), _zeros(operand)),
lambda g, min, operand, max:
select(bitwise_and(gt(operand, min), lt(operand, max)),
g, _zeros(operand)),
lambda g, min, operand, max:
select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
batching.defbroadcasting(clamp_p)
def _concatenate_shape_rule(*operands, **kwargs):
dimension = kwargs.pop('dimension')
if not operands:
msg = "concatenate expects at least one operand, got 0."
raise TypeError(msg)
if not all(isinstance(operand, UnshapedArray) for operand in operands):
msg = "All objects to concatenate must be arrays, got {}."
op = next(op for op in operands if not isinstance(op, UnshapedArray))
raise TypeError(msg.format(type(op)))
if len({operand.ndim for operand in operands}) != 1:
msg = "Cannot concatenate arrays with different ranks, got {}."
raise TypeError(msg.format(", ".join(str(o.ndim) for o in operands)))
if not 0 <= dimension < operands[0].ndim:
msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
raise TypeError(msg.format(dimension, ", ".join([str(o.shape) for o in operands])))
shapes = [operand.shape[:dimension] + operand.shape[dimension+1:]
for operand in operands]
if not shapes[:-1] == shapes[1:]:
msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
"other than the one being concatenated: concatenating along "
"dimension {} for shapes {}.")
shapes = [operand.shape for operand in operands]
raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
concat_size = sum(o.shape[dimension] for o in operands)
ex_shape = operands[0].shape
return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
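# For example, concatenating operands of shapes (2, 3) and (2, 5) along
# dimension 1 yields shape (2, 8); all non-concatenated dimensions must match.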
def _concatenate_dtype_rule(*operands, **kwargs):
_check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
return operands[0].dtype
def _concatenate_translation_rule(c, *operands, **kwargs):
dimension = kwargs.pop('dimension')
return xops.ConcatInDim(c, operands, dimension)
def _concatenate_transpose_rule(t, *operands, dimension):
operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape
for o in operands]
if type(t) is ad_util.Zero:
return ad_util.Zero
else:
limit_points = np.cumsum([shape[dimension] for shape in operand_shapes])
starts = np.zeros((len(operands), t.ndim), dtype=int)
starts[1:, dimension] = limit_points[:-1]
limits = np.tile(t.shape, (len(operands), 1))
limits[:, dimension] = limit_points
return [slice(t, start, limit) if ad.is_undefined_primal(o) else None
for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, *, dimension):
size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
if bdim is not None)
operands = [batching.moveaxis(op, bdim, 0) if bdim is not None
else broadcast(op, (size,))
for op, bdim in zip(batched_args, batch_dims)]
return concatenate(operands, dimension + 1), 0
# The concatenate_p masking rule requires use of a while-loop construct and so
# is defined in lax_control_flow.py
concatenate_p = standard_primitive(
_concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
_concatenate_translation_rule)
ad.deflinear(concatenate_p, _concatenate_transpose_rule)
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
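# Illustrative example of the transpose rule: concatenating shapes (2, 3) and
# (2, 5) along dimension 1 gives shape (2, 8); the cotangent is split back by
# slicing at the cumulative limit points [3, 8] along that dimension, producing
# one slice per undefined-primal operand (and None for the others).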
def _pad_dtype_rule(operand, padding_value, *, padding_config):
if operand.dtype != padding_value.dtype:
msg = "pad operand and padding_value must be same dtype: got {} and {}."
raise TypeError(msg.format(operand.dtype, padding_value.dtype))
return _input_dtype(operand, padding_value)
def _pad_shape_rule(operand, padding_value, *, padding_config):
del padding_value
if not len(padding_config) == np.ndim(operand):
raise ValueError("length of padding_config must equal the number of axes "
f"of operand, got padding_config {padding_config} "
f"for operand shape {np.shape(operand)}")
if not all(i >= 0 for _, _, i in padding_config):
raise ValueError("interior padding in padding_config must be nonnegative, "
f"got padding_config {padding_config}")
return tuple(l + h + d + (_max(0, d - 1) * i if i > 0 else 0)
for (l, h, i), d in zip(padding_config, np.shape(operand)))
def _pad_transpose(t, operand, padding_value, *, padding_config):
if type(t) is ad_util.Zero:
return ad_util.Zero
lo, hi, interior = zip(*padding_config)
total = lambda x: _reduce_sum(x, list(range(t.ndim)))
def t_op():
unpad_config = safe_zip(np.negative(lo), np.negative(hi),
np.zeros_like(interior))
unpadded = pad(t, np.array(0., t.dtype), unpad_config)
return slice(unpadded, np.zeros_like(lo), unpadded.shape, np.add(interior, 1))
t_operand = t_op() if ad.is_undefined_primal(operand) else None
t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None
return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, *, padding_config):
operand, padding_value = batched_args
operand_bdim, padding_value_bdim = batch_dims
if padding_value_bdim is None:
assert operand_bdim is not None
padding_config = list(padding_config)
padding_config.insert(operand_bdim, (0, 0, 0))
return pad(operand, padding_value, padding_config), operand_bdim
else:
raise NotImplementedError # loop and stack
def _pad_translation_rule(c, operand, padding_value, *, padding_config):
return xops.Pad(operand, padding_value,
xc.make_padding_config(padding_config))
def _pad_masking_rule(padded_vals, logical_shapes, padding_config):
operand, padding_value = padded_vals
shape, _ = logical_shapes
out = pad(operand, padding_value, padding_config)
out_shape = [lo + shape[i] * (interior + 1)
for i, (lo, hi, interior) in enumerate(padding_config)]
padded_dims = [i for i, config in enumerate(padding_config)
if config != (0, 0, 0)]
return _masked(out, out_shape, padded_dims, padding_value)
pad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',
translation_rule=_pad_translation_rule)
ad.deflinear(pad_p, _pad_transpose)
ad.primitive_transposes[pad_p] = _pad_transpose
batching.primitive_batchers[pad_p] = _pad_batch_rule
masking.masking_rules[pad_p] = _pad_masking_rule
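# Illustrative example of the pad shape rule: for an axis of size d with
# padding config (lo, hi, interior) the output size is
# lo + hi + d + max(0, d - 1) * interior, e.g. d=3 with config (1, 2, 1) gives
# 1 + 2 + 3 + 2 = 8. The transpose rule undoes this with a negative edge pad
# followed by a strided slice that skips over the interior padding.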
# The squeeze primitive exists for the benefit of masking and other
# transformations that need to keep track of axis identity.
# For example, consider reshaping a 2D array with shape (1, N) into a 1D array
# with shape (N,). This results in the following JAXpr:
#   reshape[ dimensions=None new_sizes=(N,) ]
# For N > 1, we can match up the output array axis with the second axis of the
# input. But for N = 1, it is not clear how axes match up: all we know from the
# JAXpr is that we are reshaping from (1, 1) to (1,).
# In contrast, squeeze[ dimensions=(0,) ] is unambiguous.
def squeeze(array: Array, dimensions: Tuple[int, ...]) -> Array:
"""Squeeze any number of size 1 dimensions from an array."""
ndim = np.ndim(array)
dimensions = tuple(sorted(canonicalize_axis(i, ndim) for i in dimensions))
if not dimensions:
return array
return squeeze_p.bind(array, dimensions=dimensions)
def _squeeze_dtype_rule(operand, *, dimensions):
return operand.dtype
def _squeeze_shape_rule(operand, *, dimensions):
return _compute_squeeze_shape(np.shape(operand), dimensions)
def _compute_squeeze_shape(shape, dimensions):
dims_set = set(dimensions)
if len(dims_set) != len(dimensions):
raise ValueError(f"dimensions are not unique: {dimensions}")
if not all(0 <= d < len(shape) for d in dims_set):
raise ValueError(f"dimensions outside range [0, ndim): {dimensions}")
if any(shape[d] != 1 for d in dimensions):
raise ValueError(
"cannot select an axis to squeeze out which has size not equal to "
f"one, got shape={shape} and dimensions={dimensions}")
return tuple(s for i, s in enumerate(shape) if i not in dims_set)
def _squeeze_translation_rule(c, arg, *, dimensions):
new_shape = _compute_squeeze_shape(c.get_shape(arg).dimensions(), dimensions)
return xops.Reshape(arg, new_shape)
def _squeeze_transpose_rule(t, operand, *, dimensions):
assert ad.is_undefined_primal(operand)
return [expand_dims(t, dimensions)]
def _squeeze_batch_rule(batched_args, batch_dims, *, dimensions):
operand, = batched_args
bdim, = batch_dims
operand = batching.moveaxis(operand, bdim, 0)
dimensions = tuple(np.add(1, dimensions))
return squeeze(operand, dimensions=dimensions), 0
squeeze_p = standard_primitive(_squeeze_shape_rule, _squeeze_dtype_rule,
'squeeze', _squeeze_translation_rule)
ad.deflinear2(squeeze_p, _squeeze_transpose_rule)
batching.primitive_batchers[squeeze_p] = _squeeze_batch_rule
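# Example: squeeze(x, dimensions=(0, 2)) maps shape (1, 3, 1) to (3,). The
# transpose rule reinserts the squeezed axes with expand_dims, and the batch
# rule shifts `dimensions` by one after moving the batch axis to the front.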
def expand_dims(array: Array, dimensions: Tuple[int, ...]) -> Array:
"""Insert any number of size 1 dimensions into an array."""
ndim_out = np.ndim(array) + len(dimensions)
dims_set = frozenset(canonicalize_axis(i, ndim_out) for i in dimensions)
result_shape = list(np.shape(array))
for i in sorted(dims_set):
result_shape.insert(i, 1)
broadcast_dims = [i for i in range(ndim_out) if i not in dims_set]
return broadcast_in_dim(array, result_shape, broadcast_dims)
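# Example: expand_dims(x, dimensions=(0, 2)) maps shape (3,) to (1, 3, 1); it is
# expressed as broadcast_in_dim with broadcast_dimensions=(1,), i.e. the
# original axis lands at output position 1.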
# We have a nonstandard reshape impl so that we can be lazy about data movement.
def _reshape_impl(operand, *, new_sizes, dimensions):
old_sizes = np.shape(operand)
if xla.type_is_device_array(operand) and dimensions is None:
bcast_dims = _is_singleton_reshape(old_sizes, new_sizes)
if bcast_dims is not None:
aval = ShapedArray(new_sizes, operand.dtype)
if operand._lazy_expr is None:
lazy_expr = lazy.broadcast(lazy.array(operand), new_sizes, bcast_dims)
else:
lazy_expr = lazy.broadcast(operand._lazy_expr, new_sizes, bcast_dims)
return xla.make_device_array(aval, operand._device, lazy_expr, operand.device_buffer)
return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,
dimensions=dimensions)
def _is_singleton_reshape(old, new):
# A singleton reshape is one where only singleton dimensions are added. We
# want to detect them because they can be expressed as (lazy) broadcasts.
old, new = iter(old), iter(new)
d1, d2 = next(old, None), next(new, None)
bcast_dims = []
i = 0
while True:
if d1 is d2 is None:
return bcast_dims
elif d1 == d2:
bcast_dims.append(i)
i += 1
d1, d2 = next(old, None), next(new, None)
elif d2 == 1:
i += 1
d2 = next(new, None)
else:
return None
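# Examples: _is_singleton_reshape((3, 4), (3, 1, 4, 1)) returns [0, 2], the
# output positions of the original axes, so the reshape can be done as a lazy
# broadcast; _is_singleton_reshape((2, 3), (6,)) returns None and the reshape
# falls through to the XLA primitive.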
def _reshape_shape_rule(operand, *, new_sizes, dimensions):
if not np.all(np.greater_equal(new_sizes, 0)):
    msg = 'reshape new_sizes must all be nonnegative, got {}.'
raise TypeError(msg.format(new_sizes))
if prod(np.shape(operand)) != prod(new_sizes):
msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
raise TypeError(msg.format(new_sizes, np.shape(operand)))
if dimensions is not None:
if set(dimensions) != set(range(np.ndim(operand))):
msg = ('reshape dimensions must be a permutation of operand dimensions, '
'got dimensions {} for shape {}.')
raise TypeError(msg.format(dimensions, np.shape(operand)))
return tuple(new_sizes)
def _reshape_dtype_rule(operand, *, new_sizes, dimensions):
return operand.dtype
def _reshape_translation_rule(c, operand, *, new_sizes, dimensions):
if dimensions is None:
return xops.Reshape(operand, new_sizes)
else:
return xops.Reshape(operand, dimensions, new_sizes)
def _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):
assert ad.is_undefined_primal(operand)
if dimensions is None:
return [reshape(t, operand.aval.shape)]
else:
return [transpose(reshape(t, np.take(operand.aval.shape, dimensions)),
np.argsort(dimensions))]
def _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):
operand, = batched_args
bdim, = batch_dims
operand = batching.moveaxis(operand, bdim, 0)
if dimensions is not None:
dimensions = (0,) + tuple(np.add(1, dimensions))
return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0
def _reshape_masking_rule(padded_args, logical_shapes, polymorphic_shapes,
new_sizes, dimensions):
operand, = padded_args
old_shape, = polymorphic_shapes
def is_poly(size): return type(size) is masking.Poly and not size.is_constant
def merge_const_sizes(shape):
"""Merges all nonpolymorphic sizes into the previous polymorphic size."""
poly_dims = [i for i, size in enumerate(shape) if is_poly(size)]
return [prod(shape[start:stop])
for start, stop in zip([0] + poly_dims, poly_dims + [len(shape)])]
if merge_const_sizes(old_shape) != merge_const_sizes(new_sizes):
raise NotImplementedError(
"Reshape on padded dimensions causing fragmentation is not supported.")
return reshape(operand,
new_sizes=masking.padded_shape_as_value(new_sizes),
dimensions=dimensions)
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
'reshape', _reshape_translation_rule)
reshape_p.def_impl(_reshape_impl)
ad.deflinear2(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
masking.masking_rules[reshape_p] = _reshape_masking_rule
def _rev_shape_rule(operand, *, dimensions):
_check_shapelike('rev', 'dimensions', dimensions)
if len(set(dimensions)) != len(dimensions):
msg = 'rev dimensions must be unique, got {}.'
raise TypeError(msg.format(dimensions))
if dimensions and not _max(dimensions) < operand.ndim:
msg = ('rev dimensions must all be less than operand ndim, got dimensions '
'{} for operand ndim {}.')
raise TypeError(msg.format(dimensions, operand.ndim))
return operand.shape
def _rev_batch_rule(batched_args, batch_dims, *, dimensions):
operand, = batched_args
bdim, = batch_dims
new_dimensions = [i + 1 if i >= bdim else i for i in dimensions]
return rev(operand, new_dimensions), bdim
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
ad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule
def _transpose_impl(operand, *, permutation):
if xla.type_is_device_array(operand):
if operand._lazy_expr is None:
lazy_expr = lazy.transpose(lazy.array(operand), permutation)
else:
lazy_expr = lazy.transpose(operand._lazy_expr, permutation)
aval = ShapedArray(lazy_expr.shape, operand.dtype)
return xla.make_device_array(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(transpose_p, operand, permutation=permutation)
def _transpose_shape_rule(operand, *, permutation):
if not isinstance(permutation, (tuple, list, np.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(np.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, *, permutation):
operand, = batched_args
bdim, = batch_dims
perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)
return transpose(operand, perm), 0
def _transpose_masking_rule(padded_vals, logical_shapes, permutation):
return transpose(*padded_vals, permutation=permutation)
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
'transpose')
transpose_p.def_impl(_transpose_impl)
ad.deflinear(transpose_p,
lambda t, permutation: [transpose(t, np.argsort(permutation))])
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
masking.masking_rules[transpose_p] = _transpose_masking_rule
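# Note on the cotangent rule above: transposition is linear and its adjoint is
# transposition by the inverse permutation, computed with np.argsort; e.g.
# permutation (2, 0, 1) has inverse (1, 2, 0).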
def _select_shape_rule(pred, on_true, on_false):
if on_true.shape != on_false.shape:
msg = "select on_true and on_false must have the same shape, got {} and {}."
raise TypeError(msg.format(on_true.shape, on_false.shape))
if pred.shape and pred.shape != on_true.shape:
msg = ("select pred must be scalar or have the same shape as on_true and "
"on_false, got pred shape {} for on_true and on_false of shape {}.")
raise TypeError(msg.format(pred.shape, on_true.shape))
return on_true.shape
def _select_dtype_rule(pred, on_true, on_false):
_check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
if not dtypes.issubdtype(pred.dtype, np.bool_):
msg = "select pred must be boolean type, got {}."
raise TypeError(msg.format(pred.dtype))
return on_true.dtype
def _select_transpose_rule(t, pred, on_true, on_false):
assert not ad.is_undefined_primal(pred)
if type(t) is ad_util.Zero:
return ad_util.Zero
else:
zeros = full_like(t, 0)
return [None,
select(pred, t, zeros) if ad.is_undefined_primal(on_true) else None,
select(pred, zeros, t) if ad.is_undefined_primal(on_false) else None]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
pred, on_true, on_false, = batched_args
pred_bdim, ot_bdim, of_bdim = batch_dims
size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
if i is not None)
# avoid transposes and some broadcasts in special cases
if pred_bdim == ot_bdim == of_bdim:
if np.shape(pred) == np.shape(on_true):
return select(pred, on_true, on_false), pred_bdim
else:
# vmapped function had a scalar pred with nonscalar args
assert np.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])
return select(pred, on_true, on_false), pred_bdim
elif np.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:
if ot_bdim == of_bdim:
return select(pred, on_true, on_false), ot_bdim
elif np.shape(on_true) == np.shape(on_false):
on_false = batching.moveaxis(on_false, of_bdim, ot_bdim)
return select(pred, on_true, on_false), ot_bdim
pred = batching.bdim_at_front(pred, pred_bdim, size) if np.shape(pred) else pred
if not np.shape(on_true) == np.shape(on_false) == ():
on_true = batching.bdim_at_front(on_true, ot_bdim, size)
on_false = batching.bdim_at_front(on_false, of_bdim, size)
assert np.shape(on_true) == np.shape(on_false)
if 0 < np.ndim(pred) < np.ndim(on_true):
# vmapped function had a scalar pred with nonscalar args
assert np.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [0])
if np.ndim(pred) > np.ndim(on_true):
assert np.ndim(on_true) == 0
on_true = broadcast(on_true, pred.shape)
on_false = broadcast(on_false, pred.shape)
return select(pred, on_true, on_false), 0
def _select_masking_rule(padded_vals, logical_shapes):
pred_shape, true_shape, false_shape = [
masking.padded_shape_as_value(val.shape) for val in padded_vals]
assert np.array_equal(pred_shape, true_shape)
assert np.array_equal(pred_shape, false_shape)
return select(*padded_vals)
def _select_jvp(primals, tangents):
pred, on_true, on_false = primals
_, on_true_dot, on_false_dot = tangents
out = select(pred, on_true, on_false)
if type(on_true_dot) is ad_util.Zero:
out_dot = select(pred, _zeros(on_false_dot), on_false_dot)
elif type(on_false_dot) is ad_util.Zero:
out_dot = select(pred, on_true_dot, _zeros(on_true_dot))
else:
out_dot = select(pred, on_true_dot, on_false_dot)
return out, out_dot
select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')
ad.primitive_jvps[select_p] = _select_jvp
ad.primitive_transposes[select_p] = _select_transpose_rule
batching.primitive_batchers[select_p] = _select_batch_rule
masking.masking_rules[select_p] = _select_masking_rule
def _slice_shape_rule(operand, *, start_indices, limit_indices, strides):
_check_shapelike("slice", "start_indices", start_indices)
_check_shapelike("slice", "limit_indices", limit_indices)
if operand.ndim != len(start_indices):
msg = ("slice start_indices must have length equal to the number of "
"dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(limit_indices):
msg = ("slice limit_indices must have the same length as start_indices, "
"got start_inidices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if (not masking.is_polymorphic(limit_indices) and
not masking.is_polymorphic(operand.shape) and
not np.all(np.less_equal(limit_indices, operand.shape))):
msg = ("slice limit_indices must be less than or equal to operand shape, "
"got limit_indices {} for operand shape {}.")
raise TypeError(msg.format(limit_indices, operand.shape))
if not np.all(np.greater_equal(start_indices, 0)):
msg = ("slice start_indices must be greater than or equal to zero, "
"got start_indices of {}.")
raise TypeError(msg.format(start_indices))
if (not masking.is_polymorphic(limit_indices) and
not np.all(np.greater_equal(limit_indices, start_indices))):
msg = ("slice limit_indices must be greater than or equal to start_indices,"
" got start_indices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if strides is None:
strides = np.ones(operand.ndim, np.int32)
else:
_check_shapelike("slice", "strides", strides)
if len(strides) != operand.ndim:
msg = ("slice strides must have length equal to the number of dimensions "
"of the operand, got strides {} for operand shape {}.")
raise TypeError(msg.format(strides, operand.shape))
if not np.all(np.greater(strides, 0)):
msg = "slice strides must be positive, got {}"
raise TypeError(msg.format(strides))
diff = np.subtract(limit_indices, start_indices)
# Not np.divmod since Poly.__rdivmod__ is ignored by NumPy, breaks poly stride
return tuple(q + (r > 0) for q, r in map(divmod, diff, strides))
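# Illustrative example of the output-size formula above: slicing an axis with
# start=1, limit=8, strides=3 selects indices 1, 4 and 7; divmod(8 - 1, 3)
# gives (2, 1), hence 2 + 1 = 3 output elements.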
def _slice_translation_rule(c, operand, *, start_indices, limit_indices,
strides):
return xops.Slice(operand, start_indices, limit_indices,
strides or [1] * len(start_indices))
def _slice_transpose_rule(t, operand, *, start_indices, limit_indices, strides):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if strides is None or np.all(np.equal(strides, 1)):
pads = zip(start_indices, np.subtract(operand_shape, limit_indices),
(0,) * len(start_indices))
else:
real_limits = np.add(
start_indices,
np.where(np.array(t.shape) == 0, 0,
np.add(1, np.multiply(np.subtract(t.shape, 1), strides))))
pads = safe_zip(start_indices, np.subtract(operand_shape, real_limits),
np.subtract(strides, 1))
result = pad(t, _const(t, 0), pads)
assert result.shape == operand_shape, (
f"result.shape={result.shape} operand_shape={operand_shape}")
return [result]
def _slice_batching_rule(batched_args, batch_dims, *, start_indices,
limit_indices, strides):
operand, = batched_args
bdim, = batch_dims
new_start_indices = list(start_indices)
new_start_indices.insert(bdim, 0)
new_limit_indices = list(limit_indices)
new_limit_indices.insert(bdim, operand.shape[bdim])
if strides is None:
new_strides = None
else:
new_strides = list(strides)
new_strides.insert(bdim, 1)
out = slice(operand, new_start_indices, new_limit_indices, new_strides)
return out, bdim
def _slice_masking_rule(
padded_vals, logical_shapes, start_indices, limit_indices, strides):
operand, = padded_vals
strides = masking.padded_shape_as_value(strides) if strides else None
return slice(operand,
start_indices=masking.padded_shape_as_value(start_indices),
limit_indices=masking.padded_shape_as_value(limit_indices),
strides=strides)
slice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',
_slice_translation_rule)
ad.deflinear2(slice_p, _slice_transpose_rule)
batching.primitive_batchers[slice_p] = _slice_batching_rule
masking.masking_rules[slice_p] = _slice_masking_rule
def _dynamic_slice_shape_rule(operand, *start_indices, slice_sizes):
if operand.ndim != len(start_indices):
msg = ("dynamic_slice start_indices must have length equal to the number "
"of dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(slice_sizes):
msg = ("dynamic_slice slice_sizes must have the same length as "
"start_indices, got start_inidices length {} and slice_sizes {}.")
raise TypeError(msg.format(len(start_indices), slice_sizes))
if not np.all(np.less_equal(slice_sizes, operand.shape)):
msg = ("slice slice_sizes must be less than or equal to operand shape, "
"got slice_sizes {} for operand shape {}.")
raise TypeError(msg.format(slice_sizes, operand.shape))
if not np.all(np.greater_equal(slice_sizes, 0)):
msg = ("slice slice_sizes must be greater than or equal to zero, "
"got slice_sizes of {}.")
raise TypeError(msg.format(slice_sizes))
return tuple(slice_sizes)
def _dynamic_slice_dtype_rule(operand, *start_indices, slice_sizes):
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, np.integer) for i in start_indices):
msg = ("index arguments to dynamic_slice must be integers of the same "
"type, got: {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_slice_translation_rule(c, operand, *start_indices, slice_sizes):
return xops.DynamicSlice(operand, start_indices, slice_sizes)
def _dynamic_slice_jvp(primals, tangents, *, slice_sizes):
tangent_out = tangents[0]
if type(tangent_out) is not ad_util.Zero:
tangent_out = dynamic_slice(tangent_out, primals[1:], slice_sizes)
return dynamic_slice(primals[0], primals[1:], slice_sizes), tangent_out
def _dynamic_slice_transpose_rule(t, operand, *start_indices, slice_sizes):
assert ad.is_undefined_primal(operand)
assert all(not ad.is_undefined_primal(s) for s in start_indices)
operand_shape, operand_dtype = operand.aval.shape, operand.aval.dtype
if config.omnistaging_enabled:
zeros = full(operand_shape, 0, operand_dtype)
else:
zeros = full(operand_shape, tie_in(t, _zero(t)))
if type(t) is ad_util.Zero:
return [zeros] + [None] * len(start_indices)
else:
return ([dynamic_update_slice(zeros, t, start_indices)] +
[None] * len(start_indices))
def _batch_dynamic_slice_indices(indices, bdims):
if len(indices) == 0:
return np.array([], 'int32'), None
size = next((x.shape[i] for x, i in zip(indices, bdims) if i is not None), -1)
if size < 0:
return concatenate([broadcast(i, (1,)) for i in indices], 0), None
indices = concatenate(
[broadcast_in_dim(x, (size, 1),
broadcast_dimensions=((0,) if i is not None else ()))
for x, i in zip(indices, bdims)],
dimension=1)
return indices, 0
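# _batch_dynamic_slice_indices stacks the scalar start indices into one integer
# array: shape (batch_size, n) with batch dimension 0 when any index is
# batched, or shape (n,) with batch dimension None when none are. The gather-
# and scatter-based batching rules below consume this array as a single
# indexing operand.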
def _dynamic_slice_batching_rule(batched_args, batch_dims, *, slice_sizes):
# A dynamic slice is a special case of gather; we can delegate to the gather
# batching rule.
# TODO(phawkins): consider removing dynamic_slice entirely and using gather
# always.
operand, *start_indices = batched_args
operand_bd, *start_idx_bds = batch_dims
operand_shape = (operand.shape if operand_bd is batching.not_mapped
else tuple(np.delete(operand.shape, operand_bd)))
dims = tuple(range(len(operand_shape)))
dnums = GatherDimensionNumbers(offset_dims=dims, collapsed_slice_dims=(),
start_index_map=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_indices, start_idx_bds)
return _gather_batching_rule(
[operand, index], [operand_bd, index_bdim], dimension_numbers=dnums,
slice_sizes=slice_sizes)
dynamic_slice_p = standard_primitive(
_dynamic_slice_shape_rule, _dynamic_slice_dtype_rule, 'dynamic_slice',
_dynamic_slice_translation_rule)
ad.primitive_jvps[dynamic_slice_p] = _dynamic_slice_jvp # TODO
ad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule
batching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule
def _dynamic_update_slice_shape_rule(operand, update, *start_indices):
if operand.ndim != update.ndim:
msg = ("dynamic_update_slice update must have the same rank as operand, "
"got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
if operand.ndim != len(start_indices):
msg = ("dynamic_update_slice start_indices must have length equal to the "
"rank of operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if not np.all(np.less_equal(update.shape, operand.shape)):
msg = ("dynamic_update_slice update shape must be smaller than operand "
"shape, got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
return operand.shape
def _dynamic_update_slice_dtype_rule(operand, update, *start_indices):
_check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, np.integer) for i in start_indices):
msg = ("index arguments to dynamic_update_slice must be integers of the "
"same type, got {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_update_slice_jvp(primals, tangents):
operand, update = primals[:2]
start_indices = primals[2:]
g_operand, g_update = tangents[:2]
val_out = dynamic_update_slice(operand, update, start_indices)
if type(g_operand) is ad_util.Zero and type(g_update) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_update = ad.instantiate_zeros(g_update)
tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)
return val_out, tangent_out
def _dynamic_update_slice_transpose_rule(t, operand, update, *start_indices):
assert all(not ad.is_undefined_primal(x) for x in start_indices)
if ad.is_undefined_primal(update):
update_shape = update.aval.shape
else:
update_shape = update.shape
dus = dynamic_update_slice
ds = dynamic_slice
zeros = _zeros(t, shape=update_shape)
operand_t = dus(t, zeros, start_indices) if ad.is_undefined_primal(operand) else None
update_t = ds(t, start_indices, update_shape) if ad.is_undefined_primal(update) else None
return [operand_t, update_t] + [None] * len(start_indices)
def _dynamic_update_slice_translation_rule(c, operand, update, *start_indices):
return xops.DynamicUpdateSlice(operand, update, start_indices)
def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
# A dynamic update slice is a special case of scatter; we can delegate to the
# scatter batching rule.
# TODO(phawkins): consider removing dynamic_update_slice entirely and using
# scatter always.
operand, update, *start_idx = batched_args
operand_bd, update_bd, *start_idx_bd = batch_dims
update_shape = (np.shape(update) if update_bd is batching.not_mapped
else tuple(np.delete(np.shape(update), update_bd)))
dims = tuple(range(len(update_shape)))
dnums = ScatterDimensionNumbers(update_window_dims=dims,
inserted_window_dims=(),
scatter_dims_to_operand_dims=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)
return _scatter_batching_rule(
scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
update_jaxpr=None, update_consts=None, dimension_numbers=dnums,
indices_are_sorted=True, unique_indices=True)
dynamic_update_slice_p = standard_primitive(
_dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,
'dynamic_update_slice', _dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
_dynamic_update_slice_transpose_rule
batching.primitive_batchers[dynamic_update_slice_p] = \
_dynamic_update_slice_batching_rule
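# Transpose intuition for dynamic_update_slice(operand, update, *start_indices):
# the cotangent w.r.t. `operand` is t with zeros written into the updated
# window (that window's value came from `update`), and the cotangent w.r.t.
# `update` is the dynamic_slice of t at the same start indices.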
def _gather_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is GatherDimensionNumbers
proto = xla_client.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _gather_dtype_rule(operand, start_indices, **kwargs):
if not dtypes.issubdtype(start_indices.dtype, np.integer):
raise ValueError("start_indices must have an integer type")
return dtypes.canonicalize_dtype(operand.dtype)
_rank = lambda arr: len(arr.shape)
def _is_sorted(dims, op_name, name):
for i in range(1, len(dims)):
if dims[i] < dims[i - 1]:
raise TypeError(f"{name} in {op_name} op must be sorted; got {dims}")
def _sorted_dims_in_range(dims, rank, op_name, name):
if len(dims) == 0:
return
invalid_dim = None
if dims[0] < 0:
invalid_dim = dims[0]
elif dims[-1] >= rank:
invalid_dim = dims[-1]
  if invalid_dim is not None:
raise TypeError(f"Invalid {name} set in {op_name} op; valid range is "
f"[0, {rank}); got: {invalid_dim}.")
def _no_duplicate_dims(dims, op_name, name):
if len(set(dims)) != len(dims):
raise TypeError(f"{name} in {op_name} op must not repeat; got: {dims}.")
def _gather_shape_rule(operand, start_indices, *, dimension_numbers,
slice_sizes):
"""Validates the well-formedness of the arguments to Gather.
The code implements the checks based on the detailed operation semantics of
XLA's `Gather <https://www.tensorflow.org/xla/operation_semantics#gather>`_
operator and following the outline of the implementation of
ShapeInference::InferGatherShape in TensorFlow.
"""
offset_dims = dimension_numbers.offset_dims
collapsed_slice_dims = dimension_numbers.collapsed_slice_dims
start_index_map = dimension_numbers.start_index_map
# Note: in JAX, index_vector_dim is always computed as below, cf. the
# documentation of the GatherDimensionNumbers class.
index_vector_dim = _rank(start_indices) - 1
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if _rank(start_indices) < index_vector_dim or index_vector_dim < 0:
raise TypeError(f"Gather index leaf dimension must be within [0, rank("
f"start_indices) + 1). rank(start_indices) is "
f"{_rank(start_indices)} and gather index leaf dimension "
f"is {index_vector_dim}.")
expanded_start_indices_shape = list(start_indices.shape)
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if len(expanded_start_indices_shape) == index_vector_dim:
expanded_start_indices_shape.append(1)
# Start ValidateGatherDimensions
  # In the error messages output by XLA, "offset_dims" is called "Output window
  # dimensions". For consistency's sake, our error messages stick to
  # "offset_dims".
_is_sorted(offset_dims, "gather", "offset_dims")
_no_duplicate_dims(offset_dims, "gather", "offset_dims")
output_offset_dim_count = len(offset_dims)
output_shape_rank = len(offset_dims) + _rank(start_indices) - 1
for i in range(output_offset_dim_count):
offset_dim = offset_dims[i]
if offset_dim < 0 or offset_dim >= output_shape_rank:
raise TypeError(f"Offset dimension {i} in gather op is out of bounds; "
f"got {offset_dim}, but should have been in "
f"[0, {output_shape_rank})")
if len(start_index_map) != start_indices.shape[index_vector_dim]:
raise TypeError(f"Gather op has {len(start_index_map)} elements in "
f"start_index_map and the bound of dimension "
f"index_vector_dim={index_vector_dim} of start_indices is "
f"{start_indices.shape[index_vector_dim]}. These two "
f"numbers must be equal.")
for i in range(len(start_index_map)):
operand_dim_for_start_index_i = start_index_map[i]
if (operand_dim_for_start_index_i < 0 or
operand_dim_for_start_index_i >= _rank(operand)):
raise TypeError(f"Invalid start_index_map; domain is "
f"[0, {_rank(operand)}), got: "
f"{i}->{operand_dim_for_start_index_i}.")
_no_duplicate_dims(start_index_map, "gather", "start_index_map")
# _is_sorted and _sorted_dims_in_range are checked in the opposite order
# compared to the XLA implementation. In cases when the input is not sorted
# AND there are problematic collapsed_slice_dims, the error message will thus
# be different.
_is_sorted(collapsed_slice_dims, "gather", "collapsed_slice_dims")
_sorted_dims_in_range(collapsed_slice_dims, _rank(operand), "gather",
"collapsed_slice_dims")
_no_duplicate_dims(collapsed_slice_dims, "gather", "collapsed_slice_dims")
# End ValidateGatherDimensions
if _rank(operand) != len(slice_sizes):
raise TypeError(f"Gather op must have one slice size for every input "
f"dimension; got: len(slice_sizes)={len(slice_sizes)}, "
f"input_shape.rank={_rank(operand)}")
if len(slice_sizes) != len(offset_dims) + len(collapsed_slice_dims):
raise TypeError(f"All components of the offset index in a gather op must "
f"either be a offset dimension or explicitly collapsed; "
f"got len(slice_sizes)={len(slice_sizes)}, "
f"output_slice_sizes={offset_dims}, collapsed_slice_dims="
f"{collapsed_slice_dims}.")
for i in range(len(slice_sizes)):
slice_size = slice_sizes[i]
corresponding_input_size = operand.shape[i]
if slice_size < 0 or slice_size > corresponding_input_size:
raise TypeError(f"Slice size at index {i} in gather op is out of range, "
f"must be within [0, {corresponding_input_size + 1}), "
f"got {slice_size}.")
for i in range(len(collapsed_slice_dims)):
bound = slice_sizes[collapsed_slice_dims[i]]
if bound > 1:
raise TypeError(f"Gather op can only collapse slice dims with bound 1 "
f"or 0, but bound is {bound} for index "
f"{collapsed_slice_dims[i]} at position {i}.")
expanded_start_indices_shape.pop(index_vector_dim)
start_indices_shape = iter(expanded_start_indices_shape)
slice_sizes = iter(np.delete(slice_sizes, collapsed_slice_dims))
return tuple(next(slice_sizes) if i in offset_dims
else next(start_indices_shape) for i in range(output_shape_rank))
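# Illustrative example: gathering rows of a (10, 5) operand with start_indices
# of shape (3, 1), offset_dims=(1,), collapsed_slice_dims=(0,),
# start_index_map=(0,) and slice_sizes=(1, 5) yields output shape (3, 5):
# batch dimensions come from start_indices (minus the trailing index-vector
# dimension) and offset dimensions come from the non-collapsed slice sizes.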
def _gather_translation_rule(c, operand, start_indices, *, dimension_numbers,
slice_sizes):
indices_shape = c.get_shape(start_indices)
return xops.Gather(
operand, start_indices,
_gather_dimensions_proto(indices_shape, dimension_numbers), slice_sizes,
indices_are_sorted=False)
def _gather_jvp_rule(g, operand, start_indices, *, dimension_numbers,
slice_sizes):
return gather(g, start_indices, dimension_numbers, slice_sizes)
def _gather_transpose_rule(t, operand, start_indices, *, dimension_numbers,
slice_sizes):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if type(t) is ad_util.Zero:
return ad_util.Zero
if config.omnistaging_enabled:
zeros = full(operand_shape, _zero(t))
else:
zeros = full(operand_shape, tie_in(t, _zero(t)))
scatter_dnums = ScatterDimensionNumbers(
update_window_dims=dimension_numbers.offset_dims,
inserted_window_dims=dimension_numbers.collapsed_slice_dims,
scatter_dims_to_operand_dims=dimension_numbers.start_index_map)
out = scatter_add(zeros, start_indices, t, scatter_dnums,
indices_are_sorted=False,
unique_indices=False)
return [out, ad_util.Zero.from_value(start_indices)]
def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
slice_sizes):
operand, start_indices = batched_args
operand_bdim, start_indices_bdim = batch_dims
if operand_bdim is not None and start_indices_bdim is None:
operand = batching.moveaxis(operand, operand_bdim, 0)
slice_sizes = (operand.shape[0],) + slice_sizes
offset_dims = (0,) + tuple(np.add(1, dimension_numbers.offset_dims))
collapsed_slice_dims = tuple(np.add(1, dimension_numbers.collapsed_slice_dims))
start_index_map = tuple(np.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
elif operand_bdim is None and start_indices_bdim is not None:
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
offset_dims = tuple(np.add(1, dimension_numbers.offset_dims))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,
start_index_map=dimension_numbers.start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
else:
# move batch dimensions to the front to simplify logic
operand = batching.moveaxis(operand, operand_bdim, 0)
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
# Example: user code had start_indices shape (3, 4, 5), and we have to deal
# with start_indices shape (7, 3, 4, 5). We transform that to a
# start_indices of shape (7, 3, 4, 6) where we concatenated an iota that
# counts along our batch dimension to the front of the ndindex.
count_shape = list(start_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)
start_indices = concatenate([counts, start_indices], len(count_shape) - 1)
slice_sizes = (1,) + slice_sizes
collapsed_slice_dims = (0,) + tuple(np.add(1, dimension_numbers.collapsed_slice_dims))
offset_dims = tuple(np.add(1, dimension_numbers.offset_dims))
start_index_map = (0,) + tuple(np.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
gather_p = standard_primitive(
_gather_shape_rule, _gather_dtype_rule, 'gather',
_gather_translation_rule)
ad.defjvp(gather_p, _gather_jvp_rule, None)
ad.primitive_transposes[gather_p] = _gather_transpose_rule
batching.primitive_batchers[gather_p] = _gather_batching_rule
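# Note: gather is linear in `operand`; its transpose above is a scatter-add of
# the cotangent into a zero-initialized array using the corresponding
# ScatterDimensionNumbers, while the cotangent w.r.t. the integer start_indices
# is a symbolic zero.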
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is ScatterDimensionNumbers
proto = xla_client.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):
if not dtypes.issubdtype(scatter_indices.dtype, np.integer):
raise ValueError("scatter_indices must have an integer type")
_check_same_dtypes("scatter", False, operand.dtype, updates.dtype)
return dtypes.canonicalize_dtype(operand.dtype)
def _scatter_shape_rule(operand, scatter_indices, updates, *, update_jaxpr,
update_consts, dimension_numbers, indices_are_sorted,
unique_indices):
"""Validates the well-formedness of the ``dimension_numbers`` argument to
Scatter.
The code implements the checks based on the detailed operation semantics of
XLA's `Scatter <https://www.tensorflow.org/xla/operation_semantics#scatter>`_
operator and following the outline of the implementation of
ShapeInference::InferScatterShape in TensorFlow.
"""
update_window_dims = dimension_numbers.update_window_dims
inserted_window_dims = dimension_numbers.inserted_window_dims
scatter_dims_to_operand_dims = dimension_numbers.scatter_dims_to_operand_dims
# Note: in JAX, index_vector_dim is always computed as below, cf. the
# documentation of the ScatterDimensionNumbers class.
index_vector_dim = _rank(scatter_indices) - 1
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if _rank(scatter_indices) < index_vector_dim or index_vector_dim < 0:
raise TypeError(f"Scatter index leaf dimension must be within [0, "
f"rank(scatter_indices) + 1). rank(scatter_indices) is "
f"{_rank(scatter_indices)} and scatter index leaf "
f"dimension is {index_vector_dim}.")
expanded_scatter_indices_shape = list(scatter_indices.shape)
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if len(expanded_scatter_indices_shape) == index_vector_dim:
expanded_scatter_indices_shape.append(1)
expected_updates_rank = (len(expanded_scatter_indices_shape) - 1 +
len(update_window_dims))
if _rank(updates) != expected_updates_rank:
raise TypeError(f"Updates tensor must be of rank {expected_updates_rank}; "
f"got {_rank(updates)}.")
# Validate update_window_dims
_is_sorted(update_window_dims, "scatter", "update_window_dims")
_no_duplicate_dims(update_window_dims, "scatter", "update_window_dims")
_sorted_dims_in_range(update_window_dims, _rank(updates), "scatter",
"update_window_dims")
# Validate inserted_window_dims
_is_sorted(inserted_window_dims, "scatter", "inserted_window_dims")
_no_duplicate_dims(inserted_window_dims, "scatter", "inserted_window_dims")
_sorted_dims_in_range(inserted_window_dims, _rank(operand), "scatter",
"inserted_window_dims")
# Validate window_size
window_size = len(update_window_dims) + len(inserted_window_dims)
if _rank(operand) != window_size:
raise TypeError(f"Scatter op has window of size {window_size}; doesn't "
f"match operand of rank {_rank(operand)}.")
# Validate scatter_dims_to_operand_dims
if (len(scatter_dims_to_operand_dims) !=
scatter_indices.shape[index_vector_dim]):
raise TypeError(f"Scatter op has {len(scatter_dims_to_operand_dims)} "
f"elements in scatter_dims_to_operand_dims and the bound "
f"of dimension index_vector_dim={index_vector_dim} of "
f"scatter_indices is "
f"{scatter_indices.shape[index_vector_dim]}. These two "
f"numbers must be equal")
for i in range(len(scatter_dims_to_operand_dims)):
dim = scatter_dims_to_operand_dims[i]
if dim < 0 or dim >= _rank(operand):
raise TypeError(f"Invalid scatter_dims_to_operand_dims mapping; domain "
f"is [0, {_rank(operand)}), got: {i}->{dim}.")
_no_duplicate_dims(scatter_dims_to_operand_dims, "scatter",
"scatter_dims_to_operand_dims")
max_update_slice_sizes = [operand.shape[i] for i in range(len(operand.shape))
                            if i not in set(inserted_window_dims)]
for i in range(len(update_window_dims)):
update_window_dim = update_window_dims[i]
if updates.shape[update_window_dim] > max_update_slice_sizes[i]:
raise TypeError(f"Bounds of the window dimensions of updates must not "
f"exceed the bounds of the corresponding dimensions of "
f"operand. For dimension {update_window_dim}, updates "
f"bound is {updates.shape[update_window_dim]}, operand "
f"bound is {max_update_slice_sizes[i]}.")
update_scatter_dims = [dim for dim in range(_rank(updates)) if dim not in
set(update_window_dims)]
scatter_dims_seen = 0
for i in update_scatter_dims:
if scatter_dims_seen == index_vector_dim:
scatter_dims_seen += 1
if updates.shape[i] != expanded_scatter_indices_shape[scatter_dims_seen]:
raise TypeError(f"Bounds of the scatter dimensions of updates must be "
f"the same as the bounds of the corresponding dimensions "
f"of scatter indices. For scatter dimension {i}, updates "
f"bound is {updates.shape[i]}, scatter_indices bound is "
f"{expanded_scatter_indices_shape[scatter_dims_seen]}.")
scatter_dims_seen += 1
return operand.shape
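# Illustrative example (the mirror of the gather example above): scattering
# updates of shape (3, 5) into a (10, 5) operand with scatter_indices of shape
# (3, 1), update_window_dims=(1,), inserted_window_dims=(0,) and
# scatter_dims_to_operand_dims=(0,) passes the checks above and, like every
# scatter, produces an output with the operand's shape (10, 5).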
def _scatter_translation_rule(c, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
dtype = c.get_shape(operand).numpy_dtype()
init_value = xb.constant(c, np.array(0, dtype))
update_computation = _reduction_computation(
c, update_jaxpr, update_consts, init_value)
indices_shape = c.get_shape(scatter_indices)
return xops.Scatter(operand, scatter_indices, updates, update_computation,
_scatter_dimensions_proto(indices_shape, dimension_numbers),
indices_are_sorted, unique_indices)
def _scatter_add_translation_rule(
c, operand, scatter_indices, updates, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
expand_complex128=False):
dtype = c.get_shape(operand).numpy_dtype()
scatter_dims = _scatter_dimensions_proto(c.get_shape(scatter_indices),
dimension_numbers)
def _make_reducer(dtype):
subc = xla_bridge.make_computation_builder("scatter_add_reducer")
shape = xc.Shape.array_shape(np.dtype(dtype), ())
args = [xb.parameter(subc, 0, shape), xb.parameter(subc, 1, shape)]
out = xops.Add(args[0], args[1])
return subc.build(out)
if expand_complex128 and dtype == np.complex128:
update_computation = _make_reducer(np.float64)
re = xops.Scatter(xops.Real(operand), scatter_indices, xops.Real(updates),
update_computation, scatter_dims, indices_are_sorted,
unique_indices)
im = xops.Scatter(xops.Imag(operand), scatter_indices, xops.Imag(updates),
update_computation, scatter_dims, indices_are_sorted,
unique_indices)
return xops.Complex(re, im)
else:
update_computation = _make_reducer(dtype)
return xops.Scatter(operand, scatter_indices, updates, update_computation,
scatter_dims, indices_are_sorted, unique_indices)
def _scatter_add_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
val_out = scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
tangent_out = scatter_add_p.bind(
g_operand, scatter_indices, g_updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
return val_out, tangent_out
def _scatter_add_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if type(t) is ad_util.Zero:
return ad_util.Zero
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = t
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_mul_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if type(t) is ad_util.Zero:
return ad_util.Zero
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = scatter_mul(
t, scatter_indices, updates, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(mul(t, operand), scatter_indices,
dimension_numbers=gather_dnums, slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
operand, scatter_indices, updates = batched_args
operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims
del update_jaxpr, update_consts # Unused.
# move the operand batch dim to the front if it is not None, otherwise create
# it at the front (so that we can scatter into it)
size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
if ax is not None)
operand = batching.bdim_at_front(operand, operand_bdim, size)
operand_bdim = 0
updates = batching.bdim_at_front(updates, updates_bdim, size)
if scatter_indices_bdim is None:
inserted_window_dims = tuple(np.add(1, dimension_numbers.inserted_window_dims))
update_window_dims = (0,) + tuple(np.add(1, dimension_numbers.update_window_dims))
scatter_dims_to_operand_dims = tuple(np.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(
operand, scatter_indices, updates, dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices), 0
# see the third case in _gather_batching_rule for comparison and comments
scatter_indices = batching.bdim_at_front(
scatter_indices, scatter_indices_bdim, size)
count_shape = list(scatter_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)
scatter_indices = concatenate([counts, scatter_indices],
len(count_shape) - 1)
update_window_dims = tuple(np.add(1, dimension_numbers.update_window_dims))
inserted_window_dims = (0,) + tuple(np.add(1, dimension_numbers.inserted_window_dims))
scatter_dims_to_operand_dims = (0,) + tuple(np.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(
operand, scatter_indices, updates, dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices), 0
scatter_add_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
_scatter_add_translation_rule)
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
partial(_scatter_batching_rule, scatter_add))
xla.backend_specific_translations['gpu'][scatter_add_p] = partial(
_scatter_add_translation_rule, expand_complex128=True)
scatter_mul_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',
_scatter_translation_rule)
def _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers,
indices_are_sorted, unique_indices, **kw):
return mul(x, scatter_add(
zeros_like_array(x), i, g, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices))
ad.defjvp(scatter_mul_p,
lambda g, x, i, y, **kw: scatter_mul_p.bind(g, i, y, **kw),
None,
_scatter_mul_jvp_rhs)
ad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule
batching.primitive_batchers[scatter_mul_p] = (
partial(_scatter_batching_rule, scatter_mul))
def _scatter_extremal_jvp(scatter_op, primals, tangents, update_jaxpr,
update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
scatter_dnums = dimension_numbers
updates_shape = updates.shape
val_out = scatter_op.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=scatter_dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
# gather_dnums and slice_sizes define the gather op that is the inverse of
# the scatter op specified by scatter_dnums
gather_dnums = GatherDimensionNumbers(
offset_dims=scatter_dnums.update_window_dims,
collapsed_slice_dims=scatter_dnums.inserted_window_dims,
start_index_map=scatter_dnums.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(operand.shape)):
if i in scatter_dnums.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[scatter_dnums.update_window_dims[pos]])
pos += 1
# For consistency with other max operations, if there are two or more values
# in updates that are contending to replace the same index location, the
# resulting tangent at that location will be the average of the associated
# tangents for the values in updates.
initial_vals = gather(
operand, scatter_indices, gather_dnums, np.array(slice_sizes))
target_vals = gather(
val_out, scatter_indices, gather_dnums, np.array(slice_sizes))
successful_updates = (updates == target_vals)
retained_values = (initial_vals == target_vals)
num_updates = gather(
scatter_add(_zeros(operand),
scatter_indices,
select(successful_updates, _ones(updates), _zeros(updates)),
scatter_dnums),
scatter_indices,
gather_dnums,
np.array(slice_sizes))
num_refs = gather(
scatter_add(_zeros(operand),
scatter_indices,
_ones(updates),
scatter_dnums),
scatter_indices,
gather_dnums,
np.array(slice_sizes))
updates_normalizer = select(retained_values,
1.0 / (num_updates + 1),
1.0 / num_updates)
updates_coef = select(successful_updates,
updates_normalizer,
_zeros(updates))
operand_normalizer = select(retained_values,
1.0 / (num_updates + 1),
_zeros(num_updates))
operand_coef = (-1.0 + operand_normalizer) / num_refs
# This can be simplified once scatter has transpose implemented
target_tangents = gather(
g_operand, scatter_indices, gather_dnums, np.array(slice_sizes))
tangent_updates = (target_tangents * operand_coef +
g_updates * updates_coef)
tangent_out = scatter_add(g_operand,
scatter_indices,
tangent_updates,
scatter_dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
return val_out, tangent_out
scatter_min_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
_scatter_translation_rule)
batching.primitive_batchers[scatter_min_p] = (
partial(_scatter_batching_rule, scatter_min))
ad.primitive_jvps[scatter_min_p] = partial(_scatter_extremal_jvp, scatter_min_p)
scatter_max_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
_scatter_translation_rule)
batching.primitive_batchers[scatter_max_p] = (
partial(_scatter_batching_rule, scatter_max))
ad.primitive_jvps[scatter_max_p] = partial(_scatter_extremal_jvp, scatter_max_p)
def _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
dnums = dimension_numbers
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
val_out = scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
return val_out, ad_util.Zero.from_value(val_out)
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
# If there are overlapping indices in the scatter, it is unspecified which
# update "wins". So we use the following perhaps surprising scheme:
# a) attach a positive ID to each update in updates, and perform the scatter
# on the IDs
# b) perform the inverse gather on the scattered IDs (similar to
# _scatter_add_transpose).
# c) use the gathered IDs to mask the primal and tangent values.
# d) perform a scatter-add on the masked primal and tangent values. A benefit
# of using scatter-add here is that we don't need a `scatter` transpose
# rule.
# a) attach a positive ID to each update in `updates`, and perform a scatter
# on the IDs.
ids_shape = np.array(updates.shape, dtype=np.int64)
ids_shape[dnums.update_window_dims,] = 1
num_ids = np.prod(ids_shape)
id_dtype = np.uint32 if (num_ids + 1) < np.iinfo(np.uint32).max else np.uint64
update_ids = add(reshape(iota(id_dtype, num_ids), ids_shape),
_ones(updates, dtype=id_dtype))
scattered_ids = scatter(full(operand.shape, 0, id_dtype),
scatter_indices, update_ids, dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
# b) compute the inverse gather that "undoes" the scatter on the id values.
gather_dnums = GatherDimensionNumbers(
offset_dims=dnums.update_window_dims,
collapsed_slice_dims=dnums.inserted_window_dims,
start_index_map=dnums.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(scattered_ids.shape)):
if i in dnums.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates.shape[dnums.update_window_dims[pos]])
pos += 1
gathered_update_ids = gather(scattered_ids, scatter_indices,
dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
# c) mask off input elements that do not correspond to a primal output.
masked_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
operand, _zeros(operand))
masked_updates = select(eq(update_ids, gathered_update_ids),
updates, _zeros(updates))
masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
g_operand, _zeros(g_operand))
masked_g_updates = select(eq(update_ids, gathered_update_ids),
g_updates, _zeros(g_updates))
# d) perform scatter-adds to compute the primal and tangent outputs.
val_out = scatter_add(masked_operand, scatter_indices, masked_updates,
dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,
dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
return val_out, tangent_out
scatter_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter',
_scatter_translation_rule)
ad.primitive_jvps[scatter_p] = _scatter_jvp
batching.primitive_batchers[scatter_p] = (
partial(_scatter_batching_rule, scatter))
def _reduce_shape_rule(*args, computation, jaxpr, consts, dimensions):
operand_args, init_value_args = split_list(args, [len(args) // 2])
if any(arg.shape != () for arg in init_value_args):
init_value_shapes = [a.shape for a in init_value_args]
raise ValueError(f'Found non-scalar init_value: {init_value_shapes}')
return [
tuple(np.delete(op_arg.shape, dimensions))
for op_arg in operand_args
]
def _reduce_dtype_rule(*args, computation, jaxpr, consts, dimensions):
_, init_value_args = split_list(args, [len(args) // 2])
return [
dtypes.canonicalize_dtype(in_arg.dtype)
for in_arg in init_value_args
]
def _reduce_translation_rule(c, *values, computation, jaxpr,
consts, dimensions):
operands, init_values = split_list(values, [len(values) // 2])
if len(operands) == 1:
init_value = init_values[0]
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
out = xops.Reduce(c, operands, init_values, xla_computation, dimensions)
return xops.Tuple(c, (out,))
xla_computation = _reduction_computation(c, jaxpr, consts, init_values, singleton=False)
return xops.Reduce(c, operands, init_values, xla_computation, dimensions)
def _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr,
consts, dimensions):
num_operands = len(batched_args) // 2
operands, init_values = split_list(batched_args, [num_operands])
operand_bdims, init_value_bdims = split_list(batch_dims, [num_operands])
if all(init_value_bdim is None for init_value_bdim in init_value_bdims):
# Assume all batch dims are the same for each of the operands
assert all(operand_bdim is not None for operand_bdim in operand_bdims)
assert all(operand_bdim == operand_bdims[0] for operand_bdim in operand_bdims)
# TODO(sharadmv): handle the case when batch dims are different across
# operands or when some are unbatched
operand_bdim = operand_bdims[0]
new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions]
new_operand_bdim = operand_bdim - int(np.sum(np.less(dimensions, operand_bdim)))
new_operand_bdims = [new_operand_bdim] * num_operands
return reduce_p.bind(*(operands + init_values),
computation=computation, dimensions=tuple(new_dimensions),
consts=consts,
jaxpr=jaxpr), new_operand_bdims
else:
raise NotImplementedError # loop and stack
def _reduction_computation(c, jaxpr, consts, init_values, singleton=True):
if singleton:
init_values = [init_values]
shapes = safe_map(c.get_shape, init_values + init_values)
axis_env = xla.AxisEnv(1, (), ()) # no parallel primitives inside reductions
subc = xla_bridge.make_computation_builder("reduction_computation")
assert len(consts) == 0, "Reduction computations cannot have constants"
args = [xb.parameter(subc, i, shape) for i, shape in enumerate(shapes)]
out_nodes = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, '', *args)
if singleton:
return subc.build(out_nodes[0])
out_nodes = xops.Tuple(subc, out_nodes)
return subc.build(out_nodes)
def _masking_defreducer(prim, identity):
masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)
def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,
axes, input_shape=None, **reduce_kwargs):
(padded_val,), (logical_shape,) = padded_vals, logical_shapes
padded_shape = masking.padded_shape_as_value(padded_val.shape)
masks = [broadcasted_iota(np.int32, padded_shape, i) < d
for i, d in enumerate(logical_shape) if i in axes]
mask = _reduce(operator.and_, masks)
masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))
prim_bind = partial(prim.bind, **reduce_kwargs)
bind = prim_bind if input_shape is None else partial(prim_bind, input_shape=padded_shape)
return bind(masked_val, axes=axes)
reduce_p = standard_primitive(_reduce_shape_rule, _reduce_dtype_rule,
'reduce', translation_rule=_reduce_translation_rule,
multiple_results=True)
batching.primitive_batchers[reduce_p] = _reduce_batch_rule
def _reduce_number_dtype_rule(name, operand, *args, **kw):
if not dtypes.issubdtype(operand.dtype, np.number):
raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
"of number.".format(name, np.dtype(operand.dtype).name))
return dtypes.canonicalize_dtype(operand.dtype)
def _reduce_sum_shape_rule(operand, *, axes):
return _reduce_op_shape_rule(operand, axes=axes)
def _reduce_sum_translation_rule(c, operand, *, axes):
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, np.array(0, dtype))],
xla.primitive_subcomputation(add_p, scalar, scalar),
axes)
def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
broadcast_dimensions = tuple(np.delete(np.arange(len(input_shape)), axes))
result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)
assert result.shape == input_shape
return [result]
reduce_sum_p = standard_primitive(
_reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),
'reduce_sum', _reduce_sum_translation_rule)
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
_masking_defreducer(reduce_sum_p,
lambda shape, dtype: np.broadcast_to(np.array(0, dtype), shape))
def _reduce_op_shape_rule(operand, *, axes, input_shape=None):
del input_shape # Unused.
if len(axes) != len(set(axes)):
raise ValueError(f"duplicate value in 'axes' of reduction: {axes}")
return tuple(np.delete(operand.shape, axes))
def _reduce_prod_translation_rule(c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, np.array(1, dtype))],
xla.primitive_subcomputation(mul_p, scalar, scalar), axes)
def _reduce_prod_jvp_rule(primals, tangents, *, axes):
operand, = primals
tangent, = tangents
input_shape = np.array(operand.shape)
n = np.prod(input_shape[list(axes)])
non_axes = np.delete(np.arange(len(input_shape)), axes)
# Move the reduced axes to the front, and flatten them to 1D.
permutation = axes + tuple(non_axes)
new_shape = (n,) + tuple(input_shape[non_axes])
operand = reshape(operand, new_shape, permutation)
tangent = reshape(tangent, new_shape, permutation)
def _reduce_prod_tree(x, axis=0):
"""Reduce by repeatedly splitting the array and multiplying."""
while x.shape[axis] > 1:
n = x.shape[axis]
n1 = (n + 1) // 2
n2 = n - n1
x1 = slice_in_dim(x, 0, n1)
x2 = slice_in_dim(x, n1, None)
if n2 != n1:
paddings = [(0, 0, 0)] * len(x.shape)
paddings[axis] = (0, 1, 0)
x2 = pad(x2, _const(x, 1), paddings)
x = x1 * x2
if x.shape[axis] == 0:
return full(input_shape[non_axes], _one(x))
return squeeze(x, (axis,))
return api.jvp(_reduce_prod_tree, (operand,), (tangent,))
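# Illustrative note (not part of the original source): computing the JVP
# through this pairwise tree sidesteps the usual prod(x) / x_i formula, which
# divides by zero whenever an element is zero. E.g. for x = [2., 0., 3.] with
# tangent t = [t0, t1, t2], the product rule through the tree yields a JVP of
# 6. * t1 (the product of the remaining elements), with no division involved.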
reduce_prod_p = standard_primitive(
_reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),
'reduce_prod', _reduce_prod_translation_rule)
ad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule
batching.defreducer(reduce_prod_p)
_masking_defreducer(reduce_prod_p,
lambda shape, dtype: np.broadcast_to(np.array(1, dtype), shape))
def _reduce_chooser_shape_rule(operand, *, axes):
return tuple(np.delete(operand.shape, axes))
def _reduce_chooser_translation_rule(prim, identity, c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, identity(dtype))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
def _reduce_chooser_jvp_rule(g, ans, operand, *, axes):
# TODO(mattjj): an alternative is to use variadic reduce to compute the chosen
# locations in a single pass (rather than comparing equality) and use a
# gather, and/or even push along the chosen elements of g (b/112040122)
shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
location_indicators = convert_element_type(
_eq_meet(operand, reshape(ans, shape)), g.dtype)
counts = _reduce_sum(location_indicators, axes)
return div(_reduce_sum(mul(g, location_indicators), axes), counts)
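# Illustrative example (not from the original source): for a reduce_max over
# x = [3., 5., 5.], location_indicators is [0., 1., 1.] and counts = 2., so
# the incoming tangent is split evenly between the tied maxima and the JVP is
# (g[1] + g[2]) / 2.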
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
_get_max_identity)
reduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
_masking_defreducer(reduce_max_p,
lambda shape, dtype: np.broadcast_to(np.array(-np.inf, dtype), shape))
_reduce_min_translation_rule = partial(
_reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
_masking_defreducer(reduce_min_p,
lambda shape, dtype: np.broadcast_to(np.array(np.inf, dtype), shape))
def _argminmax_shape_rule(operand, *, axes, index_dtype):
axis, = axes
return tuple(np.delete(operand.shape, axis))
def _argminmax_dtype_rule(operand, *, axes, index_dtype):
if not dtypes.issubdtype(index_dtype, np.integer):
raise TypeError("index_dtype must be an integer type, but got {}"
.format(np.dtype(index_dtype).name))
return index_dtype
def _argminmax_translation_rule(value_comparator, identity,
c, operand, *, axes, index_dtype):
axis, = axes
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
subc = xb.make_computation_builder("argminmax_comparator")
value_shape = xc.Shape.array_shape(shape.xla_element_type(), ())
index_shape = xc.Shape.array_shape(index_dtype, ())
x_value = xb.parameter(subc, 0, value_shape)
x_index = xb.parameter(subc, 1, index_shape)
y_value = xb.parameter(subc, 2, value_shape)
y_index = xb.parameter(subc, 3, index_shape)
which_value = value_comparator(x_value, y_value)
which_index = xops.Or(which_value, xops.And(xops.Eq(x_value, y_value),
xops.Lt(x_index, y_index)))
xops.Tuple(subc, [xops.Select(which_value, x_value, y_value),
xops.Select(which_index, x_index, y_index)])
comparator = subc.build()
iota_shape = xc.Shape.array_shape(index_dtype, shape.dimensions())
iota = xc.ops.Iota(c, iota_shape, axis)
out = xops.Reduce(
c, [operand, iota],
[xb.constant(c, identity(dtype)),
xb.constant(c, np.array(0, index_dtype))], comparator, [axis])
return xops.GetTupleElement(out, 1)
def _argminmax_gpu_translation_rule(op, a, *, axes, index_dtype):
axis, = axes
idxs = tie_in(a, broadcasted_iota(index_dtype, a.shape, axis))
maxval = np.array(dtypes.iinfo(index_dtype).max, dtype=index_dtype)
maxval = broadcast(tie_in(a, maxval), a.shape)
mask_idxs = select(eq(a, expand_dims(op(a, (axis,)), (axis,))), idxs,
maxval)
return _reduce_min(mask_idxs, (axis,))
_argmin_translation_rule = partial(_argminmax_translation_rule, xops.Lt,
_get_min_identity)
_argmax_translation_rule = partial(_argminmax_translation_rule, xops.Gt,
_get_max_identity)
argmin_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmin', _argmin_translation_rule)
batching.defreducer(argmin_p)
ad.defjvp_zero(argmin_p)
xla.backend_specific_translations['gpu'][argmin_p] = xla.lower_fun(
partial(_argminmax_gpu_translation_rule, _reduce_min),
multiple_results=False)
argmax_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmax', _argmax_translation_rule)
batching.defreducer(argmax_p)
ad.defjvp_zero(argmax_p)
xla.backend_specific_translations['gpu'][argmax_p] = xla.lower_fun(
partial(_argminmax_gpu_translation_rule, _reduce_max),
multiple_results=False)
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != np.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(np.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, c, operand, *, axes):
scalar = ShapedArray((), np.bool_)
return xops.Reduce(c, [operand], [xb.constant(c, identity(np.bool_))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
'reduce_or', _reduce_or_translation_rule)
batching.defreducer(reduce_or_p)
_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
'reduce_and', _reduce_and_translation_rule)
batching.defreducer(reduce_and_p)
def _reduce_window_shape_rule(operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
if operand.dtype != init_value.dtype:
msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
" got operand dtype {} and init_value dtype {}.")
raise TypeError(msg.format(operand.dtype, init_value.dtype))
if init_value.shape != ():
msg = ("reduce_window expected init_value to be a scalar but init_value "
"has shape {}.")
raise TypeError(msg.format(init_value.shape))
return _common_reduce_window_shape_rule(
operand, window_dimensions, window_strides, padding, base_dilation,
window_dilation)
def _reduce_window_translation_rule(c, operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
return xops.ReduceWindowWithGeneralPadding(
operand, init_value, xla_computation, window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _generic_reduce_window_batch_rule(
batched_args, batch_dims, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation):
operand, init = batched_args
bdim, init_bdim = batch_dims
if init_bdim is not None:
raise NotImplementedError("reduce_window batching is not implemented for "
"initial values")
def reduce_window(x, window_dimensions, window_strides, padding, base_dilation,
window_dilation):
return reduce_window_p.bind(
x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,
window_strides=window_strides, padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation)
return _reduce_window_batch_rule(
reduce_window, (operand,), (bdim,), window_dimensions=window_dimensions,
window_strides=window_strides, padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation)
reduce_window_p = standard_primitive(
_reduce_window_shape_rule, _input_dtype, 'reduce_window',
_reduce_window_translation_rule)
batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule
def _reduce_window_sum_shape_rule(operand, *, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
if not dtypes.issubdtype(operand.dtype, np.number):
msg = "operand to reduce_window_sum must have a number dtype, got {}"
raise TypeError(msg.format(np.dtype(operand.dtype).name))
return _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def _reduce_window_sum_translation_rule(c, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, np.array(0, dtype)),
xla.primitive_subcomputation(add_p, scalar, scalar), window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _reduce_window_sum_transpose_rule(cotangent, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
pads = _conv_general_vjp_lhs_padding(
input_shape, window_dimensions, window_strides, cotangent.shape, padding,
base_dilation, window_dilation)
ones = [1] * len(input_shape)
padding_config = [(lo, hi, stride - 1)
for (lo, hi), stride in zip(pads, window_strides)]
pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
result = _reduce_window_sum(pad_cotangent, window_dimensions, base_dilation,
[(0, 0)] * len(input_shape),
base_dilation=ones,
window_dilation=window_dilation)
assert result.shape == input_shape, (result.shape, input_shape)
return [result]
def _reduce_window_batch_rule(reduce_window, batched_args, bdims, *,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
operand, = batched_args
bdim, = bdims
if bdim is not None:
window_dimensions = \
window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]
window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]
padding = padding[:bdim] + ((0, 0),) + padding[bdim:]
base_dilation = base_dilation[:bdim] + (1,) + base_dilation[bdim:]
window_dilation = window_dilation[:bdim] + (1,) + window_dilation[bdim:]
operand = reduce_window(operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation)
return operand, bdim
reduce_window_sum_p = standard_primitive(
_reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
_reduce_window_sum_translation_rule)
ad.deflinear2(reduce_window_sum_p, _reduce_window_sum_transpose_rule)
batching.primitive_batchers[reduce_window_sum_p] = partial(
_reduce_window_batch_rule, _reduce_window_sum)
def _reduce_window_chooser_translation_rule(
prim, identity, c, operand, *, window_dimensions, window_strides, padding,
base_dilation, window_dilation):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, identity(dtype)),
xla.primitive_subcomputation(prim, scalar, scalar), window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _reduce_window_chooser_jvp_rule(prim, g, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
assert prim is max_p or prim is min_p
select_prim = ge_p if prim is max_p else le_p
return _select_and_gather_add(g, operand, select_prim, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
_check_shapelike("reduce_window", "window_dimensions", window_dimensions,
non_zero_shape=True)
_check_shapelike("reduce_window", "window_strides", window_strides,
non_zero_shape=True)
_check_shapelike("reduce_window", "base_dilation", base_dilation)
_check_shapelike("reduce_window", "window_dilation", window_dilation)
if operand.ndim != len(window_dimensions):
msg = ("reduce_window got the wrong number of window_dimensions for "
"operand: got operand shape {} with window_dimensions {}.")
raise TypeError(msg.format(operand.shape, window_dimensions))
if len(window_strides) != len(window_dimensions):
msg = ("reduce_window got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
if len(base_dilation) != len(window_dimensions):
msg = ("reduce_window got inconsistent base_dilation and "
"window_dimensions: got base_dilation {} and window_dimensions {}.")
raise TypeError(msg.format(base_dilation, window_dimensions))
if len(window_dilation) != len(window_dimensions):
msg = ("reduce_window got inconsistent window_dilation and "
"window_dimensions: got window_dilation {} and window_dimensions "
"{}.")
raise TypeError(msg.format(window_dilation, window_dimensions))
return reduce_window_shape_tuple(operand.shape, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
padding, base_dilation=None,
window_dilation=None):
if base_dilation is not None:
operand_shape = _dilate_shape(operand_shape, base_dilation)
if window_dilation is not None:
window_dimensions = _dilate_shape(window_dimensions, window_dilation)
operand_padded = np.add(operand_shape, np.add(*zip(*padding)))
t = np.floor_divide(
np.subtract(operand_padded, window_dimensions), window_strides) + 1
return tuple(t)
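# Worked example of the shape arithmetic above (hypothetical numbers, not from
# the original source): operand_shape (10,), window_dimensions (3,),
# window_strides (2,) and padding ((1, 1),) give operand_padded = 10 + 1 + 1
# = 12 and floor((12 - 3) / 2) + 1 = 5, i.e. an output shape of (5,). Base and
# window dilation, when present, first inflate the operand and window shapes
# via _dilate_shape.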
_reduce_window_max_translation_rule = partial(
_reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
_reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))
batching.primitive_batchers[reduce_window_max_p] = partial(
_reduce_window_batch_rule, _reduce_window_max)
_reduce_window_min_translation_rule = partial(
_reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
_reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))
_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,
_reduce_window_min)
batching.primitive_batchers[reduce_window_min_p] = _reduce_window_min_batch_rule
def _select_and_scatter_shape_rule(
operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
_check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
_check_shapelike("select_and_scatter", "window_strides", window_strides)
if len(window_dimensions) != len(window_strides):
msg = ("select_and_scatter got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
return operand.shape
def _select_and_scatter_translation(
c, operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source,
init_value, scatter)
select_and_scatter_p = standard_primitive(
_select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
_select_and_scatter_translation)
def _select_and_scatter_add_shape_rule(
source, operand, *, select_prim, window_dimensions, window_strides,
padding):
return operand.shape
def _select_and_scatter_add_translation(
c, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
select = xla.primitive_subcomputation(select_prim, scalar, scalar)
scatter = xla.primitive_subcomputation(add_p, scalar, scalar)
zero = xb.constant(c, np.array(0, dtype))
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source, zero,
scatter)
def _select_and_scatter_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_scatter_add(
source, operand, select_prim, window_dimensions, window_strides,
padding)
del g_operand
if type(g_source) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
tangent_out = _select_and_scatter_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding)
return val_out, tangent_out
def _select_and_scatter_add_transpose(
t, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
assert ad.is_undefined_primal(source) and not ad.is_undefined_primal(operand)
ones = (1,) * len(window_dimensions)
source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions,
window_strides, padding, ones, ones)
return [source_t, None]
def _select_and_scatter_add_batch_rule(
batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = batched_args
s_bdim, o_bdim = batch_dims
size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
if bdim is not None)
source = batching.bdim_at_front(source, s_bdim, size)
operand = batching.bdim_at_front(operand, o_bdim, size)
window_dimensions = (1,) + window_dimensions
window_strides = (1,) + window_strides
padding = ((0, 0),) + padding
out = _select_and_scatter_add(source, operand, select_prim, window_dimensions,
window_strides, padding)
return out, 0
select_and_scatter_add_p = standard_primitive(
_select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
_select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
_select_and_scatter_add_transpose
ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp
batching.primitive_batchers[select_and_scatter_add_p] = \
_select_and_scatter_add_batch_rule
def _select_and_gather_add_shape_rule(
tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
if tangents.shape != operand.shape:
msg = ("select_and_gather_add tangents and operand shapes must match, "
"got {} and {}.")
raise TypeError(msg.format(tangents.shape, operand.shape))
return _common_reduce_window_shape_rule(
operand, window_dimensions, window_strides, padding, base_dilation,
window_dilation)
_UINT_DTYPES = {
16: np.uint16,
32: np.uint32,
64: np.uint64,
}
_INT_DTYPES = {
16: np.int16,
32: np.int32,
64: np.int64,
}
def _select_and_gather_add_translation(
c, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation, max_bits=64):
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
etype = shape.xla_element_type()
nbits = dtypes.finfo(dtype).bits
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda c, dtype, x: xb.constant(c, np.array(x, dtype=dtype),
canonicalize_types=False)
if double_word_reduction:
# TODO(b/73062247): XLA doesn't yet implement ReduceWindow on tuples, so
# we implement a pair-wise ReduceWindow by packing two k-bit values into
# 2k-bit unsigned integer using bit tricks.
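    # Illustrative example of the packing (not part of the original source):
    # for f32 inputs nbits = 32, and a (value, tangent) pair (v, t) is packed
    # as (uint64(bitcast<uint32>(v)) << 32) | uint64(bitcast<uint32>(t)).
    # The reduction compares packed words by their high (value) halves via
    # `fst`, and `snd` recovers the tangent of the selected element from the
    # low half.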
word_dtype = _UINT_DTYPES[nbits]
double_word_dtype = _UINT_DTYPES[nbits * 2]
word_type = xla_client.dtype_to_etype(word_dtype)
double_word_type = xla_client.dtype_to_etype(double_word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
a = xops.ConvertElementType(a, double_word_type)
b = xops.ConvertElementType(b, double_word_type)
a = xops.ShiftLeft(a, const(c, double_word_dtype, nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.ShiftRightLogical(t, const(c, double_word_dtype, nbits))
return xops.BitcastConvertType(xops.ConvertElementType(st, word_type), etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ConvertElementType(t, word_type), etype)
else:
# The double-word trick above only works if we have a sufficiently large
# type. As an alternative, we can pack two half words into a single word,
# at the cost of precision.
# TODO(b/73062247): add support for tuple reductions and remove this case.
warnings.warn("Using reduced precision for gradient of reduce-window "
"min/max operator to work around missing XLA support for "
"pair-reductions. This is likely from a second or "
"higher derivative of a max-pooling operation.")
r_nbits = nbits // 2
# Drop/round the bottom mantissa bits.
nexp = dtypes.finfo(dtype).nexp
nmant = r_nbits - nexp - 1
double_word_dtype = word_dtype = _UINT_DTYPES[nbits]
word_type = xla_client.dtype_to_etype(word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)
b = xops.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
b = xops.ShiftRightLogical(b, const(c, word_dtype, r_nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))
return xops.BitcastConvertType(st, etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ShiftLeft(t, const(c, word_dtype, r_nbits)),
etype)
def reducer():
c = xla_bridge.make_computation_builder("select_and_gather_pair_reducer")
x = xb.parameter(c, 0,
xla_client.Shape.array_shape(np.dtype(double_word_dtype), ()))
y = xb.parameter(c, 1,
xla_client.Shape.array_shape(np.dtype(double_word_dtype), ()))
assert select_prim is ge_p or select_prim is le_p
which = xops.Ge if select_prim is ge_p else xops.Le
xops.Select(which(fst(c, x), fst(c, y)), x, y)
return c.build()
assert select_prim is ge_p or select_prim is le_p, select_prim
init = -np.inf if select_prim is ge_p else np.inf
out = xops.ReduceWindowWithGeneralPadding(
pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)),
reducer(), window_dimensions, window_strides, base_dilation,
window_dilation, padding)
return snd(out)
def _select_and_gather_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_gather_add(
source, operand, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
del g_operand
if type(g_source) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
tangent_out = _select_and_gather_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding, base_dilation, window_dilation)
return val_out, tangent_out
def _select_and_gather_add_transpose(
t, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
assert select_prim in (le_p, ge_p)
assert ad.is_undefined_primal(tangents) and not ad.is_undefined_primal(operand)
if any(d != 1 for d in window_dilation):
msg = ("VJP not implemented for select_and_gather (MaxPool) with window "
"dilation, got window_dilation={}.")
raise NotImplementedError(msg.format(window_dilation))
if type(t) is ad_util.Zero:
    return [ad_util.Zero(tangents.aval), None]
has_base_dilation = any(d != 1 for d in base_dilation)
if has_base_dilation:
select_identity = (_get_max_identity if select_prim is ge_p
else _get_min_identity)
operand = pad(operand, select_identity(operand.dtype),
tuple((0, 0, d - 1) for d in base_dilation))
result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,
window_strides, padding)
if has_base_dilation:
    result = slice(result, (0,) * len(result.shape), result.shape,
                   base_dilation)
return [result, None]
def _select_and_gather_add_batching_rule(
batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
t, x = batched_args
t_bdim, x_bdim = batch_dims
size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
if bdim is not None)
t = batching.bdim_at_front(t, t_bdim, size)
x = batching.bdim_at_front(x, x_bdim, size)
window_dimensions = (1,) + window_dimensions
window_strides = (1,) + window_strides
padding = ((0, 0),) + padding
base_dilation = (1,) + base_dilation
window_dilation = (1,) + window_dilation
out = _select_and_gather_add(t, x, select_prim, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
return (out, 0)
select_and_gather_add_p = standard_primitive(
_select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
_select_and_gather_add_translation)
ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp
ad.primitive_transposes[select_and_gather_add_p] = \
_select_and_gather_add_transpose
batching.primitive_batchers[select_and_gather_add_p] = \
_select_and_gather_add_batching_rule
xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(
_select_and_gather_add_translation,
max_bits=32)
def _sort_abstract_eval(*args, **kwargs):
args = tuple(raise_to_shaped(arg) for arg in args)
if any(arg.shape != args[0].shape for arg in args[1:]):
shapes = " ".join(str(a.shape) for a in args)
raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
return args
def _float_to_int_for_sort(x):
# Switch from a floating point value to a integer value in such a way that
# when using the integer value to compare, we get the same result for normal
# values, and -nan is treated as the smallest value, and nan is treated as
# the largest value.
# If f is a float, and
# x = bit_cast<int32>(f);
# y = x < 0 ? int32_max - x : x;
# then y is ordered as an int32 such that finite values have the obvious
# order, -0 is ordered before 0, and -NaN and NaN appear at the beginning
# and end of the ordering.
  # Note that to avoid overflow when computing int32_max - x, we perform the
  # subtraction in the unsigned domain and bitcast the result back to signed.
if x.dtype == dtypes.bfloat16:
x = convert_element_type(x, np.float32)
nbits = np.finfo(x).bits
signed_dtype = _INT_DTYPES[nbits]
unsigned_dtype = _UINT_DTYPES[nbits]
signed = bitcast_convert_type(x, signed_dtype)
unsigned = bitcast_convert_type(x, unsigned_dtype)
flipped = bitcast_convert_type(
sub(unsigned_dtype(np.iinfo(signed_dtype).max), unsigned), signed_dtype)
return select(lt(signed, _zero(signed)), flipped, signed)
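# Worked example of the transformation above (not part of the original
# source): for float32, +0.0 bitcasts to the signed integer 0 and is returned
# unchanged, while -0.0 bitcasts to int32 min (0x80000000); it is negative, so
# the flipped value int32_max - 0x80000000 (computed as uint32) bitcasts back
# to -1. Hence -0.0 orders strictly before +0.0, NaNs with the sign bit set
# sort to the very beginning, and positive NaNs sort to the very end.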
# Default comparator that sorts the operands lexicographically on the
# first `num_keys` arguments.
# For floating point types, a total order is created where
# -NaN < -infinity < ... < -0 < 0 < ... < infinity < NaN.
# For complex types, the (real, imag) pairs are sorted lexicographically
# (following NumPy's semantics).
# This code adds complex-number support and lexicographic ordering to the algorithm from:
# https://github.com/tensorflow/tensorflow/blob/ba43780830f09da72081fe5061c436f1c6203a92/tensorflow/compiler/xla/client/lib/comparators.h#L33
def _sort_lt_comparator(*operands, num_keys=1):
assert len(operands) >= 2 and len(operands) % 2 == 0, operands
assert len(operands) // 2 >= num_keys, (operands, num_keys)
x_keys, y_keys = [], []
for x, y in zip(operands[:2*num_keys:2], operands[1:2*num_keys:2]):
assert x.dtype == y.dtype, (x.dtype, y.dtype)
if np.issubdtype(x.dtype, np.complexfloating):
x_keys.extend([_float_to_int_for_sort(real(x)), _float_to_int_for_sort(imag(x))])
y_keys.extend([_float_to_int_for_sort(real(y)), _float_to_int_for_sort(imag(y))])
elif np.issubdtype(x.dtype, np.floating):
x_keys.append(_float_to_int_for_sort(x))
y_keys.append(_float_to_int_for_sort(y))
else:
x_keys.append(x)
y_keys.append(y)
p = None
for xk, yk in zip(x_keys[::-1], y_keys[::-1]):
p = (bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), p)) if p is not None
else lt(xk, yk))
return p
def _sort_translation_rule(c, *operands, dimension, is_stable, num_keys):
types = [c.get_shape(x).xla_element_type() for x in operands]
subc = xla_bridge.make_computation_builder("sort_lt_comparator")
params = [xb.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))
for i, typ in enumerate(types) for j in range(2)]
result = xla.lower_fun(partial(_sort_lt_comparator, num_keys=num_keys),
multiple_results=False)(subc, *params)
comparator = subc.build(result)
out = xops.Sort(c, operands, dimension=dimension, is_stable=is_stable,
comparator=comparator)
return out if len(operands) != 1 else xops.Tuple(c, [out])
def _sort_jvp(primals, tangents, *, dimension, is_stable, num_keys):
shape = primals[0].shape
iotas = []
for dim, size in enumerate(shape):
dtype = np.int32 if size < np.iinfo(np.int32).max else np.int64
iotas.append(broadcasted_iota(dtype, shape, dim))
primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension,
is_stable=is_stable, num_keys=num_keys)
idx = tuple(primals[-1] if i == dimension else iotas[i]
for i in range(len(shape)))
tangents_out = tuple(t if type(t) is ad_util.Zero else t[idx] for t in tangents)
return tuple(primals[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension, is_stable, num_keys):
prototype_arg, new_bdim = next(
(a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
new_args = []
for arg, bdim in zip(batched_args, batch_dims):
if bdim is None:
dims = np.delete(np.arange(prototype_arg.ndim), new_bdim)
new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
else:
new_args.append(batching.moveaxis(arg, bdim, new_bdim))
new_dimension = dimension + (new_bdim <= dimension)
bdims = (new_bdim,) * len(new_args)
return (sort_p.bind(*new_args, dimension=new_dimension, is_stable=is_stable, num_keys=num_keys),
bdims)
sort_p = Primitive('sort')
sort_p.multiple_results = True
sort_p.def_impl(partial(xla.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
xla.translations[sort_p] = _sort_translation_rule
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
def _top_k_abstract_eval(operand, *, k):
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
if len(operand.shape) == 0:
raise TypeError("top_k operand must have >= 1 dimension, got {}"
.format(operand.shape))
shape = list(operand.shape)
if shape[-1] < k:
msg = "k argument to top_k must be no larger than minor dimension; {} vs {}"
raise ValueError(msg.format(k, shape))
shape[-1] = k
return (ShapedArray(shape, operand.dtype),
ShapedArray(shape, np.dtype(np.int32)))
def _top_k_jvp(primals, tangents, *, k):
operand, = primals
tangent, = tangents
primals_out = top_k(operand, k)
if type(tangent) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(primals_out[0])
else:
_, k_idxs = primals_out
idx_shape = k_idxs.shape
rank = len(idx_shape)
gather_index_shape = idx_shape + (1,)
gather_indices = []
for i in range(rank-1):
_iota = iota(k_idxs.dtype, idx_shape[i])
if not config.omnistaging_enabled:
_iota = tie_in(operand, _iota)
_iota = broadcast_in_dim(_iota, gather_index_shape, (i,))
gather_indices.append(_iota)
gather_indices.append(reshape(k_idxs, gather_index_shape))
gather_indices = concatenate(gather_indices, dimension=rank)
slice_sizes = (1,) * rank
dnums = GatherDimensionNumbers(
offset_dims=(),
collapsed_slice_dims=tuple(range(rank)),
start_index_map=tuple(range(rank)))
tangent_out = gather(tangent, gather_indices, dnums, slice_sizes)
return primals_out, (tangent_out, ad_util.Zero.from_value(primals_out[1]))
def _top_k_batch_rule(batched_args, batch_dims, *, k):
operand, = batched_args
bdim, = batch_dims
if bdim == operand.ndim-1:
perm = np.arange(operand.ndim)
perm[bdim-1], perm[bdim] = perm[bdim], perm[bdim-1]
top_k_v, top_k_i = top_k(transpose(operand, perm), k=k)
return (transpose(top_k_v, perm),
transpose(top_k_i, perm)), (bdim, bdim)
else:
return top_k(operand, k=k), (bdim, bdim)
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True
top_k_p.def_impl(partial(xla.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
xla.translations[top_k_p] = partial(standard_translate, 'top_k')
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
def _stop_gradient_jvp_rule(primals, tangents):
# if we don't call stop_gradient here, we'd only peel off one autodiff tracer
x, = primals
return stop_gradient(x), ad_util.Zero.from_value(x)
def _stop_gradient_batch_rule(batched_args, batch_dims):
x, = batched_args
dim, = batch_dims
return stop_gradient(x), dim
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
def create_token(x):
"""Creates an XLA token value with no preconditions for sequencing effects.
Experimental.
Args:
x: a dummy argument used to tie the CreateToken operator into a trace. The
value of `x` is ignored.
"""
# x is a dummy argument used to tie the operator into a trace.
return create_token_p.bind(stop_gradient(x))
create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(xla.apply_primitive, create_token_p))
create_token_p.def_abstract_eval(lambda _: abstract_token)
xla.translations[create_token_p] = lambda c, *_: xops.CreateToken(c)
def after_all(*operands):
"""Merges one or more XLA token values. Experimental.
Wraps the XLA AfterAll operator."""
return after_all_p.bind(*operands)
def _after_all_abstract_eval(*operands):
if any(x is not abstract_token for x in operands):
raise TypeError("Arguments to after_all must be tokens")
return abstract_token
def _after_all_translation_rule(c, *operands):
return xops.AfterAll(c, operands)
after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(xla.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
xla.translations[after_all_p] = _after_all_translation_rule
def infeed(token, shape=None, partitions=None):
"""Consumes an infeed value of `shape` from the host. Experimental.
`token` is used to sequence infeed and outfeed effects.
  `partitions` may be specified inside a `sharded_jit` function.
"""
flat_shapes, treedef = pytree.flatten(shape)
for shape in flat_shapes:
if not isinstance(shape, ShapedArray):
raise TypeError("shape argument to infeed must be a pytree of "
"ShapedArray values, got {}".format(shape))
if partitions is not None:
# Always replicate token.
# We specifically use type() to raise an error for PartitionSpecs.
if type(partitions) != tuple: # pylint: disable=unidiomatic-typecheck
raise ValueError(f"'partitions' argument to infeed should be a tuple, "
f"got {partitions}")
partitions = partitions + (None,)
xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes),
partitions=partitions)
return (treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1])
def _infeed_abstract_eval(token, *, shapes, partitions):
if token is not abstract_token:
raise TypeError("First argument to infeed must be a token")
return shapes + (abstract_token,)
def _infeed_translation_rule(c, token, *, shapes, partitions):
shape = tuple(shape.with_major_to_minor_layout_if_absent()
for x in shapes for shape in xla.aval_to_xla_shapes(x))
build_infeed = partial(xops.InfeedWithToken, token,
xla_client.Shape.tuple_shape(shape))
if partitions:
xs_and_token = xb.with_sharding(c, partitions, build_infeed)
else:
# Note that infeed will default to replication if inside a sharded
# computation and no sharding is specified.
xs_and_token = build_infeed()
xs = xops.GetTupleElement(xs_and_token, 0)
token = xops.GetTupleElement(xs_and_token, 1)
outs = [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]
return xops.Tuple(c, outs)
infeed_p = Primitive("infeed")
infeed_p.multiple_results = True
infeed_p.def_impl(partial(xla.apply_primitive, infeed_p))
infeed_p.def_abstract_eval(_infeed_abstract_eval)
xla.translations[infeed_p] = _infeed_translation_rule
def outfeed(token, xs):
"""Outfeeds value `xs` to the host. Experimental.
`token` is used to sequence infeed and outfeed effects.
"""
flat_xs, _ = pytree.flatten(xs)
return outfeed_p.bind(token, *flat_xs)
def _outfeed_abstract_eval(token, *xs):
if token is not abstract_token:
raise TypeError("First argument to outfeed must be a token")
return abstract_token
def _outfeed_translation_rule(c, token, *xs):
t = xops.Tuple(c, xs)
return xops.OutfeedWithToken(t, token, c.get_shape(t))
outfeed_p = Primitive("outfeed")
outfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))
outfeed_p.def_abstract_eval(_outfeed_abstract_eval)
xla.translations[outfeed_p] = _outfeed_translation_rule
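# A hypothetical end-to-end sketch of the token-threading pattern above (not
# part of the original source; infeed values must be supplied by the host
# runtime, and `x` is any traced array used to anchor the token):
#
#   token = create_token(x)
#   (y,), token = infeed(token, shape=(ShapedArray((3,), np.float32),))
#   token = outfeed(token, y)
#
# Each effectful op consumes and returns a token, which pins the relative
# ordering of infeed and outfeed inside the compiled computation.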
def rng_uniform(a, b, shape):
"""Stateful PRNG generator. Experimental and its use is discouraged.
Returns uniformly distributed random numbers in the range [a, b)
You should use jax.random for most purposes; this function exists only for
niche use cases with special performance requirements.
This API may be removed at any time.
"""
return rng_uniform_p.bind(a, b, shape=tuple(shape))
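# Hypothetical usage sketch (not part of the original source):
#
#   u = rng_uniform(np.float32(0.), np.float32(1.), (2, 3))
#
# produces a float32 array of shape (2, 3) drawn from [0, 1); both bounds must
# be scalars of identical dtype, as enforced by the abstract eval rule below.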
def _rng_uniform_abstract_eval(a, b, *, shape):
if a.dtype != b.dtype:
raise ValueError(
"Arguments to rng_uniform must have identical dtypes, got {} "
"and {}.".format(a.dtype, b.dtype))
if a.shape != () or b.shape != ():
raise ValueError(
"Arguments to rng_uniform must be scalars; got shapes {} and {}."
.format(a.shape, b.shape))
return ShapedArray(shape, a.dtype)
def _rng_uniform_translation_rule(c, a, b, *, shape):
xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)
return xops.RngUniform(a, b, xla_shape)
rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
xla.translations[rng_uniform_p] = _rng_uniform_translation_rule
def _iota_abstract_eval(*, dtype, shape, dimension):
_check_shapelike("iota", "shape", shape)
if not any(dtypes.issubdtype(dtype, t) for t in _num):
msg = 'iota does not accept dtype {}. Accepted dtypes are subtypes of {}.'
typename = str(np.dtype(dtype).name)
accepted_typenames = (t.__name__ for t in _num)
raise TypeError(msg.format(typename, ', '.join(accepted_typenames)))
if not 0 <= dimension < len(shape):
raise ValueError("iota dimension must be between 0 and len(shape), got "
f"dimension={dimension} for shape {shape}")
return ShapedArray(shape, dtype)
def _iota_translation_rule(c, dtype, shape, dimension):
etype = xla_client.dtype_to_etype(dtype)
xla_shape = xc.Shape.array_shape(etype, shape)
return xops.Iota(c, xla_shape, dimension)
iota_p = Primitive('iota')
iota_p.def_impl(partial(xla.apply_primitive, iota_p))
iota_p.def_abstract_eval(_iota_abstract_eval)
xla.translations[iota_p] = _iota_translation_rule
### util
_ndim = np.ndim
def _dilate_shape(shape, dilation):
"""Utility function for computing the shape resulting from a dilation."""
if not np.all(np.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return np.where(shape == 0, 0,
np.multiply(dilation, np.subtract(shape, 1)) + 1)
def _ceil_divide(x1, x2):
return -np.floor_divide(np.negative(x1), x2)
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
"""Convert padding string to list of pairs of pad values."""
PaddingType = xla_client.PaddingType
if isinstance(padding, str):
mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
try:
padding = mapping[padding.upper()]
except KeyError as err:
msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
raise RuntimeError(msg.format(padding)) from err
if padding == PaddingType.SAME:
out_shape = _ceil_divide(in_shape, window_strides)
pad_sizes = np.maximum(0, (out_shape - 1) * window_strides +
window_shape - in_shape)
return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
elif padding == PaddingType.VALID:
return [(0, 0)] * len(in_shape)
else:
msg = "Unknown padding type: {}."
raise TypeError(msg.format(padding))
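# Worked example of the 'SAME' branch above (hypothetical numbers, not from
# the original source): in_shape (10,), window_shape (3,), window_strides (2,)
# gives out_shape = ceil(10 / 2) = 5 and
# pad_size = max(0, (5 - 1) * 2 + 3 - 10) = 1, split as (0, 1): one element of
# padding on the high side only. 'VALID' always yields (0, 0) pairs.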
def _check_same_dtypes(name, ignore_fp_precision, *ttypes):
"""Check that dtypes agree, possibly ignoring float precision."""
# the `ignore_fp_precision` flag exists because the XLA shape inference logic
# allows mixed floating point precision, but the HLO verifier often rejects it
types = list(map(np.dtype, ttypes)) # canonicalize
if ignore_fp_precision:
types = [
np.floating if dtypes.issubdtype(dtype, np.floating)
else np.complexfloating if dtypes.issubdtype(dtype, np.complexfloating)
else dtype for dtype in types]
if len({dtypes.canonicalize_dtype(t) for t in types}) != 1:
if ignore_fp_precision:
msg = ("{} requires arguments to have same dtypes up to floating point "
"precision, got {}.")
else:
msg = "{} requires arguments to have the same dtypes, got {}."
raise TypeError(msg.format(name, ", ".join(map(str, types))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
"""Check that conv shapes are valid and are consistent with window_strides."""
if len(lhs_shape) != len(rhs_shape):
msg = "Arguments to {} must have same rank, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if len(lhs_shape) < 2:
msg = "Arguments to {} must have rank at least 2, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if lhs_shape[1] != rhs_shape[1]:
msg = "Arguments to {} must agree on input feature size, got {} and {}."
raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
_check_shapelike(name, "window_strides", window_strides)
if not np.all(np.greater(window_strides, 0)):
msg = "All elements of window_strides must be positive, got {}."
raise TypeError(msg.format(window_strides))
if len(window_strides) != len(lhs_shape) - 2:
msg = "{} window_strides has wrong length: expected {}, got {}."
expected_length = len(lhs_shape) - 2
raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):
"""Compute the shape tuple of a conv given input shapes in canonical order."""
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
if len(pads) != len(lhs_shape) - 2:
msg = "Wrong number of explicit pads for convolution: expected {}, got {}."
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))
lhs_padded = np.add(lhs_shape[2:], np.sum(np.array(pads).reshape(-1, 2),
axis=1))
out_space = np.floor_divide(
np.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = np.maximum(0, out_space)
assert lhs_shape[0] % batch_group_count == 0
out_shape = (lhs_shape[0] // batch_group_count, rhs_shape[0])
return tuple(out_shape + tuple(out_space))
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = np.take(lhs_shape, lhs_perm)
rhs_trans = np.take(rhs_shape, rhs_perm)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
return tuple(np.take(out_trans, np.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = np.take(lhs_shape, lhs_perm)
rhs_trans = np.take(rhs_shape, rhs_perm)
if isinstance(padding, str):
padding = [_conv_transpose_padding(k, s, padding)
for k,s in zip(rhs_trans[2:], window_strides)]
padding = list(map(np.sum, padding))
unpad_out_space = [(i-1) * s - k + 2
for i, k, s in zip(lhs_trans[2:],
rhs_trans[2:],
window_strides)]
out_space = np.sum([unpad_out_space, padding], axis=0).tolist()
out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
return tuple(np.take(out_trans, np.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj, non_zero_shape=False):
"""Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints)."""
if not isinstance(obj, (tuple, list, np.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
# bool(obj) for an ndarray raises an error, so we check len
if not len(obj): # pylint: disable=g-explicit-length-test
return
obj_arr = np.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
    raise TypeError(msg.format(fun_name, arg_name, obj_arr.ndim))
try:
canonicalize_shape(obj_arr)
except TypeError as err:
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj)))) from err
lower_bound, bound_error = (
(1, "strictly positive") if non_zero_shape else (0, "nonnegative"))
if not (obj_arr >= lower_bound).all():
msg = "{} {} must have every element be {}, got {}."
raise TypeError(msg.format(fun_name, arg_name, bound_error, obj))
def _dynamic_slice_indices(operand, start_indices):
if len(start_indices) != operand.ndim:
msg = ("Length of slice indices must match number of operand dimensions ({} "
"vs {})")
raise ValueError(msg.format(len(start_indices), operand.shape))
# map int over operand.shape to raise any dynamic-shape errors
safe_map(int, operand.shape)
if not isinstance(start_indices, (tuple, list)):
if start_indices.ndim != 1:
raise ValueError("Slice indices must be a 1D sequence, got {}"
.format(start_indices.shape))
return select(lt(start_indices, _zeros(start_indices)),
add(start_indices, _const(start_indices, operand.shape)),
start_indices)
else:
return [np.asarray(i + d if i < 0 else i, getattr(i, 'dtype', dtypes.int_))
if isinstance(i, (int, np.integer))
else select(lt(i, _const(i, 0)), add(i, _const(i, d)), i)
for i, d in zip(start_indices, operand.shape)]
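# Illustrative note (not part of the original source): negative start indices
# wrap NumPy-style. For an operand of shape (5, 7) and start_indices (-1, 2)
# the effective start becomes (4, 2); keeping the slice in bounds is left to
# dynamic_slice's own clamping semantics.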
def _const(example, val):
if dtypes.is_python_scalar(example):
return dtypes.scalar_type_of(example)(val)
return np.array(val, _dtype(example))
_zeros: Callable = partial(full_like, fill_value=0)
_zero: Callable = partial(full_like, shape=(), fill_value=0)
_ones: Callable = partial(full_like, fill_value=1)
_one: Callable = partial(full_like, shape=(), fill_value=1)
_twos: Callable = partial(full_like, fill_value=2)
_two: Callable = partial(full_like, shape=(), fill_value=2)
dtype: Callable = dtypes.result_type
_dtype: Callable = dtypes.result_type
def _iscomplex(x) -> bool:
return dtypes.issubdtype(_dtype(x), np.complexfloating)
def ranges_like(*xs):
start = 0
for x in xs:
x_len = len(x)
yield range(start, start + x_len)
start += x_len
def remaining(original, *removed_lists):
removed = set(itertools.chain(*removed_lists))
return [i for i in original if i not in removed]
def _canonicalize_precision(precision):
if precision is None:
return None
if isinstance(precision, Precision) or (
isinstance(precision, tuple)
and len(precision) == 2
and all(isinstance(p, Precision) for p in precision)
):
return precision
else:
raise ValueError("Precision argument must be None, a lax.Precision value "
f"or a tuple of two lax.Precision values; got {precision}")
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers
) -> ConvDimensionNumbers:
"""Converts convolution `dimension_numbers` to a `ConvDimensionNumbers`.
Args:
lhs_shape: tuple of nonnegative integers, shape of the convolution input.
rhs_shape: tuple of nonnegative integers, shape of the convolution kernel.
dimension_numbers: None or a tuple/list of strings or a ConvDimensionNumbers
object following the convolution dimension number specification format in
xla_client.py.
Returns:
A `ConvDimensionNumbers` object that represents `dimension_numbers` in the
canonical form used by lax functions.
"""
if isinstance(dimension_numbers, ConvDimensionNumbers):
return dimension_numbers
if len(lhs_shape) != len(rhs_shape):
msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))
if dimension_numbers is None:
iota = tuple(range(len(lhs_shape)))
return ConvDimensionNumbers(iota, iota, iota)
elif isinstance(dimension_numbers, (list, tuple)):
if len(dimension_numbers) != 3:
msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
raise TypeError(msg.format(len(dimension_numbers)))
if not all(isinstance(elt, str) for elt in dimension_numbers):
msg = "convolution dimension_numbers elements must be strings, got {}."
raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
"of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
for i, elt in enumerate(dimension_numbers):
if len(elt) != len(lhs_shape):
raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))
lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
else:
msg = "convolution dimension_numbers must be tuple/list or None, got {}."
raise TypeError(msg.format(type(dimension_numbers)))
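# Illustrative usage sketch (assumed, not from the original module):
# >>> dn = conv_dimension_numbers((1, 28, 28, 3), (3, 3, 3, 16),
# ...                             ('NHWC', 'HWIO', 'NHWC'))
# >>> (dn.lhs_spec, dn.rhs_spec, dn.out_spec)
# ((0, 3, 1, 2), (3, 2, 0, 1), (0, 3, 1, 2))
# i.e. the batch/feature (or output/input feature) dimensions come first in
# each spec, followed by the spatial dimensions in kernel-spec order.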
def conv_general_permutations(dimension_numbers):
"""Utility for convolution dimension permutations relative to Conv HLO."""
lhs_spec, rhs_spec, out_spec = dimension_numbers
lhs_char, rhs_char, out_char = charpairs = ("N", "C"), ("O", "I"), ("N", "C")
for i, (a, b) in enumerate(charpairs):
if not dimension_numbers[i].count(a) == dimension_numbers[i].count(b) == 1:
msg = ("convolution dimension_numbers[{}] must contain the characters "
"'{}' and '{}' exactly once, got {}.")
raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
msg = ("convolution dimension_numbers[{}] cannot have duplicate "
"characters, got {}.")
raise TypeError(msg.format(i, dimension_numbers[i]))
if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
set(out_spec) - set(out_char)):
msg = ("convolution dimension_numbers elements must each have the same "
"set of spatial characters, got {}.")
raise TypeError(msg.format(dimension_numbers))
def getperm(spec, charpair):
spatial = (i for i, c in enumerate(spec) if c not in charpair)
if spec is not rhs_spec:
spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
return lhs_perm, rhs_perm, out_perm
def _conv_general_proto(dimension_numbers):
assert type(dimension_numbers) is ConvDimensionNumbers
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_client.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _conv_general_vjp_lhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation) -> List[Tuple[int, int]]:
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
pad_before = np.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
pad_after = (np.add(lhs_dilated_shape, rhs_dilated_shape) - 1
- out_dilated_shape - pad_before)
return safe_zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation):
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1
return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]
def _balanced_eq(x, z, y):
return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
a_dtype, b_dtype = _dtype(a), _dtype(b)
if a_dtype != b_dtype:
higher_dtype = dtypes.promote_types(a_dtype, b_dtype)
if higher_dtype == a_dtype:
a = convert_element_type(a, b_dtype)
else:
b = convert_element_type(b, a_dtype)
return eq(a, b)
def _abstractify(x):
return raise_to_shaped(core.get_aval(x))
def _check_user_dtype_supported(dtype, fun_name=None):
  # Avoid using `dtype in [...]` because of numpy dtype equality overloading.
if isinstance(dtype, type) and dtype in {bool, int, float, complex}:
return
np_dtype = np.dtype(dtype)
if np_dtype.kind not in "biufc" and np_dtype.type != dtypes.bfloat16:
msg = f"JAX only supports number and bool dtypes, got dtype {dtype}"
msg += f" in {fun_name}" if fun_name else ""
raise TypeError(msg)
if dtype is not None and np_dtype != dtypes.canonicalize_dtype(dtype):
msg = ("Explicitly requested dtype {} {} is not available, "
"and will be truncated to dtype {}. To enable more dtypes, set the "
"jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell "
"environment variable. "
"See https://github.com/google/jax#current-gotchas for more.")
fun_name = f"requested in {fun_name}" if fun_name else ""
truncated_dtype = dtypes.canonicalize_dtype(dtype).name
    warnings.warn(msg.format(dtype, fun_name, truncated_dtype))
def _canonicalize_axis(axis, num_dims):
"""Canonicalize an axis in [-num_dims, num_dims) to [0, num_dims)."""
axis = operator.index(axis)
if not -num_dims <= axis < num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
if axis < 0:
axis = axis + num_dims
return axis
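# Illustrative sketch (assumed): _canonicalize_axis(-1, 3) -> 2 and
# _canonicalize_axis(1, 3) -> 1, while _canonicalize_axis(3, 3) raises a
# ValueError because 3 lies outside [-3, 3).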
tie_in_p = Primitive('tie_in')
@config.register_omnistaging_disabler
def omnistaging_disabler() -> None:
global tie_in
def tie_in(x: Array, y: Array) -> Array:
"""Returns the value of ``y`` but with a fake data dependence on ``x``.
When staging to XLA (e.g. running under jit or pmap), values that don't depend
on computation inputs are computed op-by-op, and folded into the XLA
computation as constants.
``tie_in`` provides a way to explicitly stage values into the computation.
When staging to XLA and ``x`` is already staged, then the result of ``tie_in``
is ``y``, but staged to XLA. Downstream use of the result will also be staged
to XLA.
For example, ``lax.sin(const)`` would be constant-folded if ``const`` is
a constant array, but ``lax.sin(lax.tie_in(x, const))``, will be staged to
XLA as long as ``x`` is staged to XLA.
"""
if config.omnistaging_enabled:
return y
else:
return tie_in_p.bind(x, y)
# If lax has already been imported, we need to monkey-patch the
# lax/__init__.py import of tie_in. If not (i.e. if this is running at lax
# module creation time) then we'll get an import error.
try:
jax.lax.tie_in = tie_in
except AttributeError:
pass
def _tie_in_transpose_rule(t, x, y):
if ad.is_undefined_primal(x):
return [ad_util.Zero(x.aval), t]
else:
return [ad_util.Zero.from_value(x), t]
def _tie_in_batch_rule(batched_args, batch_dims):
y = tie_in(*batched_args)
_, bdim_y = batch_dims
return y, bdim_y
def _tie_in_impl(x, y):
core.check_valid_jaxtype(x)
core.check_valid_jaxtype(y)
return y
def _tie_in_jvp(primals, tangents):
x, y = primals
x_dot, y_dot = tangents
if type(y_dot) is ad_util.Zero or core.get_aval(y_dot).dtype is dtypes.float0:
return y, y_dot # skip tying in in this case
else:
return ad.linear_jvp(tie_in_p, primals, tangents)
tie_in_p.def_impl(_tie_in_impl)
tie_in_p.def_abstract_eval(lambda x, y: raise_to_shaped(y))
xla.translations[tie_in_p] = lambda c, x, y: y
ad.primitive_jvps[tie_in_p] = _tie_in_jvp
ad.primitive_transposes[tie_in_p] = partial(ad.linear_transpose2, _tie_in_transpose_rule)
batching.primitive_batchers[tie_in_p] = _tie_in_batch_rule
masking.masking_rules[tie_in_p] = lambda vals, logical_shapes: vals[1]
| avg_line_length: 42.877124
| max_line_length: 141
| alphanum_fraction: 0.706781
| content_no_comment:
import builtins
import functools
import itertools
import operator
from typing import (Any, Callable, List, NamedTuple, Optional, Sequence, Union, Tuple)
import warnings
import numpy as np
import jax
from jax import core
from jax import ad_util
from jax import api
from jax import api_util
from jax import linear_util as lu
from jax import dtypes
from jax import lazy
from jax import tree_util
from jax.config import flags, config
from jax.core import (Primitive, _canonicalize_dimension, UnshapedArray,
ShapedArray, ConcreteArray, raise_to_shaped,
abstract_token, canonicalize_shape)
from jax.abstract_arrays import array_types
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.interpreters import pxla
from jax.interpreters import ad
from jax.interpreters import invertible_ad as iad
from jax.interpreters import batching
from jax.interpreters import masking
from jax.util import (cache, safe_zip, partial, prod, safe_map, canonicalize_axis,
split_list)
from jax.tree_util import tree_map
from jax.lib import pytree
from jax.lib import xla_bridge
from jax.lib import xla_client
xb = xla_bridge
xc = xla_client
xops = xla_client.ops
FLAGS = flags.FLAGS
_max = builtins.max
_min = builtins.min
_reduce = functools.reduce
Array = Any
DType = Any
Shape = Sequence[int]
def _try_broadcast_shapes(shapes):
assert shapes
if len(shapes) == 1: return shapes[0]
rank, *others = {len(shape) for shape in shapes}
  if others: return None
  if not rank: return ()
  result_shape = [None] * rank
for i, sizes in enumerate(zip(*shapes)):
if sizes[:-1] == sizes[1:]:
      result_shape[i] = sizes[0]
    else:
sizes = [d for d in sizes if d != 1]
if sizes[:-1] != sizes[1:]:
        return None
      result_shape[i] = sizes[0] if sizes else 1
return tuple(result_shape)
@cache()
def broadcast_shapes(*shapes):
if len(shapes) == 1:
return shapes[0]
ndim = _max(len(shape) for shape in shapes)
shapes = [(1,) * (ndim - len(shape)) + shape for shape in shapes]
result_shape = _try_broadcast_shapes(shapes)
if result_shape is None:
raise ValueError("Incompatible shapes for broadcasting: {}"
.format(tuple(map(tuple, shapes))))
return result_shape
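# Illustrative sketch (assumed): shapes are right-aligned by left-padding with
# 1s before the per-dimension merge, so
# >>> broadcast_shapes((3, 1), (1, 4))
# (3, 4)
# >>> broadcast_shapes((4,), (2, 1, 4))
# (2, 1, 4)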
def _identity(x): return x
def neg(x: Array) -> Array:
return neg_p.bind(x)
def sign(x: Array) -> Array:
return sign_p.bind(x)
def nextafter(x1: Array, x2: Array) -> Array:
return nextafter_p.bind(_brcast(x1, x2), _brcast(x2, x1))
def floor(x: Array) -> Array:
return floor_p.bind(x)
def ceil(x: Array) -> Array:
return ceil_p.bind(x)
def round(x: Array) -> Array:
return round_p.bind(x)
def is_finite(x: Array) -> Array:
return is_finite_p.bind(x)
def exp(x: Array) -> Array:
return exp_p.bind(x)
def expm1(x: Array) -> Array:
return expm1_p.bind(x)
def log(x: Array) -> Array:
return log_p.bind(x)
def log1p(x: Array) -> Array:
return log1p_p.bind(x)
def tanh(x: Array) -> Array:
return tanh_p.bind(x)
def sin(x: Array) -> Array:
return sin_p.bind(x)
def cos(x: Array) -> Array:
return cos_p.bind(x)
def atan2(x: Array, y: Array) -> Array:
return atan2_p.bind(x, y)
def betainc(a: Array, b: Array, x: Array) -> Array:
return regularized_incomplete_beta_p.bind(a, b, x)
def lgamma(x: Array) -> Array:
return lgamma_p.bind(x)
def digamma(x: Array) -> Array:
return digamma_p.bind(x)
def igamma(a: Array, x: Array) -> Array:
return igamma_p.bind(a, x)
def igammac(a: Array, x: Array) -> Array:
return igammac_p.bind(a, x)
def igamma_grad_a(a: Array, x: Array) -> Array:
return igamma_grad_a_p.bind(a, x)
def random_gamma_grad(a: Array, x: Array) -> Array:
return random_gamma_grad_p.bind(a, x)
def bessel_i0e(x: Array) -> Array:
return bessel_i0e_p.bind(x)
def bessel_i1e(x: Array) -> Array:
return bessel_i1e_p.bind(x)
def erf(x: Array) -> Array:
return erf_p.bind(x)
def erfc(x: Array) -> Array:
return erfc_p.bind(x)
def erf_inv(x: Array) -> Array:
return erf_inv_p.bind(x)
def real(x: Array) -> Array:
return real_p.bind(x)
def imag(x: Array) -> Array:
return imag_p.bind(x)
def complex(x: Array, y: Array) -> Array:
return complex_p.bind(_brcast(x, y), _brcast(y, x))
def conj(x: Array) -> Array:
return conj_p.bind(x, input_dtype=_dtype(x))
def abs(x: Array) -> Array:
return abs_p.bind(x)
def pow(x: Array, y: Array) -> Array:
return pow_p.bind(x, y)
def integer_pow(x: Array, y: int) -> Array:
if y == 0:
return _ones(x)
elif y == 1:
return x
else:
return integer_pow_p.bind(x, y=y)
def sqrt(x: Array) -> Array:
return sqrt_p.bind(x)
def rsqrt(x: Array) -> Array:
return rsqrt_p.bind(x)
def bitwise_not(x: Array) -> Array:
return not_p.bind(x)
def bitwise_and(x: Array, y: Array) -> Array:
return and_p.bind(x, y)
def bitwise_or(x: Array, y: Array) -> Array:
return or_p.bind(x, y)
def bitwise_xor(x: Array, y: Array) -> Array:
return xor_p.bind(x, y)
def population_count(x: Array) -> Array:
return population_count_p.bind(x)
def add(x: Array, y: Array) -> Array:
return add_p.bind(x, y)
def sub(x: Array, y: Array) -> Array:
return sub_p.bind(x, y)
def mul(x: Array, y: Array) -> Array:
return mul_p.bind(x, y)
def div(x: Array, y: Array) -> Array:
return div_p.bind(x, y)
def rem(x: Array, y: Array) -> Array:
return rem_p.bind(x, y)
def max(x: Array, y: Array) -> Array:
return max_p.bind(x, y)
def min(x: Array, y: Array) -> Array:
return min_p.bind(x, y)
def shift_left(x: Array, y: Array) -> Array:
return shift_left_p.bind(x, y)
def shift_right_arithmetic(x: Array, y: Array) -> Array:
return shift_right_arithmetic_p.bind(x, y)
def shift_right_logical(x: Array, y: Array) -> Array:
return shift_right_logical_p.bind(x, y)
def eq(x: Array, y: Array) -> Array:
return eq_p.bind(x, y)
def ne(x: Array, y: Array) -> Array:
return ne_p.bind(x, y)
def ge(x: Array, y: Array) -> Array:
return ge_p.bind(x, y)
def gt(x: Array, y: Array) -> Array:
return gt_p.bind(x, y)
def le(x: Array, y: Array) -> Array:
return le_p.bind(x, y)
def lt(x: Array, y: Array) -> Array:
return lt_p.bind(x, y)
def convert_element_type(operand: Array, new_dtype: DType) -> Array:
new_dtype = dtypes.canonicalize_dtype(new_dtype)
if type(operand) in dtypes.python_scalar_dtypes:
operand = np.asarray(operand, new_dtype)
old_dtype = dtypes.canonicalize_dtype(_dtype(operand))
if old_dtype == new_dtype:
return operand
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
msg = "Casting complex values to real discards the imaginary part"
warnings.warn(msg, np.ComplexWarning, stacklevel=2)
return convert_element_type_p.bind(
operand, new_dtype=new_dtype, old_dtype=old_dtype)
def bitcast_convert_type(operand: Array, new_dtype: DType) -> Array:
new_dtype = dtypes.canonicalize_dtype(new_dtype)
old_dtype = _dtype(operand)
if old_dtype != new_dtype:
return bitcast_convert_type_p.bind(operand, new_dtype=new_dtype)
else:
return operand
def clamp(min: Array, x: Array, max: Array) -> Array:
return clamp_p.bind(min, x, max)
def concatenate(operands: Sequence[Array], dimension: int) -> Array:
return concatenate_p.bind(*operands, dimension=dimension)
Precision = xla_client.PrecisionConfig.Precision
Precision.__str__ = lambda precision: precision.name
PrecisionType = Any
PrecisionLike = Union[None, PrecisionType, Tuple[PrecisionType, PrecisionType]]
class ConvDimensionNumbers(NamedTuple):
lhs_spec: Sequence[int]
rhs_spec: Sequence[int]
out_spec: Sequence[int]
ConvGeneralDilatedDimensionNumbers = Union[
None, ConvDimensionNumbers, Tuple[str, str, str]]
def conv_general_dilated(
lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]] = None,
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
feature_group_count: int = 1, batch_group_count: int = 1,
precision: PrecisionLike = None) -> Array:
dnums = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
if lhs_dilation is None:
lhs_dilation = (1,) * (lhs.ndim - 2)
elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):
raise ValueError(
"String padding is not implemented for transposed convolution "
"using this op. Please either exactly specify the required padding or "
"use conv_transpose.")
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
if isinstance(padding, str):
lhs_perm, rhs_perm, _ = dnums
rhs_shape = np.take(rhs.shape, rhs_perm)[2:]
effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]
padding = padtype_to_pads(
np.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,
window_strides, padding)
return conv_general_dilated_p.bind(
lhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),
lhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),
dimension_numbers=dnums,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
lhs_shape=lhs.shape, rhs_shape=rhs.shape,
precision=_canonicalize_precision(precision))
def dot(lhs: Array, rhs: Array, precision: PrecisionLike = None) -> Array:
if 1 <= lhs.ndim <= 2 and 1 <= rhs.ndim <= 2 and lhs.shape[-1] == rhs.shape[0]:
return dot_general(lhs, rhs, (((lhs.ndim - 1,), (0,)), ((), ())),
precision=precision)
else:
raise TypeError("Incompatible shapes for dot: got {} and {}.".format(
lhs.shape, rhs.shape))
DotDimensionNumbers = Tuple[Tuple[Sequence[int], Sequence[int]],
Tuple[Sequence[int], Sequence[int]]]
def dot_general(lhs: Array, rhs: Array, dimension_numbers: DotDimensionNumbers,
precision: PrecisionLike = None) -> Array:
contract_dims_seq, batch_dims_seq = dimension_numbers
contract_dims = tuple(map(lambda x: tuple(x), contract_dims_seq))
batch_dims = tuple(map(lambda x: tuple(x), batch_dims_seq))
return dot_general_p.bind(lhs, rhs,
dimension_numbers=(contract_dims, batch_dims),
precision=_canonicalize_precision(precision))
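# Illustrative sketch (assumed): a batched matmul of lhs with shape (b, m, k)
# and rhs with shape (b, k, n) contracts lhs dim 2 with rhs dim 1 and batches
# over dim 0, producing an output of shape (b, m, n):
# >>> out = dot_general(lhs, rhs, dimension_numbers=(((2,), (1,)), ((0,), (0,))))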
def broadcast(operand: Array, sizes: Sequence[int]) -> Array:
dims = tuple(range(len(sizes), len(sizes) + np.ndim(operand)))
return broadcast_in_dim(operand, tuple(sizes) + np.shape(operand), dims)
def broadcast_in_dim(operand: Array, shape: Shape,
broadcast_dimensions: Sequence[int]) -> Array:
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
if (np.ndim(operand) == len(shape) and not len(broadcast_dimensions)
and isinstance(operand, (xla.DeviceArray, core.Tracer))):
return operand
return broadcast_in_dim_p.bind(
operand, shape=tuple(shape),
broadcast_dimensions=tuple(broadcast_dimensions))
def broadcast_to_rank(x: Array, rank: int) -> Array:
return broadcast(x, (1,) * (rank - x.ndim))
def reshape(operand: Array, new_sizes: Shape,
dimensions: Optional[Sequence[int]] = None) -> Array:
  new_sizes = canonicalize_shape(new_sizes)
  new_sizes = tuple(new_sizes)
same_shape = np.shape(operand) == new_sizes
same_dims = dimensions is None or tuple(dimensions) == tuple(range(np.ndim(operand)))
if np.shape(operand) and same_shape and same_dims:
return operand
else:
return reshape_p.bind(
operand, new_sizes=new_sizes,
dimensions=None if dimensions is None or same_dims else tuple(dimensions))
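# Illustrative sketch (assumed): reshape(x, (6,)) flattens a (2, 3) array in
# row-major order, and reshape(x, (3, 2), dimensions=(1, 0)) first permutes
# the axes and then reshapes, matching np.transpose(x, (1, 0)).reshape(3, 2).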
def pad(operand: Array, padding_value: Array,
padding_config: Sequence[Tuple[int, int, int]]) -> Array:
return pad_p.bind(operand, padding_value, padding_config=tuple(padding_config))
def rev(operand: Array, dimensions: Sequence[int]) -> Array:
return rev_p.bind(operand, dimensions=tuple(dimensions))
def select(pred: Array, on_true: Array, on_false: Array) -> Array:
return select_p.bind(pred, on_true, on_false)
def slice(operand: Array, start_indices: Sequence[int],
limit_indices: Sequence[int],
strides: Optional[Sequence[int]] = None) -> Array:
return slice_p.bind(operand, start_indices=tuple(start_indices),
limit_indices=tuple(limit_indices),
strides=None if strides is None else tuple(strides))
def dynamic_slice(operand: Array, start_indices: Sequence[Array],
slice_sizes: Shape) -> Array:
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_slice_p.bind(operand, *start_indices,
slice_sizes=tuple(slice_sizes))
def dynamic_update_slice(operand: Array, update: Array,
start_indices: Array) -> Array:
start_indices = _dynamic_slice_indices(operand, start_indices)
return dynamic_update_slice_p.bind(operand, update, *start_indices)
class GatherDimensionNumbers(NamedTuple):
offset_dims: Sequence[int]
collapsed_slice_dims: Sequence[int]
start_index_map: Sequence[int]
def gather(operand: Array, start_indices: Array,
dimension_numbers: GatherDimensionNumbers,
slice_sizes: Shape) -> Array:
return gather_p.bind(
operand, start_indices, dimension_numbers=dimension_numbers,
slice_sizes=canonicalize_shape(slice_sizes))
class ScatterDimensionNumbers(NamedTuple):
update_window_dims: Sequence[int]
inserted_window_dims: Sequence[int]
scatter_dims_to_operand_dims: Sequence[int]
def scatter_add(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
jaxpr, consts = _reduction_jaxpr(add, _abstractify(_const(operand, 0)))
return scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def scatter_mul(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(_const(operand, 1)))
return scatter_mul_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def scatter_min(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
jaxpr, consts = _reduction_jaxpr(min, _abstractify(_const(operand, 0)))
return scatter_min_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def scatter_max(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
jaxpr, consts = _reduction_jaxpr(max, _abstractify(_const(operand, 0)))
return scatter_max_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
_scatter_reduction_computation = lambda x, y: y
def scatter(operand: Array, scatter_indices: Array, updates: Array,
dimension_numbers: ScatterDimensionNumbers, *,
indices_are_sorted: bool = False,
unique_indices: bool = False) -> Array:
jaxpr, consts = _reduction_jaxpr(_scatter_reduction_computation,
_abstractify(_const(operand, 0)))
return scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=jaxpr,
update_consts=consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
def index_take(src: Array, idxs: Array, axes: Sequence[int]) -> Array:
indices = concatenate([expand_dims(i, (1,)) for i in idxs], 1)
indices = indices % np.array([src.shape[ax] for ax in axes])
slice_sizes = list(src.shape)
for ax in axes:
slice_sizes[ax] = 1
offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=axes,
start_index_map=axes)
return gather(src, indices, dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
def transpose(operand: Array, permutation: Sequence[int]) -> Array:
permutation = tuple(permutation)
if permutation == tuple(range(len(permutation))):
return operand
else:
return transpose_p.bind(operand, permutation=permutation)
def argmin(operand: Array, axis: int,
index_dtype: DType) -> Tuple[Array, Array]:
return argmin_p.bind(operand, axes=(axis,),
index_dtype=dtypes.canonicalize_dtype(index_dtype))
def argmax(operand: Array, axis: int,
index_dtype: DType) -> Tuple[Array, Array]:
return argmax_p.bind(operand, axes=(axis,),
index_dtype=dtypes.canonicalize_dtype(index_dtype))
def reduce(operands: Array, init_values: Array, computation: Callable,
dimensions: Sequence[int]) -> Array:
flat_operands, operand_tree = tree_util.tree_flatten(operands)
flat_init_values, init_value_tree = tree_util.tree_flatten(init_values)
if operand_tree != init_value_tree:
raise ValueError('Operands must have the same tree structure as init_values:'
f' {operand_tree} vs. {init_value_tree}')
if len(flat_operands) != len(flat_init_values):
raise ValueError('Must have same total number of operands as init_values: '
f' {len(flat_operands)} vs. {len(flat_init_values)}')
monoid_reducer = _get_monoid_reducer(computation, flat_init_values)
if monoid_reducer:
return monoid_reducer(*flat_operands, dimensions)
else:
flat_init_avals = safe_map(_abstractify, flat_init_values)
jaxpr, consts, out_tree = _variadic_reduction_jaxpr(
computation, tuple(flat_init_avals), init_value_tree)
out = reduce_p.bind(*(flat_operands + flat_init_values), computation=computation,
jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
return tree_util.tree_unflatten(out_tree, out)
@cache()
def _reduction_jaxpr(computation, aval):
pval = pe.PartialVal.unknown(aval)
comp = lu.wrap_init(lambda x, y: (computation(x, y),))
jaxpr, _, consts = pe.trace_to_jaxpr(comp, (pval, pval), instantiate=False)
return jaxpr, consts
@cache()
def _variadic_reduction_jaxpr(computation, flat_avals, aval_tree):
avals = tree_util.tree_unflatten(aval_tree, flat_avals)
flat_in_avals, in_tree = tree_util.tree_flatten((avals, avals))
pvals = safe_map(pe.PartialVal.unknown, flat_in_avals)
comp = lu.wrap_init(computation)
flat_comp, out_tree = api_util.flatten_fun_nokwargs(comp, in_tree)
jaxpr, _, consts = pe.trace_to_jaxpr(flat_comp, tuple(pvals),
instantiate=False)
return jaxpr, consts, out_tree()
def _get_monoid_reducer(monoid_op: Callable, xs: Array) -> Optional[Callable]:
if len(xs) != 1:
return None
x, = xs
aval = core.get_aval(x)
dtype = _dtype(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
      return np.equal(aval.val, 0) and _reduce_sum
    elif monoid_op is mul:
return np.equal(aval.val, 1) and _reduce_prod
elif monoid_op is bitwise_or and dtype == np.bool_:
return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_or
elif monoid_op is bitwise_and and dtype == np.bool_:
return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_and
elif monoid_op is max:
return np.equal(aval.val, _get_max_identity(dtype)) and _reduce_max
elif monoid_op is min:
return np.equal(aval.val, _get_min_identity(dtype)) and _reduce_min
return None
def _get_max_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, np.inexact):
return np.array(-np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).min, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(False, np.bool_)
def _get_min_identity(dtype: DType) -> Array:
if dtypes.issubdtype(dtype, np.inexact):
return np.array(np.inf, dtype)
elif dtypes.issubdtype(dtype, np.integer):
return np.array(dtypes.iinfo(dtype).max, dtype)
elif dtypes.issubdtype(dtype, np.bool_):
return np.array(True, np.bool_)
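# Illustrative sketch (assumed): _get_max_identity(np.float32) -> -inf and
# _get_min_identity(np.int32) -> 2147483647, i.e. the identity elements of
# max and min reductions over those dtypes.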
def _reduce_sum(operand: Array, axes: Sequence[int]) -> Array:
return reduce_sum_p.bind(operand, axes=tuple(axes))
def _reduce_prod(operand: Array, axes: Sequence[int]) -> Array:
return reduce_prod_p.bind(operand, axes=tuple(axes))
def _reduce_max(operand: Array, axes: Sequence[int]) -> Array:
return reduce_max_p.bind(operand, axes=tuple(axes))
def _reduce_min(operand: Array, axes: Sequence[int]) -> Array:
return reduce_min_p.bind(operand, axes=tuple(axes))
def _reduce_or(operand: Array, axes: Sequence[int]) -> Array:
return reduce_or_p.bind(operand, axes=tuple(axes))
def _reduce_and(operand: Array, axes: Sequence[int]) -> Array:
return reduce_and_p.bind(operand, axes=tuple(axes))
def reduce_window(operand: Array, init_value: Array, computation: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if isinstance(padding, str):
dilated_window_dims = (window_dimensions if window_dilation is None else
_dilate_shape(window_dimensions, window_dilation))
padding = tuple(padtype_to_pads(operand.shape, dilated_window_dims,
window_strides, padding))
else:
padding = tuple(padding)
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
monoid_reducer = _get_monoid_window_reducer(computation, init_value)
if monoid_reducer:
return monoid_reducer(operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation)
else:
jaxpr, consts = _reduction_jaxpr(computation, _abstractify(init_value))
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=padding,
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _get_monoid_window_reducer(monoid_op: Callable, x: Array) -> Optional[Callable]:
aval = core.get_aval(x)
if (type(aval) is ConcreteArray) and aval.shape == ():
if monoid_op is add:
return aval.val == 0 and _reduce_window_sum
elif monoid_op is max:
return aval.val == _get_max_identity(aval.dtype) and _reduce_window_max
elif monoid_op is min:
return aval.val == _get_min_identity(aval.dtype) and _reduce_window_min
return None
def _reduce_window_sum(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_sum_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_prod(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
init_value = _const(operand, 1)
jaxpr, consts = _reduction_jaxpr(mul, _abstractify(init_value))
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_p.bind(
operand, init_value, jaxpr=jaxpr, consts=consts,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_max(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_max_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _reduce_window_min(operand: Array, window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Optional[Sequence[int]] = None,
window_dilation: Optional[Sequence[int]] = None) -> Array:
if base_dilation is None:
base_dilation = (1,) * len(window_dimensions)
if window_dilation is None:
window_dilation = (1,) * len(window_dimensions)
return reduce_window_min_p.bind(
operand, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _select_and_scatter(operand: Array, select: Callable,
window_dimensions: Shape, window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]], source: Array,
init_value: Array, scatter: Callable,
base_dilation: Sequence[int],
window_dilation: Sequence[int]) -> Array:
select_jaxpr, select_consts = _reduction_jaxpr(select, _abstractify(init_value))
scatter_jaxpr, scatter_consts = _reduction_jaxpr(scatter, _abstractify(init_value))
return select_and_scatter_p.bind(
operand, source, init_value, select_jaxpr=select_jaxpr,
select_consts=select_consts, scatter_jaxpr=scatter_jaxpr,
scatter_consts=scatter_consts, window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def _select_and_scatter_add(source: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]]) -> Array:
return select_and_scatter_add_p.bind(
source, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding))
def _select_and_gather_add(tangents: Array, operand: Array,
select_prim: core.Primitive,
window_dimensions: Shape,
window_strides: Sequence[int],
padding: Sequence[Tuple[int, int]],
base_dilation: Sequence[int],
window_dilation: Sequence[int]) -> Array:
return select_and_gather_add_p.bind(
tangents, operand, select_prim=select_prim,
window_dimensions=tuple(window_dimensions),
window_strides=tuple(window_strides), padding=tuple(padding),
base_dilation=tuple(base_dilation),
window_dilation=tuple(window_dilation))
def sort(operand: Union[Array, Sequence[Array]], dimension: int = -1,
is_stable: bool = True, num_keys: int = 1) -> Union[Array, Tuple[Array, ...]]:
if isinstance(operand, Sequence):
if len(operand) == 0:
raise TypeError("Sort requires at least one operand")
if not (1 <= num_keys <= len(operand)):
raise ValueError(f"num_keys={num_keys} must be between 1 and len(operand)={len(operand)}")
dimension = canonicalize_axis(dimension, len(operand[0].shape))
return tuple(sort_p.bind(*operand, dimension=dimension,
is_stable=is_stable,
num_keys=num_keys))
else:
if num_keys != 1:
raise ValueError(f"num_keys={num_keys} must equal 1 for a single operand.")
dimension = canonicalize_axis(dimension, len(operand.shape))
return sort_p.bind(operand, dimension=dimension, is_stable=is_stable, num_keys=1)[0]
def sort_key_val(keys: Array, values: Array, dimension: int = -1,
is_stable: bool = True) -> Tuple[Array, Array]:
dimension = canonicalize_axis(dimension, len(keys.shape))
k, v = sort_p.bind(keys, values, dimension=dimension, is_stable=is_stable, num_keys=1)
return k, v
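# Illustrative sketch (assumed): sort_key_val is the two-operand special case
# of the variadic sort above; for example
# >>> k, v = sort_key_val(np.array([3, 1, 2]), np.array([30, 10, 20]))
# reorders both arrays by the keys, giving k = [1, 2, 3] and v = [10, 20, 30].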
def top_k(operand: Array, k: int) -> Tuple[Array, Array]:
k = int(k)
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
return top_k_p.bind(operand, k=k)
def tie_in(x: Array, y: Array) -> Array:
return y
def full(shape: Shape, fill_value: Array, dtype: Optional[DType] = None) -> Array:
shape = canonicalize_shape(shape)
if np.shape(fill_value):
msg = "full must be called with scalar fill_value, got fill_value.shape {}."
raise TypeError(msg.format(np.shape(fill_value)))
dtype = dtypes.canonicalize_dtype(dtype or _dtype(fill_value))
fill_value = convert_element_type(fill_value, dtype)
return broadcast(fill_value, shape)
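# Illustrative sketch (assumed): full((2, 3), 0.5) converts the scalar fill
# value to the canonical dtype and broadcasts it to a (2, 3) array; a
# non-scalar fill_value raises a TypeError.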
def _device_put_raw(x):
if isinstance(x, xla.DeviceArray):
return x
else:
aval = raise_to_shaped(core.get_aval(x))
return xla.array_result_handler(None, aval)(*xla.device_put(x))
def iota(dtype: DType, size: int) -> Array:
if config.omnistaging_enabled:
dtype = dtypes.canonicalize_dtype(dtype)
size = core.concrete_or_error(int, size, "size argument of lax.iota")
return iota_p.bind(dtype=dtype, shape=(size,), dimension=0)
else:
size = size if type(size) is masking.Poly else int(size)
shape = canonicalize_shape((size,))
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.iota(dtype, shape[0])
aval = ShapedArray(shape, dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def broadcasted_iota(dtype: DType, shape: Shape, dimension: int) -> Array:
dtype = dtypes.canonicalize_dtype(dtype)
shape = canonicalize_shape(shape)
dimension = core.concrete_or_error(
int, dimension, "dimension argument of lax.broadcasted_iota")
return iota_p.bind(dtype=dtype, shape=shape, dimension=dimension)
def _eye(dtype: DType, shape: Shape, offset: int) -> Array:
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
if config.omnistaging_enabled:
bool_eye = eq(add(broadcasted_iota(np.int32, (N, M), 0), np.int32(offset)),
broadcasted_iota(np.int32, (N, M), 1))
return convert_element_type_p.bind(bool_eye, new_dtype=dtype,
old_dtype=np.bool_)
else:
lazy_expr = lazy.eye(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def _delta(dtype: DType, shape: Shape, axes: Sequence[int]) -> Array:
shape = tuple(map(int, shape))
axes = tuple(map(int, axes))
dtype = dtypes.canonicalize_dtype(dtype)
base_shape = tuple(np.take(shape, axes))
if config.omnistaging_enabled:
iotas = [broadcasted_iota(np.uint32, base_shape, i)
for i in range(len(base_shape))]
eyes = [eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])]
result = convert_element_type_p.bind(_reduce(operator.and_, eyes),
new_dtype=dtype, old_dtype=np.bool_)
return broadcast_in_dim(result, shape, axes)
else:
lazy_expr = lazy.broadcast(lazy.delta(dtype, base_shape), shape, axes)
aval = ShapedArray(shape, dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def _tri(dtype: DType, shape: Shape, offset: int) -> Array:
N, M = tuple(map(int, shape))
offset = int(offset)
dtype = dtypes.canonicalize_dtype(dtype)
if config.omnistaging_enabled:
bool_tri = ge(add(broadcasted_iota(np.int32, (N, M), 0), np.int32(offset)),
broadcasted_iota(np.int32, (N, M), 1))
return convert_element_type_p.bind(bool_tri, old_dtype=np.int32,
new_dtype=dtype)
else:
lazy_expr = lazy.tri(dtype, (N, M), offset)
aval = ShapedArray((N, M), dtype)
return xla.make_device_array(aval, None, lazy_expr, xla.DeviceConstant())
def stop_gradient(x):
def stop(x):
if (dtypes.issubdtype(_dtype(x), np.floating) or
dtypes.issubdtype(_dtype(x), np.complexfloating)):
return ad_util.stop_gradient_p.bind(x)
else:
      return x
  return tree_map(stop, x)
def conv(lhs: Array, rhs: Array, window_strides: Sequence[int],
padding: str, precision: PrecisionLike = None) -> Array:
return conv_general_dilated(lhs, rhs, window_strides, padding,
precision=precision)
def conv_with_general_padding(lhs: Array, rhs: Array,
window_strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
lhs_dilation: Optional[Sequence[int]],
rhs_dilation: Optional[Sequence[int]],
precision: PrecisionLike = None) -> Array:
return conv_general_dilated(
lhs, rhs, window_strides, padding, lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation, precision=precision)
def _conv_transpose_padding(k, s, padding):
if padding == 'SAME':
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(np.ceil(pad_len / 2))
elif padding == 'VALID':
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError('Padding mode must be `SAME` or `VALID`.')
pad_b = pad_len - pad_a
return pad_a, pad_b
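# Illustrative sketch (assumed): for kernel size k=3 and stride s=2,
# _conv_transpose_padding(3, 2, 'SAME') -> (2, 1) and
# _conv_transpose_padding(3, 2, 'VALID') -> (2, 2).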
def _flip_axes(x, axes):
for axis in axes:
x = np.flip(x, axis)
return x
def conv_transpose(lhs: Array, rhs: Array, strides: Sequence[int],
padding: Union[str, Sequence[Tuple[int, int]]],
rhs_dilation: Optional[Sequence[int]] = None,
dimension_numbers: ConvGeneralDilatedDimensionNumbers = None,
transpose_kernel: bool = False,
precision: PrecisionLike = None) -> Array:
assert len(lhs.shape) == len(rhs.shape) and len(lhs.shape) >= 2
ndims = len(lhs.shape)
one = (1,) * (ndims - 2)
if dimension_numbers is None:
if ndims == 2:
dimension_numbers = ('NC', 'IO', 'NC')
elif ndims == 3:
dimension_numbers = ('NHC', 'HIO', 'NHC')
elif ndims == 4:
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
elif ndims == 5:
dimension_numbers = ('NHWDC', 'HWDIO', 'NHWDC')
else:
raise ValueError('No 4+ dimensional dimension_number defaults.')
dn = conv_dimension_numbers(lhs.shape, rhs.shape, dimension_numbers)
k_shape = np.take(rhs.shape, dn.rhs_spec)
k_sdims = k_shape[2:]
pads: Union[str, Sequence[Tuple[int, int]]]
if padding in {'SAME', 'VALID'}:
if rhs_dilation is None:
rhs_dilation = (1,) * (rhs.ndim - 2)
effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)
pads = [_conv_transpose_padding(k, s, padding)
for k,s in zip(effective_k_size, strides)]
else:
pads = padding
if transpose_kernel:
rhs = _flip_axes(rhs, np.array(dn.rhs_spec)[2:])
rhs = np.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])
return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,
precision=precision)
def full_like(x: Array, fill_value: Array, dtype: Optional[DType] = None,
shape: Optional[Shape] = None) -> Array:
fill_shape = np.shape(x) if shape is None else canonicalize_shape(shape)
if not config.omnistaging_enabled:
fill_value = tie_in(x, fill_value)
return full(fill_shape, fill_value, dtype or _dtype(x))
def collapse(operand: Array, start_dimension: int,
stop_dimension: int) -> Array:
lo, hi = start_dimension, stop_dimension
size = prod(operand.shape[lo:hi])
new_shape = operand.shape[:lo] + (size,) + operand.shape[hi:]
return reshape(operand, new_shape)
def slice_in_dim(operand: Array, start_index: Optional[int],
limit_index: Optional[int],
stride: int = 1, axis: int = 0)-> Array:
start_indices = [0] * operand.ndim
limit_indices = list(operand.shape)
strides = [1] * operand.ndim
len_axis = operand.shape[axis]
start_index_int = _canonicalize_dimension(start_index) if start_index is not None else 0
limit_index_int = _canonicalize_dimension(limit_index) if limit_index is not None else len_axis
if start_index_int < 0:
start_index_int = start_index_int + len_axis
if limit_index_int < 0:
limit_index_int = limit_index_int + len_axis
axis = int(axis)
start_indices[axis] = start_index_int
limit_indices[axis] = limit_index_int
strides[axis] = int(stride)
return slice(operand, start_indices, limit_indices, strides)
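# Illustrative sketch (assumed): slice_in_dim(x, 1, -1, axis=0) on an axis of
# length 5 resolves the negative limit to 4 and lowers to
# slice(x, [1, 0, ...], [4, ...], [1, ...]), i.e. it drops the first and last
# rows along axis 0.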
def index_in_dim(operand: Array, index: int, axis: int = 0,
keepdims: bool = True) -> Array:
index, axis = int(index), int(axis)
axis_size = operand.shape[axis]
wrapped_index = index + axis_size if index < 0 else index
if not 0 <= wrapped_index < axis_size:
msg = 'index {} is out of bounds for axis {} with size {}'
raise IndexError(msg.format(index, axis, axis_size))
result = slice_in_dim(operand, wrapped_index, wrapped_index + 1, 1, axis)
if keepdims:
return result
else:
return squeeze(result, (axis,))
def dynamic_slice_in_dim(operand: Array, start_index: Array,
slice_size: int, axis: int = 0) -> Array:
start_indices = [_zero(start_index)] * operand.ndim
slice_sizes = list(operand.shape)
axis = int(axis)
start_indices[axis] = start_index
slice_sizes[axis] = int(slice_size)
return dynamic_slice(operand, start_indices, slice_sizes)
def dynamic_index_in_dim(operand: Array, index: Array, axis: int = 0,
keepdims: bool = True) -> Array:
result = dynamic_slice_in_dim(operand, index, 1, axis)
if keepdims:
return result
else:
return squeeze(result, (axis,))
def dynamic_update_slice_in_dim(operand: Array, update: Array,
start_index: Array, axis: int) -> Array:
axis = int(axis)
start_indices = [_zero(start_index)] * _ndim(operand)
start_indices[axis] = start_index
return dynamic_update_slice(operand, update, start_indices)
def dynamic_update_index_in_dim(operand: Array, update: Array, index: Array,
axis: int) -> Array:
axis = int(axis)
if _ndim(update) != _ndim(operand):
assert _ndim(update) + 1 == _ndim(operand)
update = expand_dims(update, (axis,))
return dynamic_update_slice_in_dim(operand, update, index, axis)
def batch_matmul(lhs: Array, rhs: Array,
precision: PrecisionLike = None) -> Array:
if _min(lhs.ndim, rhs.ndim) < 2:
raise ValueError('Arguments to batch_matmul must be at least 2D, got {}, {}'
.format(lhs.ndim, rhs.ndim))
if lhs.ndim != rhs.ndim:
raise ValueError('Arguments to batch_matmul must have same ndim, got {}, {}'
.format(lhs.ndim, rhs.ndim))
lhs_contract = (lhs.ndim - 1,)
rhs_contract = (rhs.ndim - 2,)
batch = tuple(range(lhs.ndim - 2))
return dot_general(lhs, rhs, ((lhs_contract, rhs_contract), (batch, batch)),
precision=precision)
def square(x: Array) -> Array:
return integer_pow(x, 2)
def reciprocal(x: Array) -> Array:
return integer_pow(x, -1)
def _upcast_fp16_for_computation(f):
@functools.wraps(f)
def f_wrapped(x):
dtype = _dtype(x)
if dtype == np.float16 or dtype == dtypes.bfloat16:
return convert_element_type(
f(convert_element_type(x, np.float32)), dtype)
return f(x)
return f_wrapped
@api.jit
@_upcast_fp16_for_computation
def tan(x: Array) -> Array:
return div(sin(x), cos(x))
@api.jit
def asin(x: Array) -> Array:
if dtypes.issubdtype(_dtype(x), np.complexfloating):
return mul(_const(x, -1j), asinh(mul(_const(x, 1j), x)))
else:
return mul(_const(x, 2),
atan2(x, add(_const(x, 1), sqrt(sub(_const(x, 1), square(x))))))
@api.jit
def acos(x: Array) -> Array:
if dtypes.issubdtype(_dtype(x), np.complexfloating):
result = mul(_const(x, 1j), acosh(x))
rpart = real(result)
return select(
gt(rpart, _const(rpart, 0)),
result,
neg(result)
)
else:
return select(
ne(x, _const(x, -1.0)),
mul(_const(x, 2),
atan2(sqrt(sub(_const(x, 1), square(x))), add(_const(x, 1), x))),
full_like(x, np.pi))
def atan(x: Array) -> Array:
if dtypes.issubdtype(_dtype(x), np.complexfloating):
return mul(_const(x, -1j), atanh(mul(_const(x, 1j), x)))
else:
return atan2(x, _const(x, 1))
def sinh(x: Array) -> Array:
return sinh_p.bind(x)
def cosh(x: Array) -> Array:
return cosh_p.bind(x)
def asinh(x: Array) -> Array:
return asinh_p.bind(x)
def acosh(x: Array) -> Array:
return acosh_p.bind(x)
def atanh(x: Array) -> Array:
return atanh_p.bind(x)
ShapedArray.broadcast = core.aval_method(broadcast)
ShapedArray.transpose = core.aval_method(transpose)
ShapedArray.reshape = core.aval_method(reshape)
def _iter(tracer):
if tracer.ndim == 0:
raise TypeError("iteration over a 0-d array") else:
n = int(tracer.shape[0])
return iter([index_in_dim(tracer, i, keepdims=False) for i in range(n)])
ShapedArray._iter = staticmethod(_iter)
def zeros_like_array(x):
return full_like(x, 0)
for t in itertools.chain(dtypes.python_scalar_dtypes.keys(), array_types,
[xla.DeviceArray, pxla.ShardedDeviceArray]):
ad_util.jaxval_adders[t] = add
ad_util.jaxval_zeros_likers[xla.DeviceArray] = zeros_like_array
ad_util.jaxval_zeros_likers[pxla.ShardedDeviceArray] = zeros_like_array
_input_dtype = lambda *args, **_: dtypes.canonicalize_dtype(args[0].dtype)
_fixed_dtype = lambda dtype: lambda *args, **kwargs: dtypes.canonicalize_dtype(dtype)
_complex_basetype = lambda dtype: np.abs(np.zeros((), dtype)).dtype
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None,
multiple_results=False):
prim = Primitive(name)
prim.multiple_results = multiple_results
prim.def_impl(partial(xla.apply_primitive, prim))
prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule))
xla.translations[prim] = translation_rule or partial(standard_translate, name)
return prim
def standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs):
assert all(isinstance(arg, UnshapedArray) for arg in args), args
least_specialized = _max(
map(type, args), key=operator.attrgetter('array_abstraction_level'))
if least_specialized is ConcreteArray:
out_vals = prim.impl(*[x.val for x in args], **kwargs)
if not prim.multiple_results:
out_vals = [out_vals]
out_avals = safe_map(ConcreteArray, out_vals)
elif least_specialized is ShapedArray:
shapes, dtypes = shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs)
if not prim.multiple_results:
shapes, dtypes = [shapes], [dtypes]
out_avals = safe_map(ShapedArray, shapes, dtypes)
elif least_specialized is UnshapedArray:
dtypes = dtype_rule(*args, **kwargs)
if not prim.multiple_results:
dtypes = [dtypes]
out_avals = safe_map(UnshapedArray, dtypes)
else:
raise TypeError(args, least_specialized)
if not prim.multiple_results:
return out_avals[0]
return out_avals
def standard_translate(name, c, *args, **kwargs):
xla_opname = ''.join(term.capitalize() for term in name.split('_'))
return getattr(xops, xla_opname)(*args, **kwargs)
def unop_dtype_rule(result_dtype, accepted_dtypes, name, aval, **kwargs):
if not any(dtypes.issubdtype(aval.dtype, t) for t in accepted_dtypes):
msg = '{} does not accept dtype {}. Accepted dtypes are subtypes of {}.'
typename = str(np.dtype(aval.dtype).name)
accepted_typenames = (t.__name__ for t in accepted_dtypes)
raise TypeError(msg.format(name, typename, ', '.join(accepted_typenames)))
return result_dtype(aval.dtype)
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
prim = standard_primitive(_attrgetter('shape'), dtype_rule, name,
translation_rule=translation_rule)
batching.defvectorized(prim)
masking.defvectorized(prim)
return prim
standard_unop = partial(unop, _identity)
_attrgetter = lambda name: lambda x, **kwargs: getattr(x, name)
def naryop_dtype_rule(result_dtype, accepted_dtypes, name, *avals, **kwargs):
aval_dtypes = [aval.dtype for aval in avals]
for i, (aval_dtype, types) in enumerate(zip(aval_dtypes, accepted_dtypes)):
if not any(dtypes.issubdtype(aval_dtype, t) for t in types):
if aval_dtype is dtypes.float0:
raise TypeError(
f"Called {name} with a float0 at position {i}. "
"float0s do not support any operations by design, because they "
"are not compatible with non-trivial vector spaces. No implicit dtype "
"conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
"to cast a float0 array to a regular zeros array. \n"
"If you didn't expect to get a float0 you might have accidentally "
"taken a gradient with respect to an integer argument.")
else:
msg = ('{} does not accept dtype {} at position {}. '
'Accepted dtypes at position {} are subtypes of {}.')
typename = str(np.dtype(aval_dtype).name)
typenames = ', '.join(t.__name__ for t in types)
raise TypeError(msg.format(name, typename, i, i, typenames))
_check_same_dtypes(name, False, *aval_dtypes)
return result_dtype(*avals)
def _broadcasting_shape_rule(name, *avals):
shapes = [aval.shape for aval in avals if aval.shape]
if not shapes:
return ()
if len({len(shape) for shape in shapes}) != 1:
msg = '{} got arrays of different rank: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
result_shape = _try_broadcast_shapes(shapes)
if result_shape is None:
msg = '{} got incompatible shapes for broadcasting: {}.'
raise TypeError(msg.format(name, ', '.join(map(str, map(tuple, shapes)))))
return result_shape
def naryop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(naryop_dtype_rule, result_dtype, accepted_dtypes, name)
shape_rule = partial(_broadcasting_shape_rule, name)
prim = standard_primitive(shape_rule, dtype_rule, name,
translation_rule=translation_rule)
batching.defbroadcasting(prim)
masking.defnaryop(prim)
return prim
standard_naryop = partial(naryop, _input_dtype)
def _broadcast_translate(translate: Callable):
# Decorator for translation rules which adds explicit broadcasting of
# positional arguments. This is necessary only for a handful of primitives
# whose XLA implementations do not support broadcasting.
def _broadcast_array(array, array_shape, result_shape):
if array_shape == result_shape:
return array
bcast_dims = tuple(range(len(result_shape) - len(array_shape),
len(result_shape)))
result = xops.BroadcastInDim(array, result_shape, bcast_dims)
return result
def _broadcasted_translation_rule(c, *args, **kwargs):
shapes = [c.get_shape(arg).dimensions() for arg in args]
result_shape = broadcast_shapes(*shapes)
args = [_broadcast_array(arg, arg_shape, result_shape)
for arg, arg_shape in zip(args, shapes)]
return translate(c, *args, **kwargs)
return _broadcasted_translation_rule
# NOTE(mattjj): this isn't great for orchestrate fwd mode because it means JVPs
# get extra broadcasting ops in them, but saving the shape info with the
# primitives isn't great either because then we can't trace these ops without
# shape data.
def _brcast(x, *others):
# We don't need full numpy broadcasting, but otherwise the logic is the same
shapes = tuple(filter(None, map(np.shape, (x,) + others)))
shape = shapes and broadcast_shapes(*shapes)
if np.shape(x) != shape:
return _brcast_to(x, shape)
else:
return x
def _brcast_to(x, shape):
x_shape = np.shape(x)
assert x_shape != shape
if x_shape:
assert len(x_shape) == len(shape)
broadcast_dimensions, = np.where(np.equal(x_shape, shape))
squeezed_dimensions, = np.where(np.not_equal(x_shape, shape))
squeezed = squeeze(x, squeezed_dimensions)
return broadcast_in_dim(squeezed, shape, broadcast_dimensions)
else:
return broadcast(x, shape)
_float = {np.floating}
_complex = {np.complexfloating}
_complex_elem_types = {np.float32, np.float64}
_int = {np.integer}
_bool = {np.bool_}
_num = _int | _float | _complex
_any = _int | _float | _complex | _bool
_bool_or_int = _int | _bool
neg_p = standard_unop(_num, 'neg')
ad.deflinear(neg_p, lambda t: [neg(t)])
def _sign_translation_rule(c, x):
shape = c.get_shape(x)
dtype = shape.numpy_dtype()
if dtypes.issubdtype(dtype, np.unsignedinteger):
zero = xb.constant(c, np.array(0, dtype=dtype))
dims = c.get_shape(x).dimensions()
return xops.Select(xops.Eq(x, zero), xops.Broadcast(zero, dims),
xops.Broadcast(xb.constant(c, np.array(1, dtype=dtype)),
dims))
return xops.Sign(x)
sign_p = standard_unop(_num, 'sign', translation_rule=_sign_translation_rule)
ad.defjvp_zero(sign_p)
nextafter_p = standard_naryop(
[_float, _float], 'nextafter',
translation_rule=lambda c, x1, x2: xops.NextAfter(x1, x2))
floor_p = standard_unop(_float, 'floor')
ad.defjvp_zero(floor_p)
ceil_p = standard_unop(_float, 'ceil')
ad.defjvp_zero(ceil_p)
round_p = standard_unop(_float, 'round')
ad.defjvp_zero(round_p)
is_finite_p = unop(_fixed_dtype(np.bool_), _float, 'is_finite')
ad.defjvp_zero(is_finite_p)
exp_p = standard_unop(_float | _complex, 'exp')
ad.defjvp2(exp_p, lambda g, ans, x: mul(g, ans))
iad.definverse(exp_p, lambda r, x: log(r))
iad.primitive_ivjps[exp_p] = lambda x, y, ct: [[log(y[0])], [ct[0] * y[0]]]
log_p = standard_unop(_float | _complex, 'log')
ad.defjvp(log_p, lambda g, x: div(g, x))
iad.definverse(log_p, lambda r, x: exp(r))
expm1_p = standard_unop(_float | _complex, 'expm1')
ad.defjvp2(expm1_p, lambda g, ans, x: mul(g, add(ans, _one(ans))))
log1p_p = standard_unop(_float | _complex, 'log1p')
ad.defjvp(log1p_p, lambda g, x: div(g, add(x, _one(x))))
tanh_p = standard_unop(_float | _complex, 'tanh')
ad.defjvp2(tanh_p, lambda g, ans, x: mul(g, sub(_one(x), mul(ans, ans))))
sin_p = standard_unop(_float | _complex, 'sin')
ad.defjvp(sin_p, lambda g, x: mul(g, cos(x)))
cos_p = standard_unop(_float | _complex, 'cos')
ad.defjvp(cos_p, lambda g, x: neg(mul(g, sin(x))))
atan2_p = standard_naryop([_float, _float], 'atan2')
ad.defjvp(atan2_p,
lambda g, x, y: _brcast(g, y) * (y / (square(x) + square(y))),
lambda g, x, y: _brcast(g, x) * -x / (square(x) + square(y)))
sinh_p = standard_unop(_float | _complex, 'sinh')
ad.defjvp(sinh_p, lambda g, x: mul(g, cosh(x)))
cosh_p = standard_unop(_float | _complex, 'cosh')
ad.defjvp(cosh_p, lambda g, x: mul(g, sinh(x)))
asinh_p = standard_unop(_float | _complex, 'asinh')
ad.defjvp(asinh_p, lambda g, x: mul(g, rsqrt(square(x) + _one(x))))
acosh_p = standard_unop(_float | _complex, 'acosh')
ad.defjvp(acosh_p,
lambda g, x: mul(g, rsqrt((x - _one(x)) * (x + _one(x)))))
atanh_p = standard_unop(_float | _complex, 'atanh')
ad.defjvp(atanh_p,
lambda g, x: mul(g, reciprocal((_one(x) - x) * (_one(x) + x))))
regularized_incomplete_beta_p = standard_naryop(
[_float, _float, _float], 'regularized_incomplete_beta',
translation_rule=_broadcast_translate(
partial(standard_translate, 'regularized_incomplete_beta')))
def betainc_gradx(g, a, b, x):
lbeta = lgamma(a) + lgamma(b) - lgamma(a + b)
partial_x = exp((b - 1) * log1p(-x) +
(a - 1) * log(x) - lbeta)
return partial_x * g
def betainc_grad_not_implemented(g, a, b, x):
raise ValueError("Betainc gradient with respect to a and b not supported.")
ad.defjvp(regularized_incomplete_beta_p,
betainc_grad_not_implemented,
betainc_grad_not_implemented,
betainc_gradx)
lgamma_p = standard_unop(_float, 'lgamma')
ad.defjvp(lgamma_p, lambda g, x: mul(g, digamma(x)))
digamma_p = standard_unop(_float, 'digamma')
igamma_p = standard_naryop(
[_float, _float], 'igamma',
translation_rule=_broadcast_translate(partial(standard_translate, 'igamma')))
igamma_grad_a_p = standard_naryop([_float, _float], 'igamma_grad_a',
translation_rule=_broadcast_translate(partial(standard_translate,
'igamma_grad_a')))
def igamma_gradx(g, a, x):
return _brcast(g, a, x) * exp(-x + (a - _ones(a)) * log(x) - lgamma(a))
def igamma_grada(g, a, x):
return _brcast(g, a, x) * igamma_grad_a(a, x)
ad.defjvp(igamma_p, igamma_grada, igamma_gradx)
igammac_p = standard_naryop(
[_float, _float], 'igammac',
translation_rule=_broadcast_translate(partial(standard_translate, 'igammac')))
def igammac_gradx(g, a, x):
return -igamma_gradx(g, a, x)
def igammac_grada(g, a, x):
return -igamma_grada(g, a, x)
ad.defjvp(igammac_p, igammac_grada, igammac_gradx)
random_gamma_grad_p = standard_naryop([_float, _float], 'random_gamma_grad',
translation_rule=_broadcast_translate(partial(standard_translate,
'random_gamma_grad')))
bessel_i0e_p = standard_unop(_float, 'bessel_i0e')
ad.defjvp2(bessel_i0e_p, lambda g, y, x: g * (bessel_i1e(x) - sign(x) * y))
bessel_i1e_p = standard_unop(_float, 'bessel_i1e')
def _bessel_i1e_jvp(g, y, x):
eps = dtypes.finfo(_dtype(x)).eps
x_is_not_tiny = abs(x) > eps
safe_x = select(x_is_not_tiny, x, full_like(x, eps))
dy_dx = bessel_i0e(safe_x) - y * (sign(safe_x) + reciprocal(safe_x))
dy_dx = select(x_is_not_tiny, dy_dx, full_like(x, 0.5))
return g * dy_dx
ad.defjvp2(bessel_i1e_p, _bessel_i1e_jvp)
erf_p = standard_unop(_float, 'erf')
ad.defjvp(erf_p, lambda g, x: mul(_const(x, 2. / np.sqrt(np.pi)),
mul(g, exp(neg(square(x))))))
erfc_p = standard_unop(_float, 'erfc')
ad.defjvp(erfc_p, lambda g, x: mul(_const(x, 2. / np.sqrt(np.pi)),
mul(neg(g), exp(neg(square(x))))))
erf_inv_p = standard_unop(_float, 'erf_inv')
ad.defjvp2(erf_inv_p, lambda g, ans, x: mul(_const(x, np.sqrt(np.pi) / 2.),
mul(g, exp(square(ans)))))
real_p = unop(_complex_basetype, _complex, 'real')
ad.deflinear(real_p, lambda t: [complex(t, np.zeros((), _dtype(t)))])
imag_p = unop(_complex_basetype, _complex, 'imag')
ad.defjvp(imag_p, lambda g, _: real(mul(_const(g, -1j), g)))
_complex_dtype = lambda dtype, *args: (np.zeros((), dtype) + np.zeros((), np.complex64)).dtype
complex_p = naryop(_complex_dtype, [_complex_elem_types, _complex_elem_types],
'complex')
ad.deflinear(complex_p, lambda t: [real(t), imag(neg(t))])
conj_p = unop(_complex_dtype, _complex_elem_types | _complex, 'conj')
def _conj_transpose_rule(t, x, *, input_dtype):
assert ad.is_undefined_primal(x)
if dtypes.issubdtype(input_dtype, np.complexfloating):
return [conj(t)]
else:
return [real(t)]
xla.translations[conj_p] = lambda c, x, **kwargs: xops.Conj(x)
ad.primitive_jvps[conj_p] = partial(ad.linear_jvp, conj_p)
ad.primitive_transposes[conj_p] = _conj_transpose_rule
abs_p = unop(_complex_basetype, _num, 'abs')
def _abs_jvp_rule(g, ans, x):
if _iscomplex(x):
return _maybe_real(mul(g, div(_maybe_conj(x),
_replace_zero(convert_element_type(ans, _dtype(x))))))
else:
return select(ge(x, _zero(x)), g, neg(g))
ad.defjvp2(abs_p, _abs_jvp_rule)
_maybe_conj = lambda x: conj(x) if _iscomplex(x) else x
_maybe_real = lambda x: real(x) if _iscomplex(x) else x
sqrt_p = standard_unop(_float | _complex, 'sqrt')
ad.defjvp2(sqrt_p, lambda g, ans, x: mul(g, div(_const(x, 0.5), ans)))
rsqrt_p = standard_unop(_float | _complex, 'rsqrt')
ad.defjvp2(rsqrt_p,
lambda g, ans, x:
mul(g, mul(_const(x, -0.5), pow(x, _const(x, -1.5)))))
pow_p = standard_naryop([_float | _complex, _float | _complex], 'pow')
def _pow_jvp_lhs(g, ans, x, y):
jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))
return mul(_brcast(g, y), jac)
def _pow_jvp_rhs(g, ans, x, y):
return mul(_brcast(g, x), mul(log(_replace_zero(x)), ans))
ad.defjvp2(pow_p, _pow_jvp_lhs, _pow_jvp_rhs)
def _integer_pow_dtype_rule(x, *, y):
dtype = unop_dtype_rule(_identity, _int | _float | _complex, 'integer_pow', x)
if y < 0 and dtypes.issubdtype(dtype, np.integer):
raise TypeError("Integers cannot be raised to negative powers, got "
f"integer_pow({x}, {y})")
return dtype
def _integer_pow_translation_rule(c, x, *, y):
if y == 0:
shape = c.get_shape(x)
return xb.constant(c, np.array(1, dtype=shape.numpy_dtype()))
is_reciprocal = y < 0
if is_reciprocal:
y = -y
acc = None
while y > 0:
if y & 1:
acc = x if acc is None else xops.Mul(acc, x)
y >>= 1
if y > 0:
x = xops.Mul(x, x)
return xops.Reciprocal(acc) if is_reciprocal else acc
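# The translation rule above is binary exponentiation (square-and-multiply).
# For example, y = 5 (binary 101) produces acc = x, x = x*x, x = x*x,
# acc = acc * x, i.e. x**5 in three multiplications; a negative exponent costs
# one additional reciprocal at the end.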
def _integer_pow_jvp(g, x, *, y):
return g if y == 0 else mul(g, mul(_const(x, y), integer_pow(x, y - 1)))
integer_pow_p = standard_primitive(
_attrgetter('shape'), _integer_pow_dtype_rule, 'integer_pow',
translation_rule=_integer_pow_translation_rule)
batching.defvectorized(integer_pow_p)
masking.defvectorized(integer_pow_p)
ad.defjvp(integer_pow_p, _integer_pow_jvp)
_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)
not_p = standard_unop(_bool_or_int, 'not')
ad.defjvp_zero(not_p)
and_p = standard_naryop([_bool_or_int, _bool_or_int], 'and')
ad.defjvp_zero(and_p)
or_p = standard_naryop([_bool_or_int, _bool_or_int], 'or')
ad.defjvp_zero(or_p)
xor_p = standard_naryop([_bool_or_int, _bool_or_int], 'xor')
ad.defjvp_zero(xor_p)
population_count_p = standard_unop(_int, 'population_count')
def _add_transpose(t, x, y):
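  # The linearity assertion below is morally true, but because zeros are
  # sometimes instantiated for convenience it does not always hold
  # (see the matching note on _sub_transpose below).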
# assert ad.is_undefined_primal(x) and ad.is_undefined_primal(y)
return [t, t]
add_p = standard_naryop([_num, _num], 'add')
ad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))
ad.primitive_transposes[add_p] = _add_transpose
def _add_inverse(r, x, y):
xr = r - y
yr = r - x
return xr, yr
iad.definverse(add_p, _add_inverse)
def _sub_transpose(t, x, y):
# The following linearity assertion is morally true, but because in some cases
# we instantiate zeros for convenience, it doesn't always hold.
return [t, neg(t) if type(t) is not ad_util.Zero else ad_util.Zero]
sub_p = standard_naryop([_num, _num], 'sub')
ad.defjvp(sub_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: _brcast(neg(g), x))
ad.primitive_transposes[sub_p] = _sub_transpose
mul_p = standard_naryop([_num, _num], 'mul')
ad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)
def _mul_inverse(r, x, y):
xr = r / y
yr = r / x
return xr, yr
iad.definverse(mul_p, _mul_inverse)
def _div_transpose_rule(cotangent, x, y):
assert ad.is_undefined_primal(x) and not ad.is_undefined_primal(y)
res = ad_util.Zero if type(cotangent) is ad_util.Zero else div(cotangent, y)
return res, None
div_p = standard_naryop([_num, _num], 'div')
ad.defjvp(div_p,
lambda g, x, y: div(_brcast(g, y), y),
lambda g, x, y: mul(mul(neg(_brcast(g, x)), x), integer_pow(y, -2)))
ad.primitive_transposes[div_p] = _div_transpose_rule
rem_p = standard_naryop([_num, _num], 'rem')
ad.defjvp(rem_p,
lambda g, x, y: _brcast(g, y),
lambda g, x, y: mul(_brcast(neg(g), x), floor(div(x, y))))
def _broadcasting_select(c, which, x, y):
which_shape, x_shape, y_shape = (
c.get_shape(t).dimensions() for t in (which, x, y))
out_shape = broadcast_shapes(which_shape, x_shape, y_shape)
bcast_dims = lambda shape: tuple(range(len(out_shape) - len(shape),
len(out_shape)))
which = xops.BroadcastInDim(which, out_shape, bcast_dims(which_shape))
x = xops.BroadcastInDim(x, out_shape, bcast_dims(x_shape))
y = xops.BroadcastInDim(y, out_shape, bcast_dims(y_shape))
return xops.Select(which, x, y)
def _minmax_translation_rule(c, x, y, *, minmax=None, cmp=None):
dtype = c.get_shape(x).numpy_dtype()
if dtypes.issubdtype(dtype, np.complexfloating):
rx = xops.Real(x)
ry = xops.Real(y)
return _broadcasting_select(
c, xops.Select(xops.Eq(rx, ry), cmp(xops.Imag(x), xops.Imag(y)),
cmp(rx, ry)),
x, y)
return minmax(x, y)
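# For complex operands, max and min compare lexicographically: first by real
# part and, on ties, by imaginary part, as implemented by the translation rule
# above.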
max_p: core.Primitive = standard_naryop(
[_any, _any], 'max', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Max, cmp=xops.Gt))
ad.defjvp2(max_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
min_p: core.Primitive = standard_naryop(
[_any, _any], 'min', translation_rule=partial(
_minmax_translation_rule, minmax=xops.Min, cmp=xops.Lt))
ad.defjvp2(min_p,
lambda g, ans, x, y: mul(_brcast(g, y), _balanced_eq(x, ans, y)),
lambda g, ans, x, y: mul(_brcast(g, x), _balanced_eq(y, ans, x)))
shift_left_p = standard_naryop([_int, _int], 'shift_left')
ad.defjvp_zero(shift_left_p)
shift_right_arithmetic_p = standard_naryop([_int, _int], 'shift_right_arithmetic')
ad.defjvp_zero(shift_right_arithmetic_p)
shift_right_logical_p = standard_naryop([_int, _int], 'shift_right_logical')
ad.defjvp_zero(shift_right_logical_p)
eq_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'eq')
ad.defjvp_zero(eq_p)
ne_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ne')
ad.defjvp_zero(ne_p)
ge_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'ge')
ad.defjvp_zero(ge_p)
gt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'gt')
ad.defjvp_zero(gt_p)
le_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'le')
ad.defjvp_zero(le_p)
lt_p = naryop(_fixed_dtype(np.bool_), [_any, _any], 'lt')
ad.defjvp_zero(lt_p)
def _convert_element_type_shape_rule(operand, *, new_dtype, old_dtype):
return operand.shape
def _convert_element_type_dtype_rule(operand, *, new_dtype, old_dtype):
return new_dtype
def _convert_element_type_translation_rule(c, operand, *, new_dtype, old_dtype):
if (dtypes.issubdtype(old_dtype, np.complexfloating) and
not dtypes.issubdtype(new_dtype, np.complexfloating)):
operand = xops.Real(operand)
new_etype = xla_client.dtype_to_etype(new_dtype)
return xops.ConvertElementType(operand, new_element_type=new_etype)
def _convert_element_type_transpose_rule(ct, operand, *, new_dtype, old_dtype):
if type(ct) is ad_util.Zero:
return [ad_util.Zero(operand.aval)]
elif core.primal_dtype_to_tangent_dtype(old_dtype) is dtypes.float0:
return [ad_util.Zero(ShapedArray(operand.aval.shape, dtype=dtypes.float0))]
else:
return [convert_element_type_p.bind(ct, new_dtype=old_dtype,
old_dtype=new_dtype)]
def _convert_element_type_jvp_rule(tangent, operand , *, new_dtype, old_dtype):
if core.primal_dtype_to_tangent_dtype(new_dtype) is dtypes.float0:
return ad_util.Zero(ShapedArray(tangent.shape, dtype=dtypes.float0))
else:
return convert_element_type_p.bind(tangent, new_dtype=new_dtype,
old_dtype=old_dtype)
convert_element_type_p = standard_primitive(
_convert_element_type_shape_rule, _convert_element_type_dtype_rule,
'convert_element_type', _convert_element_type_translation_rule)
ad.defjvp(convert_element_type_p, _convert_element_type_jvp_rule)
ad.primitive_transposes[convert_element_type_p] = _convert_element_type_transpose_rule
batching.defvectorized(convert_element_type_p)
masking.defvectorized(convert_element_type_p)
def _bitcast_convert_type_shape_rule(operand, *, new_dtype):
return operand.shape
def _bitcast_convert_type_dtype_rule(operand, *, new_dtype):
return new_dtype
def _bitcast_convert_type_translation_rule(c, operand, *, new_dtype):
new_etype = xla_bridge.dtype_to_etype(new_dtype)
return xops.BitcastConvertType(operand, new_element_type=new_etype)
bitcast_convert_type_p = standard_primitive(
_bitcast_convert_type_shape_rule, _bitcast_convert_type_dtype_rule,
'bitcast_convert_type', _bitcast_convert_type_translation_rule)
ad.defjvp_zero(bitcast_convert_type_p)
batching.defvectorized(bitcast_convert_type_p)
masking.defvectorized(bitcast_convert_type_p)
def _conv_general_dilated_shape_rule(
lhs: ShapedArray, rhs: ShapedArray, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, **unused_kwargs) -> Tuple[int, ...]:
assert type(dimension_numbers) is ConvDimensionNumbers
if len(lhs.shape) != len(rhs.shape):
msg = ("conv_general_dilated lhs and rhs must have the same number of "
"dimensions, but got {} and {}.")
raise ValueError(msg.format(lhs.shape, rhs.shape))
if not feature_group_count > 0:
msg = ("conv_general_dilated feature_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(feature_group_count))
lhs_feature_count = lhs.shape[dimension_numbers.lhs_spec[1]]
quot, rem = divmod(lhs_feature_count, feature_group_count)
if rem:
msg = ("conv_general_dilated feature_group_count must divide lhs feature "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(feature_group_count, lhs_feature_count))
if quot != rhs.shape[dimension_numbers.rhs_spec[1]]:
msg = ("conv_general_dilated lhs feature dimension size divided by "
"feature_group_count must equal the rhs input feature dimension "
"size, but {} // {} != {}.")
raise ValueError(msg.format(lhs_feature_count, feature_group_count,
rhs.shape[dimension_numbers.rhs_spec[1]]))
if rhs.shape[dimension_numbers.rhs_spec[0]] % feature_group_count:
msg = ("conv_general_dilated rhs output feature dimension size must be a "
"multiple of feature_group_count, but {} is not a multiple of {}.")
raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
feature_group_count))
if not batch_group_count > 0:
msg = ("conv_general_dilated batch_group_count "
"must be a positive integer, got {}.")
raise ValueError(msg.format(batch_group_count))
lhs_batch_count = lhs.shape[dimension_numbers.lhs_spec[0]]
if lhs_batch_count % batch_group_count != 0:
msg = ("conv_general_dilated batch_group_count must divide lhs batch "
"dimension size, but {} does not divide {}.")
raise ValueError(msg.format(batch_group_count, lhs_batch_count))
if rhs.shape[dimension_numbers.rhs_spec[0]] % batch_group_count:
msg = ("conv_general_dilated rhs output feature dimension size must be a "
"multiple of batch_group_count, but {} is not a multiple of {}.")
raise ValueError(msg.format(rhs.shape[dimension_numbers.rhs_spec[0]],
batch_group_count))
if batch_group_count > 1 and feature_group_count > 1:
msg = ("At most one of batch_group_count and feature_group_count may be > "
"1, got batch_group_count={} and feature_group_count={}")
raise ValueError(msg.format(batch_group_count, feature_group_count))
lhs_perm, rhs_perm, out_perm = dimension_numbers
lhs_trans = _dilate_shape(np.take(lhs.shape, lhs_perm), lhs_dilation)
rhs_trans = _dilate_shape(np.take(rhs.shape, rhs_perm), rhs_dilation)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding,
batch_group_count)
return tuple(np.take(out_trans, np.argsort(out_perm)))
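# Illustrative example of the grouped-convolution checks above (hypothetical
# shapes): with an NCHW lhs of shape (8, 32, 16, 16), an OIHW rhs of shape
# (64, 16, 3, 3) and feature_group_count=2, the 32 lhs features split into two
# groups of 16 (matching the rhs input-feature dimension), and the 64 rhs output
# features are a multiple of 2, so the shapes are accepted.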
def _conv_general_dilated_dtype_rule(
lhs, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, **unused_kwargs):
return naryop_dtype_rule(_input_dtype, [_float | _complex, _float | _complex],
'conv_general_dilated', lhs, rhs)
_conv_spec_transpose = lambda spec: (spec[1], spec[0]) + spec[2:]
_conv_sdims = lambda spec: spec[2:]
def _conv_general_dilated_transpose_lhs(
g, rhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
assert batch_group_count == 1 or feature_group_count == 1
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_spec, rhs_spec, out_spec = dimension_numbers
t_rhs_spec = _conv_spec_transpose(rhs_spec)
if feature_group_count > 1:
rhs = _reshape_axis_out_of(rhs_spec[0], feature_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
elif batch_group_count > 1:
rhs = _reshape_axis_out_of(rhs_spec[0], batch_group_count, rhs)
rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[1], rhs)
feature_group_count = batch_group_count
trans_dimension_numbers = ConvDimensionNumbers(out_spec, t_rhs_spec, lhs_spec)
padding = _conv_general_vjp_lhs_padding(
np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
revd_weights = rev(rhs, rhs_sdims)
out = conv_general_dilated(
g, revd_weights, window_strides=lhs_dilation, padding=padding,
lhs_dilation=window_strides, rhs_dilation=rhs_dilation,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=1, precision=precision)
if batch_group_count > 1:
out = _reshape_axis_out_of(lhs_spec[1], batch_group_count, out)
out = _reshape_axis_into(lhs_spec[1], lhs_spec[0], out)
return out
def _conv_general_dilated_transpose_rhs(
g, lhs, *, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers: ConvDimensionNumbers, feature_group_count: int,
batch_group_count: int, lhs_shape, rhs_shape, precision):
assert type(dimension_numbers) is ConvDimensionNumbers
if np.size(g) == 0:
# Avoids forming degenerate convolutions where the RHS has spatial size 0.
return ad_util.Zero
lhs_sdims, rhs_sdims, out_sdims = map(_conv_sdims, dimension_numbers)
lhs_trans, rhs_trans, out_trans = map(_conv_spec_transpose, dimension_numbers)
assert batch_group_count == 1 or feature_group_count == 1
if batch_group_count > 1:
feature_group_count = batch_group_count
batch_group_count = 1
elif feature_group_count > 1:
batch_group_count = feature_group_count
feature_group_count = 1
trans_dimension_numbers = ConvDimensionNumbers(lhs_trans, out_trans, rhs_trans)
padding = _conv_general_vjp_rhs_padding(
np.take(lhs_shape, lhs_sdims), np.take(rhs_shape, rhs_sdims),
window_strides, np.take(g.shape, out_sdims), padding, lhs_dilation,
rhs_dilation)
return conv_general_dilated(
lhs, g, window_strides=rhs_dilation, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=window_strides,
dimension_numbers=trans_dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count, precision=precision)
def _conv_general_dilated_translation_rule(
c, lhs, rhs, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count,
batch_group_count, precision, expand_complex_convolutions, **unused_kwargs):
assert type(dimension_numbers) is ConvDimensionNumbers
dimension_numbers = _conv_general_proto(dimension_numbers)
precision_config = _precision_config(precision)
dtype = c.get_shape(lhs).numpy_dtype()
conv = lambda x, y: xops.ConvGeneralDilated(
x, y, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count, batch_group_count,
precision_config=precision_config)
if expand_complex_convolutions and np.issubdtype(dtype, np.complexfloating):
    # We use a trick for complex multiplication due to Gauss, which uses three
    # multiplications and five additions instead of the naive method's four
    # multiplications and two additions.
# https://en.wikipedia.org/wiki/Multiplication_algorithm#Complex_multiplication_algorithm
#
    # This performance win comes with a trade-off in accuracy, especially in
    # cases where the real and imaginary parts differ greatly in magnitude. The relative
# error bound (e.g. 1p-24 in case of float32) would be relative to the
# maximum of real and imaginary parts of the result instead of being
# satisfied by the real and imaginary parts independently of each other.
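    # Concretely, for (a + ib) * (c + id) the three products are
    #   k1 = (a + b) * c,  k2 = a * (d - c),  k3 = b * (c + d),
    # so that real = k1 - k3 = a*c - b*d and imag = k1 + k2 = a*d + b*c, which
    # is what the three convolutions below compute elementwise.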
lhs_real, lhs_imag = xops.Real(lhs), xops.Imag(lhs)
rhs_real, rhs_imag = xops.Real(rhs), xops.Imag(rhs)
k1 = conv(xops.Add(lhs_real, lhs_imag), rhs_real)
k2 = conv(lhs_real, xops.Sub(rhs_imag, rhs_real))
k3 = conv(lhs_imag, xops.Add(rhs_real, rhs_imag))
return xops.Complex(xops.Sub(k1, k3), xops.Add(k1, k2))
return conv(lhs, rhs)
def _conv_general_dilated_batch_rule(
batched_args, batch_dims, *, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count, precision, **unused_kwargs):
assert batch_group_count == 1 or feature_group_count == 1
lhs, rhs = batched_args
lhs_bdim, rhs_bdim = batch_dims
lhs_spec, rhs_spec, out_spec = dimension_numbers
if lhs_bdim is not None and rhs_bdim is not None:
assert lhs.shape[lhs_bdim] == rhs.shape[rhs_bdim]
if batch_group_count > 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
batch_group_count *= lhs.shape[lhs_bdim]
else:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[1], lhs)
feature_group_count *= lhs.shape[lhs_bdim]
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(
new_lhs, new_rhs, window_strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], lhs.shape[lhs_bdim], out)
return out, out_spec[1]
elif lhs_bdim is not None:
if batch_group_count == 1:
new_lhs = _reshape_axis_into(lhs_bdim, lhs_spec[0], lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
else:
new_lhs = _reshape_axis_out_of(lhs_spec[0] + int(lhs_bdim <= lhs_spec[0]),
batch_group_count, lhs)
new_lhs = _reshape_axis_into(lhs_bdim + int(lhs_spec[0] < lhs_bdim),
lhs_spec[0] + 1,
new_lhs)
new_lhs = _reshape_axis_into(lhs_spec[0], lhs_spec[0], new_lhs)
out = conv_general_dilated(new_lhs, rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[0], lhs.shape[lhs_bdim], out)
return out, out_spec[0]
elif rhs_bdim is not None:
if feature_group_count == 1 and batch_group_count == 1:
new_rhs = _reshape_axis_into(rhs_bdim, rhs_spec[0], rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], rhs.shape[rhs_bdim], out)
return out, out_spec[1]
else:
# groups need to be outermost, so we need to factor them out of the
# rhs output feature dim, then factor the batch dim into the remaining rhs
# output feature dim, then put groups back in. We do something
# similar on the output. An alternative which would require more FLOPs but
# fewer reshapes would be to broadcast lhs.
group_count = (feature_group_count if feature_group_count > 1
else batch_group_count)
new_rhs = _reshape_axis_out_of(rhs_spec[0] + int(rhs_bdim <= rhs_spec[0]),
group_count, rhs)
new_rhs = _reshape_axis_into(rhs_bdim + int(rhs_spec[0] < rhs_bdim),
rhs_spec[0] + 1,
new_rhs)
new_rhs = _reshape_axis_into(rhs_spec[0], rhs_spec[0], new_rhs)
out = conv_general_dilated(lhs, new_rhs, window_strides, padding,
lhs_dilation, rhs_dilation, dimension_numbers,
feature_group_count, batch_group_count,
precision=precision)
out = _reshape_axis_out_of(out_spec[1], group_count, out)
out = _reshape_axis_out_of(out_spec[1] + 1, rhs.shape[rhs_bdim], out)
out = _reshape_axis_into(out_spec[1], out_spec[1] + 1, out)
return out, out_spec[1]
def _masked(padded_value, logical_shape, dimensions, value=0):
if len(dimensions) == 0:
return padded_value
masks = [broadcasted_iota(np.int32, padded_value.shape, d) < logical_shape[d]
for d in dimensions]
mask_intersection = masks[0]
for mask in masks[1:]:
mask_intersection &= mask
return select(mask_intersection, padded_value, full_like(padded_value, value))
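# Example (illustrative): _masked(x, logical_shape=[3], dimensions=[0]) on a
# padded vector x of length 5 keeps x[0:3] and overwrites the trailing padding
# with `value`, e.g. [1, 2, 3, 9, 9] -> [1, 2, 3, 0, 0] with the default fill.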
def _conv_general_dilated_masking_rule(
padded_vals, logical_shapes, window_strides, padding, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count, batch_group_count,
lhs_shape, rhs_shape, precision):
lhs, rhs = padded_vals
logical_lhs_shape, logical_rhs_shape = logical_shapes
o, i, *window_dimensions = dimension_numbers.rhs_spec
assert (np.all(np.take(rhs.shape, window_dimensions)
== np.take(logical_rhs_shape, window_dimensions))), \
"Conv filter masking not yet implemented."
n, c, *padded_dimensions = dimension_numbers.lhs_spec
return conv_general_dilated(
_masked(lhs, logical_lhs_shape, padded_dimensions),
_masked(rhs, logical_rhs_shape, (i,)),
window_strides=window_strides, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count,
precision=precision)
conv_general_dilated_p = standard_primitive(
_conv_general_dilated_shape_rule, _conv_general_dilated_dtype_rule,
'conv_general_dilated', partial(_conv_general_dilated_translation_rule,
expand_complex_convolutions=False))
# TODO(b/161124619, b/161126248): XLA does not support complex convolution on
# CPU or GPU; on these backends, lower complex convolutions away.
xla.backend_specific_translations['cpu'][conv_general_dilated_p] = partial(
_conv_general_dilated_translation_rule, expand_complex_convolutions=True)
xla.backend_specific_translations['gpu'][conv_general_dilated_p] = partial(
_conv_general_dilated_translation_rule, expand_complex_convolutions=True)
ad.defbilinear(conv_general_dilated_p,
_conv_general_dilated_transpose_lhs,
_conv_general_dilated_transpose_rhs)
batching.primitive_batchers[conv_general_dilated_p] = \
_conv_general_dilated_batch_rule
masking.masking_rules[conv_general_dilated_p] = \
_conv_general_dilated_masking_rule
def _reshape_axis_into(src, dst, x):
perm = [i for i in range(x.ndim) if i != src]
perm.insert(dst, src)
new_shape = list(np.delete(x.shape, src))
new_shape[dst] *= x.shape[src]
return reshape(x, new_shape, perm)
def _reshape_axis_out_of(src, size1, x):
shape = list(x.shape)
size2, ragged = divmod(shape[src], size1)
assert not ragged
shape[src:src+1] = [size1, size2]
return reshape(x, shape)
def _precision_config(precision):
if precision is not None:
config = xla_client.PrecisionConfig()
if isinstance(precision, tuple):
config.operand_precision.extend(precision)
else:
config.operand_precision.extend((precision, precision))
return config
return None
def _dot_general_shape_rule(lhs, rhs, *, dimension_numbers, precision):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, lhs.ndim))
for d in (lhs_contracting, lhs_batch)):
msg = ("dot_general requires lhs dimension numbers to be nonnegative and "
"less than the number of axes of the lhs value, got "
f"lhs_batch of {lhs_batch} and lhs_contracting of {lhs_contracting} "
f"for lhs of rank {lhs.ndim}")
raise TypeError(msg)
if not all(np.all(np.greater_equal(d, 0)) and np.all(np.less(d, rhs.ndim))
for d in (rhs_contracting, rhs_batch)):
msg = ("dot_general requires rhs dimension numbers to be nonnegative and "
"less than the number of axes of the rhs value, got "
f"rhs_batch of {rhs_batch} and rhs_contracting of {rhs_contracting} "
f"for rhs of rank {rhs.ndim}")
raise TypeError(msg)
if len(lhs_batch) != len(rhs_batch):
msg = ("dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}.")
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_contracting_set, lhs_batch_set = set(lhs_contracting), set(lhs_batch)
rhs_contracting_set, rhs_batch_set = set(rhs_contracting), set(rhs_batch)
if len(lhs_batch_set) != len(lhs_batch):
msg = ("dot_general requires lhs batch dimensions to be distinct, got "
f"lhs_batch {lhs_batch}.")
raise TypeError(msg)
if len(rhs_batch_set) != len(rhs_batch):
msg = ("dot_general requires rhs batch dimensions to be distinct, got "
f"rhs_batch {rhs_batch}.")
raise TypeError(msg)
if len(lhs_contracting_set) != len(lhs_contracting):
msg = ("dot_general requires lhs contracting dimensions to be distinct, "
f"got lhs_contracting {lhs_contracting}.")
raise TypeError(msg)
if len(rhs_contracting_set) != len(rhs_contracting):
msg = ("dot_general requires rhs contracting dimensions to be distinct, "
f"got rhs_contracting {rhs_contracting}.")
raise TypeError(msg)
if lhs_contracting_set & lhs_batch_set:
msg = ("dot_general requires lhs batch dimensions to be disjoint from "
"contracting dimensions, got lhs_batch {} and lhs_contracting {}.")
raise TypeError(msg.format(lhs_batch, lhs_contracting))
if rhs_contracting_set & rhs_batch_set:
msg = ("dot_general requires rhs batch dimensions to be disjoint from "
"contracting dimensions, got rhs_batch {} and rhs_contracting {}.")
raise TypeError(msg.format(rhs_batch, rhs_contracting))
lhs_batch_shape = np.take(lhs.shape, lhs_batch)
rhs_batch_shape = np.take(rhs.shape, rhs_batch)
if not np.all(np.equal(lhs_batch_shape, rhs_batch_shape)):
msg = ("dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}.")
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
lhs_contracting_shape = np.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = np.take(rhs.shape, rhs_contracting)
if not np.all(np.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = ("dot_general requires contracting dimensions to have the same "
"shape, got {} and {}.")
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
batch_shape = tuple(lhs_batch_shape)
lhs_contract_or_batch = tuple(sorted(tuple(lhs_contracting) + tuple(lhs_batch)))
lhs_tensored_shape = tuple(np.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(sorted(tuple(rhs_contracting) + tuple(rhs_batch)))
rhs_tensored_shape = tuple(np.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
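# Example: a batched matmul with lhs of shape (B, M, K) and rhs of shape
# (B, K, N) uses dimension_numbers = (((2,), (1,)), ((0,), (0,))); the rule
# above then returns batch shape (B,) + lhs tensored shape (M,) + rhs tensored
# shape (N,) = (B, M, N).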
def _dot_general_dtype_rule(lhs, rhs, *, dimension_numbers, precision):
return naryop_dtype_rule(_input_dtype, [_any, _any], 'dot_general', lhs, rhs)
def _dot_general_transpose_lhs(g, y, *, dimension_numbers, precision,
swap_ans=False):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
x_ndim = g.ndim - y.ndim + len(x_batch) + 2 * len(x_contract)
x_kept = remaining(range(x_ndim), x_contract, x_batch)
y_kept = remaining(range(y.ndim), y_contract, y_batch)
if swap_ans:
ans_batch, ans_y, _ = ranges_like(x_batch, y_kept, x_kept)
else:
ans_batch, _, ans_y = ranges_like(x_batch, x_kept, y_kept)
dims = ((ans_y, y_kept), (ans_batch, y_batch))
x_contract_sorted_by_y = list(np.take(x_contract, np.argsort(y_contract)))
out_axes = np.argsort(list(x_batch) + x_kept + x_contract_sorted_by_y)
return transpose(dot_general(g, y, dims, precision=precision),
tuple(out_axes))
def _dot_general_transpose_rhs(g, x, *, dimension_numbers, precision):
(x_contract, y_contract), (x_batch, y_batch) = dimension_numbers
swapped_dimension_numbers = ((y_contract, x_contract), (y_batch, x_batch))
return _dot_general_transpose_lhs(
g, x, dimension_numbers=swapped_dimension_numbers, precision=precision,
swap_ans=True)
def _dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
precision):
# there are three kinds of dimensions in a dot_general:
# - contraction dimensions appear in lhs and rhs but not the result
# - batch dimensions appear in lhs, rhs, and result
# - tensor product dimensions appear in the result and one of lhs or rhs
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
lhs, rhs = batched_args
lbd, rbd = batch_dims
assert lbd is not None or rbd is not None
def bump_dims(dims, b):
return tuple(np.add(dims, np.greater_equal(dims, b)))
if lbd is not None and rbd is not None:
# adding a batch dimension
lhs_batch = (lbd,) + bump_dims(lhs_batch, lbd)
rhs_batch = (rbd,) + bump_dims(rhs_batch, rbd)
lhs_contract = bump_dims(lhs_contract, lbd)
rhs_contract = bump_dims(rhs_contract, rbd)
result_batch_dim = 0
else:
# adding a tensor product dimension
if lbd is not None:
other = tuple(d for d in range(lhs.ndim)
if d not in lhs_batch and d not in lhs_contract)
result_batch_dim = (len(lhs_batch) + sum(np.less(other, lbd)))
lhs_batch = bump_dims(lhs_batch, lbd)
lhs_contract = bump_dims(lhs_contract, lbd)
else:
other = tuple(d for d in range(rhs.ndim)
if d not in rhs_batch and d not in rhs_contract)
result_batch_dim = (lhs.ndim - len(lhs_contract) +
sum(np.less(other, rbd)))
rhs_batch = bump_dims(rhs_batch, rbd)
rhs_contract = bump_dims(rhs_contract, rbd)
new_dimension_numbers = ((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))
batched_out = dot_general(lhs, rhs, new_dimension_numbers,
precision=precision)
return batched_out, int(result_batch_dim)
def _dot_using_sum_of_products(lhs, rhs, *, dimension_numbers):
contract_dims, batch_dims = dimension_numbers
lhs_contract_dims, rhs_contract_dims = contract_dims
lhs_batch_dims, rhs_batch_dims = batch_dims
lhs_noncontract_dims = tuple(sorted(
set(range(np.ndim(lhs))) - set(lhs_batch_dims) - set(lhs_contract_dims)))
rhs_noncontract_dims = tuple(sorted(
set(range(np.ndim(rhs))) - set(rhs_batch_dims) - set(rhs_contract_dims)))
lhs = transpose(lhs,
lhs_batch_dims + lhs_noncontract_dims + lhs_contract_dims)
rhs = transpose(rhs,
rhs_batch_dims + rhs_noncontract_dims + rhs_contract_dims)
lhs_start_expand = len(lhs_batch_dims) + len(lhs_noncontract_dims)
lhs_end_expand = lhs_start_expand + len(rhs_noncontract_dims)
lhs = expand_dims(lhs, tuple(range(lhs_start_expand, lhs_end_expand)))
rhs_start_expand = len(lhs_batch_dims)
rhs_end_expand = rhs_start_expand + len(lhs_noncontract_dims)
rhs = expand_dims(rhs, tuple(range(rhs_start_expand, rhs_end_expand)))
out_ndim = (len(lhs_batch_dims) + len(lhs_noncontract_dims) +
len(rhs_noncontract_dims))
op_product = bitwise_and if lhs.dtype == np.bool_ else mul
op_sum = bitwise_or if lhs.dtype == np.bool_ else add
return reduce(op_product(lhs, rhs), _zero(lhs), op_sum,
tuple(range(out_ndim, out_ndim + len(lhs_contract_dims))))
def _dot_general_translation_rule(c, lhs, rhs, *, dimension_numbers, precision):
dtype = c.get_shape(lhs).numpy_dtype()
if dtypes.issubdtype(dtype, np.inexact):
return xops.DotGeneral(lhs, rhs,
xc.make_dot_dimension_numbers(dimension_numbers),
precision_config=_precision_config(precision))
else:
    # TODO(b/134526360): XLA doesn't support bool or integer dots, so we emit a
    # sum of products instead.
translation = xla.lower_fun(_dot_using_sum_of_products,
multiple_results=False)
return translation(c, lhs, rhs, dimension_numbers=dimension_numbers)
def _dot_general_masking_rule(padded_vals, logical_shapes, *, dimension_numbers,
precision):
lhs, rhs = padded_vals
lhs_shape, _ = logical_shapes
(lhs_contract, _), _ = dimension_numbers
return dot_general(_masked(lhs, lhs_shape, lhs_contract),
rhs, dimension_numbers, precision=precision)
dot_general_p = standard_primitive(_dot_general_shape_rule,
_dot_general_dtype_rule, 'dot_general',
_dot_general_translation_rule)
ad.defbilinear(dot_general_p,
_dot_general_transpose_lhs, _dot_general_transpose_rhs)
batching.primitive_batchers[dot_general_p] = _dot_general_batch_rule
masking.masking_rules[dot_general_p] = _dot_general_masking_rule
def _broadcast_shape_rule(operand, sizes):
_check_shapelike('broadcast', 'sizes', sizes)
return tuple(sizes) + operand.shape
def _broadcast_batch_rule(batched_args, batch_dims, *, sizes):
operand, = batched_args
bdim, = batch_dims
new_bdim = None if bdim is None else bdim + len(sizes)
return broadcast(operand, sizes), new_bdim
broadcast_p = standard_primitive(
_broadcast_shape_rule, _input_dtype, 'broadcast')
ad.deflinear(broadcast_p, lambda t, sizes: [_reduce_sum(t, range(len(sizes)))])
batching.primitive_batchers[broadcast_p] = _broadcast_batch_rule
def _broadcast_in_dim_impl(operand, *, shape, broadcast_dimensions):
if type(operand) is np.ndarray:
operand = _device_put_raw(operand)
if xla.type_is_device_array(operand) and np.all(
np.equal(operand.shape, np.take(shape, broadcast_dimensions))):
shape = _broadcast_in_dim_shape_rule(
operand, shape=shape, broadcast_dimensions=broadcast_dimensions)
aval = ShapedArray(shape, _dtype(operand))
if operand._lazy_expr is None:
lazy_expr = lazy.broadcast(lazy.array(operand), shape, broadcast_dimensions)
else:
lazy_expr = lazy.broadcast(operand._lazy_expr, shape, broadcast_dimensions)
return xla.make_device_array(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(broadcast_in_dim_p, operand, shape=shape,
broadcast_dimensions=broadcast_dimensions)
def _broadcast_in_dim_shape_rule(operand, *, shape, broadcast_dimensions):
_check_shapelike('broadcast_in_dim', 'shape', shape)
_check_shapelike('broadcast_in_dim', 'broadcast_dimensions',
broadcast_dimensions)
operand_ndim = np.ndim(operand)
if operand_ndim != len(broadcast_dimensions):
msg = ('broadcast_in_dim broadcast_dimensions must have length equal to '
'operand ndim; got broadcast_dimensions {} for operand ndim {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim))
if len(shape) < operand_ndim:
    msg = ('broadcast_in_dim target broadcast shape must have equal or higher rank '
           'than the operand shape; got operand ndim {} and target broadcast ndim {}.')
raise TypeError(msg.format(operand_ndim, len(shape)))
if not set(broadcast_dimensions).issubset(set(range(len(shape)))):
msg = ('broadcast_in_dim broadcast_dimensions must be a subset of output '
'dimensions, got {} for operand ndim {} and shape {}.')
raise TypeError(msg.format(broadcast_dimensions, operand_ndim, shape))
if any(operand.shape[i] != shape[broadcast_dimensions[i]] and
operand.shape[i] != 1 for i in range(operand_ndim)):
msg = (
"broadcast_in_dim operand dimension sizes must either be 1, or be "
"equal to their corresponding dimensions in the target broadcast "
"shape; got operand of shape {}, target broadcast shape {}, "
"broadcast_dimensions {} ")
raise TypeError(msg.format(operand.shape, shape, broadcast_dimensions))
if (len(broadcast_dimensions) != len(set(broadcast_dimensions)) or
tuple(broadcast_dimensions) != tuple(sorted(broadcast_dimensions))):
msg = ("broadcast_in_dim broadcast_dimensions must be strictly increasing; "
"got broadcast_dimensions {}")
raise TypeError(msg.format(broadcast_dimensions))
return shape
def _broadcast_in_dim_transpose_rule(t, *, shape, broadcast_dimensions):
axes = tuple(np.delete(range(len(shape)), broadcast_dimensions))
return [_reduce_sum(t, axes)]
def _broadcast_in_dim_batch_rule(batched_args, batch_dims, *, shape,
broadcast_dimensions):
operand, = batched_args
bdim, = batch_dims
new_operand = batching.moveaxis(operand, bdim, 0)
new_shape = (operand.shape[bdim],) + shape
new_broadcast_dimensions = (0,) + tuple(np.add(1, broadcast_dimensions))
return broadcast_in_dim(new_operand, new_shape, new_broadcast_dimensions), 0
broadcast_in_dim_p = standard_primitive(
_broadcast_in_dim_shape_rule, _input_dtype, 'broadcast_in_dim')
broadcast_in_dim_p.def_impl(_broadcast_in_dim_impl)
ad.deflinear(broadcast_in_dim_p, _broadcast_in_dim_transpose_rule)
batching.primitive_batchers[broadcast_in_dim_p] = _broadcast_in_dim_batch_rule
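# Example: broadcast_in_dim(x, shape=(2, 3), broadcast_dimensions=(1,)) maps an
# operand x of shape (3,) to shape (2, 3), identifying the operand's only axis
# with output axis 1 and broadcasting along output axis 0.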
def _clamp_shape_rule(min, operand, max):
if min.shape and min.shape != operand.shape:
m = "clamp requires min.shape == operand.shape or min.shape == (), got {}."
raise TypeError(m.format(min.shape))
if max.shape and max.shape != operand.shape:
m = "clamp requires max.shape == operand.shape or max.shape == (), got {}."
raise TypeError(m.format(max.shape))
return operand.shape
_clamp_dtype_rule = partial(naryop_dtype_rule, _input_dtype, [_any, _any, _any],
'clamp')
clamp_p = standard_primitive(_clamp_shape_rule, _clamp_dtype_rule, 'clamp')
ad.defjvp(clamp_p,
lambda g, min, operand, max:
select(bitwise_and(gt(min, operand), lt(min, max)),
_brcast(g, operand), _zeros(operand)),
lambda g, min, operand, max:
select(bitwise_and(gt(operand, min), lt(operand, max)),
g, _zeros(operand)),
lambda g, min, operand, max:
select(lt(max, operand), _brcast(g, operand), _zeros(operand)))
batching.defbroadcasting(clamp_p)
def _concatenate_shape_rule(*operands, **kwargs):
dimension = kwargs.pop('dimension')
if not operands:
msg = "concatenate expects at least one operand, got 0."
raise TypeError(msg)
if not all(isinstance(operand, UnshapedArray) for operand in operands):
msg = "All objects to concatenate must be arrays, got {}."
op = next(op for op in operands if not isinstance(op, UnshapedArray))
raise TypeError(msg.format(type(op)))
if len({operand.ndim for operand in operands}) != 1:
msg = "Cannot concatenate arrays with different ranks, got {}."
raise TypeError(msg.format(", ".join(str(o.ndim) for o in operands)))
if not 0 <= dimension < operands[0].ndim:
msg = "concatenate dimension out of bounds: dimension {} for shapes {}."
raise TypeError(msg.format(dimension, ", ".join([str(o.shape) for o in operands])))
shapes = [operand.shape[:dimension] + operand.shape[dimension+1:]
for operand in operands]
if not shapes[:-1] == shapes[1:]:
msg = ("Cannot concatenate arrays with shapes that differ in dimensions "
"other than the one being concatenated: concatenating along "
"dimension {} for shapes {}.")
shapes = [operand.shape for operand in operands]
raise TypeError(msg.format(dimension, ", ".join(map(str, shapes))))
concat_size = sum(o.shape[dimension] for o in operands)
ex_shape = operands[0].shape
return ex_shape[:dimension] + (concat_size,) + ex_shape[dimension+1:]
def _concatenate_dtype_rule(*operands, **kwargs):
_check_same_dtypes('concatenate', False, *(o.dtype for o in operands))
return operands[0].dtype
def _concatenate_translation_rule(c, *operands, **kwargs):
dimension = kwargs.pop('dimension')
return xops.ConcatInDim(c, operands, dimension)
def _concatenate_transpose_rule(t, *operands, dimension):
operand_shapes = [o.aval.shape if ad.is_undefined_primal(o) else o.shape
for o in operands]
if type(t) is ad_util.Zero:
return ad_util.Zero
else:
limit_points = np.cumsum([shape[dimension] for shape in operand_shapes])
starts = np.zeros((len(operands), t.ndim), dtype=int)
starts[1:, dimension] = limit_points[:-1]
limits = np.tile(t.shape, (len(operands), 1))
limits[:, dimension] = limit_points
return [slice(t, start, limit) if ad.is_undefined_primal(o) else None
for o, start, limit in zip(operands, starts, limits)]
def _concatenate_batch_rule(batched_args, batch_dims, *, dimension):
size = next(op.shape[bdim] for op, bdim in zip(batched_args, batch_dims)
if bdim is not None)
operands = [batching.moveaxis(op, bdim, 0) if bdim is not None
else broadcast(op, (size,))
for op, bdim in zip(batched_args, batch_dims)]
return concatenate(operands, dimension + 1), 0
concatenate_p = standard_primitive(
_concatenate_shape_rule, _concatenate_dtype_rule, 'concatenate',
_concatenate_translation_rule)
ad.deflinear(concatenate_p, _concatenate_transpose_rule)
ad.primitive_transposes[concatenate_p] = _concatenate_transpose_rule
batching.primitive_batchers[concatenate_p] = _concatenate_batch_rule
def _pad_dtype_rule(operand, padding_value, *, padding_config):
if operand.dtype != padding_value.dtype:
msg = "pad operand and padding_value must be same dtype: got {} and {}."
raise TypeError(msg.format(operand.dtype, padding_value.dtype))
return _input_dtype(operand, padding_value)
def _pad_shape_rule(operand, padding_value, *, padding_config):
del padding_value
if not len(padding_config) == np.ndim(operand):
raise ValueError("length of padding_config must equal the number of axes "
f"of operand, got padding_config {padding_config} "
f"for operand shape {np.shape(operand)}")
if not all(i >= 0 for _, _, i in padding_config):
raise ValueError("interior padding in padding_config must be nonnegative, "
f"got padding_config {padding_config}")
return tuple(l + h + d + (_max(0, d - 1) * i if i > 0 else 0)
for (l, h, i), d in zip(padding_config, np.shape(operand)))
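# Example: for an operand of shape (3,) and padding_config ((1, 2, 1),), the
# output length is lo + hi + d + (d - 1) * interior = 1 + 2 + 3 + 2 = 8: one
# low-padding element, one interior element between each pair of the three
# entries, and two high-padding elements.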
def _pad_transpose(t, operand, padding_value, *, padding_config):
if type(t) is ad_util.Zero:
return ad_util.Zero
lo, hi, interior = zip(*padding_config)
total = lambda x: _reduce_sum(x, list(range(t.ndim)))
def t_op():
unpad_config = safe_zip(np.negative(lo), np.negative(hi),
np.zeros_like(interior))
unpadded = pad(t, np.array(0., t.dtype), unpad_config)
return slice(unpadded, np.zeros_like(lo), unpadded.shape, np.add(interior, 1))
t_operand = t_op() if ad.is_undefined_primal(operand) else None
t_padv = sub(total(t), total(t_operand)) if ad.is_undefined_primal(padding_value) else None
return [t_operand, t_padv]
def _pad_batch_rule(batched_args, batch_dims, *, padding_config):
operand, padding_value = batched_args
operand_bdim, padding_value_bdim = batch_dims
if padding_value_bdim is None:
assert operand_bdim is not None
padding_config = list(padding_config)
padding_config.insert(operand_bdim, (0, 0, 0))
return pad(operand, padding_value, padding_config), operand_bdim
else:
raise NotImplementedError
def _pad_translation_rule(c, operand, padding_value, *, padding_config):
return xops.Pad(operand, padding_value,
xc.make_padding_config(padding_config))
def _pad_masking_rule(padded_vals, logical_shapes, padding_config):
operand, padding_value = padded_vals
shape, _ = logical_shapes
out = pad(operand, padding_value, padding_config)
out_shape = [lo + shape[i] * (interior + 1)
for i, (lo, hi, interior) in enumerate(padding_config)]
padded_dims = [i for i, config in enumerate(padding_config)
if config != (0, 0, 0)]
return _masked(out, out_shape, padded_dims, padding_value)
pad_p = standard_primitive(_pad_shape_rule, _pad_dtype_rule, 'pad',
translation_rule=_pad_translation_rule)
ad.deflinear(pad_p, _pad_transpose)
ad.primitive_transposes[pad_p] = _pad_transpose
batching.primitive_batchers[pad_p] = _pad_batch_rule
masking.masking_rules[pad_p] = _pad_masking_rule
def squeeze(array: Array, dimensions: Tuple[int, ...]) -> Array:
ndim = np.ndim(array)
dimensions = tuple(sorted(canonicalize_axis(i, ndim) for i in dimensions))
if not dimensions:
return array
return squeeze_p.bind(array, dimensions=dimensions)
def _squeeze_dtype_rule(operand, *, dimensions):
return operand.dtype
def _squeeze_shape_rule(operand, *, dimensions):
return _compute_squeeze_shape(np.shape(operand), dimensions)
def _compute_squeeze_shape(shape, dimensions):
dims_set = set(dimensions)
if len(dims_set) != len(dimensions):
raise ValueError(f"dimensions are not unique: {dimensions}")
if not all(0 <= d < len(shape) for d in dims_set):
raise ValueError(f"dimensions outside range [0, ndim): {dimensions}")
if any(shape[d] != 1 for d in dimensions):
raise ValueError(
"cannot select an axis to squeeze out which has size not equal to "
f"one, got shape={shape} and dimensions={dimensions}")
return tuple(s for i, s in enumerate(shape) if i not in dims_set)
def _squeeze_translation_rule(c, arg, *, dimensions):
new_shape = _compute_squeeze_shape(c.get_shape(arg).dimensions(), dimensions)
return xops.Reshape(arg, new_shape)
def _squeeze_transpose_rule(t, operand, *, dimensions):
assert ad.is_undefined_primal(operand)
return [expand_dims(t, dimensions)]
def _squeeze_batch_rule(batched_args, batch_dims, *, dimensions):
operand, = batched_args
bdim, = batch_dims
operand = batching.moveaxis(operand, bdim, 0)
dimensions = tuple(np.add(1, dimensions))
return squeeze(operand, dimensions=dimensions), 0
squeeze_p = standard_primitive(_squeeze_shape_rule, _squeeze_dtype_rule,
'squeeze', _squeeze_translation_rule)
ad.deflinear2(squeeze_p, _squeeze_transpose_rule)
batching.primitive_batchers[squeeze_p] = _squeeze_batch_rule
def expand_dims(array: Array, dimensions: Tuple[int, ...]) -> Array:
ndim_out = np.ndim(array) + len(dimensions)
dims_set = frozenset(canonicalize_axis(i, ndim_out) for i in dimensions)
result_shape = list(np.shape(array))
for i in sorted(dims_set):
result_shape.insert(i, 1)
broadcast_dims = [i for i in range(ndim_out) if i not in dims_set]
return broadcast_in_dim(array, result_shape, broadcast_dims)
def _reshape_impl(operand, *, new_sizes, dimensions):
old_sizes = np.shape(operand)
if xla.type_is_device_array(operand) and dimensions is None:
bcast_dims = _is_singleton_reshape(old_sizes, new_sizes)
if bcast_dims is not None:
aval = ShapedArray(new_sizes, operand.dtype)
if operand._lazy_expr is None:
lazy_expr = lazy.broadcast(lazy.array(operand), new_sizes, bcast_dims)
else:
lazy_expr = lazy.broadcast(operand._lazy_expr, new_sizes, bcast_dims)
return xla.make_device_array(aval, operand._device, lazy_expr, operand.device_buffer)
return xla.apply_primitive(reshape_p, operand, new_sizes=new_sizes,
dimensions=dimensions)
def _is_singleton_reshape(old, new):
old, new = iter(old), iter(new)
d1, d2 = next(old, None), next(new, None)
bcast_dims = []
i = 0
while True:
if d1 is d2 is None:
return bcast_dims
elif d1 == d2:
bcast_dims.append(i)
i += 1
d1, d2 = next(old, None), next(new, None)
elif d2 == 1:
i += 1
d2 = next(new, None)
else:
return None
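# _is_singleton_reshape detects reshapes that only insert size-1 axes, e.g.
# old = (3, 4), new = (3, 1, 4) yields broadcast dims [0, 2], so the reshape can
# be carried out as a lazy broadcast in _reshape_impl above; any other mismatch
# returns None.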
def _reshape_shape_rule(operand, *, new_sizes, dimensions):
if not np.all(np.greater_equal(new_sizes, 0)):
    msg = 'reshape new_sizes must all be nonnegative, got {}.'
raise TypeError(msg.format(new_sizes))
if prod(np.shape(operand)) != prod(new_sizes):
msg = 'reshape total size must be unchanged, got new_sizes {} for shape {}.'
raise TypeError(msg.format(new_sizes, np.shape(operand)))
if dimensions is not None:
if set(dimensions) != set(range(np.ndim(operand))):
msg = ('reshape dimensions must be a permutation of operand dimensions, '
'got dimensions {} for shape {}.')
raise TypeError(msg.format(dimensions, np.shape(operand)))
return tuple(new_sizes)
def _reshape_dtype_rule(operand, *, new_sizes, dimensions):
return operand.dtype
def _reshape_translation_rule(c, operand, *, new_sizes, dimensions):
if dimensions is None:
return xops.Reshape(operand, new_sizes)
else:
return xops.Reshape(operand, dimensions, new_sizes)
def _reshape_transpose_rule(t, operand, *, new_sizes, dimensions):
assert ad.is_undefined_primal(operand)
if dimensions is None:
return [reshape(t, operand.aval.shape)]
else:
return [transpose(reshape(t, np.take(operand.aval.shape, dimensions)),
np.argsort(dimensions))]
def _reshape_batch_rule(batched_args, batch_dims, *, new_sizes, dimensions):
operand, = batched_args
bdim, = batch_dims
operand = batching.moveaxis(operand, bdim, 0)
if dimensions is not None:
dimensions = (0,) + tuple(np.add(1, dimensions))
return reshape(operand, operand.shape[:1] + new_sizes, dimensions), 0
def _reshape_masking_rule(padded_args, logical_shapes, polymorphic_shapes,
new_sizes, dimensions):
operand, = padded_args
old_shape, = polymorphic_shapes
def is_poly(size): return type(size) is masking.Poly and not size.is_constant
def merge_const_sizes(shape):
poly_dims = [i for i, size in enumerate(shape) if is_poly(size)]
return [prod(shape[start:stop])
for start, stop in zip([0] + poly_dims, poly_dims + [len(shape)])]
if merge_const_sizes(old_shape) != merge_const_sizes(new_sizes):
raise NotImplementedError(
"Reshape on padded dimensions causing fragmentation is not supported.")
return reshape(operand,
new_sizes=masking.padded_shape_as_value(new_sizes),
dimensions=dimensions)
reshape_p = standard_primitive(_reshape_shape_rule, _reshape_dtype_rule,
'reshape', _reshape_translation_rule)
reshape_p.def_impl(_reshape_impl)
ad.deflinear2(reshape_p, _reshape_transpose_rule)
batching.primitive_batchers[reshape_p] = _reshape_batch_rule
masking.masking_rules[reshape_p] = _reshape_masking_rule
def _rev_shape_rule(operand, *, dimensions):
_check_shapelike('rev', 'dimensions', dimensions)
if len(set(dimensions)) != len(dimensions):
msg = 'rev dimensions must be unique, got {}.'
raise TypeError(msg.format(dimensions))
if dimensions and not _max(dimensions) < operand.ndim:
msg = ('rev dimensions must all be less than operand ndim, got dimensions '
'{} for operand ndim {}.')
raise TypeError(msg.format(dimensions, operand.ndim))
return operand.shape
def _rev_batch_rule(batched_args, batch_dims, *, dimensions):
operand, = batched_args
bdim, = batch_dims
new_dimensions = [i + 1 if i >= bdim else i for i in dimensions]
return rev(operand, new_dimensions), bdim
rev_p = standard_primitive(_rev_shape_rule, _input_dtype, 'rev')
ad.deflinear(rev_p, lambda t, dimensions: [rev(t, dimensions)])
batching.primitive_batchers[rev_p] = _rev_batch_rule
def _transpose_impl(operand, *, permutation):
if xla.type_is_device_array(operand):
if operand._lazy_expr is None:
lazy_expr = lazy.transpose(lazy.array(operand), permutation)
else:
lazy_expr = lazy.transpose(operand._lazy_expr, permutation)
aval = ShapedArray(lazy_expr.shape, operand.dtype)
return xla.make_device_array(aval, operand._device, lazy_expr, operand.device_buffer)
else:
return xla.apply_primitive(transpose_p, operand, permutation=permutation)
def _transpose_shape_rule(operand, *, permutation):
if not isinstance(permutation, (tuple, list, np.ndarray)):
msg = "transpose permutation must be a tuple/list/ndarray, got {}."
raise TypeError(msg.format(type(permutation)))
if tuple(sorted(permutation)) != tuple(range(operand.ndim)):
msg = ("transpose permutation isn't a permutation of operand dimensions, "
"got permutation {} for operand shape {}.")
raise TypeError(msg.format(permutation, operand.shape))
return tuple(np.take(operand.shape, permutation))
def _transpose_batch_rule(batched_args, batch_dims, *, permutation):
operand, = batched_args
bdim, = batch_dims
perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)
return transpose(operand, perm), 0
def _transpose_masking_rule(padded_vals, logical_shapes, permutation):
return transpose(*padded_vals, permutation=permutation)
transpose_p = standard_primitive(_transpose_shape_rule, _input_dtype,
'transpose')
transpose_p.def_impl(_transpose_impl)
ad.deflinear(transpose_p,
lambda t, permutation: [transpose(t, np.argsort(permutation))])
batching.primitive_batchers[transpose_p] = _transpose_batch_rule
masking.masking_rules[transpose_p] = _transpose_masking_rule
def _select_shape_rule(pred, on_true, on_false):
if on_true.shape != on_false.shape:
msg = "select on_true and on_false must have the same shape, got {} and {}."
raise TypeError(msg.format(on_true.shape, on_false.shape))
if pred.shape and pred.shape != on_true.shape:
msg = ("select pred must be scalar or have the same shape as on_true and "
"on_false, got pred shape {} for on_true and on_false of shape {}.")
raise TypeError(msg.format(pred.shape, on_true.shape))
return on_true.shape
def _select_dtype_rule(pred, on_true, on_false):
_check_same_dtypes("select", False, on_true.dtype, on_false.dtype)
if not dtypes.issubdtype(pred.dtype, np.bool_):
msg = "select pred must be boolean type, got {}."
raise TypeError(msg.format(pred.dtype))
return on_true.dtype
def _select_transpose_rule(t, pred, on_true, on_false):
assert not ad.is_undefined_primal(pred)
if type(t) is ad_util.Zero:
return ad_util.Zero
else:
zeros = full_like(t, 0)
return [None,
select(pred, t, zeros) if ad.is_undefined_primal(on_true) else None,
select(pred, zeros, t) if ad.is_undefined_primal(on_false) else None]
def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):
pred, on_true, on_false, = batched_args
pred_bdim, ot_bdim, of_bdim = batch_dims
size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)
if i is not None)
# avoid transposes and some broadcasts in special cases
if pred_bdim == ot_bdim == of_bdim:
if np.shape(pred) == np.shape(on_true):
return select(pred, on_true, on_false), pred_bdim
else:
# vmapped function had a scalar pred with nonscalar args
assert np.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])
return select(pred, on_true, on_false), pred_bdim
elif np.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:
if ot_bdim == of_bdim:
return select(pred, on_true, on_false), ot_bdim
elif np.shape(on_true) == np.shape(on_false):
on_false = batching.moveaxis(on_false, of_bdim, ot_bdim)
return select(pred, on_true, on_false), ot_bdim
pred = batching.bdim_at_front(pred, pred_bdim, size) if np.shape(pred) else pred
if not np.shape(on_true) == np.shape(on_false) == ():
on_true = batching.bdim_at_front(on_true, ot_bdim, size)
on_false = batching.bdim_at_front(on_false, of_bdim, size)
assert np.shape(on_true) == np.shape(on_false)
if 0 < np.ndim(pred) < np.ndim(on_true):
# vmapped function had a scalar pred with nonscalar args
assert np.ndim(pred) == 1
pred = broadcast_in_dim(pred, on_true.shape, [0])
if np.ndim(pred) > np.ndim(on_true):
assert np.ndim(on_true) == 0
on_true = broadcast(on_true, pred.shape)
on_false = broadcast(on_false, pred.shape)
return select(pred, on_true, on_false), 0
def _select_masking_rule(padded_vals, logical_shapes):
pred_shape, true_shape, false_shape = [
masking.padded_shape_as_value(val.shape) for val in padded_vals]
assert np.array_equal(pred_shape, true_shape)
assert np.array_equal(pred_shape, false_shape)
return select(*padded_vals)
def _select_jvp(primals, tangents):
pred, on_true, on_false = primals
_, on_true_dot, on_false_dot = tangents
out = select(pred, on_true, on_false)
if type(on_true_dot) is ad_util.Zero:
out_dot = select(pred, _zeros(on_false_dot), on_false_dot)
elif type(on_false_dot) is ad_util.Zero:
out_dot = select(pred, on_true_dot, _zeros(on_true_dot))
else:
out_dot = select(pred, on_true_dot, on_false_dot)
return out, out_dot
select_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')
ad.primitive_jvps[select_p] = _select_jvp
ad.primitive_transposes[select_p] = _select_transpose_rule
batching.primitive_batchers[select_p] = _select_batch_rule
masking.masking_rules[select_p] = _select_masking_rule
def _slice_shape_rule(operand, *, start_indices, limit_indices, strides):
_check_shapelike("slice", "start_indices", start_indices)
_check_shapelike("slice", "limit_indices", limit_indices)
if operand.ndim != len(start_indices):
msg = ("slice start_indices must have length equal to the number of "
"dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(limit_indices):
msg = ("slice limit_indices must have the same length as start_indices, "
"got start_inidices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if (not masking.is_polymorphic(limit_indices) and
not masking.is_polymorphic(operand.shape) and
not np.all(np.less_equal(limit_indices, operand.shape))):
msg = ("slice limit_indices must be less than or equal to operand shape, "
"got limit_indices {} for operand shape {}.")
raise TypeError(msg.format(limit_indices, operand.shape))
if not np.all(np.greater_equal(start_indices, 0)):
msg = ("slice start_indices must be greater than or equal to zero, "
"got start_indices of {}.")
raise TypeError(msg.format(start_indices))
if (not masking.is_polymorphic(limit_indices) and
not np.all(np.greater_equal(limit_indices, start_indices))):
msg = ("slice limit_indices must be greater than or equal to start_indices,"
" got start_indices {} and limit_indices {}.")
raise TypeError(msg.format(start_indices, limit_indices))
if strides is None:
strides = np.ones(operand.ndim, np.int32)
else:
_check_shapelike("slice", "strides", strides)
if len(strides) != operand.ndim:
msg = ("slice strides must have length equal to the number of dimensions "
"of the operand, got strides {} for operand shape {}.")
raise TypeError(msg.format(strides, operand.shape))
if not np.all(np.greater(strides, 0)):
msg = "slice strides must be positive, got {}"
raise TypeError(msg.format(strides))
diff = np.subtract(limit_indices, start_indices)
# Not np.divmod since Poly.__rdivmod__ is ignored by NumPy, breaks poly stride
return tuple(q + (r > 0) for q, r in map(divmod, diff, strides))
def _slice_translation_rule(c, operand, *, start_indices, limit_indices,
strides):
return xops.Slice(operand, start_indices, limit_indices,
strides or [1] * len(start_indices))
def _slice_transpose_rule(t, operand, *, start_indices, limit_indices, strides):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if strides is None or np.all(np.equal(strides, 1)):
pads = zip(start_indices, np.subtract(operand_shape, limit_indices),
(0,) * len(start_indices))
else:
real_limits = np.add(
start_indices,
np.where(np.array(t.shape) == 0, 0,
np.add(1, np.multiply(np.subtract(t.shape, 1), strides))))
pads = safe_zip(start_indices, np.subtract(operand_shape, real_limits),
np.subtract(strides, 1))
result = pad(t, _const(t, 0), pads)
assert result.shape == operand_shape, (
f"result.shape={result.shape} operand_shape={operand_shape}")
return [result]
def _slice_batching_rule(batched_args, batch_dims, *, start_indices,
limit_indices, strides):
operand, = batched_args
bdim, = batch_dims
new_start_indices = list(start_indices)
new_start_indices.insert(bdim, 0)
new_limit_indices = list(limit_indices)
new_limit_indices.insert(bdim, operand.shape[bdim])
if strides is None:
new_strides = None
else:
new_strides = list(strides)
new_strides.insert(bdim, 1)
out = slice(operand, new_start_indices, new_limit_indices, new_strides)
return out, bdim
def _slice_masking_rule(
padded_vals, logical_shapes, start_indices, limit_indices, strides):
operand, = padded_vals
strides = masking.padded_shape_as_value(strides) if strides else None
return slice(operand,
start_indices=masking.padded_shape_as_value(start_indices),
limit_indices=masking.padded_shape_as_value(limit_indices),
strides=strides)
slice_p = standard_primitive(_slice_shape_rule, _input_dtype, 'slice',
_slice_translation_rule)
ad.deflinear2(slice_p, _slice_transpose_rule)
batching.primitive_batchers[slice_p] = _slice_batching_rule
masking.masking_rules[slice_p] = _slice_masking_rule
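# Per the shape rule above, each output extent is ceil((limit - start) / stride).
# For example (illustrative sketch, assuming `jax.numpy as jnp`):
#
#   x = jnp.arange(12).reshape(3, 4)
#   slice(x, start_indices=(0, 1), limit_indices=(3, 4), strides=(1, 2))
#   # -> shape (3, 2): ceil(3/1) == 3 rows and ceil(3/2) == 2 columns (1 and 3).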
def _dynamic_slice_shape_rule(operand, *start_indices, slice_sizes):
if operand.ndim != len(start_indices):
msg = ("dynamic_slice start_indices must have length equal to the number "
"of dimensions of the operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if len(start_indices) != len(slice_sizes):
msg = ("dynamic_slice slice_sizes must have the same length as "
"start_indices, got start_inidices length {} and slice_sizes {}.")
raise TypeError(msg.format(len(start_indices), slice_sizes))
if not np.all(np.less_equal(slice_sizes, operand.shape)):
msg = ("slice slice_sizes must be less than or equal to operand shape, "
"got slice_sizes {} for operand shape {}.")
raise TypeError(msg.format(slice_sizes, operand.shape))
if not np.all(np.greater_equal(slice_sizes, 0)):
msg = ("slice slice_sizes must be greater than or equal to zero, "
"got slice_sizes of {}.")
raise TypeError(msg.format(slice_sizes))
return tuple(slice_sizes)
def _dynamic_slice_dtype_rule(operand, *start_indices, slice_sizes):
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, np.integer) for i in start_indices):
msg = ("index arguments to dynamic_slice must be integers of the same "
"type, got: {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_slice_translation_rule(c, operand, *start_indices, slice_sizes):
return xops.DynamicSlice(operand, start_indices, slice_sizes)
def _dynamic_slice_jvp(primals, tangents, *, slice_sizes):
tangent_out = tangents[0]
if type(tangent_out) is not ad_util.Zero:
tangent_out = dynamic_slice(tangent_out, primals[1:], slice_sizes)
return dynamic_slice(primals[0], primals[1:], slice_sizes), tangent_out
def _dynamic_slice_transpose_rule(t, operand, *start_indices, slice_sizes):
assert ad.is_undefined_primal(operand)
assert all(not ad.is_undefined_primal(s) for s in start_indices)
operand_shape, operand_dtype = operand.aval.shape, operand.aval.dtype
if config.omnistaging_enabled:
zeros = full(operand_shape, 0, operand_dtype)
else:
zeros = full(operand_shape, tie_in(t, _zero(t)))
if type(t) is ad_util.Zero:
return [zeros] + [None] * len(start_indices)
else:
return ([dynamic_update_slice(zeros, t, start_indices)] +
[None] * len(start_indices))
def _batch_dynamic_slice_indices(indices, bdims):
if len(indices) == 0:
return np.array([], 'int32'), None
size = next((x.shape[i] for x, i in zip(indices, bdims) if i is not None), -1)
if size < 0:
return concatenate([broadcast(i, (1,)) for i in indices], 0), None
indices = concatenate(
[broadcast_in_dim(x, (size, 1),
broadcast_dimensions=((0,) if i is not None else ()))
for x, i in zip(indices, bdims)],
dimension=1)
return indices, 0
def _dynamic_slice_batching_rule(batched_args, batch_dims, *, slice_sizes):
# A dynamic slice is a special case of gather; we can delegate to the gather
# batching rule.
# TODO(phawkins): consider removing dynamic_slice entirely and using gather
# always.
operand, *start_indices = batched_args
operand_bd, *start_idx_bds = batch_dims
operand_shape = (operand.shape if operand_bd is batching.not_mapped
else tuple(np.delete(operand.shape, operand_bd)))
dims = tuple(range(len(operand_shape)))
dnums = GatherDimensionNumbers(offset_dims=dims, collapsed_slice_dims=(),
start_index_map=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_indices, start_idx_bds)
return _gather_batching_rule(
[operand, index], [operand_bd, index_bdim], dimension_numbers=dnums,
slice_sizes=slice_sizes)
dynamic_slice_p = standard_primitive(
_dynamic_slice_shape_rule, _dynamic_slice_dtype_rule, 'dynamic_slice',
_dynamic_slice_translation_rule)
ad.primitive_jvps[dynamic_slice_p] = _dynamic_slice_jvp # TODO
ad.primitive_transposes[dynamic_slice_p] = _dynamic_slice_transpose_rule
batching.primitive_batchers[dynamic_slice_p] = _dynamic_slice_batching_rule
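# Unlike `slice`, the start indices here may be traced values while the slice
# sizes stay static. An illustrative sketch (assuming `jax.numpy as jnp`):
#
#   x = jnp.arange(10)
#   dynamic_slice(x, (jnp.array(3),), slice_sizes=(4,))   # -> [3, 4, 5, 6]
#
# XLA clamps the start indices so the slice stays in bounds, so a start of 8
# with slice size 4 is effectively shifted back to start at 6.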
def _dynamic_update_slice_shape_rule(operand, update, *start_indices):
if operand.ndim != update.ndim:
msg = ("dynamic_update_slice update must have the same rank as operand, "
"got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
if operand.ndim != len(start_indices):
msg = ("dynamic_update_slice start_indices must have length equal to the "
"rank of operand, got indices {} for operand shape {}.")
raise TypeError(msg.format(start_indices, operand.shape))
if not np.all(np.less_equal(update.shape, operand.shape)):
msg = ("dynamic_update_slice update shape must be smaller than operand "
"shape, got update shape {} for operand shape {}.")
raise TypeError(msg.format(update.shape, operand.shape))
return operand.shape
def _dynamic_update_slice_dtype_rule(operand, update, *start_indices):
_check_same_dtypes("dynamic_update_slice", False, operand.dtype, update.dtype)
if any(i.dtype != start_indices[0].dtype or
not dtypes.issubdtype(i.dtype, np.integer) for i in start_indices):
msg = ("index arguments to dynamic_update_slice must be integers of the "
"same type, got {}")
raise TypeError(msg.format(", ".join(i.dtype.name for i in start_indices)))
return operand.dtype
def _dynamic_update_slice_jvp(primals, tangents):
operand, update = primals[:2]
start_indices = primals[2:]
g_operand, g_update = tangents[:2]
val_out = dynamic_update_slice(operand, update, start_indices)
if type(g_operand) is ad_util.Zero and type(g_update) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_update = ad.instantiate_zeros(g_update)
tangent_out = dynamic_update_slice(g_operand, g_update, start_indices)
return val_out, tangent_out
def _dynamic_update_slice_transpose_rule(t, operand, update, *start_indices):
assert all(not ad.is_undefined_primal(x) for x in start_indices)
if ad.is_undefined_primal(update):
update_shape = update.aval.shape
else:
update_shape = update.shape
dus = dynamic_update_slice
ds = dynamic_slice
zeros = _zeros(t, shape=update_shape)
operand_t = dus(t, zeros, start_indices) if ad.is_undefined_primal(operand) else None
update_t = ds(t, start_indices, update_shape) if ad.is_undefined_primal(update) else None
return [operand_t, update_t] + [None] * len(start_indices)
def _dynamic_update_slice_translation_rule(c, operand, update, *start_indices):
return xops.DynamicUpdateSlice(operand, update, start_indices)
def _dynamic_update_slice_batching_rule(batched_args, batch_dims):
# A dynamic update slice is a special case of scatter; we can delegate to the
# scatter batching rule.
# TODO(phawkins): consider removing dynamic_update_slice entirely and using
# scatter always.
operand, update, *start_idx = batched_args
operand_bd, update_bd, *start_idx_bd = batch_dims
update_shape = (np.shape(update) if update_bd is batching.not_mapped
else tuple(np.delete(np.shape(update), update_bd)))
dims = tuple(range(len(update_shape)))
dnums = ScatterDimensionNumbers(update_window_dims=dims,
inserted_window_dims=(),
scatter_dims_to_operand_dims=dims)
index, index_bdim = _batch_dynamic_slice_indices(start_idx, start_idx_bd)
return _scatter_batching_rule(
scatter, (operand, index, update), (operand_bd, index_bdim, update_bd),
update_jaxpr=None, update_consts=None, dimension_numbers=dnums,
indices_are_sorted=True, unique_indices=True)
dynamic_update_slice_p = standard_primitive(
_dynamic_update_slice_shape_rule, _dynamic_update_slice_dtype_rule,
'dynamic_update_slice', _dynamic_update_slice_translation_rule)
ad.primitive_jvps[dynamic_update_slice_p] = _dynamic_update_slice_jvp
ad.primitive_transposes[dynamic_update_slice_p] = \
_dynamic_update_slice_transpose_rule
batching.primitive_batchers[dynamic_update_slice_p] = \
_dynamic_update_slice_batching_rule
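# An illustrative sketch of the functional update (assuming `jax.numpy as jnp`):
# the update is written into a copy of the operand at dynamic start indices.
#
#   x = jnp.zeros(6)
#   dynamic_update_slice(x, jnp.ones(2), (jnp.array(3),))
#   # -> [0., 0., 0., 1., 1., 0.]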
def _gather_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is GatherDimensionNumbers
proto = xla_client.GatherDimensionNumbers()
proto.offset_dims.extend(dimension_numbers.offset_dims)
proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
proto.start_index_map.extend(dimension_numbers.start_index_map)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _gather_dtype_rule(operand, start_indices, **kwargs):
if not dtypes.issubdtype(start_indices.dtype, np.integer):
raise ValueError("start_indices must have an integer type")
return dtypes.canonicalize_dtype(operand.dtype)
_rank = lambda arr: len(arr.shape)
def _is_sorted(dims, op_name, name):
for i in range(1, len(dims)):
if dims[i] < dims[i - 1]:
raise TypeError(f"{name} in {op_name} op must be sorted; got {dims}")
def _sorted_dims_in_range(dims, rank, op_name, name):
if len(dims) == 0:
return
invalid_dim = None
if dims[0] < 0:
invalid_dim = dims[0]
elif dims[-1] >= rank:
invalid_dim = dims[-1]
  if invalid_dim is not None:
raise TypeError(f"Invalid {name} set in {op_name} op; valid range is "
f"[0, {rank}); got: {invalid_dim}.")
def _no_duplicate_dims(dims, op_name, name):
if len(set(dims)) != len(dims):
raise TypeError(f"{name} in {op_name} op must not repeat; got: {dims}.")
def _gather_shape_rule(operand, start_indices, *, dimension_numbers,
slice_sizes):
offset_dims = dimension_numbers.offset_dims
collapsed_slice_dims = dimension_numbers.collapsed_slice_dims
start_index_map = dimension_numbers.start_index_map
# Note: in JAX, index_vector_dim is always computed as below, cf. the
# documentation of the GatherDimensionNumbers class.
index_vector_dim = _rank(start_indices) - 1
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if _rank(start_indices) < index_vector_dim or index_vector_dim < 0:
raise TypeError(f"Gather index leaf dimension must be within [0, rank("
f"start_indices) + 1). rank(start_indices) is "
f"{_rank(start_indices)} and gather index leaf dimension "
f"is {index_vector_dim}.")
expanded_start_indices_shape = list(start_indices.shape)
# This case should never happen in JAX, due to the implicit construction of
# index_vector_dim, but is included for completeness.
if len(expanded_start_indices_shape) == index_vector_dim:
expanded_start_indices_shape.append(1)
# Start ValidateGatherDimensions
  # In the error messages output by XLA, "offset_dims" is called "Output window
  # dimensions". For consistency's sake, our error messages stick to
  # "offset_dims".
_is_sorted(offset_dims, "gather", "offset_dims")
_no_duplicate_dims(offset_dims, "gather", "offset_dims")
output_offset_dim_count = len(offset_dims)
output_shape_rank = len(offset_dims) + _rank(start_indices) - 1
for i in range(output_offset_dim_count):
offset_dim = offset_dims[i]
if offset_dim < 0 or offset_dim >= output_shape_rank:
raise TypeError(f"Offset dimension {i} in gather op is out of bounds; "
f"got {offset_dim}, but should have been in "
f"[0, {output_shape_rank})")
if len(start_index_map) != start_indices.shape[index_vector_dim]:
raise TypeError(f"Gather op has {len(start_index_map)} elements in "
f"start_index_map and the bound of dimension "
f"index_vector_dim={index_vector_dim} of start_indices is "
f"{start_indices.shape[index_vector_dim]}. These two "
f"numbers must be equal.")
for i in range(len(start_index_map)):
operand_dim_for_start_index_i = start_index_map[i]
if (operand_dim_for_start_index_i < 0 or
operand_dim_for_start_index_i >= _rank(operand)):
raise TypeError(f"Invalid start_index_map; domain is "
f"[0, {_rank(operand)}), got: "
f"{i}->{operand_dim_for_start_index_i}.")
_no_duplicate_dims(start_index_map, "gather", "start_index_map")
_is_sorted(collapsed_slice_dims, "gather", "collapsed_slice_dims")
_sorted_dims_in_range(collapsed_slice_dims, _rank(operand), "gather",
"collapsed_slice_dims")
_no_duplicate_dims(collapsed_slice_dims, "gather", "collapsed_slice_dims")
if _rank(operand) != len(slice_sizes):
raise TypeError(f"Gather op must have one slice size for every input "
f"dimension; got: len(slice_sizes)={len(slice_sizes)}, "
f"input_shape.rank={_rank(operand)}")
if len(slice_sizes) != len(offset_dims) + len(collapsed_slice_dims):
raise TypeError(f"All components of the offset index in a gather op must "
f"either be a offset dimension or explicitly collapsed; "
f"got len(slice_sizes)={len(slice_sizes)}, "
f"output_slice_sizes={offset_dims}, collapsed_slice_dims="
f"{collapsed_slice_dims}.")
for i in range(len(slice_sizes)):
slice_size = slice_sizes[i]
corresponding_input_size = operand.shape[i]
if slice_size < 0 or slice_size > corresponding_input_size:
raise TypeError(f"Slice size at index {i} in gather op is out of range, "
f"must be within [0, {corresponding_input_size + 1}), "
f"got {slice_size}.")
for i in range(len(collapsed_slice_dims)):
bound = slice_sizes[collapsed_slice_dims[i]]
if bound > 1:
raise TypeError(f"Gather op can only collapse slice dims with bound 1 "
f"or 0, but bound is {bound} for index "
f"{collapsed_slice_dims[i]} at position {i}.")
expanded_start_indices_shape.pop(index_vector_dim)
start_indices_shape = iter(expanded_start_indices_shape)
slice_sizes = iter(np.delete(slice_sizes, collapsed_slice_dims))
return tuple(next(slice_sizes) if i in offset_dims
else next(start_indices_shape) for i in range(output_shape_rank))
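# Worked example of the shape rule above: gathering whole rows of a (5, 4)
# operand with start_indices of shape (3, 1), offset_dims=(1,),
# collapsed_slice_dims=(0,), start_index_map=(0,) and slice_sizes=(1, 4) gives
# output_shape_rank = 1 + 2 - 1 = 2; dimension 0 comes from the (popped)
# start_indices shape and dimension 1 from the surviving slice size, so the
# output shape is (3, 4).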
def _gather_translation_rule(c, operand, start_indices, *, dimension_numbers,
slice_sizes):
indices_shape = c.get_shape(start_indices)
return xops.Gather(
operand, start_indices,
_gather_dimensions_proto(indices_shape, dimension_numbers), slice_sizes,
indices_are_sorted=False)
def _gather_jvp_rule(g, operand, start_indices, *, dimension_numbers,
slice_sizes):
return gather(g, start_indices, dimension_numbers, slice_sizes)
def _gather_transpose_rule(t, operand, start_indices, *, dimension_numbers,
slice_sizes):
assert ad.is_undefined_primal(operand)
operand_shape = operand.aval.shape
if type(t) is ad_util.Zero:
return ad_util.Zero
if config.omnistaging_enabled:
zeros = full(operand_shape, _zero(t))
else:
zeros = full(operand_shape, tie_in(t, _zero(t)))
scatter_dnums = ScatterDimensionNumbers(
update_window_dims=dimension_numbers.offset_dims,
inserted_window_dims=dimension_numbers.collapsed_slice_dims,
scatter_dims_to_operand_dims=dimension_numbers.start_index_map)
out = scatter_add(zeros, start_indices, t, scatter_dnums,
indices_are_sorted=False,
unique_indices=False)
return [out, ad_util.Zero.from_value(start_indices)]
def _gather_batching_rule(batched_args, batch_dims, *, dimension_numbers,
slice_sizes):
operand, start_indices = batched_args
operand_bdim, start_indices_bdim = batch_dims
if operand_bdim is not None and start_indices_bdim is None:
operand = batching.moveaxis(operand, operand_bdim, 0)
slice_sizes = (operand.shape[0],) + slice_sizes
offset_dims = (0,) + tuple(np.add(1, dimension_numbers.offset_dims))
collapsed_slice_dims = tuple(np.add(1, dimension_numbers.collapsed_slice_dims))
start_index_map = tuple(np.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
elif operand_bdim is None and start_indices_bdim is not None:
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
offset_dims = tuple(np.add(1, dimension_numbers.offset_dims))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=dimension_numbers.collapsed_slice_dims,
start_index_map=dimension_numbers.start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
else:
operand = batching.moveaxis(operand, operand_bdim, 0)
start_indices = batching.moveaxis(start_indices, start_indices_bdim, 0)
count_shape = list(start_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)
start_indices = concatenate([counts, start_indices], len(count_shape) - 1)
slice_sizes = (1,) + slice_sizes
collapsed_slice_dims = (0,) + tuple(np.add(1, dimension_numbers.collapsed_slice_dims))
offset_dims = tuple(np.add(1, dimension_numbers.offset_dims))
start_index_map = (0,) + tuple(np.add(1, dimension_numbers.start_index_map))
dnums = GatherDimensionNumbers(
offset_dims=offset_dims,
collapsed_slice_dims=collapsed_slice_dims,
start_index_map=start_index_map)
return gather(operand, start_indices, dimension_numbers=dnums,
slice_sizes=slice_sizes), 0
gather_p = standard_primitive(
_gather_shape_rule, _gather_dtype_rule, 'gather',
_gather_translation_rule)
ad.defjvp(gather_p, _gather_jvp_rule, None)
ad.primitive_transposes[gather_p] = _gather_transpose_rule
batching.primitive_batchers[gather_p] = _gather_batching_rule
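# The dimension numbers from the worked example above express a plain row
# gather. An illustrative sketch (assuming `jax.numpy as jnp`):
#
#   x = jnp.arange(20.).reshape(5, 4)
#   idx = jnp.array([[0], [2], [4]])
#   dnums = GatherDimensionNumbers(offset_dims=(1,), collapsed_slice_dims=(0,),
#                                  start_index_map=(0,))
#   gather(x, idx, dnums, slice_sizes=(1, 4))  # -> rows 0, 2 and 4, shape (3, 4)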
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
assert type(dimension_numbers) is ScatterDimensionNumbers
proto = xla_client.ScatterDimensionNumbers()
proto.update_window_dims.extend(dimension_numbers.update_window_dims)
proto.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
proto.scatter_dims_to_operand_dims.extend(
dimension_numbers.scatter_dims_to_operand_dims)
assert indices_shape.rank() > 0
proto.index_vector_dim = indices_shape.rank() - 1
return proto
def _scatter_dtype_rule(operand, scatter_indices, updates, **kwargs):
if not dtypes.issubdtype(scatter_indices.dtype, np.integer):
raise ValueError("scatter_indices must have an integer type")
_check_same_dtypes("scatter", False, operand.dtype, updates.dtype)
return dtypes.canonicalize_dtype(operand.dtype)
def _scatter_shape_rule(operand, scatter_indices, updates, *, update_jaxpr,
update_consts, dimension_numbers, indices_are_sorted,
unique_indices):
update_window_dims = dimension_numbers.update_window_dims
inserted_window_dims = dimension_numbers.inserted_window_dims
scatter_dims_to_operand_dims = dimension_numbers.scatter_dims_to_operand_dims
index_vector_dim = _rank(scatter_indices) - 1
if _rank(scatter_indices) < index_vector_dim or index_vector_dim < 0:
raise TypeError(f"Scatter index leaf dimension must be within [0, "
f"rank(scatter_indices) + 1). rank(scatter_indices) is "
f"{_rank(scatter_indices)} and scatter index leaf "
f"dimension is {index_vector_dim}.")
expanded_scatter_indices_shape = list(scatter_indices.shape)
if len(expanded_scatter_indices_shape) == index_vector_dim:
expanded_scatter_indices_shape.append(1)
expected_updates_rank = (len(expanded_scatter_indices_shape) - 1 +
len(update_window_dims))
if _rank(updates) != expected_updates_rank:
raise TypeError(f"Updates tensor must be of rank {expected_updates_rank}; "
f"got {_rank(updates)}.")
_is_sorted(update_window_dims, "scatter", "update_window_dims")
_no_duplicate_dims(update_window_dims, "scatter", "update_window_dims")
_sorted_dims_in_range(update_window_dims, _rank(updates), "scatter",
"update_window_dims")
_is_sorted(inserted_window_dims, "scatter", "inserted_window_dims")
_no_duplicate_dims(inserted_window_dims, "scatter", "inserted_window_dims")
_sorted_dims_in_range(inserted_window_dims, _rank(operand), "scatter",
"inserted_window_dims")
window_size = len(update_window_dims) + len(inserted_window_dims)
if _rank(operand) != window_size:
raise TypeError(f"Scatter op has window of size {window_size}; doesn't "
f"match operand of rank {_rank(operand)}.")
# Validate scatter_dims_to_operand_dims
if (len(scatter_dims_to_operand_dims) !=
scatter_indices.shape[index_vector_dim]):
raise TypeError(f"Scatter op has {len(scatter_dims_to_operand_dims)} "
f"elements in scatter_dims_to_operand_dims and the bound "
f"of dimension index_vector_dim={index_vector_dim} of "
f"scatter_indices is "
f"{scatter_indices.shape[index_vector_dim]}. These two "
f"numbers must be equal")
for i in range(len(scatter_dims_to_operand_dims)):
dim = scatter_dims_to_operand_dims[i]
if dim < 0 or dim >= _rank(operand):
raise TypeError(f"Invalid scatter_dims_to_operand_dims mapping; domain "
f"is [0, {_rank(operand)}), got: {i}->{dim}.")
_no_duplicate_dims(scatter_dims_to_operand_dims, "scatter",
"scatter_dims_to_operand_dims")
max_update_slice_sizes = [operand.shape[i] for i in range(len(operand.shape))
if not i in set(inserted_window_dims)]
for i in range(len(update_window_dims)):
update_window_dim = update_window_dims[i]
if updates.shape[update_window_dim] > max_update_slice_sizes[i]:
raise TypeError(f"Bounds of the window dimensions of updates must not "
f"exceed the bounds of the corresponding dimensions of "
f"operand. For dimension {update_window_dim}, updates "
f"bound is {updates.shape[update_window_dim]}, operand "
f"bound is {max_update_slice_sizes[i]}.")
update_scatter_dims = [dim for dim in range(_rank(updates)) if dim not in
set(update_window_dims)]
scatter_dims_seen = 0
for i in update_scatter_dims:
if scatter_dims_seen == index_vector_dim:
scatter_dims_seen += 1
if updates.shape[i] != expanded_scatter_indices_shape[scatter_dims_seen]:
raise TypeError(f"Bounds of the scatter dimensions of updates must be "
f"the same as the bounds of the corresponding dimensions "
f"of scatter indices. For scatter dimension {i}, updates "
f"bound is {updates.shape[i]}, scatter_indices bound is "
f"{expanded_scatter_indices_shape[scatter_dims_seen]}.")
scatter_dims_seen += 1
return operand.shape
def _scatter_translation_rule(c, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
dtype = c.get_shape(operand).numpy_dtype()
init_value = xb.constant(c, np.array(0, dtype))
update_computation = _reduction_computation(
c, update_jaxpr, update_consts, init_value)
indices_shape = c.get_shape(scatter_indices)
return xops.Scatter(operand, scatter_indices, updates, update_computation,
_scatter_dimensions_proto(indices_shape, dimension_numbers),
indices_are_sorted, unique_indices)
def _scatter_add_translation_rule(
c, operand, scatter_indices, updates, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices,
expand_complex128=False):
dtype = c.get_shape(operand).numpy_dtype()
scatter_dims = _scatter_dimensions_proto(c.get_shape(scatter_indices),
dimension_numbers)
def _make_reducer(dtype):
subc = xla_bridge.make_computation_builder("scatter_add_reducer")
shape = xc.Shape.array_shape(np.dtype(dtype), ())
args = [xb.parameter(subc, 0, shape), xb.parameter(subc, 1, shape)]
out = xops.Add(args[0], args[1])
return subc.build(out)
if expand_complex128 and dtype == np.complex128:
update_computation = _make_reducer(np.float64)
re = xops.Scatter(xops.Real(operand), scatter_indices, xops.Real(updates),
update_computation, scatter_dims, indices_are_sorted,
unique_indices)
im = xops.Scatter(xops.Imag(operand), scatter_indices, xops.Imag(updates),
update_computation, scatter_dims, indices_are_sorted,
unique_indices)
return xops.Complex(re, im)
else:
update_computation = _make_reducer(dtype)
return xops.Scatter(operand, scatter_indices, updates, update_computation,
scatter_dims, indices_are_sorted, unique_indices)
def _scatter_add_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
val_out = scatter_add_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
tangent_out = scatter_add_p.bind(
g_operand, scatter_indices, g_updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
return val_out, tangent_out
def _scatter_add_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if type(t) is ad_util.Zero:
return ad_util.Zero
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = t
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(t, scatter_indices, dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_mul_transpose_rule(t, operand, scatter_indices, updates, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
assert not ad.is_undefined_primal(scatter_indices)
if ad.is_undefined_primal(updates):
updates_shape = updates.aval.shape
else:
updates_shape = updates.shape
if type(t) is ad_util.Zero:
return ad_util.Zero
operand_t = update_t = None
if ad.is_undefined_primal(operand):
operand_t = scatter_mul(
t, scatter_indices, updates, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
if ad.is_undefined_primal(updates):
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[dimension_numbers.update_window_dims[pos]])
pos += 1
update_t = gather(mul(t, operand), scatter_indices,
dimension_numbers=gather_dnums, slice_sizes=slice_sizes)
return [operand_t, None, update_t]
def _scatter_batching_rule(scatter_op, batched_args, batch_dims, *,
update_jaxpr, update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
operand, scatter_indices, updates = batched_args
operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims
del update_jaxpr, update_consts # Unused.
# move the operand batch dim to the front if it is not None, otherwise create
# it at the front (so that we can scatter into it)
size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)
if ax is not None)
operand = batching.bdim_at_front(operand, operand_bdim, size)
operand_bdim = 0
updates = batching.bdim_at_front(updates, updates_bdim, size)
if scatter_indices_bdim is None:
inserted_window_dims = tuple(np.add(1, dimension_numbers.inserted_window_dims))
update_window_dims = (0,) + tuple(np.add(1, dimension_numbers.update_window_dims))
scatter_dims_to_operand_dims = tuple(np.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(
operand, scatter_indices, updates, dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices), 0
# see the third case in _gather_batching_rule for comparison and comments
scatter_indices = batching.bdim_at_front(
scatter_indices, scatter_indices_bdim, size)
count_shape = list(scatter_indices.shape)
count_shape[-1] = 1
counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)
scatter_indices = concatenate([counts, scatter_indices],
len(count_shape) - 1)
update_window_dims = tuple(np.add(1, dimension_numbers.update_window_dims))
inserted_window_dims = (0,) + tuple(np.add(1, dimension_numbers.inserted_window_dims))
scatter_dims_to_operand_dims = (0,) + tuple(np.add(1, dimension_numbers.scatter_dims_to_operand_dims))
dnums = ScatterDimensionNumbers(
update_window_dims=update_window_dims,
inserted_window_dims=inserted_window_dims,
scatter_dims_to_operand_dims=scatter_dims_to_operand_dims)
return scatter_op(
operand, scatter_indices, updates, dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices), 0
scatter_add_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',
_scatter_add_translation_rule)
ad.primitive_jvps[scatter_add_p] = _scatter_add_jvp
ad.primitive_transposes[scatter_add_p] = _scatter_add_transpose_rule
batching.primitive_batchers[scatter_add_p] = (
partial(_scatter_batching_rule, scatter_add))
xla.backend_specific_translations['gpu'][scatter_add_p] = partial(
_scatter_add_translation_rule, expand_complex128=True)
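# Scatter-add of whole-row updates mirrors the gather example above. An
# illustrative sketch (assuming `jax.numpy as jnp`):
#
#   dnums = ScatterDimensionNumbers(update_window_dims=(1,),
#                                   inserted_window_dims=(0,),
#                                   scatter_dims_to_operand_dims=(0,))
#   scatter_add(jnp.zeros((5, 4)), jnp.array([[0], [2]]), jnp.ones((2, 4)), dnums)
#   # -> rows 0 and 2 become all ones; the others stay zero.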
scatter_mul_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-mul',
_scatter_translation_rule)
def _scatter_mul_jvp_rhs(g, x, i, y, *, dimension_numbers,
indices_are_sorted, unique_indices, **kw):
return mul(x, scatter_add(
zeros_like_array(x), i, g, dimension_numbers=dimension_numbers,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices))
ad.defjvp(scatter_mul_p,
lambda g, x, i, y, **kw: scatter_mul_p.bind(g, i, y, **kw),
None,
_scatter_mul_jvp_rhs)
ad.primitive_transposes[scatter_mul_p] = _scatter_mul_transpose_rule
batching.primitive_batchers[scatter_mul_p] = (
partial(_scatter_batching_rule, scatter_mul))
def _scatter_extremal_jvp(scatter_op, primals, tangents, update_jaxpr,
update_consts, dimension_numbers,
indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
scatter_dnums = dimension_numbers
updates_shape = updates.shape
val_out = scatter_op.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=scatter_dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
# gather_dnums and slice_sizes define the gather op that is the inverse of
# the scatter op specified by scatter_dnums
gather_dnums = GatherDimensionNumbers(
offset_dims=scatter_dnums.update_window_dims,
collapsed_slice_dims=scatter_dnums.inserted_window_dims,
start_index_map=scatter_dnums.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(operand.shape)):
if i in scatter_dnums.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates_shape[scatter_dnums.update_window_dims[pos]])
pos += 1
# For consistency with other max operations, if there are two or more values
# in updates that are contending to replace the same index location, the
# resulting tangent at that location will be the average of the associated
# tangents for the values in updates.
initial_vals = gather(
operand, scatter_indices, gather_dnums, np.array(slice_sizes))
target_vals = gather(
val_out, scatter_indices, gather_dnums, np.array(slice_sizes))
successful_updates = (updates == target_vals)
retained_values = (initial_vals == target_vals)
num_updates = gather(
scatter_add(_zeros(operand),
scatter_indices,
select(successful_updates, _ones(updates), _zeros(updates)),
scatter_dnums),
scatter_indices,
gather_dnums,
np.array(slice_sizes))
num_refs = gather(
scatter_add(_zeros(operand),
scatter_indices,
_ones(updates),
scatter_dnums),
scatter_indices,
gather_dnums,
np.array(slice_sizes))
updates_normalizer = select(retained_values,
1.0 / (num_updates + 1),
1.0 / num_updates)
updates_coef = select(successful_updates,
updates_normalizer,
_zeros(updates))
operand_normalizer = select(retained_values,
1.0 / (num_updates + 1),
_zeros(num_updates))
operand_coef = (-1.0 + operand_normalizer) / num_refs
# This can be simplified once scatter has transpose implemented
target_tangents = gather(
g_operand, scatter_indices, gather_dnums, np.array(slice_sizes))
tangent_updates = (target_tangents * operand_coef +
g_updates * updates_coef)
tangent_out = scatter_add(g_operand,
scatter_indices,
tangent_updates,
scatter_dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
return val_out, tangent_out
scatter_min_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',
_scatter_translation_rule)
batching.primitive_batchers[scatter_min_p] = (
partial(_scatter_batching_rule, scatter_min))
ad.primitive_jvps[scatter_min_p] = partial(_scatter_extremal_jvp, scatter_min_p)
scatter_max_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',
_scatter_translation_rule)
batching.primitive_batchers[scatter_max_p] = (
partial(_scatter_batching_rule, scatter_max))
ad.primitive_jvps[scatter_max_p] = partial(_scatter_extremal_jvp, scatter_max_p)
def _scatter_jvp(primals, tangents, *, update_jaxpr, update_consts,
dimension_numbers, indices_are_sorted, unique_indices):
operand, scatter_indices, updates = primals
g_operand, g_scatter_indices, g_updates = tangents
dnums = dimension_numbers
if type(g_operand) is ad_util.Zero and type(g_updates) is ad_util.Zero:
val_out = scatter_p.bind(
operand, scatter_indices, updates, update_jaxpr=update_jaxpr,
update_consts=update_consts, dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted, unique_indices=unique_indices)
return val_out, ad_util.Zero.from_value(val_out)
g_operand = ad.instantiate_zeros(g_operand)
g_updates = ad.instantiate_zeros(g_updates)
# If there are overlapping indices in the scatter, it is unspecified which
# update "wins". So we use the following perhaps surprising scheme:
# a) attach a positive ID to each update in updates, and perform the scatter
# on the IDs
# b) perform the inverse gather on the scattered IDs (similar to
# _scatter_add_transpose).
# c) use the gathered IDs to mask the primal and tangent values.
  # d) perform a scatter-add on the masked primal and tangent values. A benefit
  #    of using scatter-add here is that we don't need a `scatter` transpose
  #    rule.
ids_shape = np.array(updates.shape, dtype=np.int64)
ids_shape[dnums.update_window_dims,] = 1
num_ids = np.prod(ids_shape)
id_dtype = np.uint32 if (num_ids + 1) < np.iinfo(np.uint32).max else np.uint64
update_ids = add(reshape(iota(id_dtype, num_ids), ids_shape),
_ones(updates, dtype=id_dtype))
scattered_ids = scatter(full(operand.shape, 0, id_dtype),
scatter_indices, update_ids, dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
gather_dnums = GatherDimensionNumbers(
offset_dims=dnums.update_window_dims,
collapsed_slice_dims=dnums.inserted_window_dims,
start_index_map=dnums.scatter_dims_to_operand_dims)
slice_sizes = []
pos = 0
for i in range(len(scattered_ids.shape)):
if i in dnums.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(updates.shape[dnums.update_window_dims[pos]])
pos += 1
gathered_update_ids = gather(scattered_ids, scatter_indices,
dimension_numbers=gather_dnums,
slice_sizes=slice_sizes)
masked_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
operand, _zeros(operand))
masked_updates = select(eq(update_ids, gathered_update_ids),
updates, _zeros(updates))
masked_g_operand = select(eq(scattered_ids, _zeros(scattered_ids)),
g_operand, _zeros(g_operand))
masked_g_updates = select(eq(update_ids, gathered_update_ids),
g_updates, _zeros(g_updates))
val_out = scatter_add(masked_operand, scatter_indices, masked_updates,
dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
tangent_out = scatter_add(masked_g_operand, scatter_indices, masked_g_updates,
dimension_numbers=dnums,
indices_are_sorted=indices_are_sorted,
unique_indices=unique_indices)
return val_out, tangent_out
scatter_p = standard_primitive(
_scatter_shape_rule, _scatter_dtype_rule, 'scatter',
_scatter_translation_rule)
ad.primitive_jvps[scatter_p] = _scatter_jvp
batching.primitive_batchers[scatter_p] = (
partial(_scatter_batching_rule, scatter))
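# Note that with duplicate indices plain `scatter` leaves the winning update
# unspecified, which is why the JVP above masks out all but one contributing
# update before differentiating; `scatter_add` has no such ambiguity because
# duplicate contributions simply accumulate.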
def _reduce_shape_rule(*args, computation, jaxpr, consts, dimensions):
operand_args, init_value_args = split_list(args, [len(args) // 2])
if any(arg.shape != () for arg in init_value_args):
init_value_shapes = [a.shape for a in init_value_args]
raise ValueError(f'Found non-scalar init_value: {init_value_shapes}')
return [
tuple(np.delete(op_arg.shape, dimensions))
for op_arg in operand_args
]
def _reduce_dtype_rule(*args, computation, jaxpr, consts, dimensions):
_, init_value_args = split_list(args, [len(args) // 2])
return [
dtypes.canonicalize_dtype(in_arg.dtype)
for in_arg in init_value_args
]
def _reduce_translation_rule(c, *values, computation, jaxpr,
consts, dimensions):
operands, init_values = split_list(values, [len(values) // 2])
if len(operands) == 1:
init_value = init_values[0]
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
out = xops.Reduce(c, operands, init_values, xla_computation, dimensions)
return xops.Tuple(c, (out,))
xla_computation = _reduction_computation(c, jaxpr, consts, init_values, singleton=False)
return xops.Reduce(c, operands, init_values, xla_computation, dimensions)
def _reduce_batch_rule(batched_args, batch_dims, *, computation, jaxpr,
consts, dimensions):
num_operands = len(batched_args) // 2
operands, init_values = split_list(batched_args, [num_operands])
operand_bdims, init_value_bdims = split_list(batch_dims, [num_operands])
if all(init_value_bdim is None for init_value_bdim in init_value_bdims):
assert all(operand_bdim is not None for operand_bdim in operand_bdims)
assert all(operand_bdim == operand_bdims[0] for operand_bdim in operand_bdims)
operand_bdim = operand_bdims[0]
new_dimensions = [d + bool(d >= operand_bdim) for d in dimensions]
new_operand_bdim = operand_bdim - int(np.sum(np.less(dimensions, operand_bdim)))
new_operand_bdims = [new_operand_bdim] * num_operands
return reduce_p.bind(*(operands + init_values),
computation=computation, dimensions=tuple(new_dimensions),
consts=consts,
jaxpr=jaxpr), new_operand_bdims
else:
raise NotImplementedError
def _reduction_computation(c, jaxpr, consts, init_values, singleton=True):
if singleton:
init_values = [init_values]
shapes = safe_map(c.get_shape, init_values + init_values)
  axis_env = xla.AxisEnv(1, (), ())
  subc = xla_bridge.make_computation_builder("reduction_computation")
assert len(consts) == 0, "Reduction computations cannot have constants"
args = [xb.parameter(subc, i, shape) for i, shape in enumerate(shapes)]
out_nodes = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, '', *args)
if singleton:
return subc.build(out_nodes[0])
out_nodes = xops.Tuple(subc, out_nodes)
return subc.build(out_nodes)
def _masking_defreducer(prim, identity):
masking.masking_rules[prim] = partial(_reducer_masking_rule, prim, identity)
def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,
axes, input_shape=None, **reduce_kwargs):
(padded_val,), (logical_shape,) = padded_vals, logical_shapes
padded_shape = masking.padded_shape_as_value(padded_val.shape)
masks = [broadcasted_iota(np.int32, padded_shape, i) < d
for i, d in enumerate(logical_shape) if i in axes]
mask = _reduce(operator.and_, masks)
masked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))
prim_bind = partial(prim.bind, **reduce_kwargs)
bind = prim_bind if input_shape is None else partial(prim_bind, input_shape=padded_shape)
return bind(masked_val, axes=axes)
reduce_p = standard_primitive(_reduce_shape_rule, _reduce_dtype_rule,
'reduce', translation_rule=_reduce_translation_rule,
multiple_results=True)
batching.primitive_batchers[reduce_p] = _reduce_batch_rule
def _reduce_number_dtype_rule(name, operand, *args, **kw):
if not dtypes.issubdtype(operand.dtype, np.number):
raise TypeError("{} does not accept dtype {}. Accepted dtypes are subtypes "
"of number.".format(name, np.dtype(operand.dtype).name))
return dtypes.canonicalize_dtype(operand.dtype)
def _reduce_sum_shape_rule(operand, *, axes):
return _reduce_op_shape_rule(operand, axes=axes)
def _reduce_sum_translation_rule(c, operand, *, axes):
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, np.array(0, dtype))],
xla.primitive_subcomputation(add_p, scalar, scalar),
axes)
def _reduce_sum_transpose_rule(cotangent, operand, *, axes):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
broadcast_dimensions = tuple(np.delete(np.arange(len(input_shape)), axes))
result = broadcast_in_dim(cotangent, input_shape, broadcast_dimensions)
assert result.shape == input_shape
return [result]
reduce_sum_p = standard_primitive(
_reduce_sum_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_sum'),
'reduce_sum', _reduce_sum_translation_rule)
ad.deflinear2(reduce_sum_p, _reduce_sum_transpose_rule)
batching.defreducer(reduce_sum_p)
_masking_defreducer(reduce_sum_p,
lambda shape, dtype: np.broadcast_to(np.array(0, dtype), shape))
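# Binding this primitive on a (2, 3) operand with axes=(1,), e.g. via
# `reduce_sum_p.bind(x, axes=(1,))`, yields shape (2,); the transpose rule
# above then broadcasts a (2,)-shaped cotangent back to (2, 3) along the kept
# dimension, which is exactly the linearity of summation.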
def _reduce_op_shape_rule(operand, *, axes, input_shape=None):
  del input_shape
  if len(axes) != len(set(axes)):
raise ValueError(f"duplicate value in 'axes' of reduction: {axes}")
return tuple(np.delete(operand.shape, axes))
def _reduce_prod_translation_rule(c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, np.array(1, dtype))],
xla.primitive_subcomputation(mul_p, scalar, scalar), axes)
def _reduce_prod_jvp_rule(primals, tangents, *, axes):
operand, = primals
tangent, = tangents
input_shape = np.array(operand.shape)
n = np.prod(input_shape[list(axes)])
non_axes = np.delete(np.arange(len(input_shape)), axes)
permutation = axes + tuple(non_axes)
new_shape = (n,) + tuple(input_shape[non_axes])
operand = reshape(operand, new_shape, permutation)
tangent = reshape(tangent, new_shape, permutation)
def _reduce_prod_tree(x, axis=0):
while x.shape[axis] > 1:
n = x.shape[axis]
n1 = (n + 1) // 2
n2 = n - n1
x1 = slice_in_dim(x, 0, n1)
x2 = slice_in_dim(x, n1, None)
if n2 != n1:
paddings = [(0, 0, 0)] * len(x.shape)
paddings[axis] = (0, 1, 0)
x2 = pad(x2, _const(x, 1), paddings)
x = x1 * x2
if x.shape[axis] == 0:
return full(input_shape[non_axes], _one(x))
return squeeze(x, (axis,))
return api.jvp(_reduce_prod_tree, (operand,), (tangent,))
reduce_prod_p = standard_primitive(
_reduce_op_shape_rule, partial(_reduce_number_dtype_rule, 'reduce_prod'),
'reduce_prod', _reduce_prod_translation_rule)
ad.primitive_jvps[reduce_prod_p] = _reduce_prod_jvp_rule
batching.defreducer(reduce_prod_p)
_masking_defreducer(reduce_prod_p,
lambda shape, dtype: np.broadcast_to(np.array(1, dtype), shape))
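# The JVP above differentiates through a pairwise product tree: the reduced
# axis is repeatedly split in half and the halves are multiplied elementwise,
# padding an odd leftover element with the multiplicative identity. This keeps
# the differentiated computation O(log n) deep and avoids the division by
# potentially-zero elements that a total_product / x_i style rule would need.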
def _reduce_chooser_shape_rule(operand, *, axes):
return tuple(np.delete(operand.shape, axes))
def _reduce_chooser_translation_rule(prim, identity, c, operand, *, axes):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.Reduce(c, [operand], [xb.constant(c, identity(dtype))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
def _reduce_chooser_jvp_rule(g, ans, operand, *, axes):
shape = [1 if i in axes else d for i, d in enumerate(operand.shape)]
location_indicators = convert_element_type(
_eq_meet(operand, reshape(ans, shape)), g.dtype)
counts = _reduce_sum(location_indicators, axes)
return div(_reduce_sum(mul(g, location_indicators), axes), counts)
_reduce_max_translation_rule = partial(_reduce_chooser_translation_rule, max_p,
_get_max_identity)
reduce_max_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_max', _reduce_max_translation_rule)
ad.defjvp2(reduce_max_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_max_p)
_masking_defreducer(reduce_max_p,
lambda shape, dtype: np.broadcast_to(np.array(-np.inf, dtype), shape))
_reduce_min_translation_rule = partial(
_reduce_chooser_translation_rule, min_p, _get_min_identity)
reduce_min_p = standard_primitive(_reduce_op_shape_rule, _input_dtype,
'reduce_min', _reduce_min_translation_rule)
ad.defjvp2(reduce_min_p, _reduce_chooser_jvp_rule)
batching.defreducer(reduce_min_p)
_masking_defreducer(reduce_min_p,
lambda shape, dtype: np.broadcast_to(np.array(np.inf, dtype), shape))
def _argminmax_shape_rule(operand, *, axes, index_dtype):
axis, = axes
return tuple(np.delete(operand.shape, axis))
def _argminmax_dtype_rule(operand, *, axes, index_dtype):
if not dtypes.issubdtype(index_dtype, np.integer):
raise TypeError("index_dtype must be an integer type, but got {}"
.format(np.dtype(index_dtype).name))
return index_dtype
def _argminmax_translation_rule(value_comparator, identity,
c, operand, *, axes, index_dtype):
axis, = axes
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
subc = xb.make_computation_builder("argminmax_comparator")
value_shape = xc.Shape.array_shape(shape.xla_element_type(), ())
index_shape = xc.Shape.array_shape(index_dtype, ())
x_value = xb.parameter(subc, 0, value_shape)
x_index = xb.parameter(subc, 1, index_shape)
y_value = xb.parameter(subc, 2, value_shape)
y_index = xb.parameter(subc, 3, index_shape)
which_value = value_comparator(x_value, y_value)
which_index = xops.Or(which_value, xops.And(xops.Eq(x_value, y_value),
xops.Lt(x_index, y_index)))
xops.Tuple(subc, [xops.Select(which_value, x_value, y_value),
xops.Select(which_index, x_index, y_index)])
comparator = subc.build()
iota_shape = xc.Shape.array_shape(index_dtype, shape.dimensions())
iota = xc.ops.Iota(c, iota_shape, axis)
out = xops.Reduce(
c, [operand, iota],
[xb.constant(c, identity(dtype)),
xb.constant(c, np.array(0, index_dtype))], comparator, [axis])
return xops.GetTupleElement(out, 1)
def _argminmax_gpu_translation_rule(op, a, *, axes, index_dtype):
axis, = axes
idxs = tie_in(a, broadcasted_iota(index_dtype, a.shape, axis))
maxval = np.array(dtypes.iinfo(index_dtype).max, dtype=index_dtype)
maxval = broadcast(tie_in(a, maxval), a.shape)
mask_idxs = select(eq(a, expand_dims(op(a, (axis,)), (axis,))), idxs,
maxval)
return _reduce_min(mask_idxs, (axis,))
_argmin_translation_rule = partial(_argminmax_translation_rule, xops.Lt,
_get_min_identity)
_argmax_translation_rule = partial(_argminmax_translation_rule, xops.Gt,
_get_max_identity)
argmin_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmin', _argmin_translation_rule)
batching.defreducer(argmin_p)
ad.defjvp_zero(argmin_p)
xla.backend_specific_translations['gpu'][argmin_p] = xla.lower_fun(
partial(_argminmax_gpu_translation_rule, _reduce_min),
multiple_results=False)
argmax_p = standard_primitive(_argminmax_shape_rule, _argminmax_dtype_rule,
'argmax', _argmax_translation_rule)
batching.defreducer(argmax_p)
ad.defjvp_zero(argmax_p)
xla.backend_specific_translations['gpu'][argmax_p] = xla.lower_fun(
partial(_argminmax_gpu_translation_rule, _reduce_max),
multiple_results=False)
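# The comparator built above breaks ties toward the smaller index, matching
# NumPy's first-occurrence convention. An illustrative sketch (assuming
# `jax.numpy as jnp`):
#
#   argmax_p.bind(jnp.array([3., 7., 7.]), axes=(0,), index_dtype=np.int32)
#   # -> 1, the first position attaining the maximum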
def _reduce_logical_shape_rule(operand, *, axes):
if operand.dtype != np.bool_:
msg = "logical reduction requires operand dtype bool, got {}."
raise TypeError(msg.format(operand.dtype))
return tuple(np.delete(operand.shape, axes))
def _reduce_logical_translation_rule(prim, identity, c, operand, *, axes):
scalar = ShapedArray((), np.bool_)
return xops.Reduce(c, [operand], [xb.constant(c, identity(np.bool_))],
xla.primitive_subcomputation(prim, scalar, scalar), axes)
_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,
or_p, _get_max_identity)
reduce_or_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
'reduce_or', _reduce_or_translation_rule)
batching.defreducer(reduce_or_p)
_reduce_and_translation_rule = partial(_reduce_logical_translation_rule,
and_p, _get_min_identity)
reduce_and_p = standard_primitive(_reduce_logical_shape_rule, _fixed_dtype(np.bool_),
'reduce_and', _reduce_and_translation_rule)
batching.defreducer(reduce_and_p)
def _reduce_window_shape_rule(operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
if operand.dtype != init_value.dtype:
msg = ("reduce_window got inconsistent dtypes for operand and init_value: "
" got operand dtype {} and init_value dtype {}.")
raise TypeError(msg.format(operand.dtype, init_value.dtype))
if init_value.shape != ():
msg = ("reduce_window expected init_value to be a scalar but init_value "
"has shape {}.")
raise TypeError(msg.format(init_value.shape))
return _common_reduce_window_shape_rule(
operand, window_dimensions, window_strides, padding, base_dilation,
window_dilation)
def _reduce_window_translation_rule(c, operand, init_value, *, jaxpr, consts,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
xla_computation = _reduction_computation(c, jaxpr, consts, init_value)
return xops.ReduceWindowWithGeneralPadding(
operand, init_value, xla_computation, window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _generic_reduce_window_batch_rule(
batched_args, batch_dims, *, jaxpr, consts, window_dimensions,
window_strides, padding, base_dilation, window_dilation):
operand, init = batched_args
bdim, init_bdim = batch_dims
if init_bdim is not None:
raise NotImplementedError("reduce_window batching is not implemented for "
"initial values")
def reduce_window(x, window_dimensions, window_strides, padding, base_dilation,
window_dilation):
return reduce_window_p.bind(
x, init, jaxpr=jaxpr, consts=consts, window_dimensions=window_dimensions,
window_strides=window_strides, padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation)
return _reduce_window_batch_rule(
reduce_window, (operand,), (bdim,), window_dimensions=window_dimensions,
window_strides=window_strides, padding=padding, base_dilation=base_dilation,
window_dilation=window_dilation)
reduce_window_p = standard_primitive(
_reduce_window_shape_rule, _input_dtype, 'reduce_window',
_reduce_window_translation_rule)
batching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule
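# An illustrative sketch of a 1-D moving sum through the public wrapper defined
# elsewhere in this module (assuming `jax.numpy as jnp`; keyword names follow
# the primitive parameters above):
#
#   reduce_window(jnp.arange(5.), 0., add, window_dimensions=(2,),
#                 window_strides=(1,), padding=((0, 0),))
#   # -> [1., 3., 5., 7.]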
def _reduce_window_sum_shape_rule(operand, *, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
if not dtypes.issubdtype(operand.dtype, np.number):
msg = "operand to reduce_window_sum must have a number dtype, got {}"
raise TypeError(msg.format(np.dtype(operand.dtype).name))
return _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def _reduce_window_sum_translation_rule(c, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, np.array(0, dtype)),
xla.primitive_subcomputation(add_p, scalar, scalar), window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _reduce_window_sum_transpose_rule(cotangent, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
assert ad.is_undefined_primal(operand)
input_shape = operand.aval.shape
pads = _conv_general_vjp_lhs_padding(
input_shape, window_dimensions, window_strides, cotangent.shape, padding,
base_dilation, window_dilation)
ones = [1] * len(input_shape)
padding_config = [(lo, hi, stride - 1)
for (lo, hi), stride in zip(pads, window_strides)]
pad_cotangent = pad(cotangent, _zero(cotangent), padding_config)
result = _reduce_window_sum(pad_cotangent, window_dimensions, base_dilation,
[(0, 0)] * len(input_shape),
base_dilation=ones,
window_dilation=window_dilation)
assert result.shape == input_shape, (result.shape, input_shape)
return [result]
def _reduce_window_batch_rule(reduce_window, batched_args, bdims, *,
window_dimensions, window_strides, padding,
base_dilation, window_dilation):
operand, = batched_args
bdim, = bdims
if bdim is not None:
window_dimensions = \
window_dimensions[:bdim] + (1,) + window_dimensions[bdim:]
window_strides = window_strides[:bdim] + (1,) + window_strides[bdim:]
padding = padding[:bdim] + ((0, 0),) + padding[bdim:]
base_dilation = base_dilation[:bdim] + (1,) + base_dilation[bdim:]
window_dilation = window_dilation[:bdim] + (1,) + window_dilation[bdim:]
operand = reduce_window(operand, window_dimensions, window_strides, padding,
base_dilation, window_dilation)
return operand, bdim
reduce_window_sum_p = standard_primitive(
_reduce_window_sum_shape_rule, _input_dtype, 'reduce_window_sum',
_reduce_window_sum_translation_rule)
ad.deflinear2(reduce_window_sum_p, _reduce_window_sum_transpose_rule)
batching.primitive_batchers[reduce_window_sum_p] = partial(
_reduce_window_batch_rule, _reduce_window_sum)
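# The transpose rule above reuses the convolution-VJP padding arithmetic: the
# cotangent is interior-dilated by the window strides and padded so that a
# second window sum distributes each output cotangent back over exactly the
# input positions its window covered.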
def _reduce_window_chooser_translation_rule(
prim, identity, c, operand, *, window_dimensions, window_strides, padding,
base_dilation, window_dilation):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
return xops.ReduceWindowWithGeneralPadding(
operand, xb.constant(c, identity(dtype)),
xla.primitive_subcomputation(prim, scalar, scalar), window_dimensions,
window_strides, base_dilation, window_dilation, padding)
def _reduce_window_chooser_jvp_rule(prim, g, operand, *, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
assert prim is max_p or prim is min_p
select_prim = ge_p if prim is max_p else le_p
return _select_and_gather_add(g, operand, select_prim, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def _common_reduce_window_shape_rule(operand, window_dimensions,
window_strides, padding, base_dilation,
window_dilation):
_check_shapelike("reduce_window", "window_dimensions", window_dimensions,
non_zero_shape=True)
_check_shapelike("reduce_window", "window_strides", window_strides,
non_zero_shape=True)
_check_shapelike("reduce_window", "base_dilation", base_dilation)
_check_shapelike("reduce_window", "window_dilation", window_dilation)
if operand.ndim != len(window_dimensions):
msg = ("reduce_window got the wrong number of window_dimensions for "
"operand: got operand shape {} with window_dimensions {}.")
raise TypeError(msg.format(operand.shape, window_dimensions))
if len(window_strides) != len(window_dimensions):
msg = ("reduce_window got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
if len(base_dilation) != len(window_dimensions):
msg = ("reduce_window got inconsistent base_dilation and "
"window_dimensions: got base_dilation {} and window_dimensions {}.")
raise TypeError(msg.format(base_dilation, window_dimensions))
if len(window_dilation) != len(window_dimensions):
msg = ("reduce_window got inconsistent window_dilation and "
"window_dimensions: got window_dilation {} and window_dimensions "
"{}.")
raise TypeError(msg.format(window_dilation, window_dimensions))
return reduce_window_shape_tuple(operand.shape, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
def reduce_window_shape_tuple(operand_shape, window_dimensions, window_strides,
padding, base_dilation=None,
window_dilation=None):
if base_dilation is not None:
operand_shape = _dilate_shape(operand_shape, base_dilation)
if window_dilation is not None:
window_dimensions = _dilate_shape(window_dimensions, window_dilation)
operand_padded = np.add(operand_shape, np.add(*zip(*padding)))
t = np.floor_divide(
np.subtract(operand_padded, window_dimensions), window_strides) + 1
return tuple(t)
_reduce_window_max_translation_rule = partial(
_reduce_window_chooser_translation_rule, max_p, _get_max_identity)
reduce_window_max_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_max',
_reduce_window_max_translation_rule)
ad.defjvp(reduce_window_max_p, partial(_reduce_window_chooser_jvp_rule, max_p))
batching.primitive_batchers[reduce_window_max_p] = partial(
_reduce_window_batch_rule, _reduce_window_max)
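# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# Max pooling reuses the same reduce_window machinery with `max` and a -inf
# identity. Assumes `jax` is installed; the helper is never called by library code.
def _example_max_pool_2x2():
  import jax.numpy as jnp
  from jax import lax
  x = jnp.arange(16.0).reshape(1, 4, 4, 1)  # NHWC layout
  # 2x2 max pooling, stride 2 over the spatial dimensions.
  return lax.reduce_window(x, -jnp.inf, lax.max,
                           window_dimensions=(1, 2, 2, 1),
                           window_strides=(1, 2, 2, 1), padding='VALID')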
_reduce_window_min_translation_rule = partial(
_reduce_window_chooser_translation_rule, min_p, _get_min_identity)
reduce_window_min_p = standard_primitive(
_common_reduce_window_shape_rule, _input_dtype, 'reduce_window_min',
_reduce_window_min_translation_rule)
ad.defjvp(reduce_window_min_p, partial(_reduce_window_chooser_jvp_rule, min_p))
_reduce_window_min_batch_rule = partial(_reduce_window_batch_rule,
_reduce_window_min)
batching.primitive_batchers[reduce_window_min_p] = partial(
_reduce_window_batch_rule, _reduce_window_min)
def _select_and_scatter_shape_rule(
operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
_check_shapelike("select_and_scatter", "window_dimensions", window_dimensions)
_check_shapelike("select_and_scatter", "window_strides", window_strides)
if len(window_dimensions) != len(window_strides):
msg = ("select_and_scatter got inconsistent window_strides and "
"window_dimensions: got window_strides {} and window_dimensions {}.")
raise TypeError(msg.format(window_strides, window_dimensions))
return operand.shape
def _select_and_scatter_translation(
c, operand, source, init_value, *, select_jaxpr, select_consts, scatter_jaxpr,
scatter_consts, window_dimensions, window_strides, padding):
select = _reduction_computation(c, select_jaxpr, select_consts, init_value)
scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source,
init_value, scatter)
select_and_scatter_p = standard_primitive(
_select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',
_select_and_scatter_translation)
def _select_and_scatter_add_shape_rule(
source, operand, *, select_prim, window_dimensions, window_strides,
padding):
return operand.shape
def _select_and_scatter_add_translation(
c, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
dtype = c.get_shape(operand).numpy_dtype()
scalar = ShapedArray((), dtype)
select = xla.primitive_subcomputation(select_prim, scalar, scalar)
scatter = xla.primitive_subcomputation(add_p, scalar, scalar)
zero = xb.constant(c, np.array(0, dtype))
return xops.SelectAndScatterWithGeneralPadding(
operand, select, window_dimensions, window_strides, padding, source, zero,
scatter)
def _select_and_scatter_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_scatter_add(
source, operand, select_prim, window_dimensions, window_strides,
padding)
del g_operand
if type(g_source) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
tangent_out = _select_and_scatter_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding)
return val_out, tangent_out
def _select_and_scatter_add_transpose(
t, source, operand, *, select_prim, window_dimensions, window_strides,
padding):
assert ad.is_undefined_primal(source) and not ad.is_undefined_primal(operand)
ones = (1,) * len(window_dimensions)
source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions,
window_strides, padding, ones, ones)
return [source_t, None]
def _select_and_scatter_add_batch_rule(
batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
padding):
source, operand = batched_args
s_bdim, o_bdim = batch_dims
size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
if bdim is not None)
source = batching.bdim_at_front(source, s_bdim, size)
operand = batching.bdim_at_front(operand, o_bdim, size)
window_dimensions = (1,) + window_dimensions
window_strides = (1,) + window_strides
padding = ((0, 0),) + padding
out = _select_and_scatter_add(source, operand, select_prim, window_dimensions,
window_strides, padding)
return out, 0
select_and_scatter_add_p = standard_primitive(
_select_and_scatter_add_shape_rule, _input_dtype, 'select_and_scatter_add',
_select_and_scatter_add_translation)
ad.primitive_transposes[select_and_scatter_add_p] = \
_select_and_scatter_add_transpose
ad.primitive_jvps[select_and_scatter_add_p] = _select_and_scatter_add_jvp
batching.primitive_batchers[select_and_scatter_add_p] = \
_select_and_scatter_add_batch_rule
def _select_and_gather_add_shape_rule(
tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
if tangents.shape != operand.shape:
msg = ("select_and_gather_add tangents and operand shapes must match, "
"got {} and {}.")
raise TypeError(msg.format(tangents.shape, operand.shape))
return _common_reduce_window_shape_rule(
operand, window_dimensions, window_strides, padding, base_dilation,
window_dilation)
_UINT_DTYPES = {
16: np.uint16,
32: np.uint32,
64: np.uint64,
}
_INT_DTYPES = {
16: np.int16,
32: np.int32,
64: np.int64,
}
def _select_and_gather_add_translation(
c, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation, max_bits=64):
shape = c.get_shape(operand)
dtype = shape.numpy_dtype()
etype = shape.xla_element_type()
nbits = dtypes.finfo(dtype).bits
assert nbits <= max_bits
double_word_reduction = nbits * 2 <= max_bits
const = lambda c, dtype, x: xb.constant(c, np.array(x, dtype=dtype),
canonicalize_types=False)
if double_word_reduction:
    # We implement a pairwise ReduceWindow by packing two k-bit values into a
    # 2k-bit unsigned integer using bit tricks.
word_dtype = _UINT_DTYPES[nbits]
double_word_dtype = _UINT_DTYPES[nbits * 2]
word_type = xla_client.dtype_to_etype(word_dtype)
double_word_type = xla_client.dtype_to_etype(double_word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
a = xops.ConvertElementType(a, double_word_type)
b = xops.ConvertElementType(b, double_word_type)
a = xops.ShiftLeft(a, const(c, double_word_dtype, nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.ShiftRightLogical(t, const(c, double_word_dtype, nbits))
return xops.BitcastConvertType(xops.ConvertElementType(st, word_type), etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ConvertElementType(t, word_type), etype)
else:
# The double-word trick above only works if we have a sufficiently large
# type. As an alternative, we can pack two half words into a single word,
# at the cost of precision.
# TODO(b/73062247): add support for tuple reductions and remove this case.
warnings.warn("Using reduced precision for gradient of reduce-window "
"min/max operator to work around missing XLA support for "
"pair-reductions. This is likely from a second or "
"higher derivative of a max-pooling operation.")
r_nbits = nbits // 2
# Drop/round the bottom mantissa bits.
nexp = dtypes.finfo(dtype).nexp
nmant = r_nbits - nexp - 1
double_word_dtype = word_dtype = _UINT_DTYPES[nbits]
word_type = xla_client.dtype_to_etype(word_dtype)
# Packs two values into a tuple.
def pack(a, b):
a = xops.ReducePrecision(a, exponent_bits=nexp, mantissa_bits=nmant)
b = xops.ReducePrecision(b, exponent_bits=nexp, mantissa_bits=nmant)
a = xops.BitcastConvertType(a, word_type)
b = xops.BitcastConvertType(b, word_type)
b = xops.ShiftRightLogical(b, const(c, word_dtype, r_nbits))
return xops.Or(a, b)
# Unpacks the first element of a tuple.
def fst(c, t):
st = xops.And(t, const(c, word_dtype, ((1 << r_nbits) - 1) << r_nbits))
return xops.BitcastConvertType(st, etype)
# Unpacks the second element of a tuple.
def snd(t):
return xops.BitcastConvertType(xops.ShiftLeft(t, const(c, word_dtype, r_nbits)),
etype)
def reducer():
c = xla_bridge.make_computation_builder("select_and_gather_pair_reducer")
x = xb.parameter(c, 0,
xla_client.Shape.array_shape(np.dtype(double_word_dtype), ()))
y = xb.parameter(c, 1,
xla_client.Shape.array_shape(np.dtype(double_word_dtype), ()))
assert select_prim is ge_p or select_prim is le_p
which = xops.Ge if select_prim is ge_p else xops.Le
xops.Select(which(fst(c, x), fst(c, y)), x, y)
return c.build()
assert select_prim is ge_p or select_prim is le_p, select_prim
init = -np.inf if select_prim is ge_p else np.inf
out = xops.ReduceWindowWithGeneralPadding(
pack(operand, tangents), pack(const(c, dtype, init), const(c, dtype, 0)),
reducer(), window_dimensions, window_strides, base_dilation,
window_dilation, padding)
return snd(out)
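# --- Editor's note: illustrative sketch, not part of the original module. ---
# Plain-NumPy demonstration of the "double word" trick used above: a 32-bit key
# and a 32-bit payload are packed into one uint64 so that a single-value window
# reduction can carry a (key, payload) pair. The helper name is illustrative.
def _example_pack_pair_float32(key, payload):
  import numpy as np
  k = np.asarray(key, np.float32).view(np.uint32).astype(np.uint64)
  p = np.asarray(payload, np.float32).view(np.uint32).astype(np.uint64)
  packed = (k << np.uint64(32)) | p
  # Unpacking recovers both halves bit-exactly.
  unpacked_key = (packed >> np.uint64(32)).astype(np.uint32).view(np.float32)
  unpacked_payload = (packed & np.uint64(0xFFFFFFFF)).astype(np.uint32).view(np.float32)
  return packed, unpacked_key, unpacked_payload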
def _select_and_gather_add_jvp(
primals, tangents, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
source, operand = primals
g_source, g_operand = tangents
val_out = _select_and_gather_add(
source, operand, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation)
del g_operand
if type(g_source) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(val_out)
else:
tangent_out = _select_and_gather_add(
g_source, operand, select_prim, window_dimensions,
window_strides, padding, base_dilation, window_dilation)
return val_out, tangent_out
def _select_and_gather_add_transpose(
t, tangents, operand, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
assert select_prim in (le_p, ge_p)
assert ad.is_undefined_primal(tangents) and not ad.is_undefined_primal(operand)
if any(d != 1 for d in window_dilation):
msg = ("VJP not implemented for select_and_gather (MaxPool) with window "
"dilation, got window_dilation={}.")
raise NotImplementedError(msg.format(window_dilation))
if type(t) is ad_util.Zero:
    return [ad_util.Zero(tangents.aval), None]
has_base_dilation = any(d != 1 for d in base_dilation)
if has_base_dilation:
select_identity = (_get_max_identity if select_prim is ge_p
else _get_min_identity)
operand = pad(operand, select_identity(operand.dtype),
tuple((0, 0, d - 1) for d in base_dilation))
result = _select_and_scatter_add(t, operand, select_prim, window_dimensions,
window_strides, padding)
if has_base_dilation:
    result = slice(result, (0,) * len(result.shape), result.shape,
                   base_dilation)
return [result, None]
def _select_and_gather_add_batching_rule(
batched_args, batch_dims, *, select_prim, window_dimensions, window_strides,
padding, base_dilation, window_dilation):
t, x = batched_args
t_bdim, x_bdim = batch_dims
size = next(a.shape[bdim] for a, bdim in zip(batched_args, batch_dims)
if bdim is not None)
t = batching.bdim_at_front(t, t_bdim, size)
x = batching.bdim_at_front(x, x_bdim, size)
window_dimensions = (1,) + window_dimensions
window_strides = (1,) + window_strides
padding = ((0, 0),) + padding
base_dilation = (1,) + base_dilation
window_dilation = (1,) + window_dilation
out = _select_and_gather_add(t, x, select_prim, window_dimensions,
window_strides, padding, base_dilation,
window_dilation)
return (out, 0)
select_and_gather_add_p = standard_primitive(
_select_and_gather_add_shape_rule, _input_dtype, 'select_and_gather_add',
_select_and_gather_add_translation)
ad.primitive_jvps[select_and_gather_add_p] = _select_and_gather_add_jvp
ad.primitive_transposes[select_and_gather_add_p] = \
_select_and_gather_add_transpose
batching.primitive_batchers[select_and_gather_add_p] = \
_select_and_gather_add_batching_rule
xla.backend_specific_translations['tpu'][select_and_gather_add_p] = partial(
_select_and_gather_add_translation,
max_bits=32)
def _sort_abstract_eval(*args, **kwargs):
args = tuple(raise_to_shaped(arg) for arg in args)
if any(arg.shape != args[0].shape for arg in args[1:]):
shapes = " ".join(str(a.shape) for a in args)
raise TypeError(f"Arguments to sort must have equal shapes, got: {shapes}")
return args
def _float_to_int_for_sort(x):
# Switch from a floating point value to a integer value in such a way that
# when using the integer value to compare, we get the same result for normal
# values, and -nan is treated as the smallest value, and nan is treated as
# the largest value.
# If f is a float, and
# x = bit_cast<int32>(f);
# y = x < 0 ? int32_max - x : x;
# then y is ordered as an int32 such that finite values have the obvious
# order, -0 is ordered before 0, and -NaN and NaN appear at the beginning
# and end of the ordering.
# Note that in order to avoid -x to overflow, we calculate
# int32_max - x as unsigned, and then convert back to signed.
if x.dtype == dtypes.bfloat16:
x = convert_element_type(x, np.float32)
  nbits = np.finfo(x.dtype).bits
signed_dtype = _INT_DTYPES[nbits]
unsigned_dtype = _UINT_DTYPES[nbits]
signed = bitcast_convert_type(x, signed_dtype)
unsigned = bitcast_convert_type(x, unsigned_dtype)
flipped = bitcast_convert_type(
sub(unsigned_dtype(np.iinfo(signed_dtype).max), unsigned), signed_dtype)
return select(lt(signed, _zero(signed)), flipped, signed)
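# --- Editor's note: illustrative sketch, not part of the original module. ---
# The same key transform written with NumPy, so the resulting total order is
# easy to inspect directly (helper is illustrative and never called here).
def _example_float_sort_keys():
  import numpy as np
  x = np.array([np.nan, 1.0, -np.inf, -0.0, 0.0, np.inf, -1.0], np.float32)
  signed = x.view(np.int32)
  unsigned = x.view(np.uint32)
  flipped = (np.uint32(np.iinfo(np.int32).max) - unsigned).view(np.int32)
  keys = np.where(signed < 0, flipped, signed)
  # Sorting by `keys` orders x as: -inf, -1.0, -0.0, 0.0, 1.0, inf, nan.
  return x[np.argsort(keys, kind='stable')]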
# Default comparator that sorts the operands lexicographically on the
# first `num_keys` arguments.
# For floating point types, a total order is created where
# -NaN < -infinity < ... < -0 < 0 < ... < infinity < NaN.
# For complex types, the (real, imag) pairs are sorted lexicographically
# (following NumPy's semantics).
def _sort_lt_comparator(*operands, num_keys=1):
assert len(operands) >= 2 and len(operands) % 2 == 0, operands
assert len(operands) // 2 >= num_keys, (operands, num_keys)
x_keys, y_keys = [], []
for x, y in zip(operands[:2*num_keys:2], operands[1:2*num_keys:2]):
assert x.dtype == y.dtype, (x.dtype, y.dtype)
if np.issubdtype(x.dtype, np.complexfloating):
x_keys.extend([_float_to_int_for_sort(real(x)), _float_to_int_for_sort(imag(x))])
y_keys.extend([_float_to_int_for_sort(real(y)), _float_to_int_for_sort(imag(y))])
elif np.issubdtype(x.dtype, np.floating):
x_keys.append(_float_to_int_for_sort(x))
y_keys.append(_float_to_int_for_sort(y))
else:
x_keys.append(x)
y_keys.append(y)
p = None
for xk, yk in zip(x_keys[::-1], y_keys[::-1]):
p = (bitwise_or(lt(xk, yk), bitwise_and(eq(xk, yk), p)) if p is not None
else lt(xk, yk))
return p
def _sort_translation_rule(c, *operands, dimension, is_stable, num_keys):
types = [c.get_shape(x).xla_element_type() for x in operands]
subc = xla_bridge.make_computation_builder("sort_lt_comparator")
params = [xb.parameter(subc, 2 * i + j, xc.Shape.array_shape(typ, ()))
for i, typ in enumerate(types) for j in range(2)]
result = xla.lower_fun(partial(_sort_lt_comparator, num_keys=num_keys),
multiple_results=False)(subc, *params)
comparator = subc.build(result)
out = xops.Sort(c, operands, dimension=dimension, is_stable=is_stable,
comparator=comparator)
return out if len(operands) != 1 else xops.Tuple(c, [out])
def _sort_jvp(primals, tangents, *, dimension, is_stable, num_keys):
shape = primals[0].shape
iotas = []
for dim, size in enumerate(shape):
dtype = np.int32 if size < np.iinfo(np.int32).max else np.int64
iotas.append(broadcasted_iota(dtype, shape, dim))
primals = sort_p.bind(*(primals + (iotas[dimension],)), dimension=dimension,
is_stable=is_stable, num_keys=num_keys)
idx = tuple(primals[-1] if i == dimension else iotas[i]
for i in range(len(shape)))
tangents_out = tuple(t if type(t) is ad_util.Zero else t[idx] for t in tangents)
return tuple(primals[:-1]), tangents_out
def _sort_batch_rule(batched_args, batch_dims, *, dimension, is_stable, num_keys):
prototype_arg, new_bdim = next(
(a, b) for a, b in zip(batched_args, batch_dims) if b is not None)
new_args = []
for arg, bdim in zip(batched_args, batch_dims):
if bdim is None:
dims = np.delete(np.arange(prototype_arg.ndim), new_bdim)
new_args.append(broadcast_in_dim(arg, prototype_arg.shape, dims))
else:
new_args.append(batching.moveaxis(arg, bdim, new_bdim))
new_dimension = dimension + (new_bdim <= dimension)
bdims = (new_bdim,) * len(new_args)
return (sort_p.bind(*new_args, dimension=new_dimension, is_stable=is_stable, num_keys=num_keys),
bdims)
sort_p = Primitive('sort')
sort_p.multiple_results = True
sort_p.def_impl(partial(xla.apply_primitive, sort_p))
sort_p.def_abstract_eval(_sort_abstract_eval)
xla.translations[sort_p] = _sort_translation_rule
ad.primitive_jvps[sort_p] = _sort_jvp
batching.primitive_batchers[sort_p] = _sort_batch_rule
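# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# Sorting a key array together with a payload array through the public API.
# Assumes `jax` is installed; the helper is never called by library code.
def _example_sort_with_payload():
  import jax.numpy as jnp
  from jax import lax
  keys = jnp.array([3.0, 1.0, 2.0])
  payload = jnp.array([30, 10, 20])
  # Only the first `num_keys` operands take part in the comparison; the other
  # operands are permuted along with them.
  sorted_keys, sorted_payload = lax.sort((keys, payload), num_keys=1)
  return sorted_keys, sorted_payload  # ([1., 2., 3.], [10, 20, 30])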
def _top_k_abstract_eval(operand, *, k):
if k < 0:
raise ValueError("k argument to top_k must be nonnegative, got {}".format(k))
if len(operand.shape) == 0:
raise TypeError("top_k operand must have >= 1 dimension, got {}"
.format(operand.shape))
shape = list(operand.shape)
if shape[-1] < k:
msg = "k argument to top_k must be no larger than minor dimension; {} vs {}"
raise ValueError(msg.format(k, shape))
shape[-1] = k
return (ShapedArray(shape, operand.dtype),
ShapedArray(shape, np.dtype(np.int32)))
def _top_k_jvp(primals, tangents, *, k):
operand, = primals
tangent, = tangents
primals_out = top_k(operand, k)
if type(tangent) is ad_util.Zero:
tangent_out = ad_util.Zero.from_value(primals_out[0])
else:
_, k_idxs = primals_out
idx_shape = k_idxs.shape
rank = len(idx_shape)
gather_index_shape = idx_shape + (1,)
gather_indices = []
for i in range(rank-1):
_iota = iota(k_idxs.dtype, idx_shape[i])
if not config.omnistaging_enabled:
_iota = tie_in(operand, _iota)
_iota = broadcast_in_dim(_iota, gather_index_shape, (i,))
gather_indices.append(_iota)
gather_indices.append(reshape(k_idxs, gather_index_shape))
gather_indices = concatenate(gather_indices, dimension=rank)
slice_sizes = (1,) * rank
dnums = GatherDimensionNumbers(
offset_dims=(),
collapsed_slice_dims=tuple(range(rank)),
start_index_map=tuple(range(rank)))
tangent_out = gather(tangent, gather_indices, dnums, slice_sizes)
return primals_out, (tangent_out, ad_util.Zero.from_value(primals_out[1]))
def _top_k_batch_rule(batched_args, batch_dims, *, k):
operand, = batched_args
bdim, = batch_dims
if bdim == operand.ndim-1:
perm = np.arange(operand.ndim)
perm[bdim-1], perm[bdim] = perm[bdim], perm[bdim-1]
top_k_v, top_k_i = top_k(transpose(operand, perm), k=k)
return (transpose(top_k_v, perm),
transpose(top_k_i, perm)), (bdim, bdim)
else:
return top_k(operand, k=k), (bdim, bdim)
top_k_p = Primitive('top_k')
top_k_p.multiple_results = True
top_k_p.def_impl(partial(xla.apply_primitive, top_k_p))
top_k_p.def_abstract_eval(_top_k_abstract_eval)
xla.translations[top_k_p] = partial(standard_translate, 'top_k')
ad.primitive_jvps[top_k_p] = _top_k_jvp
batching.primitive_batchers[top_k_p] = _top_k_batch_rule
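# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# top_k returns the k largest entries along the minor dimension together with
# their indices. Assumes `jax` is installed; the helper is never called here.
def _example_top_k():
  import jax.numpy as jnp
  from jax import lax
  x = jnp.array([1.0, 9.0, 3.0, 7.0])
  values, indices = lax.top_k(x, k=2)
  return values, indices  # ([9., 7.], [1, 3])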
def _stop_gradient_jvp_rule(primals, tangents):
x, = primals
return stop_gradient(x), ad_util.Zero.from_value(x)
def _stop_gradient_batch_rule(batched_args, batch_dims):
x, = batched_args
dim, = batch_dims
return stop_gradient(x), dim
ad.primitive_jvps[ad_util.stop_gradient_p] = _stop_gradient_jvp_rule
batching.primitive_batchers[ad_util.stop_gradient_p] = _stop_gradient_batch_rule
def create_token(x):
return create_token_p.bind(stop_gradient(x))
create_token_p = Primitive("create_token")
create_token_p.def_impl(partial(xla.apply_primitive, create_token_p))
create_token_p.def_abstract_eval(lambda _: abstract_token)
xla.translations[create_token_p] = lambda c, *_: xops.CreateToken(c)
def after_all(*operands):
return after_all_p.bind(*operands)
def _after_all_abstract_eval(*operands):
if any(x is not abstract_token for x in operands):
raise TypeError("Arguments to after_all must be tokens")
return abstract_token
def _after_all_translation_rule(c, *operands):
return xops.AfterAll(c, operands)
after_all_p = Primitive("after_all")
after_all_p.def_impl(partial(xla.apply_primitive, after_all_p))
after_all_p.def_abstract_eval(_after_all_abstract_eval)
xla.translations[after_all_p] = _after_all_translation_rule
def infeed(token, shape=None, partitions=None):
flat_shapes, treedef = pytree.flatten(shape)
for shape in flat_shapes:
if not isinstance(shape, ShapedArray):
raise TypeError("shape argument to infeed must be a pytree of "
"ShapedArray values, got {}".format(shape))
if partitions is not None:
    if type(partitions) != tuple:
      raise ValueError(f"'partitions' argument to infeed should be a tuple, "
                       f"got {partitions}")
partitions = partitions + (None,)
xs_and_token = infeed_p.bind(token, shapes=tuple(flat_shapes),
partitions=partitions)
return (treedef.unflatten(xs_and_token[:-1]), xs_and_token[-1])
def _infeed_abstract_eval(token, *, shapes, partitions):
if token is not abstract_token:
raise TypeError("First argument to infeed must be a token")
return shapes + (abstract_token,)
def _infeed_translation_rule(c, token, *, shapes, partitions):
shape = tuple(shape.with_major_to_minor_layout_if_absent()
for x in shapes for shape in xla.aval_to_xla_shapes(x))
build_infeed = partial(xops.InfeedWithToken, token,
xla_client.Shape.tuple_shape(shape))
if partitions:
xs_and_token = xb.with_sharding(c, partitions, build_infeed)
else:
xs_and_token = build_infeed()
xs = xops.GetTupleElement(xs_and_token, 0)
token = xops.GetTupleElement(xs_and_token, 1)
outs = [xops.GetTupleElement(xs, i) for i in range(len(shapes))] + [token]
return xops.Tuple(c, outs)
infeed_p = Primitive("infeed")
infeed_p.multiple_results = True
infeed_p.def_impl(partial(xla.apply_primitive, infeed_p))
infeed_p.def_abstract_eval(_infeed_abstract_eval)
xla.translations[infeed_p] = _infeed_translation_rule
def outfeed(token, xs):
flat_xs, _ = pytree.flatten(xs)
return outfeed_p.bind(token, *flat_xs)
def _outfeed_abstract_eval(token, *xs):
if token is not abstract_token:
raise TypeError("First argument to outfeed must be a token")
return abstract_token
def _outfeed_translation_rule(c, token, *xs):
t = xops.Tuple(c, xs)
return xops.OutfeedWithToken(t, token, c.get_shape(t))
outfeed_p = Primitive("outfeed")
outfeed_p.def_impl(partial(xla.apply_primitive, outfeed_p))
outfeed_p.def_abstract_eval(_outfeed_abstract_eval)
xla.translations[outfeed_p] = _outfeed_translation_rule
def rng_uniform(a, b, shape):
return rng_uniform_p.bind(a, b, shape=tuple(shape))
def _rng_uniform_abstract_eval(a, b, *, shape):
if a.dtype != b.dtype:
raise ValueError(
"Arguments to rng_uniform must have identical dtypes, got {} "
"and {}.".format(a.dtype, b.dtype))
if a.shape != () or b.shape != ():
raise ValueError(
"Arguments to rng_uniform must be scalars; got shapes {} and {}."
.format(a.shape, b.shape))
return ShapedArray(shape, a.dtype)
def _rng_uniform_translation_rule(c, a, b, *, shape):
xla_shape = xc.Shape.array_shape(c.get_shape(a).xla_element_type(), shape)
return xops.RngUniform(a, b, xla_shape)
rng_uniform_p = Primitive("rng_uniform")
rng_uniform_p.def_impl(partial(xla.apply_primitive, rng_uniform_p))
rng_uniform_p.def_abstract_eval(_rng_uniform_abstract_eval)
xla.translations[rng_uniform_p] = _rng_uniform_translation_rule
def _iota_abstract_eval(*, dtype, shape, dimension):
_check_shapelike("iota", "shape", shape)
if not any(dtypes.issubdtype(dtype, t) for t in _num):
msg = 'iota does not accept dtype {}. Accepted dtypes are subtypes of {}.'
typename = str(np.dtype(dtype).name)
accepted_typenames = (t.__name__ for t in _num)
raise TypeError(msg.format(typename, ', '.join(accepted_typenames)))
if not 0 <= dimension < len(shape):
raise ValueError("iota dimension must be between 0 and len(shape), got "
f"dimension={dimension} for shape {shape}")
return ShapedArray(shape, dtype)
def _iota_translation_rule(c, dtype, shape, dimension):
etype = xla_client.dtype_to_etype(dtype)
xla_shape = xc.Shape.array_shape(etype, shape)
return xops.Iota(c, xla_shape, dimension)
iota_p = Primitive('iota')
iota_p.def_impl(partial(xla.apply_primitive, iota_p))
iota_p.def_abstract_eval(_iota_abstract_eval)
xla.translations[iota_p] = _iota_translation_rule
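# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# iota fills an array with indices along one dimension; broadcasted_iota does
# the same inside a larger shape. Assumes `jax` is installed; never called here.
def _example_iota():
  import numpy as np
  from jax import lax
  a = lax.iota(np.int32, 4)                      # [0, 1, 2, 3]
  b = lax.broadcasted_iota(np.int32, (2, 3), 1)  # [[0, 1, 2], [0, 1, 2]]
  return a, b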
_ndim = np.ndim
def _dilate_shape(shape, dilation):
if not np.all(np.greater(dilation, 0)):
msg = "All dilations must be positive, got {}."
raise TypeError(msg.format(dilation))
dilation = (1,) * (len(shape) - len(dilation)) + tuple(dilation)
return np.where(shape == 0, 0,
np.multiply(dilation, np.subtract(shape, 1)) + 1)
def _ceil_divide(x1, x2):
return -np.floor_divide(np.negative(x1), x2)
def padtype_to_pads(in_shape, window_shape, window_strides, padding):
PaddingType = xla_client.PaddingType
if isinstance(padding, str):
mapping = {'VALID': PaddingType.VALID, 'SAME': PaddingType.SAME}
try:
padding = mapping[padding.upper()]
except KeyError as err:
msg = "Unrecognized padding type: expected 'VALID' or 'SAME', got {}."
raise RuntimeError(msg.format(padding)) from err
if padding == PaddingType.SAME:
out_shape = _ceil_divide(in_shape, window_strides)
pad_sizes = np.maximum(0, (out_shape - 1) * window_strides +
window_shape - in_shape)
return [(pad_size // 2, pad_size - pad_size // 2) for pad_size in pad_sizes]
elif padding == PaddingType.VALID:
return [(0, 0)] * len(in_shape)
else:
msg = "Unknown padding type: {}."
raise TypeError(msg.format(padding))
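# --- Editor's note: illustrative sketch, not part of the original module. ---
# Worked example of the 'SAME' padding arithmetic above: for input length 10,
# window 3, stride 2, the output length is ceil(10 / 2) = 5, so the total pad
# is max(0, (5 - 1) * 2 + 3 - 10) = 1, split low/high as (0, 1).
def _example_same_padding():
  return padtype_to_pads((10,), (3,), (2,), 'SAME')  # [(0, 1)]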
def _check_same_dtypes(name, ignore_fp_precision, *ttypes):
  types = list(map(np.dtype, ttypes))
  if ignore_fp_precision:
types = [
np.floating if dtypes.issubdtype(dtype, np.floating)
else np.complexfloating if dtypes.issubdtype(dtype, np.complexfloating)
else dtype for dtype in types]
if len({dtypes.canonicalize_dtype(t) for t in types}) != 1:
if ignore_fp_precision:
msg = ("{} requires arguments to have same dtypes up to floating point "
"precision, got {}.")
else:
msg = "{} requires arguments to have the same dtypes, got {}."
raise TypeError(msg.format(name, ", ".join(map(str, types))))
def _check_conv_shapes(name, lhs_shape, rhs_shape, window_strides):
if len(lhs_shape) != len(rhs_shape):
msg = "Arguments to {} must have same rank, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if len(lhs_shape) < 2:
msg = "Arguments to {} must have rank at least 2, got {} and {}."
raise TypeError(msg.format(name, len(lhs_shape), len(rhs_shape)))
if lhs_shape[1] != rhs_shape[1]:
msg = "Arguments to {} must agree on input feature size, got {} and {}."
raise TypeError(msg.format(name, lhs_shape[1], rhs_shape[1]))
_check_shapelike(name, "window_strides", window_strides)
if not np.all(np.greater(window_strides, 0)):
msg = "All elements of window_strides must be positive, got {}."
raise TypeError(msg.format(window_strides))
if len(window_strides) != len(lhs_shape) - 2:
msg = "{} window_strides has wrong length: expected {}, got {}."
expected_length = len(lhs_shape) - 2
raise TypeError(msg.format(name, expected_length, len(window_strides)))
def conv_shape_tuple(lhs_shape, rhs_shape, strides, pads, batch_group_count=1):
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
if len(pads) != len(lhs_shape) - 2:
msg = "Wrong number of explicit pads for convolution: expected {}, got {}."
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))
lhs_padded = np.add(lhs_shape[2:], np.sum(np.array(pads).reshape(-1, 2),
axis=1))
out_space = np.floor_divide(
np.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = np.maximum(0, out_space)
assert lhs_shape[0] % batch_group_count == 0
out_shape = (lhs_shape[0] // batch_group_count, rhs_shape[0])
return tuple(out_shape + tuple(out_space))
def conv_general_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = np.take(lhs_shape, lhs_perm)
rhs_trans = np.take(rhs_shape, rhs_perm)
out_trans = conv_shape_tuple(lhs_trans, rhs_trans, window_strides, padding)
return tuple(np.take(out_trans, np.argsort(out_perm)))
def conv_transpose_shape_tuple(lhs_shape, rhs_shape, window_strides, padding,
dimension_numbers):
lhs_perm, rhs_perm, out_perm = conv_general_permutations(dimension_numbers)
lhs_trans = np.take(lhs_shape, lhs_perm)
rhs_trans = np.take(rhs_shape, rhs_perm)
if isinstance(padding, str):
padding = [_conv_transpose_padding(k, s, padding)
for k,s in zip(rhs_trans[2:], window_strides)]
padding = list(map(np.sum, padding))
unpad_out_space = [(i-1) * s - k + 2
for i, k, s in zip(lhs_trans[2:],
rhs_trans[2:],
window_strides)]
out_space = np.sum([unpad_out_space, padding], axis=0).tolist()
out_trans = tuple((lhs_trans[0], rhs_trans[0]) + tuple(out_space))
return tuple(np.take(out_trans, np.argsort(out_perm)))
def _check_shapelike(fun_name, arg_name, obj, non_zero_shape=False):
if not isinstance(obj, (tuple, list, np.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
if not len(obj): return
obj_arr = np.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
    raise TypeError(msg.format(fun_name, arg_name, obj_arr.ndim))
try:
canonicalize_shape(obj_arr)
except TypeError as err:
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj)))) from err
lower_bound, bound_error = (
(1, "strictly positive") if non_zero_shape else (0, "nonnegative"))
if not (obj_arr >= lower_bound).all():
msg = "{} {} must have every element be {}, got {}."
raise TypeError(msg.format(fun_name, arg_name, bound_error, obj))
def _dynamic_slice_indices(operand, start_indices):
if len(start_indices) != operand.ndim:
msg = ("Length of slice indices must match number of operand dimensions ({} "
"vs {})")
raise ValueError(msg.format(len(start_indices), operand.shape))
safe_map(int, operand.shape)
if not isinstance(start_indices, (tuple, list)):
if start_indices.ndim != 1:
raise ValueError("Slice indices must be a 1D sequence, got {}"
.format(start_indices.shape))
return select(lt(start_indices, _zeros(start_indices)),
add(start_indices, _const(start_indices, operand.shape)),
start_indices)
else:
return [np.asarray(i + d if i < 0 else i, getattr(i, 'dtype', dtypes.int_))
if isinstance(i, (int, np.integer))
else select(lt(i, _const(i, 0)), add(i, _const(i, d)), i)
for i, d in zip(start_indices, operand.shape)]
def _const(example, val):
if dtypes.is_python_scalar(example):
return dtypes.scalar_type_of(example)(val)
return np.array(val, _dtype(example))
_zeros: Callable = partial(full_like, fill_value=0)
_zero: Callable = partial(full_like, shape=(), fill_value=0)
_ones: Callable = partial(full_like, fill_value=1)
_one: Callable = partial(full_like, shape=(), fill_value=1)
_twos: Callable = partial(full_like, fill_value=2)
_two: Callable = partial(full_like, shape=(), fill_value=2)
dtype: Callable = dtypes.result_type
_dtype: Callable = dtypes.result_type
def _iscomplex(x) -> bool:
return dtypes.issubdtype(_dtype(x), np.complexfloating)
def ranges_like(*xs):
start = 0
for x in xs:
x_len = len(x)
yield range(start, start + x_len)
start += x_len
def remaining(original, *removed_lists):
removed = set(itertools.chain(*removed_lists))
return [i for i in original if i not in removed]
def _canonicalize_precision(precision):
if precision is None:
return None
if isinstance(precision, Precision) or (
isinstance(precision, tuple)
and len(precision) == 2
and all(isinstance(p, Precision) for p in precision)
):
return precision
else:
raise ValueError("Precision argument must be None, a lax.Precision value "
f"or a tuple of two lax.Precision values; got {precision}")
def conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers
) -> ConvDimensionNumbers:
if isinstance(dimension_numbers, ConvDimensionNumbers):
return dimension_numbers
if len(lhs_shape) != len(rhs_shape):
msg = "convolution requires lhs and rhs ndim to be equal, got {} and {}."
raise TypeError(msg.format(len(lhs_shape), len(rhs_shape)))
if dimension_numbers is None:
iota = tuple(range(len(lhs_shape)))
return ConvDimensionNumbers(iota, iota, iota)
elif isinstance(dimension_numbers, (list, tuple)):
if len(dimension_numbers) != 3:
msg = "convolution dimension_numbers list/tuple must be length 3, got {}."
raise TypeError(msg.format(len(dimension_numbers)))
if not all(isinstance(elt, str) for elt in dimension_numbers):
msg = "convolution dimension_numbers elements must be strings, got {}."
raise TypeError(msg.format(tuple(map(type, dimension_numbers))))
msg = ("convolution dimension_numbers[{}] must have len equal to the ndim "
"of lhs and rhs, got {} for lhs and rhs shapes {} and {}.")
for i, elt in enumerate(dimension_numbers):
if len(elt) != len(lhs_shape):
raise TypeError(msg.format(i, len(elt), lhs_shape, rhs_shape))
lhs_spec, rhs_spec, out_spec = conv_general_permutations(dimension_numbers)
return ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
else:
msg = "convolution dimension_numbers must be tuple/list or None, got {}."
raise TypeError(msg.format(type(dimension_numbers)))
def conv_general_permutations(dimension_numbers):
lhs_spec, rhs_spec, out_spec = dimension_numbers
lhs_char, rhs_char, out_char = charpairs = ("N", "C"), ("O", "I"), ("N", "C")
for i, (a, b) in enumerate(charpairs):
if not dimension_numbers[i].count(a) == dimension_numbers[i].count(b) == 1:
msg = ("convolution dimension_numbers[{}] must contain the characters "
"'{}' and '{}' exactly once, got {}.")
raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
msg = ("convolution dimension_numbers[{}] cannot have duplicate "
"characters, got {}.")
raise TypeError(msg.format(i, dimension_numbers[i]))
if not (set(lhs_spec) - set(lhs_char) == set(rhs_spec) - set(rhs_char) ==
set(out_spec) - set(out_char)):
msg = ("convolution dimension_numbers elements must each have the same "
"set of spatial characters, got {}.")
raise TypeError(msg.format(dimension_numbers))
def getperm(spec, charpair):
spatial = (i for i, c in enumerate(spec) if c not in charpair)
if spec is not rhs_spec:
spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)
lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
return lhs_perm, rhs_perm, out_perm
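# --- Editor's note: illustrative sketch, not part of the original module. ---
# Example of the permutations produced for the common NHWC / HWIO / NHWC layout:
# each permutation moves the (batch, feature) resp. (output, input) dimensions
# to the front, followed by the spatial dimensions (helper is never called).
def _example_conv_permutations():
  dn = ('NHWC', 'HWIO', 'NHWC')
  return conv_general_permutations(dn)
  # -> lhs_perm=(0, 3, 1, 2), rhs_perm=(3, 2, 0, 1), out_perm=(0, 3, 1, 2)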
def _conv_general_proto(dimension_numbers):
assert type(dimension_numbers) is ConvDimensionNumbers
lhs_spec, rhs_spec, out_spec = dimension_numbers
proto = xla_client.ConvolutionDimensionNumbers()
proto.input_batch_dimension = lhs_spec[0]
proto.input_feature_dimension = lhs_spec[1]
proto.output_batch_dimension = out_spec[0]
proto.output_feature_dimension = out_spec[1]
proto.kernel_output_feature_dimension = rhs_spec[0]
proto.kernel_input_feature_dimension = rhs_spec[1]
proto.input_spatial_dimensions.extend(lhs_spec[2:])
proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
proto.output_spatial_dimensions.extend(out_spec[2:])
return proto
def _conv_general_vjp_lhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation) -> List[Tuple[int, int]]:
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
pad_before = np.subtract(rhs_dilated_shape, [lo for lo, _ in padding]) - 1
pad_after = (np.add(lhs_dilated_shape, rhs_dilated_shape) - 1
- out_dilated_shape - pad_before)
return safe_zip(pad_before, pad_after)
def _conv_general_vjp_rhs_padding(
in_shape, window_dimensions, window_strides, out_shape, padding,
lhs_dilation, rhs_dilation):
lhs_dilated_shape = _dilate_shape(in_shape, lhs_dilation)
rhs_dilated_shape = _dilate_shape(window_dimensions, rhs_dilation)
out_dilated_shape = _dilate_shape(out_shape, window_strides)
total_in_pad = out_dilated_shape + rhs_dilated_shape - lhs_dilated_shape - 1
return [(pad[0], tot - pad[0]) for pad, tot in zip(padding, total_in_pad)]
def _balanced_eq(x, z, y):
return div(select(_eq_meet(x, z), _ones(z), _zeros(z)),
select(_eq_meet(y, z), _twos(z), _ones(z)))
def _eq_meet(a, b):
a_dtype, b_dtype = _dtype(a), _dtype(b)
if a_dtype != b_dtype:
higher_dtype = dtypes.promote_types(a_dtype, b_dtype)
if higher_dtype == a_dtype:
a = convert_element_type(a, b_dtype)
else:
b = convert_element_type(b, a_dtype)
return eq(a, b)
def _abstractify(x):
return raise_to_shaped(core.get_aval(x))
def _check_user_dtype_supported(dtype, fun_name=None):
if isinstance(dtype, type) and dtype in {bool, int, float, complex}:
return
np_dtype = np.dtype(dtype)
if np_dtype.kind not in "biufc" and np_dtype.type != dtypes.bfloat16:
msg = f"JAX only supports number and bool dtypes, got dtype {dtype}"
msg += f" in {fun_name}" if fun_name else ""
raise TypeError(msg)
if dtype is not None and np_dtype != dtypes.canonicalize_dtype(dtype):
msg = ("Explicitly requested dtype {} {} is not available, "
"and will be truncated to dtype {}. To enable more dtypes, set the "
"jax_enable_x64 configuration option or the JAX_ENABLE_X64 shell "
"environment variable. "
"See https://github.com/google/jax#current-gotchas for more.")
fun_name = f"requested in {fun_name}" if fun_name else ""
truncated_dtype = dtypes.canonicalize_dtype(dtype).name
  warnings.warn(msg.format(dtype, fun_name, truncated_dtype))
def _canonicalize_axis(axis, num_dims):
axis = operator.index(axis)
if not -num_dims <= axis < num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
if axis < 0:
axis = axis + num_dims
return axis
tie_in_p = Primitive('tie_in')
@config.register_omnistaging_disabler
def omnistaging_disabler() -> None:
global tie_in
def tie_in(x: Array, y: Array) -> Array:
if config.omnistaging_enabled:
return y
else:
return tie_in_p.bind(x, y)
try:
jax.lax.tie_in = tie_in
except AttributeError:
pass
def _tie_in_transpose_rule(t, x, y):
if ad.is_undefined_primal(x):
return [ad_util.Zero(x.aval), t]
else:
return [ad_util.Zero.from_value(x), t]
def _tie_in_batch_rule(batched_args, batch_dims):
y = tie_in(*batched_args)
_, bdim_y = batch_dims
return y, bdim_y
def _tie_in_impl(x, y):
core.check_valid_jaxtype(x)
core.check_valid_jaxtype(y)
return y
def _tie_in_jvp(primals, tangents):
x, y = primals
x_dot, y_dot = tangents
if type(y_dot) is ad_util.Zero or core.get_aval(y_dot).dtype is dtypes.float0:
return y, y_dot # skip tying in in this case
else:
return ad.linear_jvp(tie_in_p, primals, tangents)
tie_in_p.def_impl(_tie_in_impl)
tie_in_p.def_abstract_eval(lambda x, y: raise_to_shaped(y))
xla.translations[tie_in_p] = lambda c, x, y: y
ad.primitive_jvps[tie_in_p] = _tie_in_jvp
ad.primitive_transposes[tie_in_p] = partial(ad.linear_transpose2, _tie_in_transpose_rule)
batching.primitive_batchers[tie_in_p] = _tie_in_batch_rule
masking.masking_rules[tie_in_p] = lambda vals, logical_shapes: vals[1]
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: f70cb274777106042cfdd47c23d84236c554d7ee | size: 6235 | ext: py | lang: Python
max_stars_repo_path: server/api/organisations/campaigns.py | max_stars_repo_name: Guillon88/tasking-manager | max_stars_repo_head_hexsha: 8f05eef7cb008c680a82dc9884ab7a83a12ab1f8 | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-08-02T15:32:31.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-02T15:32:31.000Z
max_issues_repo_path: server/api/organisations/campaigns.py | max_issues_repo_name: Guillon88/tasking-manager | max_issues_repo_head_hexsha: 8f05eef7cb008c680a82dc9884ab7a83a12ab1f8 | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: server/api/organisations/campaigns.py | max_forks_repo_name: Guillon88/tasking-manager | max_forks_repo_head_hexsha: 8f05eef7cb008c680a82dc9884ab7a83a12ab1f8 | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from flask_restful import Resource, current_app
from server.services.campaign_service import CampaignService
from server.services.organisation_service import OrganisationService
from server.models.postgis.utils import NotFound
from server.models.postgis.campaign import Campaign
from server.services.users.authentication_service import token_auth, tm
class OrganisationsCampaignsAPI(Resource):
@token_auth.login_required
def post(self, organisation_id, campaign_id):
"""
Assigns a campaign to an organisation
---
tags:
- campaigns
produces:
- application/json
parameters:
- in: header
name: Authorization
description: Base64 encoded session token
required: true
type: string
default: Token sessionTokenHere==
- name: organisation_id
in: path
description: Unique organisation ID
required: true
type: integer
default: 1
- name: campaign_id
in: path
description: Unique campaign ID
required: true
type: integer
default: 1
responses:
200:
description: Organisation and campaign assigned successfully
401:
description: Unauthorized - Invalid credentials
403:
description: Forbidden - users have submitted mapping
404:
description: Project not found
500:
description: Internal Server Error
"""
try:
if OrganisationService.can_user_manage_organisation(
organisation_id, tm.authenticated_user_id
):
if Campaign.campaign_organisation_exists(campaign_id, organisation_id):
message = "Campaign {} is already assigned to organisation {}.".format(
campaign_id, organisation_id
)
return {"Error": message}, 400
CampaignService.create_campaign_organisation(
organisation_id, campaign_id
)
message = "campaign with id {} assigned for organisation with id {}".format(
campaign_id, organisation_id
)
return {"Success": message}, 200
else:
return {"Error": "User is not a manager of the organisation"}, 403
except Exception as e:
error_msg = f"Campaign Organisation POST - unhandled error: {str(e)}"
current_app.logger.critical(error_msg)
return {"Error": error_msg}, 500
def get(self, organisation_id):
"""
Returns all campaigns related to an organisation
---
tags:
- campaigns
produces:
- application/json
parameters:
- in: header
name: Authorization
description: Base64 encoded session token
required: false
type: string
default: Token sessionTokenHere==
- name: organisation_id
in: path
description: Unique project ID
required: true
type: integer
default: 1
responses:
200:
description: Success
404:
description: Organisation not found
500:
description: Internal Server Error
"""
try:
campaigns = CampaignService.get_organisation_campaigns_as_dto(
organisation_id
)
return campaigns.to_primitive(), 200
except NotFound:
return {"Error": "No campaign found"}, 404
except Exception as e:
error_msg = f"Organisation Campaigns GET - unhandled error: {str(e)}"
current_app.logger.critical(error_msg)
return {"Error": error_msg}, 500
@token_auth.login_required
def delete(self, organisation_id, campaign_id):
"""
        Unassigns an organisation from a campaign
---
tags:
- campaigns
produces:
- application/json
parameters:
- in: header
name: Authorization
description: Base64 encoded session token
required: true
type: string
default: Token sessionTokenHere==
- name: organisation_id
in: path
description: Unique organisation ID
required: true
type: integer
default: 1
- name: campaign_id
in: path
description: Unique campaign ID
required: true
type: integer
default: 1
responses:
200:
description: Organisation and campaign unassociated successfully
401:
description: Unauthorized - Invalid credentials
403:
description: Forbidden - users have submitted mapping
404:
description: Project not found
500:
description: Internal Server Error
"""
try:
if OrganisationService.can_user_manage_organisation(
organisation_id, tm.authenticated_user_id
):
CampaignService.delete_organisation_campaign(
organisation_id, campaign_id
)
return (
{"Success": "Organisation and campaign unassociated successfully"},
200,
)
else:
return {"Error": "User is not a manager of the organisation"}, 403
except NotFound:
return {"Error": "Organisation Campaign Not Found"}, 404
except Exception as e:
error_msg = f"Organisation Campaigns DELETE - unhandled error: {str(e)}"
current_app.logger.critical(error_msg)
return {"Error": error_msg}, 500
avg_line_length: 35.628571 | max_line_length: 92 | alphanum_fraction: 0.54146
is_comment_constant_removed: true | is_sharp_comment_removed: true

hexsha: f70cb2dc8973317c598e3990ad6648c0f1901bc2 | size: 15151 | ext: py | lang: Python
max_stars_repo_path: tools/program.py | max_stars_repo_name: ruyijidan/PaddleClas | max_stars_repo_head_hexsha: b986937cd2935d1a6591a9d19de7c07710632624 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tools/program.py | max_issues_repo_name: ruyijidan/PaddleClas | max_issues_repo_head_hexsha: b986937cd2935d1a6591a9d19de7c07710632624 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tools/program.py | max_forks_repo_name: ruyijidan/PaddleClas | max_forks_repo_head_hexsha: b986937cd2935d1a6591a9d19de7c07710632624 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from collections import OrderedDict
import paddle.fluid as fluid
from ppcls.optimizer import LearningRateBuilder
from ppcls.optimizer import OptimizerBuilder
from ppcls.modeling import architectures
from ppcls.modeling.loss import CELoss
from ppcls.modeling.loss import MixCELoss
from ppcls.modeling.loss import JSDivLoss
from ppcls.modeling.loss import GoogLeNetLoss
from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.collective import DistributedStrategy
from ema import ExponentialMovingAverage
def create_feeds(image_shape, use_mix=None):
"""
Create feeds as model input
Args:
image_shape(list[int]): model input shape, such as [3, 224, 224]
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
feeds(dict): dict of model input variables
"""
feeds = OrderedDict()
feeds['image'] = fluid.data(
name="feed_image", shape=[None] + image_shape, dtype="float32")
if use_mix:
feeds['feed_y_a'] = fluid.data(
name="feed_y_a", shape=[None, 1], dtype="int64")
feeds['feed_y_b'] = fluid.data(
name="feed_y_b", shape=[None, 1], dtype="int64")
feeds['feed_lam'] = fluid.data(
name="feed_lam", shape=[None, 1], dtype="float32")
else:
feeds['label'] = fluid.data(
name="feed_label", shape=[None, 1], dtype="int64")
return feeds
def create_dataloader(feeds):
"""
Create a dataloader with model input variables
Args:
feeds(dict): dict of model input variables
Returns:
dataloader(fluid dataloader):
"""
trainer_num = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
capacity = 64 if trainer_num <= 1 else 8
dataloader = fluid.io.DataLoader.from_generator(
feed_list=feeds,
capacity=capacity,
use_double_buffer=True,
iterable=True)
return dataloader
def create_model(architecture, image, classes_num, is_train):
"""
Create a model
Args:
architecture(dict): architecture information,
name(such as ResNet50) is needed
image(variable): model input variable
classes_num(int): num of classes
Returns:
out(variable): model output variable
"""
name = architecture["name"]
params = architecture.get("params", {})
if "is_test" in params:
params['is_test'] = not is_train
model = architectures.__dict__[name](**params)
out = model.net(input=image, class_dim=classes_num)
return out
def create_loss(out,
feeds,
architecture,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
"""
Create a loss for optimization, such as:
        1. CrossEntropy loss
        2. CrossEntropy loss with label smoothing
        3. CrossEntropy loss with mix(mixup, cutmix, fmix)
        4. CrossEntropy loss with label smoothing and mix(mixup, cutmix, fmix)
5. GoogLeNet loss
Args:
out(variable): model output variable
feeds(dict): dict of model input variables
architecture(dict): architecture information,
name(such as ResNet50) is needed
classes_num(int): num of classes
epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
loss(variable): loss variable
"""
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
loss = GoogLeNetLoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out[0], out[1], out[2], target)
if use_distillation:
assert len(out) == 2, ("distillation output length must be 2, "
"but got {}".format(len(out)))
loss = JSDivLoss(class_dim=classes_num, epsilon=epsilon)
return loss(out[1], out[0])
if use_mix:
loss = MixCELoss(class_dim=classes_num, epsilon=epsilon)
feed_y_a = feeds['feed_y_a']
feed_y_b = feeds['feed_y_b']
feed_lam = feeds['feed_lam']
return loss(out, feed_y_a, feed_y_b, feed_lam)
else:
loss = CELoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out, target)
def create_metric(out,
feeds,
architecture,
topk=5,
classes_num=1000,
use_distillation=False):
"""
Create measures of model accuracy, such as top1 and top5
Args:
out(variable): model output variable
feeds(dict): dict of model input variables(included label)
topk(int): usually top5
classes_num(int): num of classes
Returns:
fetchs(dict): dict of measures
"""
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
softmax_out = out[0]
else:
# just need student label to get metrics
if use_distillation:
out = out[1]
softmax_out = fluid.layers.softmax(out, use_cudnn=False)
fetchs = OrderedDict()
# set top1 to fetchs
top1 = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=1)
fetchs['top1'] = (top1, AverageMeter('top1', '.4f', need_avg=True))
# set topk to fetchs
k = min(topk, classes_num)
topk = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=k)
topk_name = 'top{}'.format(k)
fetchs[topk_name] = (topk, AverageMeter(topk_name, '.4f', need_avg=True))
return fetchs
def create_fetchs(out,
feeds,
architecture,
topk=5,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
"""
Create fetchs as model outputs(included loss and measures),
will call create_loss and create_metric(if use_mix).
Args:
out(variable): model output variable
feeds(dict): dict of model input variables.
If use mix_up, it will not include label.
architecture(dict): architecture information,
name(such as ResNet50) is needed
topk(int): usually top5
classes_num(int): num of classes
epsilon(float): parameter for label smoothing, 0.0 <= epsilon <= 1.0
use_mix(bool): whether to use mix(include mixup, cutmix, fmix)
Returns:
fetchs(dict): dict of model outputs(included loss and measures)
"""
fetchs = OrderedDict()
loss = create_loss(out, feeds, architecture, classes_num, epsilon, use_mix,
use_distillation)
fetchs['loss'] = (loss, AverageMeter('loss', '7.4f', need_avg=True))
if not use_mix:
metric = create_metric(out, feeds, architecture, topk, classes_num,
use_distillation)
fetchs.update(metric)
return fetchs
def create_optimizer(config):
"""
Create an optimizer using config, usually including
learning rate and regularization.
Args:
config(dict): such as
{
'LEARNING_RATE':
{'function': 'Cosine',
'params': {'lr': 0.1}
},
'OPTIMIZER':
{'function': 'Momentum',
'params':{'momentum': 0.9},
'regularizer':
{'function': 'L2', 'factor': 0.0001}
}
}
Returns:
an optimizer instance
"""
# create learning_rate instance
lr_config = config['LEARNING_RATE']
lr_config['params'].update({
'epochs': config['epochs'],
'step_each_epoch':
config['total_images'] // config['TRAIN']['batch_size'],
})
lr = LearningRateBuilder(**lr_config)()
# create optimizer instance
opt_config = config['OPTIMIZER']
opt = OptimizerBuilder(**opt_config)
return opt(lr)
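# --- Editor's note: illustrative sketch, not part of the original module. ---
# Minimal shape of the config dict that create_optimizer expects; the numbers
# below are placeholders for illustration, not values from any shipped config.
def _example_create_optimizer():
    config = {
        'LEARNING_RATE': {'function': 'Cosine', 'params': {'lr': 0.1}},
        'OPTIMIZER': {'function': 'Momentum', 'params': {'momentum': 0.9},
                      'regularizer': {'function': 'L2', 'factor': 0.0001}},
        'epochs': 120,
        'total_images': 1281167,
        'TRAIN': {'batch_size': 256},
    }
    return create_optimizer(config)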
def dist_optimizer(config, optimizer):
"""
Create a distributed optimizer based on a normal optimizer
Args:
config(dict):
optimizer(): a normal optimizer
Returns:
optimizer: a distributed optimizer
"""
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 3
exec_strategy.num_iteration_per_drop_scope = 10
dist_strategy = DistributedStrategy()
dist_strategy.nccl_comm_num = 1
dist_strategy.fuse_all_reduce_ops = True
dist_strategy.exec_strategy = exec_strategy
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer
def mixed_precision_optimizer(config, optimizer):
use_fp16 = config.get('use_fp16', False)
amp_scale_loss = config.get('amp_scale_loss', 1.0)
use_dynamic_loss_scaling = config.get('use_dynamic_loss_scaling', False)
if use_fp16:
optimizer = fluid.contrib.mixed_precision.decorate(
optimizer,
init_loss_scaling=amp_scale_loss,
use_dynamic_loss_scaling=use_dynamic_loss_scaling)
return optimizer
def build(config, main_prog, startup_prog, is_train=True):
"""
Build a program using a model and an optimizer
1. create feeds
2. create a dataloader
3. create a model
4. create fetchs
5. create an optimizer
Args:
config(dict): config
main_prog(): main program
startup_prog(): startup program
is_train(bool): train or valid
Returns:
dataloader(): a bridge between the model and the data
fetchs(dict): dict of model outputs(included loss and measures)
"""
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
use_mix = config.get('use_mix') and is_train
use_distillation = config.get('use_distillation')
feeds = create_feeds(config.image_shape, use_mix=use_mix)
dataloader = create_dataloader(feeds.values())
out = create_model(config.ARCHITECTURE, feeds['image'],
config.classes_num, is_train)
fetchs = create_fetchs(
out,
feeds,
config.ARCHITECTURE,
config.topk,
config.classes_num,
epsilon=config.get('ls_epsilon'),
use_mix=use_mix,
use_distillation=use_distillation)
if is_train:
optimizer = create_optimizer(config)
lr = optimizer._global_learning_rate()
fetchs['lr'] = (lr, AverageMeter('lr', 'f', need_avg=False))
optimizer = mixed_precision_optimizer(config, optimizer)
optimizer = dist_optimizer(config, optimizer)
optimizer.minimize(fetchs['loss'][0])
if config.get('use_ema'):
global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter(
)
ema = ExponentialMovingAverage(
config.get('ema_decay'), thres_steps=global_steps)
ema.update()
return dataloader, fetchs, ema
return dataloader, fetchs
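# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal sketch of how build() is typically driven, assuming `config` is the parsed
# configuration object used throughout this module.
def _example_build_programs(config):
    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    valid_prog = fluid.Program()
    # Training program; only the first two return values are kept here (an EMA object is
    # additionally returned when 'use_ema' is enabled).
    train_dataloader, train_fetchs = build(config, train_prog, startup_prog, is_train=True)[:2]
    # Validation program: no optimizer is attached when is_train is False.
    valid_dataloader, valid_fetchs = build(config, valid_prog, startup_prog, is_train=False)
    return train_prog, train_dataloader, train_fetchs, valid_prog, valid_dataloader, valid_fetchs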
def compile(config, program, loss_name=None):
"""
Compile the program
Args:
config(dict): config
program(): the program to be compiled and wrapped for data-parallel execution
loss_name(str): loss name
Returns:
compiled_program(): a compiled program
"""
build_strategy = fluid.compiler.BuildStrategy()
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10
compiled_program = fluid.CompiledProgram(program).with_data_parallel(
loss_name=loss_name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
return compiled_program
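# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Compiling the training program for data-parallel execution; the loss variable comes from
# the fetch dict produced by create_fetchs() above.
def _example_compile(config, train_prog, train_fetchs):
    return compile(config, train_prog, loss_name=train_fetchs['loss'][0].name)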
total_step = 0
def run(dataloader,
exe,
program,
fetchs,
epoch=0,
mode='train',
vdl_writer=None):
"""
Feed data to the model and fetch the measures and loss
Args:
dataloader(fluid dataloader): dataloader that feeds batches to the program
exe(fluid.Executor): executor that runs the program
program(): the (compiled) program to run
fetchs(dict): dict of measures and the loss
epoch(int): epoch of training or validation
mode(str): 'train', 'valid' or 'eval'; used for logging only
Returns:
the averaged top1 accuracy when mode is 'valid', otherwise None
"""
fetch_list = [f[0] for f in fetchs.values()]
metric_list = [f[1] for f in fetchs.values()]
for m in metric_list:
m.reset()
batch_time = AverageMeter('elapse', '.3f')
tic = time.time()
for idx, batch in enumerate(dataloader()):
metrics = exe.run(program=program, feed=batch, fetch_list=fetch_list)
batch_time.update(time.time() - tic)
tic = time.time()
for i, m in enumerate(metrics):
metric_list[i].update(m[0], len(batch[0]))
fetchs_str = ''.join([str(m.value) + ' '
for m in metric_list] + [batch_time.value]) + 's'
if vdl_writer:
global total_step
logger.scaler('loss', metrics[0][0], total_step, vdl_writer)
total_step += 1
if mode == 'eval':
logger.info("{:s} step:{:<4d} {:s}s".format(mode, idx, fetchs_str))
else:
epoch_str = "epoch:{:<3d}".format(epoch)
step_str = "{:s} step:{:<4d}".format(mode, idx)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(epoch_str, "HEADER")
if idx == 0 else epoch_str,
logger.coloring(step_str, "PURPLE"),
logger.coloring(fetchs_str, 'OKGREEN')))
end_str = ''.join([str(m.mean) + ' '
for m in metric_list] + [batch_time.total]) + 's'
if mode == 'eval':
logger.info("END {:s} {:s}s".format(mode, end_str))
else:
end_epoch_str = "END epoch:{:<3d}".format(epoch)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(end_epoch_str, "RED"),
logger.coloring(mode, "PURPLE"),
logger.coloring(end_str, "OKGREEN")))
# return top1_acc in order to save the best model
if mode == 'valid':
return fetchs["top1"][1].avg
| 32.443255
| 92
| 0.613029
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from collections import OrderedDict
import paddle.fluid as fluid
from ppcls.optimizer import LearningRateBuilder
from ppcls.optimizer import OptimizerBuilder
from ppcls.modeling import architectures
from ppcls.modeling.loss import CELoss
from ppcls.modeling.loss import MixCELoss
from ppcls.modeling.loss import JSDivLoss
from ppcls.modeling.loss import GoogLeNetLoss
from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger
from paddle.fluid.incubate.fleet.collective import fleet
from paddle.fluid.incubate.fleet.collective import DistributedStrategy
from ema import ExponentialMovingAverage
def create_feeds(image_shape, use_mix=None):
feeds = OrderedDict()
feeds['image'] = fluid.data(
name="feed_image", shape=[None] + image_shape, dtype="float32")
if use_mix:
feeds['feed_y_a'] = fluid.data(
name="feed_y_a", shape=[None, 1], dtype="int64")
feeds['feed_y_b'] = fluid.data(
name="feed_y_b", shape=[None, 1], dtype="int64")
feeds['feed_lam'] = fluid.data(
name="feed_lam", shape=[None, 1], dtype="float32")
else:
feeds['label'] = fluid.data(
name="feed_label", shape=[None, 1], dtype="int64")
return feeds
def create_dataloader(feeds):
trainer_num = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
capacity = 64 if trainer_num <= 1 else 8
dataloader = fluid.io.DataLoader.from_generator(
feed_list=feeds,
capacity=capacity,
use_double_buffer=True,
iterable=True)
return dataloader
def create_model(architecture, image, classes_num, is_train):
name = architecture["name"]
params = architecture.get("params", {})
if "is_test" in params:
params['is_test'] = not is_train
model = architectures.__dict__[name](**params)
out = model.net(input=image, class_dim=classes_num)
return out
def create_loss(out,
feeds,
architecture,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
loss = GoogLeNetLoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out[0], out[1], out[2], target)
if use_distillation:
assert len(out) == 2, ("distillation output length must be 2, "
"but got {}".format(len(out)))
loss = JSDivLoss(class_dim=classes_num, epsilon=epsilon)
return loss(out[1], out[0])
if use_mix:
loss = MixCELoss(class_dim=classes_num, epsilon=epsilon)
feed_y_a = feeds['feed_y_a']
feed_y_b = feeds['feed_y_b']
feed_lam = feeds['feed_lam']
return loss(out, feed_y_a, feed_y_b, feed_lam)
else:
loss = CELoss(class_dim=classes_num, epsilon=epsilon)
target = feeds['label']
return loss(out, target)
def create_metric(out,
feeds,
architecture,
topk=5,
classes_num=1000,
use_distillation=False):
if architecture["name"] == "GoogLeNet":
assert len(out) == 3, "GoogLeNet should have 3 outputs"
softmax_out = out[0]
else:
if use_distillation:
out = out[1]
softmax_out = fluid.layers.softmax(out, use_cudnn=False)
fetchs = OrderedDict()
top1 = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=1)
fetchs['top1'] = (top1, AverageMeter('top1', '.4f', need_avg=True))
k = min(topk, classes_num)
topk = fluid.layers.accuracy(softmax_out, label=feeds['label'], k=k)
topk_name = 'top{}'.format(k)
fetchs[topk_name] = (topk, AverageMeter(topk_name, '.4f', need_avg=True))
return fetchs
def create_fetchs(out,
feeds,
architecture,
topk=5,
classes_num=1000,
epsilon=None,
use_mix=False,
use_distillation=False):
fetchs = OrderedDict()
loss = create_loss(out, feeds, architecture, classes_num, epsilon, use_mix,
use_distillation)
fetchs['loss'] = (loss, AverageMeter('loss', '7.4f', need_avg=True))
if not use_mix:
metric = create_metric(out, feeds, architecture, topk, classes_num,
use_distillation)
fetchs.update(metric)
return fetchs
def create_optimizer(config):
lr_config = config['LEARNING_RATE']
lr_config['params'].update({
'epochs': config['epochs'],
'step_each_epoch':
config['total_images'] // config['TRAIN']['batch_size'],
})
lr = LearningRateBuilder(**lr_config)()
opt_config = config['OPTIMIZER']
opt = OptimizerBuilder(**opt_config)
return opt(lr)
def dist_optimizer(config, optimizer):
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 3
exec_strategy.num_iteration_per_drop_scope = 10
dist_strategy = DistributedStrategy()
dist_strategy.nccl_comm_num = 1
dist_strategy.fuse_all_reduce_ops = True
dist_strategy.exec_strategy = exec_strategy
optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)
return optimizer
def mixed_precision_optimizer(config, optimizer):
use_fp16 = config.get('use_fp16', False)
amp_scale_loss = config.get('amp_scale_loss', 1.0)
use_dynamic_loss_scaling = config.get('use_dynamic_loss_scaling', False)
if use_fp16:
optimizer = fluid.contrib.mixed_precision.decorate(
optimizer,
init_loss_scaling=amp_scale_loss,
use_dynamic_loss_scaling=use_dynamic_loss_scaling)
return optimizer
def build(config, main_prog, startup_prog, is_train=True):
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
use_mix = config.get('use_mix') and is_train
use_distillation = config.get('use_distillation')
feeds = create_feeds(config.image_shape, use_mix=use_mix)
dataloader = create_dataloader(feeds.values())
out = create_model(config.ARCHITECTURE, feeds['image'],
config.classes_num, is_train)
fetchs = create_fetchs(
out,
feeds,
config.ARCHITECTURE,
config.topk,
config.classes_num,
epsilon=config.get('ls_epsilon'),
use_mix=use_mix,
use_distillation=use_distillation)
if is_train:
optimizer = create_optimizer(config)
lr = optimizer._global_learning_rate()
fetchs['lr'] = (lr, AverageMeter('lr', 'f', need_avg=False))
optimizer = mixed_precision_optimizer(config, optimizer)
optimizer = dist_optimizer(config, optimizer)
optimizer.minimize(fetchs['loss'][0])
if config.get('use_ema'):
global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter(
)
ema = ExponentialMovingAverage(
config.get('ema_decay'), thres_steps=global_steps)
ema.update()
return dataloader, fetchs, ema
return dataloader, fetchs
def compile(config, program, loss_name=None):
build_strategy = fluid.compiler.BuildStrategy()
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 10
compiled_program = fluid.CompiledProgram(program).with_data_parallel(
loss_name=loss_name,
build_strategy=build_strategy,
exec_strategy=exec_strategy)
return compiled_program
total_step = 0
def run(dataloader,
exe,
program,
fetchs,
epoch=0,
mode='train',
vdl_writer=None):
fetch_list = [f[0] for f in fetchs.values()]
metric_list = [f[1] for f in fetchs.values()]
for m in metric_list:
m.reset()
batch_time = AverageMeter('elapse', '.3f')
tic = time.time()
for idx, batch in enumerate(dataloader()):
metrics = exe.run(program=program, feed=batch, fetch_list=fetch_list)
batch_time.update(time.time() - tic)
tic = time.time()
for i, m in enumerate(metrics):
metric_list[i].update(m[0], len(batch[0]))
fetchs_str = ''.join([str(m.value) + ' '
for m in metric_list] + [batch_time.value]) + 's'
if vdl_writer:
global total_step
logger.scaler('loss', metrics[0][0], total_step, vdl_writer)
total_step += 1
if mode == 'eval':
logger.info("{:s} step:{:<4d} {:s}s".format(mode, idx, fetchs_str))
else:
epoch_str = "epoch:{:<3d}".format(epoch)
step_str = "{:s} step:{:<4d}".format(mode, idx)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(epoch_str, "HEADER")
if idx == 0 else epoch_str,
logger.coloring(step_str, "PURPLE"),
logger.coloring(fetchs_str, 'OKGREEN')))
end_str = ''.join([str(m.mean) + ' '
for m in metric_list] + [batch_time.total]) + 's'
if mode == 'eval':
logger.info("END {:s} {:s}s".format(mode, end_str))
else:
end_epoch_str = "END epoch:{:<3d}".format(epoch)
logger.info("{:s} {:s} {:s}".format(
logger.coloring(end_epoch_str, "RED"),
logger.coloring(mode, "PURPLE"),
logger.coloring(end_str, "OKGREEN")))
if mode == 'valid':
return fetchs["top1"][1].avg
| true
| true
|
f70cb3c8e0c52829eda3f1fe33c1cc0d730b9691
| 6,355
|
py
|
Python
|
repo/plugin.video.frenchdj.media.center/resources/lib/sources/oneclickwatch_mv_tv.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
repo/plugin.video.frenchdj.media.center/resources/lib/sources/oneclickwatch_mv_tv.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
repo/plugin.video.frenchdj.media.center/resources/lib/sources/oneclickwatch_mv_tv.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,time, urllib
from resources.lib.libraries import client
from resources.lib.libraries import client2
from resources.lib.libraries import cleantitle
from resources.lib.libraries import workers
from resources.lib.libraries import control
from resources.lib.resolvers import cloudzilla
from resources.lib.resolvers import openload
from resources.lib.resolvers import uptobox
from resources.lib.resolvers import zstream
from resources.lib.resolvers import videomega
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://oneclickwatch.ws'
self.search_link = '/search/%s'
self.title = ''
def get_movie(self, imdb, title, year):
try:
query = self.search_link % urllib.quote_plus(title +' '+year)
query = urlparse.urljoin(self.base_link, query)
result = client2.http_get(query)
years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
result = client.parseDOM(result, 'h2', attrs={'class': 'title'})
result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
print('R',result)
result = [i for i in result if cleantitle.movie(title.lower()) in cleantitle.movie(i[1]).lower()]
print('R',result)
result = [i for i in result if any(x in i[1] for x in years)]
print('R',result)
result2 = [i for i in result if '1080' in i[1]]
print('R',result)
result3 = [i for i in result if '720' in i[1].lower()]
print('R',result)
if len(result3) > 0: result = result3
if len(result2) > 0: result = result2
url = result[0][0]
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = tvshowtitle
url = client.cleanHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
mytitile = url.lower()
url = '%s S%02dE%02d' % (url, int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
url = self.search_link % urllib.quote_plus(url)
query = urlparse.urljoin(self.base_link, url)
result = client2.http_get(query)
result = client.parseDOM(result, 'h2', attrs={'class': 'title'})
result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
result = [i for i in result if mytitile in i[1].lower()]
result2 = [i for i in result if '1080' in i[1].lower()]
result3 = [i for i in result if '720' in i[1].lower()]
if len(result3) > 0: result = result3
if len(result2) > 0: result = result2
url=result[0][0]
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
self.sources =[]
mylinks = []
result = client2.http_get(url)
mytitle = re.compile('<title>(.*?)</title>', re.DOTALL).findall(result)[0]
if any(word in mytitle.lower() for word in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts']):
quality = 'CAM'
elif '1080p' in mytitle:
quality = '1080p'
elif '720p' in mytitle:
quality = 'HD'
else:
quality = 'MQ'
links = client.parseDOM(result, 'a', attrs={'rel': 'nofollow'})
links = [i for i in links if i.startswith('http')]
for a in links:
mylinks.append([a,quality])
threads = []
for i in mylinks: threads.append(workers.Thread(self.check, i))
[i.start() for i in threads]
for i in range(0, 10 * 2):
is_alive = [x.is_alive() for x in threads]
if all(x == False for x in is_alive): break
time.sleep(1)
return self.sources
except:
return self.sources
def check(self, i):
try:
url = client.replaceHTMLCodes(i[0])
url = url.encode('utf-8')
host = urlparse.urlparse(url).netloc
host = host.replace('www.', '').replace('embed.', '')
host = host.rsplit('.', 1)[0]
host = host.lower()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
control.log("##OneClickWatch %s - url %s" % (host, i[0]))
#if host in i[2]: check = url = resolvers.request(url)
if host == 'openload': check = openload.check(url)
elif host == 'uptobox': check = uptobox.check(url)
elif host == 'cloudzilla': check = cloudzilla.check(url)
elif host == 'zstream': check = zstream.check(url)
elif host == 'videomega': check = videomega.check(url)
else: raise Exception()
if check == None or check == False: raise Exception()
self.sources.append({'source': host, 'quality': i[1], 'provider': 'Oneclickwatch', 'url': url})
except:
pass
def resolve(self, url):
try:
url = resolvers.request(url)
return url
except:
return
| 36.947674
| 125
| 0.561448
|
import re,urlparse,time, urllib
from resources.lib.libraries import client
from resources.lib.libraries import client2
from resources.lib.libraries import cleantitle
from resources.lib.libraries import workers
from resources.lib.libraries import control
from resources.lib.resolvers import cloudzilla
from resources.lib.resolvers import openload
from resources.lib.resolvers import uptobox
from resources.lib.resolvers import zstream
from resources.lib.resolvers import videomega
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://oneclickwatch.ws'
self.search_link = '/search/%s'
self.title = ''
def get_movie(self, imdb, title, year):
try:
query = self.search_link % urllib.quote_plus(title +' '+year)
query = urlparse.urljoin(self.base_link, query)
result = client2.http_get(query)
years = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1)]
result = client.parseDOM(result, 'h2', attrs={'class': 'title'})
result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
print('R',result)
result = [i for i in result if cleantitle.movie(title.lower()) in cleantitle.movie(i[1]).lower()]
print('R',result)
result = [i for i in result if any(x in i[1] for x in years)]
print('R',result)
result2 = [i for i in result if '1080' in i[1]]
print('R',result)
result3 = [i for i in result if '720' in i[1].lower()]
print('R',result)
if len(result3) > 0: result = result3
if len(result2) > 0: result = result2
url = result[0][0]
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = tvshowtitle
url = client.cleanHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
mytitile = url.lower()
url = '%s S%02dE%02d' % (url, int(season), int(episode))
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
url = self.search_link % urllib.quote_plus(url)
query = urlparse.urljoin(self.base_link, url)
result = client2.http_get(query)
result = client.parseDOM(result, 'h2', attrs={'class': 'title'})
result = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0]) for i in result]
result = [i for i in result if mytitile in i[1].lower()]
result2 = [i for i in result if '1080' in i[1].lower()]
result3 = [i for i in result if '720' in i[1].lower()]
if len(result3) > 0: result = result3
if len(result2) > 0: result = result2
url=result[0][0]
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
self.sources =[]
mylinks = []
result = client2.http_get(url)
mytitle = re.compile('<title>(.*?)</title>', re.DOTALL).findall(result)[0]
if any(word in mytitle.lower() for word in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'ts']):
quality = 'CAM'
elif '1080p' in mytitle:
quality = '1080p'
elif '720p' in mytitle:
quality = 'HD'
else:
quality = 'MQ'
links = client.parseDOM(result, 'a', attrs={'rel': 'nofollow'})
links = [i for i in links if i.startswith('http')]
for a in links:
mylinks.append([a,quality])
threads = []
for i in mylinks: threads.append(workers.Thread(self.check, i))
[i.start() for i in threads]
for i in range(0, 10 * 2):
is_alive = [x.is_alive() for x in threads]
if all(x == False for x in is_alive): break
time.sleep(1)
return self.sources
except:
return self.sources
def check(self, i):
try:
url = client.replaceHTMLCodes(i[0])
url = url.encode('utf-8')
host = urlparse.urlparse(url).netloc
host = host.replace('www.', '').replace('embed.', '')
host = host.rsplit('.', 1)[0]
host = host.lower()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
control.log("##OneClickWatch %s - url %s" % (host, i[0]))
if host == 'openload': check = openload.check(url)
elif host == 'uptobox': check = uptobox.check(url)
elif host == 'cloudzilla': check = cloudzilla.check(url)
elif host == 'zstream': check = zstream.check(url)
elif host == 'videomega': check = videomega.check(url)
else: raise Exception()
if check == None or check == False: raise Exception()
self.sources.append({'source': host, 'quality': i[1], 'provider': 'Oneclickwatch', 'url': url})
except:
pass
def resolve(self, url):
try:
url = resolvers.request(url)
return url
except:
return
| true
| true
|
f70cb404ff70014a8e6b2a47aa2bfba1304d3c59
| 31,360
|
py
|
Python
|
hd_recognition/GUI.py
|
Seledriac/A-small-python-library-for-deep-learning
|
c041287b04ba217910f621d34c7739365c36ad48
|
[
"MIT"
] | 1
|
2020-06-11T05:04:08.000Z
|
2020-06-11T05:04:08.000Z
|
hd_recognition/GUI.py
|
Seledriac/A-small-python-library-for-deep-learning
|
c041287b04ba217910f621d34c7739365c36ad48
|
[
"MIT"
] | 3
|
2020-06-14T10:26:57.000Z
|
2020-06-14T10:37:58.000Z
|
hd_recognition/GUI.py
|
Seledriac/A-small-python-library-for-deep-learning
|
c041287b04ba217910f621d34c7739365c36ad48
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""Handwritten digits recognition Graphic interface module : training done with the mnist dataset"""
# Third-party gui/system/plotting Libraries
import numpy as np
import tkinter as tk
import tkinter.font as tkFont
from tkinter import messagebox
from tkinter import filedialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from PIL import ImageTk, Image
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel
from PyQt5.QtGui import QPainter, QPixmap, QPen, QScreen
import pickle
import webbrowser
import os
import sys
sys.path.insert(1, str(os.getcwd()))
# Neural network module
import network
# ------------------------------------------------------------------------------tkinter GUI---------------------------------------------------------------------------------------------
class Interface(tk.Frame):
"""graphic interface class"""
# ------------------------------------------------------------------------------__init__------------------------------------------------------------------------------------------------
def __init__(self, window, **kwargs):
"""Displays the main menu"""
# Fonts
self.big_font_button = tkFont.Font(family='Calibri', size=20, weight='bold')
self.medium_large_font_button = tkFont.Font(family='Calibri', size=16, weight='bold')
self.medium_font_button = tkFont.Font(family='Calibri', size=14, weight='bold')
self.font_title = tkFont.Font(family='Calibri', size=36, weight='bold')
self.number_button_font = tkFont.Font(family='Calibri', size=25, weight='bold')
# Display main menu
self.main_menu(window, **kwargs)
# ------------------------------------------------------------------------------Main Menu Interface--------------------------------------------------------------------------------------
def main_menu(self, window, **kwargs):
"""Main menu Frame"""
# Frame creation
if hasattr(self, 'children'):
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Github Button
img_github = ImageTk.PhotoImage(Image.open("hd_recognition/assets/github.jpg").resize((50,50)))
btn_github = tk.Button(self, image=img_github, command=lambda: webbrowser.open("https://github.com/Seledriac/A-small-pedagogic-python-library-for-supervised-neural-networks/"))
btn_github.img = img_github
btn_github.grid(column=0, row=0, padx=50, pady=(0,50))
# Title
title = tk.Label(self, text="Supervised neural networks\n applied to handwritten digits recognition", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=25)
# Readme Button
img_readme = ImageTk.PhotoImage(Image.open("hd_recognition/assets/readme.png").resize((50,50)))
btn_readme = tk.Button(self, image=img_readme, command=lambda: os.startfile("README.md"))
btn_readme.img = img_readme
btn_readme.grid(column=2, row=0, padx=60, pady=(0,50))
# Button selection frame
btns_frames = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
btns_frames.grid(row=1, column=1, columnspan=3, pady=(65,80), padx=(0,180))
# Menu Buttons
create_model_button = tk.Button(btns_frames, text="Create a model", font=self.big_font_button, command=lambda: self.create_model(window, **kwargs))
create_model_button.grid(column=0, row=0, padx=10, pady=10)
train_model_button = tk.Button(btns_frames, text="Train a model", font=self.big_font_button, command=lambda: self.train_model(window, **kwargs))
train_model_button.grid(column = 1, row = 0, padx=10, pady=10)
evaluate_button = tk.Button(btns_frames, text="Accuracy Ladder", font=self.big_font_button, command=lambda: self.models_ladder(window, **kwargs))
evaluate_button.grid(column = 0, row = 1, padx=10, pady=10)
predict_button = tk.Button(btns_frames, text="Predict", font=self.big_font_button, command=lambda: self.choose_prediction(window, **kwargs))
predict_button.grid(column = 1, row = 1, padx=10, pady=10)
# ------------------------------------------------------------------------------Model Creation Interface------------------------------------------------------------------------------------
def create_model(self, window, **kwargs):
"""Model creation Frame"""
# Frame creation
self.destroy()
if hasattr(self, 'hidden_layers_label'):
delattr(self, 'hidden_layers_label')
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
# Title
title = tk.Label(self, text="Model Creation", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0)
# Model Validation frame
model_creation_validation_frame = tk.LabelFrame(self, borderwidth=3)
model_creation_validation_frame.grid(row=0, column=2, pady=(20,0))
model_creation_validation_label = tk.Label(model_creation_validation_frame, text="Model name", font=self.medium_font_button)
model_creation_validation_label.pack()
self.model_creation_validation_entry = tk.Entry(model_creation_validation_frame)
self.model_creation_validation_entry.pack()
model_creation_validation_button = tk.Button(model_creation_validation_frame, text="Create Model", font=self.medium_font_button, command=self.model_creation_validation)
model_creation_validation_button.pack()
# Model customization frame
creation_custom_frame = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
creation_custom_frame.grid(row=1, column=0, columnspan=3, pady=(30,0))
# Input layer Frame
input_layer_frame = tk.LabelFrame(creation_custom_frame)
input_layer_frame.grid(row=0, column=0)
input_layer_label = tk.Label(input_layer_frame, text="Input Layer", font=self.medium_font_button)
input_layer_label.pack()
self.input_layer_number = tk.Entry(input_layer_frame)
self.input_layer_number.insert(0,784)
self.input_layer_number.pack()
# Hidden layers Frame
self.hidden_layers = []
self.hidden_layers_frame = tk.LabelFrame(creation_custom_frame)
self.hidden_layers_frame.grid(row=0, column=1)
self.add_hidden_layer()
self.add_hidden_layer()
# Output layer Frame
output_layer_frame = tk.LabelFrame(creation_custom_frame)
output_layer_frame.grid(row=0, column=2, padx=70)
output_layer_label = tk.Label(output_layer_frame, text="Output Layer", font=self.medium_font_button)
output_layer_label.pack()
self.output_layer_number = tk.Entry(output_layer_frame)
self.output_layer_number.insert(0,10)
self.output_layer_number.pack()
# Hidden layer adding/deleting buttons
add_hidden_layer_button = tk.Button(creation_custom_frame, text="Add a hidden layer", font=self.medium_font_button, command=self.add_hidden_layer)
add_hidden_layer_button.grid(column = 0, row = 1, padx=50, pady=40)
del_hidden_layer_button = tk.Button(creation_custom_frame, text="Delete the last hidden layer", font=self.medium_font_button, command=self.del_hidden_layer)
del_hidden_layer_button.grid(column = 1, row = 1, padx=50, pady=40, columnspan=2)
def add_hidden_layer(self):
"""Add a hidden layer in the model creation Frame"""
if not hasattr(self, 'hidden_layers_label'):
self.hidden_layers_label = tk.Label(self.hidden_layers_frame, text="Hidden Layer(s)", font=self.medium_font_button)
self.hidden_layers_label.grid(row=0, column=0, columnspan=10)
if len(self.hidden_layers) < 5:
new_hidden_layer = tk.Scale(self.hidden_layers_frame, from_=1, to=128, length=150)
new_hidden_layer.grid(row=1,column=len(self.hidden_layers), padx=(0,20))
self.hidden_layers.append(new_hidden_layer)
def del_hidden_layer(self):
"""Delete a hidden layer in the model creation Frame"""
if len(self.hidden_layers) > 1:
self.hidden_layers[-1].destroy()
del self.hidden_layers[-1]
elif hasattr(self, 'hidden_layers_label'):
self.hidden_layers[-1].destroy()
del self.hidden_layers[-1]
self.hidden_layers_label.destroy()
delattr(self, 'hidden_layers_label')
def model_creation_validation(self):
"""This method is executed when the model creation validation button is clicked. It creates the model, serlializes it, and shows a recap od the model in a message box to the user"""
model_name = self.model_creation_validation_entry.get()
try:
input_number = int(self.input_layer_number.get())
output_number = int(self.output_layer_number.get())
except ValueError:
messagebox.showerror("Error", "Error : enter a number of neurons for all the layers")
if model_name and input_number and output_number:
sizes = [input_number]
msg = "Model \"{}\" successfully created.\n\nInput layer : {} neurons\n".format(str(self.model_creation_validation_entry.get()), str(input_number))
for i,layer in enumerate(self.hidden_layers):
nb_neurons = int(layer.get())
sizes.append(nb_neurons)
msg = msg + "Hidden layer {} : {} neurons\n".format(str(i + 1), str(nb_neurons))
sizes.append(output_number)
msg = msg + "Output layer : {} neurons\n\nActivation function : sigmoid (by default)".format(str(output_number))
net = network.Network(model_name, sizes)
with open("models/hd_recognition/{}.pickle".format(model_name), "wb") as fic:
pickler = pickle.Pickler(fic)
pickler.dump(net)
messagebox.showinfo("Model Info", msg)
else:
messagebox.showerror("Error", "Error : missing required fields")
# ------------------------------------------------------------------------------Model Training Interface------------------------------------------------------------------------------------
def train_model(self, window, **kwargs):
"""Model training specs Frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Chosing the model which we will train
self.open_model_file()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=(25,0))
# Title
title = tk.Label(self, text="Model Training\n(mnist dataset)", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=80, padx=(200,0))
# Model training validation frame
model_training_validation_frame = tk.LabelFrame(self, borderwidth=3)
model_training_validation_frame.grid(row=0, column=2, padx=(200,0), pady=(10,0))
model_training_validation_button = tk.Button(model_training_validation_frame, text="Train", font=self.medium_large_font_button, command=lambda: self.model_training(window, **kwargs))
model_training_validation_button.pack()
# Model training customization frame
training_custom_frame = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
training_custom_frame.grid(row=1, column=0, columnspan=100, padx=(0,15))
# Epochs Frame
epochs_frame = tk.LabelFrame(training_custom_frame)
epochs_frame.grid(row=0, column=0)
epochs_label = tk.Label(epochs_frame, text="Epochs", font=self.medium_font_button)
epochs_label.pack()
self.epochs_number = tk.Entry(epochs_frame)
self.epochs_number.insert(0,3)
self.epochs_number.pack()
# Batch size Frame
batch_size_frame = tk.LabelFrame(training_custom_frame)
batch_size_frame.grid(row=0, column=2, padx=70)
batch_size_label = tk.Label(batch_size_frame, text="batch size", font=self.medium_font_button)
batch_size_label.pack()
self.batch_size_number = tk.Entry(batch_size_frame)
self.batch_size_number.insert(0,10)
self.batch_size_number.pack()
# Display weights checkbox
display_weights_frame = tk.LabelFrame(training_custom_frame)
display_weights_frame.grid(row=0, column=3)
self.display_weights_value = tk.IntVar()
display_weights_cb = tk.Checkbutton(display_weights_frame, text="Dynamically display the weights of the first layer", font=self.medium_font_button, variable=self.display_weights_value)
display_weights_cb.pack()
def model_training(self, window, **kwargs):
"""Model training Frame"""
# Training values retrieving
disp_weights = bool(self.display_weights_value.get())
try:
epochs = int(self.epochs_number.get())
batch_size = int(self.batch_size_number.get())
except ValueError:
messagebox.showerror("Error", "Error : please enter a numeric value for each field")
if epochs and batch_size:
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
# Training trigger button
doIt = tk.Button(self, text="Start the Training", command=lambda: self.start_training(epochs, batch_size, disp_weights), font=self.big_font_button)
doIt.grid(row=0, column=1, pady=20)
# Training logs textbox
textbox_frame = tk.LabelFrame(self)
textbox_frame.grid(row=1, column=0, columnspan=2)
self.output = tk.Text(textbox_frame, width=110, height=30, bg='black', fg='white')
self.output.pack(side=tk.LEFT)
# Scrollbar
scrollbar = tk.Scrollbar(textbox_frame, orient="vertical", command = self.output.yview)
scrollbar.pack(side=tk.RIGHT, fill="y")
self.output['yscrollcommand'] = scrollbar.set
self.pack()
else:
messagebox.showerror("Error", "Error : missing required fields")
def start_training(self, epochs, batch_size, disp_weights):
"""This method executes the SGD training method on a given model"""
# Importing the mnist dataset
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
validation_data = list(validation_data)
test_data = list(test_data)
# Model training via SGD
net = self.model_file
self.output.insert(tk.END, "\n" + str(net) + "\n")
self.update_idletasks()
net.SGD(training_data, epochs, batch_size, test_data=test_data, display_weights=disp_weights, gui=self)
# Model saving
with open("models/hd_recognition/{}.pickle".format(net.id), "wb") as saving:
saver = pickle.Pickler(saving)
saver.dump(net)
# Performance test of the network on the validation data
accuracy = str(100 * net.evaluate(validation_data) / 10000)
self.output.insert(tk.END, "\nTest on the validation data -> Accuracy : {0}%\n".format(accuracy))
self.update_idletasks()
self.output.see("end")
# Ladder update
with open("models/hd_recognition/accuracy_ladder.md", "a") as ladder:
adding = str(net) + " --> accuracy = " + accuracy + "\n"
ladder.write(adding)
with open("models/hd_recognition/accuracy_ladder.md", "r") as ladder:
shove_percent = ladder.read().replace("%", "")
content = [net.split("= ") for net in shove_percent.split('\n')]
content.pop()
content_updated = sorted([(acc,net) for net,acc in content], reverse = True)
tostring = "%\n".join(["= ".join((net,acc)) for acc,net in content_updated]) + "%\n"
with open("models/hd_recognition/accuracy_ladder.md", "w") as ladder:
ladder.write(tostring)
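    # Illustrative note (added for clarity; not part of the original file): the re-sorting above
    # assumes every ladder line has the form "<model description> --> accuracy = <value>%",
    # so stripping the '%' characters and splitting each line on "= " yields
    # (description, accuracy) pairs that can be ranked by accuracy in descending order.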
# ------------------------------------------------------------------------------Models Ladder Interface------------------------------------------------------------------------------------
def models_ladder(self, window, **kwargs):
"""Models ladder frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
# Ladder label
ladder_label = tk.Label(self, text="Models Accuracy Ladder", font=self.font_title, bg="#fff2f2")
ladder_label.grid(row=0, column=1, padx=(0,150), pady=20)
# Ladder textbox
textbox_frame = tk.LabelFrame(self)
textbox_frame.grid(row=1, column=0, columnspan=2)
output = tk.Text(textbox_frame, width=100, height=20, font=self.medium_font_button)
output.pack(side=tk.LEFT)
with open("models/hd_recognition/accuracy_ladder.md", "r") as ladder:
content = ladder.read()
output.insert(tk.END, content)
self.update_idletasks()
output.see("end")
# Scrollbar
scrollbar = tk.Scrollbar(textbox_frame, orient="vertical", command = output.yview)
scrollbar.pack(side=tk.RIGHT, fill="y")
output['yscrollcommand'] = scrollbar.set
self.pack()
# ------------------------------------------------------------------------------Prediction Interface---------------------------------------------------------------------------------------
def choose_prediction(self, window, **kwargs):
"""Prediction style choice frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Opening the model which will predict
self.open_model_file()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=(0,125), pady=(15,100))
# Ladder label
choice_label = tk.Label(self, text="Choose the prediction style", font=self.font_title, bg="#fff2f2")
choice_label.grid(row=0, column=1, columnspan=10, padx=(50,250), pady=50)
# Choice buttons
choice_custom = tk.Button(self, text="Predict with custom test images", font=self.big_font_button, command=lambda: self.custom_prediction_frame(window, **kwargs))
choice_custom.grid(row=1, column=1, padx=(0,0), pady=(100))
choice_live = tk.Button(self, text="Live prediction", font=self.big_font_button, command=lambda: self.live_prediction_frame(window, **kwargs))
choice_live.grid(row=1, column=2, padx=(50,200), pady=(100))
def custom_prediction_frame(self, window, **kwargs):
"""Custom images prediction frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, pady=(10,30))
# Title label
title_label = tk.Label(self, text="Custom images prediction\nChoose the number to predict", font=self.number_button_font, bg="#fff2f2")
title_label.grid(row=0, column=1, columnspan=2, padx=(0,150), pady=10)
# Number buttons Frame
number_buttons_frame = tk.LabelFrame(self, borderwidth=3, bg='white', pady=10)
number_buttons_frame.grid(row=1,column=1, columnspan=2, padx=(0,150))
# Number buttons
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="0", command=lambda: self.number_button_click(0))
btn_home.grid(column=0, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="1", command=lambda: self.number_button_click(1))
btn_home.grid(column=1, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="2", command=lambda: self.number_button_click(2))
btn_home.grid(column=2, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="3", command=lambda: self.number_button_click(3))
btn_home.grid(column=3, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="4", command=lambda: self.number_button_click(4))
btn_home.grid(column=4, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="5", command=lambda: self.number_button_click(5))
btn_home.grid(column=5, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="6", command=lambda: self.number_button_click(6))
btn_home.grid(column=6, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="7", command=lambda: self.number_button_click(7))
btn_home.grid(column=7, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="8", command=lambda: self.number_button_click(8))
btn_home.grid(column=8, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="9", command=lambda: self.number_button_click(9))
btn_home.grid(column=9, row=1, padx=15)
def number_button_click(self, number):
"""This method is executed when a number button is clicked. It displays the model's prediction on a matplotlib figure"""
# Opening the corresponding custom image
img_filename_bmp = "hd_recognition/custom_test_images/test_image_"+str(number)+".bmp"
test_image = Image.open(img_filename_bmp)
# Predicting based on the custom image
image_array = 1 - (np.array(test_image).reshape(784,1) / 255)
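        # Added note: dividing by 255 rescales pixel values to [0, 1], and the "1 -" inversion maps
        # the dark strokes of the .bmp (low values on a white background) to activations close to 1,
        # presumably matching the ink-is-high convention of the MNIST data the model was trained on.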
model_activations = self.model_file.feedforward(image_array)
# Custom image display
img_filename_png = "hd_recognition/custom_test_images/test_image_"+str(number)+".png"
custom_image = ImageTk.PhotoImage(Image.open(img_filename_png))
custom_image_label = tk.Label(self, image=custom_image, relief='ridge')
custom_image_label.image=custom_image
custom_image_label.grid(row=2, column=1, padx=10, pady=(5,5))
# Prediction plot frame
prediction_frame = tk.LabelFrame(self)
prediction_frame.grid(row=2,column=2, padx=(10,150), pady=(5,5))
# Plotting the model activations
self.plot_model_activation(model_activations, prediction_frame)
def live_prediction_frame(self, window, **kwargs):
"""Live prediction of the numbers drew by the user"""
# Frame creation
self.destroy()
window.geometry("1500x800")
tk.Frame.__init__(self, window, width=1500, height=800, bg="#fff2f2", **kwargs)
self.pack()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=100)
# Title
title = tk.Label(self, text="Live prediction\nDraw the number to predict", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=80)
# Start button frame
live_prediction_starting_frame = tk.LabelFrame(self, borderwidth=3)
live_prediction_starting_frame.grid(row=0, column=2, padx=100)
live_prediction_starting_button = tk.Button(live_prediction_starting_frame, text="Start", font=self.medium_large_font_button, command=lambda: self.start_live_prediction(window))
live_prediction_starting_button.pack()
def start_live_prediction(self, window):
"""Live prediction Qt drawing window display"""
# DrawingWindow creation
App = QApplication(sys.argv)
QtWindow = DrawingWindow(App, self)
QtWindow.setWindowTitle("Digit drawing window")
QtWindow.show()
sys.exit(App.exec())
# ------------------------------------------------------------------------------Miscellaneous Methods--------------------------------------------------------------------------------------
def open_model_file(self):
"""Prompts the user to choose a model file"""
re = True
while re:
try:
# Model file opening prompt
self.model_filename = filedialog.askopenfilename(initialdir="models/hd_recognition", title="Choose the model", filetypes=(("pickle files","*.pickle"), ("model files","*.model"), ("all files", "*.*")))
assert self.model_filename
re = False
except:
messagebox.showerror("Error", "Error : please select a model file")
with open(self.model_filename, "rb") as fic:
unpickler = pickle.Unpickler(fic)
self.model_file = unpickler.load()
def plot_model_activation(self, model_activations, frame):
"""Plots the current model activations in a given frame (in a prediction context)"""
fig = Figure(figsize = (4, 4))
fig.clf()
fig.add_subplot(111).plot(range(10), model_activations)
fig.suptitle("corresponding model activations")
axes = fig.gca()
axes.set_xlabel("digit")
axes.set_ylabel("activation")
axes.set_ylim([0, 1])
axes.set_xticks(range(10))
axes.set_yticks(np.array(range(11))/10)
canvas = FigureCanvasTkAgg(fig, master=frame)
canvas.draw()
canvas.flush_events()
canvas.get_tk_widget().grid(row=0, column=1)
self.annot_max(range(10), model_activations, axes)
def annot_max(x, y, ax):
"""Max network activation anotation for a number image"""
xmax = x[np.argmax(y)]
ymax = y.max()
text = "digit = {}, activation = {:.3f}".format(xmax,ymax)
if xmax <= 4:
orientation = str((1 / abs(5 - (xmax + 1))) / 10)
else:
orientation = str(-(1 / abs(5 - (xmax + 1))) / 10)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=1)
arrowprops=dict(arrowstyle="-|>",connectionstyle="arc3,rad="+orientation)
kw = dict(xycoords='data',textcoords="axes fraction",
arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
# ax.annotate(text, xy=(xmax, ymax), xytext=(xmax/10 - 0.1, ymax - 0.1), **kw)
ax.annotate(text, xy=(xmax, ymax), xytext=(0.8, 0.5), **kw)
annot_max = staticmethod(annot_max)
# ------------------------------------------------------------------------------PyQt drawing window----------------------------------------------------------------------------------------
class DrawingWindow(QMainWindow):
"""Drawing window for live model prediction"""
def __init__(self, App, tkinter_root):
"""Initialization of the Drawing Window : we create a label centered in the window, in which we put a blank pixmap"""
super().__init__()
self.label = QLabel()
self.blank()
self.setCentralWidget(self.label)
self.App = App
self.tkinter_root = tkinter_root
self.last_x, self.last_y = None, None
def blank(self):
"""This method clears the QtWindow, setting the content of the centered label to a white pixmap"""
self.label.setPixmap(QPixmap("hd_recognition/assets/white.png"))
def mouseMoveEvent(self, e):
"""This method is executed while the click button is held"""
if self.last_x is None:
self.last_x = e.x()
self.last_y = e.y()
return
painter = QPainter(self.label.pixmap())
painter.drawLine(self.last_x, self.last_y, e.x(), e.y())
painter.end()
self.update()
# Updating the origin for next time
self.last_x = e.x()
self.last_y = e.y()
# Saving the screenshot and compressing it to a 28x28 image
QScreen.grabWindow(self.App.primaryScreen(), self.winId()).save("hd_recognition/tmp/screenshot.png", 'png')
resize_img = Image.open("hd_recognition/tmp/screenshot.png")
resize_img = resize_img.resize((28,28))
resize_img.save("hd_recognition/tmp/screenshot.png", 'png')
# Converting from standard png to greyscale
img_array = np.array(Image.open("hd_recognition/tmp/screenshot.png"))
img_array = np.array([[pixel[0] for pixel in line] for line in img_array])
image_array = 1 - (img_array.reshape(784,1) / 255)
# Predicting the number
model_activations = self.tkinter_root.model_file.feedforward(image_array)
# Prediction plot frame
prediction_frame = tk.LabelFrame(self.tkinter_root)
prediction_frame.grid(row=2,column=2)
# Plotting the model activations
self.tkinter_root.plot_model_activation(model_activations, prediction_frame)
def mouseReleaseEvent(self, e):
self.last_x = None
self.last_y = None
# -----------------------------------------------------------------------------Tkinter Window creation-------------------------------------------------------------------------------------
window = tk.Tk()
window.geometry("1180x620")
window.title("Neural Networks")
window.configure(bg="#fff2f2")
interface = Interface(window)
interface.mainloop()
| 48.395062
| 216
| 0.625797
|
import numpy as np
import tkinter as tk
import tkinter.font as tkFont
from tkinter import messagebox
from tkinter import filedialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from PIL import ImageTk, Image
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel
from PyQt5.QtGui import QPainter, QPixmap, QPen, QScreen
import pickle
import webbrowser
import os
import sys
sys.path.insert(1, str(os.getcwd()))
import network
class Interface(tk.Frame):
def __init__(self, window, **kwargs):
self.big_font_button = tkFont.Font(family='Calibri', size=20, weight='bold')
self.medium_large_font_button = tkFont.Font(family='Calibri', size=16, weight='bold')
self.medium_font_button = tkFont.Font(family='Calibri', size=14, weight='bold')
self.font_title = tkFont.Font(family='Calibri', size=36, weight='bold')
self.number_button_font = tkFont.Font(family='Calibri', size=25, weight='bold')
self.main_menu(window, **kwargs)
def main_menu(self, window, **kwargs):
if hasattr(self, 'children'):
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
img_github = ImageTk.PhotoImage(Image.open("hd_recognition/assets/github.jpg").resize((50,50)))
btn_github = tk.Button(self, image=img_github, command=lambda: webbrowser.open("https://github.com/Seledriac/A-small-pedagogic-python-library-for-supervised-neural-networks/"))
btn_github.img = img_github
btn_github.grid(column=0, row=0, padx=50, pady=(0,50))
title = tk.Label(self, text="Supervised neural networks\n applied to handwritten digits recognition", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=25)
img_readme = ImageTk.PhotoImage(Image.open("hd_recognition/assets/readme.png").resize((50,50)))
btn_readme = tk.Button(self, image=img_readme, command=lambda: os.startfile("README.md"))
btn_readme.img = img_readme
btn_readme.grid(column=2, row=0, padx=60, pady=(0,50))
btns_frames = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
btns_frames.grid(row=1, column=1, columnspan=3, pady=(65,80), padx=(0,180))
create_model_button = tk.Button(btns_frames, text="Create a model", font=self.big_font_button, command=lambda: self.create_model(window, **kwargs))
create_model_button.grid(column=0, row=0, padx=10, pady=10)
train_model_button = tk.Button(btns_frames, text="Train a model", font=self.big_font_button, command=lambda: self.train_model(window, **kwargs))
train_model_button.grid(column = 1, row = 0, padx=10, pady=10)
evaluate_button = tk.Button(btns_frames, text="Accuracy Ladder", font=self.big_font_button, command=lambda: self.models_ladder(window, **kwargs))
evaluate_button.grid(column = 0, row = 1, padx=10, pady=10)
predict_button = tk.Button(btns_frames, text="Predict", font=self.big_font_button, command=lambda: self.choose_prediction(window, **kwargs))
predict_button.grid(column = 1, row = 1, padx=10, pady=10)
def create_model(self, window, **kwargs):
self.destroy()
if hasattr(self, 'hidden_layers_label'):
delattr(self, 'hidden_layers_label')
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
title = tk.Label(self, text="Model Creation", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0)
model_creation_validation_frame = tk.LabelFrame(self, borderwidth=3)
model_creation_validation_frame.grid(row=0, column=2, pady=(20,0))
model_creation_validation_label = tk.Label(model_creation_validation_frame, text="Model name", font=self.medium_font_button)
model_creation_validation_label.pack()
self.model_creation_validation_entry = tk.Entry(model_creation_validation_frame)
self.model_creation_validation_entry.pack()
model_creation_validation_button = tk.Button(model_creation_validation_frame, text="Create Model", font=self.medium_font_button, command=self.model_creation_validation)
model_creation_validation_button.pack()
creation_custom_frame = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
creation_custom_frame.grid(row=1, column=0, columnspan=3, pady=(30,0))
input_layer_frame = tk.LabelFrame(creation_custom_frame)
input_layer_frame.grid(row=0, column=0)
input_layer_label = tk.Label(input_layer_frame, text="Input Layer", font=self.medium_font_button)
input_layer_label.pack()
self.input_layer_number = tk.Entry(input_layer_frame)
self.input_layer_number.insert(0,784)
self.input_layer_number.pack()
self.hidden_layers = []
self.hidden_layers_frame = tk.LabelFrame(creation_custom_frame)
self.hidden_layers_frame.grid(row=0, column=1)
self.add_hidden_layer()
self.add_hidden_layer()
output_layer_frame = tk.LabelFrame(creation_custom_frame)
output_layer_frame.grid(row=0, column=2, padx=70)
output_layer_label = tk.Label(output_layer_frame, text="Output Layer", font=self.medium_font_button)
output_layer_label.pack()
self.output_layer_number = tk.Entry(output_layer_frame)
self.output_layer_number.insert(0,10)
self.output_layer_number.pack()
add_hidden_layer_button = tk.Button(creation_custom_frame, text="Add a hidden layer", font=self.medium_font_button, command=self.add_hidden_layer)
add_hidden_layer_button.grid(column = 0, row = 1, padx=50, pady=40)
del_hidden_layer_button = tk.Button(creation_custom_frame, text="Delete the last hidden layer", font=self.medium_font_button, command=self.del_hidden_layer)
del_hidden_layer_button.grid(column = 1, row = 1, padx=50, pady=40, columnspan=2)
def add_hidden_layer(self):
if not hasattr(self, 'hidden_layers_label'):
self.hidden_layers_label = tk.Label(self.hidden_layers_frame, text="Hidden Layer(s)", font=self.medium_font_button)
self.hidden_layers_label.grid(row=0, column=0, columnspan=10)
if len(self.hidden_layers) < 5:
new_hidden_layer = tk.Scale(self.hidden_layers_frame, from_=1, to=128, length=150)
new_hidden_layer.grid(row=1,column=len(self.hidden_layers), padx=(0,20))
self.hidden_layers.append(new_hidden_layer)
def del_hidden_layer(self):
if len(self.hidden_layers) > 1:
self.hidden_layers[-1].destroy()
del self.hidden_layers[-1]
elif hasattr(self, 'hidden_layers_label'):
self.hidden_layers[-1].destroy()
del self.hidden_layers[-1]
self.hidden_layers_label.destroy()
delattr(self, 'hidden_layers_label')
def model_creation_validation(self):
model_name = self.model_creation_validation_entry.get()
try:
input_number = int(self.input_layer_number.get())
output_number = int(self.output_layer_number.get())
except ValueError:
messagebox.showerror("Error", "Error : enter a number of neurons for all the layers")
if model_name and input_number and output_number:
sizes = [input_number]
msg = "Model \"{}\" successfully created.\n\nInput layer : {} neurons\n".format(str(self.model_creation_validation_entry.get()), str(input_number))
for i,layer in enumerate(self.hidden_layers):
nb_neurons = int(layer.get())
sizes.append(nb_neurons)
msg = msg + "Hidden layer {} : {} neurons\n".format(str(i + 1), str(nb_neurons))
sizes.append(output_number)
msg = msg + "Output layer : {} neurons\n\nActivation function : sigmoid (by default)".format(str(output_number))
net = network.Network(model_name, sizes)
with open("models/hd_recognition/{}.pickle".format(model_name), "wb") as fic:
pickler = pickle.Pickler(fic)
pickler.dump(net)
messagebox.showinfo("Model Info", msg)
else:
messagebox.showerror("Error", "Error : missing required fields")
def train_model(self, window, **kwargs):
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
self.open_model_file()
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=(25,0))
title = tk.Label(self, text="Model Training\n(mnist dataset)", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=80, padx=(200,0))
model_training_validation_frame = tk.LabelFrame(self, borderwidth=3)
model_training_validation_frame.grid(row=0, column=2, padx=(200,0), pady=(10,0))
model_training_validation_button = tk.Button(model_training_validation_frame, text="Train", font=self.medium_large_font_button, command=lambda: self.model_training(window, **kwargs))
model_training_validation_button.pack()
training_custom_frame = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
training_custom_frame.grid(row=1, column=0, columnspan=100, padx=(0,15))
epochs_frame = tk.LabelFrame(training_custom_frame)
epochs_frame.grid(row=0, column=0)
epochs_label = tk.Label(epochs_frame, text="Epochs", font=self.medium_font_button)
epochs_label.pack()
self.epochs_number = tk.Entry(epochs_frame)
self.epochs_number.insert(0,3)
self.epochs_number.pack()
batch_size_frame = tk.LabelFrame(training_custom_frame)
batch_size_frame.grid(row=0, column=2, padx=70)
batch_size_label = tk.Label(batch_size_frame, text="batch size", font=self.medium_font_button)
batch_size_label.pack()
self.batch_size_number = tk.Entry(batch_size_frame)
self.batch_size_number.insert(0,10)
self.batch_size_number.pack()
display_weights_frame = tk.LabelFrame(training_custom_frame)
display_weights_frame.grid(row=0, column=3)
self.display_weights_value = tk.IntVar()
display_weights_cb = tk.Checkbutton(display_weights_frame, text="Dynamically display the weights of the first layer", font=self.medium_font_button, variable=self.display_weights_value)
display_weights_cb.pack()
def model_training(self, window, **kwargs):
disp_weights = bool(self.display_weights_value.get())
try:
epochs = int(self.epochs_number.get())
batch_size = int(self.batch_size_number.get())
except ValueError:
messagebox.showerror("Error", "Error : please enter a numeric value for each field")
if epochs and batch_size:
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
doIt = tk.Button(self, text="Start the Training", command=lambda: self.start_training(epochs, batch_size, disp_weights), font=self.big_font_button)
doIt.grid(row=0, column=1, pady=20)
textbox_frame = tk.LabelFrame(self)
textbox_frame.grid(row=1, column=0, columnspan=2)
self.output = tk.Text(textbox_frame, width=110, height=30, bg='black', fg='white')
self.output.pack(side=tk.LEFT)
scrollbar = tk.Scrollbar(textbox_frame, orient="vertical", command = self.output.yview)
scrollbar.pack(side=tk.RIGHT, fill="y")
self.output['yscrollcommand'] = scrollbar.set
self.pack()
else:
messagebox.showerror("Error", "Error : missing required fields")
def start_training(self, epochs, batch_size, disp_weights):
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
validation_data = list(validation_data)
test_data = list(test_data)
net = self.model_file
self.output.insert(tk.END, "\n" + str(net) + "\n")
self.update_idletasks()
net.SGD(training_data, epochs, batch_size, test_data=test_data, display_weights=disp_weights, gui=self)
with open("models/hd_recognition/{}.pickle".format(net.id), "wb") as saving:
saver = pickle.Pickler(saving)
saver.dump(net)
accuracy = str(100 * net.evaluate(validation_data) / 10000)
self.output.insert(tk.END, "\nTest on the validation data -> Accuracy : {0}%\n".format(accuracy))
self.update_idletasks()
self.output.see("end")
with open("models/hd_recognition/accuracy_ladder.md", "a") as ladder:
adding = str(net) + " --> accuracy = " + accuracy + "\n"
ladder.write(adding)
with open("models/hd_recognition/accuracy_ladder.md", "r") as ladder:
shove_percent = ladder.read().replace("%", "")
content = [net.split("= ") for net in shove_percent.split('\n')]
content.pop()
content_updated = sorted([(acc,net) for net,acc in content], reverse = True)
tostring = "%\n".join(["= ".join((net,acc)) for acc,net in content_updated]) + "%\n"
with open("models/hd_recognition/accuracy_ladder.md", "w") as ladder:
ladder.write(tostring)
def models_ladder(self, window, **kwargs):
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
ladder_label = tk.Label(self, text="Models Accuracy Ladder", font=self.font_title, bg="#fff2f2")
ladder_label.grid(row=0, column=1, padx=(0,150), pady=20)
textbox_frame = tk.LabelFrame(self)
textbox_frame.grid(row=1, column=0, columnspan=2)
output = tk.Text(textbox_frame, width=100, height=20, font=self.medium_font_button)
output.pack(side=tk.LEFT)
with open("models/hd_recognition/accuracy_ladder.md", "r") as ladder:
content = ladder.read()
output.insert(tk.END, content)
self.update_idletasks()
output.see("end")
scrollbar = tk.Scrollbar(textbox_frame, orient="vertical", command = output.yview)
scrollbar.pack(side=tk.RIGHT, fill="y")
output['yscrollcommand'] = scrollbar.set
self.pack()
def choose_prediction(self, window, **kwargs):
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
self.open_model_file()
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=(0,125), pady=(15,100))
choice_label = tk.Label(self, text="Choose the prediction style", font=self.font_title, bg="#fff2f2")
choice_label.grid(row=0, column=1, columnspan=10, padx=(50,250), pady=50)
choice_custom = tk.Button(self, text="Predict with custom test images", font=self.big_font_button, command=lambda: self.custom_prediction_frame(window, **kwargs))
choice_custom.grid(row=1, column=1, padx=(0,0), pady=(100))
choice_live = tk.Button(self, text="Live prediction", font=self.big_font_button, command=lambda: self.live_prediction_frame(window, **kwargs))
choice_live.grid(row=1, column=2, padx=(50,200), pady=(100))
def custom_prediction_frame(self, window, **kwargs):
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, pady=(10,30))
title_label = tk.Label(self, text="Custom images prediction\nChoose the number to predict", font=self.number_button_font, bg="#fff2f2")
title_label.grid(row=0, column=1, columnspan=2, padx=(0,150), pady=10)
number_buttons_frame = tk.LabelFrame(self, borderwidth=3, bg='white', pady=10)
number_buttons_frame.grid(row=1,column=1, columnspan=2, padx=(0,150))
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="0", command=lambda: self.number_button_click(0))
btn_home.grid(column=0, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="1", command=lambda: self.number_button_click(1))
btn_home.grid(column=1, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="2", command=lambda: self.number_button_click(2))
btn_home.grid(column=2, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="3", command=lambda: self.number_button_click(3))
btn_home.grid(column=3, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="4", command=lambda: self.number_button_click(4))
btn_home.grid(column=4, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="5", command=lambda: self.number_button_click(5))
btn_home.grid(column=5, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="6", command=lambda: self.number_button_click(6))
btn_home.grid(column=6, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="7", command=lambda: self.number_button_click(7))
btn_home.grid(column=7, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="8", command=lambda: self.number_button_click(8))
btn_home.grid(column=8, row=1, padx=15)
btn_home = tk.Button(number_buttons_frame, font=self.number_button_font, text="9", command=lambda: self.number_button_click(9))
btn_home.grid(column=9, row=1, padx=15)
def number_button_click(self, number):
img_filename_bmp = "hd_recognition/custom_test_images/test_image_"+str(number)+".bmp"
test_image = Image.open(img_filename_bmp)
image_array = 1 - (np.array(test_image).reshape(784,1) / 255)
model_activations = self.model_file.feedforward(image_array)
img_filename_png = "hd_recognition/custom_test_images/test_image_"+str(number)+".png"
custom_image = ImageTk.PhotoImage(Image.open(img_filename_png))
custom_image_label = tk.Label(self, image=custom_image, relief='ridge')
custom_image_label.image=custom_image
custom_image_label.grid(row=2, column=1, padx=10, pady=(5,5))
prediction_frame = tk.LabelFrame(self)
prediction_frame.grid(row=2,column=2, padx=(10,150), pady=(5,5))
self.plot_model_activation(model_activations, prediction_frame)
def live_prediction_frame(self, window, **kwargs):
self.destroy()
window.geometry("1500x800")
tk.Frame.__init__(self, window, width=1500, height=800, bg="#fff2f2", **kwargs)
self.pack()
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=100)
title = tk.Label(self, text="Live prediction\nDraw the number to predict", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=80)
live_prediction_starting_frame = tk.LabelFrame(self, borderwidth=3)
live_prediction_starting_frame.grid(row=0, column=2, padx=100)
live_prediction_starting_button = tk.Button(live_prediction_starting_frame, text="Start", font=self.medium_large_font_button, command=lambda: self.start_live_prediction(window))
live_prediction_starting_button.pack()
def start_live_prediction(self, window):
App = QApplication(sys.argv)
QtWindow = DrawingWindow(App, self)
QtWindow.setWindowTitle("Digit drawing window")
QtWindow.show()
sys.exit(App.exec())
def open_model_file(self):
re = True
while re:
try:
self.model_filename = filedialog.askopenfilename(initialdir="models/hd_recognition", title="Choose the model", filetypes=(("pickle files","*.pickle"), ("model files","*.model"), ("all files", "*.*")))
assert self.model_filename
re = False
except:
messagebox.showerror("Error", "Error : please select a model file")
with open(self.model_filename, "rb") as fic:
unpickler = pickle.Unpickler(fic)
self.model_file = unpickler.load()
def plot_model_activation(self, model_activations, frame):
fig = Figure(figsize = (4, 4))
fig.clf()
fig.add_subplot(111).plot(range(10), model_activations)
fig.suptitle("corresponding model activations")
axes = fig.gca()
axes.set_xlabel("digit")
axes.set_ylabel("activation")
axes.set_ylim([0, 1])
axes.set_xticks(range(10))
axes.set_yticks(np.array(range(11))/10)
canvas = FigureCanvasTkAgg(fig, master=frame)
canvas.draw()
canvas.flush_events()
canvas.get_tk_widget().grid(row=0, column=1)
self.annot_max(range(10), model_activations, axes)
def annot_max(x, y, ax):
xmax = x[np.argmax(y)]
ymax = y.max()
text = "digit = {}, activation = {:.3f}".format(xmax,ymax)
# distance from digit 4 drives the arrow curvature; guard against a
# division by zero when the peak sits exactly on digit 4
distance = abs(5 - (xmax + 1)) or 1
if xmax <= 4:
orientation = str((1 / distance) / 10)
else:
orientation = str(-(1 / distance) / 10)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=1)
arrowprops=dict(arrowstyle="-|>",connectionstyle="arc3,rad="+orientation)
kw = dict(xycoords='data',textcoords="axes fraction",
arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
ax.annotate(text, xy=(xmax, ymax), xytext=(0.8, 0.5), **kw)
annot_max = staticmethod(annot_max)
class DrawingWindow(QMainWindow):
def __init__(self, App, tkinter_root):
super().__init__()
self.label = QLabel()
self.blank()
self.setCentralWidget(self.label)
self.App = App
self.tkinter_root = tkinter_root
self.last_x, self.last_y = None, None
def blank(self):
self.label.setPixmap(QPixmap("hd_recognition/assets/white.png"))
def mouseMoveEvent(self, e):
if self.last_x is None:
self.last_x = e.x()
self.last_y = e.y()
return
painter = QPainter(self.label.pixmap())
painter.drawLine(self.last_x, self.last_y, e.x(), e.y())
painter.end()
self.update()
self.last_x = e.x()
self.last_y = e.y()
QScreen.grabWindow(self.App.primaryScreen(), self.winId()).save("hd_recognition/tmp/screenshot.png", 'png')
resize_img = Image.open("hd_recognition/tmp/screenshot.png")
resize_img = resize_img.resize((28,28))
resize_img.save("hd_recognition/tmp/screenshot.png", 'png')
img_array = np.array(Image.open("hd_recognition/tmp/screenshot.png"))
img_array = np.array([[pixel[0] for pixel in line] for line in img_array])
image_array = 1 - (img_array.reshape(784,1) / 255)
model_activations = self.tkinter_root.model_file.feedforward(image_array)
prediction_frame = tk.LabelFrame(self.tkinter_root)
prediction_frame.grid(row=2,column=2)
self.tkinter_root.plot_model_activation(model_activations, prediction_frame)
def mouseReleaseEvent(self, e):
self.last_x = None
self.last_y = None
window = tk.Tk()
window.geometry("1180x620")
window.title("Neural Networks")
window.configure(bg="#fff2f2")
interface = Interface(window)
interface.mainloop()
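# --- Illustrative sketch (not part of the original application) ---
# Both number_button_click() and DrawingWindow.mouseMoveEvent() reduce a
# 28x28 grayscale image to the 784x1 column vector network.feedforward()
# expects: pixel values are scaled to [0, 1] and inverted so ink is ~1 and the
# white background is ~0. The helper below isolates that preprocessing step;
# the default file name is only an example, and the convert("L")/resize calls
# are defensive additions not present in the original code paths. It relies on
# the PIL.Image and numpy imports already made at the top of this file.
def preprocess_digit_image(path="hd_recognition/custom_test_images/test_image_0.bmp"):
    """Return a (784, 1) float array ready for network.feedforward()."""
    img = Image.open(path).convert("L")      # force a single grayscale channel
    img = img.resize((28, 28))               # match the MNIST input size
    arr = np.array(img, dtype=float)         # shape (28, 28), values 0..255
    return 1 - (arr.reshape(784, 1) / 255)   # invert and normalise to [0, 1]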
| true
| true
|
f70cb4b1b49d0fb0f3970e183f9e1c31c8780f60
| 13,476
|
py
|
Python
|
nomenklatura/resolver.py
|
opensanctions/nomenklatura
|
d845ff821a57beffb49b269b4d8c08f57bc588d7
|
[
"MIT"
] | 4
|
2022-02-25T13:32:20.000Z
|
2022-03-15T23:35:42.000Z
|
nomenklatura/resolver.py
|
opensanctions/nomenklatura
|
d845ff821a57beffb49b269b4d8c08f57bc588d7
|
[
"MIT"
] | null | null | null |
nomenklatura/resolver.py
|
opensanctions/nomenklatura
|
d845ff821a57beffb49b269b4d8c08f57bc588d7
|
[
"MIT"
] | 1
|
2022-03-02T16:56:48.000Z
|
2022-03-02T16:56:48.000Z
|
import json
import getpass
import shortuuid # type: ignore
from datetime import datetime
from functools import lru_cache
from collections import defaultdict
from typing import Any, Dict, Generator, Generic, List, Optional, Set, Tuple, Union
from followthemoney.types import registry
from nomenklatura.entity import CE
from nomenklatura.judgement import Judgement
from nomenklatura.util import PathLike, is_qid
StrIdent = Union[str, "Identifier"]
Pair = Tuple["Identifier", "Identifier"]
class ResolverLogicError(Exception):
pass
class Identifier(object):
PREFIX = "NK-"
__slots__ = ("id", "canonical", "weight")
def __init__(self, id: str):
self.id = id
self.weight: int = 1
if self.id.startswith(self.PREFIX):
self.weight = 2
elif is_qid(id):
self.weight = 3
self.canonical = self.weight > 1
def __eq__(self, other: Any) -> bool:
return str(self) == str(other)
def __lt__(self, other: Any) -> bool:
return (self.weight, self.id) < (other.weight, other.id)
def __str__(self) -> str:
return self.id
def __hash__(self) -> int:
return hash(self.id)
def __len__(self) -> int:
return len(self.id)
def __repr__(self) -> str:
return f"<I({self.id})>"
@classmethod
def get(cls, id: StrIdent) -> "Identifier":
if isinstance(id, str):
return cls(id)
return id
@classmethod
def pair(cls, left_id: StrIdent, right_id: StrIdent) -> Pair:
left = cls.get(left_id)
right = cls.get(right_id)
if left == right:
raise ResolverLogicError()
return (max(left, right), min(left, right))
@classmethod
def make(cls, value: Optional[str] = None) -> "Identifier":
key = value or shortuuid.uuid()
return cls.get(f"{cls.PREFIX}{key}")
class Edge(object):
__slots__ = ("key", "source", "target", "judgement", "score", "user", "timestamp")
def __init__(
self,
left_id: StrIdent,
right_id: StrIdent,
judgement: Judgement = Judgement.NO_JUDGEMENT,
score: Optional[float] = None,
user: Optional[str] = None,
timestamp: Optional[str] = None,
):
self.key = Identifier.pair(left_id, right_id)
self.target, self.source = self.key
self.judgement = judgement
self.score = score
self.user = user
self.timestamp = timestamp
def other(self, cur: Identifier) -> Identifier:
if cur == self.target:
return self.source
return self.target
def to_line(self) -> str:
row = [
self.target.id,
self.source.id,
self.judgement.value,
self.score,
self.user,
self.timestamp,
]
return json.dumps(row) + "\n"
def __str__(self) -> str:
return self.to_line()
def __hash__(self) -> int:
return hash(self.key)
def __eq__(self, other: Any) -> bool:
return hash(self) == hash(other)
def __lt__(self, other: Any) -> bool:
return bool(self.key < other.key)
def __repr__(self) -> str:
return f"<E({self.target.id}, {self.source.id}, {self.judgement.value})>"
@classmethod
def from_line(cls, line: str) -> "Edge":
data = json.loads(line)
return cls(
data[0],
data[1],
judgement=Judgement(data[2]),
score=data[3],
user=data[4],
timestamp=data[5],
)
class Resolver(Generic[CE]):
UNDECIDED = (Judgement.NO_JUDGEMENT, Judgement.UNSURE)
def __init__(self, path: Optional[PathLike] = None) -> None:
self.path = path
self.edges: Dict[Pair, Edge] = {}
self.nodes: Dict[Identifier, Set[Edge]] = defaultdict(set)
def get_edge(self, left_id: StrIdent, right_id: StrIdent) -> Optional[Edge]:
key = Identifier.pair(left_id, right_id)
return self.edges.get(key)
def _traverse(self, node: Identifier, seen: Set[Identifier]) -> Set[Identifier]:
connected = set([node])
if node in seen:
return connected
seen.add(node)
for edge in self.nodes.get(node, []):
if edge.judgement == Judgement.POSITIVE:
other = edge.other(node)
rec = self._traverse(other, seen)
connected.update(rec)
return connected
@lru_cache(maxsize=None)
def connected(self, node: Identifier) -> Set[Identifier]:
return self._traverse(node, set())
def get_canonical(self, entity_id: StrIdent) -> str:
"""Return the canonical identifier for the given entity ID."""
node = Identifier.get(entity_id)
best = max(self.connected(node))
if best.canonical:
return best.id
return node.id
def canonicals(self) -> Generator[Identifier, None, None]:
"""Return all the canonical cluster identifiers."""
for node in self.nodes.keys():
if not node.canonical:
continue
canonical = self.get_canonical(node)
if canonical == node.id:
yield node
def get_referents(
self, canonical_id: StrIdent, canonicals: bool = True
) -> Set[str]:
"""Get all the non-canonical entity identifiers which refer to a given
canonical identifier."""
node = Identifier.get(canonical_id)
referents: Set[str] = set()
for connected in self.connected(node):
if not canonicals and connected.canonical:
continue
if connected == node:
continue
referents.add(connected.id)
return referents
def get_resolved_edge(
self, left_id: StrIdent, right_id: StrIdent
) -> Optional[Edge]:
(left, right) = Identifier.pair(left_id, right_id)
left_connected = self.connected(left)
right_connected = self.connected(right)
for e in left_connected:
for o in right_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
return edge
return None
def get_judgement(self, entity_id: StrIdent, other_id: StrIdent) -> Judgement:
"""Get the existing decision between two entities with dedupe factored in."""
entity = Identifier.get(entity_id)
other = Identifier.get(other_id)
if entity == other:
return Judgement.POSITIVE
if is_qid(entity.id) and is_qid(other.id):
return Judgement.NEGATIVE
entity_connected = self.connected(entity)
if other in entity_connected:
return Judgement.POSITIVE
other_connected = self.connected(other)
for e in entity_connected:
for o in other_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
if edge.judgement == Judgement.NEGATIVE:
return edge.judgement
return Judgement.NO_JUDGEMENT
def check_candidate(self, left: StrIdent, right: StrIdent) -> bool:
"""Check if the two IDs could be merged, i.e. if there's no existing
judgement."""
judgement = self.get_judgement(left, right)
return judgement == Judgement.NO_JUDGEMENT
def _get_suggested(self) -> List[Edge]:
"""Get all NO_JUDGEMENT edges in descending order of score."""
edges_all = self.edges.values()
candidates = (e for e in edges_all if e.judgement == Judgement.NO_JUDGEMENT)
cmp = lambda x: x.score or -1.0
return sorted(candidates, key=cmp, reverse=True)
def get_candidates(
self, limit: int = 100
) -> Generator[Tuple[str, str, Optional[float]], None, None]:
returned = 0
for edge in self._get_suggested():
if not self.check_candidate(edge.source, edge.target):
continue
yield edge.target.id, edge.source.id, edge.score
returned += 1
if returned >= limit:
break
def suggest(
self, left_id: StrIdent, right_id: StrIdent, score: float
) -> Identifier:
"""Make a NO_JUDGEMENT link between two identifiers to suggest that a user
should make a decision about whether they are the same or not."""
edge = self.get_edge(left_id, right_id)
if edge is not None:
if edge.judgement in self.UNDECIDED:
edge.score = score
return edge.target
return self.decide(left_id, right_id, Judgement.NO_JUDGEMENT, score=score)
def decide(
self,
left_id: StrIdent,
right_id: StrIdent,
judgement: Judgement,
user: Optional[str] = None,
score: Optional[float] = None,
) -> Identifier:
edge = self.get_edge(left_id, right_id)
if edge is None:
edge = Edge(left_id, right_id, judgement=judgement)
# Canonicalise positive matches, i.e. make both identifiers refer to a
# canonical identifier, instead of making a direct link.
if judgement == Judgement.POSITIVE:
connected = set(self.connected(edge.target))
connected.update(self.connected(edge.source))
target = max(connected)
if not target.canonical:
canonical = Identifier.make()
self._remove(edge)
self.decide(edge.source, canonical, judgement=judgement, user=user)
self.decide(edge.target, canonical, judgement=judgement, user=user)
return canonical
edge.judgement = judgement
edge.timestamp = datetime.utcnow().isoformat()[:16]
edge.user = user or getpass.getuser()
edge.score = score or edge.score
self._register(edge)
return edge.target
def _register(self, edge: Edge) -> None:
if edge.judgement != Judgement.NO_JUDGEMENT:
edge.score = None
self.edges[edge.key] = edge
self.nodes[edge.source].add(edge)
self.nodes[edge.target].add(edge)
self.connected.cache_clear()
def _remove(self, edge: Edge) -> None:
"""Remove an edge from the graph."""
self.edges.pop(edge.key, None)
for node in (edge.source, edge.target):
if node in self.nodes:
self.nodes[node].discard(edge)
def explode(self, node_id: StrIdent) -> Set[str]:
"""Dissolve all edges linked to the cluster to which the node belongs.
This is the hard way to make sure we re-do context once we realise
there's been a mistake."""
node = Identifier.get(node_id)
affected: Set[str] = set()
for part in self.connected(node):
affected.add(str(part))
edges = self.nodes.get(part)
if edges is None:
continue
for edge in list(edges):
if edge.judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
self.connected.cache_clear()
return affected
def prune(self, keep: int = 0) -> None:
"""Remove suggested (i.e. NO_JUDGEMENT) edges, keep only the n with the
highest score. This also checks if a transitive judgement has been
established in the mean time and removes those candidates."""
kept = 0
for edge in self._get_suggested():
judgement = self.get_judgement(edge.source, edge.target)
if judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
if kept >= keep:
self._remove(edge)
kept += 1
self.connected.cache_clear()
def apply(self, proxy: CE) -> CE:
"""Replace all entity references in a given proxy with their canonical
identifiers. This is essentially the harmonisation post de-dupe."""
canonical_id = self.get_canonical(proxy.id)
if canonical_id != proxy.id:
proxy.referents = self.get_referents(canonical_id)
proxy.id = canonical_id
for prop in proxy.iterprops():
if prop.type != registry.entity:
continue
for value in proxy.pop(prop):
canonical = self.get_canonical(value)
proxy.unsafe_add(prop, canonical, cleaned=True)
return proxy
def save(self) -> None:
"""Store the resolver adjacency list to a plain text JSON list."""
if self.path is None:
raise RuntimeError("Resolver has no path")
edges = sorted(self.edges.values())
with open(self.path, "w") as fh:
for edge in edges:
fh.write(edge.to_line())
@classmethod
def load(cls, path: PathLike) -> "Resolver[CE]":
resolver = cls(path=path)
if not path.exists():
return resolver
with open(path, "r") as fh:
while True:
line = fh.readline()
if not line:
break
edge = Edge.from_line(line)
resolver._register(edge)
return resolver
def __repr__(self) -> str:
path = self.path.name if self.path is not None else ":memory:"
return f"<Resolver({path!r}, {len(self.edges)})>"
| 34.642674
| 86
| 0.58875
|
import json
import getpass
import shortuuid
from datetime import datetime
from functools import lru_cache
from collections import defaultdict
from typing import Any, Dict, Generator, Generic, List, Optional, Set, Tuple, Union
from followthemoney.types import registry
from nomenklatura.entity import CE
from nomenklatura.judgement import Judgement
from nomenklatura.util import PathLike, is_qid
StrIdent = Union[str, "Identifier"]
Pair = Tuple["Identifier", "Identifier"]
class ResolverLogicError(Exception):
pass
class Identifier(object):
PREFIX = "NK-"
__slots__ = ("id", "canonical", "weight")
def __init__(self, id: str):
self.id = id
self.weight: int = 1
if self.id.startswith(self.PREFIX):
self.weight = 2
elif is_qid(id):
self.weight = 3
self.canonical = self.weight > 1
def __eq__(self, other: Any) -> bool:
return str(self) == str(other)
def __lt__(self, other: Any) -> bool:
return (self.weight, self.id) < (other.weight, other.id)
def __str__(self) -> str:
return self.id
def __hash__(self) -> int:
return hash(self.id)
def __len__(self) -> int:
return len(self.id)
def __repr__(self) -> str:
return f"<I({self.id})>"
@classmethod
def get(cls, id: StrIdent) -> "Identifier":
if isinstance(id, str):
return cls(id)
return id
@classmethod
def pair(cls, left_id: StrIdent, right_id: StrIdent) -> Pair:
left = cls.get(left_id)
right = cls.get(right_id)
if left == right:
raise ResolverLogicError()
return (max(left, right), min(left, right))
@classmethod
def make(cls, value: Optional[str] = None) -> "Identifier":
key = value or shortuuid.uuid()
return cls.get(f"{cls.PREFIX}{key}")
class Edge(object):
__slots__ = ("key", "source", "target", "judgement", "score", "user", "timestamp")
def __init__(
self,
left_id: StrIdent,
right_id: StrIdent,
judgement: Judgement = Judgement.NO_JUDGEMENT,
score: Optional[float] = None,
user: Optional[str] = None,
timestamp: Optional[str] = None,
):
self.key = Identifier.pair(left_id, right_id)
self.target, self.source = self.key
self.judgement = judgement
self.score = score
self.user = user
self.timestamp = timestamp
def other(self, cur: Identifier) -> Identifier:
if cur == self.target:
return self.source
return self.target
def to_line(self) -> str:
row = [
self.target.id,
self.source.id,
self.judgement.value,
self.score,
self.user,
self.timestamp,
]
return json.dumps(row) + "\n"
def __str__(self) -> str:
return self.to_line()
def __hash__(self) -> int:
return hash(self.key)
def __eq__(self, other: Any) -> bool:
return hash(self) == hash(other)
def __lt__(self, other: Any) -> bool:
return bool(self.key < other.key)
def __repr__(self) -> str:
return f"<E({self.target.id}, {self.source.id}, {self.judgement.value})>"
@classmethod
def from_line(cls, line: str) -> "Edge":
data = json.loads(line)
return cls(
data[0],
data[1],
judgement=Judgement(data[2]),
score=data[3],
user=data[4],
timestamp=data[5],
)
class Resolver(Generic[CE]):
UNDECIDED = (Judgement.NO_JUDGEMENT, Judgement.UNSURE)
def __init__(self, path: Optional[PathLike] = None) -> None:
self.path = path
self.edges: Dict[Pair, Edge] = {}
self.nodes: Dict[Identifier, Set[Edge]] = defaultdict(set)
def get_edge(self, left_id: StrIdent, right_id: StrIdent) -> Optional[Edge]:
key = Identifier.pair(left_id, right_id)
return self.edges.get(key)
def _traverse(self, node: Identifier, seen: Set[Identifier]) -> Set[Identifier]:
connected = set([node])
if node in seen:
return connected
seen.add(node)
for edge in self.nodes.get(node, []):
if edge.judgement == Judgement.POSITIVE:
other = edge.other(node)
rec = self._traverse(other, seen)
connected.update(rec)
return connected
@lru_cache(maxsize=None)
def connected(self, node: Identifier) -> Set[Identifier]:
return self._traverse(node, set())
def get_canonical(self, entity_id: StrIdent) -> str:
node = Identifier.get(entity_id)
best = max(self.connected(node))
if best.canonical:
return best.id
return node.id
def canonicals(self) -> Generator[Identifier, None, None]:
for node in self.nodes.keys():
if not node.canonical:
continue
canonical = self.get_canonical(node)
if canonical == node.id:
yield node
def get_referents(
self, canonical_id: StrIdent, canonicals: bool = True
) -> Set[str]:
node = Identifier.get(canonical_id)
referents: Set[str] = set()
for connected in self.connected(node):
if not canonicals and connected.canonical:
continue
if connected == node:
continue
referents.add(connected.id)
return referents
def get_resolved_edge(
self, left_id: StrIdent, right_id: StrIdent
) -> Optional[Edge]:
(left, right) = Identifier.pair(left_id, right_id)
left_connected = self.connected(left)
right_connected = self.connected(right)
for e in left_connected:
for o in right_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
return edge
return None
def get_judgement(self, entity_id: StrIdent, other_id: StrIdent) -> Judgement:
entity = Identifier.get(entity_id)
other = Identifier.get(other_id)
if entity == other:
return Judgement.POSITIVE
if is_qid(entity.id) and is_qid(other.id):
return Judgement.NEGATIVE
entity_connected = self.connected(entity)
if other in entity_connected:
return Judgement.POSITIVE
other_connected = self.connected(other)
for e in entity_connected:
for o in other_connected:
edge = self.edges.get(Identifier.pair(e, o))
if edge is None:
continue
if edge.judgement == Judgement.NEGATIVE:
return edge.judgement
return Judgement.NO_JUDGEMENT
def check_candidate(self, left: StrIdent, right: StrIdent) -> bool:
judgement = self.get_judgement(left, right)
return judgement == Judgement.NO_JUDGEMENT
def _get_suggested(self) -> List[Edge]:
edges_all = self.edges.values()
candidates = (e for e in edges_all if e.judgement == Judgement.NO_JUDGEMENT)
cmp = lambda x: x.score or -1.0
return sorted(candidates, key=cmp, reverse=True)
def get_candidates(
self, limit: int = 100
) -> Generator[Tuple[str, str, Optional[float]], None, None]:
returned = 0
for edge in self._get_suggested():
if not self.check_candidate(edge.source, edge.target):
continue
yield edge.target.id, edge.source.id, edge.score
returned += 1
if returned >= limit:
break
def suggest(
self, left_id: StrIdent, right_id: StrIdent, score: float
) -> Identifier:
edge = self.get_edge(left_id, right_id)
if edge is not None:
if edge.judgement in self.UNDECIDED:
edge.score = score
return edge.target
return self.decide(left_id, right_id, Judgement.NO_JUDGEMENT, score=score)
def decide(
self,
left_id: StrIdent,
right_id: StrIdent,
judgement: Judgement,
user: Optional[str] = None,
score: Optional[float] = None,
) -> Identifier:
edge = self.get_edge(left_id, right_id)
if edge is None:
edge = Edge(left_id, right_id, judgement=judgement)
if judgement == Judgement.POSITIVE:
connected = set(self.connected(edge.target))
connected.update(self.connected(edge.source))
target = max(connected)
if not target.canonical:
canonical = Identifier.make()
self._remove(edge)
self.decide(edge.source, canonical, judgement=judgement, user=user)
self.decide(edge.target, canonical, judgement=judgement, user=user)
return canonical
edge.judgement = judgement
edge.timestamp = datetime.utcnow().isoformat()[:16]
edge.user = user or getpass.getuser()
edge.score = score or edge.score
self._register(edge)
return edge.target
def _register(self, edge: Edge) -> None:
if edge.judgement != Judgement.NO_JUDGEMENT:
edge.score = None
self.edges[edge.key] = edge
self.nodes[edge.source].add(edge)
self.nodes[edge.target].add(edge)
self.connected.cache_clear()
def _remove(self, edge: Edge) -> None:
self.edges.pop(edge.key, None)
for node in (edge.source, edge.target):
if node in self.nodes:
self.nodes[node].discard(edge)
def explode(self, node_id: StrIdent) -> Set[str]:
node = Identifier.get(node_id)
affected: Set[str] = set()
for part in self.connected(node):
affected.add(str(part))
edges = self.nodes.get(part)
if edges is None:
continue
for edge in list(edges):
if edge.judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
self.connected.cache_clear()
return affected
def prune(self, keep: int = 0) -> None:
kept = 0
for edge in self._get_suggested():
judgement = self.get_judgement(edge.source, edge.target)
if judgement != Judgement.NO_JUDGEMENT:
self._remove(edge)
if kept >= keep:
self._remove(edge)
kept += 1
self.connected.cache_clear()
def apply(self, proxy: CE) -> CE:
canonical_id = self.get_canonical(proxy.id)
if canonical_id != proxy.id:
proxy.referents = self.get_referents(canonical_id)
proxy.id = canonical_id
for prop in proxy.iterprops():
if prop.type != registry.entity:
continue
for value in proxy.pop(prop):
canonical = self.get_canonical(value)
proxy.unsafe_add(prop, canonical, cleaned=True)
return proxy
def save(self) -> None:
if self.path is None:
raise RuntimeError("Resolver has no path")
edges = sorted(self.edges.values())
with open(self.path, "w") as fh:
for edge in edges:
fh.write(edge.to_line())
@classmethod
def load(cls, path: PathLike) -> "Resolver[CE]":
resolver = cls(path=path)
if not path.exists():
return resolver
with open(path, "r") as fh:
while True:
line = fh.readline()
if not line:
break
edge = Edge.from_line(line)
resolver._register(edge)
return resolver
def __repr__(self) -> str:
path = self.path.name if self.path is not None else ":memory:"
return f"<Resolver({path!r}, {len(self.edges)})>"
| true
| true
|
f70cb50a820e5478ac56b1d142049faed4d62e57
| 4,778
|
py
|
Python
|
httpie_oauth2_client_credentials.py
|
satodoc/httpie-oauth2-client-credentials
|
831d1c910e43457d6c64f2f940ee279cd9f58895
|
[
"MIT"
] | null | null | null |
httpie_oauth2_client_credentials.py
|
satodoc/httpie-oauth2-client-credentials
|
831d1c910e43457d6c64f2f940ee279cd9f58895
|
[
"MIT"
] | null | null | null |
httpie_oauth2_client_credentials.py
|
satodoc/httpie-oauth2-client-credentials
|
831d1c910e43457d6c64f2f940ee279cd9f58895
|
[
"MIT"
] | null | null | null |
'''
OAuth2.0 client credentials flow plugin for HTTPie.
'''
import sys
from httpie.plugins import AuthPlugin
from httpie.cli.definition import parser as httpie_args_parser
from urllib.request import Request, urlopen
from urllib.parse import urlencode
from urllib.error import HTTPError
import json
from base64 import b64encode
class OAuth2ClientCredentials:
def __init__(self, client_id, client_secret):
if not client_id:
raise ValueError('client_id is required.')
self.client_id = client_id
if not client_secret:
raise ValueError('client_secret is required.')
self.client_secret = client_secret
options = httpie_args_parser.args
if not options.token_endpoint:
raise ValueError('token_endpoint is required.')
self.token_endpoint = options.token_endpoint
self.token_request_type = options.token_request_type
self.scope = options.scope
self.print_token_response = options.print_token_response
def __call__(self, request):
token_response = self.__get_token()
token_type = token_response.get('token_type', 'Bearer')
token = token_response.get('access_token', '')
request.headers['Authorization'] = '%s %s' % (token_type, token)
return request
def __get_token(self):
req_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
post_params = {'grant_type': 'client_credentials'}
if self.scope:
post_params['scope'] = self.scope
post_data = None
if self.token_request_type == 'basic':
credentials = u'%s:%s' % (self.client_id, self.client_secret)
token = b64encode(credentials.encode('utf8')).strip().decode('latin1')
req_headers['Authorization'] = 'Basic %s' % token
post_data = urlencode(post_params).encode()
else:
post_params['client_id'] = self.client_id
post_params['client_secret'] = self.client_secret
if self.token_request_type == 'form':
post_data = urlencode(post_params).encode()
elif self.token_request_type == 'json':
req_headers = {'Content-Type': 'application/json'}
post_data = json.dumps(post_params).encode("utf-8")
else:
raise ValueError('token-request-type is invalid value.')
# Execute token request.
try:
res = urlopen(Request(self.token_endpoint, method='POST', headers=req_headers, data=post_data))
res_body = json.loads(res.read())
if self.print_token_response:
sys.stdout.write(f'token_response: \n========== \n{json.dumps(res_body, indent=2)}\n==========\n')
return res_body
except HTTPError as e:
if self.print_token_response:
sys.stderr.write(f'oauth2 error response:\nstatus={e.status}\n')
res_body = e.read()
try:
res_body = json.loads(res_body)
sys.stderr.write(f'token_error_response: \n========== \n{json.dumps(res_body, indent=2)}\n==========\n')
except ValueError:
sys.stderr.write(f'error_response: \n========== \n{res_body}\n==========\n')
raise e
class OAuth2ClientCredentialsPlugin(AuthPlugin):
name = 'OAuth2.0 client credentials flow.'
auth_type = 'oauth2-client-credentials'
description = 'Set the Bearer token obtained in the OAuth2.0 client_credentials flow to the Authorization header.'
params = httpie_args_parser.add_argument_group(title='OAuth2.0 client credentials flow options')
params.add_argument(
'--token-endpoint',
default=None,
metavar='TOKEN_ENDPOINT_URL',
help='OAuth 2.0 Token endpoint URI'
)
params.add_argument(
'--token-request-type',
default='basic',
choices=('basic','form','json'),
help='OAuth 2.0 Token request types.'
)
params.add_argument(
'--scope',
default=None,
metavar='OAUTH2_SCOPE',
help='OAuth 2.0 Scopes'
)
params.add_argument(
'--print-token-response',
dest='print_token_response',
action='store_true',
default=False,
help='print oauth2 token response.'
)
def get_auth(self, username=None, password=None):
'''Build the auth handler that adds the Authorization header.
Args:
username str: OAuth2 client_id
password str: OAuth2 client_secret
Returns:
OAuth2ClientCredentials:
Auth callable that sets the Authorization header on the outgoing request.
'''
return OAuth2ClientCredentials(username, password)
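# --- Illustrative sketch (not part of the original plugin) ---
# Example invocation, combining the options registered above with HTTPie's
# standard --auth-type/--auth flags (endpoint and credentials are placeholders):
#
#   http -A oauth2-client-credentials -a CLIENT_ID:CLIENT_SECRET \
#       --token-endpoint https://auth.example.com/oauth2/token \
#       --token-request-type basic --scope read:items \
#       GET https://api.example.com/items
#
# The helper below mirrors how __get_token() shapes the token request for the
# three --token-request-type values; it reuses the module-level imports above.
def build_token_request(request_type, client_id, client_secret, scope=None):
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    params = {'grant_type': 'client_credentials'}
    if scope:
        params['scope'] = scope
    if request_type == 'basic':
        # credentials travel in an HTTP Basic Authorization header
        creds = b64encode(f'{client_id}:{client_secret}'.encode('utf8')).decode('latin1')
        headers['Authorization'] = 'Basic %s' % creds
        body = urlencode(params).encode()
    elif request_type == 'form':
        # credentials travel in the form-encoded body
        params.update(client_id=client_id, client_secret=client_secret)
        body = urlencode(params).encode()
    elif request_type == 'json':
        # credentials travel in a JSON body
        headers = {'Content-Type': 'application/json'}
        params.update(client_id=client_id, client_secret=client_secret)
        body = json.dumps(params).encode('utf-8')
    else:
        raise ValueError('unknown token request type: %s' % request_type)
    return headers, body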
| 38.532258
| 124
| 0.619506
|
import sys
from httpie.plugins import AuthPlugin
from httpie.cli.definition import parser as httpie_args_parser
from urllib.request import Request, urlopen
from urllib.parse import urlencode
from urllib.error import HTTPError
import json
from base64 import b64encode
class OAuth2ClientCredentials:
def __init__(self, client_id, client_secret):
if not client_id:
raise ValueError('client_id is required.')
self.client_id = client_id
if not client_secret:
raise ValueError('client_secret is required.')
self.client_secret = client_secret
options = httpie_args_parser.args
if not options.token_endpoint:
raise ValueError('token_endpoint is required.')
self.token_endpoint = options.token_endpoint
self.token_request_type = options.token_request_type
self.scope = options.scope
self.print_token_response = options.print_token_response
def __call__(self, request):
token_response = self.__get_token()
token_type = token_response.get('token_type', 'Bearer')
token = token_response.get('access_token', '')
request.headers['Authorization'] = '%s %s' % (token_type, token)
return request
def __get_token(self):
req_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
post_params = {'grant_type': 'client_credentials'}
if self.scope:
post_params['scope'] = self.scope
post_data = None
if self.token_request_type == 'basic':
credentials = u'%s:%s' % (self.client_id, self.client_secret)
token = b64encode(credentials.encode('utf8')).strip().decode('latin1')
req_headers['Authorization'] = 'Basic %s' % token
post_data = urlencode(post_params).encode()
else:
post_params['client_id'] = self.client_id
post_params['client_secret'] = self.client_secret
if self.token_request_type == 'form':
post_data = urlencode(post_params).encode()
elif self.token_request_type == 'json':
req_headers = {'Content-Type': 'application/json'}
post_data = json.dumps(post_params).encode("utf-8")
else:
raise ValueError('token-request-type is invalid value.')
try:
res = urlopen(Request(self.token_endpoint, method='POST', headers=req_headers, data=post_data))
res_body = json.loads(res.read())
if self.print_token_response:
sys.stdout.write(f'token_response: \n========== \n{json.dumps(res_body, indent=2)}\n==========\n')
return res_body
except HTTPError as e:
if self.print_token_response:
sys.stderr.write(f'oauth2 error response:\nstatus={e.status}\n')
res_body = e.read()
try:
res_body = json.loads(res_body)
sys.stderr.write(f'token_error_response: \n========== \n{json.dumps(res_body, indent=2)}\n==========\n')
except:
sys.stderr.write(f'error_response: \n========== \n{res_body}\n==========\n')
raise e
class OAuth2ClientCredentialsPlugin(AuthPlugin):
name = 'OAuth2.0 client credentials flow.'
auth_type = 'oauth2-client-credentials'
description = 'Set the Bearer token obtained in the OAuth2.0 client_credentials flow to the Authorization header.'
params = httpie_args_parser.add_argument_group(title='OAuth2.0 client credentials flow options')
params.add_argument(
'--token-endpoint',
default=None,
metavar='TOKEN_ENDPOINT_URL',
help='OAuth 2.0 Token endpoint URI'
)
params.add_argument(
'--token-request-type',
default='basic',
choices=('basic','form','json'),
help='OAuth 2.0 Token request types.'
)
params.add_argument(
'--scope',
default=None,
metavar='OAUTH2_SCOPE',
help='OAuth 2.0 Scopes'
)
params.add_argument(
'--print-token-response',
dest='print_token_response',
action='store_true',
default=False,
help='print oauth2 token response.'
)
def get_auth(self, username=None, password=None):
return OAuth2ClientCredentials(username, password)
| true
| true
|
f70cb531f0d051e17a54782543c3607133af43c5
| 10,577
|
py
|
Python
|
tests/test_edgeql_userddl.py
|
haikyuu/edgedb
|
73125882a4eff337692ad10af4bfdf15eef341ab
|
[
"Apache-2.0"
] | null | null | null |
tests/test_edgeql_userddl.py
|
haikyuu/edgedb
|
73125882a4eff337692ad10af4bfdf15eef341ab
|
[
"Apache-2.0"
] | null | null | null |
tests/test_edgeql_userddl.py
|
haikyuu/edgedb
|
73125882a4eff337692ad10af4bfdf15eef341ab
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2018-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import edgedb
from edb.testbase import server as tb
class TestEdgeQLUserDDL(tb.DDLTestCase):
INTERNAL_TESTMODE = False
async def test_edgeql_userddl_01(self):
# testing anytype polymorphism
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_01.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_01(
a: anytype
) -> bool
USING EdgeQL $$
SELECT a IS float32
$$;
''')
async def test_edgeql_userddl_02(self):
# testing anyreal polymorphism, which is an actual abstract type
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_02.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_02(
a: anyreal
) -> bool
USING EdgeQL $$
SELECT a IS float32
$$;
''')
async def test_edgeql_userddl_03(self):
# testing anytype as return type
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_03.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_03(
a: str
) -> anytype
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_04(self):
# testing anyscalar as return type
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_04.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_04(
a: str
) -> anyscalar
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_05(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_05.*'
r'USING SQL FUNCTION.*not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_05(
a: str
) -> str
USING SQL FUNCTION 'lower';
''')
async def test_edgeql_userddl_06(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_06.*'
r'USING SQL.*not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_06(
a: str
) -> str
USING SQL $$ SELECT "a" $$;
''')
async def test_edgeql_userddl_07(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'user-defined operators are not supported'):
await self.con.execute('''
CREATE INFIX OPERATOR
std::`+` (l: std::str, r: std::str) -> std::str
USING SQL OPERATOR r'||';
''')
async def test_edgeql_userddl_08(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'user-defined casts are not supported'):
await self.con.execute('''
CREATE CAST FROM std::int64 TO std::duration {
USING SQL CAST;
ALLOW ASSIGNMENT;
};
''')
async def test_edgeql_userddl_09(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module std is read-only'):
await self.con.execute('''
CREATE FUNCTION std::func_09(
a: str
) -> str
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_10(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module math is read-only'):
await self.con.execute('''
CREATE FUNCTION math::func_10(
a: str
) -> str
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_11(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module std is read-only'):
await self.con.execute('''
CREATE TYPE std::Foo_11;
''')
async def test_edgeql_userddl_12(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module math is read-only'):
await self.con.execute('''
CREATE TYPE math::Foo_11;
''')
async def test_edgeql_userddl_13(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module std is read-only'):
await self.con.execute('''
DROP TYPE std::Object;
''')
async def test_edgeql_userddl_14(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module stdgraphql is read-only'):
await self.con.execute('''
DROP TYPE stdgraphql::Query;
''')
async def test_edgeql_userddl_15(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot alter.*module std is read-only'):
await self.con.execute('''
ALTER TYPE std::Object {
CREATE PROPERTY foo_15 -> std::str;
};
''')
async def test_edgeql_userddl_16(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot alter.*module stdgraphql is read-only'):
await self.con.execute('''
ALTER TYPE stdgraphql::Query {
CREATE PROPERTY foo_15 -> std::str;
};
''')
async def test_edgeql_userddl_17(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module std is read-only'):
await self.con.execute('''
DROP MODULE std;
''')
async def test_edgeql_userddl_18(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module math is read-only'):
await self.con.execute('''
DROP MODULE math;
''')
async def test_edgeql_userddl_19(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'cannot create.*test::func_19.*'
r'SET OF parameters in user-defined EdgeQL '
r'functions are not supported'):
await self.con.execute('''
CREATE FUNCTION test::func_19(
a: SET OF str
) -> bool
USING EdgeQL $$
SELECT EXISTS a
$$;
''')
async def test_edgeql_userddl_20(self):
await self.con.execute('''
CREATE FUNCTION test::func_20(
a: str
) -> SET OF str
USING EdgeQL $$
SELECT {a, 'a'}
$$;
''')
await self.assert_query_result(
r'''
SELECT test::func_20('q');
''',
{'q', 'a'},
)
await self.assert_query_result(
r'''
SELECT count(test::func_20({'q', 'w'}));
''',
{4},
)
async def test_edgeql_userddl_21(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r"'force_return_cast' is not a valid field"):
await self.con.execute('''
CREATE FUNCTION test::func(
a: str
) -> bool
{
USING EdgeQL $$
SELECT True;
$$;
SET force_return_cast := true;
};
''')
async def test_edgeql_userddl_22(self):
await self.con.execute('''
CREATE ABSTRACT CONSTRAINT test::uppercase {
CREATE ANNOTATION title := "Upper case constraint";
USING (str_upper(__subject__) = __subject__);
SET errmessage := "{__subject__} is not in upper case";
};
CREATE SCALAR TYPE test::upper_str EXTENDING str {
CREATE CONSTRAINT test::uppercase
};
''')
await self.assert_query_result(
r'''
SELECT <test::upper_str>'123_HELLO';
''',
{'123_HELLO'},
)
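    async def test_edgeql_userddl_23(self):
        # Illustrative sketch (not part of the original suite): a user-defined
        # scalar constraint should reject non-conforming casts. The error
        # class and the message text matched below are assumptions based on
        # the errmessage set here.
        await self.con.execute('''
            CREATE SCALAR TYPE test::positive_int_23 EXTENDING int64 {
                CREATE CONSTRAINT min_value(1) {
                    SET errmessage := "value_23 must be positive";
                };
            };
        ''')
        with self.assertRaisesRegex(
                edgedb.ConstraintViolationError,
                r'must be positive'):
            await self.con.execute('''
                SELECT <test::positive_int_23>0;
            ''')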
| 34.452769
| 74
| 0.502695
|
import edgedb
from edb.testbase import server as tb
class TestEdgeQLUserDDL(tb.DDLTestCase):
INTERNAL_TESTMODE = False
async def test_edgeql_userddl_01(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_01.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_01(
a: anytype
) -> bool
USING EdgeQL $$
SELECT a IS float32
$$;
''')
async def test_edgeql_userddl_02(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_02.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_02(
a: anyreal
) -> bool
USING EdgeQL $$
SELECT a IS float32
$$;
''')
async def test_edgeql_userddl_03(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_03.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_03(
a: str
) -> anytype
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_04(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_04.*'
r'generic types are not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_04(
a: str
) -> anyscalar
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_05(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_05.*'
r'USING SQL FUNCTION.*not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_05(
a: str
) -> str
USING SQL FUNCTION 'lower';
''')
async def test_edgeql_userddl_06(self):
with self.assertRaisesRegex(
edgedb.InvalidFunctionDefinitionError,
r'cannot create.*test::func_06.*'
r'USING SQL.*not supported in '
r'user-defined functions'):
await self.con.execute('''
CREATE FUNCTION test::func_06(
a: str
) -> str
USING SQL $$ SELECT "a" $$;
''')
async def test_edgeql_userddl_07(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'user-defined operators are not supported'):
await self.con.execute('''
CREATE INFIX OPERATOR
std::`+` (l: std::str, r: std::str) -> std::str
USING SQL OPERATOR r'||';
''')
async def test_edgeql_userddl_08(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'user-defined casts are not supported'):
await self.con.execute('''
CREATE CAST FROM std::int64 TO std::duration {
USING SQL CAST;
ALLOW ASSIGNMENT;
};
''')
async def test_edgeql_userddl_09(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module std is read-only'):
await self.con.execute('''
CREATE FUNCTION std::func_09(
a: str
) -> str
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_10(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module math is read-only'):
await self.con.execute('''
CREATE FUNCTION math::func_10(
a: str
) -> str
USING EdgeQL $$
SELECT a
$$;
''')
async def test_edgeql_userddl_11(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module std is read-only'):
await self.con.execute('''
CREATE TYPE std::Foo_11;
''')
async def test_edgeql_userddl_12(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot create.*module math is read-only'):
await self.con.execute('''
CREATE TYPE math::Foo_11;
''')
async def test_edgeql_userddl_13(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module std is read-only'):
await self.con.execute('''
DROP TYPE std::Object;
''')
async def test_edgeql_userddl_14(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module stdgraphql is read-only'):
await self.con.execute('''
DROP TYPE stdgraphql::Query;
''')
async def test_edgeql_userddl_15(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot alter.*module std is read-only'):
await self.con.execute('''
ALTER TYPE std::Object {
CREATE PROPERTY foo_15 -> std::str;
};
''')
async def test_edgeql_userddl_16(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot alter.*module stdgraphql is read-only'):
await self.con.execute('''
ALTER TYPE stdgraphql::Query {
CREATE PROPERTY foo_15 -> std::str;
};
''')
async def test_edgeql_userddl_17(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module std is read-only'):
await self.con.execute('''
DROP MODULE std;
''')
async def test_edgeql_userddl_18(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r'cannot delete.*module math is read-only'):
await self.con.execute('''
DROP MODULE math;
''')
async def test_edgeql_userddl_19(self):
with self.assertRaisesRegex(
edgedb.UnsupportedFeatureError,
r'cannot create.*test::func_19.*'
r'SET OF parameters in user-defined EdgeQL '
r'functions are not supported'):
await self.con.execute('''
CREATE FUNCTION test::func_19(
a: SET OF str
) -> bool
USING EdgeQL $$
SELECT EXISTS a
$$;
''')
async def test_edgeql_userddl_20(self):
await self.con.execute('''
CREATE FUNCTION test::func_20(
a: str
) -> SET OF str
USING EdgeQL $$
SELECT {a, 'a'}
$$;
''')
await self.assert_query_result(
r'''
SELECT test::func_20('q');
''',
{'q', 'a'},
)
await self.assert_query_result(
r'''
SELECT count(test::func_20({'q', 'w'}));
''',
{4},
)
async def test_edgeql_userddl_21(self):
with self.assertRaisesRegex(
edgedb.SchemaDefinitionError,
r"'force_return_cast' is not a valid field"):
await self.con.execute('''
CREATE FUNCTION test::func(
a: str
) -> bool
{
USING EdgeQL $$
SELECT True;
$$;
SET force_return_cast := true;
};
''')
async def test_edgeql_userddl_22(self):
await self.con.execute('''
CREATE ABSTRACT CONSTRAINT test::uppercase {
CREATE ANNOTATION title := "Upper case constraint";
USING (str_upper(__subject__) = __subject__);
SET errmessage := "{__subject__} is not in upper case";
};
CREATE SCALAR TYPE test::upper_str EXTENDING str {
CREATE CONSTRAINT test::uppercase
};
''')
await self.assert_query_result(
r'''
SELECT <test::upper_str>'123_HELLO';
''',
{'123_HELLO'},
)
| true
| true
|
f70cb62b82822ffa1aaa43899d060bc7e23fed1e
| 11,377
|
py
|
Python
|
aoc2021/d18.py
|
jbudynek/advent-of-code
|
16ab71b110e9766b445bce3d3172b11d421b2f75
|
[
"CC0-1.0"
] | null | null | null |
aoc2021/d18.py
|
jbudynek/advent-of-code
|
16ab71b110e9766b445bce3d3172b11d421b2f75
|
[
"CC0-1.0"
] | null | null | null |
aoc2021/d18.py
|
jbudynek/advent-of-code
|
16ab71b110e9766b445bce3d3172b11d421b2f75
|
[
"CC0-1.0"
] | null | null | null |
# coding: utf-8
import copy
from timeit import default_timer as timer
import numpy as np
# Main function
##########
class Pair:
def __init__(self, lhs, rhs, parent):
self.lhs = lhs
self.rhs = rhs
self.parent = parent
def __str__(self):
return "["+str(self.lhs)+","+str(self.rhs)+"]"
class RegularNumber:
def __init__(self, value, parent):
self.value = value
self.parent = parent
def __str__(self):
return str(self.value)
def increase_depth(n, d_map):
if isinstance(n, RegularNumber):
return None
else:
if d_map[n] == 4:
return n
d_map[n.lhs] = d_map[n] + 1
d_map[n.rhs] = d_map[n] + 1
ret = increase_depth(n.lhs, d_map)
if ret != None:
return ret
ret = increase_depth(n.rhs, d_map)
if ret != None:
return ret
def find_deep_pair(root):
d_map = {}
d_map[root] = 0
pair = increase_depth(root, d_map)
return pair
def look_for_ten(n, n_map):
if isinstance(n, RegularNumber) and n.value >= 10:
return n
elif isinstance(n, RegularNumber):
return None
else:
ret = look_for_ten(n.lhs, n_map)
if ret != None:
return ret
ret = look_for_ten(n.rhs, n_map)
if ret != None:
return ret
def find_big_number(root):
n_map = {}
n_map[root] = 0
pair = look_for_ten(root, n_map)
return pair
def reduce(pair, DBG=True):
cont = True
while cont:
while cont:
cont = False
# If any pair is nested inside four pairs, the leftmost such pair explodes.
l_to_r = build_left_to_right(pair)
# find first pair that has depth >=4
to_explode = find_deep_pair(pair)
# explode
if to_explode != None:
explode(to_explode, l_to_r)
cont = True
cont = False
# If any regular number is 10 or greater, the leftmost such regular number splits
# find first reg num >= 10
bigger_than_ten = find_big_number(pair)
# split
if bigger_than_ten != None:
split(bigger_than_ten)
cont = True
def explore(n, l_to_r):
if isinstance(n, RegularNumber):
l_to_r.append(n)
else:
explore(n.lhs, l_to_r)
explore(n.rhs, l_to_r)
def build_left_to_right(root):
l_to_r = []
explore(root, l_to_r)
return l_to_r
def fing_reg_num_to_the_left(regnum, l_to_r):
l = len(l_to_r)
for i in range(l):
if l_to_r[i] == regnum and i > 0:
return l_to_r[i-1]
return None
def fing_reg_num_to_the_right(regnum, l_to_r):
l = len(l_to_r)
for i in range(l):
if l_to_r[i] == regnum and i < l-1:
return l_to_r[i+1]
return None
def explode(pair, l_to_r):
# To explode a pair, the pair's left value is added to the first regular number
# to the left of the exploding pair (if any), and the pair's right value is added
# to the first regular number to the right of the exploding pair (if any). Exploding pairs
# will always consist of two regular numbers. Then, the entire exploding pair is replaced
# with the regular number 0.
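    # Worked example (this is the first explode test case further below): in
    # [[[[[9,8],1],2],3],4] the leftmost pair nested four deep is [9,8]; 9 has no
    # regular number to its left, 8 is added to the 1 on its right, and the pair
    # itself is replaced by 0, giving [[[[0,9],2],3],4].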
regnum_left = fing_reg_num_to_the_left(pair.lhs, l_to_r)
regnum_right = fing_reg_num_to_the_right(pair.rhs, l_to_r)
if regnum_left != None:
regnum_left.value += pair.lhs.value
if regnum_right != None:
regnum_right.value += pair.rhs.value
if pair.parent.lhs == pair:
pair.parent.lhs = RegularNumber(0, pair.parent)
else:
pair.parent.rhs = RegularNumber(0, pair.parent)
def split(regnum):
# To split a regular number, replace it with a pair; the left element of the pair
# should be the regular number divided by two and rounded down, while the right
# element of the pair should be the regular number divided by two and rounded up.
# For example, 10 becomes [5,5], 11 becomes [5,6], 12 becomes [6,6], and so on.
newpair = Pair(None, None, None)
newpair.lhs = RegularNumber(regnum.value//2, newpair)
newpair.rhs = RegularNumber(
(regnum.value//2) + (regnum.value % 2), newpair)
if regnum.parent.lhs == regnum:
regnum.parent.lhs = newpair
newpair.parent = regnum.parent
else:
regnum.parent.rhs = newpair
newpair.parent = regnum.parent
def sf_add(lhsf, rhsf, DBG=True):
ret = Pair(lhsf, rhsf, None)
lhsf.parent = ret
rhsf.parent = ret
reduce(ret, DBG)
return ret
def parse_sf(lll, DBG=True):
idx = 0
l = len(lll)
root = Pair(None, None, None)
idx += 1
cur = root
while idx < l:
c = lll[idx]
if c == '[':
node = Pair(None, None, cur)
if cur.lhs == None:
cur.lhs = node
else:
cur.rhs = node
cur = node
elif c == ',':
cur = cur.parent
elif c == ']':
cur = cur.parent
else:
num = RegularNumber(int(c), cur)
if cur.lhs == None:
cur.lhs = num
else:
cur.rhs = num
cur = num
idx += 1
if DBG:
print(str(root))
return root
def magnitude(n):
if isinstance(n, RegularNumber):
return n.value
else:
return 3*magnitude(n.lhs)+2*magnitude(n.rhs)
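# The magnitude of a pair is 3 times the magnitude of its left element plus 2 times
# the magnitude of its right element, applied recursively. Worked example (checked in
# the magnitude tests below):
#   magnitude([[1,2],[[3,4],5]]) = 3*(3*1 + 2*2) + 2*(3*(3*3 + 2*4) + 2*5)
#                                = 3*7 + 2*(3*17 + 10) = 21 + 122 = 143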
def boom_part1(input_val, DBG=True):
sum_sf = parse_sf(input_val[0])
for lll in input_val[1:]:
to_add = parse_sf(lll, DBG)
new_sum_sf = sf_add(sum_sf, to_add, DBG)
if DBG:
print("= ", str(new_sum_sf))
sum_sf = new_sum_sf
return str(sum_sf)
def boom_part2(input_val, DBG=True):
all_fishes = []
sum_sf = parse_sf(input_val[0], DBG)
for lll in input_val:
all_fishes.append(parse_sf(lll, DBG))
l = len(all_fishes)
max_val = 0
for i in range(l):
for j in range(l):
if i != j:
max_val = max(max_val, magnitude(
sf_add(copy.deepcopy(all_fishes[i]), copy.deepcopy(all_fishes[j]))))
return max_val
# Testing and timing
##########
def print_time(t_start, t_end):
s = t_end-t_start
print(int(s*1000), "ms = ", int(s), "s = ", int(s/60), "min")
RED_FG = '\x1b[91m'
GREEN_FG = '\x1b[92m'
YELLOW_FG = '\x1b[93m'
DEFAULT_FG = '\x1b[39m'
def output_test(cc, t_start, t_end, result, expected):
result = str(result)
expected = str(expected)
flag = (result == expected)
sflag = ""
if flag == True:
sflag = GREEN_FG+str(flag)+DEFAULT_FG
else:
sflag = RED_FG+str(flag)+DEFAULT_FG
if(expected == "None"):
print("*** "+str(cc) + " *** -> Result = "+str(result))
else:
print("*** "+str(cc) + " *** -> Result = "+str(result) +
" -> success = " + sflag + " -> expected " + expected)
print_time(t_start, t_end)
return flag
def test_part1(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part1(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
def test_part2(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part2(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
# Test cases
##########
# tests explode
root = parse_sf('[[[[[9,8],1],2],3],4]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[[[0,9],2],3],4]
root = parse_sf('[7,[6,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [7,[6,[5,[7,0]]]]
root = parse_sf('[[6,[5,[4,[3,2]]]],1]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[6,[5,[7,0]]],3]
root = parse_sf('[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]
root = parse_sf('[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root)) # [[3,[2,[8,0]]],[9,[5,[7,0]]]]
# tests sums
tt1 = """[[[[4,3],4],4],[7,[[8,4],9]]]
[1,1]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[1,1],[2,2]],[3,3]],[4,4]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]
[5,5]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[3,0],[5,3]],[4,4]],[5,5]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]
[5,5]
[6,6]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[5,0],[7,4]],[5,5]],[6,6]]", True)
tt1 = """[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
[7,[5,[[3,8],[1,4]]]]
[[2,[2,2]],[8,[8,1]]]
[2,9]
[1,[[[9,3],9],[[9,0],[0,7]]]]
[[[5,[7,4]],7],1]
[[[[4,2],2],6],[8,7]]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]", True)
# Test magnitudes
mag = magnitude(parse_sf("[[1,2],[[3,4],5]]"))
print(mag, mag == 143)
mag = magnitude(parse_sf("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]"))
print(mag, mag == 1384)
mag = magnitude(parse_sf("[[[[1,1],[2,2]],[3,3]],[4,4]]"))
print(mag, mag == 445)
mag = magnitude(parse_sf("[[[[3,0],[5,3]],[4,4]],[5,5]]"))
print(mag, mag == 791)
mag = magnitude(parse_sf("[[[[5,0],[7,4]],[5,5]],[6,6]]"))
print(mag, mag == 1137)
mag = magnitude(
parse_sf("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]"))
print(mag, mag == 3488)
tt1 = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""
tt1 = tt1.splitlines()
test_part1(
tt1, "[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]", True)
mag = magnitude(
parse_sf("[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]"))
print(mag, mag == 4140)
# test part 2
tt1 = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""
tt1 = tt1.splitlines()
test_part2(tt1, 3993, True)
# Real data
##########
INPUT_FILE = "input-d18.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
puzzle_input = contents.splitlines()
f.close()
# part 1
t_start = timer()
ret = boom_part1(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
print(magnitude(parse_sf(ret)))
# part 2
t_start = timer()
ret = boom_part2(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
# PART 1 OK = 4137
# PART 2 OK = 4573
| 24.466667
| 94
| 0.54452
|
import copy
from timeit import default_timer as timer
import numpy as np
class Pair:
def __init__(self, lhs, rhs, parent):
self.lhs = lhs
self.rhs = rhs
self.parent = parent
def __str__(self):
return "["+str(self.lhs)+","+str(self.rhs)+"]"
class RegularNumber:
def __init__(self, value, parent):
self.value = value
self.parent = parent
def __str__(self):
return str(self.value)
def increase_depth(n, d_map):
if isinstance(n, RegularNumber):
return None
else:
if d_map[n] == 4:
return n
d_map[n.lhs] = d_map[n] + 1
d_map[n.rhs] = d_map[n] + 1
ret = increase_depth(n.lhs, d_map)
if ret != None:
return ret
ret = increase_depth(n.rhs, d_map)
if ret != None:
return ret
def find_deep_pair(root):
d_map = {}
d_map[root] = 0
pair = increase_depth(root, d_map)
return pair
def look_for_ten(n, n_map):
if isinstance(n, RegularNumber) and n.value >= 10:
return n
elif isinstance(n, RegularNumber):
return None
else:
ret = look_for_ten(n.lhs, n_map)
if ret != None:
return ret
ret = look_for_ten(n.rhs, n_map)
if ret != None:
return ret
def find_big_number(root):
n_map = {}
n_map[root] = 0
pair = look_for_ten(root, n_map)
return pair
def reduce(pair, DBG=True):
cont = True
while cont:
while cont:
cont = False
l_to_r = build_left_to_right(pair)
to_explode = find_deep_pair(pair)
if to_explode != None:
explode(to_explode, l_to_r)
cont = True
cont = False
bigger_than_ten = find_big_number(pair)
if bigger_than_ten != None:
split(bigger_than_ten)
cont = True
def explore(n, l_to_r):
if isinstance(n, RegularNumber):
l_to_r.append(n)
else:
explore(n.lhs, l_to_r)
explore(n.rhs, l_to_r)
def build_left_to_right(root):
l_to_r = []
explore(root, l_to_r)
return l_to_r
def fing_reg_num_to_the_left(regnum, l_to_r):
l = len(l_to_r)
for i in range(l):
if l_to_r[i] == regnum and i > 0:
return l_to_r[i-1]
return None
def fing_reg_num_to_the_right(regnum, l_to_r):
l = len(l_to_r)
for i in range(l):
if l_to_r[i] == regnum and i < l-1:
return l_to_r[i+1]
return None
def explode(pair, l_to_r):
# to the left of the exploding pair (if any), and the pair's right value is added
regnum_left = fing_reg_num_to_the_left(pair.lhs, l_to_r)
regnum_right = fing_reg_num_to_the_right(pair.rhs, l_to_r)
if regnum_left != None:
regnum_left.value += pair.lhs.value
if regnum_right != None:
regnum_right.value += pair.rhs.value
if pair.parent.lhs == pair:
pair.parent.lhs = RegularNumber(0, pair.parent)
else:
pair.parent.rhs = RegularNumber(0, pair.parent)
def split(regnum):
newpair = Pair(None, None, None)
newpair.lhs = RegularNumber(regnum.value//2, newpair)
newpair.rhs = RegularNumber(
(regnum.value//2) + (regnum.value % 2), newpair)
if regnum.parent.lhs == regnum:
regnum.parent.lhs = newpair
newpair.parent = regnum.parent
else:
regnum.parent.rhs = newpair
newpair.parent = regnum.parent
def sf_add(lhsf, rhsf, DBG=True):
ret = Pair(lhsf, rhsf, None)
lhsf.parent = ret
rhsf.parent = ret
reduce(ret, DBG)
return ret
def parse_sf(lll, DBG=True):
idx = 0
l = len(lll)
root = Pair(None, None, None)
idx += 1
cur = root
while idx < l:
c = lll[idx]
if c == '[':
node = Pair(None, None, cur)
if cur.lhs == None:
cur.lhs = node
else:
cur.rhs = node
cur = node
elif c == ',':
cur = cur.parent
elif c == ']':
cur = cur.parent
else:
num = RegularNumber(int(c), cur)
if cur.lhs == None:
cur.lhs = num
else:
cur.rhs = num
cur = num
idx += 1
if DBG:
print(str(root))
return root
def magnitude(n):
if isinstance(n, RegularNumber):
return n.value
else:
return 3*magnitude(n.lhs)+2*magnitude(n.rhs)
def boom_part1(input_val, DBG=True):
sum_sf = parse_sf(input_val[0])
for lll in input_val[1:]:
to_add = parse_sf(lll, DBG)
new_sum_sf = sf_add(sum_sf, to_add, DBG)
if DBG:
print("= ", str(new_sum_sf))
sum_sf = new_sum_sf
return str(sum_sf)
def boom_part2(input_val, DBG=True):
all_fishes = []
sum_sf = parse_sf(input_val[0], DBG)
for lll in input_val:
all_fishes.append(parse_sf(lll, DBG))
l = len(all_fishes)
max_val = 0
for i in range(l):
for j in range(l):
if i != j:
max_val = max(max_val, magnitude(
sf_add(copy.deepcopy(all_fishes[i]), copy.deepcopy(all_fishes[j]))))
return max_val
def print_time(t_start, t_end):
s = t_end-t_start
print(int(s*1000), "ms = ", int(s), "s = ", int(s/60), "min")
RED_FG = '\x1b[91m'
GREEN_FG = '\x1b[92m'
YELLOW_FG = '\x1b[93m'
DEFAULT_FG = '\x1b[39m'
def output_test(cc, t_start, t_end, result, expected):
result = str(result)
expected = str(expected)
flag = (result == expected)
sflag = ""
if flag == True:
sflag = GREEN_FG+str(flag)+DEFAULT_FG
else:
sflag = RED_FG+str(flag)+DEFAULT_FG
if(expected == "None"):
print("*** "+str(cc) + " *** -> Result = "+str(result))
else:
print("*** "+str(cc) + " *** -> Result = "+str(result) +
" -> success = " + sflag + " -> expected " + expected)
print_time(t_start, t_end)
return flag
def test_part1(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part1(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
def test_part2(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part2(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
root = parse_sf('[[[[[9,8],1],2],3],4]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root))
root = parse_sf('[7,[6,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root))
root = parse_sf('[[6,[5,[4,[3,2]]]],1]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root))
root = parse_sf('[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root))
root = parse_sf('[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]')
l_to_r = build_left_to_right(root)
to_explode = find_deep_pair(root)
explode(to_explode, l_to_r)
print(str(root))
tt1 = """[[[[4,3],4],4],[7,[[8,4],9]]]
[1,1]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[1,1],[2,2]],[3,3]],[4,4]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]
[5,5]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[3,0],[5,3]],[4,4]],[5,5]]", True)
tt1 = """[1,1]
[2,2]
[3,3]
[4,4]
[5,5]
[6,6]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[5,0],[7,4]],[5,5]],[6,6]]", True)
tt1 = """[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]
[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]
[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]
[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]
[7,[5,[[3,8],[1,4]]]]
[[2,[2,2]],[8,[8,1]]]
[2,9]
[1,[[[9,3],9],[[9,0],[0,7]]]]
[[[5,[7,4]],7],1]
[[[[4,2],2],6],[8,7]]"""
tt1 = tt1.splitlines()
test_part1(tt1, "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]", True)
mag = magnitude(parse_sf("[[1,2],[[3,4],5]]"))
print(mag, mag == 143)
mag = magnitude(parse_sf("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]"))
print(mag, mag == 1384)
mag = magnitude(parse_sf("[[[[1,1],[2,2]],[3,3]],[4,4]]"))
print(mag, mag == 445)
mag = magnitude(parse_sf("[[[[3,0],[5,3]],[4,4]],[5,5]]"))
print(mag, mag == 791)
mag = magnitude(parse_sf("[[[[5,0],[7,4]],[5,5]],[6,6]]"))
print(mag, mag == 1137)
mag = magnitude(
parse_sf("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]"))
print(mag, mag == 3488)
tt1 = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""
tt1 = tt1.splitlines()
test_part1(
tt1, "[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]", True)
mag = magnitude(
parse_sf("[[[[6,6],[7,6]],[[7,7],[7,0]]],[[[7,7],[7,7]],[[7,8],[9,9]]]]"))
print(mag, mag == 4140)
tt1 = """[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""
tt1 = tt1.splitlines()
test_part2(tt1, 3993, True)
INPUT_FILE = "input-d18.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
puzzle_input = contents.splitlines()
f.close()
t_start = timer()
ret = boom_part1(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
print(magnitude(parse_sf(ret)))
t_start = timer()
ret = boom_part2(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
| true
| true
|
f70cb65970c65875e3f0aa0fc55bcef6c0fa0575
| 3,752
|
py
|
Python
|
markdownextradata/plugin.py
|
timvink/mkdocs-markdownextradata-plugin
|
35e91288a97aee8b4302c4e06b140c21339c4ca0
|
[
"MIT"
] | null | null | null |
markdownextradata/plugin.py
|
timvink/mkdocs-markdownextradata-plugin
|
35e91288a97aee8b4302c4e06b140c21339c4ca0
|
[
"MIT"
] | null | null | null |
markdownextradata/plugin.py
|
timvink/mkdocs-markdownextradata-plugin
|
35e91288a97aee8b4302c4e06b140c21339c4ca0
|
[
"MIT"
] | null | null | null |
import os
import sys
import json
import yaml
import mkdocs
import logging
from mkdocs.plugins import BasePlugin
from mkdocs.utils import warning_filter
from jinja2 import Template
from pathlib import Path
from itertools import chain
log = logging.getLogger(__name__)
log.addFilter(warning_filter)
CONFIG_KEYS = ["site_name", "site_author", "site_url", "repo_url", "repo_name"]
if sys.version_info[0] >= 3:
str_type = str
else:
str_type = mkdocs.utils.string_types
class MarkdownExtraDataPlugin(BasePlugin):
"""
Inject certain config variables into the markdown
"""
config_scheme = (
("data", mkdocs.config.config_options.Type(str_type, default=None)),
)
def __add_data__(self, config, namespace, data):
# creates the namespace and adds the data there
namespace = ["extra"] + namespace.split(os.sep)
holder = config
while len(namespace) > 1:
if not namespace[0] in holder:
holder[namespace[0]] = {}
holder = holder[namespace[0]]
del namespace[0]
holder[namespace[0]] = data
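    # Illustrative example (hypothetical file name): a data file at
    # <data folder>/team/members.yaml arrives here with namespace "team/members",
    # so its contents end up under config["extra"]["team"]["members"].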
def on_pre_build(self, config):
# Loads all data from the supplied data directories
# or, otherwise a _data directory next to mkdocs.yml and/or inside the docs_dir.
# Does nothing if the dir does not exist.
# assume an empty list if not defined
data_source_folders = self.config.get("data")
        # cast to a list if it is defined as a comma-separated string
if isinstance(data_source_folders, str):
data_source_folders = data_source_folders.split(',')
        # if we have no value, then proceed to look in the default folders
        # and assume a _data folder, adding it to the list of folders to check
        if not data_source_folders:
            data_source_folders = []
            for datadir in [
os.path.dirname(config["config_file_path"]),
config["docs_dir"],
]:
ds_folder = os.path.join(datadir, "_data")
if os.path.exists(ds_folder):
data_source_folders.append(ds_folder)
if not data_source_folders:
return
        # iterate over the list of folders and look for data files
for ds_folder in data_source_folders:
if os.path.exists(ds_folder):
path = Path(ds_folder)
for filename in chain(
path.glob("**/*.yaml"),
path.glob("**/*.yml"),
path.glob("**/*.json"),
):
namespace = os.path.splitext(os.path.relpath(filename, ds_folder))[0]
# add data into dict based on its path as a namespace
self.__add_data__(
config,
namespace,
(
yaml.load(filename.read_bytes(), Loader=yaml.FullLoader)
if filename.suffix in [".yml", ".yaml"]
else json.loads(filename.read_bytes())
),
)
def on_page_read_source(self, page, config, **kwargs):
context = {key: config.get(key) for key in CONFIG_KEYS if key in config}
context.update(config.get("extra", {}))
try:
with open(page.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:
md_template = Template(f.read())
                return md_template.render(**context)
except OSError:
            log.error('File not found: {}'.format(page.file.src_path))
raise
except ValueError:
            log.error('Encoding error reading file: {}'.format(page.file.src_path))
raise
| 36.076923
| 95
| 0.577026
|
import os
import sys
import json
import yaml
import mkdocs
import logging
from mkdocs.plugins import BasePlugin
from mkdocs.utils import warning_filter
from jinja2 import Template
from pathlib import Path
from itertools import chain
log = logging.getLogger(__name__)
log.addFilter(warning_filter)
CONFIG_KEYS = ["site_name", "site_author", "site_url", "repo_url", "repo_name"]
if sys.version_info[0] >= 3:
str_type = str
else:
str_type = mkdocs.utils.string_types
class MarkdownExtraDataPlugin(BasePlugin):
config_scheme = (
("data", mkdocs.config.config_options.Type(str_type, default=None)),
)
def __add_data__(self, config, namespace, data):
namespace = ["extra"] + namespace.split(os.sep)
holder = config
while len(namespace) > 1:
if not namespace[0] in holder:
holder[namespace[0]] = {}
holder = holder[namespace[0]]
del namespace[0]
holder[namespace[0]] = data
def on_pre_build(self, config):
data_source_folders = self.config.get("data")
if isinstance(data_source_folders, str):
data_source_folders = data_source_folders.split(',')
        if not data_source_folders:
            data_source_folders = []
            for datadir in [
os.path.dirname(config["config_file_path"]),
config["docs_dir"],
]:
ds_folder = os.path.join(datadir, "_data")
if os.path.exists(ds_folder):
data_source_folders.append(ds_folder)
if not data_source_folders:
return
for ds_folder in data_source_folders:
if os.path.exists(ds_folder):
path = Path(ds_folder)
for filename in chain(
path.glob("**/*.yaml"),
path.glob("**/*.yml"),
path.glob("**/*.json"),
):
namespace = os.path.splitext(os.path.relpath(filename, ds_folder))[0]
self.__add_data__(
config,
namespace,
(
yaml.load(filename.read_bytes(), Loader=yaml.FullLoader)
if filename.suffix in [".yml", ".yaml"]
else json.loads(filename.read_bytes())
),
)
def on_page_read_source(self, page, config, **kwargs):
context = {key: config.get(key) for key in CONFIG_KEYS if key in config}
context.update(config.get("extra", {}))
try:
with open(page.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:
md_template = Template(f.read())
                return md_template.render(**context)
except OSError:
            log.error('File not found: {}'.format(page.file.src_path))
raise
except ValueError:
            log.error('Encoding error reading file: {}'.format(page.file.src_path))
raise
| true
| true
|
f70cb6789bae3b78fdb0e73196234cc5eaacd775
| 543
|
py
|
Python
|
graphql/flask-graphql-basic_example2/app.py
|
CALlanoR/virtual_environments
|
90214851d6c3760e1a4afb48017bb7f91593e29e
|
[
"Apache-2.0"
] | null | null | null |
graphql/flask-graphql-basic_example2/app.py
|
CALlanoR/virtual_environments
|
90214851d6c3760e1a4afb48017bb7f91593e29e
|
[
"Apache-2.0"
] | 1
|
2022-03-02T14:54:47.000Z
|
2022-03-02T14:54:47.000Z
|
graphql/flask-graphql-basic_example2/app.py
|
CALlanoR/virtual_environments
|
90214851d6c3760e1a4afb48017bb7f91593e29e
|
[
"Apache-2.0"
] | 1
|
2017-03-16T14:58:03.000Z
|
2017-03-16T14:58:03.000Z
|
from flask import Flask
from flask_graphql import GraphQLView
from models import db_session
from schema import schema, Department
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return '<p> Hello World!</p>'
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view(
'graphql',
schema=schema,
graphiql=True # for having the GraphiQL interface
)
)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
if __name__ == '__main__':
app.run()
| 18.724138
| 57
| 0.694291
|
from flask import Flask
from flask_graphql import GraphQLView
from models import db_session
from schema import schema, Department
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return '<p> Hello World!</p>'
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view(
'graphql',
schema=schema,
        graphiql=True
    )
)
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
if __name__ == '__main__':
app.run()
| true
| true
|
f70cb75e06707f355bad7ef96e6deb9bd473dc24
| 3,291
|
py
|
Python
|
MICRO_CPU_profiling/plot_SIFT1000M/plot_profiling_experiment_4_nprobe.py
|
WenqiJiang/faiss-cpu-profiling
|
f2c7b3051f8860e8918c713ef4baddd563cc515c
|
[
"MIT"
] | null | null | null |
MICRO_CPU_profiling/plot_SIFT1000M/plot_profiling_experiment_4_nprobe.py
|
WenqiJiang/faiss-cpu-profiling
|
f2c7b3051f8860e8918c713ef4baddd563cc515c
|
[
"MIT"
] | null | null | null |
MICRO_CPU_profiling/plot_SIFT1000M/plot_profiling_experiment_4_nprobe.py
|
WenqiJiang/faiss-cpu-profiling
|
f2c7b3051f8860e8918c713ef4baddd563cc515c
|
[
"MIT"
] | null | null | null |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
from analyze_perf import group_perf_by_events, filter_events_after_timestamp, \
classify_events_by_stages, get_percentage
from profiling_stages import draw_profiling_plot
x_labels = ['OPQ16,IVF262144\nnprobe=1', \
'OPQ16,IVF262144\nnprobe=2', \
'OPQ16,IVF262144\nnprobe=4', \
'OPQ16,IVF262144\nnprobe=8', \
'OPQ16,IVF262144\nnprobe=16', \
'OPQ16,IVF262144\nnprobe=32', \
'OPQ16,IVF262144\nnprobe=64', \
'OPQ16,IVF262144\nnprobe=128']
file_prefixes = [ \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_1_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_2_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_4_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_8_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_16_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_32_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_64_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_128_qbs_10000']
assert len(x_labels) == len(file_prefixes)
path_prefixes = []
for p in file_prefixes:
path_prefixes.append(os.path.join('../result_experiment_4_nprobe', p))
# time range of the search function, according to the search log, e.g.,
# time_bias_start = 135.656
# time_bias_end = 200.659
time_ranges = [ # pair of (time_bias_start, time_bias_end)
# ==== nprobe=1 ====
(28.157, 36.409),
# ==== nprobe=2 ====
(28.017, 35.706),
# ==== nprobe=4 ====
(27.268, 35.276),
# ==== nprobe=8 ====
(28.237, 37.730),
# ==== nprobe=16 ====
(27.252, 38.686),
# ==== nprobe=32 ====
(27.234, 43.001),
# ==== nprobe=64 ====
(27.344, 52.246),
# ==== nprobe=128 ====
(27.443, 69.042)]
# Stage 1: OPQ
# Stage 2: vector quantizer
# Stage 3: select centroids
# Stage 4: construct distance LUT
# Stage 5: PQ code scan
# Stage 6: collect topK results
profile_perc_array = []
# example_profile_array = [
# # 100M, 1
# [8.606278140845747, 0.11607633274229297, 3.3378707089447355, 78.57136070072978, 9.368414116737446], \
# # 100M, 10
# [32.7008185883583, 0.5164703077320218, 4.674772663594282, 33.70847203114799, 28.399466409167403]
# ]
for i in range(len(path_prefixes)):
print("Processing {}".format(path_prefixes[i]))
all_events = group_perf_by_events(path_prefixes[i])
time_bias_start, time_bias_end = time_ranges[i][0], time_ranges[i][1]
filtered_events = filter_events_after_timestamp(all_events, time_bias_start, time_bias_end)
t_1_4, t_5, t_6, t_other = classify_events_by_stages(filtered_events, track_non_faiss_func=False, remove_unrecognized_faiss_function=False)
p_1_4, p_5, p_6, p_other = get_percentage(t_1_4, t_5, t_6, t_other)
profile_perc_array.append([p_1_4, p_5, p_6, p_other])
y_stage_1_4 = [r[0] for r in profile_perc_array]
y_stage_5 = [r[1] for r in profile_perc_array]
y_stage_6 = [r[2] for r in profile_perc_array]
y_other = [r[3] for r in profile_perc_array]
draw_profiling_plot(x_labels, y_stage_1_4, y_stage_5, y_stage_6, y_other, 'cpu_profile_experiment_4_nprobe_SIFT1000M', x_tick_rotation=45)
| 36.566667
| 143
| 0.718323
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
from analyze_perf import group_perf_by_events, filter_events_after_timestamp, \
classify_events_by_stages, get_percentage
from profiling_stages import draw_profiling_plot
x_labels = ['OPQ16,IVF262144\nnprobe=1', \
'OPQ16,IVF262144\nnprobe=2', \
'OPQ16,IVF262144\nnprobe=4', \
'OPQ16,IVF262144\nnprobe=8', \
'OPQ16,IVF262144\nnprobe=16', \
'OPQ16,IVF262144\nnprobe=32', \
'OPQ16,IVF262144\nnprobe=64', \
'OPQ16,IVF262144\nnprobe=128']
file_prefixes = [ \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_1_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_2_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_4_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_8_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_16_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_32_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_64_qbs_10000', \
'perf.out_SIFT1000M_OPQ16,IVF262144,PQ16_K_100_nprobe_128_qbs_10000']
assert len(x_labels) == len(file_prefixes)
path_prefixes = []
for p in file_prefixes:
path_prefixes.append(os.path.join('../result_experiment_4_nprobe', p))
time_ranges = [ (28.157, 36.409),
(28.017, 35.706),
(27.268, 35.276),
(28.237, 37.730),
(27.252, 38.686),
(27.234, 43.001),
(27.344, 52.246),
(27.443, 69.042)]
profile_perc_array = []
for i in range(len(path_prefixes)):
print("Processing {}".format(path_prefixes[i]))
all_events = group_perf_by_events(path_prefixes[i])
time_bias_start, time_bias_end = time_ranges[i][0], time_ranges[i][1]
filtered_events = filter_events_after_timestamp(all_events, time_bias_start, time_bias_end)
t_1_4, t_5, t_6, t_other = classify_events_by_stages(filtered_events, track_non_faiss_func=False, remove_unrecognized_faiss_function=False)
p_1_4, p_5, p_6, p_other = get_percentage(t_1_4, t_5, t_6, t_other)
profile_perc_array.append([p_1_4, p_5, p_6, p_other])
y_stage_1_4 = [r[0] for r in profile_perc_array]
y_stage_5 = [r[1] for r in profile_perc_array]
y_stage_6 = [r[2] for r in profile_perc_array]
y_other = [r[3] for r in profile_perc_array]
draw_profiling_plot(x_labels, y_stage_1_4, y_stage_5, y_stage_6, y_other, 'cpu_profile_experiment_4_nprobe_SIFT1000M', x_tick_rotation=45)
| true
| true
|
f70cb7897a6a577a01800875e9d620824adf7fe5
| 784
|
py
|
Python
|
p038.py
|
piohhmy/euler
|
f1b548a28a503fb8a0878fda75c32e1dcfb33243
|
[
"MIT"
] | null | null | null |
p038.py
|
piohhmy/euler
|
f1b548a28a503fb8a0878fda75c32e1dcfb33243
|
[
"MIT"
] | null | null | null |
p038.py
|
piohhmy/euler
|
f1b548a28a503fb8a0878fda75c32e1dcfb33243
|
[
"MIT"
] | null | null | null |
def concat_multiples(num, multiples):
return int("".join([str(num*multiple) for multiple in range(1,multiples+1)]))
def is_pandigital(num):
return sorted([int(digit) for digit in str(num)]) == list(range(1,10))
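# Illustrative example, taken from the Project Euler problem statement:
# concat_multiples(192, 3) concatenates 192*1, 192*2 and 192*3 into 192384576,
# and is_pandigital(192384576) is True since every digit from 1 to 9 appears exactly once.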
def solve_p038():
    # retrieve only 9-digit concatenations of multiples where n = (1,2,...,n)
n6 = [concat_multiples(num, 6) for num in [3]]
n5 = [concat_multiples(num, 5) for num in range(5,10)]
n4 = [concat_multiples(num, 4) for num in range(25,33)]
n3 = [concat_multiples(num, 3) for num in range(100,333)]
n2 = [concat_multiples(num, 2) for num in range(5000,9999)]
all_concats = set(n2 + n3 + n4 + n5 + n6)
return max([num for num in all_concats if is_pandigital(num)])
if __name__ == '__main__':
print((solve_p038()))
| 37.333333
| 81
| 0.659439
|
def concat_multiples(num, multiples):
return int("".join([str(num*multiple) for multiple in range(1,multiples+1)]))
def is_pandigital(num):
return sorted([int(digit) for digit in str(num)]) == list(range(1,10))
def solve_p038():
n6 = [concat_multiples(num, 6) for num in [3]]
n5 = [concat_multiples(num, 5) for num in range(5,10)]
n4 = [concat_multiples(num, 4) for num in range(25,33)]
n3 = [concat_multiples(num, 3) for num in range(100,333)]
n2 = [concat_multiples(num, 2) for num in range(5000,9999)]
all_concats = set(n2 + n3 + n4 + n5 + n6)
return max([num for num in all_concats if is_pandigital(num)])
if __name__ == '__main__':
print((solve_p038()))
| true
| true
|
f70cb81d6cb4681978d9f6a592d2d4785bab8056
| 4,159
|
py
|
Python
|
utils/UserUtils.py
|
vishu221b/bookme-flask-REST-API-Collection
|
9ee923e13d786af9b11421370edac718743855af
|
[
"MIT"
] | null | null | null |
utils/UserUtils.py
|
vishu221b/bookme-flask-REST-API-Collection
|
9ee923e13d786af9b11421370edac718743855af
|
[
"MIT"
] | null | null | null |
utils/UserUtils.py
|
vishu221b/bookme-flask-REST-API-Collection
|
9ee923e13d786af9b11421370edac718743855af
|
[
"MIT"
] | null | null | null |
import models
import json
import re
import constants.userConstants as UserConstants
from enums import UserEnums
from databaseService.bookDatabaseService import BookDatabaseService
def validate_and_convert_new_user_request_object(aa: dict, bb: models.User):
for field in UserConstants.USER_MANDATORY_FIELDS:
if field not in aa.keys():
return f"Required field {field} is missing"
return bb.from_json(json.dumps(aa)) # returns a dictionary
def convert_update_request_for_persistence(user_request, user_object):
try:
user_object.last_name = user_request.get(
'last_name') if user_request.get('last_name') else user_object.last_name
user_object.first_name = user_request.get(
'first_name') if user_request.get('first_name') else user_object.first_name
user_object.date_of_birth = user_request.get(
'date_of_birth') if user_request.get('date_of_birth') else user_object.date_of_birth
user_object.phone_number = user_request.get(
'phone_number') if user_request.get('phone_number') else user_object.phone_number
user_object.email = user_request.get(
'email') if user_request.get('email') else user_object.email
user_object.username = user_request.get(
'username') if user_request.get('username') else user_object.username
user_object.alt_username = user_request.get(
'email').rsplit('@')[0] if user_request.get('email') else user_object.alt_username
return user_object
except Exception as e:
return e
def convert_email_update_request_for_persistence(user_request, user_object):
user_object.email = user_request.get('newEmail')
return user_object
def convert_user_dto_to_public_response_dto(user):
try:
response_dto = dict()
response_dto.setdefault('id', str(user.get('id')))
response_dto.setdefault('first_name', user.get('first_name'))
response_dto.setdefault('last_name', user.get('last_name') if user.get('last_name') else "")
response_dto.setdefault('date_of_birth', str(user.get('date_of_birth')))
response_dto.setdefault('email', user.get('email'))
response_dto.setdefault('username', user.get('username'))
return response_dto
except Exception as e:
print("DEBUG: Exception occurred in _USER_DTO_PUBLIC - {}".format(e))
return "There was some error."
def convert_request_to_user_update_dto(request_dto, user_identity):
try:
response_user = clone_dto(user_identity)
for field in UserConstants.USER_FIELDS_FOR_DETAILS_UPDATE:
if field is not None:
response_user[field] = request_dto[field]
return response_user
except Exception as e:
return "Error: {}".format(e)
def clone_dto(user):
_cloned_response = {}
for field in user.keys():
_cloned_response.setdefault(field, user.get(field))
return _cloned_response
def is_length_valid_for_id_in_request(mongo_id) -> bool:
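    # A MongoDB ObjectId is 12 bytes, i.e. 24 hex characters as a string; 12*2 below is
    # that expected length. The function returns True when the length differs from 24.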
    if len(mongo_id) != 12*2:
return True
return False
def validate_min_length(value, limit):
if len(value) < int(limit):
return False
return True
def verify_username_length(curr, new):
if len(curr) < UserEnums.MIN_USER_NAME_LENGTH.value or len(new) < UserEnums.MIN_USER_NAME_LENGTH.value:
return [{'error': 'Invalid username length. Minimum username length should be 4.'}, 404]
return False
def verify_email_length(curr, new):
if len(curr) < UserEnums.MIN_EMAIL_LENGTH.value or len(new) < UserEnums.MIN_EMAIL_LENGTH.value:
return [
{
'error':
'Invalid email length. Minimum email length should be 6. Please check your email and try again.'
}, 404
]
return False
def get_user_favourite_books(user):
book_service = BookDatabaseService()
book_bucket = list()
for book in user.fav_books:
book_bucket.append(
(
book_service.find_active_book_by_id(book.id)
)
)
return book_bucket
| 36.482456
| 116
| 0.689108
|
import models
import json
import re
import constants.userConstants as UserConstants
from enums import UserEnums
from databaseService.bookDatabaseService import BookDatabaseService
def validate_and_convert_new_user_request_object(aa: dict, bb: models.User):
for field in UserConstants.USER_MANDATORY_FIELDS:
if field not in aa.keys():
return f"Required field {field} is missing"
return bb.from_json(json.dumps(aa))
def convert_update_request_for_persistence(user_request, user_object):
try:
user_object.last_name = user_request.get(
'last_name') if user_request.get('last_name') else user_object.last_name
user_object.first_name = user_request.get(
'first_name') if user_request.get('first_name') else user_object.first_name
user_object.date_of_birth = user_request.get(
'date_of_birth') if user_request.get('date_of_birth') else user_object.date_of_birth
user_object.phone_number = user_request.get(
'phone_number') if user_request.get('phone_number') else user_object.phone_number
user_object.email = user_request.get(
'email') if user_request.get('email') else user_object.email
user_object.username = user_request.get(
'username') if user_request.get('username') else user_object.username
user_object.alt_username = user_request.get(
'email').rsplit('@')[0] if user_request.get('email') else user_object.alt_username
return user_object
except Exception as e:
return e
def convert_email_update_request_for_persistence(user_request, user_object):
user_object.email = user_request.get('newEmail')
return user_object
def convert_user_dto_to_public_response_dto(user):
try:
response_dto = dict()
response_dto.setdefault('id', str(user.get('id')))
response_dto.setdefault('first_name', user.get('first_name'))
response_dto.setdefault('last_name', user.get('last_name') if user.get('last_name') else "")
response_dto.setdefault('date_of_birth', str(user.get('date_of_birth')))
response_dto.setdefault('email', user.get('email'))
response_dto.setdefault('username', user.get('username'))
return response_dto
except Exception as e:
print("DEBUG: Exception occurred in _USER_DTO_PUBLIC - {}".format(e))
return "There was some error."
def convert_request_to_user_update_dto(request_dto, user_identity):
try:
response_user = clone_dto(user_identity)
for field in UserConstants.USER_FIELDS_FOR_DETAILS_UPDATE:
if field is not None:
response_user[field] = request_dto[field]
return response_user
except Exception as e:
return "Error: {}".format(e)
def clone_dto(user):
_cloned_response = {}
for field in user.keys():
_cloned_response.setdefault(field, user.get(field))
return _cloned_response
def is_length_valid_for_id_in_request(mongo_id) -> bool:
    if len(mongo_id) != 12*2:
return True
return False
def validate_min_length(value, limit):
if len(value) < int(limit):
return False
return True
def verify_username_length(curr, new):
if len(curr) < UserEnums.MIN_USER_NAME_LENGTH.value or len(new) < UserEnums.MIN_USER_NAME_LENGTH.value:
return [{'error': 'Invalid username length. Minimum username length should be 4.'}, 404]
return False
def verify_email_length(curr, new):
if len(curr) < UserEnums.MIN_EMAIL_LENGTH.value or len(new) < UserEnums.MIN_EMAIL_LENGTH.value:
return [
{
'error':
'Invalid email length. Minimum email length should be 6. Please check your email and try again.'
}, 404
]
return False
def get_user_favourite_books(user):
book_service = BookDatabaseService()
book_bucket = list()
for book in user.fav_books:
book_bucket.append(
(
book_service.find_active_book_by_id(book.id)
)
)
return book_bucket
| true
| true
|
f70cb8387afcc1faddc7425cb48245392fbb3a47
| 779
|
py
|
Python
|
django/conf/locale/fa/formats.py
|
benjaoming/django
|
6dbe979b4d9396e1b307c7d27388c97c13beb21c
|
[
"BSD-3-Clause"
] | 2
|
2015-01-21T15:45:07.000Z
|
2015-02-21T02:38:13.000Z
|
django/conf/locale/fa/formats.py
|
HenriqueLR/django
|
d1ca70110f49f0be90206c8da516ac16aebc8c75
|
[
"BSD-3-Clause"
] | null | null | null |
django/conf/locale/fa/formats.py
|
HenriqueLR/django
|
d1ca70110f49f0be90206c8da516ac16aebc8c75
|
[
"BSD-3-Clause"
] | 1
|
2021-03-06T14:22:00.000Z
|
2021-03-06T14:22:00.000Z
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j F Y، ساعت G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y/n/j'
SHORT_DATETIME_FORMAT = 'Y/n/j، G:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
| 31.16
| 77
| 0.750963
|
from __future__ import unicode_literals
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j F Y، ساعت G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'Y/n/j'
SHORT_DATETIME_FORMAT = 'Y/n/j، G:i'
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
| true
| true
|
f70cb844699855bb0927d27e9ebd6c814ab45767
| 584
|
py
|
Python
|
HW_Controls/test_servo_sanity_check.py
|
NerdboyQ/Raspberry_Pi_Learning
|
ad6d9e658d456bbce9a8d09f6bc983f66d8ecbf3
|
[
"MIT"
] | null | null | null |
HW_Controls/test_servo_sanity_check.py
|
NerdboyQ/Raspberry_Pi_Learning
|
ad6d9e658d456bbce9a8d09f6bc983f66d8ecbf3
|
[
"MIT"
] | null | null | null |
HW_Controls/test_servo_sanity_check.py
|
NerdboyQ/Raspberry_Pi_Learning
|
ad6d9e658d456bbce9a8d09f6bc983f66d8ecbf3
|
[
"MIT"
] | 1
|
2021-07-18T18:16:34.000Z
|
2021-07-18T18:16:34.000Z
|
from car_ctrl import servo
import time
#max angle turns right
#0 turns left
def test_servo_rotation():
s = servo()
print(vars(s))
print("max_angle: " +str(s.max_angle))
print("slope: " +str(s.slope))
for i in range(0,3):
s.steer(s.max_angle)
print("turning left")
time.sleep(0.5)
for i in range(0,3):
s.steer(0)
time.sleep(0.5)
print("turning right")
for i in range(0,3):
s.steer(s.max_angle)
time.sleep(0.5)
print("Return to center")
s.kill_servo()
test_servo_rotation()
| 20.857143
| 42
| 0.578767
|
from car_ctrl import servo
import time
def test_servo_rotation():
s = servo()
print(vars(s))
print("max_angle: " +str(s.max_angle))
print("slope: " +str(s.slope))
for i in range(0,3):
s.steer(s.max_angle)
print("turning left")
time.sleep(0.5)
for i in range(0,3):
s.steer(0)
time.sleep(0.5)
print("turning right")
for i in range(0,3):
s.steer(s.max_angle)
time.sleep(0.5)
print("Return to center")
s.kill_servo()
test_servo_rotation()
| true
| true
|
f70cb881eeadfc9235bf32aed10635460d74828d
| 644
|
py
|
Python
|
config.py
|
millywayne/PITCH-NOW
|
67f5a5ae7e4d99c5ed2eb87a0335f89372c0813b
|
[
"MIT"
] | null | null | null |
config.py
|
millywayne/PITCH-NOW
|
67f5a5ae7e4d99c5ed2eb87a0335f89372c0813b
|
[
"MIT"
] | null | null | null |
config.py
|
millywayne/PITCH-NOW
|
67f5a5ae7e4d99c5ed2eb87a0335f89372c0813b
|
[
"MIT"
] | null | null | null |
import os
class Config:
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = os.environ.get('SECRET_KEY')
UPLOADED_PHOTOS_DEST = 'app/static/photos'
MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 465  # Gmail SMTP over SSL uses port 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig
}
| 23
| 60
| 0.717391
|
import os
class Config:
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SECRET_KEY = os.environ.get('SECRET_KEY')
UPLOADED_PHOTOS_DEST = 'app/static/photos'
MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig
}
| true
| true
|
f70cb9155b794d8ba907ec634161848c361b78f1
| 3,671
|
py
|
Python
|
scrape_mars.py
|
tsubedy/web-scraping-challenge
|
785faf0b9855086dedaef1c1df2ea7b81a0dde4b
|
[
"MIT"
] | null | null | null |
scrape_mars.py
|
tsubedy/web-scraping-challenge
|
785faf0b9855086dedaef1c1df2ea7b81a0dde4b
|
[
"MIT"
] | null | null | null |
scrape_mars.py
|
tsubedy/web-scraping-challenge
|
785faf0b9855086dedaef1c1df2ea7b81a0dde4b
|
[
"MIT"
] | null | null | null |
# Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
from pprint import pprint
from urllib.parse import urlsplit
import pymongo
# Initialize PyMongo to work with MongoDBs
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
# Define database and collection
db = client.mars_db
collection = db.items
def init_browser():
# capture path to chrome driver
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
def scrape_info():
browser = init_browser()
mars_info = {}
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# scrape latest news headline and para
news_title=soup.find('ul', class_='item_list').\
find('li', class_='slide').\
find('div', class_= 'content_title').text
news_para=soup.find("div", class_='article_teaser_body').text
mars_info['news_title'] = news_title
mars_info['news_para'] = news_para
# Featured image
featured_image = "https://www.nasa.gov/image-feature/jpl/perseverance-s-first-full-color-look-at-mars"
browser.visit(featured_image)
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(featured_image))
# click on featured image using xpath
xpath = '//*[@id="468477"]/div[2]/div[2]/a/img'
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
#get image url using BeautifulSoup
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
featured_img_url = soup.find('img')['src']
mars_info['featured_img_url'] = featured_img_url
# Mars Facts
url_facts = "https://space-facts.com/mars/"
table = pd.read_html(url_facts)
table[0]
df_mars_facts = table[0]
df_mars_facts.columns = ["Parameter", "Values"]
fact_table = df_mars_facts.set_index(["Parameter"])
mars_html_table = fact_table.to_html()
mars_html_table = mars_html_table.replace("\n", "")
mars_info['mars_facts_table'] = mars_html_table
# Mars Hemisphere
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
#Get base url
hemisphere_base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(hemisphere_url))
# list of xpaths for mars hemispheres
xpaths = ['//*[@id="product-section"]/div[2]/div[1]/a/img', '//*[@id="product-section"]/div[2]/div[2]/a/img', '//*[@id="product-section"]/div[2]/div[3]/a/img', '//*[@id="product-section"]/div[2]/div[4]/a/img']
hemisphere_img_urls = []
for xpath in xpaths:
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
#get image url using BeautifulSoup
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
img_url = soup.find("img", class_='wide-image')["src"]
time.sleep(1)
img_url = hemisphere_base_url + img_url
title = soup.find("h2",class_="title").text
hemisphere_img_urls.append({'title': title, 'image_url':img_url})
mars_info['hemisphere_img_urls'] = hemisphere_img_urls
browser.quit()
# collection.insert_one(mars_info)
return mars_info
| 29.604839
| 213
| 0.6693
|
from splinter import Browser
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import time
import pandas as pd
from pprint import pprint
from urllib.parse import urlsplit
import pymongo
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.mars_db
collection = db.items
def init_browser():
executable_path = {'executable_path': ChromeDriverManager().install()}
return Browser('chrome', **executable_path, headless=False)
def scrape_info():
browser = init_browser()
mars_info = {}
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
news_title=soup.find('ul', class_='item_list').\
find('li', class_='slide').\
find('div', class_= 'content_title').text
news_para=soup.find("div", class_='article_teaser_body').text
mars_info['news_title'] = news_title
mars_info['news_para'] = news_para
featured_image = "https://www.nasa.gov/image-feature/jpl/perseverance-s-first-full-color-look-at-mars"
browser.visit(featured_image)
base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(featured_image))
xpath = '//*[@id="468477"]/div[2]/div[2]/a/img'
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
featured_img_url = soup.find('img')['src']
mars_info['featured_img_url'] = featured_img_url
url_facts = "https://space-facts.com/mars/"
table = pd.read_html(url_facts)
table[0]
df_mars_facts = table[0]
df_mars_facts.columns = ["Parameter", "Values"]
fact_table = df_mars_facts.set_index(["Parameter"])
mars_html_table = fact_table.to_html()
mars_html_table = mars_html_table.replace("\n", "")
mars_info['mars_facts_table'] = mars_html_table
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
hemisphere_base_url = "{0.scheme}://{0.netloc}/".format(urlsplit(hemisphere_url))
xpaths = ['//*[@id="product-section"]/div[2]/div[1]/a/img', '//*[@id="product-section"]/div[2]/div[2]/a/img', '//*[@id="product-section"]/div[2]/div[3]/a/img', '//*[@id="product-section"]/div[2]/div[4]/a/img']
hemisphere_img_urls = []
for xpath in xpaths:
hemisphere_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
browser.visit(hemisphere_url)
results = browser.find_by_xpath(xpath)
img = results[0]
img.click()
time.sleep(1)
html_image = browser.html
soup = BeautifulSoup(html_image, "html.parser")
img_url = soup.find("img", class_='wide-image')["src"]
time.sleep(1)
img_url = hemisphere_base_url + img_url
title = soup.find("h2",class_="title").text
hemisphere_img_urls.append({'title': title, 'image_url':img_url})
mars_info['hemisphere_img_urls'] = hemisphere_img_urls
browser.quit()
return mars_info
| true
| true
|
f70cb9cb3840540bacd7f076130cfadbabaaf32f
| 812
|
py
|
Python
|
diff_of_files.py
|
mairieli/botSE-2019
|
bfcda1197fccd05650db1e37c85c43db9e28b26d
|
[
"MIT"
] | null | null | null |
diff_of_files.py
|
mairieli/botSE-2019
|
bfcda1197fccd05650db1e37c85c43db9e28b26d
|
[
"MIT"
] | 1
|
2020-11-06T18:47:10.000Z
|
2020-11-19T18:51:29.000Z
|
diff_of_files.py
|
mairieli/botSE-2019
|
bfcda1197fccd05650db1e37c85c43db9e28b26d
|
[
"MIT"
] | null | null | null |
from pydriller import RepositoryMining
import iocsv
import csv
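# Walk every repository listed in data_filtered.csv and, for each commit that modifies a
# stale.yml file (typically the Probot "stale" bot configuration), write the project name,
# change type, commit metadata and diff to project_bot.csv.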
repos = iocsv.read_csv_repos_fil("data_filtered.csv")
out = open('project_bot.csv', 'w')
w_out = csv.writer(out)
for commit in RepositoryMining(path_to_repo=repos, only_modifications_with_file_types= ['.yml']).traverse_commits():
files = []
for mod in commit.modifications:
if mod.filename == "stale.yml":
file = [commit.project_name, mod.change_type.name, commit.in_main_branch, commit.hash, commit.msg, commit.author.name, commit.committer.name, commit.merge,
commit.author_date.strftime("%Y-%m-%d %H:%M:%S"), mod.source_code, mod.diff, mod.added, mod.removed]
files.append(file)
break
if files:
w_out.writerow(files)
out.flush()
out.close()
| 30.074074
| 168
| 0.669951
|
from pydriller import RepositoryMining
import iocsv
import csv
repos = iocsv.read_csv_repos_fil("data_filtered.csv")
out = open('project_bot.csv', 'w')
w_out = csv.writer(out)
for commit in RepositoryMining(path_to_repo=repos, only_modifications_with_file_types= ['.yml']).traverse_commits():
files = []
for mod in commit.modifications:
if mod.filename == "stale.yml":
file = [commit.project_name, mod.change_type.name, commit.in_main_branch, commit.hash, commit.msg, commit.author.name, commit.committer.name, commit.merge,
commit.author_date.strftime("%Y-%m-%d %H:%M:%S"), mod.source_code, mod.diff, mod.added, mod.removed]
files.append(file)
break
if files:
w_out.writerow(files)
out.flush()
out.close()
| true
| true
|
f70cbad2376446a505af61eaa2bb681268c8b57c
| 638
|
py
|
Python
|
src/apis/text/text/transliteration.py
|
theunifai/unifai-apis-core
|
1f2a9051c1e3df1bd19a96f22e4a07767ef3973a
|
[
"MIT"
] | 2
|
2021-11-09T07:18:06.000Z
|
2022-01-04T19:37:17.000Z
|
src/apis/text/text/transliteration.py
|
theunifai/unifai-apis-core
|
1f2a9051c1e3df1bd19a96f22e4a07767ef3973a
|
[
"MIT"
] | 4
|
2021-11-04T08:28:59.000Z
|
2021-11-07T05:59:59.000Z
|
src/apis/text/text/transliteration.py
|
theunifai/unifai-apis-core
|
1f2a9051c1e3df1bd19a96f22e4a07767ef3973a
|
[
"MIT"
] | 1
|
2022-01-07T09:12:22.000Z
|
2022-01-07T09:12:22.000Z
|
from fastapi import APIRouter
from gladia_api_utils.submodules import TaskRouter
router = APIRouter()
inputs = [
{
"type": "text",
"name": "text",
"default": "Лорем ипсум долор сит амет",
"tooltip": "Insert the text to transliterate here",
},
{
"type": "text",
"name": "language",
"default": "ru",
"tooltip": "Insert the language code here",
},
]
output = {
"name": "transliterated_text",
"type": "str",
"example": "transliterated_text"
}
TaskRouter(router=router, input=inputs, output=output, default_model="transliterate")
| 22.785714
| 85
| 0.581505
|
from fastapi import APIRouter
from gladia_api_utils.submodules import TaskRouter
router = APIRouter()
inputs = [
{
"type": "text",
"name": "text",
"default": "Лорем ипсум долор сит амет",
"tooltip": "Insert the text to transliterate here",
},
{
"type": "text",
"name": "language",
"default": "ru",
"tooltip": "Insert the language code here",
},
]
output = {
"name": "transliterated_text",
"type": "str",
"example": "transliterated_text"
}
TaskRouter(router=router, input=inputs, output=output, default_model="transliterate")
| true
| true
|
f70cbb17e5522f2101a01509fdf02a32b98ffdba
| 3,081
|
py
|
Python
|
flatpandoc.py
|
apas/athena
|
74e79723c53237275c40c10731717d7ca0dc130d
|
[
"MIT"
] | 321
|
2016-06-04T11:49:15.000Z
|
2022-01-05T17:01:26.000Z
|
flatpandoc.py
|
apas/athena
|
74e79723c53237275c40c10731717d7ca0dc130d
|
[
"MIT"
] | 17
|
2016-06-12T18:08:44.000Z
|
2020-08-08T09:31:17.000Z
|
flatpandoc.py
|
apas/athena
|
74e79723c53237275c40c10731717d7ca0dc130d
|
[
"MIT"
] | 40
|
2016-06-12T17:35:23.000Z
|
2022-02-04T18:35:55.000Z
|
"""
flask_flatpages_pandoc
~~~~~~~~~~~~~~~~~~~~~~
Flask-FlatPages-Pandoc is an HTML renderer for Flask-FlatPages that uses
pandoc as backend.
:copyright: (c) 2014 Fabian Hirschmann <fabian@hirschmann.email>
:license: MIT, see LICENSE.txt for more details.
With some changes by @apas:
- Invoke pandoc via pypandoc instead subprocess
- Indentation changes
- Support of Pandoc 2.0 by @ThoseGrapefruits
- Support of Python 3 by @frstp64
License: MIT
"""
import pkg_resources
import pypandoc
from flask import render_template_string, Markup
try:
__version__ = pkg_resources.require("Flask-FlatPages-Pandoc")[0]
except pkg_resources.DistributionNotFound:
__version__ = "0.0-dev"
class FlatPagesPandoc(object):
"""
Class that, when applied to a :class:`flask.Flask` instance,
sets up an HTML renderer using pandoc.
"""
def __init__(self, source_format, app=None, pandoc_args=[],
pre_render=False):
"""
Initializes Flask-FlatPages-Pandoc.
:param source_format: the source file format; directly passed
to pandoc.
:type source_format: string
:param app: your application. Can be omitted if you call
:meth:`init_app` later.
:type app: :class:`flask.Flask`
:param pandoc_args: extra arguments passed to pandoc
:type pandoc_args: sequence
:param pre_render: pre-render the page as :class:`flask.Markup`
:type pre_render: boolean
"""
self.source_format = source_format
self.pandoc_args = pandoc_args
self.pre_render = pre_render
if app:
self.init_app(app)
def init_app(self, app):
"""
Used to initialize an application. This is useful when passing
an app later.
:param app: your application
:type app: :class:`flask.Flask`
"""
self.app = app
# The following lambda expression works around Flask-FlatPage's
# reflection magic.
self.app.config["FLATPAGES_HTML_RENDERER"] = lambda t: self.renderer(t)
def renderer(self, text):
"""
Renders a flat page to HTML.
:param text: the text of the flat page
:type text: string
"""
#if type(text) == str:
# text = str(text, self.app.config["FLATPAGES_ENCODING"])
if self.pre_render:
text = render_template_string(Markup(text))
extra_args = [
"--filter=pandoc-crossref",
"--filter=pandoc-citeproc",
"--filter=pandoc-sidenote",
"--standalone",
"--mathml",
"--base-header-level=2",
"--highlight-style", "pygments",
"--bibliography=pages/all.bib",
"--csl=pages/lncs.csl",
"-Mreference-section-title=References",
"-Mlink-citations=true"
]
pandocver = int(pypandoc.get_pandoc_version()[0])
if pandocver < 2:
extra_args.append("-S")
format_str = "markdown+raw_tex+yaml_metadata_block"
else:
format_str = "markdown+raw_tex+smart+yaml_metadata_block+header_attributes"
output = pypandoc.convert_text(
text.encode("utf8"),
'html',
format = format_str,
extra_args=extra_args
)
return output
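A minimal wiring sketch, assuming Flask and Flask-FlatPages are installed; the ".md" extension and the route below are illustrative assumptions, not taken from this repository.
from flask import Flask
from flask_flatpages import FlatPages

app = Flask(__name__)
app.config["FLATPAGES_EXTENSION"] = ".md"   # assumed source extension
pages = FlatPages(app)
FlatPagesPandoc("markdown", app, pre_render=True)  # registers the pandoc renderer via init_app

@app.route("/<path:name>/")
def page(name):
    # Page.html invokes FLATPAGES_HTML_RENDERER, i.e. FlatPagesPandoc.renderer
    return pages.get_or_404(name).html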
| 26.560345
| 81
| 0.666667
|
import pkg_resources
import pypandoc
from flask import render_template_string, Markup
try:
__version__ = pkg_resources.require("Flask-FlatPages-Pandoc")[0]
except pkg_resources.DistributionNotFound:
__version__ = "0.0-dev"
class FlatPagesPandoc(object):
def __init__(self, source_format, app=None, pandoc_args=[],
pre_render=False):
self.source_format = source_format
self.pandoc_args = pandoc_args
self.pre_render = pre_render
if app:
self.init_app(app)
def init_app(self, app):
self.app = app
# reflection magic.
self.app.config["FLATPAGES_HTML_RENDERER"] = lambda t: self.renderer(t)
def renderer(self, text):
#if type(text) == str:
# text = str(text, self.app.config["FLATPAGES_ENCODING"])
if self.pre_render:
text = render_template_string(Markup(text))
extra_args = [
"--filter=pandoc-crossref",
"--filter=pandoc-citeproc",
"--filter=pandoc-sidenote",
"--standalone",
"--mathml",
"--base-header-level=2",
"--highlight-style", "pygments",
"--bibliography=pages/all.bib",
"--csl=pages/lncs.csl",
"-Mreference-section-title=References",
"-Mlink-citations=true"
]
pandocver = int(pypandoc.get_pandoc_version()[0])
if pandocver < 2:
extra_args.append("-S")
format_str = "markdown+raw_tex+yaml_metadata_block"
else:
format_str = "markdown+raw_tex+smart+yaml_metadata_block+header_attributes"
output = pypandoc.convert_text(
text.encode("utf8"),
'html',
format = format_str,
extra_args=extra_args
)
return output
| true
| true
|
f70cbc1338df7077e1e741af0d075462ddf93d9d
| 1,528
|
py
|
Python
|
Kai/crab/NANOv7_Fri13/2018/ST/crab_cfg_2018_ST_tW.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | 1
|
2022-01-17T17:29:38.000Z
|
2022-01-17T17:29:38.000Z
|
Kai/crab/NANOv7_Fri13/2018/ST/crab_cfg_2018_ST_tW.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | null | null | null |
Kai/crab/NANOv7_Fri13/2018/ST/crab_cfg_2018_ST_tW.py
|
NJManganelli/FourTopNAOD
|
9743d5b49bdbad27a74abb7b2d5b7295f678a0e3
|
[
"Apache-2.0"
] | 1
|
2021-12-15T10:56:50.000Z
|
2021-12-15T10:56:50.000Z
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_ST_tW'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.maxMemoryMB = 2000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_ST_tW.sh'
config.JobType.inputFiles = ['crab_script_2018_ST_tW.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = ['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/ST_tW_top_5f_inclusiveDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18NanoAODv7-Nano02Apr2020_102X_upgrade2018_realistic_v21_ext1-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
# config.Data.totalUnits = $TOTAL_UNITS
# config.Data.userInputFiles = []
config.Data.outLFNDirBase = '/store/user/{user}/Fri13'.format(user=getUsernameFromCRIC())
config.Data.publication = True
config.Data.outputDatasetTag = 'Fri13'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
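For context: a configuration like this is normally submitted with the CRAB3 command-line client, e.g. crab submit -c crab_cfg_2018_ST_tW.py, and followed up with crab status -d <project directory>; the exact client invocation is an assumption here and is not part of this file.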
| 39.179487
| 174
| 0.777487
|
import os
from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromCRIC
config = Configuration()
config.section_("General")
config.General.requestName = '2018_ST_tW'
config.General.transferOutputs = True
config.General.transferLogs = True
config.section_("JobType")
config.JobType.allowUndistributedCMSSW = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.maxMemoryMB = 2000
config.JobType.maxJobRuntimeMin = 1315
config.JobType.numCores = 1
config.JobType.scriptExe = 'crab_script_2018_ST_tW.sh'
config.JobType.inputFiles = ['crab_script_2018_ST_tW.py',
os.path.join(os.environ['CMSSW_BASE'],'src/PhysicsTools/NanoAODTools/scripts/haddnano.py'),
]
config.JobType.outputFiles = ['hist.root']
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/ST_tW_top_5f_inclusiveDecays_TuneCP5_13TeV-powheg-pythia8/RunIIAutumn18NanoAODv7-Nano02Apr2020_102X_upgrade2018_realistic_v21_ext1-v1/NANOAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
if config.Data.splitting == 'FileBased':
config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/user/{user}/Fri13'.format(user=getUsernameFromCRIC())
config.Data.publication = True
config.Data.outputDatasetTag = 'Fri13'
config.section_("Site")
config.Site.storageSite = 'T2_CH_CERN'
| true
| true
|
f70cbc8b3b98f38e47c4f74a3b7d54186d5f3d15
| 333
|
py
|
Python
|
sandbox/lib/jumpscale/JumpScale9Lib/clients/gogs/GogsFactory.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 2
|
2017-06-07T08:11:47.000Z
|
2017-11-10T02:19:48.000Z
|
JumpScale9Lib/clients/gogs/GogsFactory.py
|
Jumpscale/lib9
|
82224784ef2a7071faeb48349007211c367bc673
|
[
"Apache-2.0"
] | 188
|
2017-06-21T06:16:13.000Z
|
2020-06-17T14:20:24.000Z
|
sandbox/lib/jumpscale/JumpScale9Lib/clients/gogs/GogsFactory.py
|
Jumpscale/sandbox_linux
|
2aacd36b467ef30ac83718abfa82c6883b67a02f
|
[
"Apache-2.0"
] | 3
|
2018-06-12T05:18:28.000Z
|
2019-09-24T06:49:17.000Z
|
from .GogsClient import GogsClient
from js9 import j
JSConfigBaseFactory = j.tools.configmanager.base_class_configs
class GogsFactory(JSConfigBaseFactory):
def __init__(self):
self.__jslocation__ = "j.clients.gogs"
self.__imports__ = "requests,psycopg2"
JSConfigBaseFactory.__init__(self, GogsClient)
| 25.615385
| 62
| 0.756757
|
from .GogsClient import GogsClient
from js9 import j
JSConfigBaseFactory = j.tools.configmanager.base_class_configs
class GogsFactory(JSConfigBaseFactory):
def __init__(self):
self.__jslocation__ = "j.clients.gogs"
self.__imports__ = "requests,psycopg2"
JSConfigBaseFactory.__init__(self, GogsClient)
| true
| true
|
f70cbccefa5857dc098e4aa84320e84b66e9b510
| 5,962
|
py
|
Python
|
tests/integration/test_mutations_hardlinks/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 8,629
|
2016-06-14T21:03:01.000Z
|
2019-09-23T07:46:38.000Z
|
tests/integration/test_mutations_hardlinks/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 4,335
|
2016-06-15T12:58:31.000Z
|
2019-09-23T11:18:43.000Z
|
tests/integration/test_mutations_hardlinks/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 1,700
|
2016-06-15T09:25:11.000Z
|
2019-09-23T11:16:38.000Z
|
import os
import time
from multiprocessing.dummy import Pool
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", main_configs=["configs/wide_parts_only.xml"])
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def check_hardlinks(table, part_path, column_file, count):
column_path = os.path.join(
"/var/lib/clickhouse/data/default", table, part_path, column_file
)
script = """
export INODE=`ls -i {column_path} | awk '{{print $1}}'`
export COUNT=`find /var/lib/clickhouse -inum $INODE | wc -l`
test $COUNT = {count}
""".format(
column_path=column_path, count=count
)
node1.exec_in_container(["bash", "-c", script])
def check_exists(table, part_path, column_file):
column_path = os.path.join(
"/var/lib/clickhouse/data/default", table, part_path, column_file
)
node1.exec_in_container(["bash", "-c", "test -f {}".format(column_path)])
def test_update_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_update(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_update SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_update UPDATE value1 = value1 * value1 WHERE 1",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
i * i for i in range(100)
)
check_hardlinks("table_for_update", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_update", "all_1_1_0_2", "value2.bin", 2)
check_hardlinks("table_for_update", "all_1_1_0_2", "value1.bin", 1)
node1.query(
"ALTER TABLE table_for_update UPDATE key=key, value1=value1, value2=value2 WHERE 1",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
i * i for i in range(100)
)
check_hardlinks("table_for_update", "all_1_1_0_3", "key.bin", 1)
check_hardlinks("table_for_update", "all_1_1_0_3", "value1.bin", 1)
check_hardlinks("table_for_update", "all_1_1_0_3", "value2.bin", 1)
def test_modify_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_modify(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_modify SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_modify").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_modify MODIFY COLUMN value2 UInt64",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value2) FROM table_for_modify").strip()) == sum(
range(100)
)
check_hardlinks("table_for_modify", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_modify", "all_1_1_0_2", "value1.bin", 2)
check_hardlinks("table_for_modify", "all_1_1_0_2", "value2.bin", 1)
def test_drop_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_drop SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_drop").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_drop DROP COLUMN value2",
settings={"mutations_sync": "2"},
)
check_hardlinks("table_for_drop", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_drop", "all_1_1_0_2", "value1.bin", 2)
with pytest.raises(Exception):
check_exists("table_for_drop", "all_1_1_0_2", "value2.bin")
with pytest.raises(Exception):
check_exists("table_for_drop", "all_1_1_0_2", "value2.mrk")
def test_delete_and_drop_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_delete_and_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_delete_and_drop SELECT number, number, toString(number) from numbers(100)"
)
assert int(
node1.query("SELECT sum(value1) FROM table_for_delete_and_drop").strip()
) == sum(range(100))
node1.query("SYSTEM STOP MERGES")
def mutate():
node1.query(
"ALTER TABLE table_for_delete_and_drop DELETE WHERE key % 2 == 0, DROP COLUMN value2"
)
p = Pool(2)
p.apply_async(mutate)
for _ in range(1, 100):
result = node1.query(
"SELECT COUNT() FROM system.mutations WHERE table = 'table_for_delete_and_drop' and is_done=0"
)
try:
if int(result.strip()) == 2:
break
except:
print("Result", result)
pass
time.sleep(0.5)
node1.query("SYSTEM START MERGES")
assert_eq_with_retry(
node1,
"SELECT COUNT() FROM table_for_delete_and_drop",
str(sum(1 for i in range(100) if i % 2 != 0)),
)
check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "key.bin", 1)
check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "value1.bin", 1)
with pytest.raises(Exception):
check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.bin")
with pytest.raises(Exception):
check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.mrk")
| 31.378947
| 126
| 0.661691
|
import os
import time
from multiprocessing.dummy import Pool
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", main_configs=["configs/wide_parts_only.xml"])
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def check_hardlinks(table, part_path, column_file, count):
column_path = os.path.join(
"/var/lib/clickhouse/data/default", table, part_path, column_file
)
script = """
export INODE=`ls -i {column_path} | awk '{{print $1}}'`
export COUNT=`find /var/lib/clickhouse -inum $INODE | wc -l`
test $COUNT = {count}
""".format(
column_path=column_path, count=count
)
node1.exec_in_container(["bash", "-c", script])
def check_exists(table, part_path, column_file):
column_path = os.path.join(
"/var/lib/clickhouse/data/default", table, part_path, column_file
)
node1.exec_in_container(["bash", "-c", "test -f {}".format(column_path)])
def test_update_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_update(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_update SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_update UPDATE value1 = value1 * value1 WHERE 1",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
i * i for i in range(100)
)
check_hardlinks("table_for_update", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_update", "all_1_1_0_2", "value2.bin", 2)
check_hardlinks("table_for_update", "all_1_1_0_2", "value1.bin", 1)
node1.query(
"ALTER TABLE table_for_update UPDATE key=key, value1=value1, value2=value2 WHERE 1",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value1) FROM table_for_update").strip()) == sum(
i * i for i in range(100)
)
check_hardlinks("table_for_update", "all_1_1_0_3", "key.bin", 1)
check_hardlinks("table_for_update", "all_1_1_0_3", "value1.bin", 1)
check_hardlinks("table_for_update", "all_1_1_0_3", "value2.bin", 1)
def test_modify_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_modify(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_modify SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_modify").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_modify MODIFY COLUMN value2 UInt64",
settings={"mutations_sync": "2"},
)
assert int(node1.query("SELECT sum(value2) FROM table_for_modify").strip()) == sum(
range(100)
)
check_hardlinks("table_for_modify", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_modify", "all_1_1_0_2", "value1.bin", 2)
check_hardlinks("table_for_modify", "all_1_1_0_2", "value2.bin", 1)
def test_drop_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_drop SELECT number, number, toString(number) from numbers(100)"
)
assert int(node1.query("SELECT sum(value1) FROM table_for_drop").strip()) == sum(
range(100)
)
node1.query(
"ALTER TABLE table_for_drop DROP COLUMN value2",
settings={"mutations_sync": "2"},
)
check_hardlinks("table_for_drop", "all_1_1_0_2", "key.bin", 2)
check_hardlinks("table_for_drop", "all_1_1_0_2", "value1.bin", 2)
with pytest.raises(Exception):
check_exists("table_for_drop", "all_1_1_0_2", "value2.bin")
with pytest.raises(Exception):
check_exists("table_for_drop", "all_1_1_0_2", "value2.mrk")
def test_delete_and_drop_mutation(started_cluster):
node1.query(
"CREATE TABLE table_for_delete_and_drop(key UInt64, value1 UInt64, value2 String) ENGINE MergeTree() ORDER BY tuple()"
)
node1.query(
"INSERT INTO table_for_delete_and_drop SELECT number, number, toString(number) from numbers(100)"
)
assert int(
node1.query("SELECT sum(value1) FROM table_for_delete_and_drop").strip()
) == sum(range(100))
node1.query("SYSTEM STOP MERGES")
def mutate():
node1.query(
"ALTER TABLE table_for_delete_and_drop DELETE WHERE key % 2 == 0, DROP COLUMN value2"
)
p = Pool(2)
p.apply_async(mutate)
for _ in range(1, 100):
result = node1.query(
"SELECT COUNT() FROM system.mutations WHERE table = 'table_for_delete_and_drop' and is_done=0"
)
try:
if int(result.strip()) == 2:
break
except:
print("Result", result)
pass
time.sleep(0.5)
node1.query("SYSTEM START MERGES")
assert_eq_with_retry(
node1,
"SELECT COUNT() FROM table_for_delete_and_drop",
str(sum(1 for i in range(100) if i % 2 != 0)),
)
check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "key.bin", 1)
check_hardlinks("table_for_delete_and_drop", "all_1_1_0_3", "value1.bin", 1)
with pytest.raises(Exception):
check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.bin")
with pytest.raises(Exception):
check_exists("table_for_delete_and_drop", "all_1_1_0_3", "value2.mrk")
| true
| true
|
f70cbdb054754ef3a69b9032f0332e2b00e041b8
| 841
|
py
|
Python
|
blueprints/aws_s3_bucket/management/upload-file-object.py
|
hciudad/cloudbolt-forge
|
d1109c90dcd189defa70876906d394e0c91feab5
|
[
"Apache-2.0"
] | null | null | null |
blueprints/aws_s3_bucket/management/upload-file-object.py
|
hciudad/cloudbolt-forge
|
d1109c90dcd189defa70876906d394e0c91feab5
|
[
"Apache-2.0"
] | null | null | null |
blueprints/aws_s3_bucket/management/upload-file-object.py
|
hciudad/cloudbolt-forge
|
d1109c90dcd189defa70876906d394e0c91feab5
|
[
"Apache-2.0"
] | null | null | null |
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
def run(job, resource, **kwargs):
set_progress("Connecting to AWS s3 cloud")
aws = AWSHandler.objects.get(id=resource.aws_rh_id)
wrapper = aws.get_api_wrapper()
set_progress("This resource belongs to {}".format(aws))
file = "{{ file }}"
key_name = "{{ name }}"
s3 = wrapper.get_boto3_resource(
aws.serviceaccount,
aws.servicepasswd,
None,
service_name='s3'
)
try:
set_progress('uploading file from "{}"'.format(file))
s3.Bucket(resource.s3_bucket_name).upload_file(file, key_name)
except Exception as e:
return "FAILURE", str(e), ""
return "SUCCESS", "The file has been successfully uploaded to '{}' bucket".format(resource.s3_bucket_name), ""
| 30.035714
| 114
| 0.662307
|
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
def run(job, resource, **kwargs):
set_progress("Connecting to AWS s3 cloud")
aws = AWSHandler.objects.get(id=resource.aws_rh_id)
wrapper = aws.get_api_wrapper()
set_progress("This resource belongs to {}".format(aws))
file = "{{ file }}"
key_name = "{{ name }}"
s3 = wrapper.get_boto3_resource(
aws.serviceaccount,
aws.servicepasswd,
None,
service_name='s3'
)
try:
set_progress('uploading file from "{}"'.format(file))
s3.Bucket(resource.s3_bucket_name).upload_file(file, key_name)
except Exception as e:
return "FAILURE", str(e), ""
return "SUCCESS", "The file has been successfully uploaded to '{}' bucket".format(resource.s3_bucket_name), ""
| true
| true
|
f70cbe6619bf7fdcb6ced25a2c9399e389f26f25
| 2,412
|
py
|
Python
|
tank-generator-script/tank.py
|
the-other-mariana/maya-scripts
|
6af1fdea243990081b8aeb3c5aa1bf07852ff0a3
|
[
"MIT"
] | 2
|
2021-11-17T23:01:13.000Z
|
2021-11-27T18:28:38.000Z
|
tank-generator-script/tank.py
|
the-other-mariana/maya-scripts
|
6af1fdea243990081b8aeb3c5aa1bf07852ff0a3
|
[
"MIT"
] | null | null | null |
tank-generator-script/tank.py
|
the-other-mariana/maya-scripts
|
6af1fdea243990081b8aeb3c5aa1bf07852ff0a3
|
[
"MIT"
] | null | null | null |
# File: tank.py
# Author: Mariana Avalos
# Date: 22/02/2019
# Description: Python code that makes a 3D tank
import maya.cmds as c
import math as math
# 8 tires
tireTranslation = [3, -3]
tireRadius = 1.25
for j in range(len(tireTranslation)):
for i in range(4):
name = 'c' + str(i + (j * 4) + 1)
c.polyCylinder(r = tireRadius, sx = 20, sy = 1, n = 'c' + str(i + (j * 4) + 1))
c.setAttr(name + '.rotateZ', 90)
c.setAttr(name + '.scaleY', 0.8)
c.setAttr(name + '.translateZ', i * (tireRadius * 2) - (tireRadius * 3))
c.setAttr(name + '.translateX', tireTranslation[j])
# body made with the coolest for loop
body = 'body'
c.polyCube(sx = 4, sy = 2, sz = 1, d = 5.25, h = 3, w = 4, n = body)
c.setAttr(body + '.translateY', 0.5)
bodyRadius = 0.5
zFactor = [1, -1]
for j in range(len(zFactor)):
for i in range(0, 15):
rads = (360.0 / 8)*(3.1416 / 180)
x = -1 * bodyRadius * math.cos(rads * (i % 5))
z = zFactor[j] * bodyRadius * math.sin(rads * (i % 5))
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tx = x)
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tz = z)
if i in (5, 6, 7, 8, 9):
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tz = 3 * zFactor[j])
# head of tank
head = 'head'
headRadius = 0.5
c.polyCube(sx = 4, sy = 1, sz = 1, d = 3, h = 1.0, w = 4, n = head)
c.setAttr(head + '.translateY', 2.6)
c.setAttr(head + '.translateZ', -1)
for i in range(10, 20):
rads = (360.0 / 8)*(3.1416 / 180)
z = -1 * headRadius * math.sin(rads * (i % 5))
c.polyMoveVertex(head + '.vtx[' + str(i) + ']', tz = z)
if i in (10, 11, 12, 13, 14):
c.polyMoveVertex(head + '.vtx[' + str(i) + ']', tz = 1)
# axis under head
axis = 'axis'
c.polyCylinder(r = 1.5, sx = 20, sy = 1, h = 0.5, n = axis)
c.setAttr(axis + '.translateY', 2)
c.setAttr(axis + '.translateZ', -1.1)
# gun making: even parts are length 2 and odd parts are length 0.5
heights = [2, 0.5]
t = 1
gunRadius = 0.25
for i in range(0, 4):
name = 'gun' + str(i)
c.polyCylinder(r = gunRadius, sx = 8, sy = 1, h = heights[i % 2], n = name)
c.setAttr(name + '.translateY', 2.6)
c.setAttr(name + '.translateZ', t)
c.setAttr(name + '.rotateX', 90)
# translating: my height / 2 + next height / 2
t += heights[i % 2] / 2 + heights[(i + 1) % 2] / 2
gunRadius += 0.1
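Worked through, the translation update places the gun segments flush end to end: starting at t = 1, each iteration adds heights[i % 2] / 2 + heights[(i + 1) % 2] / 2 = 1.25, so the four cylinders are centred at z = 1, 2.25, 3.5 and 4.75 and span [0, 2], [2, 2.5], [2.5, 4.5] and [4.5, 5] respectively.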
| 34.457143
| 89
| 0.541459
|
import maya.cmds as c
import math as math
tireTranslation = [3, -3]
tireRadius = 1.25
for j in range(len(tireTranslation)):
for i in range(4):
name = 'c' + str(i + (j * 4) + 1)
c.polyCylinder(r = tireRadius, sx = 20, sy = 1, n = 'c' + str(i + (j * 4) + 1))
c.setAttr(name + '.rotateZ', 90)
c.setAttr(name + '.scaleY', 0.8)
c.setAttr(name + '.translateZ', i * (tireRadius * 2) - (tireRadius * 3))
c.setAttr(name + '.translateX', tireTranslation[j])
body = 'body'
c.polyCube(sx = 4, sy = 2, sz = 1, d = 5.25, h = 3, w = 4, n = body)
c.setAttr(body + '.translateY', 0.5)
bodyRadius = 0.5
zFactor = [1, -1]
for j in range(len(zFactor)):
for i in range(0, 15):
rads = (360.0 / 8)*(3.1416 / 180)
x = -1 * bodyRadius * math.cos(rads * (i % 5))
z = zFactor[j] * bodyRadius * math.sin(rads * (i % 5))
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tx = x)
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tz = z)
if i in (5, 6, 7, 8, 9):
c.polyMoveVertex(body + '.vtx[' + str(i + 15 * j) + ']', tz = 3 * zFactor[j])
head = 'head'
headRadius = 0.5
c.polyCube(sx = 4, sy = 1, sz = 1, d = 3, h = 1.0, w = 4, n = head)
c.setAttr(head + '.translateY', 2.6)
c.setAttr(head + '.translateZ', -1)
for i in range(10, 20):
rads = (360.0 / 8)*(3.1416 / 180)
z = -1 * headRadius * math.sin(rads * (i % 5))
c.polyMoveVertex(head + '.vtx[' + str(i) + ']', tz = z)
if i in (10, 11, 12, 13, 14):
c.polyMoveVertex(head + '.vtx[' + str(i) + ']', tz = 1)
axis = 'axis'
c.polyCylinder(r = 1.5, sx = 20, sy = 1, h = 0.5, n = axis)
c.setAttr(axis + '.translateY', 2)
c.setAttr(axis + '.translateZ', -1.1)
heights = [2, 0.5]
t = 1
gunRadius = 0.25
for i in range(0, 4):
name = 'gun' + str(i)
c.polyCylinder(r = gunRadius, sx = 8, sy = 1, h = heights[i % 2], n = name)
c.setAttr(name + '.translateY', 2.6)
c.setAttr(name + '.translateZ', t)
c.setAttr(name + '.rotateX', 90)
t += heights[i % 2] / 2 + heights[(i + 1) % 2] / 2
gunRadius += 0.1
| true
| true
|
f70cbee84274bde26aeddaa7ebed722c81efeeab
| 22,746
|
py
|
Python
|
sdk/storage/azure-storage-blob/azure/storage/blob/_shared/uploads.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/storage/azure-storage-blob/azure/storage/blob/_shared/uploads.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/storage/azure-storage-blob/azure/storage/blob/_shared/uploads.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from concurrent import futures
from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
from threading import Lock
from itertools import islice
from math import ceil
import six
from azure.core.tracing.common import with_current_context
from . import encode_base64, url_quote
from .request_handlers import get_length
from .response_handlers import return_response_headers
from .encryption import get_blob_encryptor_and_padder
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
def _parallel_uploads(executor, uploader, pending, running):
range_ids = []
while True:
# Wait for some download to finish before adding a new one
done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
range_ids.extend([chunk.result() for chunk in done])
try:
for _ in range(0, len(done)):
next_chunk = next(pending)
running.add(executor.submit(with_current_context(uploader), next_chunk))
except StopIteration:
break
# Wait for the remaining uploads to finish
done, _running = futures.wait(running)
range_ids.extend([chunk.result() for chunk in done])
return range_ids
def upload_data_chunks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
validate_content=None,
encryption_options=None,
progress_hook=None,
**kwargs):
if encryption_options:
encryptor, padder = get_blob_encryptor_and_padder(
encryption_options.get('cek'),
encryption_options.get('vector'),
uploader_class is not PageBlobChunkUploader)
kwargs['encryptor'] = encryptor
kwargs['padder'] = padder
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
# Access conditions do not work with parallelism
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
validate_content=validate_content,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_chunk_streams()
running_futures = [
executor.submit(with_current_context(uploader.process_chunk), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
else:
range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
if any(range_ids):
return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
return uploader.response_headers
def upload_substream_blocks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
progress_hook=None,
**kwargs):
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
# Access conditions do not work with parallelism
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_substream_blocks()
running_futures = [
executor.submit(with_current_context(uploader.process_substream_block), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
else:
range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
if any(range_ids):
return sorted(range_ids)
return []
class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
def __init__(
self, service,
total_size,
chunk_size,
stream,
parallel,
encryptor=None,
padder=None,
progress_hook=None,
**kwargs):
self.service = service
self.total_size = total_size
self.chunk_size = chunk_size
self.stream = stream
self.parallel = parallel
# Stream management
self.stream_start = stream.tell() if parallel else None
self.stream_lock = Lock() if parallel else None
# Progress feedback
self.progress_total = 0
self.progress_lock = Lock() if parallel else None
self.progress_hook = progress_hook
# Encryption
self.encryptor = encryptor
self.padder = padder
self.response_headers = None
self.etag = None
self.last_modified = None
self.request_options = kwargs
def get_chunk_streams(self):
index = 0
while True:
data = b""
read_size = self.chunk_size
# Buffer until we either reach the end of the stream or get a whole chunk.
while True:
if self.total_size:
read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
temp = self.stream.read(read_size)
if not isinstance(temp, six.binary_type):
raise TypeError("Blob data should be of type bytes.")
data += temp or b""
# We have read an empty string and so are at the end
# of the buffer or we have read a full chunk.
if temp == b"" or len(data) == self.chunk_size:
break
if len(data) == self.chunk_size:
if self.padder:
data = self.padder.update(data)
if self.encryptor:
data = self.encryptor.update(data)
yield index, data
else:
if self.padder:
data = self.padder.update(data) + self.padder.finalize()
if self.encryptor:
data = self.encryptor.update(data) + self.encryptor.finalize()
if data:
yield index, data
break
index += len(data)
def process_chunk(self, chunk_data):
chunk_bytes = chunk_data[1]
chunk_offset = chunk_data[0]
return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
def _update_progress(self, length):
if self.progress_lock is not None:
with self.progress_lock:
self.progress_total += length
else:
self.progress_total += length
if self.progress_hook:
self.progress_hook(self.progress_total, self.total_size)
def _upload_chunk(self, chunk_offset, chunk_data):
raise NotImplementedError("Must be implemented by child class.")
def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
range_id = self._upload_chunk(chunk_offset, chunk_data)
self._update_progress(len(chunk_data))
return range_id
def get_substream_blocks(self):
assert self.chunk_size is not None
lock = self.stream_lock
blob_length = self.total_size
if blob_length is None:
blob_length = get_length(self.stream)
if blob_length is None:
raise ValueError("Unable to determine content length of upload data.")
blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
for i in range(blocks):
index = i * self.chunk_size
length = last_block_size if i == blocks - 1 else self.chunk_size
yield index, SubStream(self.stream, index, length, lock)
def process_substream_block(self, block_data):
return self._upload_substream_block_with_progress(block_data[0], block_data[1])
def _upload_substream_block(self, index, block_stream):
raise NotImplementedError("Must be implemented by child class.")
def _upload_substream_block_with_progress(self, index, block_stream):
range_id = self._upload_substream_block(index, block_stream)
self._update_progress(len(block_stream))
return range_id
def set_response_properties(self, resp):
self.etag = resp.etag
self.last_modified = resp.last_modified
class BlockBlobChunkUploader(_ChunkUploader):
def __init__(self, *args, **kwargs):
kwargs.pop("modified_access_conditions", None)
super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
# TODO: This is incorrect, but works with recording.
index = '{0:032d}'.format(chunk_offset)
block_id = encode_base64(url_quote(encode_base64(index)))
self.service.stage_block(
block_id,
len(chunk_data),
chunk_data,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return index, block_id
def _upload_substream_block(self, index, block_stream):
try:
block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size))
self.service.stage_block(
block_id,
len(block_stream),
block_stream,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
return block_id
class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _is_chunk_empty(self, chunk_data):
# read until non-zero byte is encountered
# if reached the end without returning, then chunk_data is all 0's
return not any(bytearray(chunk_data))
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
if not self._is_chunk_empty(chunk_data):
chunk_end = chunk_offset + len(chunk_data) - 1
content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
computed_md5 = None
self.response_headers = self.service.upload_pages(
body=chunk_data,
content_length=len(chunk_data),
transactional_content_md5=computed_md5,
range=content_range,
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
pass
class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def __init__(self, *args, **kwargs):
super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
if self.current_length is None:
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
self.current_length = int(self.response_headers["blob_append_offset"])
else:
self.request_options['append_position_access_conditions'].append_position = \
self.current_length + chunk_offset
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
def _upload_substream_block(self, index, block_stream):
pass
class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
self.response_headers = self.service.append_data(
body=chunk_data,
position=chunk_offset,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
try:
self.service.append_data(
body=block_stream,
position=index,
content_length=len(block_stream),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
length = len(chunk_data)
chunk_end = chunk_offset + length - 1
response = self.service.upload_range(
chunk_data,
chunk_offset,
length,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
# TODO: Implement this method.
def _upload_substream_block(self, index, block_stream):
pass
class SubStream(IOBase):
def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
# Python 2.7: file-like objects created with open() typically support seek(), but are not
# derivations of io.IOBase and thus do not implement seekable().
# Python > 3.0: file-like objects created with open() are derived from io.IOBase.
try:
            # only the main thread runs this, so there's no need to grab the lock
wrapped_stream.seek(0, SEEK_CUR)
except:
raise ValueError("Wrapped stream must support seek().")
self._lock = lockObj
self._wrapped_stream = wrapped_stream
self._position = 0
self._stream_begin_index = stream_begin_index
self._length = length
self._buffer = BytesIO()
# we must avoid buffering more than necessary, and also not use up too much memory
# so the max buffer size is capped at 4MB
self._max_buffer_size = (
length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
)
self._current_buffer_start = 0
self._current_buffer_size = 0
super(SubStream, self).__init__()
def __len__(self):
return self._length
def close(self):
if self._buffer:
self._buffer.close()
self._wrapped_stream = None
IOBase.close(self)
def fileno(self):
return self._wrapped_stream.fileno()
def flush(self):
pass
def read(self, size=None):
if self.closed: # pylint: disable=using-constant-test
raise ValueError("Stream is closed.")
if size is None:
size = self._length - self._position
# adjust if out of bounds
if size + self._position >= self._length:
size = self._length - self._position
# return fast
if size == 0 or self._buffer.closed:
return b""
# attempt first read from the read buffer and update position
read_buffer = self._buffer.read(size)
bytes_read = len(read_buffer)
bytes_remaining = size - bytes_read
self._position += bytes_read
# repopulate the read buffer from the underlying stream to fulfill the request
# ensure the seek and read operations are done atomically (only if a lock is provided)
if bytes_remaining > 0:
with self._buffer:
# either read in the max buffer size specified on the class
# or read in just enough data for the current block/sub stream
current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
# lock is only defined if max_concurrency > 1 (parallel uploads)
if self._lock:
with self._lock:
# reposition the underlying stream to match the start of the data to read
absolute_position = self._stream_begin_index + self._position
self._wrapped_stream.seek(absolute_position, SEEK_SET)
# If we can't seek to the right location, our read will be corrupted so fail fast.
if self._wrapped_stream.tell() != absolute_position:
raise IOError("Stream failed to seek to the desired location.")
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
else:
absolute_position = self._stream_begin_index + self._position
                    # It's possible that there's a connection problem during data transfer,
# so when we retry we don't want to read from current position of wrapped stream,
# instead we should seek to where we want to read from.
if self._wrapped_stream.tell() != absolute_position:
self._wrapped_stream.seek(absolute_position, SEEK_SET)
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
if buffer_from_stream:
# update the buffer with new data from the wrapped stream
# we need to note down the start position and size of the buffer, in case seek is performed later
self._buffer = BytesIO(buffer_from_stream)
self._current_buffer_start = self._position
self._current_buffer_size = len(buffer_from_stream)
# read the remaining bytes from the new buffer and update position
second_read_buffer = self._buffer.read(bytes_remaining)
read_buffer += second_read_buffer
self._position += len(second_read_buffer)
return read_buffer
def readable(self):
return True
def readinto(self, b):
raise UnsupportedOperation
def seek(self, offset, whence=0):
if whence is SEEK_SET:
start_index = 0
elif whence is SEEK_CUR:
start_index = self._position
elif whence is SEEK_END:
start_index = self._length
offset = -offset
else:
raise ValueError("Invalid argument for the 'whence' parameter.")
pos = start_index + offset
if pos > self._length:
pos = self._length
elif pos < 0:
pos = 0
# check if buffer is still valid
# if not, drop buffer
if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
self._buffer.close()
self._buffer = BytesIO()
else: # if yes seek to correct position
delta = pos - self._current_buffer_start
self._buffer.seek(delta, SEEK_SET)
self._position = pos
return pos
def seekable(self):
return True
def tell(self):
return self._position
def write(self):
raise UnsupportedOperation
def writelines(self):
raise UnsupportedOperation
def writeable(self):
return False
class IterStreamer(object):
"""
File-like streaming iterator.
"""
def __init__(self, generator, encoding="UTF-8"):
self.generator = generator
self.iterator = iter(generator)
self.leftover = b""
self.encoding = encoding
def __len__(self):
return self.generator.__len__()
def __iter__(self):
return self.iterator
def seekable(self):
return False
def __next__(self):
return next(self.iterator)
next = __next__ # Python 2 compatibility.
def tell(self, *args, **kwargs):
raise UnsupportedOperation("Data generator does not support tell.")
def seek(self, *args, **kwargs):
raise UnsupportedOperation("Data generator is unseekable.")
def read(self, size):
data = self.leftover
count = len(self.leftover)
try:
while count < size:
chunk = self.__next__()
if isinstance(chunk, six.text_type):
chunk = chunk.encode(self.encoding)
data += chunk
count += len(chunk)
# This means count < size and what's leftover will be returned in this call.
except StopIteration:
self.leftover = b""
if count >= size:
self.leftover = data[size:]
return data[:size]
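_parallel_uploads keeps at most max_concurrency chunk uploads in flight, refilling from the pending iterator each time one finishes. The following is a standalone sketch of that bounded-concurrency pattern using only the standard library; the work function and item list are invented purely for illustration.
from concurrent import futures
from itertools import islice

def run_bounded(work, items, max_concurrency):
    items = iter(items)
    results = []
    with futures.ThreadPoolExecutor(max_concurrency) as executor:
        # prime the pool with the first max_concurrency tasks
        running = {executor.submit(work, item) for item in islice(items, max_concurrency)}
        while running:
            done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
            results.extend(f.result() for f in done)
            # top the pool back up, one new task per completed one
            for item in islice(items, len(done)):
                running.add(executor.submit(work, item))
    return results

# e.g. run_bounded(lambda n: n * n, range(10), max_concurrency=4) -> squares in completion order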
| 36.628019
| 116
| 0.622483
|
from concurrent import futures
from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
from threading import Lock
from itertools import islice
from math import ceil
import six
from azure.core.tracing.common import with_current_context
from . import encode_base64, url_quote
from .request_handlers import get_length
from .response_handlers import return_response_headers
from .encryption import get_blob_encryptor_and_padder
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
def _parallel_uploads(executor, uploader, pending, running):
range_ids = []
while True:
done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
range_ids.extend([chunk.result() for chunk in done])
try:
for _ in range(0, len(done)):
next_chunk = next(pending)
running.add(executor.submit(with_current_context(uploader), next_chunk))
except StopIteration:
break
done, _running = futures.wait(running)
range_ids.extend([chunk.result() for chunk in done])
return range_ids
def upload_data_chunks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
validate_content=None,
encryption_options=None,
progress_hook=None,
**kwargs):
if encryption_options:
encryptor, padder = get_blob_encryptor_and_padder(
encryption_options.get('cek'),
encryption_options.get('vector'),
uploader_class is not PageBlobChunkUploader)
kwargs['encryptor'] = encryptor
kwargs['padder'] = padder
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
validate_content=validate_content,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_chunk_streams()
running_futures = [
executor.submit(with_current_context(uploader.process_chunk), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
else:
range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
if any(range_ids):
return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
return uploader.response_headers
def upload_substream_blocks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
progress_hook=None,
**kwargs):
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_substream_blocks()
running_futures = [
executor.submit(with_current_context(uploader.process_substream_block), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
else:
range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
if any(range_ids):
return sorted(range_ids)
return []
class _ChunkUploader(object):
def __init__(
self, service,
total_size,
chunk_size,
stream,
parallel,
encryptor=None,
padder=None,
progress_hook=None,
**kwargs):
self.service = service
self.total_size = total_size
self.chunk_size = chunk_size
self.stream = stream
self.parallel = parallel
self.stream_start = stream.tell() if parallel else None
self.stream_lock = Lock() if parallel else None
self.progress_total = 0
self.progress_lock = Lock() if parallel else None
self.progress_hook = progress_hook
self.encryptor = encryptor
self.padder = padder
self.response_headers = None
self.etag = None
self.last_modified = None
self.request_options = kwargs
def get_chunk_streams(self):
index = 0
while True:
data = b""
read_size = self.chunk_size
while True:
if self.total_size:
read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
temp = self.stream.read(read_size)
if not isinstance(temp, six.binary_type):
raise TypeError("Blob data should be of type bytes.")
data += temp or b""
if temp == b"" or len(data) == self.chunk_size:
break
if len(data) == self.chunk_size:
if self.padder:
data = self.padder.update(data)
if self.encryptor:
data = self.encryptor.update(data)
yield index, data
else:
if self.padder:
data = self.padder.update(data) + self.padder.finalize()
if self.encryptor:
data = self.encryptor.update(data) + self.encryptor.finalize()
if data:
yield index, data
break
index += len(data)
def process_chunk(self, chunk_data):
chunk_bytes = chunk_data[1]
chunk_offset = chunk_data[0]
return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
def _update_progress(self, length):
if self.progress_lock is not None:
with self.progress_lock:
self.progress_total += length
else:
self.progress_total += length
if self.progress_hook:
self.progress_hook(self.progress_total, self.total_size)
def _upload_chunk(self, chunk_offset, chunk_data):
raise NotImplementedError("Must be implemented by child class.")
def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
range_id = self._upload_chunk(chunk_offset, chunk_data)
self._update_progress(len(chunk_data))
return range_id
def get_substream_blocks(self):
assert self.chunk_size is not None
lock = self.stream_lock
blob_length = self.total_size
if blob_length is None:
blob_length = get_length(self.stream)
if blob_length is None:
raise ValueError("Unable to determine content length of upload data.")
blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
for i in range(blocks):
index = i * self.chunk_size
length = last_block_size if i == blocks - 1 else self.chunk_size
yield index, SubStream(self.stream, index, length, lock)
def process_substream_block(self, block_data):
return self._upload_substream_block_with_progress(block_data[0], block_data[1])
def _upload_substream_block(self, index, block_stream):
raise NotImplementedError("Must be implemented by child class.")
def _upload_substream_block_with_progress(self, index, block_stream):
range_id = self._upload_substream_block(index, block_stream)
self._update_progress(len(block_stream))
return range_id
def set_response_properties(self, resp):
self.etag = resp.etag
self.last_modified = resp.last_modified
class BlockBlobChunkUploader(_ChunkUploader):
def __init__(self, *args, **kwargs):
kwargs.pop("modified_access_conditions", None)
super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
index = '{0:032d}'.format(chunk_offset)
block_id = encode_base64(url_quote(encode_base64(index)))
self.service.stage_block(
block_id,
len(chunk_data),
chunk_data,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return index, block_id
def _upload_substream_block(self, index, block_stream):
try:
block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size))
self.service.stage_block(
block_id,
len(block_stream),
block_stream,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
return block_id
class PageBlobChunkUploader(_ChunkUploader):
def _is_chunk_empty(self, chunk_data):
return not any(bytearray(chunk_data))
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
if not self._is_chunk_empty(chunk_data):
chunk_end = chunk_offset + len(chunk_data) - 1
content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
computed_md5 = None
self.response_headers = self.service.upload_pages(
body=chunk_data,
content_length=len(chunk_data),
transactional_content_md5=computed_md5,
range=content_range,
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
pass
class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def __init__(self, *args, **kwargs):
super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
if self.current_length is None:
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
self.current_length = int(self.response_headers["blob_append_offset"])
else:
self.request_options['append_position_access_conditions'].append_position = \
self.current_length + chunk_offset
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
def _upload_substream_block(self, index, block_stream):
pass
class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
self.response_headers = self.service.append_data(
body=chunk_data,
position=chunk_offset,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
try:
self.service.append_data(
body=block_stream,
position=index,
content_length=len(block_stream),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
length = len(chunk_data)
chunk_end = chunk_offset + length - 1
response = self.service.upload_range(
chunk_data,
chunk_offset,
length,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
# TODO: Implement this method.
def _upload_substream_block(self, index, block_stream):
pass
class SubStream(IOBase):
def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
# Python 2.7: file-like objects created with open() typically support seek(), but are not
# derivations of io.IOBase and thus do not implement seekable().
# Python > 3.0: file-like objects created with open() are derived from io.IOBase.
try:
            # only the main thread runs this, so there's no need to grab the lock
wrapped_stream.seek(0, SEEK_CUR)
except:
raise ValueError("Wrapped stream must support seek().")
self._lock = lockObj
self._wrapped_stream = wrapped_stream
self._position = 0
self._stream_begin_index = stream_begin_index
self._length = length
self._buffer = BytesIO()
self._max_buffer_size = (
length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
)
self._current_buffer_start = 0
self._current_buffer_size = 0
super(SubStream, self).__init__()
def __len__(self):
return self._length
def close(self):
if self._buffer:
self._buffer.close()
self._wrapped_stream = None
IOBase.close(self)
def fileno(self):
return self._wrapped_stream.fileno()
def flush(self):
pass
def read(self, size=None):
if self.closed: raise ValueError("Stream is closed.")
if size is None:
size = self._length - self._position
if size + self._position >= self._length:
size = self._length - self._position
if size == 0 or self._buffer.closed:
return b""
read_buffer = self._buffer.read(size)
bytes_read = len(read_buffer)
bytes_remaining = size - bytes_read
self._position += bytes_read
if bytes_remaining > 0:
with self._buffer:
current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
if self._lock:
with self._lock:
absolute_position = self._stream_begin_index + self._position
self._wrapped_stream.seek(absolute_position, SEEK_SET)
if self._wrapped_stream.tell() != absolute_position:
raise IOError("Stream failed to seek to the desired location.")
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
else:
absolute_position = self._stream_begin_index + self._position
                    # It's possible that there was a connection problem during data transfer,
                    # so on retry we don't want to read from the current position of the
                    # wrapped stream; seek to the correct absolute position first.
if self._wrapped_stream.tell() != absolute_position:
self._wrapped_stream.seek(absolute_position, SEEK_SET)
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
if buffer_from_stream:
self._buffer = BytesIO(buffer_from_stream)
self._current_buffer_start = self._position
self._current_buffer_size = len(buffer_from_stream)
second_read_buffer = self._buffer.read(bytes_remaining)
read_buffer += second_read_buffer
self._position += len(second_read_buffer)
return read_buffer
def readable(self):
return True
def readinto(self, b):
raise UnsupportedOperation
def seek(self, offset, whence=0):
if whence is SEEK_SET:
start_index = 0
elif whence is SEEK_CUR:
start_index = self._position
elif whence is SEEK_END:
start_index = self._length
offset = -offset
else:
raise ValueError("Invalid argument for the 'whence' parameter.")
pos = start_index + offset
if pos > self._length:
pos = self._length
elif pos < 0:
pos = 0
if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
self._buffer.close()
self._buffer = BytesIO()
        else:
            delta = pos - self._current_buffer_start
            self._buffer.seek(delta, SEEK_SET)
self._position = pos
return pos
def seekable(self):
return True
def tell(self):
return self._position
def write(self):
raise UnsupportedOperation
def writelines(self):
raise UnsupportedOperation
def writeable(self):
return False
class IterStreamer(object):
def __init__(self, generator, encoding="UTF-8"):
self.generator = generator
self.iterator = iter(generator)
self.leftover = b""
self.encoding = encoding
def __len__(self):
return self.generator.__len__()
def __iter__(self):
return self.iterator
def seekable(self):
return False
def __next__(self):
return next(self.iterator)
next = __next__
def tell(self, *args, **kwargs):
raise UnsupportedOperation("Data generator does not support tell.")
def seek(self, *args, **kwargs):
raise UnsupportedOperation("Data generator is unseekable.")
def read(self, size):
data = self.leftover
count = len(self.leftover)
try:
while count < size:
chunk = self.__next__()
if isinstance(chunk, six.text_type):
chunk = chunk.encode(self.encoding)
data += chunk
count += len(chunk)
except StopIteration:
self.leftover = b""
if count >= size:
self.leftover = data[size:]
return data[:size]
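# ----------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream SDK code).
# It shows how SubStream can expose fixed-size windows of one seekable source stream,
# e.g. for parallel chunked uploads. The 4 MiB chunk size and the dummy payload are
# assumptions made only for this example.
if __name__ == "__main__":
    import threading
    from io import BytesIO as _ExampleBytesIO

    _total = 10 * 1024 * 1024                     # 10 MiB of dummy data
    _chunk = 4 * 1024 * 1024                      # hypothetical chunk size
    _source = _ExampleBytesIO(b"x" * _total)
    _lock = threading.Lock()                      # shared because workers seek the same stream
    _windows = [
        SubStream(_source, _offset, min(_chunk, _total - _offset), _lock)
        for _offset in range(0, _total, _chunk)
    ]
    # Each SubStream reads only its own window of the wrapped stream.
    assert sum(len(w) for w in _windows) == _total
    assert len(_windows[0].read(16)) == 16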
| true
| true
|
f70cbfab08e86ea8aa181c3d36fe24445d6ac075
| 4,273
|
py
|
Python
|
clusters/dataset_utils.py
|
danielamassiceti/geneval_visdial
|
fbbe12b1e4ed7e21a002b16a87bdf42b2af3b35e
|
[
"MIT"
] | 4
|
2020-08-29T09:49:22.000Z
|
2020-09-22T02:15:04.000Z
|
clusters/dataset_utils.py
|
danielamassiceti/geneval_visdial
|
fbbe12b1e4ed7e21a002b16a87bdf42b2af3b35e
|
[
"MIT"
] | null | null | null |
clusters/dataset_utils.py
|
danielamassiceti/geneval_visdial
|
fbbe12b1e4ed7e21a002b16a87bdf42b2af3b35e
|
[
"MIT"
] | null | null | null |
import sys
import utils
import torch
from datasets import VisualDialogDataset
import torchvision.transforms as transforms
def build_dataset(mode, args, shared_dictionary=None, with_options=True):
normalize = transforms.Normalize(mean=[0.4711, 0.4475, 0.4080], std=[0.1223, 0.1221, 0.1450]) #visdial
transform = transforms.Compose([ transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize])
dataset = VisualDialogDataset(mode, args, with_options, transform)
dataset.load_dictionary(shared_dictionary)
dataset.load_data()
return dataset
def get_dataloader(mode, args, shared_dictionary=None, with_options=True):
loader = torch.utils.data.DataLoader(
build_dataset(mode, args, shared_dictionary, with_options),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
nelements = len(loader.dataset)
return loader
def get_mask(human_set_mask, human_set_only=True):
if human_set_only:
if torch.sum(human_set_mask) == 0:
return None
else:
return human_set_mask
else:
return torch.ones_like(human_set_mask)
def get_flat_features(loader, args, human_set_only=False):
print('flattening {:} features...'.format(loader.dataset.mode))
if human_set_only:
return get_flat_human_features(loader, args)
else:
return get_flat_full_features(loader, args)
def get_flat_human_features(loader, args):
avg_fn = loader.dataset.get_avg_embedding
E = loader.dataset.dictionary.emb_size
questions, answers = [], []
for i, batch in enumerate(loader):
sys.stdout.write('\r{}/{} --> {:3.1f}%'.format(str(i+1), str(len(loader)), (i+1)/float(len(loader))*100))
sys.stdout.flush()
mask = get_mask(batch['in_human_set'])
if isinstance(mask, torch.Tensor):
bsz = mask.sum()
batch = utils.send_to_device(batch, args.gpu)
human_scores = batch['answer_options_scores'][mask].view(bsz,-1,100)
cluster_mask = (human_scores > 0)
cluster_mask.scatter_(2, batch['gtidxs'][mask].view(bsz,-1, 1), 1)
cluster_sizes = cluster_mask.sum(dim=2).view(bsz)
emb_question = avg_fn(batch['questions_ids'][mask].view(bsz,-1,args.S), batch['questions_length'][mask].view(bsz,-1)).cpu()
emb_answer_set = avg_fn(batch['answer_options_ids'][mask].view(-1,100,args.S), batch['answer_options_length'][mask].view(-1,100))
emb_answer_set = emb_answer_set.view(bsz,-1,100,E)
emb_cluster_set = emb_answer_set[cluster_mask].cpu()
batch_idx, counter = 0, 1
acc_cluster_sizes = torch.cumsum(cluster_sizes, dim=0)
for emb_answer in emb_cluster_set:
questions.append(emb_question[batch_idx])
answers.append(emb_answer)
if counter == acc_cluster_sizes[batch_idx]:
batch_idx += 1
counter += 1
sys.stdout.write("\n")
questions = torch.stack(questions)
answers = torch.stack(answers)
return [ answers.view(-1, E), questions.view(-1, E)]
def get_flat_full_features(loader, args):
avg_fn = loader.dataset.get_avg_embedding
E = loader.dataset.dictionary.emb_size
questions = torch.FloatTensor(loader.dataset.N, args.D, E)
answers = torch.FloatTensor(loader.dataset.N, args.D, E)
for i, batch in enumerate(loader):
sys.stdout.write('\r{}/{} --> {:3.1f}%'.format(str(i+1), str(len(loader)), (i+1)/float(len(loader))*100))
sys.stdout.flush()
batch = utils.send_to_device(batch, args.gpu)
bsz = batch['questions_ids'].size(0)
questions[i*loader.batch_size:i*loader.batch_size+bsz] = avg_fn(batch['questions_ids'], batch['questions_length']).cpu()
answers[i*loader.batch_size:i*loader.batch_size+bsz] = avg_fn(batch['answers_ids'], batch['answers_length']).cpu()
sys.stdout.write("\n")
return [ answers.view(-1, E), questions.view(-1, E)]
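# ----------------------------------------------------------------------------------
# Hedged sketch (illustration only, not part of the original module): get_mask either
# restricts a batch to its human-annotated subset or keeps every element. The dummy
# boolean mask below is invented for this example.
if __name__ == "__main__":
    _human = torch.tensor([1, 0, 1, 0], dtype=torch.bool)
    assert torch.equal(get_mask(_human, human_set_only=True), _human)
    assert torch.equal(get_mask(_human, human_set_only=False), torch.ones_like(_human))
    assert get_mask(torch.zeros(4, dtype=torch.bool)) is None   # nothing annotated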
| 39.201835
| 141
| 0.632811
|
import sys
import utils
import torch
from datasets import VisualDialogDataset
import torchvision.transforms as transforms
def build_dataset(mode, args, shared_dictionary=None, with_options=True):
    normalize = transforms.Normalize(mean=[0.4711, 0.4475, 0.4080], std=[0.1223, 0.1221, 0.1450])
    transform = transforms.Compose([ transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize])
dataset = VisualDialogDataset(mode, args, with_options, transform)
dataset.load_dictionary(shared_dictionary)
dataset.load_data()
return dataset
def get_dataloader(mode, args, shared_dictionary=None, with_options=True):
loader = torch.utils.data.DataLoader(
build_dataset(mode, args, shared_dictionary, with_options),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False)
nelements = len(loader.dataset)
return loader
def get_mask(human_set_mask, human_set_only=True):
if human_set_only:
if torch.sum(human_set_mask) == 0:
return None
else:
return human_set_mask
else:
return torch.ones_like(human_set_mask)
def get_flat_features(loader, args, human_set_only=False):
print('flattening {:} features...'.format(loader.dataset.mode))
if human_set_only:
return get_flat_human_features(loader, args)
else:
return get_flat_full_features(loader, args)
def get_flat_human_features(loader, args):
avg_fn = loader.dataset.get_avg_embedding
E = loader.dataset.dictionary.emb_size
questions, answers = [], []
for i, batch in enumerate(loader):
sys.stdout.write('\r{}/{} --> {:3.1f}%'.format(str(i+1), str(len(loader)), (i+1)/float(len(loader))*100))
sys.stdout.flush()
mask = get_mask(batch['in_human_set'])
if isinstance(mask, torch.Tensor):
bsz = mask.sum()
batch = utils.send_to_device(batch, args.gpu)
human_scores = batch['answer_options_scores'][mask].view(bsz,-1,100)
cluster_mask = (human_scores > 0)
cluster_mask.scatter_(2, batch['gtidxs'][mask].view(bsz,-1, 1), 1)
cluster_sizes = cluster_mask.sum(dim=2).view(bsz)
emb_question = avg_fn(batch['questions_ids'][mask].view(bsz,-1,args.S), batch['questions_length'][mask].view(bsz,-1)).cpu()
emb_answer_set = avg_fn(batch['answer_options_ids'][mask].view(-1,100,args.S), batch['answer_options_length'][mask].view(-1,100))
emb_answer_set = emb_answer_set.view(bsz,-1,100,E)
emb_cluster_set = emb_answer_set[cluster_mask].cpu()
batch_idx, counter = 0, 1
acc_cluster_sizes = torch.cumsum(cluster_sizes, dim=0)
for emb_answer in emb_cluster_set:
questions.append(emb_question[batch_idx])
answers.append(emb_answer)
if counter == acc_cluster_sizes[batch_idx]:
batch_idx += 1
counter += 1
sys.stdout.write("\n")
questions = torch.stack(questions)
answers = torch.stack(answers)
return [ answers.view(-1, E), questions.view(-1, E)]
def get_flat_full_features(loader, args):
avg_fn = loader.dataset.get_avg_embedding
E = loader.dataset.dictionary.emb_size
questions = torch.FloatTensor(loader.dataset.N, args.D, E)
answers = torch.FloatTensor(loader.dataset.N, args.D, E)
for i, batch in enumerate(loader):
sys.stdout.write('\r{}/{} --> {:3.1f}%'.format(str(i+1), str(len(loader)), (i+1)/float(len(loader))*100))
sys.stdout.flush()
batch = utils.send_to_device(batch, args.gpu)
bsz = batch['questions_ids'].size(0)
questions[i*loader.batch_size:i*loader.batch_size+bsz] = avg_fn(batch['questions_ids'], batch['questions_length']).cpu()
answers[i*loader.batch_size:i*loader.batch_size+bsz] = avg_fn(batch['answers_ids'], batch['answers_length']).cpu()
sys.stdout.write("\n")
return [ answers.view(-1, E), questions.view(-1, E)]
| true
| true
|
f70cbfd58c3850aeb8634fd85c4b5c012fb5df22
| 3,311
|
py
|
Python
|
src/utils/core_commands_utils.py
|
Sublime-Instincts/CommandsBrowser
|
16ef299085854d1f4b61aaf8280c23c3f98a43a5
|
[
"MIT"
] | 21
|
2021-11-15T14:18:50.000Z
|
2022-03-31T12:35:41.000Z
|
src/utils/core_commands_utils.py
|
Sublime-Instincts/CommandsBrowser
|
16ef299085854d1f4b61aaf8280c23c3f98a43a5
|
[
"MIT"
] | 5
|
2021-11-15T11:23:12.000Z
|
2022-02-02T19:28:13.000Z
|
src/utils/core_commands_utils.py
|
Sublime-Instincts/CommandsBrowser
|
16ef299085854d1f4b61aaf8280c23c3f98a43a5
|
[
"MIT"
] | 1
|
2021-11-23T11:50:18.000Z
|
2021-11-23T11:50:18.000Z
|
import os
import json
import string
import inspect
import sublime
from .miscellaneous_utils import command_kind_type
kind_mapping = {
"window": command_kind_type("window"),
"text": command_kind_type("text"),
"application": command_kind_type("application"),
"find": command_kind_type("find")
}
def core_commands_doc_panel(window, docs):
""" For core commands, since they are impemented in ST core, they can't be
navigated to, unlike plugin based commands that have an associated python file.
The JSON files have enough information to store the docs however, so we simply
present that informaion in a panel.
Args:
window (sublime.Window): The window object for which the panel has to be
created.
docs (List): This is a list of 2 items. The first one is the command name
and the second one is the command metadata.
Returns:
None
"""
doc_panel = window.create_output_panel("CommandsBrowser")
doc_panel.set_read_only(False)
final_doc_string = ""
description_string = f"""
Name of the command: {docs[0]}
Description: {docs[1]["doc_string"]}
"""
final_doc_string += inspect.cleandoc(description_string.strip()) + "\n" * 2
final_doc_string += "Arguments:" + "\n" * 2
if docs[1].get("args") is not None:
max_arg_length = max([len(doc["name"]) for doc in docs[1]["args"]])
max_length = max([(len(doc["name"]) + len(doc["type"]) + 4) for doc in docs[1]["args"]])
for doc in docs[1]["args"]:
length_1 = abs(max_arg_length - len(doc["name"]))
length_2 = abs(max_length - (len(doc["name"]) + len(doc["type"]) + length_1 + 4))
doc_string = doc["doc_string"] if doc["doc_string"] is not None else "No available description."
initial_string = f"""
{doc["name"]}{"":^{length_1}} ({doc["type"]}){"":^{length_2}} - {doc_string}
"""
final_doc_string += initial_string.strip() + "\n"
else:
final_doc_string += "No known args exist for this command."
doc_panel.run_command("insert", { "characters": final_doc_string })
doc_panel.settings().set("syntax", "Packages/CommandsBrowser/resources/CommandsBrowser.sublime-syntax")
doc_panel.settings().set("gutter", False)
doc_panel.set_read_only(True)
window.run_command("show_panel", {
"panel": "output.CommandsBrowser",
})
doc_panel.run_command("scroll_to_bof")
def get_core_commands_data(application = "st"):
""" Given the application type, generates a list of items representing
command data that can be returned from a CommandInputHandler.list_items
method.
Args:
application (str): The application for which the commands need to be
        retrieved. Valid values are 'st' (Sublime Text) or 'sm' (Sublime Merge).
Returns:
final_dict (Dict): The final dictionary of commands and their docs.
"""
json_file_names = [a for a in sublime.find_resources("*.json") if a.startswith(f"Packages/CommandsBrowser/{application}_commands_metadata")]
final_dict = {}
for file_name in json_file_names:
data = json.loads(sublime.load_resource(file_name))
if data is not None:
final_dict.update(data)
return final_dict
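# ----------------------------------------------------------------------------------
# Hedged sketch (illustration only): the column-alignment arithmetic used in
# core_commands_doc_panel, reproduced with plain strings so the padding logic can be
# followed without calling any Sublime API. The sample argument entries are made up.
if __name__ == "__main__":
    _args = [
        {"name": "characters", "type": "str", "doc_string": "Text to insert."},
        {"name": "scroll_to_end", "type": "bool", "doc_string": None},
    ]
    _max_arg = max(len(a["name"]) for a in _args)
    _max_len = max(len(a["name"]) + len(a["type"]) + 4 for a in _args)
    for _a in _args:
        _l1 = abs(_max_arg - len(_a["name"]))
        _l2 = abs(_max_len - (len(_a["name"]) + len(_a["type"]) + _l1 + 4))
        _doc = _a["doc_string"] if _a["doc_string"] is not None else "No available description."
        print(f'{_a["name"]}{"":^{_l1}} ({_a["type"]}){"":^{_l2}} - {_doc}')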
| 34.852632
| 144
| 0.659619
|
import os
import json
import string
import inspect
import sublime
from .miscellaneous_utils import command_kind_type
kind_mapping = {
"window": command_kind_type("window"),
"text": command_kind_type("text"),
"application": command_kind_type("application"),
"find": command_kind_type("find")
}
def core_commands_doc_panel(window, docs):
doc_panel = window.create_output_panel("CommandsBrowser")
doc_panel.set_read_only(False)
final_doc_string = ""
description_string = f"""
Name of the command: {docs[0]}
Description: {docs[1]["doc_string"]}
"""
final_doc_string += inspect.cleandoc(description_string.strip()) + "\n" * 2
final_doc_string += "Arguments:" + "\n" * 2
if docs[1].get("args") is not None:
max_arg_length = max([len(doc["name"]) for doc in docs[1]["args"]])
max_length = max([(len(doc["name"]) + len(doc["type"]) + 4) for doc in docs[1]["args"]])
for doc in docs[1]["args"]:
length_1 = abs(max_arg_length - len(doc["name"]))
length_2 = abs(max_length - (len(doc["name"]) + len(doc["type"]) + length_1 + 4))
doc_string = doc["doc_string"] if doc["doc_string"] is not None else "No available description."
initial_string = f"""
{doc["name"]}{"":^{length_1}} ({doc["type"]}){"":^{length_2}} - {doc_string}
"""
final_doc_string += initial_string.strip() + "\n"
else:
final_doc_string += "No known args exist for this command."
doc_panel.run_command("insert", { "characters": final_doc_string })
doc_panel.settings().set("syntax", "Packages/CommandsBrowser/resources/CommandsBrowser.sublime-syntax")
doc_panel.settings().set("gutter", False)
doc_panel.set_read_only(True)
window.run_command("show_panel", {
"panel": "output.CommandsBrowser",
})
doc_panel.run_command("scroll_to_bof")
def get_core_commands_data(application = "st"):
json_file_names = [a for a in sublime.find_resources("*.json") if a.startswith(f"Packages/CommandsBrowser/{application}_commands_metadata")]
final_dict = {}
for file_name in json_file_names:
data = json.loads(sublime.load_resource(file_name))
if data is not None:
final_dict.update(data)
return final_dict
| true
| true
|
f70cc31b421f32a1e1227a2279a89d83e39d23df
| 3,281
|
py
|
Python
|
src/vprwave-bot.py
|
vornao/vaporwave-bot
|
7af13d27aec50b72f407d4060b9cf4ec1c44a817
|
[
"MIT"
] | null | null | null |
src/vprwave-bot.py
|
vornao/vaporwave-bot
|
7af13d27aec50b72f407d4060b9cf4ec1c44a817
|
[
"MIT"
] | null | null | null |
src/vprwave-bot.py
|
vornao/vaporwave-bot
|
7af13d27aec50b72f407d4060b9cf4ec1c44a817
|
[
"MIT"
] | null | null | null |
# ⋆ ˚。⋆୨୧˚ v a p o r w a v e b o t ˚୨୧⋆。˚ ⋆
# Simple Telegram bot that converts standard unicode chars to full-width ones
# Unicode full-width characters: each character occupies the width of a Chinese character.
# Full-width characters range from 0xFF01 to 0xFFE5.
# Japanese hiragana characters range from 0x3040 to 0x309F.
# ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚ ⋆ ˚。⋆୨୧˚
import os
from telegram.inline.inlinequery import InlineQuery
from telegram.inline.inlinequeryresult import InlineQueryResult
from uuid import uuid4
from telegram import (
Update,
ParseMode,
InlineQueryResultArticle,
InputTextMessageContent,
)
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackContext,
InlineQueryHandler,
)
import logging
import random
import utils
import config
import threading
import userutils
# initialize lists with characters
def main():
# enable logging
try:
os.mkdir(config.FILES_PATH)
except:
print("directory already exists")
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
filename=config.FILES_PATH + "vaporwave-bot.log",
filemode="a+",
)
logging.info("VPRWV BOT STARTED")
userutils.init_cache()
ucheck = threading.Thread(target=userutils.usercheck, daemon=True)
ucheck.start()
updater = Updater(config.BOT_TOKEN)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("privacy", privacy_message))
dispatcher.add_handler(InlineQueryHandler(inline_vaporize_query))
updater.start_polling()
updater.idle()
def start(update: Update, context: CallbackContext):
try:
log = (
"User started bot. id : "
+ str(update.message.from_user.id)
+ " - username: "
+ update.message.from_user.username
)
logging.info(log)
except:
logging.exception("exception start method", exc_info=True)
update.message.reply_text(utils.start_msg, parse_mode=ParseMode.MARKDOWN)
def help(update, context):
update.message.reply_text(utils.help_msg, parse_mode=ParseMode.MARKDOWN)
def privacy_message(update, context):
update.message.reply_text(utils.privacy_msg, parse_mode=ParseMode.MARKDOWN)
def inline_vaporize_query(update: Update, context: CallbackContext):
query = update.inline_query.query
try:
userutils.queue.put(update.inline_query.from_user.username)
except:
logging.exception("Exception!", exc_info=True)
if query == "":
return
ans = [utils.hiramize(query), utils.emojize(query), utils.sparkleize(query)]
results = [
InlineQueryResultArticle(
id=str(uuid4()),
input_message_content=InputTextMessageContent(x),
title=x,
description=random.choice(utils.sparkles),
)
for x in ans
]
update.inline_query.answer(results, cache_time=utils.inline_cache_time)
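# ⋆ ˚。⋆୨୧˚ hedged sketch ˚୨୧⋆。˚ ⋆
# Illustration only: the bot's utils module is not shown here, so this is a minimal
# version of the full-width mapping described in the header comments: ASCII '!'..'~'
# shifts into the full-width block by adding 0xFEE0, and a space becomes the
# ideographic space U+3000. The real utils.hiramize/emojize/sparkleize may differ.
def _vaporize_example(text):
    out = []
    for ch in text:
        if ch == " ":
            out.append("\u3000")                 # ideographic space
        elif "!" <= ch <= "~":
            out.append(chr(ord(ch) + 0xFEE0))    # shift into full-width block
        else:
            out.append(ch)                       # leave non-ASCII untouched
    return "".join(out)                          # _vaporize_example("abc") -> "ａｂｃ"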
if __name__ == "__main__":
main()
| 27.341667
| 109
| 0.670527
|
import os
from telegram.inline.inlinequery import InlineQuery
from telegram.inline.inlinequeryresult import InlineQueryResult
from uuid import uuid4
from telegram import (
Update,
ParseMode,
InlineQueryResultArticle,
InputTextMessageContent,
)
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackContext,
InlineQueryHandler,
)
import logging
import random
import utils
import config
import threading
import userutils
def main():
try:
os.mkdir(config.FILES_PATH)
except:
print("directory already exists")
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
filename=config.FILES_PATH + "vaporwave-bot.log",
filemode="a+",
)
logging.info("VPRWV BOT STARTED")
userutils.init_cache()
ucheck = threading.Thread(target=userutils.usercheck, daemon=True)
ucheck.start()
updater = Updater(config.BOT_TOKEN)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("privacy", privacy_message))
dispatcher.add_handler(InlineQueryHandler(inline_vaporize_query))
updater.start_polling()
updater.idle()
def start(update: Update, context: CallbackContext):
try:
log = (
"User started bot. id : "
+ str(update.message.from_user.id)
+ " - username: "
+ update.message.from_user.username
)
logging.info(log)
except:
logging.exception("exception start method", exc_info=True)
update.message.reply_text(utils.start_msg, parse_mode=ParseMode.MARKDOWN)
def help(update, context):
update.message.reply_text(utils.help_msg, parse_mode=ParseMode.MARKDOWN)
def privacy_message(update, context):
update.message.reply_text(utils.privacy_msg, parse_mode=ParseMode.MARKDOWN)
def inline_vaporize_query(update: Update, context: CallbackContext):
query = update.inline_query.query
try:
userutils.queue.put(update.inline_query.from_user.username)
except:
logging.exception("Exception!", exc_info=True)
if query == "":
return
ans = [utils.hiramize(query), utils.emojize(query), utils.sparkleize(query)]
results = [
InlineQueryResultArticle(
id=str(uuid4()),
input_message_content=InputTextMessageContent(x),
title=x,
description=random.choice(utils.sparkles),
)
for x in ans
]
update.inline_query.answer(results, cache_time=utils.inline_cache_time)
if __name__ == "__main__":
main()
| true
| true
|
f70cc349fc535bd4d262d2556742c1e4dca4b0d3
| 15,569
|
py
|
Python
|
tools/BBAM/BBAM_utils.py
|
jbeomlee93/BBAM
|
bebd2358d0497960c9a8415e5dca8de4a25fd899
|
[
"MIT"
] | 44
|
2021-03-17T03:00:01.000Z
|
2022-03-30T03:30:44.000Z
|
tools/BBAM/BBAM_utils.py
|
jbeomlee93/BBAM
|
bebd2358d0497960c9a8415e5dca8de4a25fd899
|
[
"MIT"
] | 5
|
2021-03-20T08:35:13.000Z
|
2021-12-18T14:07:02.000Z
|
tools/BBAM/BBAM_utils.py
|
jbeomlee93/BBAM
|
bebd2358d0497960c9a8415e5dca8de4a25fd899
|
[
"MIT"
] | 7
|
2021-06-15T14:10:31.000Z
|
2022-01-27T14:16:26.000Z
|
import torch
from torch.autograd import Variable
from torchvision import models
import cv2
import sys
import numpy as np
import os
import math
import torch.nn.functional as F
idx_to_class = {0 : 'aeroplane', 1 : 'bicycle', 2 : 'bird', 3 : 'boat', 4 : 'bottle', 5 : 'bus', 6 : 'car', 7 : 'cat',
8 : 'chair', 9 : 'cow', 10 : 'table', 11 : 'dog', 12 : 'horse', 13 : 'motorbike', 14 : 'person',
15 : 'plant', 16 : 'sheep', 17 : 'sofa', 18 : 'train', 19 : 'monitor'}
def tv_norm(input, tv_beta, diagonal=False, sum=False):
# print(input.shape)
img = input[0, :]
if sum:
row_grad = torch.sum(torch.abs((img[:-1 , :] - img[1 :, :])).pow(tv_beta))
col_grad = torch.sum(torch.abs((img[: , :-1] - img[: , 1 :])).pow(tv_beta))
else:
row_grad = torch.mean(torch.abs((img[:-1, :] - img[1:, :])).pow(tv_beta))
col_grad = torch.mean(torch.abs((img[:, :-1] - img[:, 1:])).pow(tv_beta))
if diagonal:
diag = 0
if sum:
diag += torch.sum(torch.abs((img[:-1, :-1] - img[1:, 1:])).pow(tv_beta))
diag += torch.sum(torch.abs((img[1:, :-1] - img[:-1, 1:])).pow(tv_beta))
diag += torch.sum(torch.abs((img[:-1, 1:] - img[1:, :-1])).pow(tv_beta))
diag += torch.sum(torch.abs((img[1:, 1:] - img[:-1, :-1])).pow(tv_beta))
else:
diag += torch.mean(torch.abs((img[:-1, :-1] - img[1:, 1:])).pow(tv_beta))
diag += torch.mean(torch.abs((img[1:, :-1] - img[:-1, 1:])).pow(tv_beta))
diag += torch.mean(torch.abs((img[:-1, 1:] - img[1:, :-1])).pow(tv_beta))
diag += torch.mean(torch.abs((img[1:, 1:] - img[:-1, :-1])).pow(tv_beta))
return row_grad + col_grad + diag
return row_grad + col_grad
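# Hedged sketch (illustration only): tv_norm on a tiny made-up 1x2x2 checkerboard mask.
# With tv_beta=2 the row and column gradients each average to 1, so the result is 2.0.
if __name__ == "__main__":
    _toy_mask = torch.tensor([[[0., 1.], [1., 0.]]])
    assert float(tv_norm(_toy_mask, tv_beta=2)) == 2.0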
def numpy_to_torch(img, requires_grad = True, cuda_device=None):
use_cuda = torch.cuda.is_available()
if len(img.shape) < 3:
output = np.float32([img])
else:
output = np.expand_dims(img, axis=1)
# output = np.transpose(img, (2, 0, 1))
output = torch.from_numpy(output)
if use_cuda:
if cuda_device==None:
output = output.cuda()
else:
output = output.cuda(cuda_device)
# output = output.repeat(3, 1, 1)
v = Variable(output, requires_grad = requires_grad)
# v = v.repeat(3, 1, 1)
return v
color_dicts = [
[0.6, 0, 0.05],
[0.03, 0.19, 0.42],
[0, 0.27, 0.11],
[0.24, 0, 0.49],
[0.5, 0.25, 0.02],
[1, 0.5, 0],
[0.2, 0.2, 0.2],
[1, 0.1, 0.6],
[0.8, 0.8, 0]
]
def save_pred(image, boxes, save_path, image_id):
image[0] += 102.9801
image[1] += 115.9465
image[2] += 122.7717
image = image.data.cpu().numpy().transpose(1, 2, 0).astype('uint8')
for coord_idx, coords in enumerate(boxes):
image = cv2.UMat(image).get()
color = color_dicts[coord_idx%len(color_dicts)]
color = [int(c*255.0) for c in color]
color = color[::-1]
image = cv2.rectangle(image, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color, 5)
save_name = '%s/%s/box_prediction.jpg' % (save_path, image_id)
cv2.imwrite(save_name, image)
def save_mask(mask, masked_img=None, proposal=None, original_coord=None, perturbed_coord=None, iteration=None, proposal_idx=None, image_id=None, class_name=None, save_path_root=None, single_p_idx=None):
if not (masked_img is None):
masked_img[0] += 102.9801
masked_img[1] += 115.9465
masked_img[2] += 122.7717
masked_img = masked_img.data.cpu().numpy().transpose(1, 2, 0).astype('uint8')
mask = (255*mask.data.cpu().numpy().transpose(1, 2, 0)).astype('uint8')
    color = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]  # blue: proposal, green: unperturbed, red: perturbed
if (proposal is not None) and (original_coord is not None) and (perturbed_coord is None):
for coord_idx, coords in enumerate([proposal, original_coord]):
coords = coords.detach().data.cpu().numpy()
masked_img = cv2.UMat(masked_img).get()
masked_img = cv2.rectangle(masked_img, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color[coord_idx], 5)
if not((proposal is None) or (original_coord is None) or (perturbed_coord is None)):
for coord_idx, coords in enumerate([proposal, original_coord, perturbed_coord]):
coords = coords.detach().data.cpu().numpy()
masked_img = cv2.UMat(masked_img).get()
masked_img = cv2.rectangle(masked_img, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color[coord_idx], 5)
if not (masked_img is None):
masked_img = cv2.resize(masked_img, None, fx=0.5, fy=0.5)
mask = cv2.resize(mask, (masked_img.shape[1], masked_img.shape[0]))
if single_p_idx is None:
save_path = '%s/%s/pidx_%04d_%s/' % (save_path_root, image_id, proposal_idx, class_name)
else:
save_path = '%s/%s/pidx_%04d_%s/' % (save_path_root, image_id, proposal_idx, class_name)
if not os.path.exists(save_path):
os.makedirs(save_path)
if single_p_idx is None:
if not (masked_img is None):
cv2.imwrite('%s/iter_%04d.jpg' % (save_path, iteration), masked_img)
cv2.imwrite('%s/iter_%04d_mask.jpg' % (save_path, iteration), mask)
else:
if not (masked_img is None):
cv2.imwrite('%s/pidx_%04d_img.jpg' % (save_path, single_p_idx), masked_img)
cv2.imwrite('%s/pidx_%04d_mask.jpg' % (save_path, single_p_idx), mask)
def get_max_iou(source, targets):
# target: multiple boxes
maxIoU = 0
for target in targets.bbox:
bb1, bb2 = {}, {}
bb1['x1'], bb1['x2'] = int(source[0]), int(source[2])
bb1['y1'], bb1['y2'] = int(source[1]), int(source[3])
bb2['x1'], bb2['x2'] = int(target[0]), int(target[2])
bb2['y1'], bb2['y2'] = int(target[1]), int(target[3])
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if not(x_right < x_left or y_bottom < y_top):
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
            # areas - the intersection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
if maxIoU < iou:
maxIoU = iou
return maxIoU
def get_single_iou(source, target):
# target: multiple boxes
maxIoU = 0
bb1, bb2 = {}, {}
bb1['x1'], bb1['x2'] = int(source[0]), int(source[2])
bb1['y1'], bb1['y2'] = int(source[1]), int(source[3])
bb2['x1'], bb2['x2'] = int(target[0]), int(target[2])
bb2['y1'], bb2['y2'] = int(target[1]), int(target[3])
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
    # areas - the intersection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
return iou
def selected_positives(ious, pred_classes, displacements, proposal_iter):
ious, pred_classes, displacements = np.array(ious), np.array(pred_classes), np.array(displacements)
top_ious = np.argsort(-ious)
top_displacement = np.argsort(-displacements)
# include top 30%
positive_idxs = list(top_ious[:int(proposal_iter * 0.3)])
for d in top_displacement:
if ious[d] > 0.8:
positive_idxs.append(d)
return positive_idxs[:proposal_iter]
def imsmooth(tensor,
sigma,
stride=1,
padding=0,
padding_mode='constant',
padding_value=0):
"From TorchRay (https://github.com/facebookresearch/TorchRay)"
assert sigma >= 0
width = math.ceil(4 * sigma)
SQRT_TWO_DOUBLE = torch.tensor(math.sqrt(2), dtype=torch.float32)
SQRT_TWO_SINGLE = SQRT_TWO_DOUBLE.to(torch.float32)
EPSILON_SINGLE = torch.tensor(1.19209290E-07, dtype=torch.float32)
filt = (torch.arange(-width,
width + 1,
dtype=torch.float32,
device=tensor.device) /
(SQRT_TWO_SINGLE * sigma + EPSILON_SINGLE))
filt = torch.exp(-filt * filt)
filt /= torch.sum(filt)
num_channels = tensor.shape[1]
width = width + padding
if padding_mode == 'constant' and padding_value == 0:
other_padding = width
x = tensor
else:
# pad: (before, after) pairs starting from last dimension backward
x = F.pad(tensor,
(width, width, width, width),
mode=padding_mode,
value=padding_value)
other_padding = 0
padding = 0
x = F.conv2d(x,
filt.reshape((1, 1, -1, 1)).expand(num_channels, -1, -1, -1),
padding=(other_padding, padding),
stride=(stride, 1),
groups=num_channels)
x = F.conv2d(x,
filt.reshape((1, 1, 1, -1)).expand(num_channels, -1, -1, -1),
padding=(padding, other_padding),
stride=(1, stride),
groups=num_channels)
return x
class MaskGenerator:
r"""Mask generator.
The class takes as input the mask parameters and returns
as output a mask.
Args:
shape (tuple of int): output shape.
step (int): parameterization step in pixels.
sigma (float): kernel size.
clamp (bool, optional): whether to clamp the mask to [0,1]. Defaults to True.
        pooling_method (str, optional): 'softmax' (default), 'sum', 'sigmoid'.
Attributes:
shape (tuple): the same as the specified :attr:`shape` parameter.
shape_in (tuple): spatial size of the parameter tensor.
shape_out (tuple): spatial size of the output mask including margin.
"""
def __init__(self, shape, step, sigma, clamp=True, pooling_method='softmax'):
self.shape = shape
self.step = step
self.sigma = sigma
self.coldness = 20
self.clamp = clamp
self.pooling_method = pooling_method
assert int(step) == step
# self.kernel = lambda z: (z < 1).float()
self.kernel = lambda z: torch.exp(-2 * ((z - .5).clamp(min=0)**2))
self.margin = self.sigma
# self.margin = 0
self.padding = 1 + math.ceil((self.margin + sigma) / step)
self.radius = 1 + math.ceil(sigma / step)
self.shape_in = [math.ceil(z / step) for z in self.shape]
self.shape_mid = [
z + 2 * self.padding - (2 * self.radius + 1) + 1
for z in self.shape_in
]
self.shape_up = [self.step * z for z in self.shape_mid]
self.shape_out = [z - step + 1 for z in self.shape_up]
self.weight = torch.zeros((
1,
(2 * self.radius + 1)**2,
self.shape_out[0],
self.shape_out[1]
))
step_inv = [
torch.tensor(zm, dtype=torch.float32) /
torch.tensor(zo, dtype=torch.float32)
for zm, zo in zip(self.shape_mid, self.shape_up)
]
for ky in range(2 * self.radius + 1):
for kx in range(2 * self.radius + 1):
uy, ux = torch.meshgrid(
torch.arange(self.shape_out[0], dtype=torch.float32),
torch.arange(self.shape_out[1], dtype=torch.float32)
)
iy = torch.floor(step_inv[0] * uy) + ky - self.padding
ix = torch.floor(step_inv[1] * ux) + kx - self.padding
delta = torch.sqrt(
(uy - (self.margin + self.step * iy))**2 +
(ux - (self.margin + self.step * ix))**2
)
k = ky * (2 * self.radius + 1) + kx
self.weight[0, k] = self.kernel(delta / sigma)
def generate(self, mask_in):
r"""Generate a mask.
The function takes as input a parameter tensor :math:`\bar m` for
:math:`K` masks, which is a :math:`K\times 1\times H_i\times W_i`
tensor where `H_i\times W_i` are given by :attr:`shape_in`.
Args:
mask_in (:class:`torch.Tensor`): mask parameters.
Returns:
tuple: a pair of mask, cropped and full. The cropped mask is a
:class:`torch.Tensor` with the same spatial shape :attr:`shape`
            as specified upon creating this object. The second mask is the same,
but with an additional margin and shape :attr:`shape_out`.
"""
mask = F.unfold(mask_in,
(2 * self.radius + 1,) * 2,
padding=(self.padding,) * 2)
mask = mask.reshape(
len(mask_in), -1, self.shape_mid[0], self.shape_mid[1])
mask = F.interpolate(mask, size=self.shape_up, mode='nearest')
mask = F.pad(mask, (0, -self.step + 1, 0, -self.step + 1))
mask = self.weight * mask
if self.pooling_method == 'sigmoid':
if self.coldness == float('+Inf'):
mask = (mask.sum(dim=1, keepdim=True) - 5 > 0).float()
else:
mask = torch.sigmoid(
self.coldness * mask.sum(dim=1, keepdim=True) - 3
)
elif self.pooling_method == 'softmax':
if self.coldness == float('+Inf'):
mask = mask.max(dim=1, keepdim=True)[0]
else:
mask = (
mask * F.softmax(self.coldness * mask, dim=1)
).sum(dim=1, keepdim=True)
elif self.pooling_method == 'sum':
mask = mask.sum(dim=1, keepdim=True)
else:
assert False, f"Unknown pooling method {self.pooling_method}"
m = round(self.margin)
if self.clamp:
mask = mask.clamp(min=0, max=1)
cropped = mask[:, :, m:m + self.shape[0], m:m + self.shape[1]]
return cropped, mask
def to(self, dev):
"""Switch to another device.
Args:
dev: PyTorch device.
Returns:
MaskGenerator: self.
"""
self.weight = self.weight.to(dev)
return self
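# ----------------------------------------------------------------------------------
# Hedged sketch (illustration only): a hand-checked example of get_single_iou. Boxes
# are (x1, y1, x2, y2); the two 10x10 boxes below overlap in a 5x5 region, so
# IoU = 25 / (100 + 100 - 25) = 1/7. The coordinates are made up for this example.
if __name__ == "__main__":
    _iou = get_single_iou([0, 0, 10, 10], [5, 5, 15, 15])
    assert abs(_iou - 25.0 / 175.0) < 1e-9
    assert get_single_iou([0, 0, 10, 10], [20, 20, 30, 30]) == 0.0   # disjoint boxes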
| 38.9225
| 202
| 0.558353
|
import torch
from torch.autograd import Variable
from torchvision import models
import cv2
import sys
import numpy as np
import os
import math
import torch.nn.functional as F
idx_to_class = {0 : 'aeroplane', 1 : 'bicycle', 2 : 'bird', 3 : 'boat', 4 : 'bottle', 5 : 'bus', 6 : 'car', 7 : 'cat',
8 : 'chair', 9 : 'cow', 10 : 'table', 11 : 'dog', 12 : 'horse', 13 : 'motorbike', 14 : 'person',
15 : 'plant', 16 : 'sheep', 17 : 'sofa', 18 : 'train', 19 : 'monitor'}
def tv_norm(input, tv_beta, diagonal=False, sum=False):
img = input[0, :]
if sum:
row_grad = torch.sum(torch.abs((img[:-1 , :] - img[1 :, :])).pow(tv_beta))
col_grad = torch.sum(torch.abs((img[: , :-1] - img[: , 1 :])).pow(tv_beta))
else:
row_grad = torch.mean(torch.abs((img[:-1, :] - img[1:, :])).pow(tv_beta))
col_grad = torch.mean(torch.abs((img[:, :-1] - img[:, 1:])).pow(tv_beta))
if diagonal:
diag = 0
if sum:
diag += torch.sum(torch.abs((img[:-1, :-1] - img[1:, 1:])).pow(tv_beta))
diag += torch.sum(torch.abs((img[1:, :-1] - img[:-1, 1:])).pow(tv_beta))
diag += torch.sum(torch.abs((img[:-1, 1:] - img[1:, :-1])).pow(tv_beta))
diag += torch.sum(torch.abs((img[1:, 1:] - img[:-1, :-1])).pow(tv_beta))
else:
diag += torch.mean(torch.abs((img[:-1, :-1] - img[1:, 1:])).pow(tv_beta))
diag += torch.mean(torch.abs((img[1:, :-1] - img[:-1, 1:])).pow(tv_beta))
diag += torch.mean(torch.abs((img[:-1, 1:] - img[1:, :-1])).pow(tv_beta))
diag += torch.mean(torch.abs((img[1:, 1:] - img[:-1, :-1])).pow(tv_beta))
return row_grad + col_grad + diag
return row_grad + col_grad
def numpy_to_torch(img, requires_grad = True, cuda_device=None):
use_cuda = torch.cuda.is_available()
if len(img.shape) < 3:
output = np.float32([img])
else:
output = np.expand_dims(img, axis=1)
output = torch.from_numpy(output)
if use_cuda:
if cuda_device==None:
output = output.cuda()
else:
output = output.cuda(cuda_device)
v = Variable(output, requires_grad = requires_grad)
return v
color_dicts = [
[0.6, 0, 0.05],
[0.03, 0.19, 0.42],
[0, 0.27, 0.11],
[0.24, 0, 0.49],
[0.5, 0.25, 0.02],
[1, 0.5, 0],
[0.2, 0.2, 0.2],
[1, 0.1, 0.6],
[0.8, 0.8, 0]
]
def save_pred(image, boxes, save_path, image_id):
image[0] += 102.9801
image[1] += 115.9465
image[2] += 122.7717
image = image.data.cpu().numpy().transpose(1, 2, 0).astype('uint8')
for coord_idx, coords in enumerate(boxes):
image = cv2.UMat(image).get()
color = color_dicts[coord_idx%len(color_dicts)]
color = [int(c*255.0) for c in color]
color = color[::-1]
image = cv2.rectangle(image, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color, 5)
save_name = '%s/%s/box_prediction.jpg' % (save_path, image_id)
cv2.imwrite(save_name, image)
def save_mask(mask, masked_img=None, proposal=None, original_coord=None, perturbed_coord=None, iteration=None, proposal_idx=None, image_id=None, class_name=None, save_path_root=None, single_p_idx=None):
if not (masked_img is None):
masked_img[0] += 102.9801
masked_img[1] += 115.9465
masked_img[2] += 122.7717
masked_img = masked_img.data.cpu().numpy().transpose(1, 2, 0).astype('uint8')
mask = (255*mask.data.cpu().numpy().transpose(1, 2, 0)).astype('uint8')
color = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
if (proposal is not None) and (original_coord is not None) and (perturbed_coord is None):
for coord_idx, coords in enumerate([proposal, original_coord]):
coords = coords.detach().data.cpu().numpy()
masked_img = cv2.UMat(masked_img).get()
masked_img = cv2.rectangle(masked_img, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color[coord_idx], 5)
if not((proposal is None) or (original_coord is None) or (perturbed_coord is None)):
for coord_idx, coords in enumerate([proposal, original_coord, perturbed_coord]):
coords = coords.detach().data.cpu().numpy()
masked_img = cv2.UMat(masked_img).get()
masked_img = cv2.rectangle(masked_img, (int(coords[0]), int(coords[1])),
(int(coords[2]), int(coords[3])), color[coord_idx], 5)
if not (masked_img is None):
masked_img = cv2.resize(masked_img, None, fx=0.5, fy=0.5)
mask = cv2.resize(mask, (masked_img.shape[1], masked_img.shape[0]))
if single_p_idx is None:
save_path = '%s/%s/pidx_%04d_%s/' % (save_path_root, image_id, proposal_idx, class_name)
else:
save_path = '%s/%s/pidx_%04d_%s/' % (save_path_root, image_id, proposal_idx, class_name)
if not os.path.exists(save_path):
os.makedirs(save_path)
if single_p_idx is None:
if not (masked_img is None):
cv2.imwrite('%s/iter_%04d.jpg' % (save_path, iteration), masked_img)
cv2.imwrite('%s/iter_%04d_mask.jpg' % (save_path, iteration), mask)
else:
if not (masked_img is None):
cv2.imwrite('%s/pidx_%04d_img.jpg' % (save_path, single_p_idx), masked_img)
cv2.imwrite('%s/pidx_%04d_mask.jpg' % (save_path, single_p_idx), mask)
def get_max_iou(source, targets):
maxIoU = 0
for target in targets.bbox:
bb1, bb2 = {}, {}
bb1['x1'], bb1['x2'] = int(source[0]), int(source[2])
bb1['y1'], bb1['y2'] = int(source[1]), int(source[3])
bb2['x1'], bb2['x2'] = int(target[0]), int(target[2])
bb2['y1'], bb2['y2'] = int(target[1]), int(target[3])
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if not(x_right < x_left or y_bottom < y_top):
intersection_area = (x_right - x_left) * (y_bottom - y_top)
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
if maxIoU < iou:
maxIoU = iou
return maxIoU
def get_single_iou(source, target):
maxIoU = 0
bb1, bb2 = {}, {}
bb1['x1'], bb1['x2'] = int(source[0]), int(source[2])
bb1['y1'], bb1['y2'] = int(source[1]), int(source[3])
bb2['x1'], bb2['x2'] = int(target[0]), int(target[2])
bb2['y1'], bb2['y2'] = int(target[1]), int(target[3])
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
intersection_area = (x_right - x_left) * (y_bottom - y_top)
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
return iou
def selected_positives(ious, pred_classes, displacements, proposal_iter):
ious, pred_classes, displacements = np.array(ious), np.array(pred_classes), np.array(displacements)
top_ious = np.argsort(-ious)
top_displacement = np.argsort(-displacements)
positive_idxs = list(top_ious[:int(proposal_iter * 0.3)])
for d in top_displacement:
if ious[d] > 0.8:
positive_idxs.append(d)
return positive_idxs[:proposal_iter]
def imsmooth(tensor,
sigma,
stride=1,
padding=0,
padding_mode='constant',
padding_value=0):
assert sigma >= 0
width = math.ceil(4 * sigma)
SQRT_TWO_DOUBLE = torch.tensor(math.sqrt(2), dtype=torch.float32)
SQRT_TWO_SINGLE = SQRT_TWO_DOUBLE.to(torch.float32)
EPSILON_SINGLE = torch.tensor(1.19209290E-07, dtype=torch.float32)
filt = (torch.arange(-width,
width + 1,
dtype=torch.float32,
device=tensor.device) /
(SQRT_TWO_SINGLE * sigma + EPSILON_SINGLE))
filt = torch.exp(-filt * filt)
filt /= torch.sum(filt)
num_channels = tensor.shape[1]
width = width + padding
if padding_mode == 'constant' and padding_value == 0:
other_padding = width
x = tensor
else:
x = F.pad(tensor,
(width, width, width, width),
mode=padding_mode,
value=padding_value)
other_padding = 0
padding = 0
x = F.conv2d(x,
filt.reshape((1, 1, -1, 1)).expand(num_channels, -1, -1, -1),
padding=(other_padding, padding),
stride=(stride, 1),
groups=num_channels)
x = F.conv2d(x,
filt.reshape((1, 1, 1, -1)).expand(num_channels, -1, -1, -1),
padding=(padding, other_padding),
stride=(1, stride),
groups=num_channels)
return x
class MaskGenerator:
def __init__(self, shape, step, sigma, clamp=True, pooling_method='softmax'):
self.shape = shape
self.step = step
self.sigma = sigma
self.coldness = 20
self.clamp = clamp
self.pooling_method = pooling_method
assert int(step) == step
self.kernel = lambda z: torch.exp(-2 * ((z - .5).clamp(min=0)**2))
self.margin = self.sigma
self.padding = 1 + math.ceil((self.margin + sigma) / step)
self.radius = 1 + math.ceil(sigma / step)
self.shape_in = [math.ceil(z / step) for z in self.shape]
self.shape_mid = [
z + 2 * self.padding - (2 * self.radius + 1) + 1
for z in self.shape_in
]
self.shape_up = [self.step * z for z in self.shape_mid]
self.shape_out = [z - step + 1 for z in self.shape_up]
self.weight = torch.zeros((
1,
(2 * self.radius + 1)**2,
self.shape_out[0],
self.shape_out[1]
))
step_inv = [
torch.tensor(zm, dtype=torch.float32) /
torch.tensor(zo, dtype=torch.float32)
for zm, zo in zip(self.shape_mid, self.shape_up)
]
for ky in range(2 * self.radius + 1):
for kx in range(2 * self.radius + 1):
uy, ux = torch.meshgrid(
torch.arange(self.shape_out[0], dtype=torch.float32),
torch.arange(self.shape_out[1], dtype=torch.float32)
)
iy = torch.floor(step_inv[0] * uy) + ky - self.padding
ix = torch.floor(step_inv[1] * ux) + kx - self.padding
delta = torch.sqrt(
(uy - (self.margin + self.step * iy))**2 +
(ux - (self.margin + self.step * ix))**2
)
k = ky * (2 * self.radius + 1) + kx
self.weight[0, k] = self.kernel(delta / sigma)
def generate(self, mask_in):
mask = F.unfold(mask_in,
(2 * self.radius + 1,) * 2,
padding=(self.padding,) * 2)
mask = mask.reshape(
len(mask_in), -1, self.shape_mid[0], self.shape_mid[1])
mask = F.interpolate(mask, size=self.shape_up, mode='nearest')
mask = F.pad(mask, (0, -self.step + 1, 0, -self.step + 1))
mask = self.weight * mask
if self.pooling_method == 'sigmoid':
if self.coldness == float('+Inf'):
mask = (mask.sum(dim=1, keepdim=True) - 5 > 0).float()
else:
mask = torch.sigmoid(
self.coldness * mask.sum(dim=1, keepdim=True) - 3
)
elif self.pooling_method == 'softmax':
if self.coldness == float('+Inf'):
mask = mask.max(dim=1, keepdim=True)[0]
else:
mask = (
mask * F.softmax(self.coldness * mask, dim=1)
).sum(dim=1, keepdim=True)
elif self.pooling_method == 'sum':
mask = mask.sum(dim=1, keepdim=True)
else:
assert False, f"Unknown pooling method {self.pooling_method}"
m = round(self.margin)
if self.clamp:
mask = mask.clamp(min=0, max=1)
cropped = mask[:, :, m:m + self.shape[0], m:m + self.shape[1]]
return cropped, mask
def to(self, dev):
self.weight = self.weight.to(dev)
return self
| true
| true
|
f70cc399eea6b881f6eba416d969374573c0ab6a
| 1,251
|
py
|
Python
|
tests/fixtures.py
|
owlen/wecs
|
6c386d5215b4acef9e3208d0dd547c1ffc12c2f3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/fixtures.py
|
owlen/wecs
|
6c386d5215b4acef9e3208d0dd547c1ffc12c2f3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/fixtures.py
|
owlen/wecs
|
6c386d5215b4acef9e3208d0dd547c1ffc12c2f3
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from wecs.core import Entity, System, Component, World
from wecs.core import and_filter
# Absolute basics
@pytest.fixture
def world():
return World()
@pytest.fixture
def entity(world):
return world.create_entity()
# Null stuff
@Component()
class NullComponent:
pass
@pytest.fixture
def null_component():
return NullComponent()
@pytest.fixture
def null_entity(world, null_component):
entity = world.create_entity(null_component)
world._flush_component_updates()
return entity
class NullSystem(System):
entity_filters = {
"null": and_filter([NullComponent])
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.entries = []
self.exits = []
self.updates = []
def enter_filters(self, filters, entity):
self.entries.append((filters, entity))
def exit_filters(self, filters, entity):
self.exits.append((filters, entity))
def update(self, entities_by_filter):
self.updates.append(entities_by_filter)
@pytest.fixture
def null_system():
return NullSystem()
@pytest.fixture
def null_system_world(world, null_system):
world.add_system(null_system, 0)
return world
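# Hedged sketch (illustration only, not part of the original fixture module): NullSystem
# records every filter event it receives, so its bookkeeping can be exercised directly
# without building a World. The "fake-entity" placeholder is hypothetical.
def test_null_system_records_events():
    system = NullSystem()
    system.enter_filters(["null"], "fake-entity")
    system.update({"null": {"fake-entity"}})
    system.exit_filters(["null"], "fake-entity")
    assert system.entries == [(["null"], "fake-entity")]
    assert system.updates == [{"null": {"fake-entity"}}]
    assert system.exits == [(["null"], "fake-entity")]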
| 18.397059
| 54
| 0.683453
|
import pytest
from wecs.core import Entity, System, Component, World
from wecs.core import and_filter
@pytest.fixture
def world():
return World()
@pytest.fixture
def entity(world):
return world.create_entity()
@Component()
class NullComponent:
pass
@pytest.fixture
def null_component():
return NullComponent()
@pytest.fixture
def null_entity(world, null_component):
entity = world.create_entity(null_component)
world._flush_component_updates()
return entity
class NullSystem(System):
entity_filters = {
"null": and_filter([NullComponent])
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.entries = []
self.exits = []
self.updates = []
def enter_filters(self, filters, entity):
self.entries.append((filters, entity))
def exit_filters(self, filters, entity):
self.exits.append((filters, entity))
def update(self, entities_by_filter):
self.updates.append(entities_by_filter)
@pytest.fixture
def null_system():
return NullSystem()
@pytest.fixture
def null_system_world(world, null_system):
world.add_system(null_system, 0)
return world
| true
| true
|
f70cc3d1fc5702c69ada755731dfb5a1e16e04d9
| 1,960
|
py
|
Python
|
src/ship.py
|
tomice/Blast
|
8521fb7784fc5269d3d54396f8d89933a78f6de5
|
[
"MIT"
] | null | null | null |
src/ship.py
|
tomice/Blast
|
8521fb7784fc5269d3d54396f8d89933a78f6de5
|
[
"MIT"
] | 1
|
2018-06-19T00:39:40.000Z
|
2018-06-19T01:40:57.000Z
|
src/ship.py
|
tomice/Blast
|
8521fb7784fc5269d3d54396f8d89933a78f6de5
|
[
"MIT"
] | null | null | null |
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
"""Parent ship class for Blast."""
def __init__(self, blast_settings, screen):
"""Init ship and starting position."""
super(Ship, self).__init__()
self.screen = screen
self.blast_settings = blast_settings
self.image = pygame.image.load('../images/player.png')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = self.screen_rect.centerx
self.rect.centery = self.screen_rect.centery
self.rect.bottom = self.screen_rect.bottom
self.center = float(self.rect.centerx)
self.vertical = float(self.rect.centery)
# Movement flags
self.moving_right = False
self.moving_left = False
self.moving_up = False
self.moving_down = False
def update(self):
"""Update the ship's pos based on the movement flags."""
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.blast_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.blast_settings.ship_speed_factor
if self.moving_up and self.rect.top > 0:
self.vertical -= self.blast_settings.ship_speed_factor
if self.moving_down and self.rect.bottom < self.screen_rect.bottom:
self.vertical += self.blast_settings.ship_speed_factor
self.rect.centerx = self.center
self.rect.centery = self.vertical
def blitme(self):
"""Draw ship at current location."""
self.screen.blit(self.image, self.rect)
def center_ship(self):
"""Center ship on screen"""
self.center = self.screen_rect.centerx
# FIXME: Arbitrary "magic number" to get ship to bottom
self.vertical = self.screen_rect.bottom - 25
| 40
| 76
| 0.632653
|
import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
def __init__(self, blast_settings, screen):
super(Ship, self).__init__()
self.screen = screen
self.blast_settings = blast_settings
self.image = pygame.image.load('../images/player.png')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = self.screen_rect.centerx
self.rect.centery = self.screen_rect.centery
self.rect.bottom = self.screen_rect.bottom
self.center = float(self.rect.centerx)
self.vertical = float(self.rect.centery)
self.moving_right = False
self.moving_left = False
self.moving_up = False
self.moving_down = False
def update(self):
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.blast_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.blast_settings.ship_speed_factor
if self.moving_up and self.rect.top > 0:
self.vertical -= self.blast_settings.ship_speed_factor
if self.moving_down and self.rect.bottom < self.screen_rect.bottom:
self.vertical += self.blast_settings.ship_speed_factor
self.rect.centerx = self.center
self.rect.centery = self.vertical
def blitme(self):
self.screen.blit(self.image, self.rect)
def center_ship(self):
self.center = self.screen_rect.centerx
self.vertical = self.screen_rect.bottom - 25
| true
| true
|
f70cc55037c706d87e86a8c70e78b1817357a3aa
| 483
|
py
|
Python
|
seeker/__init__.py
|
imszhongj/django-seeker
|
6b1c4b03cd4d2ef8967496c0a9f76e032a425d00
|
[
"BSD-2-Clause"
] | 24
|
2015-01-05T21:03:56.000Z
|
2021-12-30T03:35:26.000Z
|
seeker/__init__.py
|
imszhongj/django-seeker
|
6b1c4b03cd4d2ef8967496c0a9f76e032a425d00
|
[
"BSD-2-Clause"
] | 30
|
2016-03-11T21:29:38.000Z
|
2022-03-29T21:09:05.000Z
|
seeker/__init__.py
|
imszhongj/django-seeker
|
6b1c4b03cd4d2ef8967496c0a9f76e032a425d00
|
[
"BSD-2-Clause"
] | 30
|
2015-01-08T19:49:04.000Z
|
2021-12-22T21:41:55.000Z
|
__version__ = '3.0-dev'
from .facets import Facet, GlobalTermsFacet, RangeFilter, TermsFacet, YearHistogram
from .mapping import (
DEFAULT_ANALYZER, Indexable, ModelIndex, RawMultiString, RawString, build_mapping, deep_field_factory,
document_field, document_from_model)
from .registry import app_documents, documents, model_documents, register
from .utils import delete, index, search
from .views import Column, SeekerView
default_app_config = 'seeker.apps.SeekerConfig'
| 37.153846
| 106
| 0.811594
|
__version__ = '3.0-dev'
from .facets import Facet, GlobalTermsFacet, RangeFilter, TermsFacet, YearHistogram
from .mapping import (
DEFAULT_ANALYZER, Indexable, ModelIndex, RawMultiString, RawString, build_mapping, deep_field_factory,
document_field, document_from_model)
from .registry import app_documents, documents, model_documents, register
from .utils import delete, index, search
from .views import Column, SeekerView
default_app_config = 'seeker.apps.SeekerConfig'
| true
| true
|
f70cc577111a8772618560e037874544ea61f005
| 2,616
|
py
|
Python
|
src/pyvfx_boilerplate/mayapalette.py
|
nzanepro/pyvfx.boilerplate
|
7e51e97836204c5ff18c3aef97684bd6253b719a
|
[
"MIT"
] | 121
|
2016-06-01T10:55:42.000Z
|
2022-03-10T15:32:36.000Z
|
src/pyvfx_boilerplate/mayapalette.py
|
nzanepro/pyvfx.boilerplate
|
7e51e97836204c5ff18c3aef97684bd6253b719a
|
[
"MIT"
] | 16
|
2016-06-01T08:44:02.000Z
|
2020-05-06T21:35:12.000Z
|
src/pyvfx_boilerplate/mayapalette.py
|
nzanepro/pyvfx.boilerplate
|
7e51e97836204c5ff18c3aef97684bd6253b719a
|
[
"MIT"
] | 31
|
2016-06-03T14:40:10.000Z
|
2022-02-24T21:30:03.000Z
|
import json
from Qt import QtGui
from Qt import QtWidgets
def set_palette_from_dict(dct):
"""Set palette to current QApplication based on given dictionary"""
groups = ["Disabled", "Active", "Inactive", "Normal"]
roles = [
"AlternateBase",
"Background",
"Base",
"Button",
"ButtonText",
"BrightText",
"Dark",
"Foreground",
"Highlight",
"HighlightedText",
"Light",
"Link",
"LinkVisited",
"Mid",
"Midlight",
"Shadow",
"ToolTipBase",
"ToolTipText",
"Text",
"Window",
"WindowText",
]
palette = QtGui.QPalette()
for role in roles:
try:
for group in groups:
color = QtGui.QColor(dct["%s:%s" % (role, group)])
qGrp = getattr(QtGui.QPalette, group)
qRl = getattr(QtGui.QPalette, role)
palette.setColor(qGrp, qRl, color)
except: # noqa
print("Could not use: " + str(palette))
try:
QtWidgets.QApplication.setPalette(palette)
except: # noqa
print("Could not set palette: " + str(palette))
def set_style():
"""Set style"""
available_styles = QtWidgets.QStyleFactory.keys()
if "Fusion" in available_styles:
QtWidgets.QApplication.setStyle("Fusion")
elif "Plastique" in available_styles:
QtWidgets.QApplication.setStyle("Plastique")
def set_maya_tweaks():
"""Apply Maya-specific styling"""
base_palette = QtWidgets.QApplication.palette()
# Set custom colors
LIGHT_COLOR = QtGui.QColor(100, 100, 100)
MID_COLOR = QtGui.QColor(68, 68, 68)
# Create a new palette
tab_palette = QtGui.QPalette(base_palette)
tab_palette.setBrush(QtGui.QPalette.Window, QtGui.QBrush(LIGHT_COLOR))
tab_palette.setBrush(QtGui.QPalette.Button, QtGui.QBrush(MID_COLOR))
# Define the widgets that needs tweaking
widget_palettes = {}
widget_palettes["QTabBar"] = tab_palette
widget_palettes["QTabWidget"] = tab_palette
# Set the new tweaked palette
for name, palette in widget_palettes.items():
QtWidgets.QApplication.setPalette(palette, name)
def read_json(filepath):
"""Read given JSON filepath into dictionary"""
with open(filepath, "r") as data_file:
data = json.load(data_file)
return data
def set_maya_palette_with_tweaks(palette_filepath):
"""Apply styling to current QApplication"""
data = read_json(palette_filepath)
set_palette_from_dict(data)
set_style()
set_maya_tweaks()
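# ----------------------------------------------------------------------------------
# Hedged sketch (illustration only): the palette JSON consumed by read_json /
# set_palette_from_dict keys each colour as "<Role>:<Group>" (matching the
# "%s:%s" % (role, group) lookup above) and maps it to a QColor-compatible value such
# as a hex string. The colours and the output filename below are made up.
if __name__ == "__main__":
    _example_palette = {
        "Window:Active": "#444444",
        "Window:Inactive": "#444444",
        "Window:Disabled": "#3a3a3a",
        "Window:Normal": "#444444",
        "WindowText:Active": "#dddddd",
        "WindowText:Inactive": "#dddddd",
        "WindowText:Disabled": "#808080",
        "WindowText:Normal": "#dddddd",
    }
    with open("example_palette.json", "w") as _f:
        json.dump(_example_palette, _f, indent=2)
    # set_maya_palette_with_tweaks("example_palette.json") would then apply it, given a
    # running QApplication.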
| 27.829787
| 74
| 0.623089
|
import json
from Qt import QtGui
from Qt import QtWidgets
def set_palette_from_dict(dct):
groups = ["Disabled", "Active", "Inactive", "Normal"]
roles = [
"AlternateBase",
"Background",
"Base",
"Button",
"ButtonText",
"BrightText",
"Dark",
"Foreground",
"Highlight",
"HighlightedText",
"Light",
"Link",
"LinkVisited",
"Mid",
"Midlight",
"Shadow",
"ToolTipBase",
"ToolTipText",
"Text",
"Window",
"WindowText",
]
palette = QtGui.QPalette()
for role in roles:
try:
for group in groups:
color = QtGui.QColor(dct["%s:%s" % (role, group)])
qGrp = getattr(QtGui.QPalette, group)
qRl = getattr(QtGui.QPalette, role)
palette.setColor(qGrp, qRl, color)
except: print("Could not use: " + str(palette))
try:
QtWidgets.QApplication.setPalette(palette)
except: print("Could not set palette: " + str(palette))
def set_style():
available_styles = QtWidgets.QStyleFactory.keys()
if "Fusion" in available_styles:
QtWidgets.QApplication.setStyle("Fusion")
elif "Plastique" in available_styles:
QtWidgets.QApplication.setStyle("Plastique")
def set_maya_tweaks():
base_palette = QtWidgets.QApplication.palette()
LIGHT_COLOR = QtGui.QColor(100, 100, 100)
MID_COLOR = QtGui.QColor(68, 68, 68)
tab_palette = QtGui.QPalette(base_palette)
tab_palette.setBrush(QtGui.QPalette.Window, QtGui.QBrush(LIGHT_COLOR))
tab_palette.setBrush(QtGui.QPalette.Button, QtGui.QBrush(MID_COLOR))
widget_palettes = {}
widget_palettes["QTabBar"] = tab_palette
widget_palettes["QTabWidget"] = tab_palette
for name, palette in widget_palettes.items():
QtWidgets.QApplication.setPalette(palette, name)
def read_json(filepath):
with open(filepath, "r") as data_file:
data = json.load(data_file)
return data
def set_maya_palette_with_tweaks(palette_filepath):
data = read_json(palette_filepath)
set_palette_from_dict(data)
set_style()
set_maya_tweaks()
| true
| true
|
f70cc5a1bcdac716b4c8be3c121f38829038f099
| 4,520
|
py
|
Python
|
scripts/pred_coinfo250.py
|
rdenaux/acred
|
ffe44953a96338acfe3860a9898e7f0b70b5c9cb
|
[
"Apache-2.0"
] | 8
|
2020-08-31T04:14:22.000Z
|
2021-09-29T06:00:31.000Z
|
scripts/pred_coinfo250.py
|
expertailab/acred
|
ee45840c942ef2fac4f26da8d756b7c47e42847c
|
[
"Apache-2.0"
] | null | null | null |
scripts/pred_coinfo250.py
|
expertailab/acred
|
ee45840c942ef2fac4f26da8d756b7c47e42847c
|
[
"Apache-2.0"
] | 1
|
2020-10-07T08:09:29.000Z
|
2020-10-07T08:09:29.000Z
|
#
# 2020 ExpertSystem
#
'''Script for generating predictions for the coinform250 dataset
using the acred predictor
See https://github.com/co-inform/Datasets
See also scripts/fetch-data.sh, which should download the input json file
and place it in the `data/evaluation/` folder.
'''
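# Editor's note: an illustrative invocation (the output dir and endpoint URL
# are assumptions, not taken from the repository):
#
#   python scripts/pred_coinfo250.py \
#     -inputJson data/evaluation/coinform250.json \
#     -outDir data/evaluation/acred-preds \
#     -credpred_url http://localhost:8080/api/v1/tweet/credibility \
#     -batchSize 5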
import argparse
import time
import json
import os
import os.path as osp
import requests
import traceback
import pandas as pd
def ensure_req_tweet_content(req):
for t in req['tweets']:
c = t['content']
if c is None:
t['content'] = ''
print('Fixed null content')
def acred_as_coinfo_label(credreview, thresh=0.4):
assert thresh >= 0.0
assert thresh <= 1.0
conf = credreview['reviewRating']['confidence']
if conf <= thresh:
return 'not_verifiable'
val = credreview['reviewRating']['ratingValue']
if val >= 0.5:
return 'credible'
if val >= 0.25:
return 'mostly_credible'
if val >= -0.25:
return 'credible_uncertain'
if val >= -0.5:
return 'credible_uncertain'
return 'not_credible'
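# Editor's illustration of the mapping above: with the default thresh of 0.4,
# a review with confidence 0.3 maps to 'not_verifiable' regardless of its
# ratingValue; confidence 0.9 with ratingValue 0.6 maps to 'credible'; and
# confidence 0.9 with ratingValue -0.6 maps to 'not_credible'.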
def exec_req(i, req, args):
print('\n\nExecuting request %s' % (i))
ensure_req_tweet_content(req)
req['reviewFormat'] = 'schema.org'
start = time.time()
resp = requests.post(args.credpred_url, json=req,
verify=False,
timeout=args.req_timeout)
result = []
if resp.ok:
respd = resp.json()
result = [{
'tweet_id': request['tweet_id'],
'ratingValue': r['reviewRating']['ratingValue'],
'confidence': r['reviewRating']['confidence'],
'label': acred_as_coinfo_label(r)
} for request, r in zip(req['tweets'], respd)]
resp_f = 'coinform250_%s.json' % i
with open('%s/%s' % (args.outDir, resp_f), 'w') as outf:
json.dump(respd, outf)
else:
print("Failed: %s %s" % (str(resp), resp.text))
print('Processed in %ss.' % (time.time() - start))
return result
def as_acred_requests(tweets, batchSize=5):
batch = []
for i, t in enumerate(tweets):
batch.append({
'content': t['full_text'],
'tweet_id': t['id'],
'url': 'https://twitter.com/x/status/%s' % (t['id'])})
if len(batch) == batchSize:
yield {'tweets': batch,
'source': 'coinform250.json',
'batch_id': '%s-%s' % (i-batchSize, i)}
batch = []
if len(batch) > 0:
yield {'tweets': batch,
'source': 'coinform250.json',
'batch_id': '%s-%s' % (len(tweets) - len(batch), len(tweets))}
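# Editor's illustration: with 12 tweets and batchSize=5 this generator yields
# two full batches of 5 followed by a final partial batch of 2, so no tweets
# are dropped when len(tweets) is not a multiple of batchSize.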
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate tweet credibility predictions for the coinform250 dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-inputJson',
help='Path to the coinform250.json file',
required=True)
parser.add_argument(
'-batchSize', type=int, default=5,
help='Number of tweets to send per request to acred endpoint')
parser.add_argument(
'-outDir',
help='Path to a local dir where the CredibilityReviews will be stored',
required=True)
parser.add_argument(
'-credpred_url',
help='URL of the acred endpoint for the tweet credibility')
parser.add_argument(
'-credpred_id',
help='ID of the generation task')
parser.add_argument(
'-req_timeout',
type=int, default=90,
help='Seconds to wait for a response')
args = parser.parse_args()
all_start = time.time()
assert osp.isdir(osp.join(args.outDir))
assert osp.isfile(args.inputJson)
tweets = []
with open(args.inputJson) as jsonl_file:
tweets = [json.loads(line) for line in jsonl_file]
assert len(tweets) > 0, '%s' % (len(tweets))
print('Reviewing credibility of %s tweets using batchSize %s' % (len(tweets), args.batchSize))
preds = []
for i, req in enumerate(as_acred_requests(tweets, args.batchSize)):
try:
preds.extend(exec_req(i, req, args))
except Exception as e:
print('Error executing request %s %s %s' % (i, req, str(e)))
print(traceback.format_exc())
pd.DataFrame(preds).to_csv('%s/%s.csv' % (args.outDir, 'predictions'), index=False)
print('Finished in %.3fs' % (time.time() - all_start))
| 31.388889
| 98
| 0.593363
|
import argparse
import time
import json
import os
import os.path as osp
import requests
import traceback
import pandas as pd
def ensure_req_tweet_content(req):
for t in req['tweets']:
c = t['content']
if c is None:
t['content'] = ''
print('Fixed null content')
def acred_as_coinfo_label(credreview, thresh=0.4):
assert thresh >= 0.0
assert thresh <= 1.0
conf = credreview['reviewRating']['confidence']
if conf <= thresh:
return 'not_verifiable'
val = credreview['reviewRating']['ratingValue']
if val >= 0.5:
return 'credible'
if val >= 0.25:
return 'mostly_credible'
if val >= -0.25:
return 'credible_uncertain'
if val >= -0.5:
return 'credible_uncertain'
return 'not_credible'
def exec_req(i, req, args):
print('\n\nExecuting request %s' % (i))
ensure_req_tweet_content(req)
req['reviewFormat'] = 'schema.org'
start = time.time()
resp = requests.post(args.credpred_url, json=req,
verify=False,
timeout=args.req_timeout)
result = []
if resp.ok:
respd = resp.json()
result = [{
'tweet_id': request['tweet_id'],
'ratingValue': r['reviewRating']['ratingValue'],
'confidence': r['reviewRating']['confidence'],
'label': acred_as_coinfo_label(r)
} for request, r in zip(req['tweets'], respd)]
resp_f = 'coinform250_%s.json' % i
with open('%s/%s' % (args.outDir, resp_f), 'w') as outf:
json.dump(respd, outf)
else:
print("Failed: %s %s" % (str(resp), resp.text))
print('Processed in %ss.' % (time.time() - start))
return result
def as_acred_requests(tweets, batchSize=5):
batch = []
for i, t in enumerate(tweets):
batch.append({
'content': t['full_text'],
'tweet_id': t['id'],
'url': 'https://twitter.com/x/status/%s' % (t['id'])})
if len(batch) == batchSize:
yield {'tweets': batch,
'source': 'coinform250.json',
'batch_id': '%s-%s' % (i-batchSize, i)}
batch = []
if len(batch) > 0:
yield {'tweets': batch,
'source': 'coinform250.json',
'batch_id': '%s-%s' % (len(tweets) - len(batch), len(tweets))}
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Generate tweet credibility predictions for a dir with requests',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-inputJson',
help='Path to the coinform250.json file',
required=True)
parser.add_argument(
'-batchSize', type=int, default=5,
help='Number of tweets to send per request to acred endpoint')
parser.add_argument(
'-outDir',
help='Path to a local dir where the CredibilityReviews will be stored',
required=True)
parser.add_argument(
'-credpred_url',
help='URL of the acred endpoint for the tweet credibility')
parser.add_argument(
'-credpred_id',
help='ID of the generation task')
parser.add_argument(
'-req_timeout',
type=int, default=90,
help='Seconds to wait for a response')
args = parser.parse_args()
all_start = time.time()
assert osp.isdir(osp.join(args.outDir))
assert osp.isfile(args.inputJson)
tweets = []
with open(args.inputJson) as jsonl_file:
tweets = [json.loads(line) for line in jsonl_file]
assert len(tweets) > 0, '%s' % (len(tweets))
print('Reviewing credibility of %s tweets using batchSize %s' % (len(tweets), args.batchSize))
preds = []
for i, req in enumerate(as_acred_requests(tweets, args.batchSize)):
try:
preds.extend(exec_req(i, req, args))
except Exception as e:
print('Error executing request %s %s %s' % (i, req, str(e)))
print(traceback.format_exc())
pd.DataFrame(preds).to_csv('%s/%s.csv' % (args.outDir, 'predictions'), index=False)
print('Finished in %.3fs' % (time.time() - all_start))
| true
| true
|
f70cc5a478b36b0bfb1fa3944bb0bc5dcb9f1b14
| 2,147
|
py
|
Python
|
bin/plotBandsWithOam.py
|
houchen-li/LhcVaspTools
|
ad7581cdfcd83741c6923b11f4d3257ee2494ce8
|
[
"MIT"
] | null | null | null |
bin/plotBandsWithOam.py
|
houchen-li/LhcVaspTools
|
ad7581cdfcd83741c6923b11f4d3257ee2494ce8
|
[
"MIT"
] | null | null | null |
bin/plotBandsWithOam.py
|
houchen-li/LhcVaspTools
|
ad7581cdfcd83741c6923b11f4d3257ee2494ce8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
from LhcVaspTools.BasicUtils import readDataFromJson
from LhcVaspTools.OamExts import EnergyBandsWithOam
def parseArgv() -> argparse.Namespace:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="This script is used to plot bands")
parser.add_argument("input_file_name", metavar='INPUT_FILE_NAME',
nargs="?", type=str, help="input hdf5 file.")
parser.add_argument("-o", "--output-file", nargs="?", type=str,
dest="output_file_name", required=True, help="output figure file.")
parser.add_argument('-c', '--component', nargs="?", type=str,
choices=['Lx', 'Ly', 'Lz'], dest='component', required=True,
help='the OAM component to plot.')
parser.add_argument('-bf', '--band-indices-file', nargs='?', type=str,
dest='band_indices_file_name', help='band indices file name.')
parser.add_argument('-xl', '--xlim', nargs=2, type=float,
dest='xlim', help='xlim for the bands plot.')
parser.add_argument('-yl', '--ylim', nargs=2, type=float,
dest='ylim', default=[-2., 1.], help='ylim for the bands plot.')
options: argparse.Namespace = parser.parse_args()
return options
def main() -> int:
options: argparse.Namespace = parseArgv()
input_file_name: str = options.input_file_name
output_file_name: str = options.output_file_name
component: str = options.component
band_indices_file_name: str = options.band_indices_file_name
xlim: list = options.xlim
ylim: list = options.ylim
if band_indices_file_name is None:
band_indices: list = None
else:
band_indices: list = readDataFromJson(band_indices_file_name)
energy_bands_with_oam: EnergyBandsWithOam = EnergyBandsWithOam()
energy_bands_with_oam.readFile(input_file_name)
energy_bands_with_oam.plotFigure(output_file_name, component,
xlim=xlim, ylim=ylim, band_indices=band_indices)
return 0
if __name__ == "__main__":
main()
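# Editor's note: an illustrative command line (file names are assumptions):
#
#   python bin/plotBandsWithOam.py bands_with_oam.h5 \
#     -o bands_Lz.png -c Lz -yl -2.0 1.0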
| 43.816327
| 91
| 0.651141
|
import argparse
from LhcVaspTools.BasicUtils import readDataFromJson
from LhcVaspTools.OamExts import EnergyBandsWithOam
def parseArgv() -> argparse.Namespace:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="This script is used to plot bands")
parser.add_argument("input_file_name", metavar='INPUT_FILE_NAME',
nargs="?", type=str, help="input hdf5 file.")
parser.add_argument("-o", "--output-file", nargs="?", type=str,
dest="output_file_name", required=True, help="output figure file.")
parser.add_argument('-c', '--component', nargs="?", type=str,
choices=['Lx', 'Ly', 'Lz'], dest='component', required=True,
help='the OAM component to plot.')
parser.add_argument('-bf', '--band-indices-file', nargs='?', type=str,
dest='band_indices_file_name', help='band indices file name.')
parser.add_argument('-xl', '--xlim', nargs=2, type=float,
dest='xlim', help='xlim for the bands plot.')
parser.add_argument('-yl', '--ylim', nargs=2, type=float,
dest='ylim', default=[-2., 1.], help='ylim for the bands plot.')
options: argparse.Namespace = parser.parse_args()
return options
def main() -> int:
options: argparse.Namespace = parseArgv()
input_file_name: str = options.input_file_name
output_file_name: str = options.output_file_name
component: str = options.component
band_indices_file_name: str = options.band_indices_file_name
xlim: list = options.xlim
ylim: list = options.ylim
if band_indices_file_name is None:
band_indices: list = None
else:
band_indices: list = readDataFromJson(band_indices_file_name)
energy_bands_with_oam: EnergyBandsWithOam = EnergyBandsWithOam()
energy_bands_with_oam.readFile(input_file_name)
energy_bands_with_oam.plotFigure(output_file_name, component,
xlim=xlim, ylim=ylim, band_indices=band_indices)
return 0
if __name__ == "__main__":
main()
| true
| true
|
f70cc5d3d4ff2bd272d6b1d6d2c75dd4fb3381f8
| 83,466
|
py
|
Python
|
zerver/tests/tests.py
|
tobby2002/zulip
|
66e7c455759f9368bae16b9a604cf63f8e3524cd
|
[
"Apache-2.0"
] | 1
|
2017-07-27T19:49:12.000Z
|
2017-07-27T19:49:12.000Z
|
zerver/tests/tests.py
|
tobby2002/localzulip
|
bfedd3f5686b91a5e332c96b4102b16c4e1b6fa9
|
[
"Apache-2.0"
] | 8
|
2021-03-31T18:45:09.000Z
|
2022-03-11T23:25:59.000Z
|
zerver/tests/tests.py
|
tobby2002/zulip
|
66e7c455759f9368bae16b9a604cf63f8e3524cd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Callable, Dict, Iterable, List, Mapping, Tuple, TypeVar
from mock import patch, MagicMock
from django.http import HttpResponse
from django.test import TestCase, override_settings
from zerver.lib.test_helpers import (
queries_captured, simulated_empty_cache,
simulated_queue_client, tornado_redirected_to_list, ZulipTestCase,
most_recent_message, make_client
)
from zerver.lib.test_runner import slow
from zerver.models import UserProfile, Recipient, \
Realm, RealmAlias, UserActivity, \
get_user_profile_by_email, get_realm, \
get_client, get_stream, Message, get_unique_open_realm, \
completely_open
from zerver.lib.avatar import get_avatar_url
from zerver.lib.initial_password import initial_password
from zerver.lib.email_mirror import create_missed_message_address
from zerver.lib.actions import \
get_emails_from_user_ids, do_deactivate_user, do_reactivate_user, \
do_change_is_admin, extract_recipients, \
do_set_realm_name, do_deactivate_realm, \
do_add_subscription, do_remove_subscription, do_make_stream_private
from zerver.lib.notifications import handle_missedmessage_emails
from zerver.lib.session_user import get_session_dict_user
from zerver.middleware import is_slow_query
from zerver.worker import queue_processors
from django.conf import settings
from django.core import mail
from six import text_type
from six.moves import range
import os
import re
import sys
import time
import ujson
import random
def bail(msg):
# type: (str) -> None
print('\nERROR: %s\n' % (msg,))
sys.exit(1)
try:
settings.TEST_SUITE
except:
bail('Test suite only runs correctly with --settings=zproject.test_settings')
# Even though we don't use pygments directly in this file, we need
# this import.
try:
import pygments
except ImportError:
bail('The Pygments library is required to run the backend test suite.')
K = TypeVar('K')
V = TypeVar('V')
def find_dict(lst, k, v):
# type: (Iterable[Dict[K, V]], K, V) -> Dict[K, V]
for dct in lst:
if dct[k] == v:
return dct
raise Exception('Cannot find element in list where key %s == %s' % (k, v))
# same as in test_uploads.py
TEST_AVATAR_DIR = os.path.join(os.path.dirname(__file__), 'images')
class SlowQueryTest(TestCase):
def test_is_slow_query(self):
# type: () -> None
self.assertFalse(is_slow_query(1.1, '/some/random/url'))
self.assertTrue(is_slow_query(2, '/some/random/url'))
self.assertTrue(is_slow_query(5.1, '/activity'))
self.assertFalse(is_slow_query(2, '/activity'))
self.assertFalse(is_slow_query(2, '/json/report_error'))
self.assertFalse(is_slow_query(2, '/api/v1/deployments/report_error'))
self.assertFalse(is_slow_query(2, '/realm_activity/whatever'))
self.assertFalse(is_slow_query(2, '/user_activity/whatever'))
self.assertFalse(is_slow_query(9, '/accounts/webathena_kerberos_login/'))
self.assertTrue(is_slow_query(11, '/accounts/webathena_kerberos_login/'))
class ModelTest(TestCase):
def test_miscellaneous_things(self):
# type: () -> None
'''
This is a kitchen sink test that is designed simply to get
test coverage up to 100% for models.py.
'''
client = make_client('some_client')
self.assertEqual(str(client), u'<Client: some_client>')
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, email, new_realm_name):
# type: (text_type, text_type) -> None
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_do_set_realm_name_caching(self):
# type: () -> None
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip.com')
new_name = 'Zed You Elle Eye Pea'
do_set_realm_name(realm, new_name)
self.assertEqual(get_realm(realm.domain).name, new_name)
self.assert_user_profile_cache_gets_new_name('hamlet@zulip.com', new_name)
def test_do_set_realm_name_events(self):
# type: () -> None
realm = get_realm('zulip.com')
new_name = 'Puliz'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_name(realm, new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type = 'realm',
op = 'update',
property = 'name',
value = new_name,
))
def test_update_realm_api(self):
# type: () -> None
new_name = 'Zulip: Worldwide Exporter of APIs'
email = 'cordelia@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, True)
def set_up_db(attr, value):
# type: (str, Any) -> None
realm = get_realm('zulip.com')
setattr(realm, attr, value)
realm.save()
def update_with_api(**kwarg):
# type: (**Any) -> Realm
params = {k: ujson.dumps(v) for k, v in kwarg.items()}
result = self.client_patch('/json/realm', params)
self.assert_json_success(result)
return get_realm('zulip.com') # refresh data
# name
realm = update_with_api(name=new_name)
self.assertEqual(realm.name, new_name)
# restricted
set_up_db('restricted_to_domain', False)
realm = update_with_api(restricted_to_domain=True)
self.assertEqual(realm.restricted_to_domain, True)
realm = update_with_api(restricted_to_domain=False)
self.assertEqual(realm.restricted_to_domain, False)
# invite_required
set_up_db('invite_required', False)
realm = update_with_api(invite_required=True)
self.assertEqual(realm.invite_required, True)
realm = update_with_api(invite_required=False)
self.assertEqual(realm.invite_required, False)
# invite_by_admins_only
set_up_db('invite_by_admins_only', False)
realm = update_with_api(invite_by_admins_only=True)
self.assertEqual(realm.invite_by_admins_only, True)
realm = update_with_api(invite_by_admins_only=False)
self.assertEqual(realm.invite_by_admins_only, False)
# create_stream_by_admins_only
set_up_db('create_stream_by_admins_only', False)
realm = update_with_api(create_stream_by_admins_only=True)
self.assertEqual(realm.create_stream_by_admins_only, True)
realm = update_with_api(create_stream_by_admins_only=False)
self.assertEqual(realm.create_stream_by_admins_only, False)
# allow_message_editing
set_up_db('allow_message_editing', False)
set_up_db('message_content_edit_limit_seconds', 0)
realm = update_with_api(allow_message_editing=True,
message_content_edit_limit_seconds=100)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = update_with_api(allow_message_editing=False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = update_with_api(message_content_edit_limit_seconds=200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
def test_admin_restrictions_for_changing_realm_name(self):
# type: () -> None
new_name = 'Mice will play while the cat is away'
email = 'othello@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, False)
req = dict(name=ujson.dumps(new_name))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be a realm administrator')
def test_do_deactivate_realm(self):
# type: () -> None
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip.com')
do_deactivate_realm(realm)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.realm.deactivated)
def test_do_set_realm_default_language(self):
# type: () -> None
new_lang = "de"
realm = get_realm('zulip.com')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
email = 'iago@zulip.com'
self.login(email)
req = dict(default_language=ujson.dumps(new_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip.com')
self.assertEqual(realm.default_language, new_lang)
# Test setting zh_CN, we set zh_HANS instead of zh_CN in db
chinese = "zh_CN"
simplified_chinese = "zh_HANS"
req = dict(default_language=ujson.dumps(chinese))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip.com')
self.assertEqual(realm.default_language, simplified_chinese)
# Test to make sure that when invalid languages are passed
# as the default realm language, correct validation error is
# raised and the invalid language is not saved in db
invalid_lang = "invalid_lang"
req = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
realm = get_realm('zulip.com')
self.assertNotEqual(realm.default_language, invalid_lang)
class PermissionTest(ZulipTestCase):
def test_get_admin_users(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_profile, False)
admin_users = user_profile.realm.get_admin_users()
self.assertFalse(user_profile in admin_users)
do_change_is_admin(user_profile, True)
admin_users = user_profile.realm.get_admin_users()
self.assertTrue(user_profile in admin_users)
def test_updating_non_existent_user(self):
# type: () -> None
self.login('hamlet@zulip.com')
admin = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(admin, True)
result = self.client_patch('/json/users/nonexistentuser@zulip.com', {})
self.assert_json_error(result, 'No such user')
def test_admin_api(self):
# type: () -> None
self.login('hamlet@zulip.com')
admin = get_user_profile_by_email('hamlet@zulip.com')
user = get_user_profile_by_email('othello@zulip.com')
realm = admin.realm
do_change_is_admin(admin, True)
# Make sure we see is_admin flag in /json/users
result = self.client_get('/json/users')
self.assert_json_success(result)
members = ujson.loads(result.content)['members']
hamlet = find_dict(members, 'email', 'hamlet@zulip.com')
self.assertTrue(hamlet['is_admin'])
othello = find_dict(members, 'email', 'othello@zulip.com')
self.assertFalse(othello['is_admin'])
# Giveth
req = dict(is_admin=ujson.dumps(True))
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_patch('/json/users/othello@zulip.com', req)
self.assert_json_success(result)
admin_users = realm.get_admin_users()
self.assertTrue(user in admin_users)
person = events[0]['event']['person']
self.assertEqual(person['email'], 'othello@zulip.com')
self.assertEqual(person['is_admin'], True)
# Taketh away
req = dict(is_admin=ujson.dumps(False))
events = []
with tornado_redirected_to_list(events):
result = self.client_patch('/json/users/othello@zulip.com', req)
self.assert_json_success(result)
admin_users = realm.get_admin_users()
self.assertFalse(user in admin_users)
person = events[0]['event']['person']
self.assertEqual(person['email'], 'othello@zulip.com')
self.assertEqual(person['is_admin'], False)
# Make sure only admins can patch other user's info.
self.login('othello@zulip.com')
result = self.client_patch('/json/users/hamlet@zulip.com', req)
self.assert_json_error(result, 'Insufficient permission')
class ZephyrTest(ZulipTestCase):
def test_webathena_kerberos_login(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
def post(**kwargs):
# type: (**Any) -> HttpResponse
params = {k: ujson.dumps(v) for k, v in kwargs.items()}
return self.client_post('/accounts/webathena_kerberos_login/', params)
result = post()
self.assert_json_error(result, 'Could not find Kerberos credential')
result = post(cred='whatever')
self.assert_json_error(result, 'Webathena login not enabled')
email = 'starnine@mit.edu'
self.login(email)
def ccache_mock(**kwargs):
# type: (**Any) -> Any
return patch('zerver.views.zephyr.make_ccache', **kwargs)
def ssh_mock(**kwargs):
# type: (**Any) -> Any
return patch('zerver.views.zephyr.subprocess.check_call', **kwargs)
def mirror_mock():
# type: () -> Any
return self.settings(PERSONAL_ZMIRROR_SERVER='server')
def logging_mock():
# type: () -> Any
return patch('logging.exception')
cred = dict(cname=dict(nameString=['starnine']))
with ccache_mock(side_effect=KeyError('foo')):
result = post(cred=cred)
self.assert_json_error(result, 'Invalid Kerberos cache')
with \
ccache_mock(return_value=b'1234'), \
ssh_mock(side_effect=KeyError('foo')), \
logging_mock() as log:
result = post(cred=cred)
self.assert_json_error(result, 'We were unable to setup mirroring for you')
log.assert_called_with("Error updating the user's ccache")
with ccache_mock(return_value=b'1234'), mirror_mock(), ssh_mock() as ssh:
result = post(cred=cred)
self.assert_json_success(result)
ssh.assert_called_with([
'ssh',
'server',
'--',
'/home/zulip/zulip/bots/process_ccache',
'starnine',
get_user_profile_by_email(email).api_key,
'MTIzNA=='])
class AdminCreateUserTest(ZulipTestCase):
def test_create_user_backend(self):
# type: () -> None
# This test should give us complete coverage on
# create_user_backend. It mostly exercises error
# conditions, and it also does a basic test of the success
# path.
admin_email = 'hamlet@zulip.com'
self.login(admin_email)
admin = get_user_profile_by_email(admin_email)
do_change_is_admin(admin, True)
result = self.client_put("/json/users", dict())
self.assert_json_error(result, "Missing 'email' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
)
)
self.assert_json_error(result, "Missing 'password' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
)
)
self.assert_json_error(result, "Missing 'full_name' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
full_name='Romeo Montague',
)
)
self.assert_json_error(result, "Missing 'short_name' argument")
result = self.client_put("/json/users", dict(
email='broken',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
)
self.assert_json_error(result, "Bad name or username")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
)
self.assert_json_error(result,
"Email 'romeo@not-zulip.com' does not belong to domain 'zulip.com'")
RealmAlias.objects.create(realm=get_realm('zulip.com'), domain='zulip.net')
# HAPPY PATH STARTS HERE
valid_params = dict(
email='romeo@zulip.net',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
result = self.client_put("/json/users", valid_params)
self.assert_json_success(result)
new_user = get_user_profile_by_email('romeo@zulip.net')
self.assertEqual(new_user.full_name, 'Romeo Montague')
self.assertEqual(new_user.short_name, 'Romeo')
# One more error condition to test--we can't create
# the same user twice.
result = self.client_put("/json/users", valid_params)
self.assert_json_error(result,
"Email 'romeo@zulip.net' already in use")
class WorkerTest(TestCase):
class FakeClient(object):
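# Editor's note: a minimal in-memory stand-in for the real queue client.
# Consumers are registered per queue name and start_consuming() replays the
# queued events synchronously, so worker behaviour can be exercised without a
# real queue backend.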
def __init__(self):
# type: () -> None
self.consumers = {} # type: Dict[str, Callable]
self.queue = [] # type: List[Tuple[str, Dict[str, Any]]]
def register_json_consumer(self, queue_name, callback):
# type: (str, Callable) -> None
self.consumers[queue_name] = callback
def start_consuming(self):
# type: () -> None
for queue_name, data in self.queue:
callback = self.consumers[queue_name]
callback(data)
def test_UserActivityWorker(self):
# type: () -> None
fake_client = self.FakeClient()
user = get_user_profile_by_email('hamlet@zulip.com')
UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
).delete()
data = dict(
user_profile_id = user.id,
client = 'ios',
time = time.time(),
query = 'send_message'
)
fake_client.queue.append(('user_activity', data))
with simulated_queue_client(lambda: fake_client):
worker = queue_processors.UserActivityWorker()
worker.setup()
worker.start()
activity_records = UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
)
# assertEqual rather than assertTrue, so the expected value is actually checked
self.assertEqual(len(activity_records), 1)
self.assertEqual(activity_records[0].count, 1)
def test_error_handling(self):
# type: () -> None
processed = []
@queue_processors.assign_queue('unreliable_worker')
class UnreliableWorker(queue_processors.QueueProcessingWorker):
def consume(self, data):
# type: (Mapping[str, Any]) -> None
if data["type"] == 'unexpected behaviour':
raise Exception('Worker task not performing as expected!')
processed.append(data["type"])
def _log_problem(self):
# type: () -> None
# keep the tests quiet
pass
fake_client = self.FakeClient()
for msg in ['good', 'fine', 'unexpected behaviour', 'back to normal']:
fake_client.queue.append(('unreliable_worker', {'type': msg}))
fn = os.path.join(settings.QUEUE_ERROR_DIR, 'unreliable_worker.errors')
try:
os.remove(fn)
except OSError:
pass
with simulated_queue_client(lambda: fake_client):
worker = UnreliableWorker()
worker.setup()
worker.start()
self.assertEqual(processed, ['good', 'fine', 'back to normal'])
line = open(fn).readline().strip()
event = ujson.loads(line.split('\t')[1])
self.assertEqual(event["type"], 'unexpected behaviour')
def test_worker_noname(self):
# type: () -> None
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(TestWorker, self).__init__()
def consume(self, data):
# type: (Mapping[str, Any]) -> None
pass
with self.assertRaises(queue_processors.WorkerDeclarationException):
TestWorker()
def test_worker_noconsume(self):
# type: () -> None
@queue_processors.assign_queue('test_worker')
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
# type: () -> None
super(TestWorker, self).__init__()
with self.assertRaises(queue_processors.WorkerDeclarationException):
worker = TestWorker()
worker.consume({})
class DocPageTest(ZulipTestCase):
def _test(self, url, expected_content):
# type: (str, str) -> None
result = self.client_get(url)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
def test_doc_endpoints(self):
# type: () -> None
self._test('/api/', 'We hear you like APIs')
self._test('/api/endpoints/', 'pre-built API bindings for Python')
self._test('/apps/', 'Appsolutely')
self._test('/features/', 'Talk about multiple topics at once')
self._test('/hello/', 'workplace chat that actually improves your productivity')
self._test('/integrations/', 'require creating a Zulip bot')
self._test('/login/', '(Normal users)')
self._test('/register/', 'get started')
result = self.client_get('/new-user/')
self.assertEqual(result.status_code, 301)
self.assertIn('hello', result['Location'])
result = self.client_get('/robots.txt')
self.assertEqual(result.status_code, 301)
self.assertIn('static/robots.txt', result['Location'])
result = self.client_get('/static/robots.txt')
self.assertEqual(result.status_code, 200)
self.assertIn(
'Disallow: /',
''.join(str(x) for x in list(result.streaming_content))
)
class UserProfileTest(TestCase):
def test_get_emails_from_user_ids(self):
# type: () -> None
hamlet = get_user_profile_by_email('hamlet@zulip.com')
othello = get_user_profile_by_email('othello@zulip.com')
dct = get_emails_from_user_ids([hamlet.id, othello.id])
self.assertEqual(dct[hamlet.id], 'hamlet@zulip.com')
self.assertEqual(dct[othello.id], 'othello@zulip.com')
class UserChangesTest(ZulipTestCase):
def test_update_api_key(self):
# type: () -> None
email = "hamlet@zulip.com"
self.login(email)
user = get_user_profile_by_email(email)
old_api_key = user.api_key
result = self.client_post('/json/users/me/api_key/regenerate')
self.assert_json_success(result)
new_api_key = ujson.loads(result.content)['api_key']
self.assertNotEqual(old_api_key, new_api_key)
user = get_user_profile_by_email(email)
self.assertEqual(new_api_key, user.api_key)
class ActivateTest(ZulipTestCase):
def test_basics(self):
# type: () -> None
user = get_user_profile_by_email('hamlet@zulip.com')
do_deactivate_user(user)
self.assertFalse(user.is_active)
do_reactivate_user(user)
self.assertTrue(user.is_active)
def test_api(self):
# type: () -> None
admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(admin, True)
self.login('othello@zulip.com')
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/hamlet@zulip.com')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
result = self.client_post('/json/users/hamlet@zulip.com/reactivate')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
def test_api_with_nonexistent_user(self):
# type: () -> None
admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(admin, True)
self.login('othello@zulip.com')
# Can not deactivate a user with the bot api
result = self.client_delete('/json/bots/hamlet@zulip.com')
self.assert_json_error(result, 'No such bot')
# Can not deactivate a nonexistent user.
result = self.client_delete('/json/users/nonexistent@zulip.com')
self.assert_json_error(result, 'No such user')
# Can not reactivate a nonexistent user.
result = self.client_post('/json/users/nonexistent@zulip.com/reactivate')
self.assert_json_error(result, 'No such user')
def test_api_with_insufficient_permissions(self):
# type: () -> None
non_admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(non_admin, False)
self.login('othello@zulip.com')
# Can not deactivate a user with the users api
result = self.client_delete('/json/users/hamlet@zulip.com')
self.assert_json_error(result, 'Insufficient permission')
# Can not reactivate a user
result = self.client_post('/json/users/hamlet@zulip.com/reactivate')
self.assert_json_error(result, 'Insufficient permission')
class BotTest(ZulipTestCase):
def assert_num_bots_equal(self, count):
# type: (int) -> None
result = self.client_get("/json/bots")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(count, len(json['bots']))
def create_bot(self, **extras):
# type: (**Any) -> Dict[str, Any]
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
return ujson.loads(result.content)
def deactivate_bot(self):
# type: () -> None
result = self.client_delete("/json/bots/hambot-bot@zulip.com")
self.assert_json_success(result)
def test_add_bot_with_bad_username(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
bot_info = dict(
full_name='',
short_name='',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Bad name or username')
self.assert_num_bots_equal(0)
def test_add_bot(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.create_bot()
self.assert_num_bots_equal(1)
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream=None,
default_events_register_stream=None,
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
users_result = self.client_get('/json/users')
members = ujson.loads(users_result.content)['members']
bots = [m for m in members if m['email'] == 'hambot-bot@zulip.com']
self.assertEqual(len(bots), 1)
bot = bots[0]
self.assertEqual(bot['bot_owner'], 'hamlet@zulip.com')
def test_add_bot_with_username_in_use(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
bot_info = dict(
full_name='Duplicate',
short_name='hambot',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Username already in use')
def test_add_bot_with_user_avatar(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp:
self.create_bot(file=fp)
self.assert_num_bots_equal(1)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
# TODO: check img.png was uploaded properly
def test_add_bot_with_too_many_files(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp1, \
open(os.path.join(TEST_AVATAR_DIR, 'img.gif'), 'rb') as fp2:
bot_info = dict(
full_name='whatever',
short_name='whatever',
file1=fp1,
file2=fp2,
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'You may only upload one file at a time')
self.assert_num_bots_equal(0)
def test_add_bot_with_default_sending_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
def test_add_bot_with_default_sending_stream_not_subscribed(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Rome')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Rome')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Rome')
def test_bot_add_subscription(self):
# type: () -> None
"""
Calling POST /json/users/me/subscriptions should successfully add
the requested streams to the user's list of subscriptions and
generate the right number of events.
When 'principals' includes a bot, no notification message event or
invitation email is sent when add_subscriptions_backend is called
in the above API call.
"""
self.login("hamlet@zulip.com")
# Normal user i.e. not a bot.
request_data = {
'principals': '["iago@zulip.com"]'
}
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], request_data)
self.assert_json_success(result)
msg_event = [e for e in events if e['event']['type'] == 'message']
self.assert_length(msg_event, 1, exact=True) # Notification message event is sent.
# Create a bot.
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
# A bot
bot_request_data = {
'principals': '["hambot-bot@zulip.com"]'
}
events_bot = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events_bot):
result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], bot_request_data)
self.assert_json_success(result)
# No notification message event or invitation email is sent because of bot.
msg_event = [e for e in events_bot if e['event']['type'] == 'message']
self.assert_length(msg_event, 0, exact=True)
self.assertEqual(len(events_bot), len(events) - 1)
# Test runner automatically redirects all sent email to a dummy 'outbox'.
self.assertEqual(len(mail.outbox), 0)
def test_add_bot_with_default_sending_stream_private_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream='Denmark',
default_events_register_stream=None,
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_sending_stream_private_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
'default_sending_stream': 'Denmark',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_add_bot_with_default_events_register_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_events_register_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_events_register_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_events_register_stream.name, 'Denmark')
def test_add_bot_with_default_events_register_stream_private_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.create_bot(default_events_register_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_events_register_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_events_register_stream.name, 'Denmark')
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream=None,
default_events_register_stream='Denmark',
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_events_register_stream_private_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
'default_events_register_stream': 'Denmark',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_add_bot_with_default_all_public_streams(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_all_public_streams=ujson.dumps(True))
self.assert_num_bots_equal(1)
self.assertTrue(result['default_all_public_streams'])
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_all_public_streams, True)
def test_deactivate_bot(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
self.deactivate_bot()
# You can deactivate the same bot twice.
self.deactivate_bot()
self.assert_num_bots_equal(0)
def test_deactivate_bogus_bot(self):
# type: () -> None
"""Deleting a bogus bot will succeed silently."""
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
result = self.client_delete("/json/bots/bogus-bot@zulip.com")
self.assert_json_error(result, 'No such bot')
self.assert_num_bots_equal(1)
def test_bot_deactivation_attacks(self):
# type: () -> None
"""You cannot deactivate somebody else's bot."""
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
# Have Othello try to deactivate both Hamlet and
# Hamlet's bot.
self.login("othello@zulip.com")
# Can not deactivate a user as a bot
result = self.client_delete("/json/bots/hamlet@zulip.com")
self.assert_json_error(result, 'No such bot')
result = self.client_delete("/json/bots/hambot-bot@zulip.com")
self.assert_json_error(result, 'Insufficient permission')
# But we don't actually deactivate the other person's bot.
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(1)
# Can not deactivate a bot as a user
result = self.client_delete("/json/users/hambot-bot@zulip.com")
self.assert_json_error(result, 'No such user')
self.assert_num_bots_equal(1)
def test_bot_permissions(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
# Have Othello try to mess with Hamlet's bots.
self.login("othello@zulip.com")
result = self.client_post("/json/bots/hambot-bot@zulip.com/api_key/regenerate")
self.assert_json_error(result, 'Insufficient permission')
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def get_bot(self):
# type: () -> Dict[str, Any]
result = self.client_get("/json/bots")
bots = ujson.loads(result.content)['bots']
return bots[0]
def test_update_api_key(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.create_bot()
bot = self.get_bot()
old_api_key = bot['api_key']
result = self.client_post('/json/bots/hambot-bot@zulip.com/api_key/regenerate')
self.assert_json_success(result)
new_api_key = ujson.loads(result.content)['api_key']
self.assertNotEqual(old_api_key, new_api_key)
bot = self.get_bot()
self.assertEqual(new_api_key, bot['api_key'])
def test_update_api_key_for_invalid_user(self):
# type: () -> None
self.login("hamlet@zulip.com")
result = self.client_post('/json/bots/nonexistentuser@zulip.com/api_key/regenerate')
self.assert_json_error(result, 'No such user')
def test_patch_bot_full_name(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
full_name = ujson.loads(result.content)['full_name']
self.assertEqual('Fred', full_name)
bot = self.get_bot()
self.assertEqual('Fred', bot['full_name'])
def test_patch_bot_avatar(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_GRAVATAR)
# Try error case first (too many files):
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp1, \
open(os.path.join(TEST_AVATAR_DIR, 'img.gif'), 'rb') as fp2:
result = self.client_patch_multipart(
'/json/bots/hambot-bot@zulip.com',
dict(file1=fp1, file2=fp2))
self.assert_json_error(result, 'You may only upload one file at a time')
# HAPPY PATH
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp:
result = self.client_patch_multipart(
'/json/bots/hambot-bot@zulip.com',
dict(file=fp))
self.assert_json_success(result)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
# TODO: check img.png was uploaded properly
def test_patch_bot_to_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Denmark', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_sending_stream'])
def test_patch_bot_to_stream_not_subscribed(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Rome',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Rome', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Rome', bot['default_sending_stream'])
def test_patch_bot_to_stream_none(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': '',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual(None, default_sending_stream)
bot = self.get_bot()
self.assertEqual(None, bot['default_sending_stream'])
def test_patch_bot_to_stream_private_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Denmark', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_sending_stream'])
def test_patch_bot_to_stream_private_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_patch_bot_to_stream_not_found(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'missing',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such stream \'missing\'')
def test_patch_bot_events_register_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual('Denmark', default_events_register_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_allowed(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual('Denmark', default_events_register_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_denied(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_patch_bot_events_register_stream_none(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': '',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual(None, default_events_register_stream)
bot = self.get_bot()
self.assertEqual(None, bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_not_found(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'missing',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such stream \'missing\'')
def test_patch_bot_default_all_public_streams_true(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_all_public_streams': ujson.dumps(True),
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_all_public_streams']
self.assertEqual(default_events_register_stream, True)
bot = self.get_bot()
self.assertEqual(bot['default_all_public_streams'], True)
def test_patch_bot_default_all_public_streams_false(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_all_public_streams': ujson.dumps(False),
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_all_public_streams']
self.assertEqual(default_events_register_stream, False)
bot = self.get_bot()
self.assertEqual(bot['default_all_public_streams'], False)
def test_patch_bot_via_post(self):
# type: () -> None
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'full_name': 'Fred',
'method': 'PATCH'
}
result = self.client_post("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
full_name = ujson.loads(result.content)['full_name']
self.assertEqual('Fred', full_name)
bot = self.get_bot()
self.assertEqual('Fred', bot['full_name'])
def test_patch_bogus_bot(self):
# type: () -> None
"""Deleting a bogus bot will succeed silently."""
self.login("hamlet@zulip.com")
self.create_bot()
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/nonexistent-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such user')
self.assert_num_bots_equal(1)
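# Tests for /json/settings/change plus the boolean notification/UI toggle
# endpoints and the default-language setting.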
class ChangeSettingsTest(ZulipTestCase):
def check_well_formed_change_settings_response(self, result):
# type: (Dict[str, Any]) -> None
self.assertIn("full_name", result)
def check_for_toggle_param(self, pattern, param):
# type: (str, str) -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
json_result = self.client_post(pattern,
{param: ujson.dumps(True)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertEqual(getattr(user_profile, param), True)
json_result = self.client_post(pattern,
{param: ujson.dumps(False)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertEqual(getattr(user_profile, param), False)
def test_successful_change_settings(self):
# type: () -> None
"""
A call to /json/settings/change with valid parameters changes the user's
settings correctly and returns correct values.
"""
self.login("hamlet@zulip.com")
json_result = self.client_post("/json/settings/change",
dict(
full_name='Foo Bar',
old_password=initial_password('hamlet@zulip.com'),
new_password='foobar1',
confirm_password='foobar1',
)
)
self.assert_json_success(json_result)
result = ujson.loads(json_result.content)
self.check_well_formed_change_settings_response(result)
self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").
full_name, "Foo Bar")
self.client_post('/accounts/logout/')
self.login("hamlet@zulip.com", "foobar1")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_illegal_name_changes(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email(email)
full_name = user.full_name
with self.settings(NAME_CHANGES_DISABLED=True):
json_result = self.client_post("/json/settings/change",
dict(full_name='Foo Bar'))
# We actually fail silently here, since this only happens if
# somebody is trying to game our API, and there's no reason to
# give them the courtesy of an error reason.
self.assert_json_success(json_result)
user = get_user_profile_by_email(email)
self.assertEqual(user.full_name, full_name)
# Now try a too-long name
json_result = self.client_post("/json/settings/change",
dict(full_name='x' * 1000))
self.assert_json_error(json_result, 'Name too long!')
# This is basically a don't-explode test.
def test_notify_settings(self):
# type: () -> None
self.check_for_toggle_param("/json/notify_settings/change", "enable_desktop_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_stream_desktop_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_stream_sounds")
self.check_for_toggle_param("/json/notify_settings/change", "enable_sounds")
self.check_for_toggle_param("/json/notify_settings/change", "enable_offline_email_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_offline_push_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_digest_emails")
def test_ui_settings(self):
# type: () -> None
self.check_for_toggle_param("/json/ui_settings/change", "autoscroll_forever")
self.check_for_toggle_param("/json/ui_settings/change", "default_desktop_notifications")
def test_toggling_left_side_userlist(self):
# type: () -> None
self.check_for_toggle_param("/json/left_side_userlist", "left_side_userlist")
def test_time_setting(self):
# type: () -> None
self.check_for_toggle_param("/json/time_setting", "twenty_four_hour_time")
def test_enter_sends_setting(self):
# type: () -> None
self.check_for_toggle_param('/json/users/me/enter-sends', "enter_sends")
def test_mismatching_passwords(self):
# type: () -> None
"""
new_password and confirm_password must match
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
new_password="mismatched_password",
confirm_password="not_the_same",
)
)
self.assert_json_error(result,
"New password must match confirmation password!")
def test_wrong_old_password(self):
# type: () -> None
"""
        The old password must be correct to change the password
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
old_password='bad_password',
new_password="ignored",
confirm_password="ignored",
)
)
self.assert_json_error(result, "Wrong password!")
def test_changing_nothing_returns_error(self):
# type: () -> None
"""
We need to supply at least one non-empty parameter
to this API, or it should fail. (Eventually, we should
probably use a patch interface for these changes.)
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
old_password='ignored',
)
)
self.assert_json_error(result, "No new data supplied")
def test_change_default_language(self):
# type: () -> None
"""
Test changing the default language of the user.
"""
email = "hamlet@zulip.com"
self.login(email)
german = "de"
data = dict(default_language=ujson.dumps(german))
result = self.client_post("/json/language_setting", data)
self.assert_json_success(result)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, german)
# Test to make sure invalid languages are not accepted
# and saved in the db.
invalid_lang = "invalid_lang"
data = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_post("/json/language_setting", data)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
user_profile = get_user_profile_by_email(email)
self.assertNotEqual(user_profile.default_language, invalid_lang)
class GetProfileTest(ZulipTestCase):
def common_update_pointer(self, email, pointer):
# type: (text_type, int) -> None
self.login(email)
result = self.client_put("/json/users/me/pointer", {"pointer": pointer})
self.assert_json_success(result)
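    # Helper: send a stream message as `email`, GET /api/v1/users/me, check that
    # max_message_id matches the newest message, and return the parsed JSON.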
def common_get_profile(self, email):
# type: (str) -> Dict[text_type, Any]
user_profile = get_user_profile_by_email(email)
self.send_message(email, "Verona", Recipient.STREAM, "hello")
result = self.client_get("/api/v1/users/me", **self.api_auth(email))
max_id = most_recent_message(user_profile).id
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("client_id", json)
self.assertIn("max_message_id", json)
self.assertIn("pointer", json)
self.assertEqual(json["max_message_id"], max_id)
return json
def test_get_pointer(self):
# type: () -> None
email = "hamlet@zulip.com"
self.login(email)
result = self.client_get("/json/users/me/pointer")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("pointer", json)
def test_cache_behavior(self):
# type: () -> None
with queries_captured() as queries:
with simulated_empty_cache() as cache_queries:
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assert_length(queries, 1)
self.assert_length(cache_queries, 1, exact=True)
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
def test_api_get_empty_profile(self):
# type: () -> None
"""
Ensure GET /users/me returns a max message id and returns successfully
"""
json = self.common_get_profile("othello@zulip.com")
self.assertEqual(json["pointer"], -1)
def test_profile_with_pointer(self):
# type: () -> None
"""
Ensure GET /users/me returns a proper pointer id after the pointer is updated
"""
id1 = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
id2 = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
json = self.common_get_profile("hamlet@zulip.com")
self.common_update_pointer("hamlet@zulip.com", id2)
json = self.common_get_profile("hamlet@zulip.com")
self.assertEqual(json["pointer"], id2)
self.common_update_pointer("hamlet@zulip.com", id1)
json = self.common_get_profile("hamlet@zulip.com")
self.assertEqual(json["pointer"], id2) # pointer does not move backwards
result = self.client_put("/json/users/me/pointer", {"pointer": 99999999})
self.assert_json_error(result, "Invalid message ID")
def test_get_all_profiles_avatar_urls(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
result = self.client_get("/api/v1/users", **self.api_auth('hamlet@zulip.com'))
self.assert_json_success(result)
json = ujson.loads(result.content)
for user in json['members']:
if user['email'] == 'hamlet@zulip.com':
self.assertEqual(
user['avatar_url'],
get_avatar_url(user_profile.avatar_source, user_profile.email),
)
class HomeTest(ZulipTestCase):
@slow('big method')
def test_home(self):
# type: () -> None
# Keep this list sorted!!!
html_bits = [
'Compose your message here...',
'Exclude messages with topic',
'Get started',
'Keyboard shortcuts',
'Loading...',
'Manage Streams',
'Narrow by topic',
'Next message',
'SHARE THE LOVE',
'Search streams',
'Welcome to Zulip',
'pygments.css',
'var page_params',
]
# Keep this list sorted!!!
expected_keys = [
"alert_words",
"autoscroll_forever",
"avatar_url",
"bot_list",
"can_create_streams",
"cross_realm_user_emails",
"debug_mode",
"default_desktop_notifications",
"default_language",
"default_language_name",
"desktop_notifications_enabled",
"development_environment",
"domain",
"email",
"email_dict",
"enable_digest_emails",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"enter_sends",
"event_queue_id",
"first_in_realm",
"fullname",
"furthest_read_time",
"has_mobile_devices",
"have_initial_messages",
"initial_pointer",
"initial_presences",
"initial_servertime",
"is_admin",
"is_zephyr_mirror_realm",
"language_list",
"language_list_dbl_col",
"last_event_id",
"left_side_userlist",
"login_page",
"mandatory_topics",
"max_message_id",
"maxfilesize",
"muted_topics",
"name_changes_disabled",
"narrow",
"narrow_stream",
"needs_tutorial",
"neversubbed_info",
"notifications_stream",
"password_auth_enabled",
"people_list",
"poll_timeout",
"presence_disabled",
"product_name",
"prompt_for_invites",
"realm_allow_message_editing",
"realm_create_stream_by_admins_only",
"realm_default_language",
"realm_default_streams",
"realm_emoji",
"realm_filters",
"realm_invite_by_admins_only",
"realm_invite_required",
"realm_message_content_edit_limit_seconds",
"realm_name",
"realm_restricted_to_domain",
"realm_uri",
"referrals",
"save_stacktraces",
"server_generation",
"server_uri",
"share_the_love",
"show_digest_email",
"sounds_enabled",
"stream_desktop_notifications_enabled",
"stream_sounds_enabled",
"subbed_info",
"test_suite",
"twenty_four_hour_time",
"unread_count",
"unsubbed_info",
"user_id",
"zulip_version",
]
email = "hamlet@zulip.com"
# Verify fails if logged-out
result = self.client_get('/')
self.assertEqual(result.status_code, 302)
# Verify succeeds once logged-in
self.login(email)
result = self._get_home_page(stream='Denmark')
html = result.content.decode('utf-8')
for html_bit in html_bits:
if html_bit not in html:
self.fail('%s not in result' % (html_bit,))
page_params = self._get_page_params(result)
actual_keys = sorted([str(k) for k in page_params.keys()])
self.assertEqual(actual_keys, expected_keys)
# TODO: Inspect the page_params data further.
# print(ujson.dumps(page_params, indent=2))
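    # Helper: load / with the Tornado event-queue calls patched out so the home
    # page renders without a running queue server.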
def _get_home_page(self, **kwargs):
# type: (**Any) -> HttpResponse
with \
patch('zerver.lib.actions.request_event_queue', return_value=42), \
patch('zerver.lib.actions.get_user_events', return_value=[]):
result = self.client_get('/', dict(**kwargs))
return result
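    # Helper: extract the `var page_params = {...};` assignment from the rendered
    # page and return it parsed as a dict.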
def _get_page_params(self, result):
# type: (HttpResponse) -> Dict[str, Any]
html = result.content.decode('utf-8')
lines = html.split('\n')
page_params_line = [l for l in lines if l.startswith('var page_params')][0]
page_params_json = page_params_line.split(' = ')[1].rstrip(';')
page_params = ujson.loads(page_params_json)
return page_params
def _sanity_check(self, result):
# type: (HttpResponse) -> None
'''
Use this for tests that are geared toward specific edge cases, but
which still want the home page to load properly.
'''
html = result.content.decode('utf-8')
if 'Compose your message' not in html:
self.fail('Home page probably did not load.')
def test_terms_of_service(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
with \
self.settings(TERMS_OF_SERVICE='whatever'), \
self.settings(TOS_VERSION='99.99'):
result = self.client_get('/', dict(stream='Denmark'))
html = result.content.decode('utf-8')
self.assertIn('There is a new terms of service', html)
def test_bad_narrow(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
with patch('logging.exception') as mock:
result = self._get_home_page(stream='Invalid Stream')
mock.assert_called_once_with('Narrow parsing')
self._sanity_check(result)
def test_bad_pointer(self):
# type: () -> None
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
user_profile.pointer = 999999
user_profile.save()
self.login(email)
with patch('logging.warning') as mock:
result = self._get_home_page()
mock.assert_called_once_with('hamlet@zulip.com has invalid pointer 999999')
self._sanity_check(result)
def test_topic_narrow(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
result = self._get_home_page(stream='Denmark', topic='lunch')
self._sanity_check(result)
html = result.content.decode('utf-8')
self.assertIn('lunch', html)
def test_notifications_stream(self):
# type: () -> None
email = 'hamlet@zulip.com'
realm = get_realm('zulip.com')
realm.notifications_stream = get_stream('Denmark', realm)
realm.save()
self.login(email)
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(page_params['notifications_stream'], 'Denmark')
def test_new_stream(self):
# type: () -> None
email = 'hamlet@zulip.com'
stream_name = 'New stream'
self.subscribe_to_stream(email, stream_name)
self.login(email)
result = self._get_home_page(stream=stream_name)
page_params = self._get_page_params(result)
self.assertEqual(page_params['narrow_stream'], stream_name)
self.assertEqual(page_params['narrow'], [dict(operator='stream', operand=stream_name)])
self.assertEqual(page_params['initial_pointer'], -1)
self.assertEqual(page_params['max_message_id'], -1)
self.assertEqual(page_params['have_initial_messages'], False)
def test_invites_by_admins_only(self):
# type: () -> None
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
realm.invite_by_admins_only = True
realm.save()
self.login(email)
self.assertFalse(user_profile.is_realm_admin)
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertNotIn('Invite more users', html)
user_profile.is_realm_admin = True
user_profile.save()
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertIn('Invite more users', html)
class MutedTopicsTests(ZulipTestCase):
def test_json_set(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
url = '/json/set_muted_topics'
data = {'muted_topics': '[["stream", "topic"]]'}
result = self.client_post(url, data)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(ujson.loads(user.muted_topics), [["stream", "topic"]])
url = '/json/set_muted_topics'
data = {'muted_topics': '[["stream2", "topic2"]]'}
result = self.client_post(url, data)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(ujson.loads(user.muted_topics), [["stream2", "topic2"]])
class ExtractedRecipientsTest(TestCase):
def test_extract_recipients(self):
# type: () -> None
# JSON list w/dups, empties, and trailing whitespace
s = ujson.dumps([' alice@zulip.com ', ' bob@zulip.com ', ' ', 'bob@zulip.com'])
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
# simple string with one name
s = 'alice@zulip.com '
self.assertEqual(extract_recipients(s), ['alice@zulip.com'])
# JSON-encoded string
s = '"alice@zulip.com"'
self.assertEqual(extract_recipients(s), ['alice@zulip.com'])
# bare comma-delimited string
s = 'bob@zulip.com, alice@zulip.com'
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
# JSON-encoded, comma-delimited string
s = '"bob@zulip.com,alice@zulip.com"'
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
# TODO: This class currently only tests the non-default
# SEND_MISSED_MESSAGE_EMAILS_AS_USER=True case. We should refactor it
# to test both cases (the False case being the most important).
class TestMissedMessages(ZulipTestCase):
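    # Collapse all runs of whitespace to single spaces so rendered email bodies
    # can be compared against one-line expected strings.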
def normalize_string(self, s):
# type: (text_type) -> text_type
s = s.strip()
return re.sub(r'\s+', ' ', s)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_missed_stream_messages(self, mock_random_token):
# type: (MagicMock) -> None
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '0')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '1')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '2')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '3')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '4')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '5')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '6')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '7')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '8')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '9')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '10')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '11', subject='test2')
msg_id = self.send_message("othello@zulip.com", "denmark", Recipient.STREAM, '@**hamlet**')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
self.assertEquals(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
self.assertIn(
'Denmark > test Othello, the Moor of Venice 1 2 3 4 5 6 7 8 9 10 @**hamlet**',
self.normalize_string(mail.outbox[0].body),
)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_personal_missed_stream_messages(self, mock_random_token):
# type: (MagicMock) -> None
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
msg_id = self.send_message("othello@zulip.com", "hamlet@zulip.com",
Recipient.PERSONAL,
'Extremely personal message!')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
self.assertEquals(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
self.assertIn('You and Othello, the Moor of Venice Extremely personal message!',
self.normalize_string(msg.body))
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_huddle_missed_stream_messages(self, mock_random_token):
# type: (MagicMock) -> None
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
msg_id = self.send_message("othello@zulip.com",
["hamlet@zulip.com", "iago@zulip.com"],
Recipient.PERSONAL,
'Group personal message!')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
self.assertEquals(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
body = ('You and Iago, Othello, the Moor of Venice Othello,'
' the Moor of Venice Group personal message')
self.assertIn(body, self.normalize_string(msg.body))
class TestOpenRealms(ZulipTestCase):
def test_open_realm_logic(self):
# type: () -> None
mit_realm = get_realm("mit.edu")
self.assertEquals(get_unique_open_realm(), None)
mit_realm.restricted_to_domain = False
mit_realm.save()
self.assertTrue(completely_open(mit_realm.domain))
self.assertEquals(get_unique_open_realm(), None)
with self.settings(SYSTEM_ONLY_REALMS={"zulip.com"}):
self.assertEquals(get_unique_open_realm(), mit_realm)
mit_realm.restricted_to_domain = True
mit_realm.save()
| 39.149156
| 106
| 0.625177
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Callable, Dict, Iterable, List, Mapping, Tuple, TypeVar
from mock import patch, MagicMock
from django.http import HttpResponse
from django.test import TestCase, override_settings
from zerver.lib.test_helpers import (
queries_captured, simulated_empty_cache,
simulated_queue_client, tornado_redirected_to_list, ZulipTestCase,
most_recent_message, make_client
)
from zerver.lib.test_runner import slow
from zerver.models import UserProfile, Recipient, \
Realm, RealmAlias, UserActivity, \
get_user_profile_by_email, get_realm, \
get_client, get_stream, Message, get_unique_open_realm, \
completely_open
from zerver.lib.avatar import get_avatar_url
from zerver.lib.initial_password import initial_password
from zerver.lib.email_mirror import create_missed_message_address
from zerver.lib.actions import \
get_emails_from_user_ids, do_deactivate_user, do_reactivate_user, \
do_change_is_admin, extract_recipients, \
do_set_realm_name, do_deactivate_realm, \
do_add_subscription, do_remove_subscription, do_make_stream_private
from zerver.lib.notifications import handle_missedmessage_emails
from zerver.lib.session_user import get_session_dict_user
from zerver.middleware import is_slow_query
from zerver.worker import queue_processors
from django.conf import settings
from django.core import mail
from six import text_type
from six.moves import range
import os
import re
import sys
import time
import ujson
import random
def bail(msg):
print('\nERROR: %s\n' % (msg,))
sys.exit(1)
try:
settings.TEST_SUITE
except:
bail('Test suite only runs correctly with --settings=zproject.test_settings')
# Even though we don't use pygments directly in this file, we need this import
# so the suite fails fast if the dependency is missing.
try:
import pygments
except ImportError:
bail('The Pygments library is required to run the backend test suite.')
K = TypeVar('K')
V = TypeVar('V')
def find_dict(lst, k, v):
# type: (Iterable[Dict[K, V]], K, V) -> Dict[K, V]
for dct in lst:
if dct[k] == v:
return dct
raise Exception('Cannot find element in list where key %s == %s' % (k, v))
# same as in test_uploads.py
TEST_AVATAR_DIR = os.path.join(os.path.dirname(__file__), 'images')
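# is_slow_query applies per-endpoint thresholds: ordinary URLs are already slow
# at 2s, /activity and the webathena login get larger budgets, and the
# error-report / activity-report URLs are not flagged at 2s.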
class SlowQueryTest(TestCase):
def test_is_slow_query(self):
# type: () -> None
self.assertFalse(is_slow_query(1.1, '/some/random/url'))
self.assertTrue(is_slow_query(2, '/some/random/url'))
self.assertTrue(is_slow_query(5.1, '/activity'))
self.assertFalse(is_slow_query(2, '/activity'))
self.assertFalse(is_slow_query(2, '/json/report_error'))
self.assertFalse(is_slow_query(2, '/api/v1/deployments/report_error'))
self.assertFalse(is_slow_query(2, '/realm_activity/whatever'))
self.assertFalse(is_slow_query(2, '/user_activity/whatever'))
self.assertFalse(is_slow_query(9, '/accounts/webathena_kerberos_login/'))
self.assertTrue(is_slow_query(11, '/accounts/webathena_kerberos_login/'))
class ModelTest(TestCase):
def test_miscellaneous_things(self):
# type: () -> None
client = make_client('some_client')
self.assertEqual(str(client), u'<Client: some_client>')
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, email, new_realm_name):
# type: (text_type, text_type) -> None
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_do_set_realm_name_caching(self):
# type: () -> None
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip.com')
new_name = 'Zed You Elle Eye Pea'
do_set_realm_name(realm, new_name)
self.assertEqual(get_realm(realm.domain).name, new_name)
self.assert_user_profile_cache_gets_new_name('hamlet@zulip.com', new_name)
def test_do_set_realm_name_events(self):
# type: () -> None
realm = get_realm('zulip.com')
new_name = 'Puliz'
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_name(realm, new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type = 'realm',
op = 'update',
property = 'name',
value = new_name,
))
def test_update_realm_api(self):
# type: () -> None
new_name = 'Zulip: Worldwide Exporter of APIs'
email = 'cordelia@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, True)
def set_up_db(attr, value):
# type: (str, Any) -> None
realm = get_realm('zulip.com')
setattr(realm, attr, value)
realm.save()
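        # Helper: PATCH /json/realm with the given settings (JSON-encoded) and
        # return a freshly fetched Realm so assertions see the stored values.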
def update_with_api(**kwarg):
# type: (**Any) -> Realm
params = {k: ujson.dumps(v) for k, v in kwarg.items()}
result = self.client_patch('/json/realm', params)
self.assert_json_success(result)
return get_realm('zulip.com') # refresh data
# name
realm = update_with_api(name=new_name)
self.assertEqual(realm.name, new_name)
# restricted
set_up_db('restricted_to_domain', False)
realm = update_with_api(restricted_to_domain=True)
self.assertEqual(realm.restricted_to_domain, True)
realm = update_with_api(restricted_to_domain=False)
self.assertEqual(realm.restricted_to_domain, False)
# invite_required
set_up_db('invite_required', False)
realm = update_with_api(invite_required=True)
self.assertEqual(realm.invite_required, True)
realm = update_with_api(invite_required=False)
self.assertEqual(realm.invite_required, False)
# invite_by_admins_only
set_up_db('invite_by_admins_only', False)
realm = update_with_api(invite_by_admins_only=True)
self.assertEqual(realm.invite_by_admins_only, True)
realm = update_with_api(invite_by_admins_only=False)
self.assertEqual(realm.invite_by_admins_only, False)
# create_stream_by_admins_only
set_up_db('create_stream_by_admins_only', False)
realm = update_with_api(create_stream_by_admins_only=True)
self.assertEqual(realm.create_stream_by_admins_only, True)
realm = update_with_api(create_stream_by_admins_only=False)
self.assertEqual(realm.create_stream_by_admins_only, False)
# allow_message_editing
set_up_db('allow_message_editing', False)
set_up_db('message_content_edit_limit_seconds', 0)
realm = update_with_api(allow_message_editing=True,
message_content_edit_limit_seconds=100)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = update_with_api(allow_message_editing=False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = update_with_api(message_content_edit_limit_seconds=200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
def test_admin_restrictions_for_changing_realm_name(self):
# type: () -> None
new_name = 'Mice will play while the cat is away'
email = 'othello@zulip.com'
self.login(email)
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, False)
req = dict(name=ujson.dumps(new_name))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be a realm administrator')
def test_do_deactivate_realm(self):
# type: () -> None
get_user_profile_by_email('hamlet@zulip.com')
realm = get_realm('zulip.com')
do_deactivate_realm(realm)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.realm.deactivated)
def test_do_set_realm_default_language(self):
# type: () -> None
new_lang = "de"
realm = get_realm('zulip.com')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
email = 'iago@zulip.com'
self.login(email)
req = dict(default_language=ujson.dumps(new_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip.com')
self.assertEqual(realm.default_language, new_lang)
        # Test setting zh_CN; the db should store zh_HANS instead of zh_CN
chinese = "zh_CN"
simplified_chinese = "zh_HANS"
req = dict(default_language=ujson.dumps(chinese))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip.com')
self.assertEqual(realm.default_language, simplified_chinese)
        # Test to make sure that when an invalid language is passed
        # as the default realm language, the correct validation error is
        # raised and the invalid language is not saved in the db
invalid_lang = "invalid_lang"
req = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
realm = get_realm('zulip.com')
self.assertNotEqual(realm.default_language, invalid_lang)
class PermissionTest(ZulipTestCase):
def test_get_admin_users(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_profile, False)
admin_users = user_profile.realm.get_admin_users()
self.assertFalse(user_profile in admin_users)
do_change_is_admin(user_profile, True)
admin_users = user_profile.realm.get_admin_users()
self.assertTrue(user_profile in admin_users)
def test_updating_non_existent_user(self):
# type: () -> None
self.login('hamlet@zulip.com')
admin = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(admin, True)
result = self.client_patch('/json/users/nonexistentuser@zulip.com', {})
self.assert_json_error(result, 'No such user')
def test_admin_api(self):
# type: () -> None
self.login('hamlet@zulip.com')
admin = get_user_profile_by_email('hamlet@zulip.com')
user = get_user_profile_by_email('othello@zulip.com')
realm = admin.realm
do_change_is_admin(admin, True)
# Make sure we see is_admin flag in /json/users
result = self.client_get('/json/users')
self.assert_json_success(result)
members = ujson.loads(result.content)['members']
hamlet = find_dict(members, 'email', 'hamlet@zulip.com')
self.assertTrue(hamlet['is_admin'])
othello = find_dict(members, 'email', 'othello@zulip.com')
self.assertFalse(othello['is_admin'])
# Giveth
req = dict(is_admin=ujson.dumps(True))
events = [] # type: List[Dict[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_patch('/json/users/othello@zulip.com', req)
self.assert_json_success(result)
admin_users = realm.get_admin_users()
self.assertTrue(user in admin_users)
person = events[0]['event']['person']
self.assertEqual(person['email'], 'othello@zulip.com')
self.assertEqual(person['is_admin'], True)
# Taketh away
req = dict(is_admin=ujson.dumps(False))
events = []
with tornado_redirected_to_list(events):
result = self.client_patch('/json/users/othello@zulip.com', req)
self.assert_json_success(result)
admin_users = realm.get_admin_users()
self.assertFalse(user in admin_users)
person = events[0]['event']['person']
self.assertEqual(person['email'], 'othello@zulip.com')
self.assertEqual(person['is_admin'], False)
        # Make sure only admins can patch other users' info.
self.login('othello@zulip.com')
result = self.client_patch('/json/users/hamlet@zulip.com', req)
self.assert_json_error(result, 'Insufficient permission')
class ZephyrTest(ZulipTestCase):
def test_webathena_kerberos_login(self):
email = 'hamlet@zulip.com'
self.login(email)
def post(**kwargs):
params = {k: ujson.dumps(v) for k, v in kwargs.items()}
return self.client_post('/accounts/webathena_kerberos_login/', params)
result = post()
self.assert_json_error(result, 'Could not find Kerberos credential')
result = post(cred='whatever')
self.assert_json_error(result, 'Webathena login not enabled')
email = 'starnine@mit.edu'
self.login(email)
def ccache_mock(**kwargs):
return patch('zerver.views.zephyr.make_ccache', **kwargs)
def ssh_mock(**kwargs):
return patch('zerver.views.zephyr.subprocess.check_call', **kwargs)
def mirror_mock():
return self.settings(PERSONAL_ZMIRROR_SERVER='server')
def logging_mock():
return patch('logging.exception')
cred = dict(cname=dict(nameString=['starnine']))
with ccache_mock(side_effect=KeyError('foo')):
result = post(cred=cred)
self.assert_json_error(result, 'Invalid Kerberos cache')
with \
ccache_mock(return_value=b'1234'), \
ssh_mock(side_effect=KeyError('foo')), \
logging_mock() as log:
result = post(cred=cred)
self.assert_json_error(result, 'We were unable to setup mirroring for you')
log.assert_called_with("Error updating the user's ccache")
with ccache_mock(return_value=b'1234'), mirror_mock(), ssh_mock() as ssh:
result = post(cred=cred)
self.assert_json_success(result)
ssh.assert_called_with([
'ssh',
'server',
'--',
'/home/zulip/zulip/bots/process_ccache',
'starnine',
get_user_profile_by_email(email).api_key,
'MTIzNA=='])
class AdminCreateUserTest(ZulipTestCase):
def test_create_user_backend(self):
# type: () -> None
# This test should give us complete coverage on
# create_user_backend. It mostly exercises error
# conditions, and it also does a basic test of the success
# path.
admin_email = 'hamlet@zulip.com'
self.login(admin_email)
admin = get_user_profile_by_email(admin_email)
do_change_is_admin(admin, True)
result = self.client_put("/json/users", dict())
self.assert_json_error(result, "Missing 'email' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
)
)
self.assert_json_error(result, "Missing 'password' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
)
)
self.assert_json_error(result, "Missing 'full_name' argument")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
full_name='Romeo Montague',
)
)
self.assert_json_error(result, "Missing 'short_name' argument")
result = self.client_put("/json/users", dict(
email='broken',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
)
self.assert_json_error(result, "Bad name or username")
result = self.client_put("/json/users", dict(
email='romeo@not-zulip.com',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
)
self.assert_json_error(result,
"Email 'romeo@not-zulip.com' does not belong to domain 'zulip.com'")
RealmAlias.objects.create(realm=get_realm('zulip.com'), domain='zulip.net')
# HAPPY PATH STARTS HERE
valid_params = dict(
email='romeo@zulip.net',
password='xxxx',
full_name='Romeo Montague',
short_name='Romeo',
)
result = self.client_put("/json/users", valid_params)
self.assert_json_success(result)
new_user = get_user_profile_by_email('romeo@zulip.net')
self.assertEqual(new_user.full_name, 'Romeo Montague')
self.assertEqual(new_user.short_name, 'Romeo')
# One more error condition to test--we can't create
result = self.client_put("/json/users", valid_params)
self.assert_json_error(result,
"Email 'romeo@zulip.net' already in use")
class WorkerTest(TestCase):
class FakeClient(object):
def __init__(self):
            self.consumers = {}
            self.queue = []
def register_json_consumer(self, queue_name, callback):
self.consumers[queue_name] = callback
def start_consuming(self):
for queue_name, data in self.queue:
callback = self.consumers[queue_name]
callback(data)
def test_UserActivityWorker(self):
fake_client = self.FakeClient()
user = get_user_profile_by_email('hamlet@zulip.com')
UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
).delete()
data = dict(
user_profile_id = user.id,
client = 'ios',
time = time.time(),
query = 'send_message'
)
fake_client.queue.append(('user_activity', data))
with simulated_queue_client(lambda: fake_client):
worker = queue_processors.UserActivityWorker()
worker.setup()
worker.start()
activity_records = UserActivity.objects.filter(
user_profile = user.id,
client = get_client('ios')
)
self.assertTrue(len(activity_records), 1)
self.assertTrue(activity_records[0].count, 1)
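    # A consumer that raises should have the offending event written to
    # QUEUE_ERROR_DIR while the remaining queued messages are still processed.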
def test_error_handling(self):
processed = []
@queue_processors.assign_queue('unreliable_worker')
class UnreliableWorker(queue_processors.QueueProcessingWorker):
def consume(self, data):
if data["type"] == 'unexpected behaviour':
raise Exception('Worker task not performing as expected!')
processed.append(data["type"])
def _log_problem(self):
pass
fake_client = self.FakeClient()
for msg in ['good', 'fine', 'unexpected behaviour', 'back to normal']:
fake_client.queue.append(('unreliable_worker', {'type': msg}))
fn = os.path.join(settings.QUEUE_ERROR_DIR, 'unreliable_worker.errors')
try:
os.remove(fn)
except OSError:
pass
with simulated_queue_client(lambda: fake_client):
worker = UnreliableWorker()
worker.setup()
worker.start()
self.assertEqual(processed, ['good', 'fine', 'back to normal'])
line = open(fn).readline().strip()
event = ujson.loads(line.split('\t')[1])
self.assertEqual(event["type"], 'unexpected behaviour')
def test_worker_noname(self):
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
super(TestWorker, self).__init__()
def consume(self, data):
pass
with self.assertRaises(queue_processors.WorkerDeclarationException):
TestWorker()
def test_worker_noconsume(self):
@queue_processors.assign_queue('test_worker')
class TestWorker(queue_processors.QueueProcessingWorker):
def __init__(self):
super(TestWorker, self).__init__()
with self.assertRaises(queue_processors.WorkerDeclarationException):
worker = TestWorker()
worker.consume({})
class DocPageTest(ZulipTestCase):
def _test(self, url, expected_content):
result = self.client_get(url)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
def test_doc_endpoints(self):
self._test('/api/', 'We hear you like APIs')
self._test('/api/endpoints/', 'pre-built API bindings for Python')
self._test('/apps/', 'Appsolutely')
self._test('/features/', 'Talk about multiple topics at once')
self._test('/hello/', 'workplace chat that actually improves your productivity')
self._test('/integrations/', 'require creating a Zulip bot')
self._test('/login/', '(Normal users)')
self._test('/register/', 'get started')
result = self.client_get('/new-user/')
self.assertEqual(result.status_code, 301)
self.assertIn('hello', result['Location'])
result = self.client_get('/robots.txt')
self.assertEqual(result.status_code, 301)
self.assertIn('static/robots.txt', result['Location'])
result = self.client_get('/static/robots.txt')
self.assertEqual(result.status_code, 200)
self.assertIn(
'Disallow: /',
''.join(str(x) for x in list(result.streaming_content))
)
class UserProfileTest(TestCase):
def test_get_emails_from_user_ids(self):
hamlet = get_user_profile_by_email('hamlet@zulip.com')
othello = get_user_profile_by_email('othello@zulip.com')
dct = get_emails_from_user_ids([hamlet.id, othello.id])
self.assertEqual(dct[hamlet.id], 'hamlet@zulip.com')
self.assertEqual(dct[othello.id], 'othello@zulip.com')
class UserChangesTest(ZulipTestCase):
def test_update_api_key(self):
email = "hamlet@zulip.com"
self.login(email)
user = get_user_profile_by_email(email)
old_api_key = user.api_key
result = self.client_post('/json/users/me/api_key/regenerate')
self.assert_json_success(result)
new_api_key = ujson.loads(result.content)['api_key']
self.assertNotEqual(old_api_key, new_api_key)
user = get_user_profile_by_email(email)
self.assertEqual(new_api_key, user.api_key)
class ActivateTest(ZulipTestCase):
def test_basics(self):
user = get_user_profile_by_email('hamlet@zulip.com')
do_deactivate_user(user)
self.assertFalse(user.is_active)
do_reactivate_user(user)
self.assertTrue(user.is_active)
def test_api(self):
admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(admin, True)
self.login('othello@zulip.com')
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/hamlet@zulip.com')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
result = self.client_post('/json/users/hamlet@zulip.com/reactivate')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
def test_api_with_nonexistent_user(self):
admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(admin, True)
self.login('othello@zulip.com')
result = self.client_delete('/json/bots/hamlet@zulip.com')
self.assert_json_error(result, 'No such bot')
result = self.client_delete('/json/users/nonexistent@zulip.com')
self.assert_json_error(result, 'No such user')
result = self.client_post('/json/users/nonexistent@zulip.com/reactivate')
self.assert_json_error(result, 'No such user')
def test_api_with_insufficient_permissions(self):
non_admin = get_user_profile_by_email('othello@zulip.com')
do_change_is_admin(non_admin, False)
self.login('othello@zulip.com')
result = self.client_delete('/json/users/hamlet@zulip.com')
self.assert_json_error(result, 'Insufficient permission')
result = self.client_post('/json/users/hamlet@zulip.com/reactivate')
self.assert_json_error(result, 'Insufficient permission')
class BotTest(ZulipTestCase):
def assert_num_bots_equal(self, count):
result = self.client_get("/json/bots")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(count, len(json['bots']))
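    # Helper: create a bot named hambot (plus any extra fields) for the
    # logged-in user and return the parsed JSON response.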
def create_bot(self, **extras):
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
return ujson.loads(result.content)
def deactivate_bot(self):
result = self.client_delete("/json/bots/hambot-bot@zulip.com")
self.assert_json_success(result)
def test_add_bot_with_bad_username(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
bot_info = dict(
full_name='',
short_name='',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Bad name or username')
self.assert_num_bots_equal(0)
def test_add_bot(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
        events = []
        with tornado_redirected_to_list(events):
result = self.create_bot()
self.assert_num_bots_equal(1)
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream=None,
default_events_register_stream=None,
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
users_result = self.client_get('/json/users')
members = ujson.loads(users_result.content)['members']
bots = [m for m in members if m['email'] == 'hambot-bot@zulip.com']
self.assertEqual(len(bots), 1)
bot = bots[0]
self.assertEqual(bot['bot_owner'], 'hamlet@zulip.com')
def test_add_bot_with_username_in_use(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
bot_info = dict(
full_name='Duplicate',
short_name='hambot',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Username already in use')
def test_add_bot_with_user_avatar(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp:
self.create_bot(file=fp)
self.assert_num_bots_equal(1)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
def test_add_bot_with_too_many_files(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp1, \
open(os.path.join(TEST_AVATAR_DIR, 'img.gif'), 'rb') as fp2:
bot_info = dict(
full_name='whatever',
short_name='whatever',
file1=fp1,
file2=fp2,
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'You may only upload one file at a time')
self.assert_num_bots_equal(0)
def test_add_bot_with_default_sending_stream(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
def test_add_bot_with_default_sending_stream_not_subscribed(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Rome')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Rome')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Rome')
def test_bot_add_subscription(self):
self.login("hamlet@zulip.com")
request_data = {
'principals': '["iago@zulip.com"]'
}
        events = []
        with tornado_redirected_to_list(events):
result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], request_data)
self.assert_json_success(result)
msg_event = [e for e in events if e['event']['type'] == 'message']
self.assert_length(msg_event, 1, exact=True)
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
bot_request_data = {
'principals': '["hambot-bot@zulip.com"]'
}
        events_bot = []
        with tornado_redirected_to_list(events_bot):
result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], bot_request_data)
self.assert_json_success(result)
msg_event = [e for e in events_bot if e['event']['type'] == 'message']
self.assert_length(msg_event, 0, exact=True)
self.assertEqual(len(events_bot), len(events) - 1)
self.assertEqual(len(mail.outbox), 0)
def test_add_bot_with_default_sending_stream_private_allowed(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
        events = []
        with tornado_redirected_to_list(events):
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream='Denmark',
default_events_register_stream=None,
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_sending_stream_private_denied(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
'default_sending_stream': 'Denmark',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_add_bot_with_default_events_register_stream(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_events_register_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_events_register_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_events_register_stream.name, 'Denmark')
def test_add_bot_with_default_events_register_stream_private_allowed(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
        events = []
        with tornado_redirected_to_list(events):
result = self.create_bot(default_events_register_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_events_register_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_events_register_stream.name, 'Denmark')
event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
self.assertEqual(
dict(
type='realm_bot',
op='add',
bot=dict(email='hambot-bot@zulip.com',
full_name='The Bot of Hamlet',
api_key=result['api_key'],
avatar_url=result['avatar_url'],
default_sending_stream=None,
default_events_register_stream='Denmark',
default_all_public_streams=False,
owner='hamlet@zulip.com',
)
),
event['event']
)
self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_events_register_stream_private_denied(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
self.assert_num_bots_equal(0)
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
'default_events_register_stream': 'Denmark',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_add_bot_with_default_all_public_streams(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_all_public_streams=ujson.dumps(True))
self.assert_num_bots_equal(1)
self.assertTrue(result['default_all_public_streams'])
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.default_all_public_streams, True)
def test_deactivate_bot(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
self.deactivate_bot()
self.deactivate_bot()
self.assert_num_bots_equal(0)
def test_deactivate_bogus_bot(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
result = self.client_delete("/json/bots/bogus-bot@zulip.com")
self.assert_json_error(result, 'No such bot')
self.assert_num_bots_equal(1)
def test_bot_deactivation_attacks(self):
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
self.login("othello@zulip.com")
# Can not deactivate a user as a bot
result = self.client_delete("/json/bots/hamlet@zulip.com")
self.assert_json_error(result, 'No such bot')
result = self.client_delete("/json/bots/hambot-bot@zulip.com")
self.assert_json_error(result, 'Insufficient permission')
# But we don't actually deactivate the other person's bot.
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(1)
# Can not deactivate a bot as a user
result = self.client_delete("/json/users/hambot-bot@zulip.com")
self.assert_json_error(result, 'No such user')
self.assert_num_bots_equal(1)
def test_bot_permissions(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
self.create_bot()
self.assert_num_bots_equal(1)
# Have Othello try to mess with Hamlet's bots.
self.login("othello@zulip.com")
result = self.client_post("/json/bots/hambot-bot@zulip.com/api_key/regenerate")
self.assert_json_error(result, 'Insufficient permission')
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
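    # Helper: fetch /json/bots and return the first bot owned by the logged-in user.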
def get_bot(self):
result = self.client_get("/json/bots")
bots = ujson.loads(result.content)['bots']
return bots[0]
def test_update_api_key(self):
self.login("hamlet@zulip.com")
self.create_bot()
bot = self.get_bot()
old_api_key = bot['api_key']
result = self.client_post('/json/bots/hambot-bot@zulip.com/api_key/regenerate')
self.assert_json_success(result)
new_api_key = ujson.loads(result.content)['api_key']
self.assertNotEqual(old_api_key, new_api_key)
bot = self.get_bot()
self.assertEqual(new_api_key, bot['api_key'])
def test_update_api_key_for_invalid_user(self):
self.login("hamlet@zulip.com")
result = self.client_post('/json/bots/nonexistentuser@zulip.com/api_key/regenerate')
self.assert_json_error(result, 'No such user')
def test_patch_bot_full_name(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
full_name = ujson.loads(result.content)['full_name']
self.assertEqual('Fred', full_name)
bot = self.get_bot()
self.assertEqual('Fred', bot['full_name'])
def test_patch_bot_avatar(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_GRAVATAR)
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp1, \
open(os.path.join(TEST_AVATAR_DIR, 'img.gif'), 'rb') as fp2:
result = self.client_patch_multipart(
'/json/bots/hambot-bot@zulip.com',
dict(file1=fp1, file2=fp2))
self.assert_json_error(result, 'You may only upload one file at a time')
with open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb') as fp:
result = self.client_patch_multipart(
'/json/bots/hambot-bot@zulip.com',
dict(file=fp))
self.assert_json_success(result)
profile = get_user_profile_by_email('hambot-bot@zulip.com')
self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
def test_patch_bot_to_stream(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Denmark', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_sending_stream'])
def test_patch_bot_to_stream_not_subscribed(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Rome',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Rome', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Rome', bot['default_sending_stream'])
def test_patch_bot_to_stream_none(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': '',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual(None, default_sending_stream)
bot = self.get_bot()
self.assertEqual(None, bot['default_sending_stream'])
def test_patch_bot_to_stream_private_allowed(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_sending_stream = ujson.loads(result.content)['default_sending_stream']
self.assertEqual('Denmark', default_sending_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_sending_stream'])
def test_patch_bot_to_stream_private_denied(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_patch_bot_to_stream_not_found(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_sending_stream': 'missing',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such stream \'missing\'')
def test_patch_bot_events_register_stream(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual('Denmark', default_events_register_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_allowed(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_add_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual('Denmark', default_events_register_stream)
bot = self.get_bot()
self.assertEqual('Denmark', bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_denied(self):
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
stream = get_stream("Denmark", user_profile.realm)
do_remove_subscription(user_profile, stream)
do_make_stream_private(user_profile.realm, "Denmark")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'Denmark',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'Insufficient permission')
def test_patch_bot_events_register_stream_none(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': '',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_events_register_stream']
self.assertEqual(None, default_events_register_stream)
bot = self.get_bot()
self.assertEqual(None, bot['default_events_register_stream'])
def test_patch_bot_events_register_stream_not_found(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_events_register_stream': 'missing',
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such stream \'missing\'')
def test_patch_bot_default_all_public_streams_true(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_all_public_streams': ujson.dumps(True),
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_all_public_streams']
self.assertEqual(default_events_register_stream, True)
bot = self.get_bot()
self.assertEqual(bot['default_all_public_streams'], True)
def test_patch_bot_default_all_public_streams_false(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'default_all_public_streams': ujson.dumps(False),
}
result = self.client_patch("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
default_events_register_stream = ujson.loads(result.content)['default_all_public_streams']
self.assertEqual(default_events_register_stream, False)
bot = self.get_bot()
self.assertEqual(bot['default_all_public_streams'], False)
def test_patch_bot_via_post(self):
self.login("hamlet@zulip.com")
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
bot_info = {
'full_name': 'Fred',
'method': 'PATCH'
}
result = self.client_post("/json/bots/hambot-bot@zulip.com", bot_info)
self.assert_json_success(result)
full_name = ujson.loads(result.content)['full_name']
self.assertEqual('Fred', full_name)
bot = self.get_bot()
self.assertEqual('Fred', bot['full_name'])
def test_patch_bogus_bot(self):
self.login("hamlet@zulip.com")
self.create_bot()
bot_info = {
'full_name': 'Fred',
}
result = self.client_patch("/json/bots/nonexistent-bot@zulip.com", bot_info)
self.assert_json_error(result, 'No such user')
self.assert_num_bots_equal(1)
class ChangeSettingsTest(ZulipTestCase):
def check_well_formed_change_settings_response(self, result):
self.assertIn("full_name", result)
def check_for_toggle_param(self, pattern, param):
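        # Descriptive note (added comment): POST {param: true} to the endpoint,
        # confirm the field was persisted on the UserProfile, then POST false
        # and confirm the toggle was persisted again.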
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
json_result = self.client_post(pattern,
{param: ujson.dumps(True)})
self.assert_json_success(json_result)
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertEqual(getattr(user_profile, param), True)
json_result = self.client_post(pattern,
{param: ujson.dumps(False)})
self.assert_json_success(json_result)
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertEqual(getattr(user_profile, param), False)
def test_successful_change_settings(self):
self.login("hamlet@zulip.com")
json_result = self.client_post("/json/settings/change",
dict(
full_name='Foo Bar',
old_password=initial_password('hamlet@zulip.com'),
new_password='foobar1',
confirm_password='foobar1',
)
)
self.assert_json_success(json_result)
result = ujson.loads(json_result.content)
self.check_well_formed_change_settings_response(result)
self.assertEqual(get_user_profile_by_email("hamlet@zulip.com").
full_name, "Foo Bar")
self.client_post('/accounts/logout/')
self.login("hamlet@zulip.com", "foobar1")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_illegal_name_changes(self):
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email(email)
full_name = user.full_name
with self.settings(NAME_CHANGES_DISABLED=True):
json_result = self.client_post("/json/settings/change",
dict(full_name='Foo Bar'))
            # The change is silently ignored (no error reason is given) when
            # NAME_CHANGES_DISABLED is set; the request still reports success.
self.assert_json_success(json_result)
user = get_user_profile_by_email(email)
self.assertEqual(user.full_name, full_name)
# Now try a too-long name
json_result = self.client_post("/json/settings/change",
dict(full_name='x' * 1000))
self.assert_json_error(json_result, 'Name too long!')
# This is basically a don't-explode test.
def test_notify_settings(self):
self.check_for_toggle_param("/json/notify_settings/change", "enable_desktop_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_stream_desktop_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_stream_sounds")
self.check_for_toggle_param("/json/notify_settings/change", "enable_sounds")
self.check_for_toggle_param("/json/notify_settings/change", "enable_offline_email_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_offline_push_notifications")
self.check_for_toggle_param("/json/notify_settings/change", "enable_digest_emails")
def test_ui_settings(self):
self.check_for_toggle_param("/json/ui_settings/change", "autoscroll_forever")
self.check_for_toggle_param("/json/ui_settings/change", "default_desktop_notifications")
def test_toggling_left_side_userlist(self):
self.check_for_toggle_param("/json/left_side_userlist", "left_side_userlist")
def test_time_setting(self):
self.check_for_toggle_param("/json/time_setting", "twenty_four_hour_time")
def test_enter_sends_setting(self):
self.check_for_toggle_param('/json/users/me/enter-sends', "enter_sends")
def test_mismatching_passwords(self):
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
new_password="mismatched_password",
confirm_password="not_the_same",
)
)
self.assert_json_error(result,
"New password must match confirmation password!")
def test_wrong_old_password(self):
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
old_password='bad_password',
new_password="ignored",
confirm_password="ignored",
)
)
self.assert_json_error(result, "Wrong password!")
def test_changing_nothing_returns_error(self):
self.login("hamlet@zulip.com")
result = self.client_post("/json/settings/change",
dict(
old_password='ignored',
)
)
self.assert_json_error(result, "No new data supplied")
def test_change_default_language(self):
email = "hamlet@zulip.com"
self.login(email)
german = "de"
data = dict(default_language=ujson.dumps(german))
result = self.client_post("/json/language_setting", data)
self.assert_json_success(result)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, german)
invalid_lang = "invalid_lang"
data = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_post("/json/language_setting", data)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
user_profile = get_user_profile_by_email(email)
self.assertNotEqual(user_profile.default_language, invalid_lang)
class GetProfileTest(ZulipTestCase):
def common_update_pointer(self, email, pointer):
self.login(email)
result = self.client_put("/json/users/me/pointer", {"pointer": pointer})
self.assert_json_success(result)
def common_get_profile(self, email):
user_profile = get_user_profile_by_email(email)
self.send_message(email, "Verona", Recipient.STREAM, "hello")
result = self.client_get("/api/v1/users/me", **self.api_auth(email))
max_id = most_recent_message(user_profile).id
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("client_id", json)
self.assertIn("max_message_id", json)
self.assertIn("pointer", json)
self.assertEqual(json["max_message_id"], max_id)
return json
def test_get_pointer(self):
email = "hamlet@zulip.com"
self.login(email)
result = self.client_get("/json/users/me/pointer")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("pointer", json)
def test_cache_behavior(self):
with queries_captured() as queries:
with simulated_empty_cache() as cache_queries:
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assert_length(queries, 1)
self.assert_length(cache_queries, 1, exact=True)
self.assertEqual(user_profile.email, 'hamlet@zulip.com')
def test_api_get_empty_profile(self):
json = self.common_get_profile("othello@zulip.com")
self.assertEqual(json["pointer"], -1)
def test_profile_with_pointer(self):
id1 = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
id2 = self.send_message("othello@zulip.com", "Verona", Recipient.STREAM)
json = self.common_get_profile("hamlet@zulip.com")
self.common_update_pointer("hamlet@zulip.com", id2)
json = self.common_get_profile("hamlet@zulip.com")
self.assertEqual(json["pointer"], id2)
self.common_update_pointer("hamlet@zulip.com", id1)
json = self.common_get_profile("hamlet@zulip.com")
self.assertEqual(json["pointer"], id2)
result = self.client_put("/json/users/me/pointer", {"pointer": 99999999})
self.assert_json_error(result, "Invalid message ID")
def test_get_all_profiles_avatar_urls(self):
user_profile = get_user_profile_by_email('hamlet@zulip.com')
result = self.client_get("/api/v1/users", **self.api_auth('hamlet@zulip.com'))
self.assert_json_success(result)
json = ujson.loads(result.content)
for user in json['members']:
if user['email'] == 'hamlet@zulip.com':
self.assertEqual(
user['avatar_url'],
get_avatar_url(user_profile.avatar_source, user_profile.email),
)
class HomeTest(ZulipTestCase):
@slow('big method')
def test_home(self):
html_bits = [
'Compose your message here...',
'Exclude messages with topic',
'Get started',
'Keyboard shortcuts',
'Loading...',
'Manage Streams',
'Narrow by topic',
'Next message',
'SHARE THE LOVE',
'Search streams',
'Welcome to Zulip',
'pygments.css',
'var page_params',
]
expected_keys = [
"alert_words",
"autoscroll_forever",
"avatar_url",
"bot_list",
"can_create_streams",
"cross_realm_user_emails",
"debug_mode",
"default_desktop_notifications",
"default_language",
"default_language_name",
"desktop_notifications_enabled",
"development_environment",
"domain",
"email",
"email_dict",
"enable_digest_emails",
"enable_offline_email_notifications",
"enable_offline_push_notifications",
"enter_sends",
"event_queue_id",
"first_in_realm",
"fullname",
"furthest_read_time",
"has_mobile_devices",
"have_initial_messages",
"initial_pointer",
"initial_presences",
"initial_servertime",
"is_admin",
"is_zephyr_mirror_realm",
"language_list",
"language_list_dbl_col",
"last_event_id",
"left_side_userlist",
"login_page",
"mandatory_topics",
"max_message_id",
"maxfilesize",
"muted_topics",
"name_changes_disabled",
"narrow",
"narrow_stream",
"needs_tutorial",
"neversubbed_info",
"notifications_stream",
"password_auth_enabled",
"people_list",
"poll_timeout",
"presence_disabled",
"product_name",
"prompt_for_invites",
"realm_allow_message_editing",
"realm_create_stream_by_admins_only",
"realm_default_language",
"realm_default_streams",
"realm_emoji",
"realm_filters",
"realm_invite_by_admins_only",
"realm_invite_required",
"realm_message_content_edit_limit_seconds",
"realm_name",
"realm_restricted_to_domain",
"realm_uri",
"referrals",
"save_stacktraces",
"server_generation",
"server_uri",
"share_the_love",
"show_digest_email",
"sounds_enabled",
"stream_desktop_notifications_enabled",
"stream_sounds_enabled",
"subbed_info",
"test_suite",
"twenty_four_hour_time",
"unread_count",
"unsubbed_info",
"user_id",
"zulip_version",
]
email = "hamlet@zulip.com"
result = self.client_get('/')
self.assertEqual(result.status_code, 302)
self.login(email)
result = self._get_home_page(stream='Denmark')
html = result.content.decode('utf-8')
for html_bit in html_bits:
if html_bit not in html:
self.fail('%s not in result' % (html_bit,))
page_params = self._get_page_params(result)
actual_keys = sorted([str(k) for k in page_params.keys()])
self.assertEqual(actual_keys, expected_keys)
def _get_home_page(self, **kwargs):
with \
patch('zerver.lib.actions.request_event_queue', return_value=42), \
patch('zerver.lib.actions.get_user_events', return_value=[]):
result = self.client_get('/', dict(**kwargs))
return result
def _get_page_params(self, result):
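        # Descriptive note (added comment): the rendered home page embeds its
        # bootstrap data as a "var page_params = {...};" line; this helper scrapes
        # that line out of the HTML and JSON-decodes it so tests can assert on keys.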
html = result.content.decode('utf-8')
lines = html.split('\n')
page_params_line = [l for l in lines if l.startswith('var page_params')][0]
page_params_json = page_params_line.split(' = ')[1].rstrip(';')
page_params = ujson.loads(page_params_json)
return page_params
def _sanity_check(self, result):
html = result.content.decode('utf-8')
if 'Compose your message' not in html:
self.fail('Home page probably did not load.')
def test_terms_of_service(self):
email = 'hamlet@zulip.com'
self.login(email)
with \
self.settings(TERMS_OF_SERVICE='whatever'), \
self.settings(TOS_VERSION='99.99'):
result = self.client_get('/', dict(stream='Denmark'))
html = result.content.decode('utf-8')
self.assertIn('There is a new terms of service', html)
def test_bad_narrow(self):
email = 'hamlet@zulip.com'
self.login(email)
with patch('logging.exception') as mock:
result = self._get_home_page(stream='Invalid Stream')
mock.assert_called_once_with('Narrow parsing')
self._sanity_check(result)
def test_bad_pointer(self):
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
user_profile.pointer = 999999
user_profile.save()
self.login(email)
with patch('logging.warning') as mock:
result = self._get_home_page()
mock.assert_called_once_with('hamlet@zulip.com has invalid pointer 999999')
self._sanity_check(result)
def test_topic_narrow(self):
email = 'hamlet@zulip.com'
self.login(email)
result = self._get_home_page(stream='Denmark', topic='lunch')
self._sanity_check(result)
html = result.content.decode('utf-8')
self.assertIn('lunch', html)
def test_notifications_stream(self):
email = 'hamlet@zulip.com'
realm = get_realm('zulip.com')
realm.notifications_stream = get_stream('Denmark', realm)
realm.save()
self.login(email)
result = self._get_home_page()
page_params = self._get_page_params(result)
self.assertEqual(page_params['notifications_stream'], 'Denmark')
def test_new_stream(self):
email = 'hamlet@zulip.com'
stream_name = 'New stream'
self.subscribe_to_stream(email, stream_name)
self.login(email)
result = self._get_home_page(stream=stream_name)
page_params = self._get_page_params(result)
self.assertEqual(page_params['narrow_stream'], stream_name)
self.assertEqual(page_params['narrow'], [dict(operator='stream', operand=stream_name)])
self.assertEqual(page_params['initial_pointer'], -1)
self.assertEqual(page_params['max_message_id'], -1)
self.assertEqual(page_params['have_initial_messages'], False)
def test_invites_by_admins_only(self):
email = 'hamlet@zulip.com'
user_profile = get_user_profile_by_email(email)
realm = user_profile.realm
realm.invite_by_admins_only = True
realm.save()
self.login(email)
self.assertFalse(user_profile.is_realm_admin)
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertNotIn('Invite more users', html)
user_profile.is_realm_admin = True
user_profile.save()
result = self._get_home_page()
html = result.content.decode('utf-8')
self.assertIn('Invite more users', html)
class MutedTopicsTests(ZulipTestCase):
def test_json_set(self):
email = 'hamlet@zulip.com'
self.login(email)
url = '/json/set_muted_topics'
data = {'muted_topics': '[["stream", "topic"]]'}
result = self.client_post(url, data)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(ujson.loads(user.muted_topics), [["stream", "topic"]])
url = '/json/set_muted_topics'
data = {'muted_topics': '[["stream2", "topic2"]]'}
result = self.client_post(url, data)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(ujson.loads(user.muted_topics), [["stream2", "topic2"]])
class ExtractedRecipientsTest(TestCase):
def test_extract_recipients(self):
s = ujson.dumps([' alice@zulip.com ', ' bob@zulip.com ', ' ', 'bob@zulip.com'])
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
s = 'alice@zulip.com '
self.assertEqual(extract_recipients(s), ['alice@zulip.com'])
s = '"alice@zulip.com"'
self.assertEqual(extract_recipients(s), ['alice@zulip.com'])
s = 'bob@zulip.com, alice@zulip.com'
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
s = '"bob@zulip.com,alice@zulip.com"'
self.assertEqual(sorted(extract_recipients(s)), ['alice@zulip.com', 'bob@zulip.com'])
class TestMissedMessages(ZulipTestCase):
def normalize_string(self, s):
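        # Descriptive note (added comment): collapse runs of whitespace to single
        # spaces so email bodies can be matched with simple substring assertions.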
s = s.strip()
return re.sub(r'\s+', ' ', s)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_missed_stream_messages(self, mock_random_token):
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '0')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '1')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '2')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '3')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '4')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '5')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '6')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '7')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '8')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '9')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '10')
self.send_message("othello@zulip.com", "Denmark", Recipient.STREAM, '11', subject='test2')
msg_id = self.send_message("othello@zulip.com", "denmark", Recipient.STREAM, '@**hamlet**')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
        self.assertEqual(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
self.assertIn(
'Denmark > test Othello, the Moor of Venice 1 2 3 4 5 6 7 8 9 10 @**hamlet**',
self.normalize_string(mail.outbox[0].body),
)
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_personal_missed_stream_messages(self, mock_random_token):
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
msg_id = self.send_message("othello@zulip.com", "hamlet@zulip.com",
Recipient.PERSONAL,
'Extremely personal message!')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
        self.assertEqual(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
self.assertIn('You and Othello, the Moor of Venice Extremely personal message!',
self.normalize_string(msg.body))
@override_settings(SEND_MISSED_MESSAGE_EMAILS_AS_USER=True)
@patch('zerver.lib.email_mirror.generate_random_token')
def test_extra_context_in_huddle_missed_stream_messages(self, mock_random_token):
tokens = [str(random.getrandbits(32)) for _ in range(30)]
mock_random_token.side_effect = tokens
msg_id = self.send_message("othello@zulip.com",
["hamlet@zulip.com", "iago@zulip.com"],
Recipient.PERSONAL,
'Group personal message!')
othello = get_user_profile_by_email('othello@zulip.com')
hamlet = get_user_profile_by_email('hamlet@zulip.com')
handle_missedmessage_emails(hamlet.id, [{'message_id': msg_id}])
msg = mail.outbox[0]
reply_to_addresses = [settings.EMAIL_GATEWAY_PATTERN % (u'mm' + t)
for t in tokens]
sender = 'Zulip <{}>'.format(settings.NOREPLY_EMAIL_ADDRESS)
        self.assertEqual(len(mail.outbox), 1)
self.assertEqual(msg.from_email, '"%s" <%s>' % (othello.full_name, othello.email))
self.assertIn(msg.extra_headers['Reply-To'], reply_to_addresses)
self.assertEqual(msg.extra_headers['Sender'], sender)
body = ('You and Iago, Othello, the Moor of Venice Othello,'
' the Moor of Venice Group personal message')
self.assertIn(body, self.normalize_string(msg.body))
class TestOpenRealms(ZulipTestCase):
def test_open_realm_logic(self):
mit_realm = get_realm("mit.edu")
        self.assertEqual(get_unique_open_realm(), None)
mit_realm.restricted_to_domain = False
mit_realm.save()
self.assertTrue(completely_open(mit_realm.domain))
        self.assertEqual(get_unique_open_realm(), None)
with self.settings(SYSTEM_ONLY_REALMS={"zulip.com"}):
            self.assertEqual(get_unique_open_realm(), mit_realm)
mit_realm.restricted_to_domain = True
mit_realm.save()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70cc5efd6ee30bf6e24e6413ae411d0d6938392 | size: 34 | ext: py | lang: Python
max_stars_repo_path: dataloader/__init__.py | max_stars_repo_name: sajith-rahim/transformer-classifier | max_stars_repo_head_hexsha: 543562fc22a4ee3b224eaf44876449552026d2e5 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: dataloader/__init__.py | max_issues_repo_name: sajith-rahim/transformer-classifier | max_issues_repo_head_hexsha: 543562fc22a4ee3b224eaf44876449552026d2e5 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: dataloader/__init__.py | max_forks_repo_name: sajith-rahim/transformer-classifier | max_forks_repo_head_hexsha: 543562fc22a4ee3b224eaf44876449552026d2e5 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from .sentence_dataloader import *
avg_line_length: 34 | max_line_length: 34 | alphanum_fraction: 0.852941
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70cc6649fea754647fe747bf5298e5f86180bb4 | size: 624 | ext: py | lang: Python
max_stars_repo_path: InkWarmReloadDemo.py | max_stars_repo_name: andycb/inkyWhatInfoDisplay | max_stars_repo_head_hexsha: b05e2b2d5fc084298bb9ad1eeecaa7fd5eb6732f | max_stars_repo_licenses: ["MIT"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2019-11-05T08:32:44.000Z | max_stars_repo_stars_event_max_datetime: 2022-01-01T19:12:20.000Z
max_issues_repo_path: InkWarmReloadDemo.py | max_issues_repo_name: andycb/inkyWhatInfoDisplay | max_issues_repo_head_hexsha: b05e2b2d5fc084298bb9ad1eeecaa7fd5eb6732f | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: InkWarmReloadDemo.py | max_forks_repo_name: andycb/inkyWhatInfoDisplay | max_forks_repo_head_hexsha: b05e2b2d5fc084298bb9ad1eeecaa7fd5eb6732f | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import time
from inky_fork import InkyPHAT, InkyWHAT
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from datetime import datetime
from time import gmtime, strftime
inky_display = InkyWHAT("black_fast")
font = ImageFont.truetype("Nunito-ExtraLight.ttf", 130)
i = 10
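# Descriptive note (added comment): each pass clears a white canvas, draws the
# current counter value, pushes the frame to the display, then waits one second
# before incrementing and repeating.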
while True:
image = Image.new("P", (inky_display.WIDTH, inky_display.HEIGHT))
draw = ImageDraw.Draw(image)
draw.rectangle((400, 300, 0, 0), fill=inky_display.WHITE)
draw.text((100, 100), str(i), inky_display.BLACK, font)
inky_display.set_image(image)
inky_display.show()
i = i + 1
time.sleep(1)
avg_line_length: 24.96 | max_line_length: 69 | alphanum_fraction: 0.725962
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70cc668822045cd6d0fe8c43d30364a6c8ec638 | size: 267 | ext: py | lang: Python
max_stars_repo_path: Mundo 2 - Estruturas de Controle/#067 - Tabuada v3.0.py | max_stars_repo_name: Pedrohclelis/curso-em-video-python | max_stars_repo_head_hexsha: 84e61ccec570fdf9546844757f99ca11333565a8 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Mundo 2 - Estruturas de Controle/#067 - Tabuada v3.0.py | max_issues_repo_name: Pedrohclelis/curso-em-video-python | max_issues_repo_head_hexsha: 84e61ccec570fdf9546844757f99ca11333565a8 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Mundo 2 - Estruturas de Controle/#067 - Tabuada v3.0.py | max_forks_repo_name: Pedrohclelis/curso-em-video-python | max_forks_repo_head_hexsha: 84e61ccec570fdf9546844757f99ca11333565a8 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
while True:
n = int(input("Quer ver a tabuada de qual valor? "))
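    # Descriptive note (added comment): a negative value ends the loop; otherwise
    # the 1-to-10 times table for n is printed and the program asks again.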
if n < 0:
print('-=' * 19)
print('PROGRAMA TABUADA ENCERRADO, VOLTE SEMPRE')
break
print('-=' * 19)
for c in range(1, 11):
print(f'{n} x {c:2} = {n*c:2}')
avg_line_length: 29.666667 | max_line_length: 57 | alphanum_fraction: 0.501873
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f70cc67d622524e48e5f02f581e211565a864500 | size: 27384 | ext: py | lang: Python
max_stars_repo_path: lib/python3.8/site-packages/ansible_collections/community/general/plugins/modules/docker_network.py | max_stars_repo_name: cjsteel/python3-venv-ansible-2.10.5 | max_stars_repo_head_hexsha: c95395c4cae844dc66fddde9b4343966f4b2ecd5 | max_stars_repo_licenses: ["Apache-1.1"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2020-12-16T21:42:09.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-28T16:04:32.000Z
max_issues_repo_path: .ansible/collections/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py | max_issues_repo_name: chronicc/proving-ground | max_issues_repo_head_hexsha: 3e392122a05fb8383a3700954baebb0df330e9e3 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: .ansible/collections/ansible_collections/community/general/plugins/modules/cloud/docker/docker_network.py | max_forks_repo_name: chronicc/proving-ground | max_forks_repo_head_hexsha: 3e392122a05fb8383a3700954baebb0df330e9e3 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: docker_network
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
type: str
required: yes
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
- Please note that the module only makes sure that these containers are connected to the network,
but does not care about connection options. If you rely on specific IP addresses etc., use the
M(community.general.docker_container) module to ensure your containers are correctly connected to this network.
type: list
elements: str
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
type: str
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
type: dict
force:
description:
- With state C(absent) forces disconnecting all containers from the
network prior to deleting the network. With state C(present) will
disconnect all containers, delete the network and re-create the
network.
- This option is required if you have changed the IPAM or driver options
and want an existing network to be updated to use the new options.
type: bool
default: no
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
- Use I(appends) to leave existing containers connected.
type: bool
default: no
aliases:
- incremental
enable_ipv6:
description:
- Enable IPv6 networking.
type: bool
ipam_driver:
description:
- Specify an IPAM driver.
type: str
ipam_driver_options:
description:
- Dictionary of IPAM driver options.
type: dict
ipam_options:
description:
- Dictionary of IPAM options.
- Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
parameter.
type: dict
suboptions:
subnet:
description:
- IP subset in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
ipam_config:
description:
- List of IPAM config blocks. Consult
L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
type: list
elements: dict
suboptions:
subnet:
description:
- IP subset in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
state:
description:
- C(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the I(force) option to disconnect all containers
and delete the network.
- C(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
I(appends) option to leave existing containers connected. Use the I(force)
options to force re-creation of the network.
type: str
default: present
choices:
- absent
- present
internal:
description:
- Restrict external access to the network.
type: bool
labels:
description:
- Dictionary of labels.
type: dict
scope:
description:
- Specify the network's scope.
type: str
choices:
- local
- global
- swarm
attachable:
description:
- If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
type: bool
extends_documentation_fragment:
- community.general.docker
- community.general.docker.docker_py_1_documentation
notes:
- When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
    It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
network, loop the M(community.general.docker_container) module to loop over your containers to make sure they are connected properly.
- The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
fail as well.
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
- "Dave Bendit (@DBendit)"
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "The docker server >= 1.10.0"
'''
EXAMPLES = '''
- name: Create a network
community.general.docker_network:
name: network_one
- name: Remove all but selected list of containers
community.general.docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
community.general.docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
community.general.docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with driver options
community.general.docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
- name: Create a network with custom IPAM config
community.general.docker_network:
name: network_three
ipam_config:
- subnet: 172.3.27.0/24
gateway: 172.3.27.2
iprange: 172.3.27.0/26
aux_addresses:
host1: 172.3.27.3
host2: 172.3.27.4
- name: Create a network with labels
community.general.docker_network:
name: network_four
labels:
key1: value1
key2: value2
- name: Create a network with IPv6 IPAM config
community.general.docker_network:
name: network_ipv6_one
enable_ipv6: yes
ipam_config:
- subnet: fdd1:ac8c:0557:7ce1::/64
- name: Create a network with IPv6 and custom IPv4 IPAM config
community.general.docker_network:
name: network_ipv6_two
enable_ipv6: yes
ipam_config:
- subnet: 172.4.27.0/24
- subnet: fdd1:ac8c:0557:7ce2::/64
- name: Delete a network, disconnecting all containers
community.general.docker_network:
name: network_one
state: absent
force: yes
'''
RETURN = '''
network:
description:
- Network inspection results for the affected network.
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
are also accessible directly as C(docker_network). Note that the returned fact will be removed in community.general 2.0.0.
returned: success
type: dict
sample: {}
'''
import re
import traceback
from distutils.version import LooseVersion
from ansible_collections.community.general.plugins.module_utils.docker.common import (
AnsibleDockerClient,
DockerBaseClass,
docker_version,
DifferenceTracker,
clean_dict_booleans_for_docker_api,
RequestException,
)
try:
from docker import utils
from docker.errors import DockerException
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
from docker.types import IPAMPool, IPAMConfig
except Exception:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.name = None
self.connected = None
self.driver = None
self.driver_options = None
self.ipam_driver = None
self.ipam_driver_options = None
self.ipam_options = None
self.ipam_config = None
self.appends = None
self.force = None
self.internal = None
self.labels = None
self.debug = None
self.enable_ipv6 = None
self.scope = None
self.attachable = None
for key, value in client.module.params.items():
setattr(self, key, value)
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
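# Illustrative example (not part of the upstream module): for an inspect result
# like {'Containers': {'abc123': {'Name': 'web'}, 'def456': {'Name': 'db'}}},
# container_names_in_network(...) returns ['web', 'db']; with no containers it
# returns [].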
CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-2][0-9])$')
def validate_cidr(cidr):
"""Validate CIDR. Return IP version of a CIDR string on success.
:param cidr: Valid CIDR
:type cidr: str
:return: ``ipv4`` or ``ipv6``
:rtype: str
:raises ValueError: If ``cidr`` is not a valid CIDR
"""
if CIDR_IPV4.match(cidr):
return 'ipv4'
elif CIDR_IPV6.match(cidr):
return 'ipv6'
raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
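# Illustrative example (not part of the upstream module):
#   validate_cidr('172.3.27.0/24')            -> 'ipv4'
#   validate_cidr('fdd1:ac8c:0557:7ce1::/64') -> 'ipv6'
#   validate_cidr('not-a-cidr')               -> raises ValueError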
def normalize_ipam_config_key(key):
"""Normalizes IPAM config keys returned by Docker API to match Ansible keys.
:param key: Docker API key
:type key: str
:return Ansible module key
:rtype str
"""
special_cases = {
'AuxiliaryAddresses': 'aux_addresses'
}
return special_cases.get(key, key.lower())
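# Illustrative example (not part of the upstream module): 'AuxiliaryAddresses'
# maps to 'aux_addresses', while keys such as 'Subnet' or 'Gateway' simply
# become their lower-case form ('subnet', 'gateway').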
def dicts_are_essentially_equal(a, b):
"""Make sure that a is a subset of b, where None entries of a are ignored."""
for k, v in a.items():
if v is None:
continue
if b.get(k) != v:
return False
return True
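# Illustrative example (not part of the upstream module):
#   dicts_are_essentially_equal({'subnet': '172.3.27.0/24', 'gateway': None},
#                               {'subnet': '172.3.27.0/24', 'gateway': '172.3.27.2'})
# is True, because None entries in the first dict are ignored.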
class DockerNetworkManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.diff_tracker = DifferenceTracker()
self.diff_result = dict()
self.existing_network = self.get_existing_network()
if not self.parameters.connected and self.existing_network:
self.parameters.connected = container_names_in_network(self.existing_network)
if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
self.parameters.ipam_config = [self.parameters.ipam_options]
if self.parameters.ipam_config:
try:
for ipam_config in self.parameters.ipam_config:
validate_cidr(ipam_config['subnet'])
except ValueError as e:
self.client.fail(str(e))
if self.parameters.driver_options:
self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
if self.diff or self.check_mode or self.parameters.debug:
if self.diff:
self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
self.results['diff'] = self.diff_result
def get_existing_network(self):
return self.client.get_network(name=self.parameters.name)
def has_different_config(self, net):
'''
Evaluates an existing network and returns a tuple containing a boolean
indicating if the configuration is different and a list of differences.
:param net: the inspection output for an existing network
:return: (bool, list)
'''
differences = DifferenceTracker()
if self.parameters.driver and self.parameters.driver != net['Driver']:
differences.add('driver',
parameter=self.parameters.driver,
active=net['Driver'])
if self.parameters.driver_options:
if not net.get('Options'):
differences.add('driver_options',
parameter=self.parameters.driver_options,
active=net.get('Options'))
else:
for key, value in self.parameters.driver_options.items():
if not (key in net['Options']) or value != net['Options'][key]:
differences.add('driver_options.%s' % key,
parameter=value,
active=net['Options'].get(key))
if self.parameters.ipam_driver:
if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
differences.add('ipam_driver',
parameter=self.parameters.ipam_driver,
active=net.get('IPAM'))
if self.parameters.ipam_driver_options is not None:
ipam_driver_options = net['IPAM'].get('Options') or {}
if ipam_driver_options != self.parameters.ipam_driver_options:
differences.add('ipam_driver_options',
parameter=self.parameters.ipam_driver_options,
active=ipam_driver_options)
if self.parameters.ipam_config is not None and self.parameters.ipam_config:
if not net.get('IPAM') or not net['IPAM']['Config']:
differences.add('ipam_config',
parameter=self.parameters.ipam_config,
active=net.get('IPAM', {}).get('Config'))
else:
# Put network's IPAM config into the same format as module's IPAM config
net_ipam_configs = []
for net_ipam_config in net['IPAM']['Config']:
config = dict()
for k, v in net_ipam_config.items():
config[normalize_ipam_config_key(k)] = v
net_ipam_configs.append(config)
# Compare lists of dicts as sets of dicts
for idx, ipam_config in enumerate(self.parameters.ipam_config):
net_config = dict()
for net_ipam_config in net_ipam_configs:
if dicts_are_essentially_equal(ipam_config, net_ipam_config):
net_config = net_ipam_config
break
for key, value in ipam_config.items():
if value is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value != net_config.get(key):
differences.add('ipam_config[%s].%s' % (idx, key),
parameter=value,
active=net_config.get(key))
if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
differences.add('enable_ipv6',
parameter=self.parameters.enable_ipv6,
active=net.get('EnableIPv6', False))
if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
differences.add('internal',
parameter=self.parameters.internal,
active=net.get('Internal'))
if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
differences.add('scope',
parameter=self.parameters.scope,
active=net.get('Scope'))
if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
differences.add('attachable',
parameter=self.parameters.attachable,
active=net.get('Attachable'))
if self.parameters.labels:
if not net.get('Labels'):
differences.add('labels',
parameter=self.parameters.labels,
active=net.get('Labels'))
else:
for key, value in self.parameters.labels.items():
if not (key in net['Labels']) or value != net['Labels'][key]:
differences.add('labels.%s' % key,
parameter=value,
active=net['Labels'].get(key))
return not differences.empty, differences
def create_network(self):
if not self.existing_network:
params = dict(
driver=self.parameters.driver,
options=self.parameters.driver_options,
)
ipam_pools = []
if self.parameters.ipam_config:
for ipam_pool in self.parameters.ipam_config:
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
ipam_pools.append(IPAMPool(**ipam_pool))
else:
ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
# Only add ipam parameter if a driver was specified or if IPAM parameters
# were specified. Leaving this parameter away can significantly speed up
# creation; on my machine creation with this option needs ~15 seconds,
# and without just a few seconds.
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools,
options=self.parameters.ipam_driver_options)
else:
params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools)
if self.parameters.enable_ipv6 is not None:
params['enable_ipv6'] = self.parameters.enable_ipv6
if self.parameters.internal is not None:
params['internal'] = self.parameters.internal
if self.parameters.scope is not None:
params['scope'] = self.parameters.scope
if self.parameters.attachable is not None:
params['attachable'] = self.parameters.attachable
if self.parameters.labels:
params['labels'] = self.parameters.labels
if not self.check_mode:
resp = self.client.create_network(self.parameters.name, **params)
self.client.report_warnings(resp, ['Warning'])
self.existing_network = self.client.get_network(network_id=resp['Id'])
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
self.results['changed'] = True
def remove_network(self):
if self.existing_network:
self.disconnect_all_containers()
if not self.check_mode:
self.client.remove_network(self.parameters.name)
self.results['actions'].append("Removed network %s" % (self.parameters.name,))
self.results['changed'] = True
def is_container_connected(self, container_name):
if not self.existing_network:
return False
return container_name in container_names_in_network(self.existing_network)
def connect_containers(self):
for name in self.parameters.connected:
if not self.is_container_connected(name):
if not self.check_mode:
self.client.connect_container_to_network(name, self.parameters.name)
self.results['actions'].append("Connected container %s" % (name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(name),
parameter=True,
active=False)
def disconnect_missing(self):
if not self.existing_network:
return
containers = self.existing_network['Containers']
if not containers:
return
for c in containers.values():
name = c['Name']
if name not in self.parameters.connected:
self.disconnect_container(name)
def disconnect_all_containers(self):
containers = self.client.get_network(name=self.parameters.name)['Containers']
if not containers:
return
for cont in containers.values():
self.disconnect_container(cont['Name'])
def disconnect_container(self, container_name):
if not self.check_mode:
self.client.disconnect_container_from_network(container_name, self.parameters.name)
self.results['actions'].append("Disconnected container %s" % (container_name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(container_name),
parameter=False,
active=True)
def present(self):
different = False
differences = DifferenceTracker()
if self.existing_network:
different, differences = self.has_different_config(self.existing_network)
self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
if self.parameters.force or different:
self.remove_network()
self.existing_network = None
self.create_network()
self.connect_containers()
if not self.parameters.appends:
self.disconnect_missing()
if self.diff or self.check_mode or self.parameters.debug:
self.diff_result['differences'] = differences.get_legacy_docker_diffs()
self.diff_tracker.merge(differences)
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
network_facts = self.get_existing_network()
self.results['ansible_facts'] = {u'docker_network': network_facts}
self.results['network'] = network_facts
def absent(self):
self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
self.remove_network()
def main():
argument_spec = dict(
name=dict(type='str', required=True, aliases=['network_name']),
connected=dict(type='list', default=[], elements='str', aliases=['containers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='bridge'),
driver_options=dict(type='dict', default={}),
force=dict(type='bool', default=False),
appends=dict(type='bool', default=False, aliases=['incremental']),
ipam_driver=dict(type='str'),
ipam_driver_options=dict(type='dict'),
ipam_options=dict(type='dict', default={}, options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
ipam_config=dict(type='list', elements='dict', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
enable_ipv6=dict(type='bool'),
internal=dict(type='bool'),
labels=dict(type='dict', default={}),
debug=dict(type='bool', default=False),
scope=dict(type='str', choices=['local', 'global', 'swarm']),
attachable=dict(type='bool'),
)
mutually_exclusive = [
('ipam_config', 'ipam_options')
]
option_minimal_versions = dict(
scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
labels=dict(docker_api_version='1.23'),
ipam_driver_options=dict(docker_py_version='2.0.0'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
min_docker_version='1.10.0',
min_docker_api_version='1.22',
# "The docker server >= 1.10.0"
option_minimal_versions=option_minimal_versions,
)
try:
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
except RequestException as e:
client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
avg_line_length: 38.139276 | max_line_length: 159 | alphanum_fraction: 0.620764
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: docker_network
short_description: Manage Docker networks
description:
- Create/remove Docker networks and connect containers to them.
- Performs largely the same function as the "docker network" CLI subcommand.
options:
name:
description:
- Name of the network to operate on.
type: str
required: yes
aliases:
- network_name
connected:
description:
- List of container names or container IDs to connect to a network.
- Please note that the module only makes sure that these containers are connected to the network,
but does not care about connection options. If you rely on specific IP addresses etc., use the
M(community.general.docker_container) module to ensure your containers are correctly connected to this network.
type: list
elements: str
aliases:
- containers
driver:
description:
- Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
type: str
default: bridge
driver_options:
description:
- Dictionary of network settings. Consult docker docs for valid options and values.
type: dict
force:
description:
- With state C(absent) forces disconnecting all containers from the
network prior to deleting the network. With state C(present) will
disconnect all containers, delete the network and re-create the
network.
- This option is required if you have changed the IPAM or driver options
and want an existing network to be updated to use the new options.
type: bool
default: no
appends:
description:
- By default the connected list is canonical, meaning containers not on the list are removed from the network.
- Use I(appends) to leave existing containers connected.
type: bool
default: no
aliases:
- incremental
enable_ipv6:
description:
- Enable IPv6 networking.
type: bool
ipam_driver:
description:
- Specify an IPAM driver.
type: str
ipam_driver_options:
description:
- Dictionary of IPAM driver options.
type: dict
ipam_options:
description:
- Dictionary of IPAM options.
- Deprecated in 2.8, will be removed in community.general 2.0.0. Use parameter I(ipam_config) instead. In Docker 1.10.0, IPAM
options were introduced (see L(here,https://github.com/moby/moby/pull/17316)). This module parameter addresses
the IPAM config not the newly introduced IPAM options. For the IPAM options, see the I(ipam_driver_options)
parameter.
type: dict
suboptions:
subnet:
description:
- IP subset in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
ipam_config:
description:
- List of IPAM config blocks. Consult
L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
type: list
elements: dict
suboptions:
subnet:
description:
- IP subset in CIDR notation.
type: str
iprange:
description:
- IP address range in CIDR notation.
type: str
gateway:
description:
- IP gateway address.
type: str
aux_addresses:
description:
- Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
type: dict
state:
description:
- C(absent) deletes the network. If a network has connected containers, it
cannot be deleted. Use the I(force) option to disconnect all containers
and delete the network.
- C(present) creates the network, if it does not already exist with the
specified parameters, and connects the list of containers provided via
the connected parameter. Containers not on the list will be disconnected.
An empty list will leave no containers connected to the network. Use the
I(appends) option to leave existing containers connected. Use the I(force)
options to force re-creation of the network.
type: str
default: present
choices:
- absent
- present
internal:
description:
- Restrict external access to the network.
type: bool
labels:
description:
- Dictionary of labels.
type: dict
scope:
description:
- Specify the network's scope.
type: str
choices:
- local
- global
- swarm
attachable:
description:
- If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
type: bool
extends_documentation_fragment:
- community.general.docker
- community.general.docker.docker_py_1_documentation
notes:
- When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
    It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
    connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
    network, use the M(community.general.docker_container) module in a loop over your containers to make sure they are connected properly.
- The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
fail as well.
author:
- "Ben Keith (@keitwb)"
- "Chris Houseknecht (@chouseknecht)"
- "Dave Bendit (@DBendit)"
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "The docker server >= 1.10.0"
'''
EXAMPLES = '''
- name: Create a network
community.general.docker_network:
name: network_one
- name: Remove all but selected list of containers
community.general.docker_network:
name: network_one
connected:
- container_a
- container_b
- container_c
- name: Remove a single container
community.general.docker_network:
name: network_one
connected: "{{ fulllist|difference(['container_a']) }}"
- name: Add a container to a network, leaving existing containers connected
community.general.docker_network:
name: network_one
connected:
- container_a
appends: yes
- name: Create a network with driver options
community.general.docker_network:
name: network_two
driver_options:
com.docker.network.bridge.name: net2
- name: Create a network with custom IPAM config
community.general.docker_network:
name: network_three
ipam_config:
- subnet: 172.3.27.0/24
gateway: 172.3.27.2
iprange: 172.3.27.0/26
aux_addresses:
host1: 172.3.27.3
host2: 172.3.27.4
- name: Create a network with labels
community.general.docker_network:
name: network_four
labels:
key1: value1
key2: value2
- name: Create a network with IPv6 IPAM config
community.general.docker_network:
name: network_ipv6_one
enable_ipv6: yes
ipam_config:
- subnet: fdd1:ac8c:0557:7ce1::/64
- name: Create a network with IPv6 and custom IPv4 IPAM config
community.general.docker_network:
name: network_ipv6_two
enable_ipv6: yes
ipam_config:
- subnet: 172.4.27.0/24
- subnet: fdd1:ac8c:0557:7ce2::/64
- name: Delete a network, disconnecting all containers
community.general.docker_network:
name: network_one
state: absent
force: yes
'''
RETURN = '''
network:
description:
- Network inspection results for the affected network.
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
are also accessible directly as C(docker_network). Note that the returned fact will be removed in community.general 2.0.0.
returned: success
type: dict
sample: {}
'''
import re
import traceback
from distutils.version import LooseVersion
from ansible_collections.community.general.plugins.module_utils.docker.common import (
AnsibleDockerClient,
DockerBaseClass,
docker_version,
DifferenceTracker,
clean_dict_booleans_for_docker_api,
RequestException,
)
try:
from docker import utils
from docker.errors import DockerException
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
from docker.types import IPAMPool, IPAMConfig
except Exception:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
class TaskParameters(DockerBaseClass):
def __init__(self, client):
super(TaskParameters, self).__init__()
self.client = client
self.name = None
self.connected = None
self.driver = None
self.driver_options = None
self.ipam_driver = None
self.ipam_driver_options = None
self.ipam_options = None
self.ipam_config = None
self.appends = None
self.force = None
self.internal = None
self.labels = None
self.debug = None
self.enable_ipv6 = None
self.scope = None
self.attachable = None
for key, value in client.module.params.items():
setattr(self, key, value)
def container_names_in_network(network):
return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$')  # prefix lengths 0-128
def validate_cidr(cidr):
if CIDR_IPV4.match(cidr):
return 'ipv4'
elif CIDR_IPV6.match(cidr):
return 'ipv6'
raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
def normalize_ipam_config_key(key):
special_cases = {
'AuxiliaryAddresses': 'aux_addresses'
}
return special_cases.get(key, key.lower())
def dicts_are_essentially_equal(a, b):
for k, v in a.items():
if v is None:
continue
if b.get(k) != v:
return False
return True
class DockerNetworkManager(object):
def __init__(self, client):
self.client = client
self.parameters = TaskParameters(client)
self.check_mode = self.client.check_mode
self.results = {
u'changed': False,
u'actions': []
}
self.diff = self.client.module._diff
self.diff_tracker = DifferenceTracker()
self.diff_result = dict()
self.existing_network = self.get_existing_network()
if not self.parameters.connected and self.existing_network:
self.parameters.connected = container_names_in_network(self.existing_network)
if (self.parameters.ipam_options['subnet'] or self.parameters.ipam_options['iprange'] or
self.parameters.ipam_options['gateway'] or self.parameters.ipam_options['aux_addresses']):
self.parameters.ipam_config = [self.parameters.ipam_options]
if self.parameters.ipam_config:
try:
for ipam_config in self.parameters.ipam_config:
validate_cidr(ipam_config['subnet'])
except ValueError as e:
self.client.fail(str(e))
if self.parameters.driver_options:
self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
state = self.parameters.state
if state == 'present':
self.present()
elif state == 'absent':
self.absent()
if self.diff or self.check_mode or self.parameters.debug:
if self.diff:
self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
self.results['diff'] = self.diff_result
def get_existing_network(self):
return self.client.get_network(name=self.parameters.name)
def has_different_config(self, net):
differences = DifferenceTracker()
if self.parameters.driver and self.parameters.driver != net['Driver']:
differences.add('driver',
parameter=self.parameters.driver,
active=net['Driver'])
if self.parameters.driver_options:
if not net.get('Options'):
differences.add('driver_options',
parameter=self.parameters.driver_options,
active=net.get('Options'))
else:
for key, value in self.parameters.driver_options.items():
if not (key in net['Options']) or value != net['Options'][key]:
differences.add('driver_options.%s' % key,
parameter=value,
active=net['Options'].get(key))
if self.parameters.ipam_driver:
if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
differences.add('ipam_driver',
parameter=self.parameters.ipam_driver,
active=net.get('IPAM'))
if self.parameters.ipam_driver_options is not None:
ipam_driver_options = net['IPAM'].get('Options') or {}
if ipam_driver_options != self.parameters.ipam_driver_options:
differences.add('ipam_driver_options',
parameter=self.parameters.ipam_driver_options,
active=ipam_driver_options)
if self.parameters.ipam_config is not None and self.parameters.ipam_config:
if not net.get('IPAM') or not net['IPAM']['Config']:
differences.add('ipam_config',
parameter=self.parameters.ipam_config,
active=net.get('IPAM', {}).get('Config'))
else:
# Put network's IPAM config into the same format as module's IPAM config
net_ipam_configs = []
for net_ipam_config in net['IPAM']['Config']:
config = dict()
for k, v in net_ipam_config.items():
config[normalize_ipam_config_key(k)] = v
net_ipam_configs.append(config)
# Compare lists of dicts as sets of dicts
for idx, ipam_config in enumerate(self.parameters.ipam_config):
net_config = dict()
for net_ipam_config in net_ipam_configs:
if dicts_are_essentially_equal(ipam_config, net_ipam_config):
net_config = net_ipam_config
break
for key, value in ipam_config.items():
if value is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if value != net_config.get(key):
differences.add('ipam_config[%s].%s' % (idx, key),
parameter=value,
active=net_config.get(key))
if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
differences.add('enable_ipv6',
parameter=self.parameters.enable_ipv6,
active=net.get('EnableIPv6', False))
if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
differences.add('internal',
parameter=self.parameters.internal,
active=net.get('Internal'))
if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
differences.add('scope',
parameter=self.parameters.scope,
active=net.get('Scope'))
if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
differences.add('attachable',
parameter=self.parameters.attachable,
active=net.get('Attachable'))
if self.parameters.labels:
if not net.get('Labels'):
differences.add('labels',
parameter=self.parameters.labels,
active=net.get('Labels'))
else:
for key, value in self.parameters.labels.items():
if not (key in net['Labels']) or value != net['Labels'][key]:
differences.add('labels.%s' % key,
parameter=value,
active=net['Labels'].get(key))
return not differences.empty, differences
def create_network(self):
if not self.existing_network:
params = dict(
driver=self.parameters.driver,
options=self.parameters.driver_options,
)
ipam_pools = []
if self.parameters.ipam_config:
for ipam_pool in self.parameters.ipam_config:
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
ipam_pools.append(IPAMPool(**ipam_pool))
else:
ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
# Only add ipam parameter if a driver was specified or if IPAM parameters
# were specified. Leaving this parameter away can significantly speed up
# creation; on my machine creation with this option needs ~15 seconds,
# and without just a few seconds.
if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools,
options=self.parameters.ipam_driver_options)
else:
params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
pool_configs=ipam_pools)
if self.parameters.enable_ipv6 is not None:
params['enable_ipv6'] = self.parameters.enable_ipv6
if self.parameters.internal is not None:
params['internal'] = self.parameters.internal
if self.parameters.scope is not None:
params['scope'] = self.parameters.scope
if self.parameters.attachable is not None:
params['attachable'] = self.parameters.attachable
if self.parameters.labels:
params['labels'] = self.parameters.labels
if not self.check_mode:
resp = self.client.create_network(self.parameters.name, **params)
self.client.report_warnings(resp, ['Warning'])
self.existing_network = self.client.get_network(network_id=resp['Id'])
self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
self.results['changed'] = True
def remove_network(self):
if self.existing_network:
self.disconnect_all_containers()
if not self.check_mode:
self.client.remove_network(self.parameters.name)
self.results['actions'].append("Removed network %s" % (self.parameters.name,))
self.results['changed'] = True
def is_container_connected(self, container_name):
if not self.existing_network:
return False
return container_name in container_names_in_network(self.existing_network)
def connect_containers(self):
for name in self.parameters.connected:
if not self.is_container_connected(name):
if not self.check_mode:
self.client.connect_container_to_network(name, self.parameters.name)
self.results['actions'].append("Connected container %s" % (name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(name),
parameter=True,
active=False)
def disconnect_missing(self):
if not self.existing_network:
return
containers = self.existing_network['Containers']
if not containers:
return
for c in containers.values():
name = c['Name']
if name not in self.parameters.connected:
self.disconnect_container(name)
def disconnect_all_containers(self):
containers = self.client.get_network(name=self.parameters.name)['Containers']
if not containers:
return
for cont in containers.values():
self.disconnect_container(cont['Name'])
def disconnect_container(self, container_name):
if not self.check_mode:
self.client.disconnect_container_from_network(container_name, self.parameters.name)
self.results['actions'].append("Disconnected container %s" % (container_name,))
self.results['changed'] = True
self.diff_tracker.add('connected.{0}'.format(container_name),
parameter=False,
active=True)
def present(self):
different = False
differences = DifferenceTracker()
if self.existing_network:
different, differences = self.has_different_config(self.existing_network)
self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
if self.parameters.force or different:
self.remove_network()
self.existing_network = None
self.create_network()
self.connect_containers()
if not self.parameters.appends:
self.disconnect_missing()
if self.diff or self.check_mode or self.parameters.debug:
self.diff_result['differences'] = differences.get_legacy_docker_diffs()
self.diff_tracker.merge(differences)
if not self.check_mode and not self.parameters.debug:
self.results.pop('actions')
network_facts = self.get_existing_network()
self.results['ansible_facts'] = {u'docker_network': network_facts}
self.results['network'] = network_facts
def absent(self):
self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
self.remove_network()
def main():
argument_spec = dict(
name=dict(type='str', required=True, aliases=['network_name']),
connected=dict(type='list', default=[], elements='str', aliases=['containers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
driver=dict(type='str', default='bridge'),
driver_options=dict(type='dict', default={}),
force=dict(type='bool', default=False),
appends=dict(type='bool', default=False, aliases=['incremental']),
ipam_driver=dict(type='str'),
ipam_driver_options=dict(type='dict'),
ipam_options=dict(type='dict', default={}, options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
), removed_in_version='2.0.0', removed_from_collection='community.general'), # was Ansible 2.12
ipam_config=dict(type='list', elements='dict', options=dict(
subnet=dict(type='str'),
iprange=dict(type='str'),
gateway=dict(type='str'),
aux_addresses=dict(type='dict'),
)),
enable_ipv6=dict(type='bool'),
internal=dict(type='bool'),
labels=dict(type='dict', default={}),
debug=dict(type='bool', default=False),
scope=dict(type='str', choices=['local', 'global', 'swarm']),
attachable=dict(type='bool'),
)
mutually_exclusive = [
('ipam_config', 'ipam_options')
]
option_minimal_versions = dict(
scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
labels=dict(docker_api_version='1.23'),
ipam_driver_options=dict(docker_py_version='2.0.0'),
)
client = AnsibleDockerClient(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
min_docker_version='1.10.0',
min_docker_api_version='1.22',
# "The docker server >= 1.10.0"
option_minimal_versions=option_minimal_versions,
)
try:
cm = DockerNetworkManager(client)
client.module.exit_json(**cm.results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
except RequestException as e:
client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
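# A minimal standalone sketch (not part of the module above) of the IPAM
# config comparison that has_different_config() performs: keys returned by the
# Docker API are normalized to the module's spelling, and requested keys whose
# value is None are ignored, because the recursive argument_spec always fills
# them in. All sample values here are hypothetical.
def _example_ipam_comparison():
    def normalize_key(key):
        # 'AuxiliaryAddresses' is the only key that is not simply lowercased.
        return {'AuxiliaryAddresses': 'aux_addresses'}.get(key, key.lower())

    def essentially_equal(requested, active):
        # Keys requested as None mean "don't care".
        return all(active.get(k) == v for k, v in requested.items() if v is not None)

    docker_api_config = {'Subnet': '172.3.27.0/24', 'Gateway': '172.3.27.2'}
    active = dict((normalize_key(k), v) for k, v in docker_api_config.items())
    requested = {'subnet': '172.3.27.0/24', 'iprange': None,
                 'gateway': '172.3.27.2', 'aux_addresses': None}
    return essentially_equal(requested, active)  # True -> no re-creation needed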
| true
| true
|
f70cc7139e0c240885f1ad904fb1b00d47ba34ff
| 176
|
py
|
Python
|
main.py
|
mohamadhayeri9/async
|
c63f2cb1532098ab2e115f90156cf073a99c0b85
|
[
"MIT"
] | 1
|
2020-05-13T22:34:03.000Z
|
2020-05-13T22:34:03.000Z
|
main.py
|
mohamadhayeri9/async
|
c63f2cb1532098ab2e115f90156cf073a99c0b85
|
[
"MIT"
] | null | null | null |
main.py
|
mohamadhayeri9/async
|
c63f2cb1532098ab2e115f90156cf073a99c0b85
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import asyncio
async def main():
print('hello')
await asyncio.sleep(1)
print('world')
if __name__=="__main__":
asyncio.run(main())
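# A small follow-up sketch (not part of the file above) showing the same
# asyncio API running two coroutines concurrently with asyncio.gather(); the
# coroutine names and delays are arbitrary examples.
import asyncio

async def say_after(delay, what):
    await asyncio.sleep(delay)
    print(what)

async def concurrent_main():
    # Both sleeps overlap, so this completes in about 2 seconds, not 3.
    await asyncio.gather(say_after(1, 'hello'), say_after(2, 'world'))

if __name__ == "__main__":
    asyncio.run(concurrent_main())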
| 12.571429
| 26
| 0.636364
|
import asyncio
async def main():
print('hello')
await asyncio.sleep(1)
print('world')
if __name__=="__main__":
asyncio.run(main())
| true
| true
|
f70cc72811e78a54aee8eb1e02a003cb5c2b56ec
| 234
|
py
|
Python
|
script/translations/const.py
|
fb22/home-assistant
|
a95ab9c8099e321a5be0e2d6689258ac9c262636
|
[
"Apache-2.0"
] | null | null | null |
script/translations/const.py
|
fb22/home-assistant
|
a95ab9c8099e321a5be0e2d6689258ac9c262636
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:54:31.000Z
|
2022-03-12T00:50:43.000Z
|
script/translations/const.py
|
fb22/home-assistant
|
a95ab9c8099e321a5be0e2d6689258ac9c262636
|
[
"Apache-2.0"
] | null | null | null |
"""Translation constants."""
import pathlib
PROJECT_ID = "130246255a974bd3b5e8a1.51616605"
DOCKER_IMAGE = "b8329d20280263cad04f65b843e54b9e8e6909a348a678eac959550b5ef5c75f"
INTEGRATIONS_DIR = pathlib.Path("homeassistant/components")
| 33.428571
| 81
| 0.850427
|
import pathlib
PROJECT_ID = "130246255a974bd3b5e8a1.51616605"
DOCKER_IMAGE = "b8329d20280263cad04f65b843e54b9e8e6909a348a678eac959550b5ef5c75f"
INTEGRATIONS_DIR = pathlib.Path("homeassistant/components")
| true
| true
|
f70cc782bacfe72f2515177fe5c77a0649b22ebf
| 568
|
py
|
Python
|
app.py
|
josh91hickman/react_wiki
|
4f38cf5edca567c1d04ec2d9184202a28f944c71
|
[
"MIT"
] | null | null | null |
app.py
|
josh91hickman/react_wiki
|
4f38cf5edca567c1d04ec2d9184202a28f944c71
|
[
"MIT"
] | null | null | null |
app.py
|
josh91hickman/react_wiki
|
4f38cf5edca567c1d04ec2d9184202a28f944c71
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask, send_from_directory
app = Flask(__name__, static_folder='client/build')
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve(path):
if(path == ""):
return send_from_directory('client/build', 'index.html')
else:
if(os.path.exists("client/build/" + path)):
return send_from_directory('client/build', path)
else:
return send_from_directory('client/build', 'index.html')
if __name__ == '__main__':
app.run(use_reloader=True, port=3000, threaded=True)
| 29.894737
| 68
| 0.647887
|
import os
from flask import Flask, send_from_directory
app = Flask(__name__, static_folder='client/build')
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve(path):
if(path == ""):
return send_from_directory('client/build', 'index.html')
else:
if(os.path.exists("client/build/" + path)):
return send_from_directory('client/build', path)
else:
return send_from_directory('client/build', 'index.html')
if __name__ == '__main__':
app.run(use_reloader=True, port=3000, threaded=True)
| true
| true
|
f70cc7c8d0a5ba28ecc472eecce5eb80c4c255cb
| 7,321
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20181001/get_interface_endpoint.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20181001/get_interface_endpoint.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20181001/get_interface_endpoint.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetInterfaceEndpointResult',
'AwaitableGetInterfaceEndpointResult',
'get_interface_endpoint',
]
@pulumi.output_type
class GetInterfaceEndpointResult:
"""
Interface endpoint resource.
"""
def __init__(__self__, endpoint_service=None, etag=None, fqdn=None, location=None, name=None, network_interfaces=None, owner=None, provisioning_state=None, subnet=None, tags=None, type=None):
if endpoint_service and not isinstance(endpoint_service, dict):
raise TypeError("Expected argument 'endpoint_service' to be a dict")
pulumi.set(__self__, "endpoint_service", endpoint_service)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_interfaces and not isinstance(network_interfaces, list):
raise TypeError("Expected argument 'network_interfaces' to be a list")
pulumi.set(__self__, "network_interfaces", network_interfaces)
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
pulumi.set(__self__, "owner", owner)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if subnet and not isinstance(subnet, dict):
raise TypeError("Expected argument 'subnet' to be a dict")
pulumi.set(__self__, "subnet", subnet)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="endpointService")
def endpoint_service(self) -> Optional['outputs.EndpointServiceResponse']:
"""
A reference to the service being brought into the virtual network.
"""
return pulumi.get(self, "endpoint_service")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
"""
A first-party service's FQDN that is mapped to the private IP allocated via this interface endpoint.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
"""
Gets an array of references to the network interfaces created for this interface endpoint.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter
def owner(self) -> str:
"""
A read-only property that identifies who created this interface endpoint.
"""
return pulumi.get(self, "owner")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the interface endpoint. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubnetResponse']:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetInterfaceEndpointResult(GetInterfaceEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInterfaceEndpointResult(
endpoint_service=self.endpoint_service,
etag=self.etag,
fqdn=self.fqdn,
location=self.location,
name=self.name,
network_interfaces=self.network_interfaces,
owner=self.owner,
provisioning_state=self.provisioning_state,
subnet=self.subnet,
tags=self.tags,
type=self.type)
def get_interface_endpoint(expand: Optional[str] = None,
interface_endpoint_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInterfaceEndpointResult:
"""
Use this data source to access information about an existing resource.
:param str expand: Expands referenced resources.
:param str interface_endpoint_name: The name of the interface endpoint.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['interfaceEndpointName'] = interface_endpoint_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20181001:getInterfaceEndpoint', __args__, opts=opts, typ=GetInterfaceEndpointResult).value
return AwaitableGetInterfaceEndpointResult(
endpoint_service=__ret__.endpoint_service,
etag=__ret__.etag,
fqdn=__ret__.fqdn,
location=__ret__.location,
name=__ret__.name,
network_interfaces=__ret__.network_interfaces,
owner=__ret__.owner,
provisioning_state=__ret__.provisioning_state,
subnet=__ret__.subnet,
tags=__ret__.tags,
type=__ret__.type)
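# A short usage sketch for the data source defined above. The resource group
# and endpoint names are hypothetical placeholders, and the import path is
# assumed from this file's location in the SDK.
import pulumi
import pulumi_azure_nextgen.network.v20181001 as network

endpoint = network.get_interface_endpoint(
    resource_group_name="example-rg",
    interface_endpoint_name="example-endpoint",
    expand="networkInterfaces")

pulumi.export("endpoint_fqdn", endpoint.fqdn)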
| 36.788945
| 195
| 0.647179
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetInterfaceEndpointResult',
'AwaitableGetInterfaceEndpointResult',
'get_interface_endpoint',
]
@pulumi.output_type
class GetInterfaceEndpointResult:
def __init__(__self__, endpoint_service=None, etag=None, fqdn=None, location=None, name=None, network_interfaces=None, owner=None, provisioning_state=None, subnet=None, tags=None, type=None):
if endpoint_service and not isinstance(endpoint_service, dict):
raise TypeError("Expected argument 'endpoint_service' to be a dict")
pulumi.set(__self__, "endpoint_service", endpoint_service)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_interfaces and not isinstance(network_interfaces, list):
raise TypeError("Expected argument 'network_interfaces' to be a list")
pulumi.set(__self__, "network_interfaces", network_interfaces)
if owner and not isinstance(owner, str):
raise TypeError("Expected argument 'owner' to be a str")
pulumi.set(__self__, "owner", owner)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if subnet and not isinstance(subnet, dict):
raise TypeError("Expected argument 'subnet' to be a dict")
pulumi.set(__self__, "subnet", subnet)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="endpointService")
def endpoint_service(self) -> Optional['outputs.EndpointServiceResponse']:
return pulumi.get(self, "endpoint_service")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def location(self) -> Optional[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter
def owner(self) -> str:
return pulumi.get(self, "owner")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> Optional['outputs.SubnetResponse']:
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetInterfaceEndpointResult(GetInterfaceEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInterfaceEndpointResult(
endpoint_service=self.endpoint_service,
etag=self.etag,
fqdn=self.fqdn,
location=self.location,
name=self.name,
network_interfaces=self.network_interfaces,
owner=self.owner,
provisioning_state=self.provisioning_state,
subnet=self.subnet,
tags=self.tags,
type=self.type)
def get_interface_endpoint(expand: Optional[str] = None,
interface_endpoint_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInterfaceEndpointResult:
__args__ = dict()
__args__['expand'] = expand
__args__['interfaceEndpointName'] = interface_endpoint_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20181001:getInterfaceEndpoint', __args__, opts=opts, typ=GetInterfaceEndpointResult).value
return AwaitableGetInterfaceEndpointResult(
endpoint_service=__ret__.endpoint_service,
etag=__ret__.etag,
fqdn=__ret__.fqdn,
location=__ret__.location,
name=__ret__.name,
network_interfaces=__ret__.network_interfaces,
owner=__ret__.owner,
provisioning_state=__ret__.provisioning_state,
subnet=__ret__.subnet,
tags=__ret__.tags,
type=__ret__.type)
| true
| true
|
f70cc8f9c80c33b5721649fb0844c8267df4d7e6
| 1,415
|
py
|
Python
|
cinderclient/tests/v2/test_snapshot_actions.py
|
Acidburn0zzz/python-cinderclient
|
a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2
|
[
"Apache-1.1"
] | null | null | null |
cinderclient/tests/v2/test_snapshot_actions.py
|
Acidburn0zzz/python-cinderclient
|
a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2
|
[
"Apache-1.1"
] | null | null | null |
cinderclient/tests/v2/test_snapshot_actions.py
|
Acidburn0zzz/python-cinderclient
|
a58e14fc4f33e1f1eea7aa4ced3cf8976cb112c2
|
[
"Apache-1.1"
] | null | null | null |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.tests import utils
from cinderclient.tests.v2 import fakes
cs = fakes.FakeClient()
class SnapshotActionsTest(utils.TestCase):
def test_update_snapshot_status(self):
s = cs.volume_snapshots.get('1234')
cs.volume_snapshots.update_snapshot_status(s,
{'status': 'available'})
cs.assert_called('POST', '/snapshots/1234/action')
def test_update_snapshot_status_with_progress(self):
s = cs.volume_snapshots.get('1234')
cs.volume_snapshots.update_snapshot_status(s,
{'status': 'available',
'progress': '73%'})
cs.assert_called('POST', '/snapshots/1234/action')
| 39.305556
| 78
| 0.640989
|
from cinderclient.tests import utils
from cinderclient.tests.v2 import fakes
cs = fakes.FakeClient()
class SnapshotActionsTest(utils.TestCase):
def test_update_snapshot_status(self):
s = cs.volume_snapshots.get('1234')
cs.volume_snapshots.update_snapshot_status(s,
{'status': 'available'})
cs.assert_called('POST', '/snapshots/1234/action')
def test_update_snapshot_status_with_progress(self):
s = cs.volume_snapshots.get('1234')
cs.volume_snapshots.update_snapshot_status(s,
{'status': 'available',
'progress': '73%'})
cs.assert_called('POST', '/snapshots/1234/action')
| true
| true
|
f70cc90ac14d2efe82f4953bbeee0424dfd60ea8
| 4,769
|
py
|
Python
|
turbo_transformers/python/tests/bert_model_test.py
|
xcnick/TurboTransformers
|
48b6ba09af2219616c6b97cc5c09222408e080c2
|
[
"BSD-3-Clause"
] | null | null | null |
turbo_transformers/python/tests/bert_model_test.py
|
xcnick/TurboTransformers
|
48b6ba09af2219616c6b97cc5c09222408e080c2
|
[
"BSD-3-Clause"
] | null | null | null |
turbo_transformers/python/tests/bert_model_test.py
|
xcnick/TurboTransformers
|
48b6ba09af2219616c6b97cc5c09222408e080c2
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2020 THL A29 Limited, a Tencent company.
# All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
# https://opensource.org/licenses/BSD-3-Clause
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# See the AUTHORS file for names of contributors.
import unittest
import torch
from transformers.modeling_bert import BertModel, BertConfig
import numpy
import turbo_transformers
import sys
import os
sys.path.append(os.path.dirname(__file__))
import test_helper
class TestBertModel(unittest.TestCase):
def init_data(self, use_cuda) -> None:
torch.set_grad_enabled(False)
torch.set_num_threads(4)
turbo_transformers.set_num_threads(4)
self.test_device = torch.device('cuda:0') if use_cuda else \
torch.device('cpu:0')
self.cfg = BertConfig()
self.torch_model = BertModel(self.cfg)
self.torch_model.eval()
if torch.cuda.is_available():
self.torch_model.to(self.test_device)
self.turbo_model = turbo_transformers.BertModel.from_torch(
self.torch_model, self.test_device, "turbo", use_memory_opt=True)
def check_torch_and_turbo(self,
use_cuda,
batch_size,
seq_len,
use_memory_opt=True):
self.init_data(use_cuda)
num_iter = 1
device_name = "GPU" if use_cuda else "CPU"
input_ids = torch.randint(low=0,
high=self.cfg.vocab_size - 1,
size=(batch_size, seq_len),
dtype=torch.long,
device=self.test_device)
torch_model = lambda: self.torch_model(input_ids)
torch_result, torch_qps, torch_time = \
test_helper.run_model(torch_model, use_cuda, num_iter)
print(f'BertModel PyTorch({device_name}) QPS {torch_qps}')
turbo_model = (lambda: self.turbo_model(input_ids))
if use_memory_opt:
turbo_transformers.bert_opt_mem_allocate_api(
input_ids.size()[0], # batch
input_ids.size()[1], # seq_len
self.cfg.num_attention_heads,
self.cfg.hidden_size,
self.cfg.num_hidden_layers,
"GPU" if 'cuda' in input_ids.device.type else "CPU")
with turbo_transformers.pref_guard("bert_perf") as perf:
turbo_result, turbo_qps, turbo_time = \
test_helper.run_model(turbo_model, use_cuda, num_iter)
print(f'BertModel TurboTransformer({device_name}) QPS {turbo_qps}')
# set the allocator back to naive, otherwise it will affect
# the other inference processes.
if use_memory_opt:
turbo_transformers.reset_allocator_schema("naive")
print(f"batch {batch_size} seq_len {seq_len}")
print(torch.max(torch_result[0].cpu() - turbo_result[0].cpu()))
self.assertTrue(
numpy.allclose(torch_result[0].cpu(),
turbo_result[0].cpu(),
atol=1e-2,
rtol=1e-3))
def test_bert_model_helper(self, use_memory_opt=False):
if use_memory_opt:
turbo_transformers.reset_allocator_schema("model-aware")
for batch_size in [1, 4, 20]:
for seq_len in [50, 4, 16]:
if torch.cuda.is_available() and \
turbo_transformers.config.is_compiled_with_cuda():
self.check_torch_and_turbo(use_cuda=True,
batch_size=batch_size,
seq_len=seq_len,
use_memory_opt=use_memory_opt)
self.check_torch_and_turbo(use_cuda=False,
batch_size=batch_size,
seq_len=seq_len,
use_memory_opt=use_memory_opt)
if use_memory_opt:
turbo_transformers.reset_allocator_schema("naive")
def test_bert_model(self, use_memory_opt=False):
self.test_bert_model_helper(True)
self.test_bert_model_helper(False)
if __name__ == '__main__':
unittest.main()
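# A condensed sketch of the memory-optimization pattern exercised by the test
# above, using the same turbo_transformers calls; the batch/sequence sizes are
# hypothetical and the model is an untrained BertConfig() default.
import torch
import turbo_transformers
from transformers.modeling_bert import BertModel, BertConfig

def run_with_model_aware_allocator(batch_size=4, seq_len=16):
    cfg = BertConfig()
    torch_model = BertModel(cfg).eval()
    turbo_model = turbo_transformers.BertModel.from_torch(
        torch_model, torch.device('cpu:0'), "turbo", use_memory_opt=True)
    input_ids = torch.randint(0, cfg.vocab_size - 1, (batch_size, seq_len),
                              dtype=torch.long)
    # Switch to the model-aware allocator and pre-allocate buffers for this
    # exact (batch, seq_len) shape before running inference.
    turbo_transformers.reset_allocator_schema("model-aware")
    turbo_transformers.bert_opt_mem_allocate_api(
        batch_size, seq_len, cfg.num_attention_heads, cfg.hidden_size,
        cfg.num_hidden_layers, "CPU")
    result = turbo_model(input_ids)
    # Switch back so later inference calls are unaffected.
    turbo_transformers.reset_allocator_schema("naive")
    return result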
| 40.07563
| 77
| 0.59698
|
import unittest
import torch
from transformers.modeling_bert import BertModel, BertConfig
import numpy
import turbo_transformers
import sys
import os
sys.path.append(os.path.dirname(__file__))
import test_helper
class TestBertModel(unittest.TestCase):
def init_data(self, use_cuda) -> None:
torch.set_grad_enabled(False)
torch.set_num_threads(4)
turbo_transformers.set_num_threads(4)
self.test_device = torch.device('cuda:0') if use_cuda else \
torch.device('cpu:0')
self.cfg = BertConfig()
self.torch_model = BertModel(self.cfg)
self.torch_model.eval()
if torch.cuda.is_available():
self.torch_model.to(self.test_device)
self.turbo_model = turbo_transformers.BertModel.from_torch(
self.torch_model, self.test_device, "turbo", use_memory_opt=True)
def check_torch_and_turbo(self,
use_cuda,
batch_size,
seq_len,
use_memory_opt=True):
self.init_data(use_cuda)
num_iter = 1
device_name = "GPU" if use_cuda else "CPU"
input_ids = torch.randint(low=0,
high=self.cfg.vocab_size - 1,
size=(batch_size, seq_len),
dtype=torch.long,
device=self.test_device)
torch_model = lambda: self.torch_model(input_ids)
torch_result, torch_qps, torch_time = \
test_helper.run_model(torch_model, use_cuda, num_iter)
print(f'BertModel PyTorch({device_name}) QPS {torch_qps}')
turbo_model = (lambda: self.turbo_model(input_ids))
if use_memory_opt:
turbo_transformers.bert_opt_mem_allocate_api(
input_ids.size()[0], input_ids.size()[1], self.cfg.num_attention_heads,
self.cfg.hidden_size,
self.cfg.num_hidden_layers,
"GPU" if 'cuda' in input_ids.device.type else "CPU")
with turbo_transformers.pref_guard("bert_perf") as perf:
turbo_result, turbo_qps, turbo_time = \
test_helper.run_model(turbo_model, use_cuda, num_iter)
print(f'BertModel TurboTransformer({device_name}) QPS {turbo_qps}')
if use_memory_opt:
turbo_transformers.reset_allocator_schema("naive")
print(f"batch {batch_size} seq_len {seq_len}")
print(torch.max(torch_result[0].cpu() - turbo_result[0].cpu()))
self.assertTrue(
numpy.allclose(torch_result[0].cpu(),
turbo_result[0].cpu(),
atol=1e-2,
rtol=1e-3))
def test_bert_model_helper(self, use_memory_opt=False):
if use_memory_opt:
turbo_transformers.reset_allocator_schema("model-aware")
for batch_size in [1, 4, 20]:
for seq_len in [50, 4, 16]:
if torch.cuda.is_available() and \
turbo_transformers.config.is_compiled_with_cuda():
self.check_torch_and_turbo(use_cuda=True,
batch_size=batch_size,
seq_len=seq_len,
use_memory_opt=use_memory_opt)
self.check_torch_and_turbo(use_cuda=False,
batch_size=batch_size,
seq_len=seq_len,
use_memory_opt=use_memory_opt)
if use_memory_opt:
turbo_transformers.reset_allocator_schema("naive")
def test_bert_model(self, use_memory_opt=False):
self.test_bert_model_helper(True)
self.test_bert_model_helper(False)
if __name__ == '__main__':
unittest.main()
| true
| true
|
f70cc93d1680ca37f3c39292da32ba7f7276f062
| 2,903
|
py
|
Python
|
panorama-commit-push.py
|
djspears/pythonscripts
|
f4795ade74da33d84b6b3db0a2792ebd48c9c3d5
|
[
"Apache-2.0"
] | null | null | null |
panorama-commit-push.py
|
djspears/pythonscripts
|
f4795ade74da33d84b6b3db0a2792ebd48c9c3d5
|
[
"Apache-2.0"
] | null | null | null |
panorama-commit-push.py
|
djspears/pythonscripts
|
f4795ade74da33d84b6b3db0a2792ebd48c9c3d5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2014, Palo Alto Networks
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: David Spears <dspears@paloaltonetworks.com>
"""
panorama-commit-push.py
==========
This script performs a Panorama commit and then pushes the configuration to devices in a
specific device group, which is passed in as an argument at execution.
**Usage**::
    panorama-commit-push.py [-h] [-v] [-q] hostname username password devicegroup
**Examples**:
Commit to a Panorama at 13.129.150.75 that has a modified devicegroup named GWLB:
$ python panorama-commit-push.py 13.129.150.75 username password GWLB
Instructions for installing the PAN-OS-SDK are located here:
https://pandevice.readthedocs.io/en/latest/getting-started.html
"""
__author__ = "djspears"
import argparse
from panos import panorama
def main():
# Get command line arguments
parser = argparse.ArgumentParser(
description="Commit and Push an updated Panorama device group configuration"
)
parser.add_argument(
"-v", "--verbose", action="count", help="Verbose (-vv for extra verbose)"
)
parser.add_argument("-q", "--quiet", action="store_true", help="No output")
# Palo Alto Networks related arguments
fw_group = parser.add_argument_group("Palo Alto Networks Device")
fw_group.add_argument("hostname", help="Hostname of Panorama")
fw_group.add_argument("username", help="Username for Panorama")
fw_group.add_argument("password", help="Password for Panorama")
fw_group.add_argument("devicegroup", help="DeviceGroup for Panorama")
args = parser.parse_args()
# Connects to Panorama.
pano = panorama.Panorama(args.hostname, args.username, args.password,) # Create a panorama object
# Performs the commit and device group push
print("Performing commit...")
pano.commit(sync_all=True,sync=True)
print("Done")
print("Performing device push...")
pano.commit_all(sync=True,sync_all=True,cmd="<commit-all><shared-policy><device-group><entry name='%s'/></device-group></shared-policy></commit-all>"%(args.devicegroup))
print("Done")
# Call the main() function to begin the program if not
# loaded as a module.
if __name__ == "__main__":
main()
| 36.2875
| 173
| 0.729935
|
__author__ = "djspears"
import argparse
from panos import panorama
def main():
parser = argparse.ArgumentParser(
description="Commit and Push an updated Panorama device group configuration"
)
parser.add_argument(
"-v", "--verbose", action="count", help="Verbose (-vv for extra verbose)"
)
parser.add_argument("-q", "--quiet", action="store_true", help="No output")
fw_group = parser.add_argument_group("Palo Alto Networks Device")
fw_group.add_argument("hostname", help="Hostname of Panorama")
fw_group.add_argument("username", help="Username for Panorama")
fw_group.add_argument("password", help="Password for Panorama")
fw_group.add_argument("devicegroup", help="DeviceGroup for Panorama")
args = parser.parse_args()
    pano = panorama.Panorama(args.hostname, args.username, args.password,)
    print("Performing commit...")
pano.commit(sync_all=True,sync=True)
print("Done")
print("Performing device push...")
pano.commit_all(sync=True,sync_all=True,cmd="<commit-all><shared-policy><device-group><entry name='%s'/></device-group></shared-policy></commit-all>"%(args.devicegroup))
print("Done")
if __name__ == "__main__":
main()
| true
| true
|
f70cc9b9c9627bc14e886c5c6f9138a31e34aa24
| 539
|
py
|
Python
|
shortener/utils.py
|
Alexmhack/django_url_shorter
|
cbdad3b08db3558ba4383d29964f88e110b92119
|
[
"MIT"
] | null | null | null |
shortener/utils.py
|
Alexmhack/django_url_shorter
|
cbdad3b08db3558ba4383d29964f88e110b92119
|
[
"MIT"
] | null | null | null |
shortener/utils.py
|
Alexmhack/django_url_shorter
|
cbdad3b08db3558ba4383d29964f88e110b92119
|
[
"MIT"
] | null | null | null |
import random
import string
from django.conf import settings
SHORTCODE_MIN = getattr(settings, "SHORTCODE_MIN", 5)
def code_generator(size=SHORTCODE_MIN, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase):
return ''.join(random.choice(chars) for _ in range(size))
def create_shortcode(instance, size=SHORTCODE_MIN):
new_code = code_generator(size=size)
Klass = instance.__class__
qs_exists = Klass.objects.filter(shortcode=new_code).exists()
if qs_exists:
return code_generator(size=size)
return new_code
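# A hedged sketch of how create_shortcode() is typically wired into a model's
# save path; the ShortURL model and its fields are hypothetical and not part
# of this app.
from django.db import models
from .utils import create_shortcode

class ShortURL(models.Model):
    url = models.URLField(max_length=220)
    shortcode = models.CharField(max_length=15, unique=True, blank=True)

    def save(self, *args, **kwargs):
        # Generate a shortcode on first save if one was not supplied.
        if not self.shortcode:
            self.shortcode = create_shortcode(self)
        super().save(*args, **kwargs)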
| 28.368421
| 110
| 0.794063
|
import random
import string
from django.conf import settings
SHORTCODE_MIN = getattr(settings, "SHORTCODE_MIN", 5)
def code_generator(size=SHORTCODE_MIN, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase):
return ''.join(random.choice(chars) for _ in range(size))
def create_shortcode(instance, size=SHORTCODE_MIN):
new_code = code_generator(size=size)
Klass = instance.__class__
qs_exists = Klass.objects.filter(shortcode=new_code).exists()
if qs_exists:
return code_generator(size=size)
return new_code
| true
| true
|
f70ccb57b06d137684e4f325e080a29819d1a23b
| 142
|
py
|
Python
|
PATH/output_path.py
|
visionshao/TerminologyExtraction
|
ff23d7794e8bbfbc6c576fd5d46b37b40a652b67
|
[
"MIT"
] | 1
|
2021-04-26T21:44:20.000Z
|
2021-04-26T21:44:20.000Z
|
PATH/output_path.py
|
visionshao/TerminologyExtraction
|
ff23d7794e8bbfbc6c576fd5d46b37b40a652b67
|
[
"MIT"
] | null | null | null |
PATH/output_path.py
|
visionshao/TerminologyExtraction
|
ff23d7794e8bbfbc6c576fd5d46b37b40a652b67
|
[
"MIT"
] | null | null | null |
preprocess_output = r"D:\Codes\Wos_IE\result\content_dic.json"
abbreviate_dictionary_output = r"D:\Codes\Wos_IE\result\abbreviate_words.json"
| 71
| 78
| 0.830986
|
preprocess_output = r"D:\Codes\Wos_IE\result\content_dic.json"
abbreviate_dictionary_output = r"D:\Codes\Wos_IE\result\abbreviate_words.json"
| true
| true
|
f70ccd30d2b618720439af87f1cbf9a0e6f3a904
| 2,541
|
py
|
Python
|
tools/accuracy_checker/accuracy_checker/annotation_converters/cluttered_mnist.py
|
evgeny-izutov/open_model_zoo
|
2cd6145ef342fc9b7ccf32676af73f4a1cb8d9ba
|
[
"Apache-2.0"
] | 4
|
2019-09-17T13:11:02.000Z
|
2021-02-22T15:39:15.000Z
|
tools/accuracy_checker/accuracy_checker/annotation_converters/cluttered_mnist.py
|
evgeny-izutov/open_model_zoo
|
2cd6145ef342fc9b7ccf32676af73f4a1cb8d9ba
|
[
"Apache-2.0"
] | null | null | null |
tools/accuracy_checker/accuracy_checker/annotation_converters/cluttered_mnist.py
|
evgeny-izutov/open_model_zoo
|
2cd6145ef342fc9b7ccf32676af73f4a1cb8d9ba
|
[
"Apache-2.0"
] | 1
|
2022-01-12T03:47:00.000Z
|
2022-01-12T03:47:00.000Z
|
"""
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from PIL import Image
import numpy as np
from .format_converter import BaseFormatConverter, ConverterReturn
from ..config import PathField, StringField, BoolField
from ..representation import ClassificationAnnotation
class ClutteredMNISTConverter(BaseFormatConverter):
__provider__ = 'cluttered_mnist'
@classmethod
def parameters(cls):
params = super().parameters()
params.update({
'data_file': PathField(),
'split': StringField(optional=True, default='test', choices=['train', 'valid', 'test']),
'convert_images': BoolField(optional=True, default=True),
'images_dir': PathField(is_directory=True, optional=True)
})
return params
def configure(self):
self.data_file = self.get_value_from_config('data_file')
self.split = self.get_value_from_config('split')
self.convert_images = self.get_value_from_config('convert_images')
self.images_dir = self.get_value_from_config('images_dir') or self.data_file.parent / 'converted_images'
if self.convert_images and not self.images_dir.exists():
self.images_dir.mkdir(parents=True)
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
data = np.load(str(self.data_file))
x_values = data['x_{}'.format(self.split)]
y_values = data['y_{}'.format(self.split)]
annotations = []
for idx, y in enumerate(y_values):
identifier = '{}_{}.png'.format(self.split, idx)
y_label = np.argmax(y)
if self.convert_images:
x = x_values[idx].reshape((60, 60)) * 255
image = Image.fromarray(x)
image = image.convert("L")
image.save(str(self.images_dir / identifier))
annotations.append(ClassificationAnnotation(identifier, y_label))
return ConverterReturn(annotations, None, None)
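# A small standalone sketch of the per-sample conversion step performed in
# convert() above: a flattened 60x60 float image is rescaled and saved as an
# 8-bit grayscale PNG, and the one-hot label vector is collapsed to a class
# index. The arrays below are synthetic stand-ins for entries of the real
# .npz data file.
import numpy as np
from PIL import Image

x = np.random.rand(3600).astype(np.float32)  # one flattened 60x60 image
y = np.eye(10)[3]                            # one-hot label for class 3

image = Image.fromarray(x.reshape((60, 60)) * 255).convert("L")
label = int(np.argmax(y))                    # -> 3
image.save('{}_{}.png'.format('test', 0))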
| 40.983871
| 112
| 0.68477
|
from PIL import Image
import numpy as np
from .format_converter import BaseFormatConverter, ConverterReturn
from ..config import PathField, StringField, BoolField
from ..representation import ClassificationAnnotation
class ClutteredMNISTConverter(BaseFormatConverter):
__provider__ = 'cluttered_mnist'
@classmethod
def parameters(cls):
params = super().parameters()
params.update({
'data_file': PathField(),
'split': StringField(optional=True, default='test', choices=['train', 'valid', 'test']),
'convert_images': BoolField(optional=True, default=True),
'images_dir': PathField(is_directory=True, optional=True)
})
return params
def configure(self):
self.data_file = self.get_value_from_config('data_file')
self.split = self.get_value_from_config('split')
self.convert_images = self.get_value_from_config('convert_images')
self.images_dir = self.get_value_from_config('images_dir') or self.data_file.parent / 'converted_images'
if self.convert_images and not self.images_dir.exists():
self.images_dir.mkdir(parents=True)
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
data = np.load(str(self.data_file))
x_values = data['x_{}'.format(self.split)]
y_values = data['y_{}'.format(self.split)]
annotations = []
for idx, y in enumerate(y_values):
identifier = '{}_{}.png'.format(self.split, idx)
y_label = np.argmax(y)
if self.convert_images:
x = x_values[idx].reshape((60, 60)) * 255
image = Image.fromarray(x)
image = image.convert("L")
image.save(str(self.images_dir / identifier))
annotations.append(ClassificationAnnotation(identifier, y_label))
return ConverterReturn(annotations, None, None)
| true
| true
|
f70ccd3f592caa16b7fa06c4c3e16ed23cb032f2
| 514
|
py
|
Python
|
migrations/versions/ee248674f637_.py
|
halonotes/personal_blog
|
05d03ec6d595b6b92296dfed23f168f0932ce660
|
[
"MIT"
] | null | null | null |
migrations/versions/ee248674f637_.py
|
halonotes/personal_blog
|
05d03ec6d595b6b92296dfed23f168f0932ce660
|
[
"MIT"
] | null | null | null |
migrations/versions/ee248674f637_.py
|
halonotes/personal_blog
|
05d03ec6d595b6b92296dfed23f168f0932ce660
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: ee248674f637
Revises: ebf728dc4d0d
Create Date: 2017-05-31 15:07:32.715000
"""
# revision identifiers, used by Alembic.
revision = 'ee248674f637'
down_revision = 'ebf728dc4d0d'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 19.037037
| 65
| 0.678988
|
revision = 'ee248674f637'
down_revision = 'ebf728dc4d0d'
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
| true
| true
|
f70ccd88b76265237437e8e41db9cb8a098c6294
| 32,347
|
py
|
Python
|
empress/tree.py
|
esayyari/empress
|
092044d4444a1569784cd9d336eb2a2a44a92abc
|
[
"Apache-2.0",
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
empress/tree.py
|
esayyari/empress
|
092044d4444a1569784cd9d336eb2a2a44a92abc
|
[
"Apache-2.0",
"CC0-1.0",
"BSD-3-Clause"
] | 1
|
2019-11-18T20:38:12.000Z
|
2019-11-18T20:38:12.000Z
|
empress/tree.py
|
esayyari/empress
|
092044d4444a1569784cd9d336eb2a2a44a92abc
|
[
"Apache-2.0",
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, empress development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import warnings
from skbio import TreeNode
import numpy as np
from bp import BP, from_skbio_treenode
class TreeFormatWarning(Warning):
pass
class Tree:
"""
Attributes
----------
length
leafcount
height
depth
Notes
-----
`length` refers to the branch length of a node to its parent.
`leafcount` is the number of tips within a subtree. `height` refers
    to the longest path from root to the deepest leaf in that subtree.
`depth` is the number of nodes found in the longest path.
"""
def __init__(self, bp_tree):
""" Constructs a Dendrogram object for visualization.
Parameters
----------
bp_tree: bp.BP
BP tree object
        """
self.bp_tree = bp_tree
self.B = self.bp_tree.B
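        # Per-node geometry buffers; each array below is indexed by a node's
        # position in the balanced-parentheses vector self.B.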
        self.leafcounts = np.zeros(self.B.size, int)
        self.depths = np.zeros(self.B.size, np.double)
        self.heights = np.zeros(self.B.size, np.double)
        self.yr = np.zeros(self.B.size, np.double)
        self.xr = np.zeros(self.B.size, np.double)
        self.highest_child_yr = np.zeros(self.B.size, float)
        self.lowest_child_yr = np.zeros(self.B.size, float)
self.clangle = np.zeros(self.B.size, np.double)
self.clradius = np.zeros(self.B.size, np.double)
self.xc0 = np.zeros(self.B.size, np.double)
self.yc0 = np.zeros(self.B.size, np.double)
self.xc1 = np.zeros(self.B.size, np.double)
self.yc1 = np.zeros(self.B.size, np.double)
        self.highest_child_clangle = np.zeros(self.B.size, float)
        self.lowest_child_clangle = np.zeros(self.B.size, float)
self.arcx0 = np.zeros(self.B.size, np.double)
self.arcy0 = np.zeros(self.B.size, np.double)
self.arcx1 = np.zeros(self.B.size, np.double)
self.arcy1 = np.zeros(self.B.size, np.double)
self.x1 = np.zeros(self.B.size, np.double)
self.y1 = np.zeros(self.B.size, np.double)
self.x2 = np.zeros(self.B.size, np.double)
self.y2 = np.zeros(self.B.size, np.double)
self.angle = np.zeros(self.B.size, np.double)
self.childRem = -1
@classmethod
def from_tree(cls, tree, use_lengths=True):
""" Creates an Tree object from a skbio tree.
Parameters
----------
tree : skbio.TreeNode
Input skbio tree
use_lengths: Boolean
Specify if the branch length should be incorporated into
the geometry calculations for visualization.
Returns
-------
        Tree
            Tree object wrapping the input tree.
"""
bp_tree = from_skbio_treenode(tree)
if sum(bp_tree.B) <= 1:
raise ValueError("Tree must contain at least 2 nodes.")
# While traversing the tree, record tip / internal node names
# (Nodes without names are ignored, since we'll assign those later
# using tools.fill_missing_node_names())
tip_names = []
internal_node_names = []
max_branch_length = 0
for i in range(sum(bp_tree.B)):
node_idx = bp_tree.postorderselect(i)
name = bp_tree.name(node_idx)
length = bp_tree.length(node_idx)
if name is not None:
# NOTE: This should eventually be taken out when
# fill_missing_node_names() is refactored. However, for now,
# this makes sure that users can't accidentally break things by
# naming nodes identical to our default names for missing nodes
if name.startswith("EmpressNode"):
raise ValueError(
'Node names can\'t start with "EmpressNode".'
)
if isleaf(bp_tree, node_idx):
tip_names.append(name)
else:
internal_node_names.append(name)
if length is None:
raise ValueError(
"Non-root branches of the tree must have lengths."
)
if length < 0:
raise ValueError(
"Non-root branches of the tree must have nonnegative "
"lengths."
)
max_branch_length = max(length, max_branch_length)
# We didn't consider the root node in the above traversal since we
# don't care about its length. However, we do care about its name,
# so we add the root's name to internal_node_names.
if max_branch_length == 0:
raise ValueError(
"At least one non-root branch of the tree must have a "
"positive length."
)
unique_tip_name_set = set(tip_names)
if len(unique_tip_name_set) != len(tip_names):
raise ValueError("Tip names in the tree must be unique.")
unique_internal_node_name_set = set(internal_node_names)
if len(unique_tip_name_set & unique_internal_node_name_set) > 0:
raise ValueError(
"Tip names in the tree cannot overlap with internal node "
"names."
)
if len(unique_internal_node_name_set) != len(internal_node_names):
warnings.warn(
"Internal node names in the tree are not unique.",
TreeFormatWarning
)
bp_tree = Tree(bp_tree)
bp_tree.update_geometry(use_lengths)
return bp_tree
def postorder(self, include_self=True):
e = sum(self.B) if include_self else sum(self.B) - 1
for i in range(e):
node_idx = self.bp_tree.postorderselect(i)
yield node_idx
def preorder(self, include_self=True):
s = 0 if include_self else 1
for i in range(s, sum(self.B)):
node_idx = self.bp_tree.preorderselect(i)
yield node_idx
def bp_tree_tips(self):
""" Extracts tip names in the tree, ignoring unnamed tips.
Parameters
----------
bp_tree : bp.BP
Input BP tree
Returns
-------
tips : list of strings
list of tip names in the tree
"""
tips = []
# Iterate through all open and closing parentheses and extract tip names
for i in range(self.B.size):
pos_name = self.bp_tree.name(i)
# Check if this is a leaf node with a label
if self.isleaf(i) and (pos_name is not None):
tips.append(pos_name)
return tips
def bp_tree_non_tips(self):
""" Extracts internal node names in the tree, ignoring unnamed nodes.
Parameters
----------
bp_tree : bp.BP
Input BP tree
Returns
-------
non_tips : list of strings
list of internal node names in the tree
"""
non_tips = []
for i in range(self.B.size):
pos_name = self.bp_tree.name(i)
# Check if this is an opening parenthesis, is not a leaf, and
# has a node label
if self.B[i] and not self.isleaf(i) and pos_name is not None:
non_tips.append(pos_name)
return non_tips
def update_geometry(self, use_lengths, depth=None):
"""Calculate tree node attributes such as height and depth.
Parameters
----------
use_lengths: bool
Specify if the branch length should be incorporated into
the geometry calculations for visualization.
depth: int
The number of nodes in the longest path from root to leaf.
This is agnostic to scale and orientation.
"""
new_heights = np.zeros(self.B.size, dtype=np.double)
new_leaf_count = np.zeros(self.B.size, dtype=np.int)
new_depths = np.zeros(self.B.size, dtype=np.double)
for node_idx in self.postorder():
length = self.bp_tree.length(node_idx)
if length is None or not use_lengths:
if not use_lengths:
if self.isleaf(node_idx):
length = 5
else:
length = 1
else:
length = 0
new_depths[node_idx] = (depth or 0) + length
if self.isleaf(node_idx):
new_heights[node_idx] = length
new_leaf_count[node_idx] = 1
else:
idx = self.bp_tree.fchild(node_idx)
height = 0
leafcount = 0
while idx:
height = max(height, new_heights[idx])
leafcount += new_leaf_count[idx]
idx = self.bp_tree.nsibling(idx)
height += length
new_heights[node_idx] = height
new_leaf_count[node_idx] = leafcount
self.leafcounts = new_leaf_count
self.heights = new_heights
self.depths = new_depths
def coords(self, height, width):
""" Computes the coordinates of nodes to be rendered in plot.
This runs multiple layout algorithms and saves all of the resulting
coordinates for each node, so that layout algorithms can be rapidly
toggled between in the JS interface.
Also adds on .highest_child_yr and .lowest_child_yr attributes to
internal nodes so that vertical bars for these nodes can be drawn in
the rectangular layout.
Parameters
----------
height : int
The height of the canvas.
width : int
The width of the canvas.
Returns
-------
dict:
Mapping between layout and the coordinate suffix.
str:
Name of the default layout.
"""
layout_to_coordsuffix = {}
layout_algs = (
self.layout_unrooted,
self.layout_rectangular,
self.layout_circular,
)
# We set the default layout to whatever the first layout in
# layout_algs is, but this behavior is of course modifiable
default_layout = None
for alg in layout_algs:
name, suffix = alg(width, height)
layout_to_coordsuffix[name] = suffix
self.alter_coordinates_relative_to_root(suffix)
if name == "Circular":
self.alter_coordinates_relative_to_root("c0")
if default_layout is None:
default_layout = name
# Determine highest and lowest child y-position for internal nodes in
# the rectangular layout; used to draw vertical lines for these nodes.
#
# NOTE / TODO: This will have the effect of drawing vertical lines even
# for nodes with only 1 child -- in this case lowest_child_yr ==
# highest_child_yr for this node, so all of the stuff drawn in WebGL
# for this vertical line shouldn't show up. I don't think this should
# cause any problems, but it may be worth detecting these cases and not
# drawing vertical lines for them in the future.
for node_idx in self.preorder():
if not self.isleaf(node_idx):
# wow, child does not look like a word any more
self.highest_child_yr[node_idx] = float("-inf")
self.lowest_child_yr[node_idx] = float("inf")
for c_idx in self.children(node_idx):
if self.yr[c_idx] > self.highest_child_yr[node_idx]:
self.highest_child_yr[node_idx] = self.yr[c_idx]
if self.yr[c_idx] < self.lowest_child_yr[node_idx]:
self.lowest_child_yr[node_idx] = self.yr[c_idx]
return layout_to_coordsuffix, default_layout
def alter_coordinates_relative_to_root(self, suffix):
""" Subtracts the root node's x- and y- coords from all nodes' coords.
This was previously done within coords(), but I moved it here so that
this logic can be used after arbitrary layout computations.
Parameters
----------
suffix : str
The suffix of the x- and y-coordinates to adjust.
For example, this is "2" for the unrooted layout since coordinates
are stored in the x2 and y2 attributes for every node; and it's "r"
for the rectangular layout since the coordinate attributes are now
xr and yr.
"""
xname = "x" + suffix
yname = "y" + suffix
centersX = getattr(self, xname)
centersY = getattr(self, yname)
centerX = centersX[0]
centerY = centersY[0]
for node_idx in self.postorder():
# This code might look sort of intimidating, but it's really just
# another way to write out:
# node.x2 = node.x2 - centerX
# node.y2 = node.y2 - centerY
# ...when we don't know what "x2" or "y2" will be named beforehand.
centersX[node_idx] = centersX[node_idx] - centerX
centersY[node_idx] = centersY[node_idx] - centerY
setattr(self, xname, centersX)
setattr(self, yname, centersY)
def isleaf(self, i):
""" Checks if node at position i belongs to a leaf node or not
Parameters
----------
bp_tree : bp.BP
Input BP tree
i : int
The query node index
Returns
-------
bool
True if this is a leaf node, False otherwise
"""
return self.B[i] and (not self.B[i + 1])
def children(self, i):
children = []
child = self.bp_tree.fchild(i)
while child > 0:
children.append(child)
child = self.bp_tree.nsibling(child)
return children
def layout_rectangular(self, width, height):
""" Rectangular layout.
In this sort of layout, each tip has a distinct y-position, and parent
y-positions are centered over their descendant tips' positions.
x-positions are computed based on nodes' branch lengths.
Following this algorithm, nodes' rectangular layout coordinates are
accessible at [node].xr and [node].yr.
For a simple tree, this layout should look something like:
__
___|
___| |__
| |___
| ___
|___|
|___
Parameters
----------
width : float
width of the canvas
height : float
height of the canvas
References
----------
https://rachel53461.wordpress.com/2014/04/20/algorithm-for-drawing-trees/
Clear explanation of Reingold-Tilford that I used a lot
https://github.com/qiime/Topiary-Explorer/blob/master/src/topiaryexplorer/TreeVis.java
Derived from the "Rectangular" layout algorithm code.
"""
# NOTE: This doesn't draw a horizontal line leading to the root "node"
# of the graph. See https://github.com/biocore/empress/issues/141 for
# context.
max_width = 0
max_height = 0
prev_y = 0
for node_idx in self.postorder():
if self.isleaf(node_idx):
self.yr[node_idx] = prev_y
prev_y += 1
if self.yr[node_idx] > max_height:
max_height = self.yr[node_idx]
else:
# Center internal nodes above their children
# We could also center them above their tips, but (IMO) this
# looks better ;)
children = self.children(node_idx)
self.yr[node_idx] = sum([self.yr[c_idx] for
c_idx in children]) / len(children)
for node_idx in self.preorder(include_self=False):
self.xr[node_idx] = self.xr[self.bp_tree.parent(node_idx)] + \
self.bp_tree.length(node_idx)
if self.xr[node_idx] > max_width:
max_width = self.xr[node_idx]
# We don't check if max_width == 0 here, because we check when
# constructing an Empress tree that it has at least one positive
# branch length and no negative branch lengths. (And if this is the
# case, then max_width must be > 0.)
x_scaling_factor = width / max_width
if max_height > 0:
# Having a max_height of 0 could actually happen, in the funky case
# where the entire tree is a straight line (e.g. A -> B -> C). In
# this case our "rectangular layout" drawing places all nodes on
# the same y-coordinate (0), resulting in max_height = 0.
# ... So, that's why we only do y-scaling if this *isn't* the case.
y_scaling_factor = height / max_height
else:
# Since this will be multiplied by 0 for every node, we can set
# this to any real number and get the intended "effect" of keeping
# every node's y-coordinate at 0.
y_scaling_factor = 1
for node_idx in self.preorder():
self.xr[node_idx] *= x_scaling_factor
self.yr[node_idx] *= y_scaling_factor
# Now we have the layout! In the JS we'll need to draw each internal
# node as a vertical line ranging from its lowest child y-position to
# its highest child y-position, and then draw horizontal lines from
# this line to all of its child nodes (where the length of the
# horizontal line is proportional to the node length in question).
return "Rectangular", "r"
def layout_circular(self, width, height):
""" Circular layout version of the rectangular layout.
Works analogously to the rectangular layout:
-Each tip is assigned a unique angle from the "center"/root of
the tree (out of the range [0, 2pi] in radians), and internal
nodes are set to an angle equal to the average of their
children's. This mirrors the assignment of y-coordinates for
the rectangular layout.
-All nodes are then assigned a radius equal to the sum of their
branch lengths descending from the root (but not including
the root's branch length, if provided -- the root is represented
as just a single point in the center of the layout). This mirrors
the assignment of x-coordinates for the rectangular layout.
-Lastly, we'll draw arcs for every internal node (except for the
root) connecting the "start points" of the child nodes of that
node with the minimum and maximum angle. (These points should
occur at the radius equal to the "end" of the given internal
node.)
We don't draw this arc for the root node because we don't draw
the root the same way we do the other nodes in the tree:
the root is represented as just a single point at the center
of the layout. Due to this, there isn't a way to draw an arc
from the root, since the root's "end" is at the same point as
its beginning (so the arc wouldn't be visible).
Following this algorithm, nodes' circular layout coordinates are
accessible at [node].xc and [node].yc. Angles will also be available
at [node].clangle, and radii will be available at [node].clradius; and
for non-root internal nodes, arc start and end coordinates will be
available at [node].arcx0, [node].arcy0, [node].arcx1, & [node].arcy1.
Parameters
----------
width : float
width of the canvas
height : float
height of the canvas
References
----------
https://github.com/qiime/Topiary-Explorer/blob/master/src/topiaryexplorer/TreeVis.java
Description above + the implementation of this algorithm
derived from the Polar layout algorithm code.
"""
anglepernode = (2 * np.pi) / self.leafcounts[0]
prev_clangle = 0
for node_idx in self.postorder():
if self.isleaf(node_idx):
self.clangle[node_idx] = prev_clangle
prev_clangle += anglepernode
else:
# Center internal nodes at an angle above their children
children = self.children(node_idx)
child_clangle_sum = sum([self.clangle[c_idx] for c_idx
in children])
self.clangle[node_idx] = child_clangle_sum / len(children)
max_clradius = 0
for node_idx in self.preorder(include_self=False):
self.clradius[node_idx] = self.clradius[self.bp_tree.parent(node_idx)] + \
self.bp_tree.length(node_idx)
if self.clradius[node_idx] > max_clradius:
max_clradius = self.clradius[node_idx]
# Now that we have the polar coordinates of the nodes, convert these
# coordinates to normal x/y coordinates.
# NOTE that non-root nodes will actually have two x/y coordinates we
# need to keep track of: one for the "end" of the node's line, and
# another for the "start" of the node's line. The latter of these is
# needed because the node's line begins at the parent node's radius but
# the child node's angle, if that makes sense -- and since converting
# from polar to x/y and back is annoying, it's easiest to just compute
# this in python.
max_x = max_y = float("-inf")
min_x = min_y = float("inf")
for node_idx in self.postorder():
self.xc1[node_idx] = self.clradius[node_idx] * \
np.cos(self.clangle[node_idx])
self.yc1[node_idx] = self.clradius[node_idx] * \
np.sin(self.clangle[node_idx])
if self.isleaf(node_idx):
# NOTE that the root has a clradius of 0 (since it's just
# represented as a point at the center of the layout). We don't
# even bother drawing the root in the Empress JS code, but for
# the purposes of alter_coordinates_relative_to_root() we need
# to explicitly position the root at (0, 0).
self.xc0[node_idx] = 0
self.yc0[node_idx] = 0
else:
self.xc0[node_idx] = self.clradius[
self.bp_tree.parent(node_idx)] *\
np.cos(self.clangle[node_idx])
self.yc0[node_idx] = self.clradius[
self.bp_tree.parent(node_idx)] *\
np.sin(self.clangle[node_idx])
# NOTE: We don't bother testing the xc0 / yc0 coordinates as
# "extrema" because they should always be further "within" the
# tree than the xc1 / yc1 coordinates.
# TODO: verify that the "tree is a line" case doesn't mess this up.
if self.xc1[node_idx] > max_x:
max_x = self.xc1[node_idx]
if self.yc1[node_idx] > max_y:
max_y = self.yc1[node_idx]
if self.xc1[node_idx] < min_x:
min_x = self.xc1[node_idx]
if self.yc1[node_idx] < min_y:
min_y = self.yc1[node_idx]
# TODO: raise error if the maximum and minimum are same for x or y.
# may happen if the tree is a straight line.
# set scaling factors
# normalize the coordinate based on the largest dimension
width_scale = width / (max_x - min_x)
height_scale = height / (max_y - min_y)
scale_factor = width_scale if width_scale > height_scale else \
height_scale
x_scaling_factor = scale_factor
y_scaling_factor = scale_factor
for node_idx in self.preorder():
self.xc0[node_idx] *= x_scaling_factor
self.yc0[node_idx] *= y_scaling_factor
self.xc1[node_idx] *= x_scaling_factor
self.yc1[node_idx] *= y_scaling_factor
if not self.isleaf(node_idx) and (node_idx != 0):
self.highest_child_clangle[node_idx] = float("-inf")
self.lowest_child_clangle[node_idx] = float("inf")
for c_idx in self.children(node_idx):
if self.clangle[c_idx] >\
self.highest_child_clangle[node_idx]:
self.highest_child_clangle[node_idx] =\
self.clangle[c_idx]
if self.clangle[c_idx] < \
self.lowest_child_clangle[node_idx]:
self.lowest_child_clangle[node_idx] =\
self.clangle[c_idx]
# Figure out "arc" endpoints for the circular layout
# NOTE: As with the "vertical lines" for internal nodes in the
# rectangular layout, these arcs will be drawn for nodes with
# only one child. Here, this case would mean that the
# highest_child_clangle would equal the lowest_child_clangle,
# so arcx0 would equal arcx1 and arcy0 would equal arcy1. So
# nothing should show up (but it may be worth addressing this
# in the future).
self.arcx0[node_idx] = self.clradius[node_idx] * \
np.cos(
self.highest_child_clangle[node_idx])
self.arcy0[node_idx] = self.clradius[node_idx] * \
np.sin(
self.highest_child_clangle[node_idx])
self.arcx1[node_idx] = self.clradius[node_idx] * \
np.cos(
self.lowest_child_clangle[node_idx])
self.arcy1[node_idx] = self.clradius[node_idx] * \
np.sin(
self.lowest_child_clangle[node_idx])
self.arcx0[node_idx] *= x_scaling_factor
self.arcy0[node_idx] *= y_scaling_factor
self.arcx1[node_idx] *= x_scaling_factor
self.arcy1[node_idx] *= y_scaling_factor
return "Circular", "c1"
def layout_unrooted(self, width, height):
""" Find best scaling factor for fitting the tree in the figure.
This method will find the best orientation and scaling possible to
fit the tree within the dimensions specified by width and height, using
an unrooted layout algorithm.
Following this algorithm, nodes' unrooted layout coordinates are
accessible at [node].x2 and [node].y2.
Parameters
----------
width : float
width of the canvas
height : float
height of the canvas
Returns
-------
best_scaling : float
largest scaling factor in which the tree can fit in the canvas.
Notes
-----
"""
# Recall that 360 degrees is equal to (2 * pi) radians.
# You can think of this variable as "the maximum angle we can 'give' to
# each leaf of the tree".
angle = (2 * np.pi) / self.leafcounts[0]
best_scale = 0
for i in range(60):
direction = i / 60.0 * np.pi
(max_x, min_x, max_y, min_y) = self.update_unrooted_coords(
1.0, 0, 0, direction, angle)
x_diff = max_x - min_x
width_min = 0
if x_diff != 0:
width_min = float(width) / x_diff
y_diff = max_y - min_y
height_min = 0
if y_diff != 0:
height_min = float(height) / y_diff
scale = min(width_min, height_min)
scale *= 0.95 # extra margin for labels
if scale >= best_scale:
best_scale = scale
mid_x = width / 2 - ((max_x + min_x) / 2) * scale
mid_y = height / 2 - ((max_y + min_y) / 2) * scale
best_args = (scale, mid_x, mid_y, direction, angle)
self.update_unrooted_coords(*best_args)
return "Unrooted", "2"
def update_unrooted_coords(self, s, x1, y1, a, da):
""" Update x, y coordinates of tree nodes in canvas.
This function will update the x1, y1, x2, y2, and angle attributes
for all of the nodes within the tree. Note that (once the unrooted
layout has finished) all that is really used are the x2 and y2
attributes.
In a server-based version of Empress, this could be applied when
the tree becomes modified (i.e. pruning or collapsing) and the
resulting coordinates would be modified to reflect the changes
to the tree structure. (In practice, we just run this once on the
Python side of things in order to precompute the layout.)
Parameters
----------
s : float
scaling
x1 : float
x midpoint
y1 : float
y midpoint
a : float
angle (degrees)
da : float
angle resolution (degrees)
Returns
-------
points : list of tuple
2D coordinates of all of the nodes.
"""
max_x = float('-inf')
min_x = float('inf')
max_y = float('-inf')
min_y = float('inf')
# calculates self coords/angle
# Constant angle algorithm. Should add maximum daylight step.
x2 = x1 + self.bp_tree.length(0) * s * np.sin(a)
y2 = y1 + self.bp_tree.length(0) * s * np.cos(a)
(self.x1[0], self.y1[0], self.x2[0], self.y2[0], self.angle[0]) = \
(x1, y1, x2, y2, a)
node_indices = [node_idx for node_idx in
self.postorder(include_self=False)]
node_indices.reverse()
# for node in self.preorder(include_self=False):
for node_idx in node_indices:
x1 = self.x2[self.bp_tree.parent(node_idx)]
y1 = self.y2[self.bp_tree.parent(node_idx)]
# init a
a = self.angle[self.bp_tree.parent(node_idx)]
# same modify across nodes
a = a - self.leafcounts[self.bp_tree.parent(node_idx)] * da / 2
# check for conditional higher order
for sib_idx in self.children(self.bp_tree.parent(node_idx)):
if sib_idx != node_idx:
a += self.leafcounts[sib_idx] * da
else:
a += (self.leafcounts[node_idx] * da) / 2
break
# Constant angle algorithm. Should add maximum daylight step.
x2 = x1 + self.bp_tree.length(node_idx) * s * np.sin(a)
y2 = y1 + self.bp_tree.length(node_idx) * s * np.cos(a)
(self.x1[node_idx], self.y1[node_idx], self.x2[node_idx],
self.y2[node_idx], self.angle[node_idx]) = (x1, y1, x2, y2, a)
max_x, min_x = max(max_x, x2), min(min_x, x2)
max_y, min_y = max(max_y, y2), min(min_y, y2)
return (max_x, min_x, max_y, min_y)
def isleaf(bp_tree, i):
""" Checks if node at position i belongs to a leaf node or not
Parameters
----------
bp_tree : bp.BP
Input BP tree
i : int
The query node index
Returns
-------
bool
True if this is a leaf node, False otherwise
"""
return bp_tree.B[i] and (not bp_tree.B[i + 1])
| 40.739295
| 94
| 0.56234
|
import warnings
from skbio import TreeNode
import numpy as np
from bp import BP, from_skbio_treenode
class TreeFormatWarning(Warning):
pass
class Tree:
def __init__(self, bp_tree):
self.bp_tree = bp_tree
self.B = self.bp_tree.B
self.leafcounts = np.zeros(self.B.size, np.int)
self.depths = np.zeros(self.B.size, np.double)
self.heights = np.zeros(self.B.size, np.double)
self.yr = np.zeros(self.B.size, np.double)
self.xr = np.zeros(self.B.size, np.double)
self.highest_child_yr = np.zeros(self.B.size, np.float)
self.lowest_child_yr = np.zeros(self.B.size, np.float)
self.clangle = np.zeros(self.B.size, np.double)
self.clradius = np.zeros(self.B.size, np.double)
self.xc0 = np.zeros(self.B.size, np.double)
self.yc0 = np.zeros(self.B.size, np.double)
self.xc1 = np.zeros(self.B.size, np.double)
self.yc1 = np.zeros(self.B.size, np.double)
self.highest_child_clangle = np.zeros(self.B.size, np.float)
self.lowest_child_clangle = np.zeros(self.B.size, np.float)
self.arcx0 = np.zeros(self.B.size, np.double)
self.arcy0 = np.zeros(self.B.size, np.double)
self.arcx1 = np.zeros(self.B.size, np.double)
self.arcy1 = np.zeros(self.B.size, np.double)
self.x1 = np.zeros(self.B.size, np.double)
self.y1 = np.zeros(self.B.size, np.double)
self.x2 = np.zeros(self.B.size, np.double)
self.y2 = np.zeros(self.B.size, np.double)
self.angle = np.zeros(self.B.size, np.double)
self.childRem = -1
@classmethod
def from_tree(cls, tree, use_lengths=True):
bp_tree = from_skbio_treenode(tree)
if sum(bp_tree.B) <= 1:
raise ValueError("Tree must contain at least 2 nodes.")
# using tools.fill_missing_node_names())
tip_names = []
internal_node_names = []
max_branch_length = 0
for i in range(sum(bp_tree.B)):
node_idx = bp_tree.postorderselect(i)
name = bp_tree.name(node_idx)
length = bp_tree.length(node_idx)
if name is not None:
# NOTE: This should eventually be taken out when
# fill_missing_node_names() is refactored. However, for now,
# this makes sure that users can't accidentally break things by
if name.startswith("EmpressNode"):
raise ValueError(
'Node names can\'t start with "EmpressNode".'
)
if isleaf(bp_tree, node_idx):
tip_names.append(name)
else:
internal_node_names.append(name)
if length is None:
raise ValueError(
"Non-root branches of the tree must have lengths."
)
if length < 0:
raise ValueError(
"Non-root branches of the tree must have nonnegative "
"lengths."
)
max_branch_length = max(length, max_branch_length)
# We didn't consider the root node in the above traversal since we
# so we add the root's name to internal_node_names.
if max_branch_length == 0:
raise ValueError(
"At least one non-root branch of the tree must have a "
"positive length."
)
unique_tip_name_set = set(tip_names)
if len(unique_tip_name_set) != len(tip_names):
raise ValueError("Tip names in the tree must be unique.")
unique_internal_node_name_set = set(internal_node_names)
if len(unique_tip_name_set & unique_internal_node_name_set) > 0:
raise ValueError(
"Tip names in the tree cannot overlap with internal node "
"names."
)
if len(unique_internal_node_name_set) != len(internal_node_names):
warnings.warn(
"Internal node names in the tree are not unique.",
TreeFormatWarning
)
bp_tree = Tree(bp_tree)
bp_tree.update_geometry(use_lengths)
return bp_tree
def postorder(self, include_self=True):
e = sum(self.B) if include_self else sum(self.B) - 1
for i in range(e):
node_idx = self.bp_tree.postorderselect(i)
yield node_idx
def preorder(self, include_self=True):
s = 0 if include_self else 1
for i in range(s, sum(self.B)):
node_idx = self.bp_tree.preorderselect(i)
yield node_idx
def bp_tree_tips(self):
tips = []
for i in range(self.B.size):
pos_name = self.bp_tree.name(i)
if self.isleaf(i) and (pos_name is not None):
tips.append(pos_name)
return tips
def bp_tree_non_tips(self):
non_tips = []
for i in range(self.B.size):
pos_name = self.bp_tree.name(i)
if self.B[i] and not self.isleaf(i) and pos_name is not None:
non_tips.append(pos_name)
return non_tips
def update_geometry(self, use_lengths, depth=None):
new_heights = np.zeros(self.B.size, dtype=np.double)
new_leaf_count = np.zeros(self.B.size, dtype=np.int)
new_depths = np.zeros(self.B.size, dtype=np.double)
for node_idx in self.postorder():
length = self.bp_tree.length(node_idx)
if length is None or not use_lengths:
if not use_lengths:
if self.isleaf(node_idx):
length = 5
else:
length = 1
else:
length = 0
new_depths[node_idx] = (depth or 0) + length
if self.isleaf(node_idx):
new_heights[node_idx] = length
new_leaf_count[node_idx] = 1
else:
idx = self.bp_tree.fchild(node_idx)
height = 0
leafcount = 0
while idx:
height = max(height, new_heights[idx])
leafcount += new_leaf_count[idx]
idx = self.bp_tree.nsibling(idx)
height += length
new_heights[node_idx] = height
new_leaf_count[node_idx] = leafcount
self.leafcounts = new_leaf_count
self.heights = new_heights
self.depths = new_depths
def coords(self, height, width):
layout_to_coordsuffix = {}
layout_algs = (
self.layout_unrooted,
self.layout_rectangular,
self.layout_circular,
)
default_layout = None
for alg in layout_algs:
name, suffix = alg(width, height)
layout_to_coordsuffix[name] = suffix
self.alter_coordinates_relative_to_root(suffix)
if name == "Circular":
self.alter_coordinates_relative_to_root("c0")
if default_layout is None:
default_layout = name
for node_idx in self.preorder():
if not self.isleaf(node_idx):
self.highest_child_yr[node_idx] = float("-inf")
self.lowest_child_yr[node_idx] = float("inf")
for c_idx in self.children(node_idx):
if self.yr[c_idx] > self.highest_child_yr[node_idx]:
self.highest_child_yr[node_idx] = self.yr[c_idx]
if self.yr[c_idx] < self.lowest_child_yr[node_idx]:
self.lowest_child_yr[node_idx] = self.yr[c_idx]
return layout_to_coordsuffix, default_layout
def alter_coordinates_relative_to_root(self, suffix):
xname = "x" + suffix
yname = "y" + suffix
centersX = getattr(self, xname)
centersY = getattr(self, yname)
centerX = centersX[0]
centerY = centersY[0]
for node_idx in self.postorder():
# another way to write out:
# node.x2 = node.x2 - centerX
# node.y2 = node.y2 - centerY
# ...when we don't know what "x2" or "y2" will be named beforehand.
centersX[node_idx] = centersX[node_idx] - centerX
centersY[node_idx] = centersY[node_idx] - centerY
setattr(self, xname, centersX)
setattr(self, yname, centersY)
def isleaf(self, i):
return self.B[i] and (not self.B[i + 1])
def children(self, i):
children = []
child = self.bp_tree.fchild(i)
while child > 0:
children.append(child)
child = self.bp_tree.nsibling(child)
return children
def layout_rectangular(self, width, height):
# of the graph. See https://github.com/biocore/empress/issues/141 for
# context.
max_width = 0
max_height = 0
prev_y = 0
for node_idx in self.postorder():
if self.isleaf(node_idx):
self.yr[node_idx] = prev_y
prev_y += 1
if self.yr[node_idx] > max_height:
max_height = self.yr[node_idx]
else:
# Center internal nodes above their children
# We could also center them above their tips, but (IMO) this
# looks better ;)
children = self.children(node_idx)
self.yr[node_idx] = sum([self.yr[c_idx] for
c_idx in children]) / len(children)
for node_idx in self.preorder(include_self=False):
self.xr[node_idx] = self.xr[self.bp_tree.parent(node_idx)] + \
self.bp_tree.length(node_idx)
if self.xr[node_idx] > max_width:
max_width = self.xr[node_idx]
# We don't check if max_width == 0 here, because we check when
x_scaling_factor = width / max_width
if max_height > 0:
y_scaling_factor = height / max_height
else:
y_scaling_factor = 1
for node_idx in self.preorder():
self.xr[node_idx] *= x_scaling_factor
self.yr[node_idx] *= y_scaling_factor
# Now we have the layout! In the JS we'll need to draw each internal
return "Rectangular", "r"
def layout_circular(self, width, height):
anglepernode = (2 * np.pi) / self.leafcounts[0]
prev_clangle = 0
for node_idx in self.postorder():
if self.isleaf(node_idx):
self.clangle[node_idx] = prev_clangle
prev_clangle += anglepernode
else:
children = self.children(node_idx)
child_clangle_sum = sum([self.clangle[c_idx] for c_idx
in children])
self.clangle[node_idx] = child_clangle_sum / len(children)
max_clradius = 0
for node_idx in self.preorder(include_self=False):
self.clradius[node_idx] = self.clradius[self.bp_tree.parent(node_idx)] + \
self.bp_tree.length(node_idx)
if self.clradius[node_idx] > max_clradius:
max_clradius = self.clradius[node_idx]
# another for the "start" of the node's line. The latter of these is
# from polar to x/y and back is annoying, it's easiest to just compute
max_x = max_y = float("-inf")
min_x = min_y = float("inf")
for node_idx in self.postorder():
self.xc1[node_idx] = self.clradius[node_idx] * \
np.cos(self.clangle[node_idx])
self.yc1[node_idx] = self.clradius[node_idx] * \
np.sin(self.clangle[node_idx])
if self.isleaf(node_idx):
# represented as a point at the center of the layout). We don't
self.xc0[node_idx] = 0
self.yc0[node_idx] = 0
else:
self.xc0[node_idx] = self.clradius[
self.bp_tree.parent(node_idx)] *\
np.cos(self.clangle[node_idx])
self.yc0[node_idx] = self.clradius[
self.bp_tree.parent(node_idx)] *\
np.sin(self.clangle[node_idx])
# "extrema" because they should always be further "within" the
# tree than the xc1 / yc1 coordinates.
# TODO: verify that the "tree is a line" case doesn't mess this up.
if self.xc1[node_idx] > max_x:
max_x = self.xc1[node_idx]
if self.yc1[node_idx] > max_y:
max_y = self.yc1[node_idx]
if self.xc1[node_idx] < min_x:
min_x = self.xc1[node_idx]
if self.yc1[node_idx] < min_y:
min_y = self.yc1[node_idx]
width_scale = width / (max_x - min_x)
height_scale = height / (max_y - min_y)
scale_factor = width_scale if width_scale > height_scale else \
height_scale
x_scaling_factor = scale_factor
y_scaling_factor = scale_factor
for node_idx in self.preorder():
self.xc0[node_idx] *= x_scaling_factor
self.yc0[node_idx] *= y_scaling_factor
self.xc1[node_idx] *= x_scaling_factor
self.yc1[node_idx] *= y_scaling_factor
if not self.isleaf(node_idx) and (node_idx != 0):
self.highest_child_clangle[node_idx] = float("-inf")
self.lowest_child_clangle[node_idx] = float("inf")
for c_idx in self.children(node_idx):
if self.clangle[c_idx] >\
self.highest_child_clangle[node_idx]:
self.highest_child_clangle[node_idx] =\
self.clangle[c_idx]
if self.clangle[c_idx] < \
self.lowest_child_clangle[node_idx]:
self.lowest_child_clangle[node_idx] =\
self.clangle[c_idx]
self.arcx0[node_idx] = self.clradius[node_idx] * \
np.cos(
self.highest_child_clangle[node_idx])
self.arcy0[node_idx] = self.clradius[node_idx] * \
np.sin(
self.highest_child_clangle[node_idx])
self.arcx1[node_idx] = self.clradius[node_idx] * \
np.cos(
self.lowest_child_clangle[node_idx])
self.arcy1[node_idx] = self.clradius[node_idx] * \
np.sin(
self.lowest_child_clangle[node_idx])
self.arcx0[node_idx] *= x_scaling_factor
self.arcy0[node_idx] *= y_scaling_factor
self.arcx1[node_idx] *= x_scaling_factor
self.arcy1[node_idx] *= y_scaling_factor
return "Circular", "c1"
def layout_unrooted(self, width, height):
# each leaf of the tree".
angle = (2 * np.pi) / self.leafcounts[0]
best_scale = 0
for i in range(60):
direction = i / 60.0 * np.pi
(max_x, min_x, max_y, min_y) = self.update_unrooted_coords(
1.0, 0, 0, direction, angle)
x_diff = max_x - min_x
width_min = 0
if x_diff != 0:
width_min = float(width) / x_diff
y_diff = max_y - min_y
height_min = 0
if y_diff != 0:
height_min = float(height) / y_diff
scale = min(width_min, height_min)
scale *= 0.95
if scale >= best_scale:
best_scale = scale
mid_x = width / 2 - ((max_x + min_x) / 2) * scale
mid_y = height / 2 - ((max_y + min_y) / 2) * scale
best_args = (scale, mid_x, mid_y, direction, angle)
self.update_unrooted_coords(*best_args)
return "Unrooted", "2"
def update_unrooted_coords(self, s, x1, y1, a, da):
max_x = float('-inf')
min_x = float('inf')
max_y = float('-inf')
min_y = float('inf')
x2 = x1 + self.bp_tree.length(0) * s * np.sin(a)
y2 = y1 + self.bp_tree.length(0) * s * np.cos(a)
(self.x1[0], self.y1[0], self.x2[0], self.y2[0], self.angle[0]) = \
(x1, y1, x2, y2, a)
node_indices = [node_idx for node_idx in
self.postorder(include_self=False)]
node_indices.reverse()
for node_idx in node_indices:
x1 = self.x2[self.bp_tree.parent(node_idx)]
y1 = self.y2[self.bp_tree.parent(node_idx)]
a = self.angle[self.bp_tree.parent(node_idx)]
a = a - self.leafcounts[self.bp_tree.parent(node_idx)] * da / 2
for sib_idx in self.children(self.bp_tree.parent(node_idx)):
if sib_idx != node_idx:
a += self.leafcounts[sib_idx] * da
else:
a += (self.leafcounts[node_idx] * da) / 2
break
x2 = x1 + self.bp_tree.length(node_idx) * s * np.sin(a)
y2 = y1 + self.bp_tree.length(node_idx) * s * np.cos(a)
(self.x1[node_idx], self.y1[node_idx], self.x2[node_idx],
self.y2[node_idx], self.angle[node_idx]) = (x1, y1, x2, y2, a)
max_x, min_x = max(max_x, x2), min(min_x, x2)
max_y, min_y = max(max_y, y2), min(min_y, y2)
return (max_x, min_x, max_y, min_y)
def isleaf(bp_tree, i):
return bp_tree.B[i] and (not bp_tree.B[i + 1])
| true
| true
|
f70cce9cf8e6f94035a97f0bd5920eba56414d69
| 25,988
|
py
|
Python
|
nmt/model_helper.py
|
luckmoon/nmt
|
4f6a4acf8d8e086f9d894444a2877ac1f0856ad0
|
[
"Apache-2.0"
] | null | null | null |
nmt/model_helper.py
|
luckmoon/nmt
|
4f6a4acf8d8e086f9d894444a2877ac1f0856ad0
|
[
"Apache-2.0"
] | null | null | null |
nmt/model_helper.py
|
luckmoon/nmt
|
4f6a4acf8d8e086f9d894444a2877ac1f0856ad0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for building models."""
from __future__ import print_function
import collections
import os
import time
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from .utils import iterator_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
__all__ = [
"get_initializer", "get_device_str", "create_train_model",
"create_eval_model", "create_infer_model",
"create_emb_for_encoder_and_decoder", "create_rnn_cell", "gradient_clip",
"create_or_load_model", "load_model", "avg_checkpoints",
"compute_perplexity"
]
# If a vocab size is greater than this value, put the embedding on cpu instead
VOCAB_SIZE_THRESHOLD_CPU = 50000
def get_initializer(init_op, seed=None, init_weight=None):
"""Create an initializer. init_weight is only for uniform."""
if init_op == "uniform":
assert init_weight
return tf.random_uniform_initializer(
-init_weight, init_weight, seed=seed)
elif init_op == "glorot_normal":
return tf.keras.initializers.glorot_normal(
seed=seed)
elif init_op == "glorot_uniform":
return tf.keras.initializers.glorot_uniform(
seed=seed)
else:
raise ValueError("Unknown init_op %s" % init_op)
def get_device_str(device_id, num_gpus):
"""Return a device string for multi-GPU setup."""
if num_gpus == 0:
return "/cpu:0"
device_str_output = "/gpu:%d" % (device_id % num_gpus)
return device_str_output
class ExtraArgs(collections.namedtuple(
"ExtraArgs", ("single_cell_fn", "model_device_fn",
"attention_mechanism_fn", "encoder_emb_lookup_fn"))):
pass
class TrainModel(
collections.namedtuple("TrainModel", ("graph", "model", "iterator",
"skip_count_placeholder"))):
pass
def create_train_model(
model_creator, hparams, scope=None, num_workers=1, jobid=0,
extra_args=None):
"""Create train graph, model, and iterator."""
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "train"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
src_dataset = tf.data.TextLineDataset(tf.gfile.Glob(src_file))
tgt_dataset = tf.data.TextLineDataset(tf.gfile.Glob(tgt_file))
skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len,
tgt_max_len=hparams.tgt_max_len,
skip_count=skip_count_placeholder,
num_shards=num_workers,
shard_index=jobid,
use_char_encode=hparams.use_char_encode)
# Note: One can set model_device_fn to
# `tf.train.replica_device_setter(ps_tasks)` for distributed training.
model_device_fn = None
if extra_args: model_device_fn = extra_args.model_device_fn
with tf.device(model_device_fn):
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.TRAIN,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return TrainModel(
graph=graph,
model=model,
iterator=iterator,
skip_count_placeholder=skip_count_placeholder)
class EvalModel(
collections.namedtuple("EvalModel",
("graph", "model", "src_file_placeholder",
"tgt_file_placeholder", "iterator"))):
pass
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
"""Create train graph, model, src/tgt file holders, and iterator."""
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "eval"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
tgt_vocab_file, default_value=vocab_utils.UNK)
src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
src_dataset = tf.data.TextLineDataset(src_file_placeholder)
tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len_infer,
tgt_max_len=hparams.tgt_max_len_infer,
use_char_encode=hparams.use_char_encode)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.EVAL,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return EvalModel(
graph=graph,
model=model,
src_file_placeholder=src_file_placeholder,
tgt_file_placeholder=tgt_file_placeholder,
iterator=iterator)
class InferModel(
collections.namedtuple("InferModel",
("graph", "model", "src_placeholder",
"batch_size_placeholder", "iterator"))):
pass
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
"""Create inference model."""
graph = tf.Graph()
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
with graph.as_default(), tf.container(scope or "infer"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
tgt_vocab_file, default_value=vocab_utils.UNK)
src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)
src_dataset = tf.data.Dataset.from_tensor_slices(
src_placeholder)
iterator = iterator_utils.get_infer_iterator(
src_dataset,
src_vocab_table,
batch_size=batch_size_placeholder,
eos=hparams.eos,
src_max_len=hparams.src_max_len_infer,
use_char_encode=hparams.use_char_encode)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.INFER,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return InferModel(
graph=graph,
model=model,
src_placeholder=src_placeholder,
batch_size_placeholder=batch_size_placeholder,
iterator=iterator)
def _get_embed_device(vocab_size):
"""Decide on which device to place an embed matrix given its vocab size."""
if vocab_size > VOCAB_SIZE_THRESHOLD_CPU:
return "/cpu:0"
else:
return "/gpu:0"
def _create_pretrained_emb_from_txt(
vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32,
scope=None):
"""Load pretrain embeding from embed_file, and return an embedding matrix.
Args:
embed_file: Path to a GloVe-formatted embedding txt file.
num_trainable_tokens: Treat the first n tokens in the vocab file as trainable
variables. Default is 3, which is "<unk>", "<s>" and "</s>".
"""
vocab, _ = vocab_utils.load_vocab(vocab_file)
trainable_tokens = vocab[:num_trainable_tokens]
utils.print_out("# Using pretrained embedding: %s." % embed_file)
utils.print_out(" with trainable tokens: ")
emb_dict, emb_size = vocab_utils.load_embed_txt(embed_file)
for token in trainable_tokens:
utils.print_out(" %s" % token)
if token not in emb_dict:
emb_dict[token] = [0.0] * emb_size
emb_mat = np.array(
[emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())
emb_mat = tf.constant(emb_mat)
emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [-1, -1])
with tf.variable_scope(scope or "pretrain_embeddings", dtype=dtype) as scope:
with tf.device(_get_embed_device(num_trainable_tokens)):
emb_mat_var = tf.get_variable(
"emb_mat_var", [num_trainable_tokens, emb_size])
return tf.concat([emb_mat_var, emb_mat_const], 0)
def _create_or_load_embed(embed_name, vocab_file, embed_file,
vocab_size, embed_size, dtype):
"""Create a new or load an existing embedding matrix."""
if vocab_file and embed_file:
embedding = _create_pretrained_emb_from_txt(vocab_file, embed_file)
else:
with tf.device(_get_embed_device(vocab_size)):
embedding = tf.get_variable(
embed_name, [vocab_size, embed_size], dtype)
return embedding
def create_emb_for_encoder_and_decoder(share_vocab,
src_vocab_size,
tgt_vocab_size,
src_embed_size,
tgt_embed_size,
dtype=tf.float32,
num_enc_partitions=0,
num_dec_partitions=0,
src_vocab_file=None,
tgt_vocab_file=None,
src_embed_file=None,
tgt_embed_file=None,
use_char_encode=False,
scope=None):
"""Create embedding matrix for both encoder and decoder.
Args:
share_vocab: A boolean. Whether to share embedding matrix for both
encoder and decoder.
src_vocab_size: An integer. The source vocab size.
tgt_vocab_size: An integer. The target vocab size.
src_embed_size: An integer. The embedding dimension for the encoder's
embedding.
tgt_embed_size: An integer. The embedding dimension for the decoder's
embedding.
dtype: dtype of the embedding matrix. Default to float32.
num_enc_partitions: number of partitions used for the encoder's embedding
vars.
num_dec_partitions: number of partitions used for the decoder's embedding
vars.
scope: VariableScope for the created subgraph. Default to "embedding".
Returns:
embedding_encoder: Encoder's embedding matrix.
embedding_decoder: Decoder's embedding matrix.
Raises:
ValueError: if use share_vocab but source and target have different vocab
size.
"""
if num_enc_partitions <= 1:
enc_partitioner = None
else:
# Note: num_partitions > 1 is required for distributed training because
# embedding_lookup tries to colocate a single-partition embedding variable
# with its lookup ops, which may cause embedding variables to be placed on
# worker jobs.
enc_partitioner = tf.fixed_size_partitioner(num_enc_partitions)
if num_dec_partitions <= 1:
dec_partitioner = None
else:
# Note: num_partitions > 1 is required for distributed training because
# embedding_lookup tries to colocate a single-partition embedding variable
# with its lookup ops, which may cause embedding variables to be placed on
# worker jobs.
dec_partitioner = tf.fixed_size_partitioner(num_dec_partitions)
if src_embed_file and enc_partitioner:
raise ValueError(
"Can't set num_enc_partitions > 1 when using pretrained encoder "
"embedding")
if tgt_embed_file and dec_partitioner:
raise ValueError(
"Can't set num_dec_partitions > 1 when using pretrained decdoer "
"embedding")
with tf.variable_scope(
scope or "embeddings", dtype=dtype, partitioner=enc_partitioner) as scope:
# Share embedding
if share_vocab:
if src_vocab_size != tgt_vocab_size:
raise ValueError("Share embedding but different src/tgt vocab sizes"
" %d vs. %d" % (src_vocab_size, tgt_vocab_size))
assert src_embed_size == tgt_embed_size
utils.print_out("# Use the same embedding for source and target")
vocab_file = src_vocab_file or tgt_vocab_file
embed_file = src_embed_file or tgt_embed_file
embedding_encoder = _create_or_load_embed(
"embedding_share", vocab_file, embed_file,
src_vocab_size, src_embed_size, dtype)
embedding_decoder = embedding_encoder
else:
if not use_char_encode:
with tf.variable_scope("encoder", partitioner=enc_partitioner):
embedding_encoder = _create_or_load_embed(
"embedding_encoder", src_vocab_file, src_embed_file,
src_vocab_size, src_embed_size, dtype)
else:
embedding_encoder = None
with tf.variable_scope("decoder", partitioner=dec_partitioner):
embedding_decoder = _create_or_load_embed(
"embedding_decoder", tgt_vocab_file, tgt_embed_file,
tgt_vocab_size, tgt_embed_size, dtype)
return embedding_encoder, embedding_decoder
def _single_cell(unit_type, num_units, forget_bias, dropout, mode,
residual_connection=False, device_str=None, residual_fn=None):
"""Create an instance of a single RNN cell."""
# dropout (= 1 - keep_prob) is set to 0 during eval and infer
dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
# Cell Type
if unit_type == "lstm":
utils.print_out(" LSTM, forget_bias=%g" % forget_bias, new_line=False)
single_cell = tf.contrib.rnn.BasicLSTMCell(
num_units,
forget_bias=forget_bias)
elif unit_type == "gru":
utils.print_out(" GRU", new_line=False)
single_cell = tf.contrib.rnn.GRUCell(num_units)
elif unit_type == "layer_norm_lstm":
utils.print_out(" Layer Normalized LSTM, forget_bias=%g" % forget_bias,
new_line=False)
single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
forget_bias=forget_bias,
layer_norm=True)
elif unit_type == "nas":
utils.print_out(" NASCell", new_line=False)
single_cell = tf.contrib.rnn.NASCell(num_units)
else:
raise ValueError("Unknown unit type %s!" % unit_type)
# Dropout (= 1 - keep_prob)
if dropout > 0.0:
single_cell = tf.contrib.rnn.DropoutWrapper(
cell=single_cell, input_keep_prob=(1.0 - dropout))
utils.print_out(" %s, dropout=%g " % (type(single_cell).__name__, dropout),
new_line=False)
# Residual
if residual_connection:
single_cell = tf.contrib.rnn.ResidualWrapper(
single_cell, residual_fn=residual_fn)
utils.print_out(" %s" % type(single_cell).__name__, new_line=False)
# Device Wrapper
if device_str:
single_cell = tf.contrib.rnn.DeviceWrapper(single_cell, device_str)
utils.print_out(" %s, device=%s" %
(type(single_cell).__name__, device_str), new_line=False)
return single_cell
def _cell_list(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None, residual_fn=None):
"""Create a list of RNN cells."""
if not single_cell_fn:
single_cell_fn = _single_cell
# Multi-GPU
cell_list = []
for i in range(num_layers):
utils.print_out(" cell %d" % i, new_line=False)
single_cell = single_cell_fn(
unit_type=unit_type,
num_units=num_units,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
residual_connection=(i >= num_layers - num_residual_layers),
device_str=get_device_str(i + base_gpu, num_gpus),
residual_fn=residual_fn
)
utils.print_out("")
cell_list.append(single_cell)
return cell_list
def create_rnn_cell(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None):
"""Create multi-layer RNN cell.
Args:
unit_type: string representing the unit type, i.e. "lstm".
num_units: the depth of each unit.
num_layers: number of cells.
num_residual_layers: Number of residual layers from top to bottom. For
example, if `num_layers=4` and `num_residual_layers=2`, the last 2 RNN
cells in the returned list will be wrapped with `ResidualWrapper`.
forget_bias: the initial forget bias of the RNNCell(s).
dropout: floating point value between 0.0 and 1.0:
the probability of dropout. This is ignored if `mode != TRAIN`.
mode: either tf.contrib.learn.TRAIN/EVAL/INFER
num_gpus: The number of gpus to use when performing round-robin
placement of layers.
base_gpu: The gpu device id to use for the first RNN cell in the
returned list. The i-th RNN cell will use `(base_gpu + i) % num_gpus`
as its device id.
single_cell_fn: allow for adding customized cell.
When not specified, we default to model_helper._single_cell
Returns:
An `RNNCell` instance.
"""
cell_list = _cell_list(unit_type=unit_type,
num_units=num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
num_gpus=num_gpus,
base_gpu=base_gpu,
single_cell_fn=single_cell_fn)
if len(cell_list) == 1: # Single layer.
return cell_list[0]
else: # Multi layers
return tf.contrib.rnn.MultiRNNCell(cell_list)
def gradient_clip(gradients, max_gradient_norm):
"""Clipping gradients of a model."""
clipped_gradients, gradient_norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
gradient_norm_summary = [tf.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary.append(
tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_gradients)))
return clipped_gradients, gradient_norm_summary, gradient_norm
def print_variables_in_ckpt(ckpt_path):
"""Print a list of variables in a checkpoint together with their shapes."""
utils.print_out("# Variables in ckpt %s" % ckpt_path)
reader = tf.train.NewCheckpointReader(ckpt_path)
variable_map = reader.get_variable_to_shape_map()
for key in sorted(variable_map.keys()):
utils.print_out(" %s: %s" % (key, variable_map[key]))
def load_model(model, ckpt_path, session, name):
"""Load model from a checkpoint."""
start_time = time.time()
try:
model.saver.restore(session, ckpt_path)
except tf.errors.NotFoundError as e:
utils.print_out("Can't load checkpoint")
print_variables_in_ckpt(ckpt_path)
utils.print_out("%s" % str(e))
session.run(tf.tables_initializer())
utils.print_out(
" loaded %s model parameters from %s, time %.2fs" %
(name, ckpt_path, time.time() - start_time))
return model
def avg_checkpoints(model_dir, num_last_checkpoints, global_step,
global_step_name):
"""Average the last N checkpoints in the model_dir."""
checkpoint_state = tf.train.get_checkpoint_state(model_dir)
if not checkpoint_state:
utils.print_out("# No checkpoint file found in directory: %s" % model_dir)
return None
# Checkpoints are ordered from oldest to newest.
checkpoints = (
checkpoint_state.all_model_checkpoint_paths[-num_last_checkpoints:])
if len(checkpoints) < num_last_checkpoints:
utils.print_out(
"# Skipping averaging checkpoints because not enough checkpoints is "
"avaliable."
)
return None
avg_model_dir = os.path.join(model_dir, "avg_checkpoints")
if not tf.gfile.Exists(avg_model_dir):
utils.print_out(
"# Creating new directory %s for saving averaged checkpoints." %
avg_model_dir)
tf.gfile.MakeDirs(avg_model_dir)
utils.print_out("# Reading and averaging variables in checkpoints:")
var_list = tf.contrib.framework.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if name != global_step_name:
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
utils.print_out(" %s" % checkpoint)
reader = tf.contrib.framework.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
for name in var_values:
var_values[name] /= len(checkpoints)
# Build a graph with same variables in the checkpoints, and save the averaged
# variables into the avg_model_dir.
with tf.Graph().as_default():
tf_vars = [
tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step_var = tf.Variable(
global_step, name=global_step_name, trainable=False)
saver = tf.train.Saver(tf.all_variables())
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
# Use the built saver to save the averaged checkpoint. Only keep 1
# checkpoint and the best checkpoint will be moved to avg_best_metric_dir.
saver.save(
sess,
os.path.join(avg_model_dir, "translate.ckpt"))
return avg_model_dir
def create_or_load_model(model, model_dir, session, name):
"""Create translation model and initialize or load parameters in session."""
latest_ckpt = tf.train.latest_checkpoint(model_dir)
if latest_ckpt:
model = load_model(model, latest_ckpt, session, name)
else:
start_time = time.time()
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
utils.print_out(" created %s model with fresh parameters, time %.2fs" %
(name, time.time() - start_time))
global_step = model.global_step.eval(session=session)
return model, global_step
def compute_perplexity(model, sess, name):
"""Compute perplexity of the output of the model.
Args:
model: model used to compute perplexity.
sess: tensorflow session to use.
name: name of the batch.
Returns:
The perplexity of the eval outputs.
"""
total_loss = 0
total_predict_count = 0
start_time = time.time()
while True:
try:
output_tuple = model.eval(sess)
total_loss += output_tuple.eval_loss * output_tuple.batch_size
total_predict_count += output_tuple.predict_count
except tf.errors.OutOfRangeError:
break
perplexity = utils.safe_exp(total_loss / total_predict_count)
utils.print_time(" eval %s: perplexity %.2f" % (name, perplexity),
start_time)
return perplexity
| 39.138554
| 86
| 0.642951
|
from __future__ import print_function
import collections
import os
import time
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from .utils import iterator_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
__all__ = [
"get_initializer", "get_device_str", "create_train_model",
"create_eval_model", "create_infer_model",
"create_emb_for_encoder_and_decoder", "create_rnn_cell", "gradient_clip",
"create_or_load_model", "load_model", "avg_checkpoints",
"compute_perplexity"
]
VOCAB_SIZE_THRESHOLD_CPU = 50000
def get_initializer(init_op, seed=None, init_weight=None):
if init_op == "uniform":
assert init_weight
return tf.random_uniform_initializer(
-init_weight, init_weight, seed=seed)
elif init_op == "glorot_normal":
return tf.keras.initializers.glorot_normal(
seed=seed)
elif init_op == "glorot_uniform":
return tf.keras.initializers.glorot_uniform(
seed=seed)
else:
raise ValueError("Unknown init_op %s" % init_op)
def get_device_str(device_id, num_gpus):
if num_gpus == 0:
return "/cpu:0"
device_str_output = "/gpu:%d" % (device_id % num_gpus)
return device_str_output
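# Illustrative sketch (not part of the original module): get_device_str() above
# assigns layers to GPUs round-robin. This standalone helper mirrors that
# behaviour so the mapping is easy to inspect; names are hypothetical.
def _round_robin_devices(num_layers, num_gpus, base_gpu=0):
  if num_gpus == 0:
    return ["/cpu:0"] * num_layers
  return ["/gpu:%d" % ((i + base_gpu) % num_gpus) for i in range(num_layers)]

# With 4 layers on 2 GPUs the layers alternate between the two devices.
assert _round_robin_devices(4, 2) == ["/gpu:0", "/gpu:1", "/gpu:0", "/gpu:1"]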
class ExtraArgs(collections.namedtuple(
"ExtraArgs", ("single_cell_fn", "model_device_fn",
"attention_mechanism_fn", "encoder_emb_lookup_fn"))):
pass
class TrainModel(
collections.namedtuple("TrainModel", ("graph", "model", "iterator",
"skip_count_placeholder"))):
pass
def create_train_model(
model_creator, hparams, scope=None, num_workers=1, jobid=0,
extra_args=None):
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "train"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
src_dataset = tf.data.TextLineDataset(tf.gfile.Glob(src_file))
tgt_dataset = tf.data.TextLineDataset(tf.gfile.Glob(tgt_file))
skip_count_placeholder = tf.placeholder(shape=(), dtype=tf.int64)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len,
tgt_max_len=hparams.tgt_max_len,
skip_count=skip_count_placeholder,
num_shards=num_workers,
shard_index=jobid,
use_char_encode=hparams.use_char_encode)
model_device_fn = None
if extra_args: model_device_fn = extra_args.model_device_fn
with tf.device(model_device_fn):
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.TRAIN,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return TrainModel(
graph=graph,
model=model,
iterator=iterator,
skip_count_placeholder=skip_count_placeholder)
class EvalModel(
collections.namedtuple("EvalModel",
("graph", "model", "src_file_placeholder",
"tgt_file_placeholder", "iterator"))):
pass
def create_eval_model(model_creator, hparams, scope=None, extra_args=None):
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
graph = tf.Graph()
with graph.as_default(), tf.container(scope or "eval"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
tgt_vocab_file, default_value=vocab_utils.UNK)
src_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
tgt_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)
src_dataset = tf.data.TextLineDataset(src_file_placeholder)
tgt_dataset = tf.data.TextLineDataset(tgt_file_placeholder)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
hparams.batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=hparams.src_max_len_infer,
tgt_max_len=hparams.tgt_max_len_infer,
use_char_encode=hparams.use_char_encode)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.EVAL,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return EvalModel(
graph=graph,
model=model,
src_file_placeholder=src_file_placeholder,
tgt_file_placeholder=tgt_file_placeholder,
iterator=iterator)
class InferModel(
collections.namedtuple("InferModel",
("graph", "model", "src_placeholder",
"batch_size_placeholder", "iterator"))):
pass
def create_infer_model(model_creator, hparams, scope=None, extra_args=None):
graph = tf.Graph()
src_vocab_file = hparams.src_vocab_file
tgt_vocab_file = hparams.tgt_vocab_file
with graph.as_default(), tf.container(scope or "infer"):
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file, tgt_vocab_file, hparams.share_vocab)
reverse_tgt_vocab_table = lookup_ops.index_to_string_table_from_file(
tgt_vocab_file, default_value=vocab_utils.UNK)
src_placeholder = tf.placeholder(shape=[None], dtype=tf.string)
batch_size_placeholder = tf.placeholder(shape=[], dtype=tf.int64)
src_dataset = tf.data.Dataset.from_tensor_slices(
src_placeholder)
iterator = iterator_utils.get_infer_iterator(
src_dataset,
src_vocab_table,
batch_size=batch_size_placeholder,
eos=hparams.eos,
src_max_len=hparams.src_max_len_infer,
use_char_encode=hparams.use_char_encode)
model = model_creator(
hparams,
iterator=iterator,
mode=tf.contrib.learn.ModeKeys.INFER,
source_vocab_table=src_vocab_table,
target_vocab_table=tgt_vocab_table,
reverse_target_vocab_table=reverse_tgt_vocab_table,
scope=scope,
extra_args=extra_args)
return InferModel(
graph=graph,
model=model,
src_placeholder=src_placeholder,
batch_size_placeholder=batch_size_placeholder,
iterator=iterator)
def _get_embed_device(vocab_size):
if vocab_size > VOCAB_SIZE_THRESHOLD_CPU:
return "/cpu:0"
else:
return "/gpu:0"
def _create_pretrained_emb_from_txt(
vocab_file, embed_file, num_trainable_tokens=3, dtype=tf.float32,
scope=None):
vocab, _ = vocab_utils.load_vocab(vocab_file)
trainable_tokens = vocab[:num_trainable_tokens]
utils.print_out("# Using pretrained embedding: %s." % embed_file)
utils.print_out(" with trainable tokens: ")
emb_dict, emb_size = vocab_utils.load_embed_txt(embed_file)
for token in trainable_tokens:
utils.print_out(" %s" % token)
if token not in emb_dict:
emb_dict[token] = [0.0] * emb_size
emb_mat = np.array(
[emb_dict[token] for token in vocab], dtype=dtype.as_numpy_dtype())
emb_mat = tf.constant(emb_mat)
emb_mat_const = tf.slice(emb_mat, [num_trainable_tokens, 0], [-1, -1])
with tf.variable_scope(scope or "pretrain_embeddings", dtype=dtype) as scope:
with tf.device(_get_embed_device(num_trainable_tokens)):
emb_mat_var = tf.get_variable(
"emb_mat_var", [num_trainable_tokens, emb_size])
return tf.concat([emb_mat_var, emb_mat_const], 0)
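# Illustrative sketch (not part of the original module): the pretrained-embedding
# helper above keeps the first num_trainable_tokens rows as a trainable variable,
# treats the remaining rows as constants, and concatenates them back into one
# matrix. A minimal numpy analogue, with made-up sizes:
import numpy as np

_pretrained = np.arange(12, dtype=np.float32).reshape(6, 2)  # 6 tokens, dim 2
_num_trainable = 3
_trainable_rows = _pretrained[:_num_trainable].copy()   # would be a tf.Variable
_frozen_rows = _pretrained[_num_trainable:]              # stays a constant
_embedding = np.concatenate([_trainable_rows, _frozen_rows], axis=0)
assert _embedding.shape == _pretrained.shape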
def _create_or_load_embed(embed_name, vocab_file, embed_file,
vocab_size, embed_size, dtype):
if vocab_file and embed_file:
embedding = _create_pretrained_emb_from_txt(vocab_file, embed_file)
else:
with tf.device(_get_embed_device(vocab_size)):
embedding = tf.get_variable(
embed_name, [vocab_size, embed_size], dtype)
return embedding
def create_emb_for_encoder_and_decoder(share_vocab,
src_vocab_size,
tgt_vocab_size,
src_embed_size,
tgt_embed_size,
dtype=tf.float32,
num_enc_partitions=0,
num_dec_partitions=0,
src_vocab_file=None,
tgt_vocab_file=None,
src_embed_file=None,
tgt_embed_file=None,
use_char_encode=False,
scope=None):
if num_enc_partitions <= 1:
enc_partitioner = None
else:
enc_partitioner = tf.fixed_size_partitioner(num_enc_partitions)
if num_dec_partitions <= 1:
dec_partitioner = None
else:
dec_partitioner = tf.fixed_size_partitioner(num_dec_partitions)
if src_embed_file and enc_partitioner:
raise ValueError(
"Can't set num_enc_partitions > 1 when using pretrained encoder "
"embedding")
if tgt_embed_file and dec_partitioner:
raise ValueError(
"Can't set num_dec_partitions > 1 when using pretrained decdoer "
"embedding")
with tf.variable_scope(
scope or "embeddings", dtype=dtype, partitioner=enc_partitioner) as scope:
if share_vocab:
if src_vocab_size != tgt_vocab_size:
raise ValueError("Share embedding but different src/tgt vocab sizes"
" %d vs. %d" % (src_vocab_size, tgt_vocab_size))
assert src_embed_size == tgt_embed_size
utils.print_out("# Use the same embedding for source and target")
vocab_file = src_vocab_file or tgt_vocab_file
embed_file = src_embed_file or tgt_embed_file
embedding_encoder = _create_or_load_embed(
"embedding_share", vocab_file, embed_file,
src_vocab_size, src_embed_size, dtype)
embedding_decoder = embedding_encoder
else:
if not use_char_encode:
with tf.variable_scope("encoder", partitioner=enc_partitioner):
embedding_encoder = _create_or_load_embed(
"embedding_encoder", src_vocab_file, src_embed_file,
src_vocab_size, src_embed_size, dtype)
else:
embedding_encoder = None
with tf.variable_scope("decoder", partitioner=dec_partitioner):
embedding_decoder = _create_or_load_embed(
"embedding_decoder", tgt_vocab_file, tgt_embed_file,
tgt_vocab_size, tgt_embed_size, dtype)
return embedding_encoder, embedding_decoder
def _single_cell(unit_type, num_units, forget_bias, dropout, mode,
residual_connection=False, device_str=None, residual_fn=None):
dropout = dropout if mode == tf.contrib.learn.ModeKeys.TRAIN else 0.0
if unit_type == "lstm":
utils.print_out(" LSTM, forget_bias=%g" % forget_bias, new_line=False)
single_cell = tf.contrib.rnn.BasicLSTMCell(
num_units,
forget_bias=forget_bias)
elif unit_type == "gru":
utils.print_out(" GRU", new_line=False)
single_cell = tf.contrib.rnn.GRUCell(num_units)
elif unit_type == "layer_norm_lstm":
utils.print_out(" Layer Normalized LSTM, forget_bias=%g" % forget_bias,
new_line=False)
single_cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
num_units,
forget_bias=forget_bias,
layer_norm=True)
elif unit_type == "nas":
utils.print_out(" NASCell", new_line=False)
single_cell = tf.contrib.rnn.NASCell(num_units)
else:
raise ValueError("Unknown unit type %s!" % unit_type)
if dropout > 0.0:
single_cell = tf.contrib.rnn.DropoutWrapper(
cell=single_cell, input_keep_prob=(1.0 - dropout))
utils.print_out(" %s, dropout=%g " % (type(single_cell).__name__, dropout),
new_line=False)
if residual_connection:
single_cell = tf.contrib.rnn.ResidualWrapper(
single_cell, residual_fn=residual_fn)
utils.print_out(" %s" % type(single_cell).__name__, new_line=False)
if device_str:
single_cell = tf.contrib.rnn.DeviceWrapper(single_cell, device_str)
utils.print_out(" %s, device=%s" %
(type(single_cell).__name__, device_str), new_line=False)
return single_cell
def _cell_list(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None, residual_fn=None):
if not single_cell_fn:
single_cell_fn = _single_cell
cell_list = []
for i in range(num_layers):
utils.print_out(" cell %d" % i, new_line=False)
single_cell = single_cell_fn(
unit_type=unit_type,
num_units=num_units,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
residual_connection=(i >= num_layers - num_residual_layers),
device_str=get_device_str(i + base_gpu, num_gpus),
residual_fn=residual_fn
)
utils.print_out("")
cell_list.append(single_cell)
return cell_list
def create_rnn_cell(unit_type, num_units, num_layers, num_residual_layers,
forget_bias, dropout, mode, num_gpus, base_gpu=0,
single_cell_fn=None):
cell_list = _cell_list(unit_type=unit_type,
num_units=num_units,
num_layers=num_layers,
num_residual_layers=num_residual_layers,
forget_bias=forget_bias,
dropout=dropout,
mode=mode,
num_gpus=num_gpus,
base_gpu=base_gpu,
single_cell_fn=single_cell_fn)
if len(cell_list) == 1: return cell_list[0]
else: return tf.contrib.rnn.MultiRNNCell(cell_list)
def gradient_clip(gradients, max_gradient_norm):
clipped_gradients, gradient_norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
gradient_norm_summary = [tf.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary.append(
tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_gradients)))
return clipped_gradients, gradient_norm_summary, gradient_norm
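# Illustrative sketch (not part of the original module): gradient_clip() above
# relies on global-norm clipping. Numerically this rescales every gradient by
# max_norm / max(global_norm, max_norm), which the numpy version below makes
# explicit; the example values are made up.
import numpy as np

def _clip_by_global_norm(gradients, max_norm):
  global_norm = np.sqrt(sum(np.sum(np.square(g)) for g in gradients))
  scale = max_norm / max(global_norm, max_norm)
  return [g * scale for g in gradients], global_norm

_grads = [np.array([3.0, 4.0]), np.array([0.0, 12.0])]   # global norm = 13
_clipped, _norm = _clip_by_global_norm(_grads, max_norm=5.0)
assert abs(_norm - 13.0) < 1e-9
assert abs(np.sqrt(sum(np.sum(np.square(g)) for g in _clipped)) - 5.0) < 1e-9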
def print_variables_in_ckpt(ckpt_path):
utils.print_out("# Variables in ckpt %s" % ckpt_path)
reader = tf.train.NewCheckpointReader(ckpt_path)
variable_map = reader.get_variable_to_shape_map()
for key in sorted(variable_map.keys()):
utils.print_out(" %s: %s" % (key, variable_map[key]))
def load_model(model, ckpt_path, session, name):
start_time = time.time()
try:
model.saver.restore(session, ckpt_path)
except tf.errors.NotFoundError as e:
utils.print_out("Can't load checkpoint")
print_variables_in_ckpt(ckpt_path)
utils.print_out("%s" % str(e))
session.run(tf.tables_initializer())
utils.print_out(
" loaded %s model parameters from %s, time %.2fs" %
(name, ckpt_path, time.time() - start_time))
return model
def avg_checkpoints(model_dir, num_last_checkpoints, global_step,
global_step_name):
checkpoint_state = tf.train.get_checkpoint_state(model_dir)
if not checkpoint_state:
utils.print_out("# No checkpoint file found in directory: %s" % model_dir)
return None
# Checkpoints are ordered from oldest to newest.
checkpoints = (
checkpoint_state.all_model_checkpoint_paths[-num_last_checkpoints:])
if len(checkpoints) < num_last_checkpoints:
utils.print_out(
"# Skipping averaging checkpoints because not enough checkpoints is "
"avaliable."
)
return None
avg_model_dir = os.path.join(model_dir, "avg_checkpoints")
if not tf.gfile.Exists(avg_model_dir):
utils.print_out(
"# Creating new directory %s for saving averaged checkpoints." %
avg_model_dir)
tf.gfile.MakeDirs(avg_model_dir)
utils.print_out("# Reading and averaging variables in checkpoints:")
var_list = tf.contrib.framework.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if name != global_step_name:
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
utils.print_out(" %s" % checkpoint)
reader = tf.contrib.framework.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
for name in var_values:
var_values[name] /= len(checkpoints)
# Build a graph with same variables in the checkpoints, and save the averaged
# variables into the avg_model_dir.
with tf.Graph().as_default():
tf_vars = [
        tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step_var = tf.Variable(
global_step, name=global_step_name, trainable=False)
saver = tf.train.Saver(tf.all_variables())
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
# Use the built saver to save the averaged checkpoint. Only keep 1
# checkpoint and the best checkpoint will be moved to avg_best_metric_dir.
saver.save(
sess,
os.path.join(avg_model_dir, "translate.ckpt"))
return avg_model_dir
def create_or_load_model(model, model_dir, session, name):
latest_ckpt = tf.train.latest_checkpoint(model_dir)
if latest_ckpt:
model = load_model(model, latest_ckpt, session, name)
else:
start_time = time.time()
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
utils.print_out(" created %s model with fresh parameters, time %.2fs" %
(name, time.time() - start_time))
global_step = model.global_step.eval(session=session)
return model, global_step
def compute_perplexity(model, sess, name):
total_loss = 0
total_predict_count = 0
start_time = time.time()
while True:
try:
output_tuple = model.eval(sess)
total_loss += output_tuple.eval_loss * output_tuple.batch_size
total_predict_count += output_tuple.predict_count
except tf.errors.OutOfRangeError:
break
perplexity = utils.safe_exp(total_loss / total_predict_count)
utils.print_time(" eval %s: perplexity %.2f" % (name, perplexity),
start_time)
return perplexity
| true
| true
|
f70ccecb7e0a0402aa2a3f38c974fe76c305bdb8
| 5,559
|
py
|
Python
|
script/workbench/TGS_salt/pretrain_Unet.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/workbench/TGS_salt/pretrain_Unet.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
script/workbench/TGS_salt/pretrain_Unet.py
|
demetoir/MLtools
|
8c42fcd4cc71728333d9c116ade639fe57d50d37
|
[
"MIT"
] | null | null | null |
import numpy as np
from tqdm import tqdm, trange
from script.data_handler.Base.BaseDataset import BaseDataset
from script.model.sklearn_like_model.BaseModel import BaseModel
from script.model.sklearn_like_model.Mixin import UnsupervisedMetricCallback
from script.model.sklearn_like_model.NetModule.BaseNetModule import BaseNetModule
from script.model.sklearn_like_model.NetModule.FusionNetStructure import FusionNetModule
from script.model.sklearn_like_model.NetModule.PlaceHolderModule import PlaceHolderModule
from script.model.sklearn_like_model.NetModule.TFDynamicLearningRate import TFDynamicLearningRate
from script.util.Stacker import Stacker
from script.util.tensor_ops import *
class pre_train_Unet(BaseModel):
def __init__(
self,
verbose=10,
learning_rate=0.01,
beta1=0.9,
batch_size=100,
stage=4,
n_classes=2,
capacity=64,
depth=1,
dropout_rate=0.5,
**kwargs
):
BaseModel.__init__(self, verbose, **kwargs)
self.batch_size = batch_size
self.learning_rate = learning_rate
self.beta1 = beta1
self.dropout_rate = dropout_rate
self.capacity = capacity
self.stage = stage
self.n_classes = n_classes
self.depth = depth
def _build_input_shapes(self, shapes):
self.x_ph_module = PlaceHolderModule(shapes['x'], tf.float32, name='x')
ret = {}
ret.update(self.x_ph_module.shape_dict)
return ret
def _build_main_graph(self):
self.Xs = self.x_ph_module.build().placeholder
self.net_module = FusionNetModule(
self.Xs, capacity=self.capacity, depth=self.depth, level=self.stage,
n_classes=self.n_classes, dropout_rate=self.dropout_rate
).build()
self.decode = self.net_module.decode
self.recon_module = reconModule(
self.decode, self.capacity
)
self.recon_module.build()
        self._recon = self.recon_module.recon
        # NOTE: the assignment below overrides the recon head above, so the raw
        # decoder output is what actually feeds the reconstruction loss.
        self._recon = self.decode
self.vars = self.net_module.vars
self.vars += self.recon_module.vars
def _build_loss_ops(self):
self.loss = tf.squared_difference(self.Xs, self._recon, name='loss')
self.loss_mean = tf.reduce_mean(self.loss, name='loss_mean')
def _build_train_ops(self):
self.drl = TFDynamicLearningRate(self.learning_rate)
self.drl.build()
self.train_op = tf.train.AdamOptimizer(
self.drl.learning_rate, self.beta1
).minimize(
loss=self.loss_mean, var_list=self.vars
)
def _train_iter(self, dataset, batch_size):
# self.net_module.set_train(self.sess)
x = dataset.next_batch(self.batch_size)
_ = self.sess.run(self.train_op, {self.Xs: x})
# self.net_module.set_predict(self.sess)
def train_AE(
self, x, epoch=1, batch_size=None, dataset_callback=None,
epoch_pbar=True, iter_pbar=True, epoch_callbacks=None,
):
if not self.is_built:
raise RuntimeError(f'{self} not built')
batch_size = getattr(self, 'batch_size') if batch_size is None else batch_size
dataset = dataset_callback if dataset_callback else BaseDataset(x=x)
metric = None
epoch_pbar = tqdm([i for i in range(1, epoch + 1)]) if epoch_pbar else None
for _ in range(1, epoch + 1):
dataset.shuffle()
iter_pbar = trange if iter_pbar else range
for _ in iter_pbar(int(dataset.size / batch_size)):
self._train_iter(dataset, batch_size)
self.sess.run(self.op_inc_global_epoch)
global_epoch = self.sess.run(self.global_epoch)
if epoch_pbar: epoch_pbar.update(1)
            metric = self.metric(x)
            if not np.isfinite(metric):
tqdm.write(f'train fail, e = {global_epoch}, metric = {metric}')
break
results = []
if epoch_callbacks:
for callback in epoch_callbacks:
result = callback(self, dataset, metric, global_epoch)
results += [result]
break_epoch = False
for result in results:
if result and getattr(result, 'break_epoch', False):
break_epoch = True
if break_epoch: break
if epoch_pbar: epoch_pbar.close()
if dataset_callback: del dataset
return metric
def metric(self, x):
if not getattr(self, '_metric_callback', None):
self._metric_callback = UnsupervisedMetricCallback(
self, self.loss_mean, self.Xs,
)
return self._metric_callback(x)
def update_learning_rate(self, lr):
self.learning_rate = lr
if self.sess is not None:
self.drl.update(self.sess, self.learning_rate)
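# Illustrative sketch (not part of the original model code): the pre-training
# objective above is a mean squared reconstruction error, and train_AE() aborts
# when that metric becomes non-finite. A numpy version of both checks, with
# made-up tensors:
import numpy as np

def _recon_metric(x, recon):
    """Mean squared error between the input batch and its reconstruction."""
    return float(np.mean(np.square(x - recon)))

_x = np.zeros((2, 4, 4, 1), dtype=np.float32)
_recon_ok = np.full_like(_x, 0.1)
_metric = _recon_metric(_x, _recon_ok)
assert abs(_metric - 0.01) < 1e-6
# A robust divergence check uses np.isfinite rather than comparing against nan.
assert np.isfinite(_metric)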
class reconModule(BaseNetModule):
def __init__(self, x, capacity=None, reuse=False, name=None, verbose=0):
super().__init__(capacity, reuse, name, verbose)
self.x = x
def build(self):
with tf.variable_scope(self.name):
stacker = Stacker(self.x)
stacker.conv2d(1, CONV_FILTER_3311)
self.recon = stacker.sigmoid()
return self
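# Illustrative sketch (not part of the original model code): reconModule above
# projects the decoder feature map down to a single channel and squashes it with
# a sigmoid so the reconstruction lives in [0, 1]. For simplicity this sketch
# uses a 1x1 (per-pixel) projection instead of the module's 3x3 convolution;
# shapes and weights are made up.
import numpy as np

def _sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

_features = np.random.RandomState(0).randn(1, 8, 8, 16)   # NHWC feature map
_proj = np.random.RandomState(1).randn(16, 1) * 0.1        # 1x1 projection weights
_recon = _sigmoid(_features @ _proj)                        # shape (1, 8, 8, 1)
assert _recon.shape == (1, 8, 8, 1)
assert float(_recon.min()) > 0.0 and float(_recon.max()) < 1.0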
| 34.52795
| 98
| 0.61288
|
import numpy as np
from tqdm import tqdm, trange
from script.data_handler.Base.BaseDataset import BaseDataset
from script.model.sklearn_like_model.BaseModel import BaseModel
from script.model.sklearn_like_model.Mixin import UnsupervisedMetricCallback
from script.model.sklearn_like_model.NetModule.BaseNetModule import BaseNetModule
from script.model.sklearn_like_model.NetModule.FusionNetStructure import FusionNetModule
from script.model.sklearn_like_model.NetModule.PlaceHolderModule import PlaceHolderModule
from script.model.sklearn_like_model.NetModule.TFDynamicLearningRate import TFDynamicLearningRate
from script.util.Stacker import Stacker
from script.util.tensor_ops import *
class pre_train_Unet(BaseModel):
def __init__(
self,
verbose=10,
learning_rate=0.01,
beta1=0.9,
batch_size=100,
stage=4,
n_classes=2,
capacity=64,
depth=1,
dropout_rate=0.5,
**kwargs
):
BaseModel.__init__(self, verbose, **kwargs)
self.batch_size = batch_size
self.learning_rate = learning_rate
self.beta1 = beta1
self.dropout_rate = dropout_rate
self.capacity = capacity
self.stage = stage
self.n_classes = n_classes
self.depth = depth
def _build_input_shapes(self, shapes):
self.x_ph_module = PlaceHolderModule(shapes['x'], tf.float32, name='x')
ret = {}
ret.update(self.x_ph_module.shape_dict)
return ret
def _build_main_graph(self):
self.Xs = self.x_ph_module.build().placeholder
self.net_module = FusionNetModule(
self.Xs, capacity=self.capacity, depth=self.depth, level=self.stage,
n_classes=self.n_classes, dropout_rate=self.dropout_rate
).build()
self.decode = self.net_module.decode
self.recon_module = reconModule(
self.decode, self.capacity
)
self.recon_module.build()
self._recon = self.recon_module.recon
self._recon = self.decode
self.vars = self.net_module.vars
self.vars += self.recon_module.vars
def _build_loss_ops(self):
self.loss = tf.squared_difference(self.Xs, self._recon, name='loss')
self.loss_mean = tf.reduce_mean(self.loss, name='loss_mean')
def _build_train_ops(self):
self.drl = TFDynamicLearningRate(self.learning_rate)
self.drl.build()
self.train_op = tf.train.AdamOptimizer(
self.drl.learning_rate, self.beta1
).minimize(
loss=self.loss_mean, var_list=self.vars
)
def _train_iter(self, dataset, batch_size):
x = dataset.next_batch(self.batch_size)
_ = self.sess.run(self.train_op, {self.Xs: x})
def train_AE(
self, x, epoch=1, batch_size=None, dataset_callback=None,
epoch_pbar=True, iter_pbar=True, epoch_callbacks=None,
):
if not self.is_built:
raise RuntimeError(f'{self} not built')
batch_size = getattr(self, 'batch_size') if batch_size is None else batch_size
dataset = dataset_callback if dataset_callback else BaseDataset(x=x)
metric = None
epoch_pbar = tqdm([i for i in range(1, epoch + 1)]) if epoch_pbar else None
for _ in range(1, epoch + 1):
dataset.shuffle()
iter_pbar = trange if iter_pbar else range
for _ in iter_pbar(int(dataset.size / batch_size)):
self._train_iter(dataset, batch_size)
self.sess.run(self.op_inc_global_epoch)
global_epoch = self.sess.run(self.global_epoch)
if epoch_pbar: epoch_pbar.update(1)
            metric = self.metric(x)
            if not np.isfinite(metric):
tqdm.write(f'train fail, e = {global_epoch}, metric = {metric}')
break
results = []
if epoch_callbacks:
for callback in epoch_callbacks:
result = callback(self, dataset, metric, global_epoch)
results += [result]
break_epoch = False
for result in results:
if result and getattr(result, 'break_epoch', False):
break_epoch = True
if break_epoch: break
if epoch_pbar: epoch_pbar.close()
if dataset_callback: del dataset
return metric
def metric(self, x):
if not getattr(self, '_metric_callback', None):
self._metric_callback = UnsupervisedMetricCallback(
self, self.loss_mean, self.Xs,
)
return self._metric_callback(x)
def update_learning_rate(self, lr):
self.learning_rate = lr
if self.sess is not None:
self.drl.update(self.sess, self.learning_rate)
class reconModule(BaseNetModule):
def __init__(self, x, capacity=None, reuse=False, name=None, verbose=0):
super().__init__(capacity, reuse, name, verbose)
self.x = x
def build(self):
with tf.variable_scope(self.name):
stacker = Stacker(self.x)
stacker.conv2d(1, CONV_FILTER_3311)
self.recon = stacker.sigmoid()
return self
| true
| true
|
f70ccef9cd35c768a2257cab5fc3c2d3f8ada73b
| 5,332
|
py
|
Python
|
mask r-cnn/evaluation.py
|
bharatmahaur/ComparativeStudy
|
2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d
|
[
"Apache-2.0"
] | 5
|
2021-09-26T07:19:42.000Z
|
2022-03-11T23:25:36.000Z
|
mask r-cnn/evaluation.py
|
bharatmahaur/ComparativeStudy
|
2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d
|
[
"Apache-2.0"
] | null | null | null |
mask r-cnn/evaluation.py
|
bharatmahaur/ComparativeStudy
|
2e3b6de882acc2a465e1b7c8bcd23cc9c8181d3d
|
[
"Apache-2.0"
] | null | null | null |
"""
# Train a new model starting from pre-trained weights
python3 training.py --dataset=/path/to/dataset --weight=/path/to/pretrained/weight.h5
# Resume training a model
python3 training.py --dataset=/path/to/dataset --continue_train=/path/to/latest/weights.h5
"""
import logging
import warnings
import os
logging.getLogger("tensorflow").setLevel(logging.ERROR)
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import matplotlib.pyplot as plt
import imgaug
# Root directory of the project
ROOT_DIR = os.getcwd()
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import parse_args
import dataset
############################################################
# Args Configurations
############################################################
args = parse_args.parse_args()
# config parameter
pretrained_weight = os.path.join(ROOT_DIR, args.weight)
dataset_path = os.path.join(ROOT_DIR, args.dataset)
logs = os.path.join(ROOT_DIR, "logs")
if args.continue_train == "None":
continue_train = args.continue_train
else:
continue_train = os.path.join(ROOT_DIR, args.continue_train)
############################################################
# Configurations
############################################################
class CustomConfig(Config):
NAME = "custom_dataset"
IMAGES_PER_GPU = 1
IMAGE_MAX_DIM = 512
NUM_CLASSES = 1 + 4
STEPS_PER_EPOCH = 750
VALIDATION_STEPS = 250
DETECTION_MIN_CONFIDENCE = 0.9
LEARNING_RATE = 0.001
DETECTION_NMS_THRESHOLD = 0.2
TRAIN_ROIS_PER_IMAGE = 200
MAX_GT_INSTANCES = 50
DETECTION_MAX_INSTANCES = 50
############################################################
# Training
############################################################
def train(model):
# Training set.
dataset_train = dataset.CustomDataset()
dataset_train.load_custom(dataset_path, "train")
dataset_train.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_train.image_ids), dataset_train.class_names))
# Validation set
dataset_val = dataset.CustomDataset()
dataset_val.load_custom(dataset_path, "val")
dataset_val.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_val.image_ids), dataset_val.class_names))
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.Flipud(0.5)])
model_inference = modellib.MaskRCNN(mode="inference", config=config,model_dir=logs)
    # calculate COCO mAP every 5 epochs, limited to the first 1000 images
mAP_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset_val,
calculate_at_every_X_epoch=5, dataset_limit=1000, verbose=1)
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=20,
layers='heads',
custom_callbacks=[mAP_callback],
augmentation=augmentation)
# print("Fine tune Resnet stage 4 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE,
# epochs=60,
# layers='4+',
# custom_callbacks=[mAP_callback],
# augmentation=augmentation)
# print("Fine tune Resnet stage 3 and up")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE/10,
# epochs=90,
# layers='3+',
# custom_callbacks=[mAP_callback],
# augmentation=augmentation)
# print("Fine tune all layers")
# model.train(dataset_train, dataset_val,
# learning_rate=config.LEARNING_RATE/100,
# epochs=100,
# layers='all',
# custom_callbacks=[mAP_callback])
# # augmentation=augmentation)
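# Illustrative sketch (not part of the original script): the commented-out calls
# above describe a progressive-unfreezing schedule -- heads first, then deeper
# ResNet stages, then all layers at a lower learning rate. Expressed as plain
# data (the epoch counts and rate scales are the ones used or commented above):
FINE_TUNE_SCHEDULE = [
    {"layers": "heads", "epochs": 20, "lr_scale": 1.0},
    {"layers": "4+", "epochs": 60, "lr_scale": 1.0},
    {"layers": "3+", "epochs": 90, "lr_scale": 0.1},
    {"layers": "all", "epochs": 100, "lr_scale": 0.01},
]

def describe_schedule(base_lr, schedule=FINE_TUNE_SCHEDULE):
    """Return human-readable lines describing each fine-tuning stage."""
    return ["layers=%s until epoch %d at lr=%g" %
            (s["layers"], s["epochs"], base_lr * s["lr_scale"])
            for s in schedule]

assert describe_schedule(0.001)[0] == "layers=heads until epoch 20 at lr=0.001"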
############################################################
# Main
############################################################
if __name__ == '__main__':
print("Pre-trained weight: ", pretrained_weight)
print("Dataset: ", dataset_path)
print("Logs: ", logs)
print("Continue Train: ", continue_train)
# Configurations
config = CustomConfig()
config.display()
# Create model
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=logs)
if continue_train.lower() == "none":
weights_path = pretrained_weight
else:
weights_path = continue_train
# Load weights
print("Loading weights ", weights_path)
if continue_train == "None":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
train(model)
| 31
| 112
| 0.597337
|
import logging
import warnings
import os
logging.getLogger("tensorflow").setLevel(logging.ERROR)
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
import matplotlib.pyplot as plt
import imgaug
ROOT_DIR = os.getcwd()
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import parse_args
import dataset
args = parse_args.parse_args()
pretrained_weight = os.path.join(ROOT_DIR, args.weight)
dataset_path = os.path.join(ROOT_DIR, args.dataset)
logs = os.path.join(ROOT_DIR, "logs")
if args.continue_train == "None":
continue_train = args.continue_train
else:
continue_train = os.path.join(ROOT_DIR, args.continue_train)
class CustomConfig(Config):
NAME = "custom_dataset"
IMAGES_PER_GPU = 1
IMAGE_MAX_DIM = 512
NUM_CLASSES = 1 + 4
STEPS_PER_EPOCH = 750
VALIDATION_STEPS = 250
DETECTION_MIN_CONFIDENCE = 0.9
LEARNING_RATE = 0.001
DETECTION_NMS_THRESHOLD = 0.2
TRAIN_ROIS_PER_IMAGE = 200
MAX_GT_INSTANCES = 50
DETECTION_MAX_INSTANCES = 50
def train(model):
dataset_train = dataset.CustomDataset()
dataset_train.load_custom(dataset_path, "train")
dataset_train.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_train.image_ids), dataset_train.class_names))
dataset_val = dataset.CustomDataset()
dataset_val.load_custom(dataset_path, "val")
dataset_val.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_val.image_ids), dataset_val.class_names))
augmentation = imgaug.augmenters.Sometimes(0.5, [
imgaug.augmenters.Fliplr(0.5),
imgaug.augmenters.Flipud(0.5)])
model_inference = modellib.MaskRCNN(mode="inference", config=config,model_dir=logs)
mAP_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset_val,
calculate_at_every_X_epoch=5, dataset_limit=1000, verbose=1)
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=20,
layers='heads',
custom_callbacks=[mAP_callback],
augmentation=augmentation)
if __name__ == '__main__':
print("Pre-trained weight: ", pretrained_weight)
print("Dataset: ", dataset_path)
print("Logs: ", logs)
print("Continue Train: ", continue_train)
config = CustomConfig()
config.display()
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=logs)
if continue_train.lower() == "none":
weights_path = pretrained_weight
else:
weights_path = continue_train
print("Loading weights ", weights_path)
if continue_train == "None":
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
train(model)
| true
| true
|
f70ccfa334a29d3f84bfc3a580de91f8a135b6c9
| 16,978
|
py
|
Python
|
elastic/datadog_checks/elastic/elastic.py
|
idarlington/integrations-core
|
0d323ef4c3af18acec2b681f5389326257b45c1e
|
[
"BSD-3-Clause"
] | null | null | null |
elastic/datadog_checks/elastic/elastic.py
|
idarlington/integrations-core
|
0d323ef4c3af18acec2b681f5389326257b45c1e
|
[
"BSD-3-Clause"
] | 2
|
2021-04-26T13:37:48.000Z
|
2021-04-26T13:37:49.000Z
|
elastic/datadog_checks/elastic/elastic.py
|
idarlington/integrations-core
|
0d323ef4c3af18acec2b681f5389326257b45c1e
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import time
from collections import defaultdict
import requests
from six import iteritems, itervalues
from six.moves.urllib.parse import urljoin, urlparse
from datadog_checks.base import AgentCheck, is_affirmative, to_string
from .config import from_instance
from .metrics import (
CLUSTER_PENDING_TASKS,
health_stats_for_version,
index_stats_for_version,
node_system_stats_for_version,
pshard_stats_for_version,
slm_stats_for_version,
stats_for_version,
)
class AuthenticationError(requests.exceptions.HTTPError):
"""Authentication Error, unable to reach server"""
class ESCheck(AgentCheck):
HTTP_CONFIG_REMAPPER = {
'aws_service': {'name': 'aws_service', 'default': 'es'},
'ssl_verify': {'name': 'tls_verify'},
'ssl_cert': {'name': 'tls_cert'},
'ssl_key': {'name': 'tls_private_key'},
}
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, instances):
super(ESCheck, self).__init__(name, init_config, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
if self.instance.get('auth_type') == 'aws' and self.instance.get('url'):
self.HTTP_CONFIG_REMAPPER = self.HTTP_CONFIG_REMAPPER.copy()
self.HTTP_CONFIG_REMAPPER['aws_host'] = {
'name': 'aws_host',
'default': urlparse(self.instance['url']).hostname,
}
self._config = from_instance(self.instance)
def check(self, _):
admin_forwarder = self._config.admin_forwarder
jvm_rate = self.instance.get('gc_collectors_as_rate', False)
base_tags = list(self._config.tags)
service_check_tags = list(self._config.service_check_tags)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
try:
version = self._get_es_version()
except AuthenticationError:
self.log.exception("The ElasticSearch credentials are incorrect")
raise
health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = self._get_urls(version)
stats_metrics = stats_for_version(version, jvm_rate)
if self._config.cluster_stats:
# Include Node System metrics
stats_metrics.update(node_system_stats_for_version(version))
pshard_stats_metrics = pshard_stats_for_version(version)
# Load stats data.
# This must happen before other URL processing as the cluster name
# is retrieved here, and added to the tag list.
stats_url = self._join_url(stats_url, admin_forwarder)
stats_data = self._get_data(stats_url)
if stats_data.get('cluster_name'):
# retrieve the cluster name from the data, and append it to the
# master tag list.
cluster_tags = ["elastic_cluster:{}".format(stats_data['cluster_name'])]
if not is_affirmative(self.instance.get('disable_legacy_cluster_tag', False)):
cluster_tags.append("cluster_name:{}".format(stats_data['cluster_name']))
base_tags.extend(cluster_tags)
service_check_tags.extend(cluster_tags)
self._process_stats_data(stats_data, stats_metrics, base_tags)
# Load cluster-wise data
        # Note: this is a cluster-wide query, so it might time out.
if self._config.pshard_stats:
send_sc = bubble_ex = not self._config.pshard_graceful_to
pshard_stats_url = self._join_url(pshard_stats_url, admin_forwarder)
try:
pshard_stats_data = self._get_data(pshard_stats_url, send_sc=send_sc)
self._process_pshard_stats_data(pshard_stats_data, pshard_stats_metrics, base_tags)
except requests.ReadTimeout as e:
if bubble_ex:
raise
self.log.warning("Timed out reading pshard-stats from servers (%s) - stats will be missing", e)
# Get Snapshot Lifecycle Management (SLM) policies
if slm_url is not None:
slm_url = self._join_url(slm_url, admin_forwarder)
policy_data = self._get_data(slm_url)
self._process_policy_data(policy_data, version, base_tags)
# Load the health data.
health_url = self._join_url(health_url, admin_forwarder)
health_data = self._get_data(health_url)
self._process_health_data(health_data, version, base_tags, service_check_tags)
if self._config.pending_task_stats:
# Load the pending_tasks data.
pending_tasks_url = self._join_url(pending_tasks_url, admin_forwarder)
pending_tasks_data = self._get_data(pending_tasks_url)
self._process_pending_tasks_data(pending_tasks_data, base_tags)
if self._config.index_stats and version >= [1, 0, 0]:
try:
self._get_index_metrics(admin_forwarder, version, base_tags)
except requests.ReadTimeout as e:
self.log.warning("Timed out reading index stats from servers (%s) - stats will be missing", e)
# If we're here we did not have any ES conn issues
self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.OK, tags=self._config.service_check_tags)
def _get_es_version(self):
"""
Get the running version of elasticsearch.
"""
try:
data = self._get_data(self._config.url, send_sc=False)
raw_version = data['version']['number']
self.set_metadata('version', raw_version)
            # pre-release versions of elasticsearch are suffixed with -rcX etc.
# peel that off so that the map below doesn't error out
raw_version = raw_version.split('-')[0]
version = [int(p) for p in raw_version.split('.')[0:3]]
except AuthenticationError:
raise
except Exception as e:
self.warning("Error while trying to get Elasticsearch version from %s %s", self._config.url, e)
version = [1, 0, 0]
self.log.debug("Elasticsearch version is %s", version)
return version
def _join_url(self, url, admin_forwarder=False):
"""
overrides `urlparse.urljoin` since it removes base url path
https://docs.python.org/2/library/urlparse.html#urlparse.urljoin
"""
if admin_forwarder:
return self._config.url + url
else:
return urljoin(self._config.url, url)
def _get_index_metrics(self, admin_forwarder, version, base_tags):
cat_url = '/_cat/indices?format=json&bytes=b'
index_url = self._join_url(cat_url, admin_forwarder)
index_resp = self._get_data(index_url)
index_stats_metrics = index_stats_for_version(version)
health_stat = {'green': 0, 'yellow': 1, 'red': 2}
reversed_health_stat = {'red': 0, 'yellow': 1, 'green': 2}
for idx in index_resp:
tags = base_tags + ['index_name:' + idx['index']]
# we need to remap metric names because the ones from elastic
# contain dots and that would confuse `_process_metric()` (sic)
index_data = {
'docs_count': idx.get('docs.count'),
'docs_deleted': idx.get('docs.deleted'),
'primary_shards': idx.get('pri'),
'replica_shards': idx.get('rep'),
'primary_store_size': idx.get('pri.store.size'),
'store_size': idx.get('store.size'),
'health': idx.get('health'),
}
# Convert the health status value
if index_data['health'] is not None:
status = index_data['health'].lower()
index_data['health'] = health_stat[status]
index_data['health_reverse'] = reversed_health_stat[status]
# Ensure that index_data does not contain None values
for key, value in list(iteritems(index_data)):
if value is None:
del index_data[key]
self.log.warning("The index %s has no metric data for %s", idx['index'], key)
for metric in index_stats_metrics:
# metric description
desc = index_stats_metrics[metric]
self._process_metric(index_data, metric, *desc, tags=tags)
def _get_urls(self, version):
"""
Compute the URLs we need to hit depending on the running ES version
"""
pshard_stats_url = "/_stats"
health_url = "/_cluster/health"
slm_url = None
if version >= [0, 90, 10]:
pending_tasks_url = "/_cluster/pending_tasks"
stats_url = "/_nodes/stats" if self._config.cluster_stats else "/_nodes/_local/stats"
if version < [5, 0, 0]:
# version 5 errors out if the `all` parameter is set
stats_url += "?all=true"
if version >= [7, 4, 0] and self._config.slm_stats:
slm_url = "/_slm/policy"
else:
# legacy
pending_tasks_url = None
stats_url = (
"/_cluster/nodes/stats?all=true"
if self._config.cluster_stats
else "/_cluster/nodes/_local/stats?all=true"
)
return health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url
def _get_data(self, url, send_sc=True):
"""
Hit a given URL and return the parsed json
"""
resp = None
try:
resp = self.http.get(url)
resp.raise_for_status()
except Exception as e:
            # hitting a 400 here is a particular kind of auth error, meaning the config is broken
if resp and resp.status_code == 400:
raise AuthenticationError("The ElasticSearch credentials are incorrect")
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {} when hitting {}".format(e, url),
tags=self._config.service_check_tags,
)
raise
self.log.debug("request to url %s returned: %s", url, resp)
return resp.json()
def _process_pending_tasks_data(self, data, base_tags):
p_tasks = defaultdict(int)
average_time_in_queue = 0
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
average_time_in_queue += task.get('time_in_queue_millis', 0)
total = sum(itervalues(p_tasks))
node_data = {
'pending_task_total': total,
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
# if total is 0 default to 1
'pending_tasks_time_in_queue': average_time_in_queue // (total or 1),
}
for metric in CLUSTER_PENDING_TASKS:
# metric description
desc = CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=base_tags)
def _process_stats_data(self, data, stats_metrics, base_tags):
for node_data in itervalues(data.get('nodes', {})):
metric_hostname = None
metrics_tags = list(base_tags)
# Resolve the node's name
node_name = node_data.get('name')
if node_name:
metrics_tags.append('node_name:{}'.format(node_name))
# Resolve the node's hostname
if self._config.node_name_as_host:
if node_name:
metric_hostname = node_name
elif self._config.cluster_stats:
for k in ['hostname', 'host']:
if k in node_data:
metric_hostname = node_data[k]
break
for metric, desc in iteritems(stats_metrics):
self._process_metric(node_data, metric, *desc, tags=metrics_tags, hostname=metric_hostname)
def _process_pshard_stats_data(self, data, pshard_stats_metrics, base_tags):
for metric, desc in iteritems(pshard_stats_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, hostname=None):
"""
data: dictionary containing all the stats
metric: datadog metric
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
xform: a lambda to apply to the numerical value
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self.log.debug("Metric not found: %s -> %s", path, metric)
def _process_health_data(self, data, version, base_tags, service_check_tags):
cluster_status = data.get('status')
if not self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
if cluster_status in ["yellow", "red"]:
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
if cluster_status != self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
cluster_health_metrics = health_stats_for_version(version)
for metric, desc in iteritems(cluster_health_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
# Process the service check
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = (
"{tag} on cluster \"{cluster_name}\" "
"| active_shards={active_shards} "
"| initializing_shards={initializing_shards} "
"| relocating_shards={relocating_shards} "
"| unassigned_shards={unassigned_shards} "
"| timed_out={timed_out}".format(
tag=data.get('tag'),
cluster_name=data.get('cluster_name'),
active_shards=data.get('active_shards'),
initializing_shards=data.get('initializing_shards'),
relocating_shards=data.get('relocating_shards'),
unassigned_shards=data.get('unassigned_shards'),
timed_out=data.get('timed_out'),
)
)
self.service_check(self.SERVICE_CHECK_CLUSTER_STATUS, status, message=msg, tags=service_check_tags)
def _process_policy_data(self, data, version, base_tags):
for policy, policy_data in iteritems(data):
repo = policy_data.get('policy', {}).get('repository', 'unknown')
tags = base_tags + ['policy:{}'.format(policy), 'repository:{}'.format(repo)]
slm_stats = slm_stats_for_version(version)
for metric, desc in iteritems(slm_stats):
self._process_metric(policy_data, metric, *desc, tags=tags)
def _create_event(self, status, tags=None):
hostname = to_string(self.hostname)
if status == "red":
alert_type = "error"
msg_title = "{} is {}".format(hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "{} is {}".format(hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "{} recovered as {}".format(hostname, status)
msg = "ElasticSearch: {} just reported as {}".format(hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags,
}
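# Illustrative sketch (not part of the check): _process_metric() above walks a
# dotted path such as "jvm.mem.heap_used_in_bytes" through the nested JSON that
# Elasticsearch returns. The standalone helper below mirrors that traversal;
# the sample payload and paths are made up.
def _dig(data, dotted_path):
    """Follow a dot-separated path through nested dicts, returning None if absent."""
    value = data
    for key in dotted_path.split('.'):
        if value is None:
            return None
        value = value.get(key)
    return value

_sample = {"jvm": {"mem": {"heap_used_in_bytes": 1024}}}
assert _dig(_sample, "jvm.mem.heap_used_in_bytes") == 1024
assert _dig(_sample, "jvm.gc.collectors") is None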
| 41.309002
| 112
| 0.607963
|
import time
from collections import defaultdict
import requests
from six import iteritems, itervalues
from six.moves.urllib.parse import urljoin, urlparse
from datadog_checks.base import AgentCheck, is_affirmative, to_string
from .config import from_instance
from .metrics import (
CLUSTER_PENDING_TASKS,
health_stats_for_version,
index_stats_for_version,
node_system_stats_for_version,
pshard_stats_for_version,
slm_stats_for_version,
stats_for_version,
)
class AuthenticationError(requests.exceptions.HTTPError):
class ESCheck(AgentCheck):
HTTP_CONFIG_REMAPPER = {
'aws_service': {'name': 'aws_service', 'default': 'es'},
'ssl_verify': {'name': 'tls_verify'},
'ssl_cert': {'name': 'tls_cert'},
'ssl_key': {'name': 'tls_private_key'},
}
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, instances):
super(ESCheck, self).__init__(name, init_config, instances)
self.cluster_status = {}
if self.instance.get('auth_type') == 'aws' and self.instance.get('url'):
self.HTTP_CONFIG_REMAPPER = self.HTTP_CONFIG_REMAPPER.copy()
self.HTTP_CONFIG_REMAPPER['aws_host'] = {
'name': 'aws_host',
'default': urlparse(self.instance['url']).hostname,
}
self._config = from_instance(self.instance)
def check(self, _):
admin_forwarder = self._config.admin_forwarder
jvm_rate = self.instance.get('gc_collectors_as_rate', False)
base_tags = list(self._config.tags)
service_check_tags = list(self._config.service_check_tags)
try:
version = self._get_es_version()
except AuthenticationError:
self.log.exception("The ElasticSearch credentials are incorrect")
raise
health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url = self._get_urls(version)
stats_metrics = stats_for_version(version, jvm_rate)
if self._config.cluster_stats:
stats_metrics.update(node_system_stats_for_version(version))
pshard_stats_metrics = pshard_stats_for_version(version)
stats_url = self._join_url(stats_url, admin_forwarder)
stats_data = self._get_data(stats_url)
if stats_data.get('cluster_name'):
cluster_tags = ["elastic_cluster:{}".format(stats_data['cluster_name'])]
if not is_affirmative(self.instance.get('disable_legacy_cluster_tag', False)):
cluster_tags.append("cluster_name:{}".format(stats_data['cluster_name']))
base_tags.extend(cluster_tags)
service_check_tags.extend(cluster_tags)
self._process_stats_data(stats_data, stats_metrics, base_tags)
if self._config.pshard_stats:
send_sc = bubble_ex = not self._config.pshard_graceful_to
pshard_stats_url = self._join_url(pshard_stats_url, admin_forwarder)
try:
pshard_stats_data = self._get_data(pshard_stats_url, send_sc=send_sc)
self._process_pshard_stats_data(pshard_stats_data, pshard_stats_metrics, base_tags)
except requests.ReadTimeout as e:
if bubble_ex:
raise
self.log.warning("Timed out reading pshard-stats from servers (%s) - stats will be missing", e)
if slm_url is not None:
slm_url = self._join_url(slm_url, admin_forwarder)
policy_data = self._get_data(slm_url)
self._process_policy_data(policy_data, version, base_tags)
health_url = self._join_url(health_url, admin_forwarder)
health_data = self._get_data(health_url)
self._process_health_data(health_data, version, base_tags, service_check_tags)
if self._config.pending_task_stats:
pending_tasks_url = self._join_url(pending_tasks_url, admin_forwarder)
pending_tasks_data = self._get_data(pending_tasks_url)
self._process_pending_tasks_data(pending_tasks_data, base_tags)
if self._config.index_stats and version >= [1, 0, 0]:
try:
self._get_index_metrics(admin_forwarder, version, base_tags)
except requests.ReadTimeout as e:
self.log.warning("Timed out reading index stats from servers (%s) - stats will be missing", e)
self.service_check(self.SERVICE_CHECK_CONNECT_NAME, AgentCheck.OK, tags=self._config.service_check_tags)
def _get_es_version(self):
try:
data = self._get_data(self._config.url, send_sc=False)
raw_version = data['version']['number']
self.set_metadata('version', raw_version)
            # pre-release versions of elasticsearch are suffixed with -rcX etc.
# peel that off so that the map below doesn't error out
raw_version = raw_version.split('-')[0]
version = [int(p) for p in raw_version.split('.')[0:3]]
except AuthenticationError:
raise
except Exception as e:
self.warning("Error while trying to get Elasticsearch version from %s %s", self._config.url, e)
version = [1, 0, 0]
self.log.debug("Elasticsearch version is %s", version)
return version
def _join_url(self, url, admin_forwarder=False):
if admin_forwarder:
return self._config.url + url
else:
return urljoin(self._config.url, url)
def _get_index_metrics(self, admin_forwarder, version, base_tags):
cat_url = '/_cat/indices?format=json&bytes=b'
index_url = self._join_url(cat_url, admin_forwarder)
index_resp = self._get_data(index_url)
index_stats_metrics = index_stats_for_version(version)
health_stat = {'green': 0, 'yellow': 1, 'red': 2}
reversed_health_stat = {'red': 0, 'yellow': 1, 'green': 2}
for idx in index_resp:
tags = base_tags + ['index_name:' + idx['index']]
index_data = {
'docs_count': idx.get('docs.count'),
'docs_deleted': idx.get('docs.deleted'),
'primary_shards': idx.get('pri'),
'replica_shards': idx.get('rep'),
'primary_store_size': idx.get('pri.store.size'),
'store_size': idx.get('store.size'),
'health': idx.get('health'),
}
if index_data['health'] is not None:
status = index_data['health'].lower()
index_data['health'] = health_stat[status]
index_data['health_reverse'] = reversed_health_stat[status]
for key, value in list(iteritems(index_data)):
if value is None:
del index_data[key]
self.log.warning("The index %s has no metric data for %s", idx['index'], key)
for metric in index_stats_metrics:
desc = index_stats_metrics[metric]
self._process_metric(index_data, metric, *desc, tags=tags)
def _get_urls(self, version):
pshard_stats_url = "/_stats"
health_url = "/_cluster/health"
slm_url = None
if version >= [0, 90, 10]:
pending_tasks_url = "/_cluster/pending_tasks"
stats_url = "/_nodes/stats" if self._config.cluster_stats else "/_nodes/_local/stats"
if version < [5, 0, 0]:
stats_url += "?all=true"
if version >= [7, 4, 0] and self._config.slm_stats:
slm_url = "/_slm/policy"
else:
pending_tasks_url = None
stats_url = (
"/_cluster/nodes/stats?all=true"
if self._config.cluster_stats
else "/_cluster/nodes/_local/stats?all=true"
)
return health_url, stats_url, pshard_stats_url, pending_tasks_url, slm_url
def _get_data(self, url, send_sc=True):
resp = None
try:
resp = self.http.get(url)
resp.raise_for_status()
except Exception as e:
if resp and resp.status_code == 400:
raise AuthenticationError("The ElasticSearch credentials are incorrect")
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {} when hitting {}".format(e, url),
tags=self._config.service_check_tags,
)
raise
self.log.debug("request to url %s returned: %s", url, resp)
return resp.json()
def _process_pending_tasks_data(self, data, base_tags):
p_tasks = defaultdict(int)
average_time_in_queue = 0
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
average_time_in_queue += task.get('time_in_queue_millis', 0)
total = sum(itervalues(p_tasks))
node_data = {
'pending_task_total': total,
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
# if total is 0 default to 1
'pending_tasks_time_in_queue': average_time_in_queue // (total or 1),
}
for metric in CLUSTER_PENDING_TASKS:
# metric description
desc = CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=base_tags)
def _process_stats_data(self, data, stats_metrics, base_tags):
for node_data in itervalues(data.get('nodes', {})):
metric_hostname = None
metrics_tags = list(base_tags)
# Resolve the node's name
node_name = node_data.get('name')
if node_name:
metrics_tags.append('node_name:{}'.format(node_name))
if self._config.node_name_as_host:
if node_name:
metric_hostname = node_name
elif self._config.cluster_stats:
for k in ['hostname', 'host']:
if k in node_data:
metric_hostname = node_data[k]
break
for metric, desc in iteritems(stats_metrics):
self._process_metric(node_data, metric, *desc, tags=metrics_tags, hostname=metric_hostname)
def _process_pshard_stats_data(self, data, pshard_stats_metrics, base_tags):
for metric, desc in iteritems(pshard_stats_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
def _process_metric(self, data, metric, xtype, path, xform=None, tags=None, hostname=None):
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self.log.debug("Metric not found: %s -> %s", path, metric)
def _process_health_data(self, data, version, base_tags, service_check_tags):
cluster_status = data.get('status')
if not self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
if cluster_status in ["yellow", "red"]:
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
if cluster_status != self.cluster_status.get(self._config.url):
self.cluster_status[self._config.url] = cluster_status
event = self._create_event(cluster_status, tags=base_tags)
self.event(event)
cluster_health_metrics = health_stats_for_version(version)
for metric, desc in iteritems(cluster_health_metrics):
self._process_metric(data, metric, *desc, tags=base_tags)
# Process the service check
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = (
"{tag} on cluster \"{cluster_name}\" "
"| active_shards={active_shards} "
"| initializing_shards={initializing_shards} "
"| relocating_shards={relocating_shards} "
"| unassigned_shards={unassigned_shards} "
"| timed_out={timed_out}".format(
tag=data.get('tag'),
cluster_name=data.get('cluster_name'),
active_shards=data.get('active_shards'),
initializing_shards=data.get('initializing_shards'),
relocating_shards=data.get('relocating_shards'),
unassigned_shards=data.get('unassigned_shards'),
timed_out=data.get('timed_out'),
)
)
self.service_check(self.SERVICE_CHECK_CLUSTER_STATUS, status, message=msg, tags=service_check_tags)
def _process_policy_data(self, data, version, base_tags):
for policy, policy_data in iteritems(data):
repo = policy_data.get('policy', {}).get('repository', 'unknown')
tags = base_tags + ['policy:{}'.format(policy), 'repository:{}'.format(repo)]
slm_stats = slm_stats_for_version(version)
for metric, desc in iteritems(slm_stats):
self._process_metric(policy_data, metric, *desc, tags=tags)
def _create_event(self, status, tags=None):
hostname = to_string(self.hostname)
if status == "red":
alert_type = "error"
msg_title = "{} is {}".format(hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "{} is {}".format(hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "{} recovered as {}".format(hostname, status)
msg = "ElasticSearch: {} just reported as {}".format(hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags,
}
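The `_process_metric` helper above resolves each metric by walking a dotted `path` through the nested JSON returned by Elasticsearch, then submits the result as a gauge or rate. A minimal standalone sketch of that traversal, with an invented payload and `print` standing in for the Agent's `gauge`/`rate` calls (the keys and metric names below are illustrative, not the check's real definitions):

# Standalone sketch of the dotted-path lookup used by _process_metric above.
# The payload and the metric table are invented for illustration only.
sample_node = {
    'indices': {'docs': {'count': 1200}, 'store': {'size_in_bytes': 52428800}},
    'jvm': {'mem': {'heap_used_percent': 37}},
}
metrics = {
    'elasticsearch.docs.count': ('gauge', 'indices.docs.count'),
    'elasticsearch.heap.used_percent': ('gauge', 'jvm.mem.heap_used_percent'),
    'elasticsearch.missing.metric': ('gauge', 'indices.not.there'),
}
for name, (xtype, path) in metrics.items():
    value = sample_node
    for key in path.split('.'):
        if value is None:
            break
        value = value.get(key)
    if value is not None:
        print('submit {} {}={}'.format(xtype, name, value))
    else:
        print('Metric not found: {} -> {}'.format(path, name))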
| true
| true
|
f70cd102871d0a2b4ab9d248f4fe7f91a22d9fd8
| 2,833
|
py
|
Python
|
userman/uimodules.py
|
aanil/userman
|
c4b09a8183a01da6e4b0402be4da3bde88eada5c
|
[
"MIT"
] | null | null | null |
userman/uimodules.py
|
aanil/userman
|
c4b09a8183a01da6e4b0402be4da3bde88eada5c
|
[
"MIT"
] | null | null | null |
userman/uimodules.py
|
aanil/userman
|
c4b09a8183a01da6e4b0402be4da3bde88eada5c
|
[
"MIT"
] | 2
|
2019-08-14T08:33:14.000Z
|
2020-01-30T14:13:57.000Z
|
" Userman: UI modules. "
import tornado.web
from . import constants
class Icon(tornado.web.UIModule):
"HTML for an icon, optionally labelled with a title."
template = """<img src="{url}" class="icon" alt="{alt}" title="{title}">"""
def render(self, name, title=None, label=False):
if not isinstance(name, basestring):
name = name[constants.DB_DOCTYPE]
Name = name.capitalize()
value = self.template.format(url=self.handler.static_url(name + '.png'),
alt=Name,
title=title or Name)
if label:
value += ' ' + (title or Name)
return value
class Doc(tornado.web.UIModule):
"HTML for a linkified document."
iconfilename = None
keyfield = '_id'
template = """<a href="{url}">""" \
"""<img src="{src}" class="icon" alt="{title}" title="{title}">""" \
""" {title}</a>"""
def render(self, doc, title=None):
self.doc = doc
return self.template.format(
url=self.handler.reverse_url(self.__class__.__name__.lower(),
doc[self.keyfield]),
src=self.handler.static_url(self.iconfilename),
title=title or self.get_title())
def get_title(self):
try:
return self.doc['name']
except KeyError:
return self.doc['_id']
class User(Doc):
"HTML for a linkified user document."
keyfield = 'email'
@property
def iconfilename(self):
if self.doc['role'] == constants.ADMIN:
return 'admin.png'
else:
return 'user.png'
def get_title(self):
return self.doc['email']
class Team(Doc):
"HTML for a linkified team document."
iconfilename = 'team.png'
keyfield = 'name'
class Service(Doc):
"HTML for a linkified service document."
iconfilename = 'service.png'
keyfield = 'name'
class Submit(tornado.web.UIModule):
"HTML for a submit button with an icon, optionally with a different title."
def render(self, name, title=None, onclick=None):
if onclick:
result = """<button type="submit" onclick="{0}">""".format(onclick)
else:
result = """<button type="submit">"""
Name = name.capitalize()
result += """<img src="{url}" alt="{name}" title="{name}">""".format(
url=self.handler.static_url(name + '.png'),
name=Name)
result += ' ' + (title or Name)
result += '</button>'
return result
class Access(Icon):
"HTML for access flag: 'public' or 'private'."
def render(self, item, label=False):
name = item.get('public') and 'public' or 'private'
return super(Access, self).render(name, label=label)
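The UI modules defined above become available to templates once they are registered on the Tornado application. A minimal, hypothetical registration (the handler list, paths and settings are placeholders; userman's real application setup may differ):

# Hypothetical wiring of the UI modules above into a Tornado application.
# Handlers and paths are placeholders, not taken from userman.
import tornado.web
from userman import uimodules

application = tornado.web.Application(
    handlers=[],               # userman's request handlers would go here
    ui_modules=uimodules,      # enables {% module Icon('user', label=True) %} in templates
    template_path='templates',
    static_path='static',
)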
| 28.33
| 80
| 0.560537
|
import tornado.web
from . import constants
class Icon(tornado.web.UIModule):
template = """<img src="{url}" class="icon" alt="{alt}" title="{title}">"""
def render(self, name, title=None, label=False):
if not isinstance(name, basestring):
name = name[constants.DB_DOCTYPE]
Name = name.capitalize()
value = self.template.format(url=self.handler.static_url(name + '.png'),
alt=Name,
title=title or Name)
if label:
value += ' ' + (title or Name)
return value
class Doc(tornado.web.UIModule):
iconfilename = None
keyfield = '_id'
template = """<a href="{url}">""" \
"""<img src="{src}" class="icon" alt="{title}" title="{title}">""" \
""" {title}</a>"""
def render(self, doc, title=None):
self.doc = doc
return self.template.format(
url=self.handler.reverse_url(self.__class__.__name__.lower(),
doc[self.keyfield]),
src=self.handler.static_url(self.iconfilename),
title=title or self.get_title())
def get_title(self):
try:
return self.doc['name']
except KeyError:
return self.doc['_id']
class User(Doc):
keyfield = 'email'
@property
def iconfilename(self):
if self.doc['role'] == constants.ADMIN:
return 'admin.png'
else:
return 'user.png'
def get_title(self):
return self.doc['email']
class Team(Doc):
iconfilename = 'team.png'
keyfield = 'name'
class Service(Doc):
iconfilename = 'service.png'
keyfield = 'name'
class Submit(tornado.web.UIModule):
def render(self, name, title=None, onclick=None):
if onclick:
result = """<button type="submit" onclick="{0}">""".format(onclick)
else:
result = """<button type="submit">"""
Name = name.capitalize()
result += """<img src="{url}" alt="{name}" title="{name}">""".format(
url=self.handler.static_url(name + '.png'),
name=Name)
result += ' ' + (title or Name)
result += '</button>'
return result
class Access(Icon):
def render(self, item, label=False):
name = item.get('public') and 'public' or 'private'
return super(Access, self).render(name, label=label)
| true
| true
|
f70cd157208600d80c321cc62075315fca2dedd4
| 1,453
|
py
|
Python
|
Loop_Induced_Vertices/coupling_orders.py
|
ycwu1030/2HDM_FR
|
599490fd785cb67e3e4ffad1fa7536906ac8bcd5
|
[
"MIT"
] | 1
|
2019-09-04T01:44:29.000Z
|
2019-09-04T01:44:29.000Z
|
Loop_Induced_Vertices/coupling_orders.py
|
ycwu1030/2HDM_FR
|
599490fd785cb67e3e4ffad1fa7536906ac8bcd5
|
[
"MIT"
] | null | null | null |
Loop_Induced_Vertices/coupling_orders.py
|
ycwu1030/2HDM_FR
|
599490fd785cb67e3e4ffad1fa7536906ac8bcd5
|
[
"MIT"
] | null | null | null |
NLOT = CouplingOrder(name = 'NLOT', # ggS triangle nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOTHL = CouplingOrder(name = 'NLOTHL', # ggS triangle nlo couplings for HL
expansion_order = 1,
hierarchy = 2)
NLOTHH = CouplingOrder(name = 'NLOTHH', # ggS triangle nlo couplings for HH
expansion_order = 1,
hierarchy = 2)
NLOTHA = CouplingOrder(name = 'NLOTHA', # ggS triangle nlo couplings for HA
expansion_order = 1,
hierarchy = 2)
NLOB = CouplingOrder(name = 'NLOB', # ggSS box nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOZ = CouplingOrder(name = 'NLOZ', # ggZ nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOEW = CouplingOrder(name = 'NLOEW', # gagaS nlo couplings
expansion_order = 1,
hierarchy = 2)
NLOEWHL = CouplingOrder(name = 'NLOEWHL', # gagaS nlo couplings for HL
expansion_order = 1,
hierarchy = 2)
NLOEWHH = CouplingOrder(name = 'NLOEWHH', # gagaS nlo couplings for HH
expansion_order = 1,
hierarchy = 2)
NLOEWHA = CouplingOrder(name = 'NLOEWHA', # gagaS nlo couplings for HA
expansion_order = 1,
hierarchy = 2)
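In a FeynRules/UFO model such as this, `CouplingOrder` is supplied by the model's `object_library`; roughly, `expansion_order` caps how many powers of the order an amplitude may carry (1 here, since these loop-induced vertices enter once), and `hierarchy` ranks orders when a generator picks defaults. A purely illustrative, runnable stand-in for that constructor (the namedtuple below is not the real UFO class):

from collections import namedtuple

# Illustrative stand-in for UFO's object_library.CouplingOrder; not the real class.
CouplingOrder = namedtuple('CouplingOrder', ['name', 'expansion_order', 'hierarchy'])

NLOT = CouplingOrder(name='NLOT', expansion_order=1, hierarchy=2)
print(NLOT)  # CouplingOrder(name='NLOT', expansion_order=1, hierarchy=2)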
| 35.439024
| 75
| 0.527185
|
NLOT = CouplingOrder(name = 'NLOT', expansion_order = 1,
hierarchy = 2)
NLOTHL = CouplingOrder(name = 'NLOTHL', expansion_order = 1,
hierarchy = 2)
NLOTHH = CouplingOrder(name = 'NLOTHH', expansion_order = 1,
hierarchy = 2)
NLOTHA = CouplingOrder(name = 'NLOTHA', expansion_order = 1,
hierarchy = 2)
NLOB = CouplingOrder(name = 'NLOB', expansion_order = 1,
hierarchy = 2)
NLOZ = CouplingOrder(name = 'NLOZ', expansion_order = 1,
hierarchy = 2)
NLOEW = CouplingOrder(name = 'NLOEW', expansion_order = 1,
hierarchy = 2)
NLOEWHL = CouplingOrder(name = 'NLOEWHL', expansion_order = 1,
hierarchy = 2)
NLOEWHH = CouplingOrder(name = 'NLOEWHH', expansion_order = 1,
hierarchy = 2)
NLOEWHA = CouplingOrder(name = 'NLOEWHA', expansion_order = 1,
hierarchy = 2)
| true
| true
|
f70cd2afa555b4dcfd07279b863b47a27fa4da23
| 25,532
|
py
|
Python
|
backend/lib/grpc/_server.py
|
isaiah-solo/Droptalk
|
578a647adceecfae9d30ca6b98fdaae7077d683f
|
[
"MIT"
] | null | null | null |
backend/lib/grpc/_server.py
|
isaiah-solo/Droptalk
|
578a647adceecfae9d30ca6b98fdaae7077d683f
|
[
"MIT"
] | null | null | null |
backend/lib/grpc/_server.py
|
isaiah-solo/Droptalk
|
578a647adceecfae9d30ca6b98fdaae7077d683f
|
[
"MIT"
] | null | null | null |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].received_message.bytes()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple(
'_HandlerCallDetails', ('method', 'invocation_metadata',)),
grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
def _possibly_finish_call(state, token):
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[0].received_cancelled:
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request, request_deserializer)
with state.condition:
if request is None:
_abort(
state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(self._rpc_event.request_call_details.deadline - time.time(), 0)
def cancel(self):
self._rpc_event.operation_call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return _common.application_metadata(self._rpc_event.request_metadata)
def peer(self):
return _common.decode(self._rpc_event.operation_call.peer())
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.operation_send_initial_metadata(
_common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
self._rpc_event.operation_call.start_server_batch(
cygrpc.Operations((operation,)),
_send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = _common.cygrpc_metadata(
trailing_metadata)
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(self._state, self._call, self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
start_server_batch_result = rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(
state, rpc_event.operation_call, request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.request_call_details.method)
_abort(
state, rpc_event.operation_call,
cygrpc.StatusCode.unimplemented, _common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception calling application: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(
state, rpc_event.operation_call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
token = _SEND_MESSAGE_TOKEN
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations), _send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.operation_send_status_from_server(
trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(cygrpc.operation_send_message(
serialized_response, _EMPTY_FLAGS))
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _stream_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state, serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.unary_unary,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.unary_stream,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.stream_stream,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(
_HandlerCallDetails(
_common.decode(rpc_event.request_call_details.method),
rpc_event.request_metadata))
if method_handler is not None:
return method_handler
else:
return None
def _handle_unrecognized_method(rpc_event):
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),
)
rpc_state = _RPCState()
rpc_event.operation_call.start_server_batch(
operations, lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
_handle_stream_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_stream_unary(rpc_event, state, method_handler, thread_pool)
else:
if method_handler.response_streaming:
_handle_unary_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_unary_unary(rpc_event, state, method_handler, thread_pool)
return state
def _handle_call(rpc_event, generic_handlers, thread_pool):
if rpc_event.request_call_details.method is not None:
method_handler = _find_method_handler(rpc_event, generic_handlers)
if method_handler is None:
return _handle_unrecognized_method(rpc_event)
else:
return _handle_with_method_handler(rpc_event, method_handler, thread_pool)
else:
return None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
def __init__(self, completion_queue, server, generic_handlers, thread_pool):
self.lock = threading.Lock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address, server_credentials._credentials)
def _request_call(state):
state.server.request_call(
state.completion_queue, state.completion_queue, _REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
rpc_state = _handle_call(
event, state.generic_handlers, state.thread_pool)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
def cleanup_server(timeout):
if timeout is None:
_stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
else:
_stop(state, timeout).wait()
thread = _common.CleanupThread(
cleanup_server, target=_serve, args=(state,))
thread.start()
class Server(grpc.Server):
def __init__(self, thread_pool, generic_handlers):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server()
server.register_completion_queue(completion_queue)
self._state = _ServerState(
completion_queue, server, generic_handlers, thread_pool)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state, _common.encode(address), server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
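Applications normally reach the `Server` class above through the public `grpc.server()` factory rather than constructing it directly. A minimal sketch of that entry point, assuming the modern factory signature `grpc.server(thread_pool)` (snapshots as old as this one may have ordered the arguments differently), with the servicer registration left as a commented placeholder:

# Minimal sketch of starting a server through the public gRPC API.
# The servicer registration is a placeholder from the standard examples.
import time
from concurrent import futures

import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
# my_service_pb2_grpc.add_MyServicer_to_server(MyServicer(), server)  # placeholder
server.add_insecure_port('[::]:50051')
server.start()                  # drives _start()/_serve() in the module above
try:
    while True:
        time.sleep(3600)        # start() does not block, so keep the process alive
except KeyboardInterrupt:
    server.stop(0)              # maps to _stop(state, grace=0)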
| 33.727873
| 85
| 0.721722
|
import collections
import enum
import logging
import threading
import time
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].received_message.bytes()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple(
'_HandlerCallDetails', ('method', 'invocation_metadata',)),
grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
def _possibly_finish_call(state, token):
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata), effective_code,
effective_details, _EMPTY_FLAGS),
)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[0].received_cancelled:
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request, request_deserializer)
with state.condition:
if request is None:
_abort(
state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(self._rpc_event.request_call_details.deadline - time.time(), 0)
def cancel(self):
self._rpc_event.operation_call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return _common.application_metadata(self._rpc_event.request_metadata)
def peer(self):
return _common.decode(self._rpc_event.operation_call.peer())
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.operation_send_initial_metadata(
_common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
self._rpc_event.operation_call.start_server_batch(
cygrpc.Operations((operation,)),
_send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = _common.cygrpc_metadata(
trailing_metadata)
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
cygrpc.Operations((cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(self._state, self._call, self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
start_server_batch_result = rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(
state, rpc_event.operation_call, request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.request_call_details.method)
_abort(
state, rpc_event.operation_call,
cygrpc.StatusCode.unimplemented, _common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
  except Exception as e:
    with state.condition:
if e not in state.rpc_errors:
details = 'Exception calling application: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
  except Exception as e:
    with state.condition:
if e not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(
state, rpc_event.operation_call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (
cygrpc.operation_send_message(serialized_response, _EMPTY_FLAGS),
)
token = _SEND_MESSAGE_TOKEN
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations), _send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.operation_send_status_from_server(
trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata(
_EMPTY_METADATA, _EMPTY_FLAGS))
if serialized_response is not None:
operations.append(cygrpc.operation_send_message(
serialized_response, _EMPTY_FLAGS))
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _stream_response_in_pool(
rpc_event, state, behavior, argument_thunk, request_deserializer,
response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state, serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.unary_unary,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(
rpc_event, state, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.unary_stream,
unary_request, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(
state, rpc_event.operation_call, method_handler.request_deserializer)
thread_pool.submit(
_stream_response_in_pool, rpc_event, state, method_handler.stream_stream,
lambda: request_iterator, method_handler.request_deserializer,
method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(
_HandlerCallDetails(
_common.decode(rpc_event.request_call_details.method),
rpc_event.request_metadata))
if method_handler is not None:
return method_handler
else:
return None
def _handle_unrecognized_method(rpc_event):
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),
)
rpc_state = _RPCState()
rpc_event.operation_call.start_server_batch(
operations, lambda ignored_event: (rpc_state, (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
_handle_stream_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_stream_unary(rpc_event, state, method_handler, thread_pool)
else:
if method_handler.response_streaming:
_handle_unary_stream(rpc_event, state, method_handler, thread_pool)
else:
_handle_unary_unary(rpc_event, state, method_handler, thread_pool)
return state
def _handle_call(rpc_event, generic_handlers, thread_pool):
if rpc_event.request_call_details.method is not None:
method_handler = _find_method_handler(rpc_event, generic_handlers)
if method_handler is None:
return _handle_unrecognized_method(rpc_event)
else:
return _handle_with_method_handler(rpc_event, method_handler, thread_pool)
else:
return None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
def __init__(self, completion_queue, server, generic_handlers, thread_pool):
self.lock = threading.Lock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address, server_credentials._credentials)
def _request_call(state):
state.server.request_call(
state.completion_queue, state.completion_queue, _REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
rpc_state = _handle_call(
event, state.generic_handlers, state.thread_pool)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
def cleanup_server(timeout):
if timeout is None:
_stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
else:
_stop(state, timeout).wait()
thread = _common.CleanupThread(
cleanup_server, target=_serve, args=(state,))
thread.start()
class Server(grpc.Server):
def __init__(self, thread_pool, generic_handlers):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server()
server.register_completion_queue(completion_queue)
self._state = _ServerState(
completion_queue, server, generic_handlers, thread_pool)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state, _common.encode(address), server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
| true
| true
|
f70cd34424661c462973e9a8f65d8066ae8d1cf4
| 756
|
py
|
Python
|
var/spack/repos/builtin/packages/libxevie/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/libxevie/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/libxevie/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxevie(AutotoolsPackage, XorgPackage):
"""Xevie - X Event Interception Extension (XEvIE)."""
homepage = "https://cgit.freedesktop.org/xorg/lib/libXevie"
xorg_mirror_path = "lib/libXevie-1.0.3.tar.gz"
version('1.0.3', sha256='3759bb1f7fdade13ed99bfc05c0717bc42ce3f187e7da4eef80beddf5e461258')
depends_on('libx11')
depends_on('libxext')
depends_on('xproto')
depends_on('xextproto')
depends_on('evieext')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| 30.24
| 95
| 0.72619
|
from spack import *
class Libxevie(AutotoolsPackage, XorgPackage):
homepage = "https://cgit.freedesktop.org/xorg/lib/libXevie"
xorg_mirror_path = "lib/libXevie-1.0.3.tar.gz"
version('1.0.3', sha256='3759bb1f7fdade13ed99bfc05c0717bc42ce3f187e7da4eef80beddf5e461258')
depends_on('libx11')
depends_on('libxext')
depends_on('xproto')
depends_on('xextproto')
depends_on('evieext')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| true
| true
|
f70cd36f728286c806355f96ef79cdda124c1004
| 2,713
|
py
|
Python
|
app/recipe/tests/test_ingredients_api.py
|
alyssonDeAssis/recipe-app-api
|
00bc48afbf598ecf981a96c61535683c915946fb
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_ingredients_api.py
|
alyssonDeAssis/recipe-app-api
|
00bc48afbf598ecf981a96c61535683c915946fb
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_ingredients_api.py
|
alyssonDeAssis/recipe-app-api
|
00bc48afbf598ecf981a96c61535683c915946fb
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
# Test the publicly available ingredients API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
# Test tha login is required to access the endpoint
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
# Test the private ingredients API
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
# Test retrieving a list of ingredients
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
# Test that ingredients for the authenticated user are returned
user2 = get_user_model().objects.create_user(
'other@test.com',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
# Test create a new ingredient
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
# Test creating invalid ingredient fails
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
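The tests above assume an `Ingredient` model in `core.models` with a `name` field and a `user` foreign key, plus a thin `ModelSerializer` in `recipe.serializers`. A minimal sketch of what those definitions typically look like in this kind of project (anything beyond the `name` and `user` fields is a guess, not taken from the repository):

# Hypothetical shapes of the model and serializer exercised by the tests above.
from django.conf import settings
from django.db import models
from rest_framework import serializers


class Ingredient(models.Model):
    # Ingredient to be used in a recipe (assumed definition)
    name = models.CharField(max_length=255)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    def __str__(self):
        return self.name


class IngredientSerializer(serializers.ModelSerializer):
    # Serializer for ingredient objects (assumed definition)
    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        read_only_fields = ('id',)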
| 31.917647
| 78
| 0.686694
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@test.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
Ingredient.objects.create(user=self.user, name='Kale')
Ingredient.objects.create(user=self.user, name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
user2 = get_user_model().objects.create_user(
'other@test.com',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
| true
| true
|
f70cd3ca35aac69835bd12a3ca1e659ecd71a94e
| 388
|
py
|
Python
|
files/lickeeper.py
|
xoosite/ido.py
|
811d6477560fc5f635a4d1c8817f4a4667e6c251
|
[
"MIT"
] | null | null | null |
files/lickeeper.py
|
xoosite/ido.py
|
811d6477560fc5f635a4d1c8817f4a4667e6c251
|
[
"MIT"
] | null | null | null |
files/lickeeper.py
|
xoosite/ido.py
|
811d6477560fc5f635a4d1c8817f4a4667e6c251
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
#=============================================================================
# FileName: lickeeper.py
# Desc:
# Author: Jeyrce.Lu
# Email: jianxin.lu@woqutech.com
# HomePage: www.woqutech.com
# Version: 0.0.1
# LastChange: 2021/1/13 上午11:18
# History:
#=============================================================================
"""
| 25.866667
| 78
| 0.384021
| true
| true
|
|
f70cd53a0ab1a7e8ce5258207595405255bcc6d9
| 1,343
|
py
|
Python
|
gandyloo/connection.py
|
kazimuth/gandyloo
|
caa3ae360b5498b147281cc59973f7da3069dfdd
|
[
"MIT"
] | 2
|
2015-11-10T20:42:51.000Z
|
2021-12-10T07:50:37.000Z
|
gandyloo/connection.py
|
kazimuth/gandyloo
|
caa3ae360b5498b147281cc59973f7da3069dfdd
|
[
"MIT"
] | null | null | null |
gandyloo/connection.py
|
kazimuth/gandyloo
|
caa3ae360b5498b147281cc59973f7da3069dfdd
|
[
"MIT"
] | null | null | null |
from twisted.internet.protocol import Protocol
from gandyloo import parse
from gandyloo import message
class MinesweeperClient(Protocol):
'''Represents a connection to a server using twisted's Protocol framework.
Created with an event sink, where parsed events (subclasses of
gandyloo.message.Response) are fired. Sink should have a method
self.response(resp).
'''
def __init__(self, event_sink):
self.buffer = ""
self.hello_received = False
self.size = None
self.event_sink = event_sink
def dataReceived(self, data):
self.buffer += data
if not self.hello_received:
try:
resp, self.buffer = parse.parse_start(self.buffer, first=True)
except parse.NotReadyError:
return # Haven't received enough data yet
self.hello_received = True
self.size = resp.size
self.event_sink.response(resp)
try:
while True:
resp, self.buffer = parse.parse_start(self.buffer, self.size)
self.event_sink.response(resp)
except parse.NotReadyError:
return
def command(self, command):
self.transport.write(command.render())
def clientConnectionLost(self, connection, reason):
self.event_sink.response(message.CloseResp(reason))
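# --- Hedged usage sketch (not part of the original connection.py) ---
# The class docstring above says parsed responses are delivered to an event
# sink exposing `response(resp)`. A minimal sink satisfying that interface
# might look like this; the twisted factory wiring is omitted and assumed.
class _PrintingSink:
    """Toy sink: prints every parsed response it receives."""
    def response(self, resp):
        print("response:", resp)
# A client would then be built as MinesweeperClient(_PrintingSink()).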
| 31.97619
| 78
| 0.636634
|
from twisted.internet.protocol import Protocol
from gandyloo import parse
from gandyloo import message
class MinesweeperClient(Protocol):
def __init__(self, event_sink):
self.buffer = ""
self.hello_received = False
self.size = None
self.event_sink = event_sink
def dataReceived(self, data):
self.buffer += data
if not self.hello_received:
try:
resp, self.buffer = parse.parse_start(self.buffer, first=True)
except parse.NotReadyError:
return
self.hello_received = True
self.size = resp.size
self.event_sink.response(resp)
try:
while True:
resp, self.buffer = parse.parse_start(self.buffer, self.size)
self.event_sink.response(resp)
except parse.NotReadyError:
return
def command(self, command):
self.transport.write(command.render())
def clientConnectionLost(self, connection, reason):
self.event_sink.response(message.CloseResp(reason))
| true
| true
|
f70cd5999a4ccff18cd0e1d6ac5ea0040faf88ed
| 655
|
py
|
Python
|
tests/core/fixtures/features.py
|
jsam/datagears
|
d2400f20069898a959fbe182931faba1c90b7181
|
[
"BSD-3-Clause"
] | 3
|
2020-11-09T00:05:59.000Z
|
2021-03-31T11:30:22.000Z
|
tests/core/fixtures/features.py
|
jsam/datagears
|
d2400f20069898a959fbe182931faba1c90b7181
|
[
"BSD-3-Clause"
] | 28
|
2020-11-12T14:56:21.000Z
|
2022-03-07T11:07:08.000Z
|
tests/core/fixtures/features.py
|
jsam/datagears
|
d2400f20069898a959fbe182931faba1c90b7181
|
[
"BSD-3-Clause"
] | 1
|
2021-04-03T14:35:19.000Z
|
2021-04-03T14:35:19.000Z
|
import pytest
from datagears.core.network import Network
@pytest.fixture
def myfeature() -> Network:
"""Testing fixture for a feature."""
from datagears.core.network import Network
from datagears.features.dummy import my_out
network = Network("my-network", outputs=[my_out])
return network
@pytest.fixture
def store_feature() -> Network:
"""Testing fixture for a feature."""
from datagears.core.network import Network
from datagears.core.stores import FeatureStore
from datagears.features.dummy import my_out
network = Network("my-network", outputs=[my_out], feature_store=FeatureStore())
return network
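# --- Hedged usage sketch (not part of the original fixtures module) ---
# pytest injects fixtures by parameter name, so a test elsewhere in the suite
# could consume `myfeature` like this (renamed with a leading underscore here
# so it is not collected by accident; the Network API details are assumptions).
def _example_test_network_builds(myfeature):
    assert myfeature is not None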
| 26.2
| 83
| 0.734351
|
import pytest
from datagears.core.network import Network
@pytest.fixture
def myfeature() -> Network:
from datagears.core.network import Network
from datagears.features.dummy import my_out
network = Network("my-network", outputs=[my_out])
return network
@pytest.fixture
def store_feature() -> Network:
from datagears.core.network import Network
from datagears.core.stores import FeatureStore
from datagears.features.dummy import my_out
network = Network("my-network", outputs=[my_out], feature_store=FeatureStore())
return network
| true
| true
|
f70cd67da850213462cc6e238f500b2e7885ed5b
| 6,296
|
py
|
Python
|
curriculums.py
|
MrTornado24/FENeRF
|
9d90acda243b7c7d7f2c688a3bb333da2e7f8894
|
[
"MIT"
] | 22
|
2022-03-18T16:29:04.000Z
|
2022-03-31T12:17:55.000Z
|
curriculums.py
|
MrTornado24/FENeRF
|
9d90acda243b7c7d7f2c688a3bb333da2e7f8894
|
[
"MIT"
] | 2
|
2022-03-28T09:21:27.000Z
|
2022-03-28T09:30:16.000Z
|
curriculums.py
|
MrTornado24/FENeRF
|
9d90acda243b7c7d7f2c688a3bb333da2e7f8894
|
[
"MIT"
] | 1
|
2022-03-20T14:15:11.000Z
|
2022-03-20T14:15:11.000Z
|
from generators.neural_rendering import NeuralRenderer
import math
def next_upsample_step(curriculum, current_step):
# Return the step at which the curriculum will next upsample
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata['img_size']
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if curriculum_step > current_step and curriculum[curriculum_step].get('img_size', 512) > current_size:
return curriculum_step
return float('Inf')
def last_upsample_step(curriculum, current_step):
# Returns the start step of the current stage, i.e. the step
# at which it last upsampled
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata['img_size']
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if curriculum_step <= current_step and curriculum[curriculum_step]['img_size'] == current_size:
return curriculum_step
return 0
def get_current_step(curriculum, epoch):
step = 0
for update_epoch in curriculum['update_epochs']:
if epoch >= update_epoch:
step += 1
return step
def extract_metadata(curriculum, current_step):
return_dict = {}
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int], reverse=True):
if curriculum_step <= current_step:
for key, value in curriculum[curriculum_step].items():
return_dict[key] = value
break
for key in [k for k in curriculum.keys() if type(k) != int]:
return_dict[key] = curriculum[key]
return return_dict
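# Hedged illustration (not in the original file): extract_metadata() merges the
# newest integer-keyed stage at or below current_step with every string-keyed
# (global) entry. For example, with the CelebA_double_semantic curriculum below,
# extract_metadata(CelebA_double_semantic, 30000)['img_size'] evaluates to 64
# (from the int(10e3) stage), while 'fov' still comes from the shared keys.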
CelebA = {
0: {'batch_size': 24 * 2, 'num_steps': 12, 'img_size': 64, 'batch_split': 2, 'gen_lr': 6e-5, 'disc_lr': 2e-4},
int(200e3): {},
# 'dataset_path': '/home/ericryanchan/data/celeba/img_align_celeba/*.jpg',
'dataset_path': '/media/data2/sunjx/FENeRF/data/celebahq/data512x512/*.jpg',
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': False,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_dim': 512,
'output_dim': 4,
'grad_clip': 10,
'model': 'SPATIALSIRENBASELINE',
# 'model': 'EmbeddingPiGAN128',
'generator': 'ImplicitGenerator3d',
'discriminator': 'CCSEncoderDiscriminator',
'dataset': 'CelebA',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': True,
'fill_mode': 'eval_white_back',
'target_size': 128
}
CelebA_double_semantic = {
0: {'batch_size': 24, 'num_steps': 12, 'img_size': 32, 'batch_split': 6, 'gen_lr': 5e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 1e-4},
int(10e3): {'batch_size': 12, 'num_steps': 12, 'img_size': 64, 'batch_split': 2, 'gen_lr':2e-5, 'disc_img_lr': 1e-4, 'disc_seg_lr': 5e-5},
int(50e3):{'batch_size': 4, 'num_steps': 24, 'img_size': 128, 'batch_split': 4, 'gen_lr': 5e-6, 'disc_img_lr': 5e-5, 'disc_seg_lr': 2e-5},
int(500e3): {},
# 'dataset_path': '/home/ericryanchan/data/celeba/img_align_celeba/*.jpg',
'dataset_path': 'data/celebahq_mask',
'background_mask': True,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': True,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_geo_dim': 256,
'latent_app_dim': 256,
'output_dim': 22,
'grad_clip': 10,
# 'model': 'SPATIALSIRENSEMANTICDISENTANGLE',
'model': 'SIRENBASELINESEMANTICDISENTANGLE',
'generator': 'DoubleImplicitGenerator3d',
'discriminator_img': 'CCSDoubleEncoderDiscriminator',
'discriminator_seg': 'CCSDoubleEncoderDiscriminator',
'dataset': 'CelebAMaskHQ_wo_background_seg_18',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_geo_lambda': 0,
'z_app_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': False,
'd_seg_loss_lambda': 0.1,
'g_seg_loss_lambda': 0.1,
'softmax_label': False,
'target_size': 128,
'fill_mode': 'seg_padding_background'
}
CelebA_double_semantic_texture_embedding_256_dim_96 = {
0: {'batch_size': 24, 'num_steps': 24, 'img_size': 32, 'batch_split': 4, 'gen_lr': 6e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 2e-4},
int(20e3): {'batch_size': 48, 'num_steps': 24, 'img_size': 64, 'batch_split': 4, 'gen_lr':6e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 2e-4},
int(50e3):{'batch_size': 24, 'num_steps': 24, 'img_size': 128, 'batch_split': 4, 'gen_lr': 2e-5, 'disc_img_lr': 5e-5, 'disc_seg_lr': 2e-5},
int(500e3): {},
'dataset_path': 'data/celebahq_mask',
'background_mask': True,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': True,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_geo_dim': 256,
'latent_app_dim': 256,
'output_dim': 22,
'grad_clip': 10,
# 'model': 'SIRENBASELINESEMANTICDISENTANGLE',
'model': 'TextureEmbeddingPiGAN256SEMANTICDISENTANGLE_DIM_96',
'generator': 'DoubleImplicitGenerator3d',
'discriminator_img': 'CCSDoubleEncoderDiscriminator',
'discriminator_seg': 'CCSDoubleEncoderDiscriminator',
'dataset': 'CelebAMaskHQ_wo_background_seg_18',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_geo_lambda': 0,
'z_app_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': False,
'd_seg_loss_lambda': 0.1,
'g_seg_loss_lambda': 0.1,
'softmax_label': False,
'target_size': 128,
'fill_mode': 'seg_padding_background'
}
| 35.173184
| 143
| 0.642471
|
from generators.neural_rendering import NeuralRenderer
import math
def next_upsample_step(curriculum, current_step):
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata['img_size']
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if curriculum_step > current_step and curriculum[curriculum_step].get('img_size', 512) > current_size:
return curriculum_step
return float('Inf')
def last_upsample_step(curriculum, current_step):
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata['img_size']
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if curriculum_step <= current_step and curriculum[curriculum_step]['img_size'] == current_size:
return curriculum_step
return 0
def get_current_step(curriculum, epoch):
step = 0
for update_epoch in curriculum['update_epochs']:
if epoch >= update_epoch:
step += 1
return step
def extract_metadata(curriculum, current_step):
return_dict = {}
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int], reverse=True):
if curriculum_step <= current_step:
for key, value in curriculum[curriculum_step].items():
return_dict[key] = value
break
for key in [k for k in curriculum.keys() if type(k) != int]:
return_dict[key] = curriculum[key]
return return_dict
CelebA = {
0: {'batch_size': 24 * 2, 'num_steps': 12, 'img_size': 64, 'batch_split': 2, 'gen_lr': 6e-5, 'disc_lr': 2e-4},
int(200e3): {},
'dataset_path': '/media/data2/sunjx/FENeRF/data/celebahq/data512x512/*.jpg',
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': False,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_dim': 512,
'output_dim': 4,
'grad_clip': 10,
'model': 'SPATIALSIRENBASELINE',
'generator': 'ImplicitGenerator3d',
'discriminator': 'CCSEncoderDiscriminator',
'dataset': 'CelebA',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': True,
'fill_mode': 'eval_white_back',
'target_size': 128
}
CelebA_double_semantic = {
0: {'batch_size': 24, 'num_steps': 12, 'img_size': 32, 'batch_split': 6, 'gen_lr': 5e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 1e-4},
int(10e3): {'batch_size': 12, 'num_steps': 12, 'img_size': 64, 'batch_split': 2, 'gen_lr':2e-5, 'disc_img_lr': 1e-4, 'disc_seg_lr': 5e-5},
int(50e3):{'batch_size': 4, 'num_steps': 24, 'img_size': 128, 'batch_split': 4, 'gen_lr': 5e-6, 'disc_img_lr': 5e-5, 'disc_seg_lr': 2e-5},
int(500e3): {},
'dataset_path': 'data/celebahq_mask',
'background_mask': True,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': True,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_geo_dim': 256,
'latent_app_dim': 256,
'output_dim': 22,
'grad_clip': 10,
'model': 'SIRENBASELINESEMANTICDISENTANGLE',
'generator': 'DoubleImplicitGenerator3d',
'discriminator_img': 'CCSDoubleEncoderDiscriminator',
'discriminator_seg': 'CCSDoubleEncoderDiscriminator',
'dataset': 'CelebAMaskHQ_wo_background_seg_18',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_geo_lambda': 0,
'z_app_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': False,
'd_seg_loss_lambda': 0.1,
'g_seg_loss_lambda': 0.1,
'softmax_label': False,
'target_size': 128,
'fill_mode': 'seg_padding_background'
}
CelebA_double_semantic_texture_embedding_256_dim_96 = {
0: {'batch_size': 24, 'num_steps': 24, 'img_size': 32, 'batch_split': 4, 'gen_lr': 6e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 2e-4},
int(20e3): {'batch_size': 48, 'num_steps': 24, 'img_size': 64, 'batch_split': 4, 'gen_lr':6e-5, 'disc_img_lr': 2e-4, 'disc_seg_lr': 2e-4},
int(50e3):{'batch_size': 24, 'num_steps': 24, 'img_size': 128, 'batch_split': 4, 'gen_lr': 2e-5, 'disc_img_lr': 5e-5, 'disc_seg_lr': 2e-5},
int(500e3): {},
'dataset_path': 'data/celebahq_mask',
'background_mask': True,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'fade_steps': 10000,
'h_stddev': 0.3,
'v_stddev': 0.155,
'h_mean': math.pi*0.5,
'v_mean': math.pi*0.5,
'sample_dist': 'gaussian',
'topk_interval': 2000,
'topk_v': 0.6,
'betas': (0, 0.9),
'unique_lr': True,
'weight_decay': 0,
'r1_lambda': 0.2,
'latent_geo_dim': 256,
'latent_app_dim': 256,
'output_dim': 22,
'grad_clip': 10,
'model': 'TextureEmbeddingPiGAN256SEMANTICDISENTANGLE_DIM_96',
'generator': 'DoubleImplicitGenerator3d',
'discriminator_img': 'CCSDoubleEncoderDiscriminator',
'discriminator_seg': 'CCSDoubleEncoderDiscriminator',
'dataset': 'CelebAMaskHQ_wo_background_seg_18',
'clamp_mode': 'relu',
'z_dist': 'gaussian',
'hierarchical_sample': True,
'z_geo_lambda': 0,
'z_app_lambda': 0,
'pos_lambda': 15,
'last_back': False,
'eval_last_back': False,
'd_seg_loss_lambda': 0.1,
'g_seg_loss_lambda': 0.1,
'softmax_label': False,
'target_size': 128,
'fill_mode': 'seg_padding_background'
}
| true
| true
|
f70cd699c1c3b6d728e49d2f94fc7cd65520d19c
| 856
|
py
|
Python
|
vnpy/app/option_master/time.py
|
xiumingxu/vnpy-xx
|
8b2d9ecdabcb7931d46fd92fad2d3701b7e66975
|
[
"MIT"
] | null | null | null |
vnpy/app/option_master/time.py
|
xiumingxu/vnpy-xx
|
8b2d9ecdabcb7931d46fd92fad2d3701b7e66975
|
[
"MIT"
] | null | null | null |
vnpy/app/option_master/time.py
|
xiumingxu/vnpy-xx
|
8b2d9ecdabcb7931d46fd92fad2d3701b7e66975
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import trading_calendars
ANNUAL_DAYS = 240
# Get public holidays data from Shanghai Stock Exchange
cn_calendar = trading_calendars.get_calendar('XSHG')
holidays = [x.to_pydatetime() for x in cn_calendar.precomputed_holidays]
# Filter future public holidays
start = datetime.today()
PUBLIC_HOLIDAYS = [x for x in holidays if x >= start]
def calculate_days_to_expiry(option_expiry: datetime) -> int:
""""""
current_dt = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
days = 1
while current_dt <= option_expiry:
current_dt += timedelta(days=1)
# Ignore weekends
if current_dt.weekday() in [5, 6]:
continue
# Ignore public holidays
if current_dt in PUBLIC_HOLIDAYS:
continue
days += 1
return days
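# --- Hedged usage sketch (not part of the original module) ---
# The helper counts calendar days up to expiry while skipping weekends and the
# XSHG public holidays loaded above; the expiry date below is only illustrative.
if __name__ == "__main__":
    print(calculate_days_to_expiry(datetime(2022, 6, 24)))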
| 24.457143
| 82
| 0.685748
|
from datetime import datetime, timedelta
import trading_calendars
ANNUAL_DAYS = 240
cn_calendar = trading_calendars.get_calendar('XSHG')
holidays = [x.to_pydatetime() for x in cn_calendar.precomputed_holidays]
start = datetime.today()
PUBLIC_HOLIDAYS = [x for x in holidays if x >= start]
def calculate_days_to_expiry(option_expiry: datetime) -> int:
current_dt = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
days = 1
while current_dt <= option_expiry:
current_dt += timedelta(days=1)
if current_dt.weekday() in [5, 6]:
continue
if current_dt in PUBLIC_HOLIDAYS:
continue
days += 1
return days
| true
| true
|
f70cd6cdb85b3472a4bda270047affb6c4255059
| 4,588
|
py
|
Python
|
server/scripts/services.py
|
anarkrypto/nano-dpow
|
339452bf312bd336b493f7dc56370adf31cab219
|
[
"MIT"
] | 35
|
2019-06-09T12:43:06.000Z
|
2022-03-26T21:06:13.000Z
|
server/scripts/services.py
|
anarkrypto/nano-dpow
|
339452bf312bd336b493f7dc56370adf31cab219
|
[
"MIT"
] | 32
|
2019-06-10T00:01:08.000Z
|
2022-03-11T23:53:16.000Z
|
server/scripts/services.py
|
anarkrypto/nano-dpow
|
339452bf312bd336b493f7dc56370adf31cab219
|
[
"MIT"
] | 22
|
2019-06-09T15:13:23.000Z
|
2021-09-10T17:15:27.000Z
|
#!/usr/bin/env python3
import redis
import argparse
import hashlib
from getpass import getpass
r = redis.StrictRedis(host="localhost", port=6379)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--add', action='store_true', help='Adds a service')
group.add_argument('--check', action='store_true', help='Retrieve and print service details')
group.add_argument('--delete', action='store_true', help='Delete a service entry')
group.add_argument('--update', action='store_true', help='Update a service entry')
group.add_argument('--list', action='store_true', help='List all users')
group.add_argument('--stats', action='store_true', help='Statistics for all users')
parser.add_argument('service', nargs='?', default=None, type=str, help='Service username')
args = parser.parse_args()
if not args.service and not (args.list or args.stats):
from sys import exit
parser.print_help()
exit(1)
def hash_key(x: str):
m = hashlib.blake2b()
m.update(x.encode("utf-8"))
return m.digest()
def exists(key: str):
existing = r.exists(f"service:{key}") and r.sismember('services', key)
if not existing:
print(f"{key} not found")
else:
print(f"{key} exists")
return existing
def existing_users():
return [l.decode("utf-8") for l in r.smembers('services')]
def interactive_add():
public = "N/A"
public_opts = ["Y", "N"]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = None
while not api_key:
api_key = getpass("API Key (hidden, will be hashed): ")
options = {
"public": public,
"display": display,
"website": website,
"api_key": hash_key(api_key),
"precache": 0,
"ondemand": 0
}
return options
def interactive_update():
public = "N/A"
public_opts = ["Y", "N", ""]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = getpass("API Key (hidden, will be hashed): ")
options = dict()
if public:
options["public"] = public
if display:
options["display"] = display
if website:
options["website"] = website
if api_key:
options["api_key"] = hash_key(api_key)
return options
def display(user):
options = r.hgetall(f"service:{user}")
options = {k.decode("utf-8"): v for k,v in options.items()}
options = {k: v if k=="api_key" else v.decode("utf-8") for k,v in options.items()}
print(options)
def add(user):
print("Creating new entry.")
options = interactive_add()
r.hmset(f"service:{user}", options)
r.sadd("services", user)
print(f"User {user} created:")
display(user)
def update(user):
print("Updating entry. Leave a field blank to skip.")
options = interactive_update()
if options:
r.hmset(f"service:{user}", options)
print(f"User {user} updated:")
else:
print(f"No changes to {user}:")
display(user)
def delete(user):
print("Deleting entry.")
r.delete(f"service:{user}")
r.srem('services', user)
user_exists = exists(user)
if user_exists:
print("Failure in deleting")
else:
print("Deleting successfull")
def statistics(users):
for user in users:
stats = r.hgetall(f"service:{user}")
stats = {k.decode("utf-8"): v for k,v in stats.items()}
stats = {k: v if k=="api_key" else v.decode("utf-8") for k,v in stats.items()}
print(user)
print(f"\t{'PUBLIC' if stats['public']=='Y' else 'PRIVATE'}\n"
f"\tprecache: {stats.get('precache') or 0}"
f"\tondemand: {stats.get('ondemand') or 0}"
)
def main():
if args.list:
print("Services in database:\n", existing_users())
elif args.stats:
statistics(existing_users())
else:
user = args.service
user_exists = exists(user)
if not user_exists:
if args.add:
add(user)
else:
print("Services in database:\n", existing_users())
else:
if args.check:
display(user)
elif args.delete:
delete(user)
elif args.update:
update(user)
else:
raise NotImplementedError
if __name__ == '__main__':
main()
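# --- Hedged CLI sketch (not part of the original script) ---
# Flag names are taken from the argparse setup above; the service name is
# illustrative:
#   python3 services.py --add my_service     # interactively register a service
#   python3 services.py --check my_service   # print the stored entry
#   python3 services.py --stats              # precache/ondemand totals per service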
| 27.309524
| 93
| 0.602223
|
import redis
import argparse
import hashlib
from getpass import getpass
r = redis.StrictRedis(host="localhost", port=6379)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--add', action='store_true', help='Adds a service')
group.add_argument('--check', action='store_true', help='Retrieve and print service details')
group.add_argument('--delete', action='store_true', help='Delete a service entry')
group.add_argument('--update', action='store_true', help='Update a service entry')
group.add_argument('--list', action='store_true', help='List all users')
group.add_argument('--stats', action='store_true', help='Statistics for all users')
parser.add_argument('service', nargs='?', default=None, type=str, help='Service username')
args = parser.parse_args()
if not args.service and not (args.list or args.stats):
from sys import exit
parser.print_help()
exit(1)
def hash_key(x: str):
m = hashlib.blake2b()
m.update(x.encode("utf-8"))
return m.digest()
def exists(key: str):
existing = r.exists(f"service:{key}") and r.sismember('services', key)
if not existing:
print(f"{key} not found")
else:
print(f"{key} exists")
return existing
def existing_users():
return [l.decode("utf-8") for l in r.smembers('services')]
def interactive_add():
public = "N/A"
public_opts = ["Y", "N"]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = None
while not api_key:
api_key = getpass("API Key (hidden, will be hashed): ")
options = {
"public": public,
"display": display,
"website": website,
"api_key": hash_key(api_key),
"precache": 0,
"ondemand": 0
}
return options
def interactive_update():
public = "N/A"
public_opts = ["Y", "N", ""]
while public not in public_opts:
public = input("Public information? (Y/N): ")
display = input("Display name: ")
website = input("Website: ")
api_key = getpass("API Key (hidden, will be hashed): ")
options = dict()
if public:
options["public"] = public
if display:
options["display"] = display
if website:
options["website"] = website
if api_key:
options["api_key"] = hash_key(api_key)
return options
def display(user):
options = r.hgetall(f"service:{user}")
options = {k.decode("utf-8"): v for k,v in options.items()}
options = {k: v if k=="api_key" else v.decode("utf-8") for k,v in options.items()}
print(options)
def add(user):
print("Creating new entry.")
options = interactive_add()
r.hmset(f"service:{user}", options)
r.sadd("services", user)
print(f"User {user} created:")
display(user)
def update(user):
print("Updating entry. Leave a field blank to skip.")
options = interactive_update()
if options:
r.hmset(f"service:{user}", options)
print(f"User {user} updated:")
else:
print(f"No changes to {user}:")
display(user)
def delete(user):
print("Deleting entry.")
r.delete(f"service:{user}")
r.srem('services', user)
user_exists = exists(user)
if user_exists:
print("Failure in deleting")
else:
print("Deleting successfull")
def statistics(users):
for user in users:
stats = r.hgetall(f"service:{user}")
stats = {k.decode("utf-8"): v for k,v in stats.items()}
stats = {k: v if k=="api_key" else v.decode("utf-8") for k,v in stats.items()}
print(user)
print(f"\t{'PUBLIC' if stats['public']=='Y' else 'PRIVATE'}\n"
f"\tprecache: {stats.get('precache') or 0}"
f"\tondemand: {stats.get('ondemand') or 0}"
)
def main():
if args.list:
print("Services in database:\n", existing_users())
elif args.stats:
statistics(existing_users())
else:
user = args.service
user_exists = exists(user)
if not user_exists:
if args.add:
add(user)
else:
print("Services in database:\n", existing_users())
else:
if args.check:
display(user)
elif args.delete:
delete(user)
elif args.update:
update(user)
else:
raise NotImplementedError
if __name__ == '__main__':
main()
| true
| true
|
f70cd7a8ef49505ac83f768d0974b524ed21ae3f
| 3,867
|
py
|
Python
|
bindings/python/examples/SequenceClassification/SequenceClassification.py
|
KeDengMS/CNTK
|
fce86cd9581e7ba746d1ec75bbd67dd35d35d11c
|
[
"RSA-MD"
] | 1
|
2021-05-09T01:37:49.000Z
|
2021-05-09T01:37:49.000Z
|
bindings/python/examples/SequenceClassification/SequenceClassification.py
|
KeDengMS/CNTK
|
fce86cd9581e7ba746d1ec75bbd67dd35d35d11c
|
[
"RSA-MD"
] | null | null | null |
bindings/python/examples/SequenceClassification/SequenceClassification.py
|
KeDengMS/CNTK
|
fce86cd9581e7ba746d1ec75bbd67dd35d35d11c
|
[
"RSA-MD"
] | null | null | null |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import sys
import os
import time
from cntk import Trainer, Axis, text_format_minibatch_source, StreamConfiguration
from cntk.device import cpu, set_default_device
from cntk.learner import sgd
from cntk.ops import input_variable, cross_entropy_with_softmax, combine, classification_error
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(abs_path, "..", ".."))
from examples.common.nn import LSTMP_component_with_self_stabilization, embedding, linear_layer, select_last, print_training_progress
# Defines the LSTM model for classifying sequences
def LSTM_sequence_classifer_net(input, num_output_classes, embedding_dim, LSTM_dim, cell_dim):
embedding_function = embedding(input, embedding_dim)
LSTM_function = LSTMP_component_with_self_stabilization(
embedding_function.output, LSTM_dim, cell_dim)[0]
thought_vector = select_last(LSTM_function)
return linear_layer(thought_vector, num_output_classes)
# Creates and trains an LSTM sequence classification model
def train_sequence_classifier(debug_output=False):
input_dim = 2000
cell_dim = 25
hidden_dim = 25
embedding_dim = 50
num_output_classes = 5
# Input variables denoting the features and label data
features = input_variable(shape=input_dim, is_sparse=True)
label = input_variable(num_output_classes, dynamic_axes=[
Axis.default_batch_axis()])
# Instantiate the sequence classification model
classifier_output = LSTM_sequence_classifer_net(
features, num_output_classes, embedding_dim, hidden_dim, cell_dim)
ce = cross_entropy_with_softmax(classifier_output, label)
pe = classification_error(classifier_output, label)
rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
feature_stream_name = 'features'
labels_stream_name = 'labels'
mb_source = text_format_minibatch_source(path, [
StreamConfiguration(feature_stream_name, input_dim, True, 'x'),
StreamConfiguration(labels_stream_name, num_output_classes, False, 'y')], 0)
features_si = mb_source[features]
labels_si = mb_source[label]
# Instantiate the trainer object to drive the model training
trainer = Trainer(classifier_output, ce, pe,
[sgd(classifier_output.parameters, lr=0.0005)])
# Get minibatches of sequences to train with and perform model training
minibatch_size = 200
training_progress_output_freq = 10
i = 0
if debug_output:
training_progress_output_freq = training_progress_output_freq/3
while True:
mb = mb_source.next_minibatch(minibatch_size)
if len(mb) == 0:
break
# Specify the mapping of input variables in the model to actual
# minibatch data to be trained with
arguments = {features: mb[features_si],
label: mb[labels_si]}
trainer.train_minibatch(arguments)
print_training_progress(trainer, i, training_progress_output_freq)
i += 1
import copy
evaluation_average = copy.copy(
trainer.previous_minibatch_evaluation_average)
loss_average = copy.copy(trainer.previous_minibatch_loss_average)
return evaluation_average, loss_average
if __name__ == '__main__':
# Specify the target device to be used for computing, if you do not want to
# use the best available one, e.g.
# set_default_device(cpu())
error, _ = train_sequence_classifier()
print("Error: %f" % error)
| 37.182692
| 133
| 0.721748
|
import numpy as np
import sys
import os
import time
from cntk import Trainer, Axis, text_format_minibatch_source, StreamConfiguration
from cntk.device import cpu, set_default_device
from cntk.learner import sgd
from cntk.ops import input_variable, cross_entropy_with_softmax, combine, classification_error
abs_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(abs_path, "..", ".."))
from examples.common.nn import LSTMP_component_with_self_stabilization, embedding, linear_layer, select_last, print_training_progress
def LSTM_sequence_classifer_net(input, num_output_classes, embedding_dim, LSTM_dim, cell_dim):
embedding_function = embedding(input, embedding_dim)
LSTM_function = LSTMP_component_with_self_stabilization(
embedding_function.output, LSTM_dim, cell_dim)[0]
thought_vector = select_last(LSTM_function)
return linear_layer(thought_vector, num_output_classes)
def train_sequence_classifier(debug_output=False):
input_dim = 2000
cell_dim = 25
hidden_dim = 25
embedding_dim = 50
num_output_classes = 5
features = input_variable(shape=input_dim, is_sparse=True)
label = input_variable(num_output_classes, dynamic_axes=[
Axis.default_batch_axis()])
classifier_output = LSTM_sequence_classifer_net(
features, num_output_classes, embedding_dim, hidden_dim, cell_dim)
ce = cross_entropy_with_softmax(classifier_output, label)
pe = classification_error(classifier_output, label)
rel_path = r"../../../../Tests/EndToEndTests/Text/SequenceClassification/Data/Train.ctf"
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
feature_stream_name = 'features'
labels_stream_name = 'labels'
mb_source = text_format_minibatch_source(path, [
StreamConfiguration(feature_stream_name, input_dim, True, 'x'),
StreamConfiguration(labels_stream_name, num_output_classes, False, 'y')], 0)
features_si = mb_source[features]
labels_si = mb_source[label]
trainer = Trainer(classifier_output, ce, pe,
[sgd(classifier_output.parameters, lr=0.0005)])
minibatch_size = 200
training_progress_output_freq = 10
i = 0
if debug_output:
training_progress_output_freq = training_progress_output_freq/3
while True:
mb = mb_source.next_minibatch(minibatch_size)
if len(mb) == 0:
break
arguments = {features: mb[features_si],
label: mb[labels_si]}
trainer.train_minibatch(arguments)
print_training_progress(trainer, i, training_progress_output_freq)
i += 1
import copy
evaluation_average = copy.copy(
trainer.previous_minibatch_evaluation_average)
loss_average = copy.copy(trainer.previous_minibatch_loss_average)
return evaluation_average, loss_average
if __name__ == '__main__':
error, _ = train_sequence_classifier()
print("Error: %f" % error)
| true
| true
|
f70cd9631c838d6b5a85dc7b7a8ae17002d338e1
| 151
|
py
|
Python
|
sindec/views/home.py
|
fcgomes92/com231-tf-sindec
|
e8654ac9875ba630464537957e16fbfcde7be28c
|
[
"MIT"
] | null | null | null |
sindec/views/home.py
|
fcgomes92/com231-tf-sindec
|
e8654ac9875ba630464537957e16fbfcde7be28c
|
[
"MIT"
] | 8
|
2021-02-08T20:17:31.000Z
|
2022-03-11T23:13:09.000Z
|
sindec/views/home.py
|
fcgomes92/com231-tf-sindec
|
e8654ac9875ba630464537957e16fbfcde7be28c
|
[
"MIT"
] | null | null | null |
from django.views.generic import TemplateView
class HomeRequestView(TemplateView):
http_method_names = ['get', ]
template_name = "home.html"
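# Hedged wiring sketch (not part of the original views module): a TemplateView
# subclass like this is typically exposed through urls.py, for example
#   path('', HomeRequestView.as_view(), name='home')
# which assumes `from django.urls import path`; the route and name are illustrative.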
| 21.571429
| 45
| 0.748344
|
from django.views.generic import TemplateView
class HomeRequestView(TemplateView):
http_method_names = ['get', ]
template_name = "home.html"
| true
| true
|
f70cd9abb296e308c348eae7ed257ca62c1e6094
| 2,161
|
py
|
Python
|
auxiliar.py
|
gustavoeso/img_manipulation
|
a2f46d46f1111ddf77b3d58cb5322ffbf591ea53
|
[
"MIT"
] | 2
|
2022-03-08T20:55:38.000Z
|
2022-03-09T19:16:10.000Z
|
Scripts/Filtragem e contorno jupyter/auxiliar.py
|
pcliquet/robotic_resumo
|
3d1d8705820cae39d5be956836a94c7884ab490d
|
[
"MIT"
] | null | null | null |
Scripts/Filtragem e contorno jupyter/auxiliar.py
|
pcliquet/robotic_resumo
|
3d1d8705820cae39d5be956836a94c7884ab490d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Atenção: usado no notebook da aula.
Não precisa ser usado diretamente
"""
print("Este script não deve ser executado diretamente")
from ipywidgets import widgets, interact, interactive, FloatSlider, IntSlider
import numpy as np
import cv2
def make_widgets_mat(m, n):
"""
Makes a m rows x n columns
matriz of integer Jupyter Widgets
all values initialized to zero
"""
list_elements = []
for i in range(m):
row = []
for j in range(n):
row.append(widgets.IntText(value=0))
list_elements.append(row)
rows = []
for row in list_elements:
rows.append(widgets.HBox(row))
widgets_mat = widgets.VBox(rows)
return list_elements, widgets_mat
def make_widgets_mat_from_data(data):
"""
Creates a matriz of int Widgets given 2D-data
"""
n = len(data)
m = len(data[0])
elements, mat = makeMat(n, m)
for i in range(n):
for j in range(m):
elements[i][j].value = data[i][j]
return elements, mat
def make_np_from_widgets_list(widgets_list):
"""
Takes as input a list of lists of widgets and initializes a matrix
"""
widgets = widgets_list
n = len(widgets)
m = len(widgets[0])
array = np.zeros((n,m), dtype=np.float32)
for i in range(n):
for j in range(m):
array[i][j] = widgets[i][j].value
return array
def convert_to_tuple(html_color):
colors = html_color.split("#")[1]
r = int(colors[0:2],16)
g = int(colors[2:4],16)
b = int(colors[4:],16)
return (r,g,b)
def to_1px(tpl):
img = np.zeros((1,1,3), dtype=np.uint8)
img[0,0,0] = tpl[0]
img[0,0,1] = tpl[1]
img[0,0,2] = tpl[2]
return img
def to_hsv(html_color):
tupla = convert_to_tuple(html_color)
hsv = cv2.cvtColor(to_1px(tupla), cv2.COLOR_RGB2HSV)
return hsv[0][0]
def ranges(value):
hsv = to_hsv(value)
hsv2 = np.copy(hsv)
hsv[0] = max(0, hsv[0]-10)
hsv2[0] = min(180, hsv[0]+ 10)
hsv[1:] = 50
hsv2[1:] = 255
return hsv, hsv2
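# --- Hedged usage sketch (not part of the original helper module) ---
# The two HSV bounds returned by ranges() are shaped for cv2.inRange-style
# masking; the synthetic frame and the color below are purely illustrative.
_low, _high = ranges("#ff0000")
_frame = np.zeros((10, 10, 3), dtype=np.uint8)
_mask = cv2.inRange(cv2.cvtColor(_frame, cv2.COLOR_BGR2HSV), _low, _high)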
| 22.278351
| 77
| 0.589079
|
print("Este script não deve ser executado diretamente")
from ipywidgets import widgets, interact, interactive, FloatSlider, IntSlider
import numpy as np
import cv2
def make_widgets_mat(m, n):
list_elements = []
for i in range(m):
row = []
for j in range(n):
row.append(widgets.IntText(value=0))
list_elements.append(row)
rows = []
for row in list_elements:
rows.append(widgets.HBox(row))
widgets_mat = widgets.VBox(rows)
return list_elements, widgets_mat
def make_widgets_mat_from_data(data):
n = len(data)
m = len(data[0])
elements, mat = make_widgets_mat(n, m)
for i in range(n):
for j in range(m):
elements[i][j].value = data[i][j]
return elements, mat
def make_np_from_widgets_list(widgets_list):
widgets = widgets_list
n = len(widgets)
m = len(widgets[0])
array = np.zeros((n,m), dtype=np.float32)
for i in range(n):
for j in range(m):
array[i][j] = widgets[i][j].value
return array
def convert_to_tuple(html_color):
colors = html_color.split("#")[1]
r = int(colors[0:2],16)
g = int(colors[2:4],16)
b = int(colors[4:],16)
return (r,g,b)
def to_1px(tpl):
img = np.zeros((1,1,3), dtype=np.uint8)
img[0,0,0] = tpl[0]
img[0,0,1] = tpl[1]
img[0,0,2] = tpl[2]
return img
def to_hsv(html_color):
tupla = convert_to_tuple(html_color)
hsv = cv2.cvtColor(to_1px(tupla), cv2.COLOR_RGB2HSV)
return hsv[0][0]
def ranges(value):
hsv = to_hsv(value)
hsv2 = np.copy(hsv)
hsv[0] = max(0, hsv[0]-10)
hsv2[0] = min(180, hsv[0]+ 10)
hsv[1:] = 50
hsv2[1:] = 255
return hsv, hsv2
| true
| true
|
f70cdac9dceaedf6c2a150c0ba9bac822f448f8f
| 16,112
|
py
|
Python
|
layouts_oilProd.py
|
jmcdowell26/TXRRC_data_harvest
|
10dc94ef7b7ba6a8675b9b0430b660b9a0f5f6d5
|
[
"Unlicense"
] | 26
|
2020-07-21T04:20:00.000Z
|
2022-03-25T01:12:19.000Z
|
layouts_oilProd.py
|
jmcdowell26/TXRRC_data_harvest
|
10dc94ef7b7ba6a8675b9b0430b660b9a0f5f6d5
|
[
"Unlicense"
] | 19
|
2020-07-17T17:23:08.000Z
|
2021-03-03T14:19:01.000Z
|
layouts_oilProd.py
|
jmcdowell26/TXRRC_data_harvest
|
10dc94ef7b7ba6a8675b9b0430b660b9a0f5f6d5
|
[
"Unlicense"
] | 13
|
2020-07-18T13:20:48.000Z
|
2022-01-13T23:23:05.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 11:12:44 2020
Files for this layout:
ftp://ftpe.rrc.texas.gov/sholed
ftp://ftpe.rrc.texas.gov/sholed/olf001l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf003l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf004l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf005l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf007l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf008l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf009l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf010l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf011l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf013l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/olf014l.ebc.gz
ftp://ftpe.rrc.texas.gov/sholed/ReadMe.txt
Layout Manual:
https://www.rrc.texas.gov/media/1273/ola013k.pdf
"""
OIL_FIELD_01 = [
('TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('DIST',1,3,'pic_any'), ##PIC XXX
('FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('OPR',12,6,'pic_numeric'), ##PIC 9(6)
('LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('LEASE FILLER',23,2,'pic_numeric'), ##PIC 99
('OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('F-NAME',26,32,'pic_any'), ##PIC X(32)
('COUNTY',58,18,'pic_numeric'), ##PIC 9(18)
('DISC-DATE',76,8,'pic_yyyymmdd'), ##PIC 9(8)
('F-DEPTH',84,5,'pic_numeric'), ##PIC 9(5)
('O-GRAV',89,3,'pic_numeric'), ##PIC 999
('F-TYPE',92,1,'pic_numeric'), ##PIC 9
('MULT-RES',93,1,'pic_numeric'), ##PIC 9
('F-LPB',94,1,'pic_numeric'), ##PIC 9
('F-XMT',95,1,'pic_numeric'), ##PIC 9
('PRT-AS-IS',96,1,'pic_numeric'), ##PIC 9
('YARD',97,1,'pic_numeric'), ##PIC 9
('T-CODES',98,12,'pic_numeric'), ##PIC 9(12)
('ALLOCATION',110,12,'pic_numeric'), ##PIC 9(12)
('RES-AMT',122,6,'pic_numeric'), ##PIC 9(6)
('F-GOR',128,6,'pic_numeric'), ##PIC 9(6)
('F-TOP',134,5,'pic_numeric'), ##PIC 9(5)
('F-NET',139,6,'pic_numeric'), ##PIC 9(6)
('UNET',145,3,'pic_numeric'), ##PIC 999
('TOL',148,4,'pic_numeric'), ##PIC 9999
('SPAC',152,8,'pic_numeric'), ##PIC 9(8)
('DIAG',160,4,'pic_numeric'), ##PIC 9999
('CUM-PROD',164,7,'pic_comp'), ##PIC S9(13) COMP-3
('CASING',171,21,'pic_any'), ##PIC X(21)
('COL-HEAD',192,1,'pic_any'), ##PIC X
('ALO-CODE',193,1,'pic_any'), ##PIC X
('F-RMK1',194,66,'pic_any'), ##PIC X(66)
('F-RMK2',260,66,'pic_any'), ##PIC X(66)
('PERM-NO',326,5,'pic_any'), ##PIC X(5)
('SP-FHC',331,1,'pic_numeric'), ##PIC 9
('AN-A',332,90,'pic_any'), ##PIC X(90)
('AN-B',422,35,'pic_any'), ##PIC X(35)
('F-OOIP',457,8,'pic_numeric'), ##PIC 9(08) ##('FILLER',465,7,'pic_numeric'), ##PIC 9(07) ##('FILLER',472,15,'pic_numeric'), ##PIC 9(15) ##('FILLER',487,13,'pic_numeric'), ##PIC 9(13)
('FM-DATE',500,6,'pic_yyyymm'), ##PIC 9(6)
('FM-PW',506,2,'pic_comp'), ##PIC S9(3) COMP-3
('FM-AC',508,4_4,'pic_comp'), ##PIC S999V9(4) COMP-3 ##('FILLER',512,4,'pic_numeric'), ##PIC 9(4)
('FM-OTHC',516,1,'pic_numeric'), ##PIC 9
('FM-CHG',517,1,'pic_numeric'), ##PIC 9
('FM-PROD-FACT',518,3_3,'pic_comp'), ##PIC S99V999 COMP-3
('FM-SPLIT-PROD-FACT',521,3_3,'pic_comp'), ##PIC S99V999 COMP-3
('FM-JOHN',524,1,'pic_numeric'), ##PIC 9
('FM-OTH',525,8_7,'pic_comp'), ##PIC S9(8)V9(7) COMP-3 ##('FILLER',533,15,'pic_any'), ##PIC X(15)
]
OIL_LEASE_03 = [
('LEASE-REC-TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('LEASE-REC-DIST',1,3,'pic_any'), ##PIC XXX
('LEASE-REC-FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('LEASE-REC-OPR',12,6,'pic_numeric'), ##PIC 9(6)
('LEASE-REC-LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('LEASE-REC-FILLER',23,2,'pic_any'), ##PIC XX
('LEASE-REC-OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('L-NAME',26,32,'pic_any'), ##PIC X(32)
('LSE-CO',58,6,'pic_numeric'), ##PIC 9(6)
('POGATH',64,5,'pic_any'), ##PIC X(5)
('PGGATH',69,5,'pic_any'), ##PIC X(5)
('OSPLIT',74,1,'pic_numeric'), ##PIC 9
('GSPLIT',75,1,'pic_numeric'), ##PIC 9
('OOGATH',76,5,'pic_any'), ##PIC X(5)
('OGGATH',81,5,'pic_any'), ##PIC X(5)
('OOPR',86,6,'pic_numeric'), ##PIC 9(6)
('BO-STATUS',92,4,'pic_comp'), ##PIC S9(7) COMP-3
('BG-STATUS',96,4,'pic_comp'), ##PIC S9(7) COMP-3
('MOVE-BAL',100,4,'pic_comp'), ##PIC S9(7) COMP-3
('PO-STATUS',104,4,'pic_comp'), ##PIC S9(7) COMP-3
('PG-STATUS',108,4,'pic_comp'), ##PIC S9(7) COMP-3
('SEC-REC',112,1,'pic_numeric'), ##PIC 9
('CERT',113,2,'pic_numeric'), ##PIC 99
('BATCH',115,1,'pic_any'), ##PIC X
('L-LPB',116,1,'pic_numeric'), ##PIC 9
('COMMINGLE-CD',117,1,'pic_numeric'), ##PIC 9
('COMMINGLE',118,4,'pic_numeric'), ##PIC 9999
('L-INFO',122,54,'pic_any'), ##PIC X(54)
('AD-BO-STATUS',176,4,'pic_comp'), ##PIC S9(7) COMP-3
('AD-BG-STATUS',180,4,'pic_comp'), ##PIC S9(7) COMP-3
('COMMINGLE-DATE',184,6,'pic_yyyymm'), ##PIC 9(6)
('L-RMCD',190,1,'pic_numeric'), ##PIC 9
('L-RMDT',191,6,'pic_yyyymm'), ##PIC 9(6)
('SEV-CD-13',197,1,'pic_numeric'), ##PIC 9
('SEV-CD-14',198,1,'pic_numeric'), ##PIC 9
('L-CAS-SI-LTR-DTE',199,6,'pic_yyyymm'), ##PIC 9(6)
('L-RED-RTE-DTE',205,6,'pic_yyyymm'), ##PIC 9(6)
('L-EXC-TST',211,1,'pic_numeric'), ##PIC 9
('L-RLTYCD',212,1,'pic_numeric'), ##PIC 9
('L-ONE-WELL-LEASE',213,1,'pic_any'), ##PIC X
('L-PANHANDLE-GOR-EXC',214,1,'pic_any'), ##PIC X(01)
('L-PANHANDLE-GOR-AMT',215,5_1,'pic_comp'), ##PIC 9(08)V9 COMP-3 ##('FILLER',220,4,'pic_numeric'), ##PIC 9(04)
('L-MONTH-DATE',224,6,'pic_yyyymm'), ##PIC 9(6)
('LM-SEV',230,1,'pic_numeric'), ##PIC 9
('LM-RETRO',231,1,'pic_numeric'), ##PIC 9
('LM-REC',232,1,'pic_numeric'), ##PIC 9
('LM-CHG',233,1,'pic_numeric'), ##PIC 9
('LM-ALLOW',234,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-PROD',238,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-FW',242,3,'pic_numeric'), ##PIC 999
('LM-OW',245,3,'pic_numeric'), ##PIC 999
('LM-PL',248,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-PLC',252,1,'pic_numeric'), ##PIC 9
('LM-OTH',253,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-OTHC',257,1,'pic_numeric'), ##PIC 9
('LM-STO',258,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-GL',262,5,'pic_comp'), ##PIC S9(7) COMP-3
('LM-GPROD',267,5,'pic_comp'), ##PIC S9(7) COMP-3
('LM-GLIFT',272,4,'pic_comp'), ##PIC S9(7) COMP-3
('LM-CSIL',276,1,'pic_numeric'), ##PIC 9
('LM-JOHN',277,1,'pic_numeric'), ##PIC 9
('LM-LTR-CODE',278,1,'pic_numeric'), ##PIC 9 ##('FILLER',279,13,'pic_numeric'), ##PIC 9(13) ##('FILLER',292,904,'pic_numeric'), ##PIC 9(13) ##('FILLER',1196,4,'pic_numeric') ##PIC 9(04)
]
OIL_MULTI_WELL_04 = [
('MULTI-W-REC-TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('MULTI-W-REC-DIST',1,3,'pic_any'), ##PIC XXX
('MUTLI-W-REC-FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('MULTI-W-REC-OPR',12,6,'pic_numeric'), ##PIC 9(6)
('MULTI-W-REC-LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('MULTI-W-REC-FILLER',23,2,'pic_numeric'), ##PIC 99
('MULTI-W-REC-OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('M-RECORD',26,6,'pic_any'), ##PIC X(6)
('TYPEW',32,1,'pic_any'), ##PIC X
('RESER',33,5,'pic_any'), ##PIC X(5)
('M-COUNTY',38,6,'pic_numeric'), ##PIC 9(6)
('M-TST-EFF',44,1,'pic_any'), ##PIC X
('M-PNTR-1ST',45,6,'pic_numeric'), ##PIC 9(6)
('CAP',51,1,'pic_numeric'), ##PIC 9
('PROD-WELL',52,6,'pic_numeric'), ##PIC 9(6)
('MARG-WELL',58,6,'pic_numeric'), ##PIC 9(6)
('M-DEPTH',64,1,'pic_numeric'), ##PIC 9
('M-PNTR-LST',65,6,'pic_numeric'), ##PIC 9(6)
('M-EXC-TEST',71,1,'pic_numeric'), ##PIC 9 ##('FILLER',72,6,'pic_numeric'), ##PIC 9(6)
('M-WATER',78,6,'pic_numeric'), ##PIC 9(6)
('M-REMARK',84,55,'pic_any'), ##PIC X(55)
('MM-PRCNT',139,3,'pic_comp'), ##PIC V999 ##('FILLER',142,11,'pic_numeric'), ##PIC 9(11) ##('FILLER',153,11,'pic_numeric'), ##PIC 9(11)
('M-MONTH-DATE',164,6,'pic_yyyymm'), ##PIC 9(6)
('MM-CHG',170,1,'pic_numeric'), ##PIC 9
('MM-NO',171,1,'pic_numeric'), ##PIC 9
('MM-ALLOW',172,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-ACODE',176,1,'pic_numeric'), ##PIC 9
('MM-TCODE',177,1,'pic_numeric'), ##PIC 9
('MM-LIMIT',178,5,'pic_comp'), ##PIC S9(9) COMP-3
('MM-ALLOW2',183,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-ACODE2',187,1,'pic_numeric'), ##PIC 9
('MM-TCODE2',188,1,'pic_numeric'), ##PIC 9
('MM-LIMIT2',189,5,'pic_comp'), ##PIC S9(9) COMP-3
('MM-DATE2',194,2,'pic_numeric'), ##PIC 99
('MM-ALLOW3',196,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-ACODE3',200,1,'pic_numeric'), ##PIC 9
('MM-TCODE3',201,1,'pic_numeric'), ##PIC 9
('MM-LIMIT3',202,5,'pic_comp'), ##PIC S9(9) COMP-3
('MM-DATE3',207,2,'pic_numeric'), ##PIC 99
('MM-FORM-LCK',209,1,'pic_numeric'), ##PIC 9
('MM-SPACE1',210,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-KODE2',214,1,'pic_numeric'), ##PIC 9
('MM-SPACE2',215,4,'pic_comp'), ##PIC S9(7) COMP-3
('MM-JOHN',219,1,'pic_numeric'), ##PIC 9 ##('FILLER',220,9,'pic_numeric'), ##PIC 9(09) ##('FILLER',229,9,'pic_numeric'), ##PIC 9(09)
]
OIL_WELL_05 = [
('WELL-REC-TYPE-REC',0,1,'pic_numeric'), ##PIC 9
('WELL-REC-DIST',1,3,'pic_any'), ##PIC XXX
('WELL-REC-FIELD',4,8,'pic_numeric'), ##PIC 9(8)
('WELL-REC-OPR',12,6,'pic_numeric'), ##PIC 9(6)
('WELL-REC-LEASE',18,5,'pic_numeric'), ##PIC 9(5)
('WELL-REC-FILLER',23,2,'pic_numeric'), ##PIC 99
('WELL-REC-OFFSHORE',25,1,'pic_numeric'), ##PIC 9
('WELL-NO',26,6,'pic_any'), ##PIC X(6)
('W-TYPE-WELL',32,1,'pic_any'), ##PIC X(1)
('W-UNIT-NO',33,1,'pic_any'), ##PIC X
('W-UNIT-VALUE',34,4,'pic_numeric'), ##PIC 9V999
('W-KEY',38,1,'pic_numeric'), ##PIC 9
('W-COUNTY',39,3,'pic_numeric'), ##PIC 999
('PUMP',42,1,'pic_numeric'), ##PIC 9
('W-SP',43,5,'pic_numeric'), ##PIC 9(5)
('W-NET',48,6,'pic_numeric'), ##PIC 9(6)
('W-DEPTH',54,5,'pic_numeric'), ##PIC 9(5)
('SAND',59,3,'pic_numeric'), ##PIC 9(3)
('FROZEN',62,5,'pic_numeric'), ##PIC 9(5)
('PERF',67,5,'pic_numeric'), ##PIC 9(5)
('W-DATE',72,8,'pic_yyyymmdd'), ##PIC 9(8)
('EX-14B-CD',80,1,'pic_any'), ##PIC X
('W-SUB-WELL',81,1,'pic_numeric'), ##PIC 9
('W-NO-PROD-CD',82,1,'pic_numeric'), ##PIC 9
('W-DELQ-FORM',83,1,'pic_numeric'), ##PIC 9
('W-TST-EFF',84,1,'pic_any'), ##PIC X
('W-EXC-TST',85,1,'pic_numeric'), ##PIC 9
('W-WATER',86,4,'pic_numeric'), ##PIC 9(4)
('EX-14B-DATE',90,6,'pic_yyyymm'), ##PIC 9(6)
('W-RMKS',96,15,'pic_any'), ##PIC X(15)
('BONUS-AMT',111,4,'pic_numeric'), ##PIC 9(4)
('FROZTSF',115,3,'pic_numeric'), ##PIC 999
('W-WLSD',118,1,'pic_numeric'), ##PIC 9
('W-TST-DT',119,8,'pic_yyyymmdd'), ##PIC 9(8)
('W-DTE-LST-UTL',127,6,'pic_yyyymm'), ##PIC 9(6)
('W-NEW-WB-EXC',133,1,'pic_any'), ##PIC X(01)
('W-NEW-WB-CONNECT-DATE',134,8,'pic_yyyymmdd'), ##PIC 9(8)
('W-14B2-TYPE-COVERAGE',142,1,'pic_any'), ##PIC X(01)
('W-14B2-APP-NO',143,6,'pic_numeric'), ##PIC 9(06) ##('FILLER',149,4,'pic_numeric'), ##PIC 9(04) ##('FILLER',153,18,'pic_numeric'), ##PIC 9(18) ##('FILLER',171,7,'pic_numeric'), ##PIC 9(07)
('W-MONTH-DATE',178,6,'pic_yyyymm'), ##PIC 9(6)
('WM-CHG',184,1,'pic_numeric'), ##PIC 9
('WM-NO',185,1,'pic_numeric'), ##PIC 9
('WM-ALLOW',186,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-ACODE',189,1,'pic_any'), ##PIC X
('WM-TCODE',190,1,'pic_any'), ##PIC X
('WM-LIMIT',191,4,'pic_comp'), ##PIC S9(7) COMP-3
('WM-ALLOW2',195,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-ACODE2',198,1,'pic_any'), ##PIC X
('WM-TCODE2',199,1,'pic_any'), ##PIC X
('WM-LIMIT2',200,4,'pic_comp'), ##PIC S9(7) COMP-3
('WM-DATE2',204,2,'pic_numeric'), ##PIC 99
('WM-ALLOW3',206,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-ACODE3',209,1,'pic_any'), ##PIC X
('WM-TCODE3',210,1,'pic_any'), ##PIC X
('WM-LIMIT3',211,4,'pic_comp'), ##PIC S9(7) COMP-3
('WM-DATE3',215,2,'pic_numeric'), ##PIC 99
('WM-FORM-LICK',217,1,'pic_numeric'), ##PIC 9
('WM-PGT',218,2,'pic_comp'), ##PIC S999 COMP-3
('WM-TSWA',220,1,'pic_numeric'), ##PIC 9
('WM-EGT',221,2,'pic_comp'), ##PIC S999 COMP-3
('WM-ESWA',223,1,'pic_numeric'), ##PIC 9
('WM-ACRE',224,3_2,'pic_comp'), ##PIC S999V99 COMP-3
('WM-POTE',227,3_2,'pic_comp'), ##PIC S9999V9 COMP-3
('WM-ACFT',230,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-GOR',233,3,'pic_comp'), ##PIC S9(5) COMP-3
('WM-OTRAN-CD',236,1,'pic_numeric'), ##PIC 9
('WM-POT',237,2,'pic_comp'), ##PIC S999 COMP-3
('WM-EOT',239,2,'pic_comp'), ##PIC S999 COMP-3
('WM-JOHN',241,1,'pic_numeric'), ##PIC 9
('WM-OOIP',242,6,'pic_numeric'), ##PIC 9(06) ##('FILLER',248,3,'pic_numeric'), ##PIC 9(03)
]
def oilProd_layout(startval):
layouts_map = {
'1' : {'name': 'OIL_FIELD', 'layout': OIL_FIELD_01},
'3' : {'name': 'OIL_LEASE', 'layout': OIL_LEASE_03},
'4' : {'name': 'OIL_MULTI_WELL', 'layout': OIL_MULTI_WELL_04},
'5' : {'name': 'OIL_WELL', 'layout': OIL_WELL_05},
}
try:
returnval = layouts_map[startval]
except KeyError:
returnval = None
return returnval
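# Hedged usage sketch (not in the original file): each fixed-width record's
# first character is its type code, so a caller might dispatch like
#   layout = oilProd_layout(line[0])      # `line` is a hypothetical decoded record
#   if layout is not None:
#       fields = layout['layout']         # list of (name, offset, length, parser tag)
# The per-field parsing helper hinted at here is not part of this module.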
| 59.235294
| 209
| 0.455561
|
OIL_FIELD_01 = [
('TYPE-REC',0,1,'pic_numeric'), ('DIST',1,3,'pic_any'), ('FIELD',4,8,'pic_numeric'), ('OPR',12,6,'pic_numeric'), ('LEASE',18,5,'pic_numeric'), ('LEASE FILLER',23,2,'pic_numeric'), ('OFFSHORE',25,1,'pic_numeric'), ('F-NAME',26,32,'pic_any'), ('COUNTY',58,18,'pic_numeric'), ('DISC-DATE',76,8,'pic_yyyymmdd'), ('F-DEPTH',84,5,'pic_numeric'), ('O-GRAV',89,3,'pic_numeric'), ('F-TYPE',92,1,'pic_numeric'), ('MULT-RES',93,1,'pic_numeric'), ('F-LPB',94,1,'pic_numeric'), ('F-XMT',95,1,'pic_numeric'), ('PRT-AS-IS',96,1,'pic_numeric'), ('YARD',97,1,'pic_numeric'), ('T-CODES',98,12,'pic_numeric'), ('ALLOCATION',110,12,'pic_numeric'), ('RES-AMT',122,6,'pic_numeric'), ('F-GOR',128,6,'pic_numeric'), ('F-TOP',134,5,'pic_numeric'), ('F-NET',139,6,'pic_numeric'), ('UNET',145,3,'pic_numeric'), ('TOL',148,4,'pic_numeric'), ('SPAC',152,8,'pic_numeric'), ('DIAG',160,4,'pic_numeric'), ('CUM-PROD',164,7,'pic_comp'), ('CASING',171,21,'pic_any'), ('COL-HEAD',192,1,'pic_any'), ('ALO-CODE',193,1,'pic_any'), ('F-RMK1',194,66,'pic_any'), ('F-RMK2',260,66,'pic_any'), ('PERM-NO',326,5,'pic_any'), ('SP-FHC',331,1,'pic_numeric'), ('AN-A',332,90,'pic_any'), ('AN-B',422,35,'pic_any'), ('F-OOIP',457,8,'pic_numeric'), ('FM-DATE',500,6,'pic_yyyymm'), ('FM-PW',506,2,'pic_comp'), ('FM-AC',508,4_4,'pic_comp'), ('FM-OTHC',516,1,'pic_numeric'), ('FM-CHG',517,1,'pic_numeric'), ('FM-PROD-FACT',518,3_3,'pic_comp'), ('FM-SPLIT-PROD-FACT',521,3_3,'pic_comp'), ('FM-JOHN',524,1,'pic_numeric'), ('FM-OTH',525,8_7,'pic_comp'), ]
OIL_LEASE_03 = [
('LEASE-REC-TYPE-REC',0,1,'pic_numeric'), ('LEASE-REC-DIST',1,3,'pic_any'), ('LEASE-REC-FIELD',4,8,'pic_numeric'), ('LEASE-REC-OPR',12,6,'pic_numeric'), ('LEASE-REC-LEASE',18,5,'pic_numeric'), ('LEASE-REC-FILLER',23,2,'pic_any'), ('LEASE-REC-OFFSHORE',25,1,'pic_numeric'), ('L-NAME',26,32,'pic_any'), ('LSE-CO',58,6,'pic_numeric'), ('POGATH',64,5,'pic_any'), ('PGGATH',69,5,'pic_any'), ('OSPLIT',74,1,'pic_numeric'), ('GSPLIT',75,1,'pic_numeric'), ('OOGATH',76,5,'pic_any'), ('OGGATH',81,5,'pic_any'), ('OOPR',86,6,'pic_numeric'), ('BO-STATUS',92,4,'pic_comp'), ('BG-STATUS',96,4,'pic_comp'), ('MOVE-BAL',100,4,'pic_comp'), ('PO-STATUS',104,4,'pic_comp'), ('PG-STATUS',108,4,'pic_comp'), ('SEC-REC',112,1,'pic_numeric'), ('CERT',113,2,'pic_numeric'), ('BATCH',115,1,'pic_any'), ('L-LPB',116,1,'pic_numeric'), ('COMMINGLE-CD',117,1,'pic_numeric'), ('COMMINGLE',118,4,'pic_numeric'), ('L-INFO',122,54,'pic_any'), ('AD-BO-STATUS',176,4,'pic_comp'), ('AD-BG-STATUS',180,4,'pic_comp'), ('COMMINGLE-DATE',184,6,'pic_yyyymm'), ('L-RMCD',190,1,'pic_numeric'), ('L-RMDT',191,6,'pic_yyyymm'), ('SEV-CD-13',197,1,'pic_numeric'), ('SEV-CD-14',198,1,'pic_numeric'), ('L-CAS-SI-LTR-DTE',199,6,'pic_yyyymm'), ('L-RED-RTE-DTE',205,6,'pic_yyyymm'), ('L-EXC-TST',211,1,'pic_numeric'), ('L-RLTYCD',212,1,'pic_numeric'), ('L-ONE-WELL-LEASE',213,1,'pic_any'), ('L-PANHANDLE-GOR-EXC',214,1,'pic_any'), ('L-PANHANDLE-GOR-AMT',215,5_1,'pic_comp'), ('L-MONTH-DATE',224,6,'pic_yyyymm'), ('LM-SEV',230,1,'pic_numeric'), ('LM-RETRO',231,1,'pic_numeric'), ('LM-REC',232,1,'pic_numeric'), ('LM-CHG',233,1,'pic_numeric'), ('LM-ALLOW',234,4,'pic_comp'), ('LM-PROD',238,4,'pic_comp'), ('LM-FW',242,3,'pic_numeric'), ('LM-OW',245,3,'pic_numeric'), ('LM-PL',248,4,'pic_comp'), ('LM-PLC',252,1,'pic_numeric'), ('LM-OTH',253,4,'pic_comp'), ('LM-OTHC',257,1,'pic_numeric'), ('LM-STO',258,4,'pic_comp'), ('LM-GL',262,5,'pic_comp'), ('LM-GPROD',267,5,'pic_comp'), ('LM-GLIFT',272,4,'pic_comp'), ('LM-CSIL',276,1,'pic_numeric'), ('LM-JOHN',277,1,'pic_numeric'), ('LM-LTR-CODE',278,1,'pic_numeric'), ]
OIL_MULTI_WELL_04 = [
('MULTI-W-REC-TYPE-REC',0,1,'pic_numeric'), ('MULTI-W-REC-DIST',1,3,'pic_any'), ('MUTLI-W-REC-FIELD',4,8,'pic_numeric'), ('MULTI-W-REC-OPR',12,6,'pic_numeric'), ('MULTI-W-REC-LEASE',18,5,'pic_numeric'), ('MULTI-W-REC-FILLER',23,2,'pic_numeric'), ('MULTI-W-REC-OFFSHORE',25,1,'pic_numeric'), ('M-RECORD',26,6,'pic_any'), ('TYPEW',32,1,'pic_any'), ('RESER',33,5,'pic_any'), ('M-COUNTY',38,6,'pic_numeric'), ('M-TST-EFF',44,1,'pic_any'), ('M-PNTR-1ST',45,6,'pic_numeric'), ('CAP',51,1,'pic_numeric'), ('PROD-WELL',52,6,'pic_numeric'), ('MARG-WELL',58,6,'pic_numeric'), ('M-DEPTH',64,1,'pic_numeric'), ('M-PNTR-LST',65,6,'pic_numeric'), ('M-EXC-TEST',71,1,'pic_numeric'), ('M-WATER',78,6,'pic_numeric'), ('M-REMARK',84,55,'pic_any'), ('MM-PRCNT',139,3,'pic_comp'), ('M-MONTH-DATE',164,6,'pic_yyyymm'), ('MM-CHG',170,1,'pic_numeric'), ('MM-NO',171,1,'pic_numeric'), ('MM-ALLOW',172,4,'pic_comp'), ('MM-ACODE',176,1,'pic_numeric'), ('MM-TCODE',177,1,'pic_numeric'), ('MM-LIMIT',178,5,'pic_comp'), ('MM-ALLOW2',183,4,'pic_comp'), ('MM-ACODE2',187,1,'pic_numeric'), ('MM-TCODE2',188,1,'pic_numeric'), ('MM-LIMIT2',189,5,'pic_comp'), ('MM-DATE2',194,2,'pic_numeric'), ('MM-ALLOW3',196,4,'pic_comp'), ('MM-ACODE3',200,1,'pic_numeric'), ('MM-TCODE3',201,1,'pic_numeric'), ('MM-LIMIT3',202,5,'pic_comp'), ('MM-DATE3',207,2,'pic_numeric'), ('MM-FORM-LCK',209,1,'pic_numeric'), ('MM-SPACE1',210,4,'pic_comp'), ('MM-KODE2',214,1,'pic_numeric'), ('MM-SPACE2',215,4,'pic_comp'), ('MM-JOHN',219,1,'pic_numeric'), ]
OIL_WELL_05 = [
('WELL-REC-TYPE-REC',0,1,'pic_numeric'), ('WELL-REC-DIST',1,3,'pic_any'), ('WELL-REC-FIELD',4,8,'pic_numeric'), ('WELL-REC-OPR',12,6,'pic_numeric'), ('WELL-REC-LEASE',18,5,'pic_numeric'), ('WELL-REC-FILLER',23,2,'pic_numeric'), ('WELL-REC-OFFSHORE',25,1,'pic_numeric'), ('WELL-NO',26,6,'pic_any'), ('W-TYPE-WELL',32,1,'pic_any'), ('W-UNIT-NO',33,1,'pic_any'), ('W-UNIT-VALUE',34,4,'pic_numeric'), ('W-KEY',38,1,'pic_numeric'), ('W-COUNTY',39,3,'pic_numeric'), ('PUMP',42,1,'pic_numeric'), ('W-SP',43,5,'pic_numeric'), ('W-NET',48,6,'pic_numeric'), ('W-DEPTH',54,5,'pic_numeric'), ('SAND',59,3,'pic_numeric'), ('FROZEN',62,5,'pic_numeric'), ('PERF',67,5,'pic_numeric'), ('W-DATE',72,8,'pic_yyyymmdd'), ('EX-14B-CD',80,1,'pic_any'), ('W-SUB-WELL',81,1,'pic_numeric'), ('W-NO-PROD-CD',82,1,'pic_numeric'), ('W-DELQ-FORM',83,1,'pic_numeric'), ('W-TST-EFF',84,1,'pic_any'), ('W-EXC-TST',85,1,'pic_numeric'), ('W-WATER',86,4,'pic_numeric'), ('EX-14B-DATE',90,6,'pic_yyyymm'), ('W-RMKS',96,15,'pic_any'), ('BONUS-AMT',111,4,'pic_numeric'), ('FROZTSF',115,3,'pic_numeric'), ('W-WLSD',118,1,'pic_numeric'), ('W-TST-DT',119,8,'pic_yyyymmdd'), ('W-DTE-LST-UTL',127,6,'pic_yyyymm'), ('W-NEW-WB-EXC',133,1,'pic_any'), ('W-NEW-WB-CONNECT-DATE',134,8,'pic_yyyymmdd'), ('W-14B2-TYPE-COVERAGE',142,1,'pic_any'), ('W-14B2-APP-NO',143,6,'pic_numeric'), ('W-MONTH-DATE',178,6,'pic_yyyymm'), ('WM-CHG',184,1,'pic_numeric'), ('WM-NO',185,1,'pic_numeric'), ('WM-ALLOW',186,3,'pic_comp'), ('WM-ACODE',189,1,'pic_any'), ('WM-TCODE',190,1,'pic_any'), ('WM-LIMIT',191,4,'pic_comp'), ('WM-ALLOW2',195,3,'pic_comp'), ('WM-ACODE2',198,1,'pic_any'), ('WM-TCODE2',199,1,'pic_any'), ('WM-LIMIT2',200,4,'pic_comp'), ('WM-DATE2',204,2,'pic_numeric'), ('WM-ALLOW3',206,3,'pic_comp'), ('WM-ACODE3',209,1,'pic_any'), ('WM-TCODE3',210,1,'pic_any'), ('WM-LIMIT3',211,4,'pic_comp'), ('WM-DATE3',215,2,'pic_numeric'), ('WM-FORM-LICK',217,1,'pic_numeric'), ('WM-PGT',218,2,'pic_comp'), ('WM-TSWA',220,1,'pic_numeric'), ('WM-EGT',221,2,'pic_comp'), ('WM-ESWA',223,1,'pic_numeric'), ('WM-ACRE',224,3_2,'pic_comp'), ('WM-POTE',227,3_2,'pic_comp'), ('WM-ACFT',230,3,'pic_comp'), ('WM-GOR',233,3,'pic_comp'), ('WM-OTRAN-CD',236,1,'pic_numeric'), ('WM-POT',237,2,'pic_comp'), ('WM-EOT',239,2,'pic_comp'), ('WM-JOHN',241,1,'pic_numeric'), ('WM-OOIP',242,6,'pic_numeric'), ]
def oilProd_layout(startval):
layouts_map = {
'1' : {'name': 'OIL_FIELD', 'layout': OIL_FIELD_01},
'3' : {'name': 'OIL_LEASE', 'layout': OIL_LEASE_03},
'4' : {'name': 'OIL_MULTI_WELL', 'layout': OIL_MULTI_WELL_04},
'5' : {'name': 'OIL_WELL', 'layout': OIL_WELL_05},
}
try:
returnval = layouts_map[startval]
except KeyError:
returnval = None
return returnval
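# --- Editor's usage sketch (not part of the original source) ---
# A minimal, hypothetical illustration of how the dispatcher above could be
# used: the first character of a fixed-width record selects its layout, and
# each (name, start, length, kind) tuple slices one raw field out of the line.
def parse_oil_record(line):
    entry = oilProd_layout(line[0])
    if entry is None:
        return None
    fields = {name: line[start:start + length]
              for name, start, length, _kind in entry['layout']}
    return {'layout_name': entry['name'], 'fields': fields}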
| true
| true
|
f70cdafe08c975c4847f2ad36db208c8169a9f3e
| 1,841
|
py
|
Python
|
claib_cam.py
|
m4xst3r/Udacity-AdvancedLaneLines
|
c95a1831a418726ad374a1ebd65d4ec5e9900ab9
|
[
"MIT"
] | null | null | null |
claib_cam.py
|
m4xst3r/Udacity-AdvancedLaneLines
|
c95a1831a418726ad374a1ebd65d4ec5e9900ab9
|
[
"MIT"
] | null | null | null |
claib_cam.py
|
m4xst3r/Udacity-AdvancedLaneLines
|
c95a1831a418726ad374a1ebd65d4ec5e9900ab9
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import pickle
# read in all the images in the calibration folder
calib_images = glob.glob(r".\camera_cal\*.jpg")
# define chessboard parameters:
nx = 9
ny = 6
# Arrays to store image points and object points
imgpoints = []
objpoints = []
def get_points_chessboard(img, nx, ny):
"""
returns the obj and img points from one chessboard image
"""
# Generate obj points based on the chessboard from (0,0) to (nx-1, ny-1)
objp = np.zeros((nx*ny,3), np.float32)
# np.mgrid creates two nx-by-ny index arrays which are then merged together using T (transpose) and reshape. Only the first 2 columns of objp are replaced
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
#convert Image to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#get chess board corners
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
return ret, objp, corners
def calc_cam_values(img, objpoints, imgpoints):
"""
Calculates the camera matrix etc. using the function cv2.calibrateCamera
"""
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[:2], None, None)
return ret, mtx, dist, rvecs, tvecs
# Iterate through images and extract their image points
for image_path in calib_images:
image = cv2.imread(image_path)
ret, objp, imgp = get_points_chessboard(image, nx, ny)
if ret == True:
imgpoints.append(imgp)
objpoints.append(objp)
else:
print("image is not usable: ", image_path)
ret, mtx, dist, rvecs, tvecs = calc_cam_values(image, objpoints, imgpoints)
#write cam values into a dict
cam_values = { "mtx": mtx, "dist": dist,"rvecs": rvecs,"tvecs": tvecs}
#Save cam values in a pickle
pickle.dump(cam_values, open("cam_values.p", "wb"))
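# --- Editor's usage sketch (not part of the original source) ---
# Assuming the pickle written above, the stored camera matrix and distortion
# coefficients can later be reloaded to undistort an image.
# "test_image.jpg" is a hypothetical input path.
def undistort_example(image_path="test_image.jpg", cam_values_path="cam_values.p"):
    with open(cam_values_path, "rb") as f:
        saved = pickle.load(f)
    img = cv2.imread(image_path)
    # apply the calibration result to remove lens distortion
    return cv2.undistort(img, saved["mtx"], saved["dist"], None, saved["mtx"])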
| 29.222222
| 150
| 0.693645
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import pickle
calib_images = glob.glob(r".\camera_cal\*.jpg")
nx = 9
ny = 6
imgpoints = []
objpoints = []
def get_points_chessboard(img, nx, ny):
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
return ret, objp, corners
def calc_cam_values(img, objpoints, imgpoints):
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[:2], None, None)
return ret, mtx, dist, rvecs, tvecs
for image_path in calib_images:
image = cv2.imread(image_path)
ret, objp, imgp = get_points_chessboard(image, nx, ny)
if ret == True:
imgpoints.append(imgp)
objpoints.append(objp)
else:
print("image is not usable: ", image_path)
ret, mtx, dist, rvecs, tvecs = calc_cam_values(image, objpoints, imgpoints)
cam_values = { "mtx": mtx, "dist": dist,"rvecs": rvecs,"tvecs": tvecs}
pickle.dump(cam_values, open("cam_values.p", "wb"))
| true
| true
|
f70cdb7d010435165a57a0c040d4ec0555cc7980
| 3,636
|
py
|
Python
|
scieloopds/__init__.py
|
scieloorg/scielo-opds
|
e7e6b2ae4b0817c7b3b395cc45ca24c8b9039353
|
[
"Unlicense"
] | 4
|
2015-04-15T22:52:51.000Z
|
2022-03-31T13:23:59.000Z
|
scieloopds/__init__.py
|
DalavanCloud/scielo-opds
|
e7e6b2ae4b0817c7b3b395cc45ca24c8b9039353
|
[
"Unlicense"
] | 2
|
2016-02-29T18:49:12.000Z
|
2021-03-31T18:31:50.000Z
|
scieloopds/__init__.py
|
DalavanCloud/scielo-opds
|
e7e6b2ae4b0817c7b3b395cc45ca24c8b9039353
|
[
"Unlicense"
] | 1
|
2019-03-16T04:43:35.000Z
|
2019-03-16T04:43:35.000Z
|
# coding: utf-8
"""
.. module: scieloopds
:synopsis: WSGI Application to provide SciELO Books in OPDS protocol.
.. moduleauthor:: Allison Vollmann <allisonvoll@gmail.com>
Example configuration (additional parameters):
.. note::
[app:main]
...
mongo_uri = mongodb://localhost:27017/scieloopds
scielo_uri = http://books.scielo.org/api/v1/
auto_sync = True
auto_sync_interval = 60
items_per_page = 20
"""
import os
import sys
import logging
from urlparse import urlparse
from datetime import datetime, timedelta
import pymongo
from pyramid.config import Configurator
from pyramid.events import NewRequest
from pyramid.settings import asbool
from .sync import main as do_sync
from .utils import get_db_connection
APP_PATH = os.path.abspath(os.path.dirname(__file__))
DEFAULT_SETTINGS = [
('mongo_uri', 'OPDS_MONGO_URI', str,
'mongodb://localhost:27017/scieloopds'),
('scielo_uri', 'OPDS_SCIELO_URI', str,
'http://books.scielo.org/api/v1'),
('auto_sync', 'OPDS_AUTO_SYNC', bool,
True),
('auto_sync_interval', 'OPDS_AUTO_SYNC_INTERVAL', int,
60*60*12),
('items_per_page', 'OPDS_ITEMS_PER_PAGE', int,
20),
]
def parse_settings(settings):
"""Analisa e retorna as configurações da app com base no arquivo .ini e env.
As variáveis de ambiente possuem precedência em relação aos valores
definidos no arquivo .ini.
"""
parsed = {}
cfg = list(DEFAULT_SETTINGS)
for name, envkey, convert, default in cfg:
value = os.environ.get(envkey, settings.get(name, default))
if convert is not None:
value = convert(value)
parsed[name] = value
return parsed
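# --- Editor's usage sketch (not part of the original source) ---
# Illustration of the precedence described above: the value from the .ini
# settings dict is used unless the matching environment variable is set.
#
#   os.environ['OPDS_ITEMS_PER_PAGE'] = '50'
#   parse_settings({'items_per_page': '30'})['items_per_page']   # -> 50 (env wins)
#   parse_settings({})['scielo_uri']                             # -> default value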
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=parse_settings(settings))
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('root', '/opds/')
config.add_route('new', '/opds/new')
config.add_route('alpha_catalog', '/opds/alpha')
config.add_route('alpha_filter', '/opds/alpha/{id}')
config.add_route('publisher_catalog', '/opds/publisher')
config.add_route('publisher_filter', '/opds/publisher/{id}')
config.add_subscriber(add_mongo_db, NewRequest)
config.add_subscriber(start_sync, NewRequest)
config.scan(ignore='scieloopds.tests')
config.add_renderer('opds', factory='scieloopds.renderers.opds_factory')
return config.make_wsgi_app()
def ensure_indexes(db):
db.book.ensure_index([('updated', pymongo.DESCENDING)])
db.book.ensure_index([('title_ascii', pymongo.ASCENDING)])
db.alpha.ensure_index([('title_ascii', pymongo.ASCENDING)])
db.publisher.ensure_index([('title_ascii', pymongo.ASCENDING)])
def add_mongo_db(event):
settings = event.request.registry.settings
db = get_db_connection(settings)
ensure_indexes(db)
event.request.db = db
def start_sync(event):
settings = event.request.registry.settings
if settings['auto_sync']:
db = event.request.db
interval = settings['auto_sync_interval']
try:
update = db.catalog.find_one()
if update:
last_update = update['updated']
next_update = last_update + timedelta(seconds=interval)
if next_update < datetime.now():
do_sync(settings)
else:
do_sync(settings)
except pymongo.errors.AutoReconnect as e:
logging.getLogger(__name__).error('MongoDB: %s' % e.message)
| 30.813559
| 80
| 0.671067
|
import os
import sys
import logging
from urlparse import urlparse
from datetime import datetime, timedelta
import pymongo
from pyramid.config import Configurator
from pyramid.events import NewRequest
from pyramid.settings import asbool
from .sync import main as do_sync
from .utils import get_db_connection
APP_PATH = os.path.abspath(os.path.dirname(__file__))
DEFAULT_SETTINGS = [
('mongo_uri', 'OPDS_MONGO_URI', str,
'mongodb://localhost:27017/scieloopds'),
('scielo_uri', 'OPDS_SCIELO_URI', str,
'http://books.scielo.org/api/v1'),
('auto_sync', 'OPDS_AUTO_SYNC', bool,
True),
('auto_sync_interval', 'OPDS_AUTO_SYNC_INTERVAL', int,
60*60*12),
('items_per_page', 'OPDS_ITEMS_PER_PAGE', int,
20),
]
def parse_settings(settings):
parsed = {}
cfg = list(DEFAULT_SETTINGS)
for name, envkey, convert, default in cfg:
value = os.environ.get(envkey, settings.get(name, default))
if convert is not None:
value = convert(value)
parsed[name] = value
return parsed
def main(global_config, **settings):
config = Configurator(settings=parse_settings(settings))
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('root', '/opds/')
config.add_route('new', '/opds/new')
config.add_route('alpha_catalog', '/opds/alpha')
config.add_route('alpha_filter', '/opds/alpha/{id}')
config.add_route('publisher_catalog', '/opds/publisher')
config.add_route('publisher_filter', '/opds/publisher/{id}')
config.add_subscriber(add_mongo_db, NewRequest)
config.add_subscriber(start_sync, NewRequest)
config.scan(ignore='scieloopds.tests')
config.add_renderer('opds', factory='scieloopds.renderers.opds_factory')
return config.make_wsgi_app()
def ensure_indexes(db):
db.book.ensure_index([('updated', pymongo.DESCENDING)])
db.book.ensure_index([('title_ascii', pymongo.ASCENDING)])
db.alpha.ensure_index([('title_ascii', pymongo.ASCENDING)])
db.publisher.ensure_index([('title_ascii', pymongo.ASCENDING)])
def add_mongo_db(event):
settings = event.request.registry.settings
db = get_db_connection(settings)
ensure_indexes(db)
event.request.db = db
def start_sync(event):
settings = event.request.registry.settings
if settings['auto_sync']:
db = event.request.db
interval = settings['auto_sync_interval']
try:
update = db.catalog.find_one()
if update:
last_update = update['updated']
next_update = last_update + timedelta(seconds=interval)
if next_update < datetime.now():
do_sync(settings)
else:
do_sync(settings)
except pymongo.errors.AutoReconnect as e:
logging.getLogger(__name__).error('MongoDB: %s' % e.message)
| true
| true
|
f70cdc58f3bab9c011738c42991e61f6e7f48ee7
| 10,593
|
py
|
Python
|
main/tests/test_management.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | null | null | null |
main/tests/test_management.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | null | null | null |
main/tests/test_management.py
|
geoah/mataroa
|
5646af778bca8625b2d5efa4ebcfbe69a5f7dd12
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from io import StringIO
from unittest.mock import patch
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from main import models
from main.management.commands import mail_exports, process_notifications
class EnqueueNotificationsTest(TestCase):
"""
Test that the enqueue_notifications management command creates NotificationRecords
for the blog_user's subscribers.
"""
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", notifications_on=True
)
post_data = {
"title": "Old post",
"slug": "old-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2019, 1, 2)),
}
models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Yesterday post",
"slug": "yesterday-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
models.Post.objects.create(owner=self.user, **post_data)
# as inactive, it should be ignored by the enqueue functionality
models.Notification.objects.create(
blog_user=self.user,
email="inactive@example.com",
is_active=False,
)
self.notification = models.Notification.objects.create(
blog_user=self.user, email="s@example.com"
)
def test_command(self):
output = StringIO()
with patch.object(timezone, "now", return_value=datetime(2020, 1, 2, 9, 00)):
call_command("enqueue_notifications", stdout=output)
# notification records
self.assertEqual(len(models.NotificationRecord.objects.all()), 1)
self.assertEqual(
models.NotificationRecord.objects.first().notification.email,
self.notification.email,
)
self.assertEqual(
models.NotificationRecord.objects.first().post.title, "Yesterday post"
)
self.assertIsNone(models.NotificationRecord.objects.first().sent_at)
# logging
self.assertIn("Enqueuing notifications started.", output.getvalue())
self.assertIn(
"Adding notification record for 'Yesterday post' to 's@example.com'",
output.getvalue(),
)
self.assertIn("Enqueuing complete for 'Yesterday post'", output.getvalue())
self.assertIn("Enqueuing finished.", output.getvalue())
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
class ProcessNotificationsTest(TestCase):
"""
Test that process_notifications sends emails to the subscribers of the
NotificationRecords that exist.
"""
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", notifications_on=True
)
post_data = {
"title": "Yesterday post",
"slug": "yesterday-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
self.post_yesterday = models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Today post",
"slug": "today-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 2)),
}
self.post_today = models.Post.objects.create(owner=self.user, **post_data)
self.notification = models.Notification.objects.create(
blog_user=self.user, email="zf@sirodoht.com"
)
# notification records
self.notificationrecord_yesterday = models.NotificationRecord.objects.create(
notification=self.notification,
post=self.post_yesterday,
sent_at=None,
)
self.notificationrecord_today = models.NotificationRecord.objects.create(
notification=self.notification,
post=self.post_today,
sent_at=None,
)
def test_mail_backend(self):
connection = process_notifications.get_mail_connection()
self.assertEqual(connection.host, settings.EMAIL_HOST_BROADCASTS)
def test_command(self):
output = StringIO()
with patch.object(
timezone, "now", return_value=datetime(2020, 1, 2, 13, 00)
), patch.object(
# Django default test runner overrides SMTP EmailBackend with locmem,
# but because we re-import the SMTP backend in
# process_notifications.get_mail_connection, we need to mock it here too.
process_notifications,
"get_mail_connection",
return_value=mail.get_connection(
"django.core.mail.backends.locmem.EmailBackend"
),
):
call_command("process_notifications", stdout=output)
# notification records
records = models.NotificationRecord.objects.all()
self.assertEqual(len(records), 2)
# notification record for yesterday's post
self.assertEqual(
records.filter(sent_at__isnull=False).first().notification.email,
self.notificationrecord_today.notification.email,
)
self.assertEqual(
records.filter(sent_at__isnull=False).first().post.title, "Yesterday post"
)
# notification record for today's post
records = models.NotificationRecord.objects.all()
self.assertEqual(
records.filter(sent_at__isnull=True).first().notification.email,
self.notificationrecord_today.notification.email,
)
self.assertEqual(
records.filter(sent_at__isnull=True).first().post.title, "Today post"
)
# logging
self.assertIn("Processing notifications.", output.getvalue())
self.assertIn("Broadcast sent. Total 1 emails.", output.getvalue())
self.assertIn(
"Adding notification record for 'Yesterday post' to 'zf@sirodoht.com'",
output.getvalue(),
)
# email
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Yesterday post")
self.assertIn("To unsubscribe", mail.outbox[0].body)
# email headers
self.assertEqual(mail.outbox[0].to, [self.notification.email])
self.assertEqual(mail.outbox[0].reply_to, [self.user.email])
self.assertEqual(
mail.outbox[0].from_email,
f"{self.user.username} <{self.user.username}@{settings.EMAIL_FROM_HOST}>",
)
self.assertEqual(
mail.outbox[0].extra_headers["X-PM-Message-Stream"], "newsletters"
)
self.assertIn(
"/newsletter/unsubscribe/",
mail.outbox[0].extra_headers["List-Unsubscribe"],
)
self.assertEqual(
mail.outbox[0].extra_headers["List-Unsubscribe-Post"],
"List-Unsubscribe=One-Click",
)
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
class MailExportsTest(TestCase):
"""
Test that mail_exports sends emails to users with `mail_export_on` enabled.
"""
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", mail_export_on=True
)
post_data = {
"title": "A post",
"slug": "a-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
self.post_a = models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Second post",
"slug": "second-post",
"body": "Content sentence two.",
"published_at": timezone.make_aware(datetime(2020, 1, 2)),
}
self.post_b = models.Post.objects.create(owner=self.user, **post_data)
def test_mail_backend(self):
connection = mail_exports.get_mail_connection()
self.assertEqual(connection.host, settings.EMAIL_HOST_BROADCASTS)
def test_command(self):
output = StringIO()
with patch.object(
timezone, "now", return_value=datetime(2020, 1, 3, 00, 00)
), patch.object(
# Django default test runner overrides SMTP EmailBackend with locmem,
# but because we re-import the SMTP backend in
# mail_exports.get_mail_connection, we need to mock it here too.
mail_exports,
"get_mail_connection",
return_value=mail.get_connection(
"django.core.mail.backends.locmem.EmailBackend"
),
):
call_command("mail_exports", stdout=output)
# export records
records = models.ExportRecord.objects.all()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].user, self.user)
self.assertIn("export-markdown-", records[0].name)
# logging
self.assertIn("Processing email exports.", output.getvalue())
self.assertIn(f"Processing user {self.user.username}.", output.getvalue())
self.assertIn(f"Export sent to {self.user.username}.", output.getvalue())
self.assertIn(
f"Logging export record for '{records[0].name}'.", output.getvalue()
)
self.assertIn("Emailing all exports complete.", output.getvalue())
# email
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Mataroa export", mail.outbox[0].subject)
self.assertIn("Unsubscribe", mail.outbox[0].body)
# email headers
self.assertEqual(mail.outbox[0].to, [self.user.email])
self.assertEqual(
mail.outbox[0].from_email,
settings.DEFAULT_FROM_EMAIL,
)
self.assertEqual(mail.outbox[0].extra_headers["X-PM-Message-Stream"], "exports")
self.assertIn(
"/export/unsubscribe/",
mail.outbox[0].extra_headers["List-Unsubscribe"],
)
self.assertEqual(
mail.outbox[0].extra_headers["List-Unsubscribe-Post"],
"List-Unsubscribe=One-Click",
)
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
| 35.787162
| 88
| 0.616634
|
from datetime import datetime
from io import StringIO
from unittest.mock import patch
from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.test import TestCase
from django.utils import timezone
from main import models
from main.management.commands import mail_exports, process_notifications
class EnqueueNotificationsTest(TestCase):
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", notifications_on=True
)
post_data = {
"title": "Old post",
"slug": "old-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2019, 1, 2)),
}
models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Yesterday post",
"slug": "yesterday-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
models.Post.objects.create(owner=self.user, **post_data)
models.Notification.objects.create(
blog_user=self.user,
email="inactive@example.com",
is_active=False,
)
self.notification = models.Notification.objects.create(
blog_user=self.user, email="s@example.com"
)
def test_command(self):
output = StringIO()
with patch.object(timezone, "now", return_value=datetime(2020, 1, 2, 9, 00)):
call_command("enqueue_notifications", stdout=output)
self.assertEqual(len(models.NotificationRecord.objects.all()), 1)
self.assertEqual(
models.NotificationRecord.objects.first().notification.email,
self.notification.email,
)
self.assertEqual(
models.NotificationRecord.objects.first().post.title, "Yesterday post"
)
self.assertIsNone(models.NotificationRecord.objects.first().sent_at)
self.assertIn("Enqueuing notifications started.", output.getvalue())
self.assertIn(
"Adding notification record for 'Yesterday post' to 's@example.com'",
output.getvalue(),
)
self.assertIn("Enqueuing complete for 'Yesterday post'", output.getvalue())
self.assertIn("Enqueuing finished.", output.getvalue())
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
class ProcessNotificationsTest(TestCase):
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", notifications_on=True
)
post_data = {
"title": "Yesterday post",
"slug": "yesterday-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
self.post_yesterday = models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Today post",
"slug": "today-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 2)),
}
self.post_today = models.Post.objects.create(owner=self.user, **post_data)
self.notification = models.Notification.objects.create(
blog_user=self.user, email="zf@sirodoht.com"
)
self.notificationrecord_yesterday = models.NotificationRecord.objects.create(
notification=self.notification,
post=self.post_yesterday,
sent_at=None,
)
self.notificationrecord_today = models.NotificationRecord.objects.create(
notification=self.notification,
post=self.post_today,
sent_at=None,
)
def test_mail_backend(self):
connection = process_notifications.get_mail_connection()
self.assertEqual(connection.host, settings.EMAIL_HOST_BROADCASTS)
def test_command(self):
output = StringIO()
with patch.object(
timezone, "now", return_value=datetime(2020, 1, 2, 13, 00)
), patch.object(
process_notifications,
"get_mail_connection",
return_value=mail.get_connection(
"django.core.mail.backends.locmem.EmailBackend"
),
):
call_command("process_notifications", stdout=output)
records = models.NotificationRecord.objects.all()
self.assertEqual(len(records), 2)
self.assertEqual(
records.filter(sent_at__isnull=False).first().notification.email,
self.notificationrecord_today.notification.email,
)
self.assertEqual(
records.filter(sent_at__isnull=False).first().post.title, "Yesterday post"
)
# notification record for today's post
records = models.NotificationRecord.objects.all()
self.assertEqual(
records.filter(sent_at__isnull=True).first().notification.email,
self.notificationrecord_today.notification.email,
)
self.assertEqual(
records.filter(sent_at__isnull=True).first().post.title, "Today post"
)
self.assertIn("Processing notifications.", output.getvalue())
self.assertIn("Broadcast sent. Total 1 emails.", output.getvalue())
self.assertIn(
"Adding notification record for 'Yesterday post' to 'zf@sirodoht.com'",
output.getvalue(),
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Yesterday post")
self.assertIn("To unsubscribe", mail.outbox[0].body)
self.assertEqual(mail.outbox[0].to, [self.notification.email])
self.assertEqual(mail.outbox[0].reply_to, [self.user.email])
self.assertEqual(
mail.outbox[0].from_email,
f"{self.user.username} <{self.user.username}@{settings.EMAIL_FROM_HOST}>",
)
self.assertEqual(
mail.outbox[0].extra_headers["X-PM-Message-Stream"], "newsletters"
)
self.assertIn(
"/newsletter/unsubscribe/",
mail.outbox[0].extra_headers["List-Unsubscribe"],
)
self.assertEqual(
mail.outbox[0].extra_headers["List-Unsubscribe-Post"],
"List-Unsubscribe=One-Click",
)
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
class MailExportsTest(TestCase):
def setUp(self):
self.user = models.User.objects.create(
username="alice", email="alice@wonderland.com", mail_export_on=True
)
post_data = {
"title": "A post",
"slug": "a-post",
"body": "Content sentence.",
"published_at": timezone.make_aware(datetime(2020, 1, 1)),
}
self.post_a = models.Post.objects.create(owner=self.user, **post_data)
post_data = {
"title": "Second post",
"slug": "second-post",
"body": "Content sentence two.",
"published_at": timezone.make_aware(datetime(2020, 1, 2)),
}
self.post_b = models.Post.objects.create(owner=self.user, **post_data)
def test_mail_backend(self):
connection = mail_exports.get_mail_connection()
self.assertEqual(connection.host, settings.EMAIL_HOST_BROADCASTS)
def test_command(self):
output = StringIO()
with patch.object(
timezone, "now", return_value=datetime(2020, 1, 3, 00, 00)
), patch.object(
mail_exports,
"get_mail_connection",
return_value=mail.get_connection(
"django.core.mail.backends.locmem.EmailBackend"
),
):
call_command("mail_exports", stdout=output)
records = models.ExportRecord.objects.all()
self.assertEqual(len(records), 1)
self.assertEqual(records[0].user, self.user)
self.assertIn("export-markdown-", records[0].name)
self.assertIn("Processing email exports.", output.getvalue())
self.assertIn(f"Processing user {self.user.username}.", output.getvalue())
self.assertIn(f"Export sent to {self.user.username}.", output.getvalue())
self.assertIn(
f"Logging export record for '{records[0].name}'.", output.getvalue()
)
self.assertIn("Emailing all exports complete.", output.getvalue())
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Mataroa export", mail.outbox[0].subject)
self.assertIn("Unsubscribe", mail.outbox[0].body)
self.assertEqual(mail.outbox[0].to, [self.user.email])
self.assertEqual(
mail.outbox[0].from_email,
settings.DEFAULT_FROM_EMAIL,
)
self.assertEqual(mail.outbox[0].extra_headers["X-PM-Message-Stream"], "exports")
self.assertIn(
"/export/unsubscribe/",
mail.outbox[0].extra_headers["List-Unsubscribe"],
)
self.assertEqual(
mail.outbox[0].extra_headers["List-Unsubscribe-Post"],
"List-Unsubscribe=One-Click",
)
def tearDown(self):
models.User.objects.all().delete()
models.Post.objects.all().delete()
| true
| true
|
f70cdc7487d74f04b362aa3575ebac9864924375
| 6,777
|
py
|
Python
|
python3/koans/about_attribute_access.py
|
adrianhryn/Python_Koans
|
e5ec28939ac4cf7e91e470d9545ef7365fef4077
|
[
"MIT"
] | null | null | null |
python3/koans/about_attribute_access.py
|
adrianhryn/Python_Koans
|
e5ec28939ac4cf7e91e470d9545ef7365fef4077
|
[
"MIT"
] | null | null | null |
python3/koans/about_attribute_access.py
|
adrianhryn/Python_Koans
|
e5ec28939ac4cf7e91e470d9545ef7365fef4077
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMessagePassing in the Ruby Koans
#
from runner.koan import *
class AboutAttributeAccess(Koan):
class TypicalObject:
pass
def test_calling_undefined_functions_normally_results_in_errors(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.foobar()
def test_calling_getattribute_causes_an_attribute_error(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.__getattribute__('foobar')
# THINK ABOUT IT:
#
# If the method __getattribute__() causes the AttributeError, then
# what would happen if we redefine __getattribute__()?
# ------------------------------------------------------------------
class CatchAllAttributeReads:
def __getattribute__(self, attr_name):
return "Someone called '" + attr_name + "' and it could not be found"
def test_all_attribute_reads_are_caught(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(catcher.foobar, "Someone called 'foobar' and it could not be found")
def test_intercepting_return_values_can_disrupt_the_call_chain(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(catcher.foobaz, "Someone called 'foobaz' and it could not be found") # This is fine
try:
catcher.foobaz(1)
except TypeError as ex:
err_msg = ex.args[0]
self.assertRegex(err_msg, "'str' object is not callable")
# foobaz returns a string. What happens to the '(1)' part?
# Try entering this into a python console to reproduce the issue:
#
# "foobaz"(1)
#
def test_changes_to_the_getattribute_implementation_affects_getattr_function(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(getattr(catcher, 'any_attribute'), "Someone called 'any_attribute' and it could not be found")
# ------------------------------------------------------------------
class WellBehavedFooCatcher:
def __getattribute__(self, attr_name):
if attr_name[:3] == "foo":
return "Foo to you too"
else:
return super().__getattribute__(attr_name)
def test_foo_attributes_are_caught(self):
catcher = self.WellBehavedFooCatcher()
self.assertEqual("Foo to you too", catcher.foo_bar)
self.assertEqual("Foo to you too", catcher.foo_baz)
def test_non_foo_messages_are_treated_normally(self):
catcher = self.WellBehavedFooCatcher()
with self.assertRaises(AttributeError): catcher.normal_undefined_attribute
# ------------------------------------------------------------------
global stack_depth
stack_depth = 0
class RecursiveCatcher:
def __init__(self):
global stack_depth
stack_depth = 0
self.no_of_getattribute_calls = 0
def __getattribute__(self, attr_name):
# We need something that is outside the scope of this class:
global stack_depth
stack_depth += 1
if stack_depth<=10: # to prevent a stack overflow
self.no_of_getattribute_calls += 1
# Oops! We just accessed an attribute (no_of_getattribute_calls)
# Guess what happens when self.no_of_getattribute_calls is
# accessed?
# Using 'object' directly because using super() here will also
# trigger a __getattribute__() call.
return object.__getattribute__(self, attr_name)
def my_method(self):
pass
def test_getattribute_is_a_bit_overzealous_sometimes(self):
catcher = self.RecursiveCatcher()
catcher.my_method()
global stack_depth
self.assertEqual(11, stack_depth)
# ------------------------------------------------------------------
class MinimalCatcher:
class DuffObject: pass
def __init__(self):
self.no_of_getattr_calls = 0
def __getattr__(self, attr_name):
self.no_of_getattr_calls += 1
return self.DuffObject
def my_method(self):
pass
def test_getattr_ignores_known_attributes(self):
catcher = self.MinimalCatcher()
catcher.my_method()
self.assertEqual(0, catcher.no_of_getattr_calls)
def test_getattr_only_catches_unknown_attributes(self):
catcher = self.MinimalCatcher()
catcher.purple_flamingos()
catcher.free_pie()
self.assertEqual("DuffObject",
type(catcher.give_me_duff_or_give_me_death()).__name__)
self.assertEqual(3, catcher.no_of_getattr_calls)
# ------------------------------------------------------------------
class PossessiveSetter(object):
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[-5:] == 'comic':
new_attr_name = "my_" + new_attr_name
elif attr_name[-3:] == 'pie':
new_attr_name = "a_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_setattr_intercepts_attribute_assignments(self):
fanboy = self.PossessiveSetter()
fanboy.comic = 'The Laminator, issue #1'
fanboy.pie = 'blueberry'
self.assertEqual("blueberry", fanboy.a_pie)
#
# NOTE: Change the prefix to make this next assert pass
#
prefix = 'my'
self.assertEqual("The Laminator, issue #1", getattr(fanboy, prefix + '_comic'))
# ------------------------------------------------------------------
class ScarySetter:
def __init__(self):
self.num_of_coconuts = 9
self._num_of_private_coconuts = 2
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[0] != '_':
new_attr_name = "altered_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_it_modifies_external_attribute_as_expected(self):
setter = self.ScarySetter()
setter.e = "mc hammer"
self.assertEqual("mc hammer", setter.altered_e)
def test_it_mangles_some_internal_attributes(self):
setter = self.ScarySetter()
try:
coconuts = setter.num_of_coconuts
except AttributeError:
self.assertEqual(9, setter.altered_num_of_coconuts)
def test_in_this_case_private_attributes_remain_unmangled(self):
setter = self.ScarySetter()
self.assertEqual(2, setter._num_of_private_coconuts)
| 32.118483
| 119
| 0.607053
|
from runner.koan import *
class AboutAttributeAccess(Koan):
class TypicalObject:
pass
def test_calling_undefined_functions_normally_results_in_errors(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.foobar()
def test_calling_getattribute_causes_an_attribute_error(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.__getattribute__('foobar')
class CatchAllAttributeReads:
def __getattribute__(self, attr_name):
return "Someone called '" + attr_name + "' and it could not be found"
def test_all_attribute_reads_are_caught(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(catcher.foobar, "Someone called 'foobar' and it could not be found")
def test_intercepting_return_values_can_disrupt_the_call_chain(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(catcher.foobaz, "Someone called 'foobaz' and it could not be found")
try:
catcher.foobaz(1)
except TypeError as ex:
err_msg = ex.args[0]
self.assertRegex(err_msg, "'str' object is not callable")
def test_changes_to_the_getattribute_implementation_affects_getattr_function(self):
catcher = self.CatchAllAttributeReads()
self.assertRegex(getattr(catcher, 'any_attribute'), "Someone called 'any_attribute' and it could not be found")
class WellBehavedFooCatcher:
def __getattribute__(self, attr_name):
if attr_name[:3] == "foo":
return "Foo to you too"
else:
return super().__getattribute__(attr_name)
def test_foo_attributes_are_caught(self):
catcher = self.WellBehavedFooCatcher()
self.assertEqual("Foo to you too", catcher.foo_bar)
self.assertEqual("Foo to you too", catcher.foo_baz)
def test_non_foo_messages_are_treated_normally(self):
catcher = self.WellBehavedFooCatcher()
with self.assertRaises(AttributeError): catcher.normal_undefined_attribute
global stack_depth
stack_depth = 0
class RecursiveCatcher:
def __init__(self):
global stack_depth
stack_depth = 0
self.no_of_getattribute_calls = 0
def __getattribute__(self, attr_name):
global stack_depth
stack_depth += 1
if stack_depth<=10: self.no_of_getattribute_calls += 1
return object.__getattribute__(self, attr_name)
def my_method(self):
pass
def test_getattribute_is_a_bit_overzealous_sometimes(self):
catcher = self.RecursiveCatcher()
catcher.my_method()
global stack_depth
self.assertEqual(11, stack_depth)
class MinimalCatcher:
class DuffObject: pass
def __init__(self):
self.no_of_getattr_calls = 0
def __getattr__(self, attr_name):
self.no_of_getattr_calls += 1
return self.DuffObject
def my_method(self):
pass
def test_getattr_ignores_known_attributes(self):
catcher = self.MinimalCatcher()
catcher.my_method()
self.assertEqual(0, catcher.no_of_getattr_calls)
def test_getattr_only_catches_unknown_attributes(self):
catcher = self.MinimalCatcher()
catcher.purple_flamingos()
catcher.free_pie()
self.assertEqual("DuffObject",
type(catcher.give_me_duff_or_give_me_death()).__name__)
self.assertEqual(3, catcher.no_of_getattr_calls)
class PossessiveSetter(object):
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[-5:] == 'comic':
new_attr_name = "my_" + new_attr_name
elif attr_name[-3:] == 'pie':
new_attr_name = "a_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_setattr_intercepts_attribute_assignments(self):
fanboy = self.PossessiveSetter()
fanboy.comic = 'The Laminator, issue #1'
fanboy.pie = 'blueberry'
self.assertEqual("blueberry", fanboy.a_pie)
prefix = 'my'
self.assertEqual("The Laminator, issue #1", getattr(fanboy, prefix + '_comic'))
class ScarySetter:
def __init__(self):
self.num_of_coconuts = 9
self._num_of_private_coconuts = 2
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[0] != '_':
new_attr_name = "altered_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_it_modifies_external_attribute_as_expected(self):
setter = self.ScarySetter()
setter.e = "mc hammer"
self.assertEqual("mc hammer", setter.altered_e)
def test_it_mangles_some_internal_attributes(self):
setter = self.ScarySetter()
try:
coconuts = setter.num_of_coconuts
except AttributeError:
self.assertEqual(9, setter.altered_num_of_coconuts)
def test_in_this_case_private_attributes_remain_unmangled(self):
setter = self.ScarySetter()
self.assertEqual(2, setter._num_of_private_coconuts)
| true
| true
|
f70cddb894741e5d4601556682226a7d025c1177
| 407
|
py
|
Python
|
tasks/__init__.py
|
jhrcook/awesome-streamlit
|
83c4c4ec13f274b30e586715bb080e01432b87e6
|
[
"CC0-1.0"
] | null | null | null |
tasks/__init__.py
|
jhrcook/awesome-streamlit
|
83c4c4ec13f274b30e586715bb080e01432b87e6
|
[
"CC0-1.0"
] | null | null | null |
tasks/__init__.py
|
jhrcook/awesome-streamlit
|
83c4c4ec13f274b30e586715bb080e01432b87e6
|
[
"CC0-1.0"
] | null | null | null |
"""Here we import the different task submodules/ collections"""
from invoke import Collection, task
from tasks import docker, package, sphinx, test # pylint: disable=import-self
# pylint: disable=invalid-name
# as invoke only recognizes lower case
namespace = Collection()
namespace.add_collection(test)
namespace.add_collection(docker)
namespace.add_collection(package)
namespace.add_collection(sphinx)
| 31.307692
| 78
| 0.808354
|
from invoke import Collection, task
from tasks import docker, package, sphinx, test
namespace = Collection()
namespace.add_collection(test)
namespace.add_collection(docker)
namespace.add_collection(package)
namespace.add_collection(sphinx)
| true
| true
|
f70cddc3e741ebf77bb585329e60eada73e20d13
| 1,768
|
py
|
Python
|
albow/openGL/GLOrtho.py
|
hasii2011/albow-python-3
|
04b9d42705b370b62f0e49d10274eebf3ac54bc1
|
[
"MIT"
] | 6
|
2019-04-30T23:50:39.000Z
|
2019-11-04T06:15:02.000Z
|
albow/openGL/GLOrtho.py
|
hasii2011/albow-python-3
|
04b9d42705b370b62f0e49d10274eebf3ac54bc1
|
[
"MIT"
] | 73
|
2019-05-12T18:43:14.000Z
|
2021-04-13T19:19:03.000Z
|
albow/openGL/GLOrtho.py
|
hasii2011/albow-python-3
|
04b9d42705b370b62f0e49d10274eebf3ac54bc1
|
[
"MIT"
] | null | null | null |
from pygame import Rect
# noinspection PyPackageRequirements
from OpenGL import GL
from albow.openGL.GLViewport import GLViewport
class GLOrtho(GLViewport):
"""
GLOrtho provides an OpenGL drawing area with an orthographic projection.
Using a GLOrtho widget is the same as using a GLViewport, except that you do not need to
provide a `setup_projection()` method.
------
------
"""
def __init__(self, rect: Rect=None, xmin=-1, xmax=1, ymin=-1, ymax=1, near=-1, far=1, **kwds):
"""
Creates a GLOrtho instance with the given initial values for its projection parameters.
Args:
rect: A pygame Rect
xmin: Specify the coordinates for the left vertical clipping planes.
xmax: Specify the coordinates for the right vertical clipping planes.
ymin: Specify the coordinates for the bottom horizontal clipping planes.
ymax: Specify the coordinates for the top horizontal clipping planes.
near: Specify the distances to the nearer clipping planes.
These distances are negative if the plane is to be behind the viewer.
far: Specify the distances to the depth clipping planes.
These distances are negative if the plane is to be behind the viewer.
**kwds:
"""
#
# Python 3 update
#
# GLViewport.__init__(self, rect, **kwds)
super().__init__(rect, **kwds)
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.near = near
self.far = far
def setup_projection(self):
GL.glOrtho(self.xmin, self.xmax, self.ymin, self.ymax, self.near, self.far)
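# --- Editor's usage sketch (not part of the original source) ---
# Only the constructor shown above is assumed here; the drawing hook to
# override comes from GLViewport and is not reproduced in this excerpt.
#
#   ortho = GLOrtho(Rect(0, 0, 320, 240), xmin=0, xmax=32, ymin=0, ymax=24)
#
# GL calls made while this widget draws are then projected orthographically
# with glOrtho using the bounds passed to the constructor.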
| 29.466667
| 98
| 0.625566
|
from pygame import Rect
from OpenGL import GL
from albow.openGL.GLViewport import GLViewport
class GLOrtho(GLViewport):
def __init__(self, rect: Rect=None, xmin=-1, xmax=1, ymin=-1, ymax=1, near=-1, far=1, **kwds):
super().__init__(rect, **kwds)
self.xmin = xmin
self.xmax = xmax
self.ymin = ymin
self.ymax = ymax
self.near = near
self.far = far
def setup_projection(self):
GL.glOrtho(self.xmin, self.xmax, self.ymin, self.ymax, self.near, self.far)
| true
| true
|
f70cdf1b212aa1b3168e274d8de517262a3bdb7d
| 7,227
|
py
|
Python
|
pontoon/tags/tests/utils/test_tags.py
|
nanopony/pontoon
|
ebd342922d04df2dfbbce23ac5a15ee1e71d50fe
|
[
"BSD-3-Clause"
] | 1
|
2018-12-24T11:15:35.000Z
|
2018-12-24T11:15:35.000Z
|
pontoon/tags/tests/utils/test_tags.py
|
nanopony/pontoon
|
ebd342922d04df2dfbbce23ac5a15ee1e71d50fe
|
[
"BSD-3-Clause"
] | 1
|
2018-08-03T12:02:41.000Z
|
2018-08-03T12:02:41.000Z
|
pontoon/tags/tests/utils/test_tags.py
|
nanopony/pontoon
|
ebd342922d04df2dfbbce23ac5a15ee1e71d50fe
|
[
"BSD-3-Clause"
] | 1
|
2019-07-17T21:21:41.000Z
|
2019-07-17T21:21:41.000Z
|
import pytest
from mock import MagicMock, patch, PropertyMock
from pontoon.tags.models import Tag
from pontoon.tags.utils import (
TagsLatestTranslationsTool, TagsResourcesTool,
TagsStatsTool, TagsTool, TagTool)
from pontoon.tags.utils.base import Clonable
def test_util_tags_tool():
# test tags tool instantiation
tags_tool = TagsTool()
assert tags_tool.tag_class is TagTool
assert tags_tool.resources_class is TagsResourcesTool
assert tags_tool.translations_class is TagsLatestTranslationsTool
assert tags_tool.stats_class is TagsStatsTool
assert tags_tool.locales is None
assert tags_tool.projects is None
assert tags_tool.priority is None
assert tags_tool.slug is None
assert tags_tool.path is None
assert tags_tool.tag_manager == Tag.objects
@patch('pontoon.tags.utils.TagsTool.stats_class')
def test_util_tags_tool_stats(stats_mock, tag_init_kwargs):
# tests instantiation of tag.stats_tool with different args
tags_tool = TagsTool(**tag_init_kwargs)
stats_mock.return_value = 23
assert tags_tool.stat_tool == 23
assert stats_mock.call_args[1] == tag_init_kwargs
@pytest.mark.parametrize(
"kwargs",
[dict(
slug=None,
locales=None,
projects=None,
path=None),
dict(
slug=1,
locales=2,
projects=3,
path=4)])
@patch('pontoon.tags.utils.TagsTool.resources_class')
def test_util_tags_tool_resources(resources_mock, kwargs):
# tests instantiation of tag.resources_tool with different args
tags_tool = TagsTool(**kwargs)
resources_mock.return_value = 23
assert tags_tool.resource_tool == 23
assert resources_mock.call_args[1] == kwargs
@pytest.mark.parametrize(
"kwargs",
[dict(
slug=None,
locales=None,
projects=None),
dict(
slug=1,
locales=2,
projects=3)])
@patch('pontoon.tags.utils.TagsTool.translations_class')
def test_util_tags_tool_translations(trans_mock, kwargs):
# tests instantiation of tag.translations_tool with different args
tags_tool = TagsTool(**kwargs)
trans_mock.return_value = 23
assert tags_tool.translation_tool == 23
assert trans_mock.call_args[1] == kwargs
@patch('pontoon.tags.utils.TagsTool.tag_class')
@patch('pontoon.tags.utils.TagsTool.get_tags')
@patch('pontoon.tags.utils.TagsTool.__len__')
@patch('pontoon.tags.utils.TagsTool.__iter__')
def test_util_tags_tool_get(iter_mock, len_mock, tags_mock, class_mock):
# tests getting a TagTool from TagsTool
tags_tool = TagsTool()
class_mock.return_value = 23
len_mock.return_value = 7
iter_mock.return_value = iter([3, 17, 73])
# with no slug returns first result from iter(self)
assert tags_tool.get() == 3
assert not class_mock.called
assert not tags_mock.called
assert len_mock.called
assert iter_mock.called
len_mock.reset_mock()
iter_mock.reset_mock()
# calling with slug creates a TagTool instance
# and doesn't call iter(self) at all
assert tags_tool.get(113) == 23
assert not len_mock.called
assert not iter_mock.called
assert (
list(class_mock.call_args)
== [(tags_tool, ), {}])
assert (
list(tags_mock.call_args)
== [(), {'slug': 113}])
def test_util_tags_tool_call_and_clone():
# tests cloning a TagsTool
tags_tool = TagsTool()
cloned = tags_tool()
assert cloned is not tags_tool
assert isinstance(tags_tool, Clonable)
assert isinstance(cloned, Clonable)
@patch('pontoon.tags.utils.TagsTool.__call__')
def test_util_tags_tool_getitem(call_mock):
# test that calling __getitem__ calls __call__ with slug
tags_tool = TagsTool()
slugs = ["foo", "bar"]
for slug in slugs:
tags_tool[slug]
assert call_mock.call_args_list[0][1] == dict(slug=slugs[0])
assert call_mock.call_args_list[1][1] == dict(slug=slugs[1])
@patch('pontoon.tags.utils.TagsTool.iter_tags')
@patch('pontoon.tags.utils.TagsTool.stat_tool',
new_callable=PropertyMock)
def test_util_tags_tool_iter(stats_mock, iter_mock):
# tests that when you iter it calls iter_tags with
# stats data
tags_tool = TagsTool()
stats_mock.configure_mock(
**{'return_value.data': [7, 23]})
iter_mock.return_value = iter([])
assert list(tags_tool) == []
assert stats_mock.called
assert (
list(iter_mock.call_args)
== [([7, 23],), {}])
@patch('pontoon.tags.utils.TagsTool.stat_tool',
new_callable=PropertyMock)
def test_util_tags_tool_len(stats_mock):
# tests that when you len() you get the len
# of the stats data
m_len = MagicMock()
m_len.__len__.return_value = 23
stats_mock.configure_mock(
**{'return_value.data': m_len})
tags_tool = TagsTool()
assert len(tags_tool) == 23
assert m_len.__len__.called
@patch('pontoon.tags.utils.TagsTool.translation_tool',
new_callable=PropertyMock)
@patch('pontoon.tags.utils.TagsTool.tag_class')
def test_util_tags_tool_iter_tags(tag_mock, trans_mock):
# tests that iter_tags instantiates a TagTool with
# stat data and latest_translation data
trans_mock.configure_mock(
**{'return_value.data.get.return_value': 23})
tags_tool = TagsTool()
list(
tags_tool.iter_tags(
[dict(resource__tag=1, foo="bar"),
dict(resource__tag=2, foo="bar"),
dict(resource__tag=3, foo="bar")]))
# translation_tool.data.get() was called 3 times with tag pks
assert (
[x[0][0] for x in trans_mock.return_value.data.get.call_args_list]
== [1, 2, 3])
# TagTool was called 3 times with the tags tool as arg
assert (
[x[0][0] for x in tag_mock.call_args_list]
== [tags_tool] * 3)
# and stat + translation data as kwargs
assert (
[x[1] for x in tag_mock.call_args_list]
== [{'resource__tag': 1, 'latest_translation': 23, 'foo': 'bar'},
{'resource__tag': 2, 'latest_translation': 23, 'foo': 'bar'},
{'resource__tag': 3, 'latest_translation': 23, 'foo': 'bar'}])
@patch('pontoon.tags.utils.TagsTool.tag_manager',
new_callable=PropertyMock)
@patch('pontoon.tags.utils.tags.glob_to_regex')
def test_util_tags_tool_get_tags(glob_mock, tag_mock):
glob_mock.return_value = 17
filter_mock = MagicMock(
**{'filter.return_value': 23})
tag_mock.configure_mock(
**{'return_value.values.return_value': filter_mock})
tags_tool = TagsTool()
# no slug provided, returns `values`
assert tags_tool.get_tags() is filter_mock
assert not filter_mock.called
assert not glob_mock.called
assert (
list(tag_mock.return_value.values.call_args)
== [('pk', 'name', 'slug', 'priority', 'project'), {}])
tag_mock.reset_mock()
# slug provided, `values` is filtered
assert tags_tool.get_tags('FOO') == 23
assert (
list(filter_mock.filter.call_args)
== [(), {'slug__regex': 17}])
assert list(glob_mock.call_args) == [('FOO',), {}]
assert (
list(tag_mock.return_value.values.call_args)
== [('pk', 'name', 'slug', 'priority', 'project'), {}])
| 32.12
| 74
| 0.682164
|
import pytest
from mock import MagicMock, patch, PropertyMock
from pontoon.tags.models import Tag
from pontoon.tags.utils import (
TagsLatestTranslationsTool, TagsResourcesTool,
TagsStatsTool, TagsTool, TagTool)
from pontoon.tags.utils.base import Clonable
def test_util_tags_tool():
tags_tool = TagsTool()
assert tags_tool.tag_class is TagTool
assert tags_tool.resources_class is TagsResourcesTool
assert tags_tool.translations_class is TagsLatestTranslationsTool
assert tags_tool.stats_class is TagsStatsTool
assert tags_tool.locales is None
assert tags_tool.projects is None
assert tags_tool.priority is None
assert tags_tool.slug is None
assert tags_tool.path is None
assert tags_tool.tag_manager == Tag.objects
@patch('pontoon.tags.utils.TagsTool.stats_class')
def test_util_tags_tool_stats(stats_mock, tag_init_kwargs):
tags_tool = TagsTool(**tag_init_kwargs)
stats_mock.return_value = 23
assert tags_tool.stat_tool == 23
assert stats_mock.call_args[1] == tag_init_kwargs
@pytest.mark.parametrize(
"kwargs",
[dict(
slug=None,
locales=None,
projects=None,
path=None),
dict(
slug=1,
locales=2,
projects=3,
path=4)])
@patch('pontoon.tags.utils.TagsTool.resources_class')
def test_util_tags_tool_resources(resources_mock, kwargs):
tags_tool = TagsTool(**kwargs)
resources_mock.return_value = 23
assert tags_tool.resource_tool == 23
assert resources_mock.call_args[1] == kwargs
@pytest.mark.parametrize(
"kwargs",
[dict(
slug=None,
locales=None,
projects=None),
dict(
slug=1,
locales=2,
projects=3)])
@patch('pontoon.tags.utils.TagsTool.translations_class')
def test_util_tags_tool_translations(trans_mock, kwargs):
tags_tool = TagsTool(**kwargs)
trans_mock.return_value = 23
assert tags_tool.translation_tool == 23
assert trans_mock.call_args[1] == kwargs
@patch('pontoon.tags.utils.TagsTool.tag_class')
@patch('pontoon.tags.utils.TagsTool.get_tags')
@patch('pontoon.tags.utils.TagsTool.__len__')
@patch('pontoon.tags.utils.TagsTool.__iter__')
def test_util_tags_tool_get(iter_mock, len_mock, tags_mock, class_mock):
tags_tool = TagsTool()
class_mock.return_value = 23
len_mock.return_value = 7
iter_mock.return_value = iter([3, 17, 73])
assert tags_tool.get() == 3
assert not class_mock.called
assert not tags_mock.called
assert len_mock.called
assert iter_mock.called
len_mock.reset_mock()
iter_mock.reset_mock()
assert tags_tool.get(113) == 23
assert not len_mock.called
assert not iter_mock.called
assert (
list(class_mock.call_args)
== [(tags_tool, ), {}])
assert (
list(tags_mock.call_args)
== [(), {'slug': 113}])
def test_util_tags_tool_call_and_clone():
tags_tool = TagsTool()
cloned = tags_tool()
assert cloned is not tags_tool
assert isinstance(tags_tool, Clonable)
assert isinstance(cloned, Clonable)
@patch('pontoon.tags.utils.TagsTool.__call__')
def test_util_tags_tool_getitem(call_mock):
tags_tool = TagsTool()
slugs = ["foo", "bar"]
for slug in slugs:
tags_tool[slug]
assert call_mock.call_args_list[0][1] == dict(slug=slugs[0])
assert call_mock.call_args_list[1][1] == dict(slug=slugs[1])
@patch('pontoon.tags.utils.TagsTool.iter_tags')
@patch('pontoon.tags.utils.TagsTool.stat_tool',
new_callable=PropertyMock)
def test_util_tags_tool_iter(stats_mock, iter_mock):
tags_tool = TagsTool()
stats_mock.configure_mock(
**{'return_value.data': [7, 23]})
iter_mock.return_value = iter([])
assert list(tags_tool) == []
assert stats_mock.called
assert (
list(iter_mock.call_args)
== [([7, 23],), {}])
@patch('pontoon.tags.utils.TagsTool.stat_tool',
new_callable=PropertyMock)
def test_util_tags_tool_len(stats_mock):
m_len = MagicMock()
m_len.__len__.return_value = 23
stats_mock.configure_mock(
**{'return_value.data': m_len})
tags_tool = TagsTool()
assert len(tags_tool) == 23
assert m_len.__len__.called
@patch('pontoon.tags.utils.TagsTool.translation_tool',
new_callable=PropertyMock)
@patch('pontoon.tags.utils.TagsTool.tag_class')
def test_util_tags_tool_iter_tags(tag_mock, trans_mock):
trans_mock.configure_mock(
**{'return_value.data.get.return_value': 23})
tags_tool = TagsTool()
list(
tags_tool.iter_tags(
[dict(resource__tag=1, foo="bar"),
dict(resource__tag=2, foo="bar"),
dict(resource__tag=3, foo="bar")]))
assert (
[x[0][0] for x in trans_mock.return_value.data.get.call_args_list]
== [1, 2, 3])
assert (
[x[0][0] for x in tag_mock.call_args_list]
== [tags_tool] * 3)
assert (
[x[1] for x in tag_mock.call_args_list]
== [{'resource__tag': 1, 'latest_translation': 23, 'foo': 'bar'},
{'resource__tag': 2, 'latest_translation': 23, 'foo': 'bar'},
{'resource__tag': 3, 'latest_translation': 23, 'foo': 'bar'}])
@patch('pontoon.tags.utils.TagsTool.tag_manager',
new_callable=PropertyMock)
@patch('pontoon.tags.utils.tags.glob_to_regex')
def test_util_tags_tool_get_tags(glob_mock, tag_mock):
glob_mock.return_value = 17
filter_mock = MagicMock(
**{'filter.return_value': 23})
tag_mock.configure_mock(
**{'return_value.values.return_value': filter_mock})
tags_tool = TagsTool()
assert tags_tool.get_tags() is filter_mock
assert not filter_mock.called
assert not glob_mock.called
assert (
list(tag_mock.return_value.values.call_args)
== [('pk', 'name', 'slug', 'priority', 'project'), {}])
tag_mock.reset_mock()
assert tags_tool.get_tags('FOO') == 23
assert (
list(filter_mock.filter.call_args)
== [(), {'slug__regex': 17}])
assert list(glob_mock.call_args) == [('FOO',), {}]
assert (
list(tag_mock.return_value.values.call_args)
== [('pk', 'name', 'slug', 'priority', 'project'), {}])
| true
| true
|
f70ce1179beb4fa649da647a7ffe263a21a75625
| 1,025
|
py
|
Python
|
backend/votes/migrations/0001_initial.py
|
mnieber/lindyscience
|
468160aa6da42f45d8c37a2141a077a48410f81d
|
[
"MIT"
] | null | null | null |
backend/votes/migrations/0001_initial.py
|
mnieber/lindyscience
|
468160aa6da42f45d8c37a2141a077a48410f81d
|
[
"MIT"
] | 21
|
2020-02-11T23:50:05.000Z
|
2022-02-27T17:44:29.000Z
|
backend/votes/migrations/0001_initial.py
|
mnieber/lindyscience
|
468160aa6da42f45d8c37a2141a077a48410f81d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-26 16:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('object_id', models.UUIDField()),
('value', models.IntegerField(choices=[(-1, 'down'), (0, 'neutral'), (1, 'up')])),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 34.166667
| 128
| 0.633171
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('object_id', models.UUIDField()),
('value', models.IntegerField(choices=[(-1, 'down'), (0, 'neutral'), (1, 'up')])),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
f70ce14c3ec536405be5c819c12da8f86f9c0e0c
| 6,967
|
py
|
Python
|
gupb/view/render.py
|
janusz-tracz/czesc_gupb
|
ea33376769657e1a8b97b02a2abd93679b32ae88
|
[
"MIT"
] | null | null | null |
gupb/view/render.py
|
janusz-tracz/czesc_gupb
|
ea33376769657e1a8b97b02a2abd93679b32ae88
|
[
"MIT"
] | null | null | null |
gupb/view/render.py
|
janusz-tracz/czesc_gupb
|
ea33376769657e1a8b97b02a2abd93679b32ae88
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import os
import itertools
from typing import Any, Optional, TypeVar
import pygame
from gupb.controller import keyboard
from gupb.model import characters
from gupb.model import effects
from gupb.model import games
from gupb.model import tiles
from gupb.model import weapons
pygame.init()
Sprite = TypeVar('Sprite')
TILE_SIZE = 8
BLACK = pygame.Color('black')
def load_sprite(group: str, name: str, transparent: pygame.Color = None) -> Sprite:
path = os.path.join('resources', 'images', group, f'{name}.png')
sprite = pygame.image.load(path).convert()
if transparent:
sprite.set_colorkey(transparent)
return sprite
class SpriteRepository:
def __init__(self) -> None:
self.sprites: dict[Any, Sprite] = {
tiles.Land: load_sprite('tiles', 'land'),
tiles.Sea: load_sprite('tiles', 'sea'),
tiles.Wall: load_sprite('tiles', 'wall'),
tiles.Menhir: load_sprite('tiles', 'menhir'),
weapons.Knife: load_sprite('weapons', 'knife', BLACK),
weapons.Sword: load_sprite('weapons', 'sword', BLACK),
weapons.Axe: load_sprite('weapons', 'axe', BLACK),
weapons.Bow: load_sprite('weapons', 'bow', BLACK),
weapons.Amulet: load_sprite('weapons', 'amulet', BLACK),
characters.Tabard.BLUE: load_sprite('characters', 'champion_blue', BLACK),
characters.Tabard.BROWN: load_sprite('characters', 'champion_brown', BLACK),
characters.Tabard.GREY: load_sprite('characters', 'champion_grey', BLACK),
characters.Tabard.RED: load_sprite('characters', 'champion_red', BLACK),
characters.Tabard.VIOLET: load_sprite('characters', 'champion_violet', BLACK),
characters.Tabard.WHITE: load_sprite('characters', 'champion_white', BLACK),
characters.Tabard.YELLOW: load_sprite('characters', 'champion_yellow', BLACK),
effects.Mist: load_sprite('effects', 'mist', BLACK),
effects.WeaponCut: load_sprite('effects', 'blood', BLACK),
}
self.rotation_values: dict[characters.Facing, int] = {
characters.Facing.RIGHT: 0,
characters.Facing.UP: 90,
characters.Facing.LEFT: 180,
characters.Facing.DOWN: 270,
}
self.champion_sprites: dict[tuple[characters.Tabard, characters.Facing], Sprite] = {
(tabard, facing): pygame.transform.rotate(self.sprites[tabard], self.rotation_values[facing])
for tabard, facing in itertools.product(
[
characters.Tabard.BLUE,
characters.Tabard.BROWN,
characters.Tabard.GREY,
characters.Tabard.RED,
characters.Tabard.VIOLET,
characters.Tabard.WHITE,
characters.Tabard.YELLOW,
],
[
characters.Facing.RIGHT,
characters.Facing.UP,
characters.Facing.LEFT,
characters.Facing.DOWN,
]
)
}
def match_sprite(self, element: Any) -> Sprite:
if isinstance(element, characters.Champion):
return self.champion_sprites[(element.tabard, element.facing)]
else:
return self.sprites[type(element)]
class Renderer:
def __init__(self, ms_per_time_unit: int = 1):
pygame.display.set_caption('GUPB')
self.screen = pygame.display.set_mode((100, 100))
self.sprite_repository = SpriteRepository()
self.clock = pygame.time.Clock()
self.time_passed = 0
self.ms_per_time_unit = ms_per_time_unit
def run(
self,
game: games.Game,
show_sight: Optional[characters.Champion] = None,
keyboard_controller: Optional[keyboard.KeyboardController] = None,
) -> None:
self.screen = self._resize_window(game)
time_to_cycle = self._time_to_cycle(game)
self.clock.tick()
while not game.finished:
self.time_passed += self.clock.tick()
if self.time_passed >= time_to_cycle:
self.time_passed -= time_to_cycle
game.cycle()
self._render(game, show_sight)
time_to_cycle = self._time_to_cycle(game)
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.KEYDOWN and keyboard_controller:
keyboard_controller.register(event.key)
@staticmethod
def _resize_window(game: games.Game) -> pygame.Surface:
arena_x_size, arena_y_size = game.arena.size
window_size = TILE_SIZE * arena_x_size, TILE_SIZE * arena_y_size
return pygame.display.set_mode(window_size)
def _time_to_cycle(self, game: games.Game) -> int:
return self.ms_per_time_unit * game.current_state.value
def _render(self, game: games.Game, show_sight: Optional[characters.Champion]) -> None:
background = pygame.Surface(self.screen.get_size())
background.convert()
self._render_arena(game, background)
if show_sight:
self._render_sight(game, show_sight, background)
self.screen.blit(background, (0, 0))
pygame.display.flip()
def _render_arena(self, game: games.Game, background: pygame.Surface) -> None:
for i, j in game.arena.terrain:
blit_destination = (i * TILE_SIZE, j * TILE_SIZE)
tile = game.arena.terrain[i, j]
tile_sprite = self.sprite_repository.match_sprite(tile)
background.blit(tile_sprite, blit_destination)
if tile.loot:
loot_sprite = self.sprite_repository.match_sprite(tile.loot)
background.blit(loot_sprite, blit_destination)
if tile.character:
character_sprite = self.sprite_repository.match_sprite(tile.character)
background.blit(character_sprite, blit_destination)
if tile.effects:
for effect in tile.effects:
effect_sprite = self.sprite_repository.match_sprite(effect)
background.blit(effect_sprite, blit_destination)
@staticmethod
def _render_sight(game: games.Game, show_sight: characters.Champion, background: pygame.Surface) -> None:
if show_sight in game.champions:
darken_percent = 0.5
dark = pygame.Surface((TILE_SIZE, TILE_SIZE), pygame.SRCALPHA)
dark.fill((0, 0, 0, darken_percent * 255))
visible = game.arena.visible_coords(show_sight)
for i, j in game.arena.terrain:
if (i, j) not in visible:
blit_destination = (i * TILE_SIZE, j * TILE_SIZE)
background.blit(dark, blit_destination)
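# Standalone sketch (untested, assumes only pygame) of the rotation-caching idea
# used by SpriteRepository above: rotate each base sprite once per facing and look
# the result up by (name, facing) instead of rotating every frame. The 16x16
# surface stands in for a sprite loaded with load_sprite().
import pygame
pygame.init()
FACING_DEGREES = {'right': 0, 'up': 90, 'left': 180, 'down': 270}
base = pygame.Surface((16, 16))
base.fill(pygame.Color('red'))
rotated = {
    ('champion', facing): pygame.transform.rotate(base, degrees)
    for facing, degrees in FACING_DEGREES.items()
}
# A renderer can now blit rotated[('champion', 'up')] directly.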
| 41.224852
| 109
| 0.615186
|
from __future__ import annotations
import os
import itertools
from typing import Any, Optional, TypeVar
import pygame
from gupb.controller import keyboard
from gupb.model import characters
from gupb.model import effects
from gupb.model import games
from gupb.model import tiles
from gupb.model import weapons
pygame.init()
Sprite = TypeVar('Sprite')
TILE_SIZE = 8
BLACK = pygame.Color('black')
def load_sprite(group: str, name: str, transparent: Optional[pygame.Color] = None) -> Sprite:
path = os.path.join('resources', 'images', group, f'{name}.png')
sprite = pygame.image.load(path).convert()
if transparent:
sprite.set_colorkey(transparent)
return sprite
class SpriteRepository:
def __init__(self) -> None:
self.sprites: dict[Any, Sprite] = {
tiles.Land: load_sprite('tiles', 'land'),
tiles.Sea: load_sprite('tiles', 'sea'),
tiles.Wall: load_sprite('tiles', 'wall'),
tiles.Menhir: load_sprite('tiles', 'menhir'),
weapons.Knife: load_sprite('weapons', 'knife', BLACK),
weapons.Sword: load_sprite('weapons', 'sword', BLACK),
weapons.Axe: load_sprite('weapons', 'axe', BLACK),
weapons.Bow: load_sprite('weapons', 'bow', BLACK),
weapons.Amulet: load_sprite('weapons', 'amulet', BLACK),
characters.Tabard.BLUE: load_sprite('characters', 'champion_blue', BLACK),
characters.Tabard.BROWN: load_sprite('characters', 'champion_brown', BLACK),
characters.Tabard.GREY: load_sprite('characters', 'champion_grey', BLACK),
characters.Tabard.RED: load_sprite('characters', 'champion_red', BLACK),
characters.Tabard.VIOLET: load_sprite('characters', 'champion_violet', BLACK),
characters.Tabard.WHITE: load_sprite('characters', 'champion_white', BLACK),
characters.Tabard.YELLOW: load_sprite('characters', 'champion_yellow', BLACK),
effects.Mist: load_sprite('effects', 'mist', BLACK),
effects.WeaponCut: load_sprite('effects', 'blood', BLACK),
}
self.rotation_values: dict[characters.Facing, int] = {
characters.Facing.RIGHT: 0,
characters.Facing.UP: 90,
characters.Facing.LEFT: 180,
characters.Facing.DOWN: 270,
}
self.champion_sprites: dict[tuple[characters.Tabard, characters.Facing], Sprite] = {
(tabard, facing): pygame.transform.rotate(self.sprites[tabard], self.rotation_values[facing])
for tabard, facing in itertools.product(
[
characters.Tabard.BLUE,
characters.Tabard.BROWN,
characters.Tabard.GREY,
characters.Tabard.RED,
characters.Tabard.VIOLET,
characters.Tabard.WHITE,
characters.Tabard.YELLOW,
],
[
characters.Facing.RIGHT,
characters.Facing.UP,
characters.Facing.LEFT,
characters.Facing.DOWN,
]
)
}
def match_sprite(self, element: Any) -> Sprite:
if isinstance(element, characters.Champion):
return self.champion_sprites[(element.tabard, element.facing)]
else:
return self.sprites[type(element)]
class Renderer:
def __init__(self, ms_per_time_unit: int = 1):
pygame.display.set_caption('GUPB')
self.screen = pygame.display.set_mode((100, 100))
self.sprite_repository = SpriteRepository()
self.clock = pygame.time.Clock()
self.time_passed = 0
self.ms_per_time_unit = ms_per_time_unit
def run(
self,
game: games.Game,
show_sight: Optional[characters.Champion] = None,
keyboard_controller: Optional[keyboard.KeyboardController] = None,
) -> None:
self.screen = self._resize_window(game)
time_to_cycle = self._time_to_cycle(game)
self.clock.tick()
while not game.finished:
self.time_passed += self.clock.tick()
if self.time_passed >= time_to_cycle:
self.time_passed -= time_to_cycle
game.cycle()
self._render(game, show_sight)
time_to_cycle = self._time_to_cycle(game)
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
elif event.type == pygame.KEYDOWN and keyboard_controller:
keyboard_controller.register(event.key)
@staticmethod
def _resize_window(game: games.Game) -> pygame.Surface:
arena_x_size, arena_y_size = game.arena.size
window_size = TILE_SIZE * arena_x_size, TILE_SIZE * arena_y_size
return pygame.display.set_mode(window_size)
def _time_to_cycle(self, game: games.Game) -> int:
return self.ms_per_time_unit * game.current_state.value
def _render(self, game: games.Game, show_sight: Optional[characters.Champion]) -> None:
background = pygame.Surface(self.screen.get_size())
background.convert()
self._render_arena(game, background)
if show_sight:
self._render_sight(game, show_sight, background)
self.screen.blit(background, (0, 0))
pygame.display.flip()
def _render_arena(self, game: games.Game, background: pygame.Surface) -> None:
for i, j in game.arena.terrain:
blit_destination = (i * TILE_SIZE, j * TILE_SIZE)
tile = game.arena.terrain[i, j]
tile_sprite = self.sprite_repository.match_sprite(tile)
background.blit(tile_sprite, blit_destination)
if tile.loot:
loot_sprite = self.sprite_repository.match_sprite(tile.loot)
background.blit(loot_sprite, blit_destination)
if tile.character:
character_sprite = self.sprite_repository.match_sprite(tile.character)
background.blit(character_sprite, blit_destination)
if tile.effects:
for effect in tile.effects:
effect_sprite = self.sprite_repository.match_sprite(effect)
background.blit(effect_sprite, blit_destination)
@staticmethod
def _render_sight(game: games.Game, show_sight: characters.Champion, background: pygame.Surface) -> None:
if show_sight in game.champions:
darken_percent = 0.5
dark = pygame.Surface((TILE_SIZE, TILE_SIZE), pygame.SRCALPHA)
dark.fill((0, 0, 0, darken_percent * 255))
visible = game.arena.visible_coords(show_sight)
for i, j in game.arena.terrain:
if (i, j) not in visible:
blit_destination = (i * TILE_SIZE, j * TILE_SIZE)
background.blit(dark, blit_destination)
| true
| true
|
f70ce1740834426274a39514b249231aca415f40
| 2,195
|
py
|
Python
|
packages/pyre/patterns/AttributeClassifier.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | 3
|
2019-08-02T21:02:47.000Z
|
2021-09-08T13:59:43.000Z
|
packages/pyre/patterns/AttributeClassifier.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
packages/pyre/patterns/AttributeClassifier.py
|
lijun99/pyre
|
004dfd4c06489b4ba5b32877338ca6440f2d523b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# externals
import collections
# superclass
from .AbstractMetaclass import AbstractMetaclass
# class declaration
class AttributeClassifier(AbstractMetaclass):
"""
A base metaclass that enables attribute categorization.
A common pattern in pyre is to define classes that contain special attributes whose purpose
is to collect declaration meta data and associate them with a class attribute. These
attributes are processed by metaclasses and are converted into appropriate behavior. For
example, components have properties, which are decorated descriptors that enable external
configuration of component state. Similarly, XML parsing happens with the aid of classes
that capture the syntax, semantics and processing behavior of tags by employing descriptors
to capture the layout of an XML document.
This class defines {pyre_harvest}, which scans the class attribute dictionary for instances
of the special class {descriptor}. It also overrides {__prepare__} to provide attribute
storage that records the order in which attributes were encountered in the class record.
"""
# data
pyre_reserved = set()
# meta methods
@classmethod
def __prepare__(cls, name, bases, **kwds):
"""
Build an attribute table that maintains a category index for attribute descriptors
"""
# use an ordered dictionary
return collections.OrderedDict()
# interface
@classmethod
def pyre_harvest(cls, attributes, descriptor):
"""
Examine {attributes}, looking for instances of {descriptor}
"""
# reserved names are excluded from harvesting
reserved = cls.pyre_reserved
# loop over the attributes
for name, attribute in attributes.items():
# if this is a descriptor that's not in the reserved list
if isinstance(attribute, descriptor) and name not in reserved:
# return it to the caller along with its name
yield name, attribute
# all done
return
# end of file
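# Self-contained illustration (not pyre code) of the harvesting pattern the
# docstring above describes: a metaclass records, in declaration order, every
# class attribute that is an instance of a marker descriptor class.
import collections
class Marker:
    pass
class Harvester(type):
    @classmethod
    def __prepare__(mcs, name, bases, **kwds):
        # preserve declaration order, as AttributeClassifier does
        return collections.OrderedDict()
    def __new__(mcs, name, bases, attributes, **kwds):
        record = super().__new__(mcs, name, bases, dict(attributes))
        record.harvested = [key for key, value in attributes.items()
                            if isinstance(value, Marker)]
        return record
class Sample(metaclass=Harvester):
    first = Marker()
    second = Marker()
    other = 42
assert Sample.harvested == ['first', 'second']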
| 32.761194
| 95
| 0.701139
|
import collections
from .AbstractMetaclass import AbstractMetaclass
class AttributeClassifier(AbstractMetaclass):
pyre_reserved = set()
@classmethod
def __prepare__(cls, name, bases, **kwds):
return collections.OrderedDict()
@classmethod
def pyre_harvest(cls, attributes, descriptor):
reserved = cls.pyre_reserved
for name, attribute in attributes.items():
if isinstance(attribute, descriptor) and name not in reserved:
# return it to the caller along with its name
yield name, attribute
# all done
return
# end of file
| true
| true
|
f70ce260fbedc20ce6468e1c73bcde5215e6e54c
| 4,697
|
py
|
Python
|
marlgrid/envs/__init__.py
|
s-mawjee/marlgrid
|
a2bb039fde2a789715bfe7ecb9f84d78cbecbc24
|
[
"Apache-2.0"
] | null | null | null |
marlgrid/envs/__init__.py
|
s-mawjee/marlgrid
|
a2bb039fde2a789715bfe7ecb9f84d78cbecbc24
|
[
"Apache-2.0"
] | null | null | null |
marlgrid/envs/__init__.py
|
s-mawjee/marlgrid
|
a2bb039fde2a789715bfe7ecb9f84d78cbecbc24
|
[
"Apache-2.0"
] | null | null | null |
from ..base import MultiGridEnv
from .empty import EmptyMultiGrid, EmptyColorMultiGrid
from .doorkey import DoorKeyEnv
from .cluttered import ClutteredMultiGrid
from .goalcycle import ClutteredGoalCycleEnv
from .viz_test import VisibilityTestEnv
from .hallways import HallWaysMultiGrid
from .comm_game import CommunicationGameEnv
from ..agents import GridAgentInterface
from gym.envs.registration import register as gym_register
import sys
import inspect
import random
this_module = sys.modules[__name__]
registered_envs = []
def register_marl_env(
env_name,
env_class,
n_agents,
grid_size,
view_size,
view_tile_size=8,
view_offset=0,
agent_color=None,
env_kwargs={},
):
colors = ["red", "blue", "purple", "orange", "olive", "pink"]
assert n_agents <= len(colors)
class RegEnv(env_class):
def __new__(cls):
instance = super(env_class, RegEnv).__new__(env_class)
instance.__init__(
agents=[
GridAgentInterface(
color=c if agent_color is None else agent_color,
view_size=view_size,
                        view_tile_size=view_tile_size,
view_offset=view_offset,
)
for c in colors[:n_agents]
],
grid_size=grid_size,
**env_kwargs,
)
return instance
env_class_name = f"env_{len(registered_envs)}"
setattr(this_module, env_class_name, RegEnv)
registered_envs.append(env_name)
gym_register(env_name, entry_point=f"marlgrid.envs:{env_class_name}")
def env_from_config(env_config, randomize_seed=True):
possible_envs = {
k: v
for k, v in globals().items()
if inspect.isclass(v) and issubclass(v, MultiGridEnv)
}
env_class = possible_envs[env_config["env_class"]]
env_kwargs = {k: v for k, v in env_config.items() if k != "env_class"}
if randomize_seed:
env_kwargs["seed"] = env_kwargs.get("seed", 0) + random.randint(0, 1337 * 1337)
return env_class(**env_kwargs)
register_marl_env(
"MarlGrid-1AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=1,
grid_size=11,
view_size=5,
env_kwargs={"n_clutter": 30},
)
register_marl_env(
"MarlGrid-2AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=2,
grid_size=11,
view_size=5,
env_kwargs={"n_clutter": 30},
)
register_marl_env(
"MarlGrid-3AgentCluttered11x11-v0",
ClutteredMultiGrid,
n_agents=3,
grid_size=11,
view_size=7,
env_kwargs={"clutter_density": 0.15},
)
register_marl_env(
"MarlGrid-3AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=3,
grid_size=15,
view_size=7,
env_kwargs={"clutter_density": 0.15},
)
register_marl_env(
"MarlGrid-2AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=2, grid_size=9, view_size=7
)
register_marl_env(
"MarlGrid-3AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=3, grid_size=9, view_size=7
)
register_marl_env(
"MarlGrid-4AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=4, grid_size=9, view_size=7
)
register_marl_env(
"Goalcycle-demo-solo-v0",
ClutteredGoalCycleEnv,
n_agents=1,
grid_size=13,
view_size=7,
view_tile_size=5,
view_offset=1,
env_kwargs={"clutter_density": 0.1, "n_bonus_tiles": 3},
)
register_marl_env(
"MarlGrid-2AgentComms15x15-v0",
HallWaysMultiGrid,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"goal_coordinates": [(1, 1), (1, 13), (13, 13), (13, 1)],
"goal_colors": ["blue", "red", "blue", "red"],
"max_steps": 250,
},
agent_color="green",
)
register_marl_env(
"MarlGrid-2AgentEmptyColor15x15-v0",
EmptyColorMultiGrid,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"goal_coordinates": [(7, 1), (8, 1), (8, 13), (7, 13)],
"goal_colors": ["blue", "red", "blue", "red"],
"max_steps": 250,
},
agent_color="green",
)
register_marl_env(
"MarlGrid-2AgentCommGame15x15-v0",
CommunicationGameEnv,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"block_coordinates": [(1, 1), (13, 1), (1, 13), (13, 13)],
"block_colors": ["blue", "red", "cyan", "pink"],
"comm_blocks_coordinates": [(7, 4), (7, 10)],
"max_steps": 250,
},
agent_color="green",
)
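# Hypothetical, untested usage sketch (separate script): importing marlgrid.envs
# runs the register_marl_env calls above, after which the ids work with gym.make.
# Assumes gym and this marlgrid package are installed; the id is one registered above.
import gym
import marlgrid.envs  # noqa: F401
env = gym.make("MarlGrid-3AgentEmpty9x9-v0")
obs = env.reset()
# step() is multi-agent here and expects one action per registered agent.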
| 25.252688
| 87
| 0.631254
|
from ..base import MultiGridEnv
from .empty import EmptyMultiGrid, EmptyColorMultiGrid
from .doorkey import DoorKeyEnv
from .cluttered import ClutteredMultiGrid
from .goalcycle import ClutteredGoalCycleEnv
from .viz_test import VisibilityTestEnv
from .hallways import HallWaysMultiGrid
from .comm_game import CommunicationGameEnv
from ..agents import GridAgentInterface
from gym.envs.registration import register as gym_register
import sys
import inspect
import random
this_module = sys.modules[__name__]
registered_envs = []
def register_marl_env(
env_name,
env_class,
n_agents,
grid_size,
view_size,
view_tile_size=8,
view_offset=0,
agent_color=None,
env_kwargs={},
):
colors = ["red", "blue", "purple", "orange", "olive", "pink"]
assert n_agents <= len(colors)
class RegEnv(env_class):
def __new__(cls):
instance = super(env_class, RegEnv).__new__(env_class)
instance.__init__(
agents=[
GridAgentInterface(
color=c if agent_color is None else agent_color,
view_size=view_size,
                        view_tile_size=view_tile_size,
view_offset=view_offset,
)
for c in colors[:n_agents]
],
grid_size=grid_size,
**env_kwargs,
)
return instance
env_class_name = f"env_{len(registered_envs)}"
setattr(this_module, env_class_name, RegEnv)
registered_envs.append(env_name)
gym_register(env_name, entry_point=f"marlgrid.envs:{env_class_name}")
def env_from_config(env_config, randomize_seed=True):
possible_envs = {
k: v
for k, v in globals().items()
if inspect.isclass(v) and issubclass(v, MultiGridEnv)
}
env_class = possible_envs[env_config["env_class"]]
env_kwargs = {k: v for k, v in env_config.items() if k != "env_class"}
if randomize_seed:
env_kwargs["seed"] = env_kwargs.get("seed", 0) + random.randint(0, 1337 * 1337)
return env_class(**env_kwargs)
register_marl_env(
"MarlGrid-1AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=1,
grid_size=11,
view_size=5,
env_kwargs={"n_clutter": 30},
)
register_marl_env(
"MarlGrid-2AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=2,
grid_size=11,
view_size=5,
env_kwargs={"n_clutter": 30},
)
register_marl_env(
"MarlGrid-3AgentCluttered11x11-v0",
ClutteredMultiGrid,
n_agents=3,
grid_size=11,
view_size=7,
env_kwargs={"clutter_density": 0.15},
)
register_marl_env(
"MarlGrid-3AgentCluttered15x15-v0",
ClutteredMultiGrid,
n_agents=3,
grid_size=15,
view_size=7,
env_kwargs={"clutter_density": 0.15},
)
register_marl_env(
"MarlGrid-2AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=2, grid_size=9, view_size=7
)
register_marl_env(
"MarlGrid-3AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=3, grid_size=9, view_size=7
)
register_marl_env(
"MarlGrid-4AgentEmpty9x9-v0", EmptyMultiGrid, n_agents=4, grid_size=9, view_size=7
)
register_marl_env(
"Goalcycle-demo-solo-v0",
ClutteredGoalCycleEnv,
n_agents=1,
grid_size=13,
view_size=7,
view_tile_size=5,
view_offset=1,
env_kwargs={"clutter_density": 0.1, "n_bonus_tiles": 3},
)
register_marl_env(
"MarlGrid-2AgentComms15x15-v0",
HallWaysMultiGrid,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"goal_coordinates": [(1, 1), (1, 13), (13, 13), (13, 1)],
"goal_colors": ["blue", "red", "blue", "red"],
"max_steps": 250,
},
agent_color="green",
)
register_marl_env(
"MarlGrid-2AgentEmptyColor15x15-v0",
EmptyColorMultiGrid,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"goal_coordinates": [(7, 1), (8, 1), (8, 13), (7, 13)],
"goal_colors": ["blue", "red", "blue", "red"],
"max_steps": 250,
},
agent_color="green",
)
register_marl_env(
"MarlGrid-2AgentCommGame15x15-v0",
CommunicationGameEnv,
n_agents=2,
grid_size=15,
view_size=7,
env_kwargs={
"respawn": False,
"ghost_mode": False,
"reward_decay": False,
"block_coordinates": [(1, 1), (13, 1), (1, 13), (13, 13)],
"block_colors": ["blue", "red", "cyan", "pink"],
"comm_blocks_coordinates": [(7, 4), (7, 10)],
"max_steps": 250,
},
agent_color="green",
)
| true
| true
|
f70ce322dbdb58b9a889faa2dc81d50d6ec92438
| 18,167
|
py
|
Python
|
tests/framework/test_events.py
|
svidoso/ipopo
|
1d4b81207e67890dfccc8f562336c7104f194c17
|
[
"Apache-2.0"
] | 65
|
2015-04-21T10:41:18.000Z
|
2022-01-02T16:25:40.000Z
|
tests/framework/test_events.py
|
svidoso/ipopo
|
1d4b81207e67890dfccc8f562336c7104f194c17
|
[
"Apache-2.0"
] | 85
|
2015-01-20T14:23:52.000Z
|
2022-02-19T17:08:46.000Z
|
tests/framework/test_events.py
|
svidoso/ipopo
|
1d4b81207e67890dfccc8f562336c7104f194c17
|
[
"Apache-2.0"
] | 32
|
2015-03-13T07:43:05.000Z
|
2020-04-24T07:56:53.000Z
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the framework events.
:author: Thomas Calmant
"""
# Standard library
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
from pelix.framework import FrameworkFactory, Bundle, BundleException, \
BundleContext, BundleEvent, ServiceEvent
from pelix.services import SERVICE_EVENT_LISTENER_HOOK
# Tests
from tests import log_on, log_off
from tests.interfaces import IEchoService
# ------------------------------------------------------------------------------
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
SERVICE_BUNDLE = "tests.framework.service_bundle"
SIMPLE_BUNDLE = "tests.framework.simple_bundle"
# ------------------------------------------------------------------------------
class BundleEventTest(unittest.TestCase):
"""
Pelix bundle event tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SIMPLE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
"""
Resets the flags
"""
del self.received[:]
def bundle_changed(self, event):
"""
Called by the framework when a bundle event is triggered
@param event: The BundleEvent
"""
assert isinstance(event, BundleEvent)
bundle = event.get_bundle()
kind = event.get_kind()
if self.bundle is not None \
and kind == BundleEvent.INSTALLED:
# Bundle is not yet locally known...
self.assertIs(self.bundle, bundle,
"Received an event for an other bundle.")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testBundleEvents(self):
"""
Tests if the signals are correctly received
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_bundle_listener(self),
"Can't register the bundle listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert the INSTALLED event has been received
self.assertEqual([BundleEvent.INSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([BundleEvent.STARTING, BundleEvent.STARTED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([BundleEvent.STOPPING, BundleEvent.STOPPING_PRECLEAN,
BundleEvent.STOPPED], self.received,
"Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Assert the events have been received
self.assertEqual([BundleEvent.UNINSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_bundle_listener(self)
# ------------------------------------------------------------------------------
class ServiceEventTest(unittest.TestCase):
"""
Pelix service event tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
"""
Resets the flags
"""
del self.received[:]
def service_changed(self, event):
"""
Called by the framework when a service event is triggered
@param event: The ServiceEvent
"""
assert isinstance(event, ServiceEvent)
ref = event.get_service_reference()
self.assertIsNotNone(ref, "Invalid service reference in the event")
kind = event.get_kind()
if kind == ServiceEvent.MODIFIED \
or kind == ServiceEvent.MODIFIED_ENDMATCH:
# Properties have been modified
self.assertNotEqual(ref.get_properties(),
event.get_previous_properties(),
"Modified event for unchanged properties")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testDoubleListener(self):
"""
Tests double registration / unregistration
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Double registration
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
log_off()
self.assertFalse(context.add_service_listener(self),
"Service listener registered twice")
log_on()
# Double unregistration
self.assertTrue(context.remove_service_listener(self),
"Can't unregister the service listener")
log_off()
self.assertFalse(context.remove_service_listener(self),
"Service listener unregistered twice")
log_on()
def testInvalidFilterListener(self):
"""
Tests invalid filter listener registration
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
log_off()
self.assertRaises(BundleException, context.add_service_listener, self,
"Invalid")
log_on()
self.assertFalse(context.remove_service_listener(self),
"Invalid filter was registered anyway")
def testServiceEventsNormal(self):
"""
Tests if the signals are correctly received
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert that no events have been received yet
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Assert the events have been received
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_service_listener(self)
def testServiceEventsNoStop(self):
"""
Tests if the signals are correctly received, even if the service is not
correctly removed
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert that no events have been received yet
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle, without unregistering the service
module_ = bundle.get_module()
module_.unregister = False
bundle.uninstall()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_service_listener(self)
def testServiceModified(self):
"""
Tests the service modified event
"""
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self, "(test=True)"),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Get the service
ref = context.get_service_reference(IEchoService)
self.assertIsNotNone(ref, "ServiceReference not found")
svc = context.get_service(ref)
self.assertIsNotNone(ref, "Invalid service instance")
# Modify the service => Simple modification
svc.modify({"answer": 42})
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Set the same value => No event should be sent
svc.modify({"answer": 42})
self.assertEqual([], self.received,
"Received {0}".format(self.received))
self.reset_state()
# Modify the service => Ends the filter match
svc.modify({"test": False})
# Assert the events have been received
self.assertEqual([ServiceEvent.MODIFIED_ENDMATCH],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Modify the service => the filter matches again
svc.modify({"test": True})
# Assert the events have been received
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Unregister from events
context.remove_service_listener(self)
# ------------------------------------------------------------------------------
class EventListenerHookTest(unittest.TestCase):
"""
Event Listener Hook tests
"""
def setUp(self):
"""
Called before each test. Initiates a framework.
"""
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
"""
Called after each test
"""
self.framework.stop()
self.framework.delete()
def test_normal_behaviour(self):
"""
Checks if event listener hooks are registered correctly
"""
# Test implementation
events = []
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
events.append((svc_event, listeners_dict))
# Register the hook
ctx = self.framework.get_bundle_context()
reg = ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
# Hooks shouldn't be aware of themselves
self.assertFalse(events)
# Register a dummy service
dummy_reg = ctx.register_service("dummy", object(), {})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.REGISTERED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# No listeners are registered
self.assertFalse(listeners)
# Update the service
dummy_reg.set_properties({"hello": "world"})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.MODIFIED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the service
dummy_reg.unregister()
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.UNREGISTERING)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the hook
reg.unregister()
# Register a new service
ctx.register_service("dummy", object(), {})
# Hook must not be notified
self.assertFalse(events)
def test_hook(self):
"""
Tests the hook filtering behaviour
"""
# Add a bundle to have two contexts in the test
fw_ctx = self.framework.get_bundle_context()
bnd = fw_ctx.install_bundle("tests.dummy_1")
bnd.start()
bnd_ctx = bnd.get_bundle_context()
# Setup a hook
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
to_remove = svc_event.get_service_reference() \
.get_property("to.remove")
info_to_remove = []
for listener_bc, listeners_info in listeners_dict.items():
# Check the dictionary content
for listener_info in listeners_info:
self.assertIs(listener_bc, listener_info.bundle_context)
self.assertIs(
listener_bc, listener_info.listener.context)
self.assertIs(
listener_bc, listener_info.get_bundle_context())
if listener_info.listener in to_remove:
info_to_remove.append(listener_info)
# Remove the requested listeners
for listener_info in info_to_remove:
listeners_dict[listener_info.bundle_context] \
.remove(listener_info)
fw_ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
# Register multiple listeners
class Listener(object):
def __init__(self, bc):
self.context = bc
self.storage = []
bc.add_service_listener(self)
def service_changed(self, event):
self.storage.append(event)
listener_referee = Listener(fw_ctx)
listener_1 = Listener(fw_ctx)
listener_2 = Listener(bnd_ctx)
# Register a service that only the referee will get
reg = fw_ctx.register_service(
"dummy", object(), {"to.remove": [listener_1, listener_2]})
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.REGISTERED)
self.assertFalse(listener_1.storage)
self.assertFalse(listener_2.storage)
# Modify it so that listener_1 gets it
reg.set_properties({"to.remove": [listener_2]})
self.assertFalse(listener_2.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt1 = listener_1.storage.pop(0)
self.assertIs(evt1, evt)
# Modify it so that listener_2, but not listener_1 gets it
reg.set_properties({"to.remove": [listener_1]})
self.assertFalse(listener_1.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt2 = listener_2.storage.pop(0)
self.assertIs(evt2, evt)
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set logging level
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
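# Hedged usage sketch built only from calls exercised in the tests above: start a
# framework, add a service listener, register and unregister a service, tear down.
# Untested here; assumes pelix (iPOPO) is installed.
from pelix.framework import FrameworkFactory
class PrintingListener(object):
    def service_changed(self, event):
        # each registration/modification/unregistration arrives as a ServiceEvent
        print("service event kind:", event.get_kind())
framework = FrameworkFactory.get_framework()
framework.start()
context = framework.get_bundle_context()
context.add_service_listener(PrintingListener())
registration = context.register_service("dummy", object(), {})
registration.unregister()
framework.stop()
FrameworkFactory.delete_framework()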
| 32.733333
| 80
| 0.59476
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from pelix.framework import FrameworkFactory, Bundle, BundleException, \
BundleContext, BundleEvent, ServiceEvent
from pelix.services import SERVICE_EVENT_LISTENER_HOOK
from tests import log_on, log_off
from tests.interfaces import IEchoService
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
SERVICE_BUNDLE = "tests.framework.service_bundle"
SIMPLE_BUNDLE = "tests.framework.simple_bundle"
class BundleEventTest(unittest.TestCase):
def setUp(self):
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SIMPLE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
del self.received[:]
def bundle_changed(self, event):
assert isinstance(event, BundleEvent)
bundle = event.get_bundle()
kind = event.get_kind()
if self.bundle is not None \
and kind == BundleEvent.INSTALLED:
self.assertIs(self.bundle, bundle,
"Received an event for an other bundle.")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testBundleEvents(self):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
self.assertTrue(context.add_bundle_listener(self),
"Can't register the bundle listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert the INSTALLED event has been received
self.assertEqual([BundleEvent.INSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([BundleEvent.STARTING, BundleEvent.STARTED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Stop the bundle
bundle.stop()
# Assert the events have been received
self.assertEqual([BundleEvent.STOPPING, BundleEvent.STOPPING_PRECLEAN,
BundleEvent.STOPPED], self.received,
"Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle
bundle.uninstall()
# Assert the events have been received
self.assertEqual([BundleEvent.UNINSTALLED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_bundle_listener(self)
# ------------------------------------------------------------------------------
class ServiceEventTest(unittest.TestCase):
def setUp(self):
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
self.framework.stop()
FrameworkFactory.delete_framework()
def reset_state(self):
del self.received[:]
def service_changed(self, event):
assert isinstance(event, ServiceEvent)
ref = event.get_service_reference()
self.assertIsNotNone(ref, "Invalid service reference in the event")
kind = event.get_kind()
if kind == ServiceEvent.MODIFIED \
or kind == ServiceEvent.MODIFIED_ENDMATCH:
# Properties have been modified
self.assertNotEqual(ref.get_properties(),
event.get_previous_properties(),
"Modified event for unchanged properties")
self.assertNotIn(kind, self.received, "Event received twice")
self.received.append(kind)
def testDoubleListener(self):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Double registration
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
log_off()
self.assertFalse(context.add_service_listener(self),
"Service listener registered twice")
log_on()
self.assertTrue(context.remove_service_listener(self),
"Can't unregister the service listener")
log_off()
self.assertFalse(context.remove_service_listener(self),
"Service listener unregistered twice")
log_on()
def testInvalidFilterListener(self):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
log_off()
self.assertRaises(BundleException, context.add_service_listener, self,
"Invalid")
log_on()
self.assertFalse(context.remove_service_listener(self),
"Invalid filter was registered anyway")
def testServiceEventsNormal(self):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
bundle.start()
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
bundle.stop()
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
bundle.uninstall()
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
context.remove_service_listener(self)
def testServiceEventsNoStop(self):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
self.assertTrue(context.add_service_listener(self),
"Can't register the service listener")
# Install the bundle
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
        # Assert that no events have been received yet
self.assertEqual(
[], self.received, "Received {0}".format(self.received))
self.reset_state()
# Start the bundle
bundle.start()
# Assert the events have been received
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Uninstall the bundle, without unregistering the service
module_ = bundle.get_module()
module_.unregister = False
bundle.uninstall()
# Assert the events have been received
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
# Unregister from events
context.remove_service_listener(self)
def testServiceModified(self):
context = self.framework.get_bundle_context()
assert isinstance(context, BundleContext)
# Register to events
self.assertTrue(context.add_service_listener(self, "(test=True)"),
"Can't register the service listener")
self.bundle = bundle = context.install_bundle(self.test_bundle_name)
assert isinstance(bundle, Bundle)
bundle.start()
self.assertEqual([ServiceEvent.REGISTERED],
self.received, "Received {0}".format(self.received))
self.reset_state()
ref = context.get_service_reference(IEchoService)
self.assertIsNotNone(ref, "ServiceReference not found")
svc = context.get_service(ref)
self.assertIsNotNone(ref, "Invalid service instance")
svc.modify({"answer": 42})
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
svc.modify({"answer": 42})
self.assertEqual([], self.received,
"Received {0}".format(self.received))
self.reset_state()
svc.modify({"test": False})
self.assertEqual([ServiceEvent.MODIFIED_ENDMATCH],
self.received, "Received {0}".format(self.received))
self.reset_state()
svc.modify({"test": True})
self.assertEqual([ServiceEvent.MODIFIED],
self.received, "Received {0}".format(self.received))
self.reset_state()
bundle.stop()
self.assertEqual([ServiceEvent.UNREGISTERING],
self.received, "Received {0}".format(self.received))
self.reset_state()
bundle.uninstall()
context.remove_service_listener(self)
class EventListenerHookTest(unittest.TestCase):
def setUp(self):
self.framework = FrameworkFactory.get_framework()
self.framework.start()
self.test_bundle_name = SERVICE_BUNDLE
self.bundle = None
self.received = []
def tearDown(self):
self.framework.stop()
self.framework.delete()
def test_normal_behaviour(self):
events = []
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
events.append((svc_event, listeners_dict))
ctx = self.framework.get_bundle_context()
reg = ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
self.assertFalse(events)
# Register a dummy service
dummy_reg = ctx.register_service("dummy", object(), {})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.REGISTERED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# No listeners are registered
self.assertFalse(listeners)
# Update the service
dummy_reg.set_properties({"hello": "world"})
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.MODIFIED)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the service
dummy_reg.unregister()
# Pop information
event, listeners = events.pop(0)
# Check event
assert isinstance(event, ServiceEvent)
self.assertEqual(event.get_kind(), ServiceEvent.UNREGISTERING)
self.assertIs(event.get_service_reference(), dummy_reg.get_reference())
# Unregister the hook
reg.unregister()
# Register a new service
ctx.register_service("dummy", object(), {})
# Hook must not be notified
self.assertFalse(events)
def test_hook(self):
# Add a bundle to have two contexts in the test
fw_ctx = self.framework.get_bundle_context()
bnd = fw_ctx.install_bundle("tests.dummy_1")
bnd.start()
bnd_ctx = bnd.get_bundle_context()
# Setup a hook
class Hook(object):
@staticmethod
def event(svc_event, listeners_dict):
to_remove = svc_event.get_service_reference() \
.get_property("to.remove")
info_to_remove = []
for listener_bc, listeners_info in listeners_dict.items():
# Check the dictionary content
for listener_info in listeners_info:
self.assertIs(listener_bc, listener_info.bundle_context)
self.assertIs(
listener_bc, listener_info.listener.context)
self.assertIs(
listener_bc, listener_info.get_bundle_context())
if listener_info.listener in to_remove:
info_to_remove.append(listener_info)
# Remove the requested listeners
for listener_info in info_to_remove:
listeners_dict[listener_info.bundle_context] \
.remove(listener_info)
fw_ctx.register_service(SERVICE_EVENT_LISTENER_HOOK, Hook(), {})
# Register multiple listeners
class Listener(object):
def __init__(self, bc):
self.context = bc
self.storage = []
bc.add_service_listener(self)
def service_changed(self, event):
self.storage.append(event)
listener_referee = Listener(fw_ctx)
listener_1 = Listener(fw_ctx)
listener_2 = Listener(bnd_ctx)
# Register a service that only the referee will get
reg = fw_ctx.register_service(
"dummy", object(), {"to.remove": [listener_1, listener_2]})
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.REGISTERED)
self.assertFalse(listener_1.storage)
self.assertFalse(listener_2.storage)
# Modify it so that listener_1 gets it
reg.set_properties({"to.remove": [listener_2]})
self.assertFalse(listener_2.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt1 = listener_1.storage.pop(0)
self.assertIs(evt1, evt)
# Modify it so that listener_2, but not listener_1 gets it
reg.set_properties({"to.remove": [listener_1]})
self.assertFalse(listener_1.storage)
evt = listener_referee.storage.pop(0)
self.assertIs(evt.get_service_reference(), reg.get_reference())
self.assertEqual(evt.get_kind(), ServiceEvent.MODIFIED)
evt2 = listener_2.storage.pop(0)
self.assertIs(evt2, evt)
# ------------------------------------------------------------------------------
if __name__ == "__main__":
# Set logging level
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| true
| true
|
f70ce34117ba3df7c222bb20549fc1467f2d3a87
| 8,468
|
py
|
Python
|
tests/test_core.py
|
kevinheavey/borsh-py
|
e49dee71716ec217e8c9966aaa621c61669f7c15
|
[
"MIT"
] | 11
|
2021-10-04T19:47:22.000Z
|
2022-03-27T05:27:17.000Z
|
tests/test_core.py
|
kevinheavey/borsh-py
|
e49dee71716ec217e8c9966aaa621c61669f7c15
|
[
"MIT"
] | 8
|
2021-09-30T13:57:43.000Z
|
2022-03-14T11:20:53.000Z
|
tests/test_core.py
|
kevinheavey/borsh-py
|
e49dee71716ec217e8c9966aaa621c61669f7c15
|
[
"MIT"
] | 4
|
2021-11-13T10:46:37.000Z
|
2022-03-27T05:27:20.000Z
|
"""Core tests."""
from typing import Any
import pytest
from borsh_construct import (
F32,
F64,
I8,
I16,
I32,
I64,
I128,
U8,
U16,
U32,
U64,
U128,
Bool,
Vec,
CStruct,
TupleStruct,
Enum,
String,
Option,
HashMap,
HashSet,
Bytes,
)
from borsh_construct.core import (
NAMED_TUPLE_FIELD_ERROR,
TUPLE_DATA,
UNNAMED_SUBCON_ERROR,
NON_STR_NAME_ERROR,
UNDERSCORE_NAME_ERROR,
TUPLE_DATA_NAME_ERROR,
)
from construct import Construct, Float32l, Float64l, FormatField, FormatFieldError
ENUM = Enum(
"Unit",
"TupleVariant" / TupleStruct(U128, String, I64, Option(U16)),
"CStructVariant"
/ CStruct("u128_field" / U128, "string_field" / String, "vec_field" / Vec(U16)),
enum_name="Placeholder",
)
TYPE_INPUT_EXPECTED = (
(Bool, True, [1]),
(Bool, False, [0]),
(U8, 255, [255]),
(I8, -128, [128]),
(U16, 65535, [255, 255]),
(I16, -32768, [0, 128]),
(U32, 4294967295, [255, 255, 255, 255]),
(I32, -2147483648, [0, 0, 0, 128]),
(U64, 18446744073709551615, [255, 255, 255, 255, 255, 255, 255, 255]),
(I64, -9223372036854775808, [0, 0, 0, 0, 0, 0, 0, 128]),
(
U128,
340282366920938463463374607431768211455,
[
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
],
),
(
I128,
-170141183460469231731687303715884105728,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128],
),
(F32, 0.5, [0, 0, 0, 63]),
(F64, -0.5, [0, 0, 0, 0, 0, 0, 224, 191]),
(I16[3], [1, 2, 3], [1, 0, 2, 0, 3, 0]),
(Vec(I16), [1, 1], [2, 0, 0, 0, 1, 0, 1, 0]),
(
TupleStruct(U128, String, I64, Option(U16)),
[123, "hello", 1400, 13],
[
123,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
120,
5,
0,
0,
0,
0,
0,
0,
1,
13,
0,
],
),
(
CStruct("u128_field" / U128, "string_field" / String, "vec_field" / Vec(U16)),
{"u128_field": 1033, "string_field": "hello", "vec_field": [1, 2, 3]},
[
9,
4,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
3,
0,
0,
0,
1,
0,
2,
0,
3,
0,
],
),
(ENUM, ENUM.enum.Unit(), [0]),
(
ENUM,
ENUM.enum.TupleVariant([10, "hello", 13, 12]),
[
1,
10,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
13,
0,
0,
0,
0,
0,
0,
0,
1,
12,
0,
],
),
(
ENUM,
ENUM.enum.CStructVariant(
u128_field=15,
string_field="hi",
vec_field=[3, 2, 1],
),
[
2,
15,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
104,
105,
3,
0,
0,
0,
3,
0,
2,
0,
1,
0,
],
),
(
HashMap(U8, ENUM),
{2: ENUM.enum.Unit(), 1: ENUM.enum.TupleVariant([11, "hello", 123, None])},
[
2,
0,
0,
0,
1,
1,
11,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
123,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
],
),
(HashSet(U8), {1, 2, 3}, [3, 0, 0, 0, 1, 2, 3]),
(Bytes, b"\x01\x02\x03", [3, 0, 0, 0, 1, 2, 3]),
(
String,
"🚀🚀🚀",
[12, 0, 0, 0, 240, 159, 154, 128, 240, 159, 154, 128, 240, 159, 154, 128],
),
)
@pytest.mark.parametrize("obj_type,obj_input,expected", TYPE_INPUT_EXPECTED)
def test_serde(obj_type: Construct, obj_input: Any, expected: Any) -> None:
"""Tests that inputs are serialized and deserialized as expected."""
serialized = obj_type.build(obj_input)
assert list(serialized) == expected
deserialized = obj_type.parse(serialized)
assert deserialized == obj_input
@pytest.mark.parametrize(
"nonan_type,construct_type",
[(F32, Float32l), (F64, Float64l)],
)
def test_nan_floats(nonan_type: FormatField, construct_type: FormatField) -> None:
"""Check that error is raised if you try to build or parse nan floats."""
nan = float("nan") # noqa: WPS456
with pytest.raises(FormatFieldError):
nonan_type.build(nan)
nan_serialized = construct_type.build(nan)
with pytest.raises(FormatFieldError):
nonan_type.parse(nan_serialized)
def test_named_tuple_struct_field_raises() -> None:
"""Check that error is raised if TupleStruct field is named."""
with pytest.raises(ValueError) as exc:
TupleStruct("foo" / U8)
assert exc.value == NAMED_TUPLE_FIELD_ERROR
def test_unnamed_subcon_raises() -> None:
"""Check that error is raised when enum variant or CStruct field is unnamed."""
with pytest.raises(ValueError) as excinfo:
Enum("foo", TupleStruct(U8), enum_name="placeholder")
assert str(excinfo.value) == str(UNNAMED_SUBCON_ERROR)
def test_non_str_name_raises() -> None:
"""Check that error is raised when subcon name is not a string."""
with pytest.raises(ValueError) as excinfo:
CStruct(1 / U8) # type: ignore
assert str(excinfo.value) == str(NON_STR_NAME_ERROR)
def test_tuple_data_name_raises() -> None:
"""Check that error is raised when subcon name is not a string."""
with pytest.raises(ValueError) as excinfo:
CStruct(TUPLE_DATA / U8)
assert str(excinfo.value) == str(TUPLE_DATA_NAME_ERROR)
def test_underscore_name_raises() -> None:
"""Check that error is raised when subcon name starts with underscore."""
with pytest.raises(ValueError) as excinfo:
CStruct("_foo" / U8)
assert str(excinfo.value) == str(UNDERSCORE_NAME_ERROR)
def test_unrecognized_variant_type_raises() -> None:
"""Check that error is raised if variant type is not valid."""
with pytest.raises(ValueError) as excinfo:
Enum("foo" / U8, enum_name="placeholder")
assert "Unrecognized" in str(excinfo.value)
def test_duplicate_variant_name_raises() -> None:
"""Check error raised if two variants in same Enum have same name."""
with pytest.raises(ValueError) as excinfo:
Enum("foo", "foo", enum_name="placeholder")
assert "must be unique" in str(excinfo.value)
| 21.881137
| 86
| 0.41769
|
from typing import Any
import pytest
from borsh_construct import (
F32,
F64,
I8,
I16,
I32,
I64,
I128,
U8,
U16,
U32,
U64,
U128,
Bool,
Vec,
CStruct,
TupleStruct,
Enum,
String,
Option,
HashMap,
HashSet,
Bytes,
)
from borsh_construct.core import (
NAMED_TUPLE_FIELD_ERROR,
TUPLE_DATA,
UNNAMED_SUBCON_ERROR,
NON_STR_NAME_ERROR,
UNDERSCORE_NAME_ERROR,
TUPLE_DATA_NAME_ERROR,
)
from construct import Construct, Float32l, Float64l, FormatField, FormatFieldError
ENUM = Enum(
"Unit",
"TupleVariant" / TupleStruct(U128, String, I64, Option(U16)),
"CStructVariant"
/ CStruct("u128_field" / U128, "string_field" / String, "vec_field" / Vec(U16)),
enum_name="Placeholder",
)
TYPE_INPUT_EXPECTED = (
(Bool, True, [1]),
(Bool, False, [0]),
(U8, 255, [255]),
(I8, -128, [128]),
(U16, 65535, [255, 255]),
(I16, -32768, [0, 128]),
(U32, 4294967295, [255, 255, 255, 255]),
(I32, -2147483648, [0, 0, 0, 128]),
(U64, 18446744073709551615, [255, 255, 255, 255, 255, 255, 255, 255]),
(I64, -9223372036854775808, [0, 0, 0, 0, 0, 0, 0, 128]),
(
U128,
340282366920938463463374607431768211455,
[
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
255,
],
),
(
I128,
-170141183460469231731687303715884105728,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128],
),
(F32, 0.5, [0, 0, 0, 63]),
(F64, -0.5, [0, 0, 0, 0, 0, 0, 224, 191]),
(I16[3], [1, 2, 3], [1, 0, 2, 0, 3, 0]),
(Vec(I16), [1, 1], [2, 0, 0, 0, 1, 0, 1, 0]),
(
TupleStruct(U128, String, I64, Option(U16)),
[123, "hello", 1400, 13],
[
123,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
120,
5,
0,
0,
0,
0,
0,
0,
1,
13,
0,
],
),
(
CStruct("u128_field" / U128, "string_field" / String, "vec_field" / Vec(U16)),
{"u128_field": 1033, "string_field": "hello", "vec_field": [1, 2, 3]},
[
9,
4,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
3,
0,
0,
0,
1,
0,
2,
0,
3,
0,
],
),
(ENUM, ENUM.enum.Unit(), [0]),
(
ENUM,
ENUM.enum.TupleVariant([10, "hello", 13, 12]),
[
1,
10,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
13,
0,
0,
0,
0,
0,
0,
0,
1,
12,
0,
],
),
(
ENUM,
ENUM.enum.CStructVariant(
u128_field=15,
string_field="hi",
vec_field=[3, 2, 1],
),
[
2,
15,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
0,
0,
104,
105,
3,
0,
0,
0,
3,
0,
2,
0,
1,
0,
],
),
(
HashMap(U8, ENUM),
{2: ENUM.enum.Unit(), 1: ENUM.enum.TupleVariant([11, "hello", 123, None])},
[
2,
0,
0,
0,
1,
1,
11,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
104,
101,
108,
108,
111,
123,
0,
0,
0,
0,
0,
0,
0,
0,
2,
0,
],
),
(HashSet(U8), {1, 2, 3}, [3, 0, 0, 0, 1, 2, 3]),
(Bytes, b"\x01\x02\x03", [3, 0, 0, 0, 1, 2, 3]),
(
String,
"🚀🚀🚀",
[12, 0, 0, 0, 240, 159, 154, 128, 240, 159, 154, 128, 240, 159, 154, 128],
),
)
@pytest.mark.parametrize("obj_type,obj_input,expected", TYPE_INPUT_EXPECTED)
def test_serde(obj_type: Construct, obj_input: Any, expected: Any) -> None:
serialized = obj_type.build(obj_input)
assert list(serialized) == expected
deserialized = obj_type.parse(serialized)
assert deserialized == obj_input
@pytest.mark.parametrize(
"nonan_type,construct_type",
[(F32, Float32l), (F64, Float64l)],
)
def test_nan_floats(nonan_type: FormatField, construct_type: FormatField) -> None:
nan = float("nan") with pytest.raises(FormatFieldError):
nonan_type.build(nan)
nan_serialized = construct_type.build(nan)
with pytest.raises(FormatFieldError):
nonan_type.parse(nan_serialized)
def test_named_tuple_struct_field_raises() -> None:
with pytest.raises(ValueError) as exc:
TupleStruct("foo" / U8)
assert exc.value == NAMED_TUPLE_FIELD_ERROR
def test_unnamed_subcon_raises() -> None:
with pytest.raises(ValueError) as excinfo:
Enum("foo", TupleStruct(U8), enum_name="placeholder")
assert str(excinfo.value) == str(UNNAMED_SUBCON_ERROR)
def test_non_str_name_raises() -> None:
with pytest.raises(ValueError) as excinfo:
        CStruct(1 / U8)
    assert str(excinfo.value) == str(NON_STR_NAME_ERROR)
def test_tuple_data_name_raises() -> None:
with pytest.raises(ValueError) as excinfo:
CStruct(TUPLE_DATA / U8)
assert str(excinfo.value) == str(TUPLE_DATA_NAME_ERROR)
def test_underscore_name_raises() -> None:
with pytest.raises(ValueError) as excinfo:
CStruct("_foo" / U8)
assert str(excinfo.value) == str(UNDERSCORE_NAME_ERROR)
def test_unrecognized_variant_type_raises() -> None:
with pytest.raises(ValueError) as excinfo:
Enum("foo" / U8, enum_name="placeholder")
assert "Unrecognized" in str(excinfo.value)
def test_duplicate_variant_name_raises() -> None:
with pytest.raises(ValueError) as excinfo:
Enum("foo", "foo", enum_name="placeholder")
assert "must be unique" in str(excinfo.value)
| true
| true
|
f70ce39d0ffbfb8b9b2bf0200c9ad87b5f53c49d
| 5,021
|
py
|
Python
|
sample/pid/wall_follower_pid.py
|
savazeb/cosmos-ai
|
4606e959396ebedca73086601078aa9c0ed77b31
|
[
"MIT"
] | null | null | null |
sample/pid/wall_follower_pid.py
|
savazeb/cosmos-ai
|
4606e959396ebedca73086601078aa9c0ed77b31
|
[
"MIT"
] | null | null | null |
sample/pid/wall_follower_pid.py
|
savazeb/cosmos-ai
|
4606e959396ebedca73086601078aa9c0ed77b31
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../..")
from api.control.PID import PID
from api.control.sensor import sensor
from api.control.robot import robot
import posix_ipc as ipc
import time
import threading
import math
import numpy as np
graphq = ipc.MessageQueue('/graphQueue', ipc.O_CREAT)
mq = ipc.MessageQueue('/keyQueue', ipc.O_CREAT)
mq.block = False
lidar = sensor('lidar', '/pointQueue')
""" THREAD CLASS """
class sensor_thread(threading.Thread):
def __init__(self, name, delay,*args, **kwargs):
super(sensor_thread, self).__init__(*args, **kwargs)
self._stopper = threading.Event()
self.name = name
self.delay = delay
def stopit(self):
self._stopper.set()
def stopped(self):
return self._stopper.isSet()
def run(self):
while True:
if self.stopped():
return
if self.name == 'cam':
cam.set_data()
if self.name == 'ir':
ir.set_data()
if self.name == 'lidar':
lidar.set_data()
time.sleep(self.delay)
def getPressed():
try:
mes = mq.receive()
key = list((mes[0].decode()).split(","))
key = int(key[0]), list(map(int, key[1:3])), list(map(float, key[3:]))
return key
except:
return None
""" GLOBAL VARIABLE HERE """
SENSOR_TYPE = [('lidar', 0.0)]
ATTRIBUTE = 'data'
DELTA_ANGLE = 50
RIGHT_HAND_ANGLE = 90
HELPER_HAND_ANGLE = RIGHT_HAND_ANGLE + DELTA_ANGLE
FACE_ANGLE = 180
WALL_THRES = 1
WALL_DISTANCE = 60
WALL_LEFT_BOUND = WALL_DISTANCE - WALL_THRES
WALL_RIGHT_BOUND = WALL_DISTANCE + WALL_THRES
AVOIDER_POWER = 35
STOP = 0, 0, 0
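# Wall-following setup (summary of the constants above): the robot tries to keep the right-hand
# wall at WALL_DISTANCE, with a dead band of +/- WALL_THRES; AVOIDER_POWER is the turn power
# used when something is detected closer than 50 straight ahead.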
class power:
value = 0, 0, 0
def set(self, x, y, turn):
self.value = x, y ,turn
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def main():
start = 0
last_start = start
min_power = 20
max_power = 50
kp = 1
ki = 0
kd = 0
lidar_pid = PID(kp, ki, kd, WALL_DISTANCE)
workers = []
for name, delay in SENSOR_TYPE:
print('[info] start thread : ' , name)
thread = sensor_thread(name, delay)
workers.append(thread)
thread.start()
try:
rc = robot('/serialWriteQueue')
time.sleep(5)
rc.connect()
time.sleep(0.5)
pwr = power()
while True:
key = getPressed()
if key:
print(key)
start, (min_power, max_power), (kp, ki, kd) = key
lidar_pid.setOutputLimits((-max_power, max_power))
lidar_pid.setKValue(kp, ki ,kd)
if start != last_start:
rx_distance = 0
graphq.send(",".join(map(str, [start, rx_distance, WALL_DISTANCE])))
last_start = start
if start:
point = lidar.data
print(type(point))
if type(point) is np.ndarray:
print("ye")
angles, ranges = point
right_hand = float(ranges[find_nearest(angles, RIGHT_HAND_ANGLE)])
helper_hand = float(ranges[find_nearest(angles, HELPER_HAND_ANGLE)])
face = float(ranges[find_nearest(angles, FACE_ANGLE)])
teta = math.radians(DELTA_ANGLE)
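                    # Two-ray wall-following geometry: alpha below estimates the wall angle from
                    # the 90-degree ray and the helper ray DELTA_ANGLE further round, and
                    # rx_distance is the resulting right-hand wall distance fed into the PID.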
if face < 50:
print("50")
pwr.set(0, 0, AVOIDER_POWER)
elif right_hand > 0 and helper_hand > 0:
print("ye")
alpha = math.atan((right_hand * math.cos(teta) - helper_hand)/ (right_hand * math.sin(teta)))
rx_distance = helper_hand * math.cos(math.radians(alpha))
graphq.send(",".join(map(str, [start, rx_distance, WALL_DISTANCE])))
if rx_distance > WALL_RIGHT_BOUND or rx_distance < WALL_LEFT_BOUND:
out = lidar_pid.update(rx_distance)
if out < min_power and out > 0:
out = min_power
if out > -min_power and out < 0:
out = -min_power
print(rx_distance, out)
pwr.set(0, max_power, out)
else:
pwr.set(0, max_power, 0)
else:
pwr.set(*STOP)
else:
pwr.set(*STOP)
else:
pwr.set(*STOP)
rc.drive(*pwr.value)
time.sleep(0.001)
except KeyboardInterrupt:
print('[info] interrupt pressed')
print('[main] work finished')
for worker in workers:
worker.stopit()
time.sleep(3)
worker.join()
#lidar.cleanup()
#ir.cleanup()
#cam.cleanup()
#rc.disconnect()
print('[main] end')
main()
| 31.186335
| 117
| 0.520016
|
import sys
sys.path.append("../..")
from api.control.PID import PID
from api.control.sensor import sensor
from api.control.robot import robot
import posix_ipc as ipc
import time
import threading
import math
import numpy as np
graphq = ipc.MessageQueue('/graphQueue', ipc.O_CREAT)
mq = ipc.MessageQueue('/keyQueue', ipc.O_CREAT)
mq.block = False
lidar = sensor('lidar', '/pointQueue')
class sensor_thread(threading.Thread):
def __init__(self, name, delay,*args, **kwargs):
super(sensor_thread, self).__init__(*args, **kwargs)
self._stopper = threading.Event()
self.name = name
self.delay = delay
def stopit(self):
self._stopper.set()
def stopped(self):
return self._stopper.isSet()
def run(self):
while True:
if self.stopped():
return
if self.name == 'cam':
cam.set_data()
if self.name == 'ir':
ir.set_data()
if self.name == 'lidar':
lidar.set_data()
time.sleep(self.delay)
def getPressed():
try:
mes = mq.receive()
key = list((mes[0].decode()).split(","))
key = int(key[0]), list(map(int, key[1:3])), list(map(float, key[3:]))
return key
except:
return None
SENSOR_TYPE = [('lidar', 0.0)]
ATTRIBUTE = 'data'
DELTA_ANGLE = 50
RIGHT_HAND_ANGLE = 90
HELPER_HAND_ANGLE = RIGHT_HAND_ANGLE + DELTA_ANGLE
FACE_ANGLE = 180
WALL_THRES = 1
WALL_DISTANCE = 60
WALL_LEFT_BOUND = WALL_DISTANCE - WALL_THRES
WALL_RIGHT_BOUND = WALL_DISTANCE + WALL_THRES
AVOIDER_POWER = 35
STOP = 0, 0, 0
class power:
value = 0, 0, 0
def set(self, x, y, turn):
self.value = x, y ,turn
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def main():
start = 0
last_start = start
min_power = 20
max_power = 50
kp = 1
ki = 0
kd = 0
lidar_pid = PID(kp, ki, kd, WALL_DISTANCE)
workers = []
for name, delay in SENSOR_TYPE:
print('[info] start thread : ' , name)
thread = sensor_thread(name, delay)
workers.append(thread)
thread.start()
try:
rc = robot('/serialWriteQueue')
time.sleep(5)
rc.connect()
time.sleep(0.5)
pwr = power()
while True:
key = getPressed()
if key:
print(key)
start, (min_power, max_power), (kp, ki, kd) = key
lidar_pid.setOutputLimits((-max_power, max_power))
lidar_pid.setKValue(kp, ki ,kd)
if start != last_start:
rx_distance = 0
graphq.send(",".join(map(str, [start, rx_distance, WALL_DISTANCE])))
last_start = start
if start:
point = lidar.data
print(type(point))
if type(point) is np.ndarray:
print("ye")
angles, ranges = point
right_hand = float(ranges[find_nearest(angles, RIGHT_HAND_ANGLE)])
helper_hand = float(ranges[find_nearest(angles, HELPER_HAND_ANGLE)])
face = float(ranges[find_nearest(angles, FACE_ANGLE)])
teta = math.radians(DELTA_ANGLE)
if face < 50:
print("50")
pwr.set(0, 0, AVOIDER_POWER)
elif right_hand > 0 and helper_hand > 0:
print("ye")
alpha = math.atan((right_hand * math.cos(teta) - helper_hand)/ (right_hand * math.sin(teta)))
rx_distance = helper_hand * math.cos(math.radians(alpha))
graphq.send(",".join(map(str, [start, rx_distance, WALL_DISTANCE])))
if rx_distance > WALL_RIGHT_BOUND or rx_distance < WALL_LEFT_BOUND:
out = lidar_pid.update(rx_distance)
if out < min_power and out > 0:
out = min_power
if out > -min_power and out < 0:
out = -min_power
print(rx_distance, out)
pwr.set(0, max_power, out)
else:
pwr.set(0, max_power, 0)
else:
pwr.set(*STOP)
else:
pwr.set(*STOP)
else:
pwr.set(*STOP)
rc.drive(*pwr.value)
time.sleep(0.001)
except KeyboardInterrupt:
print('[info] interrupt pressed')
print('[main] work finished')
for worker in workers:
worker.stopit()
time.sleep(3)
worker.join()
print('[main] end')
main()
| true
| true
|
f70ce4a07512185fe6b340ce3de6595da8c779d4
| 5,801
|
py
|
Python
|
dynsimf/examples/school_segregation.py
|
Tensaiz/DyNSimF
|
6288ff83f1b3f56fa626f741b55ade57b7c1b358
|
[
"BSD-2-Clause"
] | 3
|
2020-11-04T08:52:33.000Z
|
2021-01-27T22:27:07.000Z
|
dynsimf/examples/school_segregation.py
|
Tensaiz/DyNSimF
|
6288ff83f1b3f56fa626f741b55ade57b7c1b358
|
[
"BSD-2-Clause"
] | 2
|
2020-11-13T07:49:33.000Z
|
2020-11-16T08:22:13.000Z
|
dynsimf/examples/school_segregation.py
|
Tensaiz/DyNSimF
|
6288ff83f1b3f56fa626f741b55ade57b7c1b358
|
[
"BSD-2-Clause"
] | null | null | null |
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pickle
import math
from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration
from dynsimf.models.components.Memory import MemoryConfiguration
from dynsimf.models.components.Memory import MemoryConfigurationType
from dynsimf.models.components.conditions.Condition import ConditionType
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdCondition
from dynsimf.models.components.conditions.CustomCondition import CustomCondition
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdOperator
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdConfiguration
if __name__ == "__main__":
# Network definition
g_list = pickle.load(open(r"C:/Users/Admin/MEGA/Uni/Master/Thesis/data/g_list.pkl", 'rb'))
X_list = pickle.load(open(r"C:/Users/Admin/MEGA/Uni/Master/Thesis/data/x_list.pkl", 'rb'))
school = 3
X = X_list[school]
n = len(X['sex'])
avg_initial_links = 5 # desired average degree in initial network
link_prop = avg_initial_links/n
g = np.random.choice([0, 1], size=(n, n),
p=[1 - link_prop,
link_prop])
np.fill_diagonal(g, 0)
g = nx.convert_matrix.from_numpy_array(g, create_using=nx.DiGraph)
cfg = {
'adjacency_memory_config': \
MemoryConfiguration(MemoryConfigurationType.ADJACENCY, {
'memory_size': 0
}),
'edge_values_memory_config': \
MemoryConfiguration(MemoryConfigurationType.EDGE_VALUES, {
'memory_size': 0
})
}
model = Model(g, ModelConfiguration(cfg))
constants = {
'n': n,
'delta': 0.05,
'gamma': 0.65,
'c': 0.175,
'B1': 0.1,
'B2': 0.1,
'B3': 0.2,
'sigma': 0.035,
'alpha': 2,
'min_prop': 1000,
'X': X
}
def initial_utility():
utility = np.zeros((constants['n'], constants['n']))
race = list(constants['X']['race'])
sex = list(constants['X']['sex'])
grade = list(constants['X']['grade'])
for i in range(constants['n']):
for j in range(constants['n']):
weighted_diffs = [constants['B1']*abs(sex[i] - sex[j]),
constants['B2'] * (0 if grade[i] == grade[j] else 1),
constants['B3'] * (0 if race[i] == race[j] else 1)]
utility[i, j] = math.exp(-sum(weighted_diffs))
return utility
def initial_prop():
prop = np.zeros((constants['n'], constants['n']))
utility = initial_utility()
# Loop over the person and their peers
for i in range(constants['n']):
for j in range(constants['n']):
if i == j:
prop[i, j] = 0
else:
prop[i, j] = utility[i, j] + constants['min_prop']
# Normalize
prop[i, :] = prop[i, :] / np.sum(prop[i, :])
return prop
constants['probability'] = initial_prop()
constants['utility'] = initial_utility()
def nb_update():
adj = model.get_adjacency()
return {'Neighbors': np.sum(adj, axis=1)}
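    # node_utility below scores a node's current links: direct ties, reciprocated ties weighted by
    # gamma, friend-of-friend ties weighted by delta, minus a convex linking cost d_i ** alpha * c.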
def node_utility(node, adj):
utility = constants['utility']
# degree, connection gain and cost calculations
d_i = adj[node].sum()
direct_u = np.sum(adj[node] * utility[node])
mutual_u = np.sum(adj[node] * adj.T[node] * utility[node])
# indirect connection gain
a = (adj.T.dot(adj[node, :]) * utility)[node]
a[node] = 0
indirect_u = np.sum(a)
return direct_u + constants['gamma'] * mutual_u + constants['delta'] * indirect_u - d_i ** constants['alpha'] * constants['c']
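    # network_update visits nodes in random order; each node samples one peer according to the
    # 'probability' matrix and keeps or drops that link depending on which option gives the higher
    # utility once Gaussian noise (eps) is added.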
def network_update(nodes):
adj = model.get_adjacency()
order = nodes.copy()
eps = np.random.normal(scale=constants['sigma'], size=constants['n']*2)
np.random.shuffle(order)
changes = {}
P = constants['probability']
for node in order:
other_node = node
while other_node == node:
other_node = np.random.choice(nodes, p=P[node])
existing_connection = not not adj[node, other_node]
adj[node, other_node] = 0
U_without = node_utility(node, adj) + eps[node]
adj[node, other_node] = 1
U_with = node_utility(node, adj) + eps[-node]
if U_without > U_with and existing_connection:
changes[node] = {'remove': [other_node]}
elif U_without < U_with and not existing_connection:
changes[node] = {'add': [other_node]}
return {
'edge_change': changes
}
# Model definition
model.constants = constants
model.set_states(['Neighbors'])
model.add_update(nb_update)
model.set_edge_values(['utility'])
model.set_initial_edge_values({
'utility': initial_utility,
})
model.add_network_update(network_update, get_nodes=True)
output = model.simulate(500)
visualization_config = {
'plot_interval': 10,
'edge_values': 'utility',
'plot_variable': 'Neighbors',
'variable_limits': {
'Neighbors': [0, 55]
},
'color_scale': 'Reds',
'show_plot': False,
'repeat': True,
'plot_output': '../animations/school_segregation/school_' + str(school) + '.gif',
'plot_title': 'School segregation'
}
model.configure_visualization(visualization_config, output)
model.visualize('animation')
| 31.188172
| 134
| 0.58421
|
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import pickle
import math
from dynsimf.models.Model import Model
from dynsimf.models.Model import ModelConfiguration
from dynsimf.models.components.Memory import MemoryConfiguration
from dynsimf.models.components.Memory import MemoryConfigurationType
from dynsimf.models.components.conditions.Condition import ConditionType
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdCondition
from dynsimf.models.components.conditions.CustomCondition import CustomCondition
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdOperator
from dynsimf.models.components.conditions.ThresholdCondition import ThresholdConfiguration
if __name__ == "__main__":
g_list = pickle.load(open(r"C:/Users/Admin/MEGA/Uni/Master/Thesis/data/g_list.pkl", 'rb'))
X_list = pickle.load(open(r"C:/Users/Admin/MEGA/Uni/Master/Thesis/data/x_list.pkl", 'rb'))
school = 3
X = X_list[school]
n = len(X['sex'])
    avg_initial_links = 5
    link_prop = avg_initial_links/n
g = np.random.choice([0, 1], size=(n, n),
p=[1 - link_prop,
link_prop])
np.fill_diagonal(g, 0)
g = nx.convert_matrix.from_numpy_array(g, create_using=nx.DiGraph)
cfg = {
'adjacency_memory_config': \
MemoryConfiguration(MemoryConfigurationType.ADJACENCY, {
'memory_size': 0
}),
'edge_values_memory_config': \
MemoryConfiguration(MemoryConfigurationType.EDGE_VALUES, {
'memory_size': 0
})
}
model = Model(g, ModelConfiguration(cfg))
constants = {
'n': n,
'delta': 0.05,
'gamma': 0.65,
'c': 0.175,
'B1': 0.1,
'B2': 0.1,
'B3': 0.2,
'sigma': 0.035,
'alpha': 2,
'min_prop': 1000,
'X': X
}
def initial_utility():
utility = np.zeros((constants['n'], constants['n']))
race = list(constants['X']['race'])
sex = list(constants['X']['sex'])
grade = list(constants['X']['grade'])
for i in range(constants['n']):
for j in range(constants['n']):
weighted_diffs = [constants['B1']*abs(sex[i] - sex[j]),
constants['B2'] * (0 if grade[i] == grade[j] else 1),
constants['B3'] * (0 if race[i] == race[j] else 1)]
utility[i, j] = math.exp(-sum(weighted_diffs))
return utility
def initial_prop():
prop = np.zeros((constants['n'], constants['n']))
utility = initial_utility()
for i in range(constants['n']):
for j in range(constants['n']):
if i == j:
prop[i, j] = 0
else:
prop[i, j] = utility[i, j] + constants['min_prop']
prop[i, :] = prop[i, :] / np.sum(prop[i, :])
return prop
constants['probability'] = initial_prop()
constants['utility'] = initial_utility()
def nb_update():
adj = model.get_adjacency()
return {'Neighbors': np.sum(adj, axis=1)}
def node_utility(node, adj):
utility = constants['utility']
d_i = adj[node].sum()
direct_u = np.sum(adj[node] * utility[node])
mutual_u = np.sum(adj[node] * adj.T[node] * utility[node])
a = (adj.T.dot(adj[node, :]) * utility)[node]
a[node] = 0
indirect_u = np.sum(a)
return direct_u + constants['gamma'] * mutual_u + constants['delta'] * indirect_u - d_i ** constants['alpha'] * constants['c']
def network_update(nodes):
adj = model.get_adjacency()
order = nodes.copy()
eps = np.random.normal(scale=constants['sigma'], size=constants['n']*2)
np.random.shuffle(order)
changes = {}
P = constants['probability']
for node in order:
other_node = node
while other_node == node:
other_node = np.random.choice(nodes, p=P[node])
existing_connection = not not adj[node, other_node]
adj[node, other_node] = 0
U_without = node_utility(node, adj) + eps[node]
adj[node, other_node] = 1
U_with = node_utility(node, adj) + eps[-node]
if U_without > U_with and existing_connection:
changes[node] = {'remove': [other_node]}
elif U_without < U_with and not existing_connection:
changes[node] = {'add': [other_node]}
return {
'edge_change': changes
}
model.constants = constants
model.set_states(['Neighbors'])
model.add_update(nb_update)
model.set_edge_values(['utility'])
model.set_initial_edge_values({
'utility': initial_utility,
})
model.add_network_update(network_update, get_nodes=True)
output = model.simulate(500)
visualization_config = {
'plot_interval': 10,
'edge_values': 'utility',
'plot_variable': 'Neighbors',
'variable_limits': {
'Neighbors': [0, 55]
},
'color_scale': 'Reds',
'show_plot': False,
'repeat': True,
'plot_output': '../animations/school_segregation/school_' + str(school) + '.gif',
'plot_title': 'School segregation'
}
model.configure_visualization(visualization_config, output)
model.visualize('animation')
| true
| true
|
f70ce603d9a803770d32c0f20b1a94a6eaaa3c20
| 23,141
|
py
|
Python
|
pyais/decode.py
|
KingKongOne/pyais
|
ddee5cc4eb8f01f494c82f7b14bdd55aa393af47
|
[
"MIT"
] | 1
|
2021-02-24T07:13:46.000Z
|
2021-02-24T07:13:46.000Z
|
pyais/decode.py
|
KingKongOne/pyais
|
ddee5cc4eb8f01f494c82f7b14bdd55aa393af47
|
[
"MIT"
] | null | null | null |
pyais/decode.py
|
KingKongOne/pyais
|
ddee5cc4eb8f01f494c82f7b14bdd55aa393af47
|
[
"MIT"
] | null | null | null |
from functools import partial
from pyais.exceptions import UnknownMessageException
import typing
import bitarray
from pyais.constants import (
NavigationStatus,
ManeuverIndicator,
TransmitMode,
EpfdType,
ShipType,
StationType,
StationIntervals,
NavAid
)
from pyais.util import get_int, encode_bin_as_ascii6
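# Each decoder below slices fixed bit ranges out of the AIS payload: get_int reads an
# (optionally signed) integer from a bit range, and encode_bin_as_ascii6 unpacks the 6-bit
# ASCII text fields (callsign, shipname, destination, ...).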
def decode_msg_1(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access)
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'status': NavigationStatus(get_int_from_data(38, 42)),
'turn': get_int_from_data(42, 50, signed=True),
'speed': get_int_from_data(50, 60) / 10.0,
'accuracy': bit_arr[60],
'lon': get_int_from_data(61, 89, signed=True) / 600000.0,
'lat': get_int_from_data(89, 116, signed=True) / 600000.0,
'course': get_int_from_data(116, 128) * 0.1,
'heading': get_int_from_data(128, 137),
'second': get_int_from_data(137, 143),
'maneuver': ManeuverIndicator(get_int_from_data(143, 145)),
'raim': bit_arr[148],
'radio': get_int_from_data(149, bit_arr.length()),
}
def decode_msg_2(bit_arr: bitarray.bitarray) -> typing.Dict:
"""AIS Vessel position report using SOTDMA (Self-Organizing Time Division Multiple Access)
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a
"""
return decode_msg_1(bit_arr)
def decode_msg_3(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
AIS Vessel position report using ITDMA (Incremental Time Division Multiple Access)
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_types_1_2_and_3_position_report_class_a
"""
return decode_msg_1(bit_arr)
def decode_msg_4(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
    Base Station Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_4_base_station_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'year': get_int_from_data(38, 52),
'month': get_int_from_data(52, 56),
'day': get_int_from_data(56, 61),
'hour': get_int_from_data(61, 66),
'minute': get_int_from_data(66, 72),
'second': get_int_from_data(72, 78),
'accuracy': bit_arr[78],
'lon': get_int_from_data(79, 107, signed=True) / 600000.0,
'lat': get_int_from_data(107, 134, signed=True) / 600000.0,
'epfd': EpfdType(get_int_from_data(134, 138)),
'raim': bit_arr[148],
'radio': get_int_from_data(148, len(bit_arr)),
}
def decode_msg_5(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Static and Voyage Related Data
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_5_static_and_voyage_related_data
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'ais_version': get_int_from_data(38, 40),
'imo': get_int_from_data(40, 70),
'callsign': encode_bin_as_ascii6(bit_arr[70:112]),
'shipname': encode_bin_as_ascii6(bit_arr[112:232]),
'shiptype': ShipType(get_int_from_data(232, 240)),
'to_bow': get_int_from_data(240, 249),
'to_stern': get_int_from_data(249, 258),
'to_port': get_int_from_data(258, 264),
'to_starboard': get_int_from_data(264, 270),
'epfd': EpfdType(get_int_from_data(270, 274)),
'month': get_int_from_data(274, 278),
'day': get_int_from_data(278, 283),
'hour': get_int_from_data(283, 288),
'minute': get_int_from_data(288, 294),
'draught': get_int_from_data(294, 302) / 10.0,
'destination': encode_bin_as_ascii6(bit_arr[302:422]),
'dte': bit_arr[-2]
}
def decode_msg_6(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
    Binary Addressed Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_4_base_station_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'seqno': get_int_from_data(38, 40),
'dest_mmsi': get_int_from_data(40, 70),
'retransmit': bit_arr[70],
'dac': get_int_from_data(72, 82),
'fid': get_int_from_data(82, 88),
'data': bit_arr[88:].to01()
}
def decode_msg_7(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Binary Acknowledge
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_7_binary_acknowledge
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'mmsiseq1': get_int_from_data(70, 72),
'mmsi2': get_int_from_data(72, 102),
'mmsiseq2': get_int_from_data(102, 104),
'mmsi3': get_int_from_data(104, 134),
'mmsiseq3': get_int_from_data(134, 136),
'mmsi4': get_int_from_data(136, 166),
'mmsiseq4': get_int_from_data(166, 168)
}
def decode_msg_8(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
    Binary Broadcast Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_8_binary_broadcast_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'dac': get_int_from_data(40, 50),
'fid': get_int_from_data(50, 56),
'data': bit_arr[56:].to01()
}
def decode_msg_9(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Standard SAR Aircraft Position Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_9_standard_sar_aircraft_position_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'alt': get_int_from_data(38, 50),
'speed': get_int_from_data(50, 60),
'accuracy': bit_arr[60],
'lon': get_int_from_data(61, 89, signed=True) / 600000.0,
'lat': get_int_from_data(89, 116, signed=True) / 600000.0,
'course': get_int_from_data(116, 128) * 0.1,
'second': get_int_from_data(128, 134),
'dte': bit_arr[142],
'assigned': bit_arr[146],
'raim': bit_arr[147],
'radio': get_int_from_data(148, 168)
}
def decode_msg_10(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
UTC/Date Inquiry
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_10_utc_date_inquiry
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'dest_mmsi': get_int_from_data(40, 70)
}
def decode_msg_11(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
UTC/Date Response
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_11_utc_date_response
"""
return decode_msg_4(bit_arr)
def decode_msg_12(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Addressed Safety-Related Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_12_addressed_safety_related_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'seqno': get_int_from_data(38, 40),
'dest_mmsi': get_int_from_data(40, 70),
'retransmit': bit_arr[70],
'text': encode_bin_as_ascii6(bit_arr[72:])
}
def decode_msg_13(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Identical to type 7
"""
return decode_msg_7(bit_arr)
def decode_msg_14(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Safety-Related Broadcast Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_14_safety_related_broadcast_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'text': encode_bin_as_ascii6(bit_arr[40:])
}
def decode_msg_15(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Interrogation
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_15_interrogation
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'type1_1': get_int_from_data(70, 76),
'offset1_1': get_int_from_data(76, 88),
'type1_2': get_int_from_data(90, 96),
'offset1_2': get_int_from_data(96, 108),
'mmsi2': get_int_from_data(110, 140),
'type2_1': get_int_from_data(140, 146),
'offset2_1': get_int_from_data(146, 157),
}
def decode_msg_16(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Assignment Mode Command
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_16_assignment_mode_command
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'offset1': get_int_from_data(70, 82),
'increment1': get_int_from_data(82, 92),
'mmsi2': get_int_from_data(92, 122),
'offset2': get_int_from_data(122, 134),
'increment2': get_int_from_data(134, 144)
}
def decode_msg_17(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
DGNSS Broadcast Binary Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_17_dgnss_broadcast_binary_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'lon': get_int_from_data(40, 58, signed=True),
'lat': get_int_from_data(58, 75, signed=True),
'data': get_int_from_data(80, 816)
}
def decode_msg_18(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Standard Class B CS Position Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_18_standard_class_b_cs_position_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'speed': get_int_from_data(46, 56) * 0.1,
'accuracy': bit_arr[56],
'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
'course': get_int_from_data(112, 124) * 0.1,
'heading': get_int_from_data(124, 133),
'second': get_int_from_data(133, 139),
'regional': get_int_from_data(139, 141),
'cs': bit_arr[141],
'display': bit_arr[142],
'dsc': bit_arr[143],
'band': bit_arr[144],
'msg22': bit_arr[145],
'assigned': bit_arr[146],
'raim': bit_arr[147],
'radio': get_int_from_data(148, len(bit_arr)),
}
def decode_msg_19(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Extended Class B CS Position Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_19_extended_class_b_cs_position_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'speed': get_int_from_data(46, 56) * 0.1,
'accuracy': bit_arr[56],
'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
'course': get_int_from_data(112, 124) * 0.1,
'heading': get_int_from_data(124, 133),
'second': get_int_from_data(133, 139),
'regional': get_int_from_data(139, 143),
'shipname': encode_bin_as_ascii6(bit_arr[143:263]),
'shiptype': ShipType(get_int_from_data(263, 271)),
'to_bow': get_int_from_data(271, 280),
'to_stern': get_int_from_data(280, 289),
'to_port': get_int_from_data(289, 295),
'to_starboard': get_int_from_data(295, 301),
'epfd': EpfdType(get_int_from_data(301, 305)),
'raim': bit_arr[305],
'dte': bit_arr[306],
'assigned': bit_arr[307],
}
def decode_msg_20(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Data Link Management Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_20_data_link_management_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'offset1': get_int_from_data(40, 52),
'number1': get_int_from_data(52, 56),
'timeout1': get_int_from_data(56, 59),
'increment1': get_int_from_data(59, 70),
'offset2': get_int_from_data(70, 82),
'number2': get_int_from_data(82, 86),
'timeout2': get_int_from_data(86, 89),
'increment2': get_int_from_data(89, 100),
'offset3': get_int_from_data(100, 112),
'number3': get_int_from_data(112, 116),
'timeout3': get_int_from_data(116, 119),
'increment3': get_int_from_data(110, 130),
'offset4': get_int_from_data(130, 142),
'number4': get_int_from_data(142, 146),
'timeout4': get_int_from_data(146, 149),
'increment4': get_int_from_data(149, 160),
}
def decode_msg_21(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Aid-to-Navigation Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_21_aid_to_navigation_report
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'aid_type': NavAid(get_int_from_data(38, 43)),
'name': encode_bin_as_ascii6(bit_arr[43:163]),
'accuracy': bit_arr[163],
'lon': get_int_from_data(164, 192, signed=True) / 600000.0,
'lat': get_int_from_data(192, 219, signed=True) / 600000.0,
'to_bow': get_int_from_data(219, 228),
'to_stern': get_int_from_data(228, 237),
'to_port': get_int_from_data(237, 243),
'to_starboard': get_int_from_data(243, 249),
'epfd': EpfdType(get_int_from_data(249, 253)),
'second': get_int_from_data(253, 259),
'off_position': bit_arr[259],
'regional': get_int_from_data(260, 268),
'raim': bit_arr[268],
'virtual_aid': bit_arr[269],
'assigned': bit_arr[270],
'name_extension': encode_bin_as_ascii6(bit_arr[272:]),
}
def decode_msg_22(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Channel Management
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_22_channel_management
"""
get_int_from_data = partial(get_int, bit_arr)
data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'channel_a': get_int_from_data(40, 52),
'channel_b': get_int_from_data(52, 64),
'txrx': get_int_from_data(64, 68),
'power': bit_arr[68],
'addressed': bit_arr[139],
'band_a': bit_arr[140],
'band_b': bit_arr[141],
'zonesize': get_int_from_data(142, 145),
}
    # Addressed: the message targets two destination MMSIs
if data['addressed']:
d = {
'dest1': get_int_from_data(69, 99),
'dest2': get_int_from_data(104, 134),
}
    # Broadcast: the message applies to a geographic region
else:
d = {
'ne_lon': get_int_from_data(69, 87, signed=True) * 0.1,
'ne_lat': get_int_from_data(87, 104, signed=True) * 0.1,
'sw_lon': get_int_from_data(104, 122, signed=True) * 0.1,
'sw_lat': get_int_from_data(122, 139, signed=True) * 0.1,
}
data.update(d)
return data
def decode_msg_23(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Group Assignment Command
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_23_group_assignment_command
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'ne_lon': get_int_from_data(40, 58, signed=True) * 0.1,
'ne_lat': get_int_from_data(58, 75, signed=True) * 0.1,
'sw_lon': get_int_from_data(75, 93, signed=True) * 0.1,
'sw_lat': get_int_from_data(93, 110, signed=True) * 0.1,
'station_type': StationType(get_int_from_data(110, 114)),
'ship_type': ShipType(get_int_from_data(114, 122)),
'txrx': TransmitMode(get_int_from_data(144, 146)),
'interval': StationIntervals(get_int_from_data(146, 150)),
'quiet': get_int_from_data(150, 154),
}
def decode_msg_24(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Static Data Report
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_24_static_data_report
"""
get_int_from_data = partial(get_int, bit_arr)
data: typing.Dict = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'partno': get_int_from_data(38, 40)
}
if not data['partno']:
# Part A
d: typing.Dict = {
'shipname': encode_bin_as_ascii6(bit_arr[40: 160])
}
else:
# Part B
d: typing.Dict = {
'shiptype': ShipType(get_int_from_data(40, 48)),
'vendorid': encode_bin_as_ascii6(bit_arr[48: 66]),
'model': get_int_from_data(66, 70),
'serial': get_int_from_data(70, 90),
'callsign': encode_bin_as_ascii6(bit_arr[90: 132]),
'to_bow': get_int_from_data(132, 141),
'to_stern': get_int_from_data(141, 150),
'to_port': get_int_from_data(150, 156),
'to_starboard': get_int_from_data(156, 162),
'mothership_mmsi': get_int_from_data(132, 162)
}
data.update(d)
return data
def decode_msg_25(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Single Slot Binary Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_25_single_slot_binary_message
NOTE: This message type is quite uncommon and
I was not able find any real world occurrence of the type.
Also documentation seems to vary. Use with caution.
"""
get_int_from_data = partial(get_int, bit_arr)
data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'addressed': bit_arr[38],
'structured': bit_arr[39],
}
if data['addressed']:
d = {
'dest_mmsi': get_int_from_data(40, 70),
}
data.update(d)
lo_ix = 40 if data['addressed'] else 70
hi_ix = lo_ix + 16
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
'data': bit_arr[hi_ix:].to01()
}
else:
d = {
'data': bit_arr[lo_ix:].to01()
}
data.update(d)
return data
def decode_msg_26(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Multiple Slot Binary Message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_26_multiple_slot_binary_message
NOTE: This message type is quite uncommon and
I was not able find any real world occurrence of the type.
Also documentation seems to vary. Use with caution.
"""
get_int_from_data = partial(get_int, bit_arr)
radio_status_offset = len(bit_arr) - 20
data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'addressed': bit_arr[38],
'structured': bit_arr[39],
'radio': get_int_from_data(radio_status_offset, len(bit_arr))
}
if data['addressed']:
d = {
'dest_mmsi': get_int_from_data(40, 70),
}
data.update(d)
lo_ix = 40 if data['addressed'] else 70
hi_ix = lo_ix + 16
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
'data': bit_arr[hi_ix:radio_status_offset].to01()
}
else:
d = {
'data': bit_arr[lo_ix:radio_status_offset].to01()
}
data.update(d)
return data
def decode_msg_27(bit_arr: bitarray.bitarray) -> typing.Dict:
"""
Long Range AIS Broadcast message
Src: https://gpsd.gitlab.io/gpsd/AIVDM.html#_type_27_long_range_ais_broadcast_message
"""
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'accuracy': bit_arr[38],
'raim': bit_arr[39],
'status': NavigationStatus(get_int_from_data(40, 44)),
'lon': get_int_from_data(44, 62, signed=True) / 600.0,
'lat': get_int_from_data(62, 79, signed=True) / 600.0,
'speed': get_int_from_data(79, 85),
'course': get_int_from_data(85, 94),
'gnss': bit_arr[94],
}
# Decoding Lookup Table
DECODE_MSG = [
decode_msg_1, # there are messages with a zero (0) as an id. these seem to be the same as type 1 messages
decode_msg_1,
decode_msg_2,
decode_msg_3,
decode_msg_4,
decode_msg_5,
decode_msg_6,
decode_msg_7,
decode_msg_8,
decode_msg_9,
decode_msg_10,
decode_msg_11,
decode_msg_12,
decode_msg_13,
decode_msg_14,
decode_msg_15,
decode_msg_16,
decode_msg_17,
decode_msg_18,
decode_msg_19,
decode_msg_20,
decode_msg_21,
decode_msg_22,
decode_msg_23,
decode_msg_24,
decode_msg_25,
decode_msg_26,
decode_msg_27,
]
def _decode(msg) -> typing.Dict:
"""
Decodes a given NMEA message.
"""
try:
return DECODE_MSG[msg.ais_id](msg.bit_array)
except IndexError as e:
raise UnknownMessageException(f"The message {msg} is not currently supported!") from e
def decode(msg) -> typing.Dict:
"""
Decodes a given message.
@param msg: A object of type NMEAMessage to decode
"""
return _decode(msg)
| 33.296403
| 110
| 0.630267
|
from functools import partial
from pyais.exceptions import UnknownMessageException
import typing
import bitarray
from pyais.constants import (
NavigationStatus,
ManeuverIndicator,
TransmitMode,
EpfdType,
ShipType,
StationType,
StationIntervals,
NavAid
)
from pyais.util import get_int, encode_bin_as_ascii6
def decode_msg_1(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'status': NavigationStatus(get_int_from_data(38, 42)),
'turn': get_int_from_data(42, 50, signed=True),
'speed': get_int_from_data(50, 60) / 10.0,
'accuracy': bit_arr[60],
'lon': get_int_from_data(61, 89, signed=True) / 600000.0,
'lat': get_int_from_data(89, 116, signed=True) / 600000.0,
'course': get_int_from_data(116, 128) * 0.1,
'heading': get_int_from_data(128, 137),
'second': get_int_from_data(137, 143),
'maneuver': ManeuverIndicator(get_int_from_data(143, 145)),
'raim': bit_arr[148],
'radio': get_int_from_data(149, bit_arr.length()),
}
def decode_msg_2(bit_arr: bitarray.bitarray) -> typing.Dict:
return decode_msg_1(bit_arr)
def decode_msg_3(bit_arr: bitarray.bitarray) -> typing.Dict:
return decode_msg_1(bit_arr)
def decode_msg_4(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'year': get_int_from_data(38, 52),
'month': get_int_from_data(52, 56),
'day': get_int_from_data(56, 61),
'hour': get_int_from_data(61, 66),
'minute': get_int_from_data(66, 72),
'second': get_int_from_data(72, 78),
'accuracy': bit_arr[78],
'lon': get_int_from_data(79, 107, signed=True) / 600000.0,
'lat': get_int_from_data(107, 134, signed=True) / 600000.0,
'epfd': EpfdType(get_int_from_data(134, 138)),
'raim': bit_arr[148],
'radio': get_int_from_data(148, len(bit_arr)),
}
def decode_msg_5(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'ais_version': get_int_from_data(38, 40),
'imo': get_int_from_data(40, 70),
'callsign': encode_bin_as_ascii6(bit_arr[70:112]),
'shipname': encode_bin_as_ascii6(bit_arr[112:232]),
'shiptype': ShipType(get_int_from_data(232, 240)),
'to_bow': get_int_from_data(240, 249),
'to_stern': get_int_from_data(249, 258),
'to_port': get_int_from_data(258, 264),
'to_starboard': get_int_from_data(264, 270),
'epfd': EpfdType(get_int_from_data(270, 274)),
'month': get_int_from_data(274, 278),
'day': get_int_from_data(278, 283),
'hour': get_int_from_data(283, 288),
'minute': get_int_from_data(288, 294),
'draught': get_int_from_data(294, 302) / 10.0,
'destination': encode_bin_as_ascii6(bit_arr[302:422]),
'dte': bit_arr[-2]
}
def decode_msg_6(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'seqno': get_int_from_data(38, 40),
'dest_mmsi': get_int_from_data(40, 70),
'retransmit': bit_arr[70],
'dac': get_int_from_data(72, 82),
'fid': get_int_from_data(82, 88),
'data': bit_arr[88:].to01()
}
def decode_msg_7(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'mmsiseq1': get_int_from_data(70, 72),
'mmsi2': get_int_from_data(72, 102),
'mmsiseq2': get_int_from_data(102, 104),
'mmsi3': get_int_from_data(104, 134),
'mmsiseq3': get_int_from_data(134, 136),
'mmsi4': get_int_from_data(136, 166),
'mmsiseq4': get_int_from_data(166, 168)
}
def decode_msg_8(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'dac': get_int_from_data(40, 50),
'fid': get_int_from_data(50, 56),
'data': bit_arr[56:].to01()
}
def decode_msg_9(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'alt': get_int_from_data(38, 50),
'speed': get_int_from_data(50, 60),
'accuracy': bit_arr[60],
'lon': get_int_from_data(61, 89, signed=True) / 600000.0,
'lat': get_int_from_data(89, 116, signed=True) / 600000.0,
'course': get_int_from_data(116, 128) * 0.1,
'second': get_int_from_data(128, 134),
'dte': bit_arr[142],
'assigned': bit_arr[146],
'raim': bit_arr[147],
'radio': get_int_from_data(148, 168)
}
def decode_msg_10(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'dest_mmsi': get_int_from_data(40, 70)
}
def decode_msg_11(bit_arr: bitarray.bitarray) -> typing.Dict:
return decode_msg_4(bit_arr)
def decode_msg_12(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'seqno': get_int_from_data(38, 40),
'dest_mmsi': get_int_from_data(40, 70),
'retransmit': bit_arr[70],
'text': encode_bin_as_ascii6(bit_arr[72:])
}
def decode_msg_13(bit_arr: bitarray.bitarray) -> typing.Dict:
return decode_msg_7(bit_arr)
def decode_msg_14(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'text': encode_bin_as_ascii6(bit_arr[40:])
}
def decode_msg_15(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'type1_1': get_int_from_data(70, 76),
'offset1_1': get_int_from_data(76, 88),
'type1_2': get_int_from_data(90, 96),
'offset1_2': get_int_from_data(96, 108),
'mmsi2': get_int_from_data(110, 140),
'type2_1': get_int_from_data(140, 146),
'offset2_1': get_int_from_data(146, 157),
}
def decode_msg_16(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'mmsi1': get_int_from_data(40, 70),
'offset1': get_int_from_data(70, 82),
'increment1': get_int_from_data(82, 92),
'mmsi2': get_int_from_data(92, 122),
'offset2': get_int_from_data(122, 134),
'increment2': get_int_from_data(134, 144)
}
def decode_msg_17(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(6, 8),
'mmsi': get_int_from_data(8, 38),
'lon': get_int_from_data(40, 58, signed=True),
'lat': get_int_from_data(58, 75, signed=True),
'data': get_int_from_data(80, 816)
}
def decode_msg_18(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'speed': get_int_from_data(46, 56) * 0.1,
'accuracy': bit_arr[56],
'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
'course': get_int_from_data(112, 124) * 0.1,
'heading': get_int_from_data(124, 133),
'second': get_int_from_data(133, 139),
'regional': get_int_from_data(139, 141),
'cs': bit_arr[141],
'display': bit_arr[142],
'dsc': bit_arr[143],
'band': bit_arr[144],
'msg22': bit_arr[145],
'assigned': bit_arr[146],
'raim': bit_arr[147],
'radio': get_int_from_data(148, len(bit_arr)),
}
def decode_msg_19(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'speed': get_int_from_data(46, 56) * 0.1,
'accuracy': bit_arr[56],
'lon': get_int_from_data(57, 85, signed=True) / 600000.0,
'lat': get_int_from_data(85, 112, signed=True) / 600000.0,
'course': get_int_from_data(112, 124) * 0.1,
'heading': get_int_from_data(124, 133),
'second': get_int_from_data(133, 139),
'regional': get_int_from_data(139, 143),
'shipname': encode_bin_as_ascii6(bit_arr[143:263]),
'shiptype': ShipType(get_int_from_data(263, 271)),
'to_bow': get_int_from_data(271, 280),
'to_stern': get_int_from_data(280, 289),
'to_port': get_int_from_data(289, 295),
'to_starboard': get_int_from_data(295, 301),
'epfd': EpfdType(get_int_from_data(301, 305)),
'raim': bit_arr[305],
'dte': bit_arr[306],
'assigned': bit_arr[307],
}
def decode_msg_20(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'offset1': get_int_from_data(40, 52),
'number1': get_int_from_data(52, 56),
'timeout1': get_int_from_data(56, 59),
'increment1': get_int_from_data(59, 70),
'offset2': get_int_from_data(70, 82),
'number2': get_int_from_data(82, 86),
'timeout2': get_int_from_data(86, 89),
'increment2': get_int_from_data(89, 100),
'offset3': get_int_from_data(100, 112),
'number3': get_int_from_data(112, 116),
'timeout3': get_int_from_data(116, 119),
'increment3': get_int_from_data(110, 130),
'offset4': get_int_from_data(130, 142),
'number4': get_int_from_data(142, 146),
'timeout4': get_int_from_data(146, 149),
'increment4': get_int_from_data(149, 160),
}
def decode_msg_21(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'aid_type': NavAid(get_int_from_data(38, 43)),
'name': encode_bin_as_ascii6(bit_arr[43:163]),
'accuracy': bit_arr[163],
'lon': get_int_from_data(164, 192, signed=True) / 600000.0,
'lat': get_int_from_data(192, 219, signed=True) / 600000.0,
'to_bow': get_int_from_data(219, 228),
'to_stern': get_int_from_data(228, 237),
'to_port': get_int_from_data(237, 243),
'to_starboard': get_int_from_data(243, 249),
'epfd': EpfdType(get_int_from_data(249, 253)),
'second': get_int_from_data(253, 259),
'off_position': bit_arr[259],
'regional': get_int_from_data(260, 268),
'raim': bit_arr[268],
'virtual_aid': bit_arr[269],
'assigned': bit_arr[270],
'name_extension': encode_bin_as_ascii6(bit_arr[272:]),
}
def decode_msg_22(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'channel_a': get_int_from_data(40, 52),
'channel_b': get_int_from_data(52, 64),
'txrx': get_int_from_data(64, 68),
'power': bit_arr[68],
'addressed': bit_arr[139],
'band_a': bit_arr[140],
'band_b': bit_arr[141],
'zonesize': get_int_from_data(142, 145),
}
if data['addressed']:
d = {
'dest1': get_int_from_data(69, 99),
'dest2': get_int_from_data(104, 134),
}
else:
d = {
'ne_lon': get_int_from_data(69, 87, signed=True) * 0.1,
'ne_lat': get_int_from_data(87, 104, signed=True) * 0.1,
'sw_lon': get_int_from_data(104, 122, signed=True) * 0.1,
'sw_lat': get_int_from_data(122, 139, signed=True) * 0.1,
}
data.update(d)
return data
def decode_msg_23(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'ne_lon': get_int_from_data(40, 58, signed=True) * 0.1,
'ne_lat': get_int_from_data(58, 75, signed=True) * 0.1,
'sw_lon': get_int_from_data(75, 93, signed=True) * 0.1,
'sw_lat': get_int_from_data(93, 110, signed=True) * 0.1,
'station_type': StationType(get_int_from_data(110, 114)),
'ship_type': ShipType(get_int_from_data(114, 122)),
'txrx': TransmitMode(get_int_from_data(144, 146)),
'interval': StationIntervals(get_int_from_data(146, 150)),
'quiet': get_int_from_data(150, 154),
}
def decode_msg_24(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
data: typing.Dict = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'partno': get_int_from_data(38, 40)
}
if not data['partno']:
d: typing.Dict = {
'shipname': encode_bin_as_ascii6(bit_arr[40: 160])
}
else:
d: typing.Dict = {
'shiptype': ShipType(get_int_from_data(40, 48)),
'vendorid': encode_bin_as_ascii6(bit_arr[48: 66]),
'model': get_int_from_data(66, 70),
'serial': get_int_from_data(70, 90),
'callsign': encode_bin_as_ascii6(bit_arr[90: 132]),
'to_bow': get_int_from_data(132, 141),
'to_stern': get_int_from_data(141, 150),
'to_port': get_int_from_data(150, 156),
'to_starboard': get_int_from_data(156, 162),
'mothership_mmsi': get_int_from_data(132, 162)
}
data.update(d)
return data
def decode_msg_25(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'addressed': bit_arr[38],
'structured': bit_arr[39],
}
if data['addressed']:
d = {
'dest_mmsi': get_int_from_data(40, 70),
}
data.update(d)
lo_ix = 40 if data['addressed'] else 70
hi_ix = lo_ix + 16
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
'data': bit_arr[hi_ix:].to01()
}
else:
d = {
'data': bit_arr[lo_ix:].to01()
}
data.update(d)
return data
def decode_msg_26(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
radio_status_offset = len(bit_arr) - 20
data = {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'addressed': bit_arr[38],
'structured': bit_arr[39],
'radio': get_int_from_data(radio_status_offset, len(bit_arr))
}
if data['addressed']:
d = {
'dest_mmsi': get_int_from_data(40, 70),
}
data.update(d)
lo_ix = 40 if data['addressed'] else 70
hi_ix = lo_ix + 16
if data['structured']:
d = {
'app_id': get_int_from_data(lo_ix, hi_ix),
'data': bit_arr[hi_ix:radio_status_offset].to01()
}
else:
d = {
'data': bit_arr[lo_ix:radio_status_offset].to01()
}
data.update(d)
return data
def decode_msg_27(bit_arr: bitarray.bitarray) -> typing.Dict:
get_int_from_data = partial(get_int, bit_arr)
return {
'type': get_int_from_data(0, 6),
'repeat': get_int_from_data(8, 8),
'mmsi': get_int_from_data(8, 38),
'accuracy': bit_arr[38],
'raim': bit_arr[39],
'status': NavigationStatus(get_int_from_data(40, 44)),
'lon': get_int_from_data(44, 62, signed=True) / 600.0,
'lat': get_int_from_data(62, 79, signed=True) / 600.0,
'speed': get_int_from_data(79, 85),
'course': get_int_from_data(85, 94),
'gnss': bit_arr[94],
}
DECODE_MSG = [
    decode_msg_1,
    decode_msg_1,
decode_msg_2,
decode_msg_3,
decode_msg_4,
decode_msg_5,
decode_msg_6,
decode_msg_7,
decode_msg_8,
decode_msg_9,
decode_msg_10,
decode_msg_11,
decode_msg_12,
decode_msg_13,
decode_msg_14,
decode_msg_15,
decode_msg_16,
decode_msg_17,
decode_msg_18,
decode_msg_19,
decode_msg_20,
decode_msg_21,
decode_msg_22,
decode_msg_23,
decode_msg_24,
decode_msg_25,
decode_msg_26,
decode_msg_27,
]
def _decode(msg) -> typing.Dict:
try:
return DECODE_MSG[msg.ais_id](msg.bit_array)
except IndexError as e:
raise UnknownMessageException(f"The message {msg} is not currently supported!") from e
def decode(msg) -> typing.Dict:
return _decode(msg)
| true
| true
|
f70ce61faa9d386dd5eb06bbf87f5f0b8e80d8b7
| 12,161
|
py
|
Python
|
pylabnet/hardware/polarization/thorlabs_mpc320.py
|
wi11dey/pylabnet
|
a6e3362f727c45aaa60e61496e858ae92e85574d
|
[
"MIT"
] | 10
|
2020-01-07T23:28:49.000Z
|
2022-02-02T19:09:17.000Z
|
pylabnet/hardware/polarization/thorlabs_mpc320.py
|
wi11dey/pylabnet
|
a6e3362f727c45aaa60e61496e858ae92e85574d
|
[
"MIT"
] | 249
|
2019-12-28T19:38:49.000Z
|
2022-03-28T16:45:32.000Z
|
pylabnet/hardware/polarization/thorlabs_mpc320.py
|
wi11dey/pylabnet
|
a6e3362f727c45aaa60e61496e858ae92e85574d
|
[
"MIT"
] | 5
|
2020-11-17T19:45:10.000Z
|
2022-01-04T18:07:04.000Z
|
""" Module for controlling Thorlabs motorized pollarization paddles """
import ctypes
from ctypes import Structure
import time
from pylabnet.utils.logging.logger import LogHandler
#from comtypes.typeinfo import SAFEARRAYABOUND
#enum FT_Status
FT_OK = ctypes.c_short(0x00)
FT_InvalidHandle = ctypes.c_short(0x0)
FT_DeviceNotFound = ctypes.c_short(0x02)
FT_DeviceNotOpened = ctypes.c_short(0x03)
FT_IOError = ctypes.c_short(0x04)
FT_InsufficientResources = ctypes.c_short(0x05)
FT_InvalidParameter = ctypes.c_short(0x06)
FT_DeviceNotPresent = ctypes.c_short(0x07)
FT_IncorrectDevice = ctypes.c_short(0x08)
FT_Status = ctypes.c_short
#enum MOT_MotorTypes
MOT_NotMotor = ctypes.c_int(0)
MOT_DCMotor = ctypes.c_int(1)
MOT_StepperMotor = ctypes.c_int(2)
MOT_BrushlessMotor = ctypes.c_int(3)
MOT_CustomMotor = ctypes.c_int(100)
MOT_MotorTypes = ctypes.c_int
#enum POL_Paddle
paddle1 = ctypes.c_uint16(1)
paddle2 = ctypes.c_uint16(2)
paddle3 = ctypes.c_uint16(3)
POL_Paddles = ctypes.c_uint16
#enum POL_PaddleBits
none_ctype = ctypes.c_ushort(0x0) #is None in header file
PaddleBit1 = ctypes.c_ushort(0x01)
PaddleBit2 = ctypes.c_ushort(0x02)
PaddleBit4 = ctypes.c_ushort(0x04)
AllPaddles = ctypes.c_ushort(0x07)
POL_PaddleBits = ctypes.c_ushort
#enum MOT_TravelDirection
MOT_TravelDirectionDisabled = ctypes.c_short(0x00)
MOT_Forwards = ctypes.c_short(0x01)
MOT_Reverse = ctypes.c_short(0x02)
MOT_TravelDirection = ctypes.c_short
#enum MPC_IOModes
MPC_ToggleOnPositiveEdge = ctypes.c_ulong(0x01)
MPC_SetPositionOnPositiveEdge = ctypes.c_ulong(0x02)
MPC_OutputHighAtSetPosition = ctypes.c_ulong(0x04)
MPC_OutputHighWhemMoving = ctypes.c_ulong(0x08)
MPC_IOModes = ctypes.c_ulong
class TLI_DeviceInfo(Structure):
_fields_ = [("typeID", ctypes.c_ulong),
("description", (65 * ctypes.c_char)), #changed from 65* _char
("serialNo", (9 * ctypes.c_char)), #changed from 9* _char
("PID", ctypes.c_ulong),# wintypes.DWORD
("isKnownType", ctypes.c_bool),
("motorType", MOT_MotorTypes),
("isPiezoDevice", ctypes.c_bool),
("isLaser", ctypes.c_bool),
("isCustomType", ctypes.c_bool),
("isRack", ctypes.c_bool),
("maxPaddles", ctypes.c_short)]
# class TLI_HardwareInformation(Structure):
# _fields_ = [("serialNumber", ctypes.c_ulong),
# ("modelNumber", (8 * ctypes.c_char)),
# ("type", ctypes.c_ushort),
# ("firmwareVersion", ctypes.c_ulong),
# ("notes", (48 * ctypes.c_char)),
# ("deviceDependantData", (12 * ctypes.c_byte)),
# ("hardwareVersion", ctypes.c_ushort),
# ("modificationState", ctypes.c_ushort),
# ("numChannels", ctypes.c_ushort)]
class TLI_PolarizerParameters(Structure):
_fields_ = [("Velocity", ctypes.c_ushort),
("HomePosition", ctypes.c_double),
("JogSize1", ctypes.c_double),
("JogSize2", ctypes.c_double),
("JogSize3", ctypes.c_double)]
#class SAFEARRAYBOUND(Strcuture):
# _fields_ = [("cElements" , ctypes.c_ulong),
# ("lLbound" , ctypes.c_long)]
#class SAFEARRAY(Strcuture):
# _fields_ = [("cDims", ctypes.c_ushort),
# ("fFeatures", ctypes.c_ushort),
# ("cbElements", ctypes.c_ulong),
# ("cLocks", ctypes.c_ulong),
# ("pvData", ctypes.c_void_p),
# ("rgsabound", SAFEARRAYBOUND * 1)]
class Driver():
def __init__(self, device_num, logger):
"""Instantiate driver class.
device_num is numbering of devices connected via USB. Drivrt then finds serial numbers of polarization paddle by Driver, e.g. b'38154354' """
# Instantiate log.
self.log = LogHandler(logger=logger)
        #Load the polarization controller DLLs and define argument and result types for the C functions
self._polarizationdll = ctypes.cdll.LoadLibrary('Thorlabs.MotionControl.Polarizer.dll')
self._devmanagerdll = ctypes.cdll.LoadLibrary('Thorlabs.MotionControl.DeviceManager.dll')
self._configure_functions()
#get device list size
if self._polarizationdll.TLI_BuildDeviceList() == 0:
num_devs = self._polarizationdll.TLI_GetDeviceListSize()
#print(f"There are {num_devs} devices connected")
#Get devices serial numbers
serialNos = ctypes.create_string_buffer(100) #the way to have a mutable buffer
serialNosSize = ctypes.c_ulong(ctypes.sizeof(serialNos))
List = self._polarizationdll.TLI_GetDeviceListByTypeExt(serialNos, serialNosSize, 38)
#if List:
# print("Failed to get device list")
#else:
# print("Device list created succesfully") #change these massages to interact with logger
self.dev_name = serialNos.value.decode("utf-8") #.strip().split(',')
#print(f"Connected to device {self.dev_name}")
#get device info including serial number
self.device_info = TLI_DeviceInfo() # container for device info
            self._polarizationdll.TLI_GetDeviceInfo(serialNos[(device_num - 1) * 9:(device_num * 9) - 1], ctypes.byref(self.device_info)) #when there are several devices, figure out how to separate and access each one
self.device = serialNos[(device_num - 1) * 9:(device_num * 9) - 1]
#print("Description: ", self.device_info.description)
#print("Serial No: ", self.device_info.serialNo)
#print("Motor Type: ", self.device_info.motorType)
#print("USB PID: ", self.device_info.PID)
#print("Max Number of Paddles: ", self.device_info.maxPaddles)
        #establishing connection to device
self.paddles = [paddle1, paddle3, paddle2]
connection = self._polarizationdll.MPC_Open(self.device)
if connection == 0:
self.log.info(f"Successfully connected to {self.device}.")
else:
self.log.error(f"Connection to {self.device} failed due to error {connection}.")
#technical methods
def _configure_functions(self):
""" Defines arguments and results for c functions """
self._polarizationdll.TLI_BuildDeviceList.argtype = None
self._polarizationdll.TLI_BuildDeviceList.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceListSize.argtype = None
self._polarizationdll.TLI_GetDeviceListSize.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceInfo.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_DeviceInfo)]
self._polarizationdll.TLI_GetDeviceInfo.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceListByTypeExt.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_ulong, ctypes.c_int]
self._polarizationdll.TLI_GetDeviceListByTypeExt.restype = ctypes.c_short
self._polarizationdll.MPC_Open.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_Open.restype = ctypes.c_short
self._polarizationdll.MPC_Close.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_Close.restype = ctypes.c_short
self._polarizationdll.MPC_CheckConnection.argtype = ctypes.c_char_p
self._polarizationdll.MPC_CheckConnection.restype = ctypes.c_bool
self._polarizationdll.MPC_GetPosition.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_GetPosition.restype = ctypes.c_double
self._polarizationdll.MPC_RequestPolParams.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_RequestPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_GetPolParams.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters)]
self._polarizationdll.MPC_GetPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_SetPolParams.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters)]
self._polarizationdll.MPC_SetPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_SetJogSize.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_SetJogSize.restype = ctypes.c_short
self._polarizationdll.MPC_Jog.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, MOT_TravelDirection]
self._polarizationdll.MPC_Jog.restype = ctypes.c_short
self._polarizationdll.MPC_GetMaxTravel.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_GetMaxTravel.restype = ctypes.c_double
self._polarizationdll.MPC_MoveToPosition.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_MoveToPosition.restype = ctypes.c_short
self._polarizationdll.MPC_Stop.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_Stop.restype = ctypes.c_short
self._polarizationdll.MPC_Home.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_Home.restype = ctypes.c_short
self._polarizationdll.MPC_Jog.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters), MOT_TravelDirection]
self._polarizationdll.MPC_Jog.restype = ctypes.c_short
self._polarizationdll.MPC_StartPolling.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_int]
self._polarizationdll.MPC_StartPolling.restype = ctypes.c_bool
self._polarizationdll.MPC_StopPolling.argtype = ctypes.POINTER(ctypes.c_char)
        self._polarizationdll.MPC_StopPolling.restype = ctypes.c_void_p #did not find a c_void without a pointer, as needed
self._polarizationdll.MPC_SetVelocity.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_short]
self._polarizationdll.MPC_SetVelocity.restype = ctypes.c_short
self._polarizationdll.MPC_MoveRelative.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_MoveRelative.restype = ctypes.c_short
self._polarizationdll.MPC_GetStepsPerDegree.argtype = [ctypes.POINTER(ctypes.c_char)]
        self._polarizationdll.MPC_GetStepsPerDegree.restype = ctypes.c_double
#wrap function for external use
def open(self):
result = self._polarizationdll.MPC_Open(self.device)
if result == 0:
print("Connected succesfully to device")
else:
print("A problem occured when trying to connect to device")
def close(self):
resultc = self._polarizationdll.MPC_Close(self.device)
if resultc == 0:
print("Closed connection to device")
else:
print("A problem occured when trying to diconnect from device")
def home(self, paddle_num):
home_result = self._polarizationdll.MPC_Home(self.device, self.paddles[paddle_num])
return home_result
def set_velocity(self, velocity):
velocity = self._polarizationdll.MPC_SetVelocity(self.device, velocity)
def move(self, paddle_num, pos, sleep_time):
#posinitial = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
move_result = self._polarizationdll.MPC_MoveToPosition(self.device, self.paddles[paddle_num], pos)
time.sleep(abs(sleep_time * pos / 170))
#posfinal = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
return move_result #, posinitial, posfinal
def move_rel(self, paddle_num, step, sleep_time):
#posinitial = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
move_result = self._polarizationdll.MPC_MoveRelative(self.device, self.paddles[paddle_num], step)
time.sleep(abs(sleep_time * step / 170))
#posfinal = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
return move_result #, posinitial, posfinal
def get_angle(self, paddle_num):
currentpos = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
return currentpos
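A minimal usage sketch of the Driver class above (hedged: it assumes the Thorlabs Kinesis DLLs are installed and on the library search path, an MPC320 paddle controller is connected, and a pylabnet-compatible logger instance is available; the device number, angle and sleep scaling below are illustrative only):
from pylabnet.hardware.polarization.thorlabs_mpc320 import Driver
logger = None  # assumption: replace with a pylabnet LogClient/logger instance
paddle_driver = Driver(device_num=1, logger=logger)       # opens the connection in __init__
paddle_driver.home(paddle_num=0)                          # home the first paddle
paddle_driver.move(paddle_num=0, pos=45.0, sleep_time=3)  # absolute move, in degrees
print(paddle_driver.get_angle(paddle_num=0))              # read back the current angle
paddle_driver.close()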
| 48.450199
| 214
| 0.709892
|
import ctypes
from ctypes import Structure
import time
from pylabnet.utils.logging.logger import LogHandler
FT_OK = ctypes.c_short(0x00)
FT_InvalidHandle = ctypes.c_short(0x0)
FT_DeviceNotFound = ctypes.c_short(0x02)
FT_DeviceNotOpened = ctypes.c_short(0x03)
FT_IOError = ctypes.c_short(0x04)
FT_InsufficientResources = ctypes.c_short(0x05)
FT_InvalidParameter = ctypes.c_short(0x06)
FT_DeviceNotPresent = ctypes.c_short(0x07)
FT_IncorrectDevice = ctypes.c_short(0x08)
FT_Status = ctypes.c_short
MOT_NotMotor = ctypes.c_int(0)
MOT_DCMotor = ctypes.c_int(1)
MOT_StepperMotor = ctypes.c_int(2)
MOT_BrushlessMotor = ctypes.c_int(3)
MOT_CustomMotor = ctypes.c_int(100)
MOT_MotorTypes = ctypes.c_int
paddle1 = ctypes.c_uint16(1)
paddle2 = ctypes.c_uint16(2)
paddle3 = ctypes.c_uint16(3)
POL_Paddles = ctypes.c_uint16
none_ctype = ctypes.c_ushort(0x0)
PaddleBit1 = ctypes.c_ushort(0x01)
PaddleBit2 = ctypes.c_ushort(0x02)
PaddleBit4 = ctypes.c_ushort(0x04)
AllPaddles = ctypes.c_ushort(0x07)
POL_PaddleBits = ctypes.c_ushort
MOT_TravelDirectionDisabled = ctypes.c_short(0x00)
MOT_Forwards = ctypes.c_short(0x01)
MOT_Reverse = ctypes.c_short(0x02)
MOT_TravelDirection = ctypes.c_short
MPC_ToggleOnPositiveEdge = ctypes.c_ulong(0x01)
MPC_SetPositionOnPositiveEdge = ctypes.c_ulong(0x02)
MPC_OutputHighAtSetPosition = ctypes.c_ulong(0x04)
MPC_OutputHighWhemMoving = ctypes.c_ulong(0x08)
MPC_IOModes = ctypes.c_ulong
class TLI_DeviceInfo(Structure):
_fields_ = [("typeID", ctypes.c_ulong),
("description", (65 * ctypes.c_char)), ("serialNo", (9 * ctypes.c_char)), ("PID", ctypes.c_ulong), ("isKnownType", ctypes.c_bool),
("motorType", MOT_MotorTypes),
("isPiezoDevice", ctypes.c_bool),
("isLaser", ctypes.c_bool),
("isCustomType", ctypes.c_bool),
("isRack", ctypes.c_bool),
("maxPaddles", ctypes.c_short)]
class TLI_PolarizerParameters(Structure):
_fields_ = [("Velocity", ctypes.c_ushort),
("HomePosition", ctypes.c_double),
("JogSize1", ctypes.c_double),
("JogSize2", ctypes.c_double),
("JogSize3", ctypes.c_double)]
class Driver():
def __init__(self, device_num, logger):
self.log = LogHandler(logger=logger)
self._polarizationdll = ctypes.cdll.LoadLibrary('Thorlabs.MotionControl.Polarizer.dll')
self._devmanagerdll = ctypes.cdll.LoadLibrary('Thorlabs.MotionControl.DeviceManager.dll')
self._configure_functions()
if self._polarizationdll.TLI_BuildDeviceList() == 0:
num_devs = self._polarizationdll.TLI_GetDeviceListSize()
            serialNos = ctypes.create_string_buffer(100)
            serialNosSize = ctypes.c_ulong(ctypes.sizeof(serialNos))
List = self._polarizationdll.TLI_GetDeviceListByTypeExt(serialNos, serialNosSize, 38)
self.dev_name = serialNos.value.decode("utf-8")
            self.device_info = TLI_DeviceInfo()
            self._polarizationdll.TLI_GetDeviceInfo(serialNos[(device_num - 1) * 9:(device_num * 9) - 1], ctypes.byref(self.device_info))
            self.device = serialNos[(device_num - 1) * 9:(device_num * 9) - 1]
self.paddles = [paddle1, paddle3, paddle2]
connection = self._polarizationdll.MPC_Open(self.device)
if connection == 0:
self.log.info(f"Successfully connected to {self.device}.")
else:
self.log.error(f"Connection to {self.device} failed due to error {connection}.")
def _configure_functions(self):
self._polarizationdll.TLI_BuildDeviceList.argtype = None
self._polarizationdll.TLI_BuildDeviceList.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceListSize.argtype = None
self._polarizationdll.TLI_GetDeviceListSize.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceInfo.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_DeviceInfo)]
self._polarizationdll.TLI_GetDeviceInfo.restype = ctypes.c_short
self._polarizationdll.TLI_GetDeviceListByTypeExt.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_ulong, ctypes.c_int]
self._polarizationdll.TLI_GetDeviceListByTypeExt.restype = ctypes.c_short
self._polarizationdll.MPC_Open.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_Open.restype = ctypes.c_short
self._polarizationdll.MPC_Close.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_Close.restype = ctypes.c_short
self._polarizationdll.MPC_CheckConnection.argtype = ctypes.c_char_p
self._polarizationdll.MPC_CheckConnection.restype = ctypes.c_bool
self._polarizationdll.MPC_GetPosition.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_GetPosition.restype = ctypes.c_double
self._polarizationdll.MPC_RequestPolParams.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_RequestPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_GetPolParams.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters)]
self._polarizationdll.MPC_GetPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_SetPolParams.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters)]
self._polarizationdll.MPC_SetPolParams.restype = ctypes.c_short
self._polarizationdll.MPC_SetJogSize.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_SetJogSize.restype = ctypes.c_short
self._polarizationdll.MPC_Jog.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, MOT_TravelDirection]
self._polarizationdll.MPC_Jog.restype = ctypes.c_short
self._polarizationdll.MPC_GetMaxTravel.argtype = ctypes.POINTER(ctypes.c_char)
self._polarizationdll.MPC_GetMaxTravel.restype = ctypes.c_double
self._polarizationdll.MPC_MoveToPosition.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_MoveToPosition.restype = ctypes.c_short
self._polarizationdll.MPC_Stop.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_Stop.restype = ctypes.c_short
self._polarizationdll.MPC_Home.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles]
self._polarizationdll.MPC_Home.restype = ctypes.c_short
self._polarizationdll.MPC_Jog.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.POINTER(TLI_PolarizerParameters), MOT_TravelDirection]
self._polarizationdll.MPC_Jog.restype = ctypes.c_short
self._polarizationdll.MPC_StartPolling.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_int]
self._polarizationdll.MPC_StartPolling.restype = ctypes.c_bool
self._polarizationdll.MPC_StopPolling.argtype = ctypes.POINTER(ctypes.c_char)
        self._polarizationdll.MPC_StopPolling.restype = ctypes.c_void_p
        self._polarizationdll.MPC_SetVelocity.argtypes = [ctypes.POINTER(ctypes.c_char), ctypes.c_short]
self._polarizationdll.MPC_SetVelocity.restype = ctypes.c_short
self._polarizationdll.MPC_MoveRelative.argtypes = [ctypes.POINTER(ctypes.c_char), POL_Paddles, ctypes.c_double]
self._polarizationdll.MPC_MoveRelative.restype = ctypes.c_short
self._polarizationdll.MPC_GetStepsPerDegree.argtype = [ctypes.POINTER(ctypes.c_char)]
        self._polarizationdll.MPC_GetStepsPerDegree.restype = ctypes.c_double
def open(self):
result = self._polarizationdll.MPC_Open(self.device)
if result == 0:
print("Connected succesfully to device")
else:
print("A problem occured when trying to connect to device")
def close(self):
resultc = self._polarizationdll.MPC_Close(self.device)
if resultc == 0:
print("Closed connection to device")
else:
print("A problem occured when trying to diconnect from device")
def home(self, paddle_num):
home_result = self._polarizationdll.MPC_Home(self.device, self.paddles[paddle_num])
return home_result
def set_velocity(self, velocity):
velocity = self._polarizationdll.MPC_SetVelocity(self.device, velocity)
def move(self, paddle_num, pos, sleep_time):
move_result = self._polarizationdll.MPC_MoveToPosition(self.device, self.paddles[paddle_num], pos)
time.sleep(abs(sleep_time * pos / 170))
return move_result
def move_rel(self, paddle_num, step, sleep_time):
move_result = self._polarizationdll.MPC_MoveRelative(self.device, self.paddles[paddle_num], step)
time.sleep(abs(sleep_time * step / 170))
return move_result
def get_angle(self, paddle_num):
currentpos = self._polarizationdll.MPC_GetPosition(self.device, self.paddles[paddle_num])
return currentpos
| true
| true
|
f70ce792696d4d4c744b5ebc894430f044a62707
| 1,434
|
py
|
Python
|
model/Base.py
|
massquantity/DNN-implementation
|
bca336749a2076fa2873f57d9c2b6f98ebf18a0d
|
[
"MIT"
] | null | null | null |
model/Base.py
|
massquantity/DNN-implementation
|
bca336749a2076fa2873f57d9c2b6f98ebf18a0d
|
[
"MIT"
] | null | null | null |
model/Base.py
|
massquantity/DNN-implementation
|
bca336749a2076fa2873f57d9c2b6f98ebf18a0d
|
[
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
from ..utils.activations import *
class NetworkBase(metaclass=ABCMeta):
def __init__(self, sizes, activation, last_layer, **kwargs):
self.sizes = sizes
self.num_layers = len(sizes)
if activation.lower() == "sigmoid":
self.activation = Sigmoid()
# self.activation_derivative = sigmoid_derivative
elif activation.lower() == "relu":
self.activation = ReLU()
# self.activation_derivative = relu_derivative
elif activation.lower() == "tanh":
self.activation = Tanh()
elif activation.lower() == "softplus":
self.activation = Softplus()
elif activation.lower() == "leaky_relu" or "leakyrelu":
if "alpha" in kwargs:
self.activation = LeakyReLU(kwargs.get("alpha"))
else:
self.activation = LeakyReLU()
elif activation.lower() == "elu":
if "alpha" in kwargs:
self.activation = ELU(kwargs.get("alpha"))
else:
self.activation = ELU()
elif activation.lower() == "selu":
self.activation = Selu()
if last_layer.lower() == "softmax":
self.last_layer = Softmax()
@abstractmethod
def predict(self):
raise NotImplementedError
@abstractmethod
def backprop(self):
raise NotImplementedError
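A hedged sketch of how NetworkBase is meant to be subclassed: the base class only selects the activation objects, and a concrete network must implement predict and backprop. The forward pass below assumes the activation objects are callable, which may differ from the real utils.activations API; the weight handling is illustrative only.
import numpy as np
class FeedForwardNet(NetworkBase):
    def __init__(self, sizes, activation="relu", last_layer="softmax", **kwargs):
        super().__init__(sizes, activation, last_layer, **kwargs)
        # one weight matrix per layer transition, small random initialisation
        self.weights = [np.random.randn(m, n) * 0.01
                        for m, n in zip(sizes[:-1], sizes[1:])]
    def predict(self, x):
        for w in self.weights[:-1]:
            x = self.activation(x @ w)                # hidden layers
        return self.last_layer(x @ self.weights[-1])  # output layer
    def backprop(self, x, y):
        raise NotImplementedError("gradient computation omitted from this sketch")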
| 35.85
| 64
| 0.580195
|
from abc import ABCMeta, abstractmethod
from ..utils.activations import *
class NetworkBase(metaclass=ABCMeta):
def __init__(self, sizes, activation, last_layer, **kwargs):
self.sizes = sizes
self.num_layers = len(sizes)
if activation.lower() == "sigmoid":
self.activation = Sigmoid()
elif activation.lower() == "relu":
self.activation = ReLU()
elif activation.lower() == "tanh":
self.activation = Tanh()
elif activation.lower() == "softplus":
self.activation = Softplus()
elif activation.lower() == "leaky_relu" or "leakyrelu":
if "alpha" in kwargs:
self.activation = LeakyReLU(kwargs.get("alpha"))
else:
self.activation = LeakyReLU()
elif activation.lower() == "elu":
if "alpha" in kwargs:
self.activation = ELU(kwargs.get("alpha"))
else:
self.activation = ELU()
elif activation.lower() == "selu":
self.activation = Selu()
if last_layer.lower() == "softmax":
self.last_layer = Softmax()
@abstractmethod
def predict(self):
raise NotImplementedError
@abstractmethod
def backprop(self):
raise NotImplementedError
| true
| true
|
f70ce7d6076b8cfa52b2cf0ad6a09abcd1241b5c
| 1,881
|
py
|
Python
|
setup.py
|
AndreMiras/prawtools
|
5c9ccea34c2dff84dcd3540efe852d6a1c10df89
|
[
"BSD-2-Clause"
] | 92
|
2015-01-04T14:35:32.000Z
|
2022-03-21T20:42:26.000Z
|
setup.py
|
TheManFromEarth1/prawtools
|
5c9ccea34c2dff84dcd3540efe852d6a1c10df89
|
[
"BSD-2-Clause"
] | 27
|
2015-01-29T01:32:15.000Z
|
2021-01-23T07:22:18.000Z
|
setup.py
|
TheManFromEarth1/prawtools
|
5c9ccea34c2dff84dcd3540efe852d6a1c10df89
|
[
"BSD-2-Clause"
] | 26
|
2015-02-20T15:32:00.000Z
|
2020-07-13T21:55:04.000Z
|
"""prawtools setup.py."""
import re
from codecs import open
from os import path
from setuptools import setup
PACKAGE_NAME = "prawtools"
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.md"), encoding="utf-8") as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, "__init__.py"), encoding="utf-8") as fp:
VERSION = re.search('__version__ = "([^"]+)', fp.read()).group(1)
extras = {
"ci": ["coveralls"],
"lint": ["black", "flake8", "pydocstyle"],
"test": [
"betamax >=0.7.1, <0.8",
"betamax-serializers >=0.2.0, <0.3",
"mock ==1.0.1",
"pytest",
],
}
required = ["praw >=4.0.0, <7", "six >=1, <2"]
setup(
name=PACKAGE_NAME,
author="Bryce Boe",
author_email="bbzbryce@gmail.com",
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Utilities",
],
description="A collection of utilities that utilize the reddit API.",
entry_points={
"console_scripts": [
"modutils = prawtools.mod:main",
"reddit_alert = prawtools.alert:main",
"subreddit_stats = prawtools.stats:main",
]
},
extras_require=extras,
install_requires=required,
keywords="reddit mod moderator subreddit statistics tools",
license="Simplified BSD License",
long_description=README,
packages=[PACKAGE_NAME],
url="https://github.com/praw-dev/prawtools",
version=VERSION,
)
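The version string is read out of prawtools/__init__.py with a regular expression rather than by importing the package. A standalone illustration of that pattern (the file contents are inlined here purely for demonstration):
import re
init_contents = '__version__ = "1.2.3"\n'  # stand-in for prawtools/__init__.py
version = re.search('__version__ = "([^"]+)', init_contents).group(1)
assert version == "1.2.3"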
| 29.390625
| 80
| 0.601276
|
import re
from codecs import open
from os import path
from setuptools import setup
PACKAGE_NAME = "prawtools"
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, "README.md"), encoding="utf-8") as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, "__init__.py"), encoding="utf-8") as fp:
VERSION = re.search('__version__ = "([^"]+)', fp.read()).group(1)
extras = {
"ci": ["coveralls"],
"lint": ["black", "flake8", "pydocstyle"],
"test": [
"betamax >=0.7.1, <0.8",
"betamax-serializers >=0.2.0, <0.3",
"mock ==1.0.1",
"pytest",
],
}
required = ["praw >=4.0.0, <7", "six >=1, <2"]
setup(
name=PACKAGE_NAME,
author="Bryce Boe",
author_email="bbzbryce@gmail.com",
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Utilities",
],
description="A collection of utilities that utilize the reddit API.",
entry_points={
"console_scripts": [
"modutils = prawtools.mod:main",
"reddit_alert = prawtools.alert:main",
"subreddit_stats = prawtools.stats:main",
]
},
extras_require=extras,
install_requires=required,
keywords="reddit mod moderator subreddit statistics tools",
license="Simplified BSD License",
long_description=README,
packages=[PACKAGE_NAME],
url="https://github.com/praw-dev/prawtools",
version=VERSION,
)
| true
| true
|
f70ce833ac88def087cedd6e79afd8aa8251779a
| 27,515
|
py
|
Python
|
fusionmodel.py
|
zhaoaite/CorrMNN
|
f88a70a199b462e9f3648da3ffdc5ee80a3e5f02
|
[
"MIT"
] | 1
|
2022-02-04T05:28:03.000Z
|
2022-02-04T05:28:03.000Z
|
fusionmodel.py
|
zhaoaite/CorrMNN
|
f88a70a199b462e9f3648da3ffdc5ee80a3e5f02
|
[
"MIT"
] | null | null | null |
fusionmodel.py
|
zhaoaite/CorrMNN
|
f88a70a199b462e9f3648da3ffdc5ee80a3e5f02
|
[
"MIT"
] | 1
|
2022-03-07T10:16:24.000Z
|
2022-03-07T10:16:24.000Z
|
#-*- coding: utf-8 -*-
"""
Created on Mon Dec 10 12:48:22 2018
@author: Aite Zhao
"""
from __future__ import print_function
#import random
import tensorflow as tf
#from tensorflow.python.ops import rnn, rnn_cell
import numpy as np
#import plot_confusion_matrix
import rnn_cell_GRU as rnn_cell
import rnn
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import os
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources
#from EvoloPy import *
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
from hmmlearn import hmm
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import mean_squared_error
from sklearn.learning_curve import validation_curve
from sklearn.svm import SVC
from hmmexm import hmm_4model_classification,hmm_3model_classification
from sklearn.model_selection import LeaveOneOut, KFold,cross_val_score
from deep_CCA_model import *
from linear_cca import linear_cca
n_classes = 52
def labelprocess(label,n_class=n_classes):
label_length=len(label)
label_matrix=np.zeros((label_length,n_class))
for i,j in enumerate(label):
label_matrix[i,int(j)]=1
return label_matrix
#
#def kfold_validation(data,label,n_splits=5):
# # K fold cross validation
# x_trains = []
# y_trains = []
# x_tests = []
# y_tests = []
# k_fold = KFold(n_splits)
# for train_index, test_index in k_fold.split(data):
# X_train, X_test = data[train_index], data[test_index]
# y_train, y_test = label[train_index], label[test_index]
# x_trains.append(X_train)
# y_trains.append(y_train)
# x_tests.append(X_test)
# y_tests.append(y_test)
# return x_trains,y_trains,x_tests,y_tests
#
def next_batch(batch_size,train_x,train_y,newli_train,force):
global batchid_force, batchid_time
if force==True:
if batchid_force+batch_size > len(train_x):
batchid_force = 0
batch_data = (train_x[batchid_force:min(batchid_force +batch_size, len(train_y)),:])
batch_labels = (newli_train[batchid_force:min(batchid_force + batch_size, len(newli_train)),:])
batch_labels_1d = (train_y[batchid_force:min(batchid_force + batch_size, len(train_y))])
batchid_force = min(batchid_force + batch_size, len(train_y))
return batch_data, batch_labels,batch_labels_1d
else:
if batchid_time+batch_size > len(train_x):
batchid_time = 0
batch_data = (train_x[batchid_time:min(batchid_time +batch_size, len(train_y)),:])
batch_labels = (newli_train[batchid_time:min(batchid_time + batch_size, len(newli_train)),:])
batch_labels_1d = (train_y[batchid_time:min(batchid_time + batch_size, len(train_y))])
batchid_time = min(batchid_time + batch_size, len(train_y))
return batch_data, batch_labels,batch_labels_1d
def RNN(x, weights, biases, n_input):
x = tf.transpose(x, [1, 0, 2])
# Reshaping to (n_steps*batch_size, n_input)
x = tf.reshape(tensor=x, shape=[-1, n_input])
# Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
x = tf.split(value=x, num_or_size_splits=n_steps, axis=0)
# Define a lstm cell with tensorflow
#lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1)
lstm_cell = rnn_cell.GRUCell(n_hidden)
#lstm_cell = rnn_cell.LSTMCell(n_hidden,use_peepholes=True)
# avoid overfitting
lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.5)
# 2 layers lstm
# num_units = [256, 256]
# cells = [rnn_cell.GRUCell(num_units=n) for n in num_units]
# lstm_cell = rnn_cell.MultiRNNCell(cells)
lstm_cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
# Get lstm cell output
# print(x)
outputs, states = rnn.rnn(cell=lstm_cell, inputs=x, dtype=tf.float32)
return tf.matmul(outputs[-1], weights) + biases, outputs
def feature_connect(a_time,a_force):
a=np.array([])
for j in range(int(11340/15)):
f=np.array([])
for i in range(15):
f = np.concatenate((f, a_force[j*15+i,:]), axis=0) if f.size else a_force[j*15+i,:]
a=np.c_[a,f] if a.size else f
# np.savetxt('./feature_extract/fusionfeature_data.txt', np.c_[a_time,np.transpose(a)],fmt='%.4f')
return np.c_[a_time,np.transpose(a)],np.transpose(a)
def DCCA():
# LSTM CCA
outdim_size = 10
input_size1 = n_hidden
input_size2 = n_hidden
# input_size2 = 256
layer_sizes1 = [1024, 1024, 1024, outdim_size]
layer_sizes2 = [1024, 1024, 1024, outdim_size]
layer_sizes3 = [1024, 1024, 1024, n_classes]
layer_sizes4 = [1024, 1024, 1024, n_classes]
reg_par = 1e-4
use_all_singular_values = True
dcca_model = DeepCCA(layer_sizes1, layer_sizes2,layer_sizes3,layer_sizes4,
input_size1, input_size2,
outdim_size,
reg_par, use_all_singular_values)
return dcca_model
def softmax(x):
x_exp = np.exp(x)
x_sum = np.sum(x_exp, axis = 1, keepdims = True)
s = x_exp / x_sum
return s
if __name__=='__main__':
    #suppress TensorFlow log output (TF_CPP_MIN_LOG_LEVEL=2 hides INFO/WARNING messages)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
# load data
a_force=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023f.txt")
a_force=a_force[:,0:60]
b_force=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023label.txt")
b_force=b_force-1
a_time=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/results/sdu/feature/out256_sdu_img.txt")
b_time=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023label.txt")
# a_time=a_time[:,270:330]
b_time=b_time-1
# a_time=preprocessing.normalize(a_time+1)
all_fea_force=labelprocess(b_force)
all_fea_time=labelprocess(b_time)
## train_test_split 20% testing
# train_x_time,test_x_time,train_y_time,test_y_time = train_test_split(a_time,b_time,test_size=0.2)
# train_x_force,test_x_force,train_y_force,test_y_force = train_test_split(a_force,b_force,test_size=0.2)
# print(train_x_time.shape,test_x_time.shape,train_x_force.shape,test_x_force.shape)
# newli_train_time=labelprocess(train_y_time)
# newli_test_time=labelprocess(test_y_time)
# newli_train_force=labelprocess(train_y_force)
# newli_test_force=labelprocess(test_y_force)
## 10 Fold cross validation
# x_trains_force,y_trains_force,x_tests_force,y_tests_force = kfold_validation(a_force,b_force)
# x_trains_time,y_trains_time,x_tests_time,y_tests_time = kfold_validation(a_time,b_time)
# Parameters
learning_rate = 0.001
training_iters_force = 5000000
# training_iters_time = 500000
batch_size = 256
display_step = 100
batchid_time = 0
batchid_force = 0
# Network Parameters
n_input_force = 15
n_input_time = 32
n_steps = 4
n_hidden = 128
# reset graph
tf.reset_default_graph()
# force_channel Graph
G_force=tf.Graph()
Sess_force=tf.Session(graph=G_force)
with Sess_force.as_default():
with G_force.as_default():
with tf.variable_scope("force_channel") as scope:
x_force = tf.placeholder("float", [None, n_steps, n_input_force],name='x_force')
y_force = tf.placeholder("float", [None, n_classes])
weights = {
'weights_out_force': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_force')
}
biases= {
'biases_out_force': tf.Variable(tf.random_normal([n_classes]),name='biases_out_force')
}
pred_force, out_force = RNN(x_force, weights['weights_out_force'], biases['biases_out_force'], n_input_force)
logits_scaled_force=tf.nn.softmax(pred_force)
cost_force = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_force, labels=y_force))
optimizer_force = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_force)
correct_pred_force = tf.equal(tf.argmax(pred_force,1), tf.argmax(y_force,1))
accuracy_force = tf.reduce_mean(tf.cast(correct_pred_force, tf.float32))
Sess_force.run(tf.global_variables_initializer())
saverf = tf.train.Saver()
# time_channel Graph
G_time=tf.Graph()
Sess_time=tf.Session(graph=G_time)
with Sess_time.as_default():
with G_time.as_default():
with tf.variable_scope("time_channel") as scope:
x_time = tf.placeholder("float", [None, n_steps, n_input_time],name='x_time')
y_time = tf.placeholder("float", [None, n_classes])
weights = {
'weights_out_time': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_time'),
}
biases= {
'biases_out_time': tf.Variable(tf.random_normal([n_classes]),name='biases_out_time'),
}
pred_time, out_time = RNN(x_time, weights['weights_out_time'], biases['biases_out_time'], n_input_time)
logits_scaled_time=tf.nn.softmax(pred_time)
cost_time = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_time, labels=y_time))
optimizer_time = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_time)
correct_pred_time = tf.equal(tf.argmax(pred_time,1), tf.argmax(y_time,1))
accuracy_time = tf.reduce_mean(tf.cast(correct_pred_time, tf.float32))
Sess_time.run(tf.global_variables_initializer())
savert = tf.train.Saver()
# dcca_model Graph
G_dcca=tf.Graph()
Sess_dcca=tf.Session(graph=G_dcca)
with Sess_dcca.as_default():
with G_dcca.as_default():
dcca_model=DCCA()
input_view1 = dcca_model.input_view1
input_view2 = dcca_model.input_view2
hidden_view1 = dcca_model.output_view1
hidden_view2 = dcca_model.output_view2
hidden_view1_pred = dcca_model.output_view1_class
hidden_view2_pred = dcca_model.output_view2_class
label1 = dcca_model.label1
label2 = dcca_model.label2
neg_corr = dcca_model.neg_corr
value= dcca_model.value
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
# Sess_dcca = tf.InteractiveSession(config=tf.ConfigProto(gpu_options=gpu_options))
            # maximize the correlation between the two views (unsupervised learning: minimize -corr)
# train_op = tf.train.MomentumOptimizer(learning_rate, 0.99).minimize(neg_corr,var_list=tf.trainable_variables())
train_op = tf.train.AdamOptimizer(learning_rate).minimize(neg_corr,var_list=tf.trainable_variables())
# minimize the cost between different classes supervised learning
cross_entropy1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label1, logits=hidden_view1_pred))
optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy1)
accuracy1 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view1_pred, 1), tf.argmax(label1, 1)), tf.float32))
cross_entropy2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label2, logits=hidden_view2_pred))
optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy2)
accuracy2 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view2_pred, 1), tf.argmax(label2, 1)), tf.float32))
lossfuse=cross_entropy1+cross_entropy2+tf.exp(neg_corr)
optimizerfuse=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(lossfuse)
## supervised learning
# cross_entropy1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=cnnlabel1, logits=hidden_view1))
# optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy1)
# cnnaccuracy1 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view1, 1), tf.argmax(cnnlabel1, 1)), tf.float32))
#
# cross_entropy2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=cnnlabel2, logits=hidden_view2))
# optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy2)
# cnnaccuracy2 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view2, 1), tf.argmax(cnnlabel2, 1)), tf.float32))
Sess_dcca.run(tf.global_variables_initializer())
saverd = tf.train.Saver()
# tf.InteractiveSession.close()
# weights = {
# 'weights_out_time': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_time'),
# 'weights_out_force': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_force')
# }
# biases= {
# 'biases_out_time': tf.Variable(tf.random_normal([n_classes]),name='biases_out_time'),
# 'biases_out_force': tf.Variable(tf.random_normal([n_classes]),name='biases_out_force')
# }
# weights = {
# 'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
# }
# biases= {
# 'out': tf.Variable(tf.random_normal([n_classes]))
# }
#
# with tf.variable_scope("force_channel") as scope:
# pred_force, out_force = RNN(x_force, weights['weights_out_force'], biases['biases_out_force'], n_input_force)
# pred_force, out_force = RNN(x_force, weights['out'], biases['out'], n_input_force)
# logits_scaled_force=tf.nn.softmax(pred_force)
# cost_force = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_force, labels=y_force))
# optimizer_force = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_force)
# correct_pred_force = tf.equal(tf.argmax(pred_force,1), tf.argmax(y_force,1))
# accuracy_force = tf.reduce_mean(tf.cast(correct_pred_force, tf.float32))
#
# with tf.variable_scope("time_channel") as scope:
## pred_time, out_time = RNN(x_time, weights['weights_out_time'], biases['biases_out_time'], n_input_time)
# pred_time, out_time = RNN(x_time, weights['out'], biases['out'], n_input_time)
# logits_scaled_time=tf.nn.softmax(pred_time)
# cost_time = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_time, labels=y_time))
# optimizer_time = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_time)
# correct_pred_time = tf.equal(tf.argmax(pred_time,1), tf.argmax(y_time,1))
# accuracy_time = tf.reduce_mean(tf.cast(correct_pred_time, tf.float32))
accuracys_force=[]
accuracys_time=[]
for i in range(1):
#20% split
train_x_time,test_x_time,train_y_time,test_y_time = train_test_split(a_time,b_time,test_size=0.2,random_state=1)
train_x_force,test_x_force,train_y_force,test_y_force = train_test_split(a_force,b_force,test_size=0.2,random_state=1)
print(train_x_time.shape,test_x_time.shape,train_x_force.shape,test_x_force.shape)
newli_train_time=labelprocess(train_y_time)
newli_test_time=labelprocess(test_y_time)
newli_train_force=labelprocess(train_y_force)
newli_test_force=labelprocess(test_y_force)
#10 fold
# train_x_force=x_trains_force[i]
# train_y_force=y_trains_force[i]
# test_x_force=x_tests_force[i]
# test_y_force=y_tests_force[i]
#
# train_x_time=x_trains_time[i]
# train_y_time=y_trains_time[i]
# test_x_time=x_tests_time[i]
# test_y_time=y_tests_time[i]
#
# newli_train_force=labelprocess(train_y_force)
# newli_train_time=labelprocess(train_y_time)
#
# newli_test_force=labelprocess(test_y_force)
# newli_test_time=labelprocess(test_y_time)
# Initializing the variables
# init = tf.global_variables_initializer()
# saver = tf.train.Saver()
# Launch the graph
# with tf.Session() as sess:
# #rnn
# sess.run(init)
# #rf
# # sess.run(rf_init_vars)
# tf.device('/gpu:0')
step = 1
acc_forces=[]
loss_forces=[]
acc_times=[]
loss_times=[]
dccaloss=[]
fuseloss=[]
out_force256=None
out_time256=None
tf.device('/gpu:0')
while step * batch_size < training_iters_force:
with tf.variable_scope("force_channel") as scope:
rf_batch_x_force, batch_y_force, rf_batch_y_force= next_batch(batch_size,train_x_force,train_y_force,newli_train_force,True)
batch_x_force = rf_batch_x_force.reshape((batch_size, n_steps, n_input_force))
_,out_force256=Sess_force.run([optimizer_force,out_force],
feed_dict={x_force: batch_x_force, y_force: batch_y_force})
if step % display_step == 0:
acc_force,loss_force= Sess_force.run([accuracy_force,cost_force],
feed_dict={x_force: batch_x_force, y_force: batch_y_force})
print("Iter " + str(step*batch_size) + ", Minibatch loss_force= " + \
"{:.6f}".format(loss_force) + ", Training Accuracy= " + \
"{:.5f}".format(acc_force))
acc_forces.append(acc_force)
loss_forces.append(loss_force)
# step += 1
# step = 1
# while step * batch_size < training_iters_time:
with tf.variable_scope("time_channel") as scope:
rf_batch_x_time, batch_y_time, rf_batch_y_time= next_batch(batch_size,train_x_time,train_y_time,newli_train_time,False)
batch_x_time = rf_batch_x_time.reshape((batch_size, n_steps, n_input_time))
_,out_time256=Sess_time.run([optimizer_time,out_time],
feed_dict={x_time: batch_x_time, y_time: batch_y_time})
if step % display_step == 0:
acc_time,loss_time = Sess_time.run([accuracy_time,cost_time],
feed_dict={x_time: batch_x_time, y_time: batch_y_time})
print("Iter " + str(step*batch_size) + ", Minibatch loss_time= " + \
"{:.6f}".format(loss_time) + ", Training Accuracy= " + \
"{:.5f}".format(acc_time))
acc_times.append(acc_time)
loss_times.append(loss_time)
            ################# Deep CCA maximize the correlation #############################
# correlation in each node
# for force256,time256 in zip(out_force256,out_time256):
# _, neg_corr_val,_,_= Sess_dcca.run([train_op, neg_corr,optimizer1,optimizer2],
# feed_dict={input_view1:force256,input_view2:time256,
# label1:batch_y_force,
# label2:batch_y_time})
# acc1,acc2 = Sess_dcca.run([accuracy1, accuracy2],
# feed_dict={input_view1:force256,input_view2:time256,
# label1:batch_y_force,
# label2:batch_y_time})
for force256,time256 in zip(out_force256,out_time256):
# print(force256.shape,time256.shape)
_, neg_corr_val,_,lossfuseprint,corvalue= Sess_dcca.run([train_op, neg_corr,optimizerfuse,lossfuse,value],
feed_dict={input_view1:force256,input_view2:time256,
label1:batch_y_force,
label2:batch_y_time})
# acc1,acc2 = Sess_dcca.run([accuracy1, accuracy2],
# feed_dict={input_view1:force256,input_view2:time256,
# label1:batch_y_force,
# label2:batch_y_time})
# print(corvalue)
if step % display_step == 0:
dccaloss.append(np.exp(neg_corr_val))
fuseloss.append(lossfuseprint)
# print('corr_val',-neg_corr_val)
# print("fuse_loss_for_train:", lossfuseprint)
# print("accuracy1:", acc1)
# print("accuracy2:", acc2)
step += 1
# save the training process
# np.savetxt('./results/train_loss_dcca'+str(i)+'.csv',dccaloss,delimiter=',')
# np.savetxt('./results/train_loss_fuse'+str(i)+'.csv',fuseloss,delimiter=',')
#
#
# np.savetxt('./results/train_acc_force'+str(i)+'.csv',acc_forces,delimiter=',')
# np.savetxt('./results/train_loss_force'+str(i)+'.csv',loss_forces,delimiter=',')
# np.savetxt('./results/train_acc_time'+str(i)+'.csv',acc_times,delimiter=',')
# np.savetxt('./results/train_loss_time'+str(i)+'.csv',loss_times,delimiter=',')
################# Linear CCA #############################
# Using CCA to extract feature in each node in LSTM
data_time=a_time.reshape((-1,n_steps, n_input_time))
out256_time=Sess_time.run(out_time,feed_dict={x_time: data_time, y_time: all_fea_time})
data_force=a_force.reshape((-1,n_steps, n_input_force))
out256_force=Sess_force.run(out_force,feed_dict={x_force: data_force, y_force: all_fea_force})
fusionfeature_data=np.c_[out256_time[-1],out256_force[-1]]
np.savetxt('./fusionfeature_Corrmnn_sdu.csv', fusionfeature_data, fmt='%.4f')
# compute the correlation in each node in LSTM (timestep* batchsize * 256d)
X1projlist=np.array([])
X2projlist=np.array([])
for eachnode_force,eachnode_time in zip(out256_force,out256_time):
X1proj, X2proj = Sess_dcca.run([hidden_view1, hidden_view2],
feed_dict={
input_view1: eachnode_force,
input_view2: eachnode_time})
# (11340, 10) (756, 10)
X1projlist=np.c_[X1projlist,X1proj] if X1projlist.size else X1proj
X2projlist=np.c_[X2projlist,X2proj] if X2projlist.size else X2proj
# ccafuse_data,_ = feature_connect(X2projlist,X1projlist)
ccafuse_data=np.c_[X2projlist,X1projlist]
print('----------ccafuse_data '+str(i)+'-----------')
# (756, 1600) (756, 1600)
np.savetxt('./ccafuse_sdu.csv', ccafuse_data, fmt='%.4f')
# print("Linear CCA started!")
# w = [None, None]
# m = [None, None]
# print(X1proj.shape, X2proj.shape)
# w[0], w[1], m[0], m[1] = linear_cca(X1proj, X2proj, 10)
# print("Linear CCA ended!")
# X1proj -= m[0].reshape([1, -1]).repeat(len(X1proj), axis=0)
# X1proj = np.dot(X1proj, w[0])
# X1projlist=np.c_[X1projlist,X1proj] if X1projlist.size else X1proj
# print(X1projlist.shape)
################# testing LSTM #############################
test_data=test_x_force.reshape((-1,n_steps, n_input_force))
test_label=newli_test_force
accuracy_force_out=Sess_force.run(accuracy_force, feed_dict={x_force: test_data, y_force: test_label})
print("Force Testing Accuracy:",accuracy_force_out)
test_data=test_x_time.reshape((-1,n_steps, n_input_time))
test_label=newli_test_time
accuracy_time_out=Sess_time.run(accuracy_time, feed_dict={x_time: test_data, y_time: test_label})
print("Time Testing Accuracy:",accuracy_time_out)
accuracys_force.append(accuracy_force_out)
accuracys_time.append(accuracy_time_out)
print(accuracys_force,accuracys_time)
print('accuracys_force_mean:',np.mean(accuracys_force))
print('accuracys_time_mean:',np.mean(accuracys_time))
accuracys_force.append(np.mean(accuracys_force))
accuracys_time.append(np.mean(accuracys_time))
# np.savetxt('./test_result_fog.csv',[accuracys_force,accuracys_time])
## extract the last output of the lstm in all data
# data_time=a_time.reshape((-1,n_steps, n_input_time))
# out256_time=Sess_time.run(out_time,feed_dict={x_time: data_time, y_time: all_fea_time})
#
# data_force=a_force.reshape((-1,n_steps, n_input_force))
# out256_force=Sess_force.run(out_force,feed_dict={x_force: data_force, y_force: all_fea_force})
#
# np.savetxt('./out256_time.txt', out256_time, fmt='%.4f')
# np.savetxt('./out256_force.txt', out256_force, fmt='%.4f')
#
# saver.save(sess, './modelcache/fusemodel.ckpt')
# writer=tf.summary.FileWriter('./fusemodel_graph',sess.graph)
# writer.flush()
# writer.close()
# sess.close()
# saverf.save(Sess_force, './modelcache/forcemodel.ckpt')
# writerf=tf.summary.FileWriter('./graphs/forcemodel_graph',Sess_force.graph)
# savert.save(Sess_time, './modelcache/timemodel.ckpt')
# writert=tf.summary.FileWriter('./graphs/timemodel_graph',Sess_time.graph)
# saverd.save(Sess_dcca, './modelcache/dccamodel.ckpt')
# writerd=tf.summary.FileWriter('./graphs/dccamodel_graph',Sess_dcca.graph)
# writerf.flush()
# writerf.close()
# Sess_force.close()
# writert.flush()
# writert.close()
# Sess_time.close()
# writerd.flush()
# writerd.close()
# Sess_dcca.close()
# align the two types of data
# fusionfeature_data,force_data = feature_connect(out256_time,out256_force)
# fusionfeature_data=np.c_[out256_time[-1],out256_force[-1]]
# np.savetxt('./fusionfeature_Corrmnn.txt', fusionfeature_data, fmt='%.4f')
# hmm_accuracy = hmm_4model_classification(fusionfeature_data,b_time)
# combine the lda feature(2d) with ccafuse_data
# ldafeature=np.loadtxt('./feature_extract/ldafeature_data.txt')
# ldafeature=softmax(ldafeature)
# ldafeature=preprocessing.normalize(ldafeature)
# print(ldafeature)
# ccafuse_data=np.c_[ccafuse_data,ldafeature]
#
# hmm_accuracy = hmm_4model_classification(ccafuse_data,b_time)
# print('Total hmm accuracy:',hmm_accuracy)
# fuse_data=np.loadtxt('/home/zat/zresearch/ndds-corrlstm/results/fog/fusefea.csv')
#
# hmm_accuracy = hmm_3model_classification(fuse_data,b_time)
# print('Total hmm accuracy:',hmm_accuracy)
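A small, self-contained illustration of the one-hot encoding performed by labelprocess() above, which determines the shape of the label matrices fed to both softmax heads (toy labels and class count are illustrative only):
import numpy as np
def one_hot(labels, n_class):
    out = np.zeros((len(labels), n_class))
    for i, j in enumerate(labels):
        out[i, int(j)] = 1
    return out
print(one_hot([0, 2, 1], n_class=4))
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 1. 0. 0.]]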
| 47.034188
| 140
| 0.638016
|
from __future__ import print_function
import tensorflow as tf
import numpy as np
import rnn_cell_GRU as rnn_cell
import rnn
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import os
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
from hmmlearn import hmm
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import mean_squared_error
from sklearn.learning_curve import validation_curve
from sklearn.svm import SVC
from hmmexm import hmm_4model_classification,hmm_3model_classification
from sklearn.model_selection import LeaveOneOut, KFold,cross_val_score
from deep_CCA_model import *
from linear_cca import linear_cca
n_classes = 52
def labelprocess(label,n_class=n_classes):
label_length=len(label)
label_matrix=np.zeros((label_length,n_class))
for i,j in enumerate(label):
label_matrix[i,int(j)]=1
return label_matrix
def next_batch(batch_size,train_x,train_y,newli_train,force):
global batchid_force, batchid_time
if force==True:
if batchid_force+batch_size > len(train_x):
batchid_force = 0
batch_data = (train_x[batchid_force:min(batchid_force +batch_size, len(train_y)),:])
batch_labels = (newli_train[batchid_force:min(batchid_force + batch_size, len(newli_train)),:])
batch_labels_1d = (train_y[batchid_force:min(batchid_force + batch_size, len(train_y))])
batchid_force = min(batchid_force + batch_size, len(train_y))
return batch_data, batch_labels,batch_labels_1d
else:
if batchid_time+batch_size > len(train_x):
batchid_time = 0
batch_data = (train_x[batchid_time:min(batchid_time +batch_size, len(train_y)),:])
batch_labels = (newli_train[batchid_time:min(batchid_time + batch_size, len(newli_train)),:])
batch_labels_1d = (train_y[batchid_time:min(batchid_time + batch_size, len(train_y))])
batchid_time = min(batchid_time + batch_size, len(train_y))
return batch_data, batch_labels,batch_labels_1d
def RNN(x, weights, biases, n_input):
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(tensor=x, shape=[-1, n_input])
x = tf.split(value=x, num_or_size_splits=n_steps, axis=0)
lstm_cell = rnn_cell.GRUCell(n_hidden)
lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.5)
lstm_cell = rnn_cell.MultiRNNCell([lstm_cell] * 2)
outputs, states = rnn.rnn(cell=lstm_cell, inputs=x, dtype=tf.float32)
return tf.matmul(outputs[-1], weights) + biases, outputs
def feature_connect(a_time,a_force):
a=np.array([])
for j in range(int(11340/15)):
f=np.array([])
for i in range(15):
f = np.concatenate((f, a_force[j*15+i,:]), axis=0) if f.size else a_force[j*15+i,:]
a=np.c_[a,f] if a.size else f
return np.c_[a_time,np.transpose(a)],np.transpose(a)
def DCCA():
outdim_size = 10
input_size1 = n_hidden
input_size2 = n_hidden
layer_sizes1 = [1024, 1024, 1024, outdim_size]
layer_sizes2 = [1024, 1024, 1024, outdim_size]
layer_sizes3 = [1024, 1024, 1024, n_classes]
layer_sizes4 = [1024, 1024, 1024, n_classes]
reg_par = 1e-4
use_all_singular_values = True
dcca_model = DeepCCA(layer_sizes1, layer_sizes2,layer_sizes3,layer_sizes4,
input_size1, input_size2,
outdim_size,
reg_par, use_all_singular_values)
return dcca_model
def softmax(x):
x_exp = np.exp(x)
x_sum = np.sum(x_exp, axis = 1, keepdims = True)
s = x_exp / x_sum
return s
if __name__=='__main__':
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
a_force=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023f.txt")
a_force=a_force[:,0:60]
b_force=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023label.txt")
b_force=b_force-1
a_time=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/results/sdu/feature/out256_sdu_img.txt")
b_time=np.loadtxt("/home/zat/zresearch/ndds-corrlstm/data/sdugait/12023label.txt")
b_time=b_time-1
all_fea_force=labelprocess(b_force)
all_fea_time=labelprocess(b_time)
learning_rate = 0.001
training_iters_force = 5000000
batch_size = 256
display_step = 100
batchid_time = 0
batchid_force = 0
n_input_force = 15
n_input_time = 32
n_steps = 4
n_hidden = 128
tf.reset_default_graph()
G_force=tf.Graph()
Sess_force=tf.Session(graph=G_force)
with Sess_force.as_default():
with G_force.as_default():
with tf.variable_scope("force_channel") as scope:
x_force = tf.placeholder("float", [None, n_steps, n_input_force],name='x_force')
y_force = tf.placeholder("float", [None, n_classes])
weights = {
'weights_out_force': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_force')
}
biases= {
'biases_out_force': tf.Variable(tf.random_normal([n_classes]),name='biases_out_force')
}
pred_force, out_force = RNN(x_force, weights['weights_out_force'], biases['biases_out_force'], n_input_force)
logits_scaled_force=tf.nn.softmax(pred_force)
cost_force = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_force, labels=y_force))
optimizer_force = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_force)
correct_pred_force = tf.equal(tf.argmax(pred_force,1), tf.argmax(y_force,1))
accuracy_force = tf.reduce_mean(tf.cast(correct_pred_force, tf.float32))
Sess_force.run(tf.global_variables_initializer())
saverf = tf.train.Saver()
G_time=tf.Graph()
Sess_time=tf.Session(graph=G_time)
with Sess_time.as_default():
with G_time.as_default():
with tf.variable_scope("time_channel") as scope:
x_time = tf.placeholder("float", [None, n_steps, n_input_time],name='x_time')
y_time = tf.placeholder("float", [None, n_classes])
weights = {
'weights_out_time': tf.Variable(tf.random_normal([n_hidden, n_classes]),name='weights_out_time'),
}
biases= {
'biases_out_time': tf.Variable(tf.random_normal([n_classes]),name='biases_out_time'),
}
pred_time, out_time = RNN(x_time, weights['weights_out_time'], biases['biases_out_time'], n_input_time)
logits_scaled_time=tf.nn.softmax(pred_time)
cost_time = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred_time, labels=y_time))
optimizer_time = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost_time)
correct_pred_time = tf.equal(tf.argmax(pred_time,1), tf.argmax(y_time,1))
accuracy_time = tf.reduce_mean(tf.cast(correct_pred_time, tf.float32))
Sess_time.run(tf.global_variables_initializer())
savert = tf.train.Saver()
G_dcca=tf.Graph()
Sess_dcca=tf.Session(graph=G_dcca)
with Sess_dcca.as_default():
with G_dcca.as_default():
dcca_model=DCCA()
input_view1 = dcca_model.input_view1
input_view2 = dcca_model.input_view2
hidden_view1 = dcca_model.output_view1
hidden_view2 = dcca_model.output_view2
hidden_view1_pred = dcca_model.output_view1_class
hidden_view2_pred = dcca_model.output_view2_class
label1 = dcca_model.label1
label2 = dcca_model.label2
neg_corr = dcca_model.neg_corr
value= dcca_model.value
train_op = tf.train.AdamOptimizer(learning_rate).minimize(neg_corr,var_list=tf.trainable_variables())
cross_entropy1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label1, logits=hidden_view1_pred))
optimizer1 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy1)
accuracy1 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view1_pred, 1), tf.argmax(label1, 1)), tf.float32))
cross_entropy2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=label2, logits=hidden_view2_pred))
optimizer2 = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy2)
accuracy2 = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hidden_view2_pred, 1), tf.argmax(label2, 1)), tf.float32))
lossfuse=cross_entropy1+cross_entropy2+tf.exp(neg_corr)
optimizerfuse=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(lossfuse)
Sess_dcca.run(tf.global_variables_initializer())
saverd = tf.train.Saver()
accuracys_force=[]
accuracys_time=[]
for i in range(1):
train_x_time,test_x_time,train_y_time,test_y_time = train_test_split(a_time,b_time,test_size=0.2,random_state=1)
train_x_force,test_x_force,train_y_force,test_y_force = train_test_split(a_force,b_force,test_size=0.2,random_state=1)
print(train_x_time.shape,test_x_time.shape,train_x_force.shape,test_x_force.shape)
newli_train_time=labelprocess(train_y_time)
newli_test_time=labelprocess(test_y_time)
newli_train_force=labelprocess(train_y_force)
newli_test_force=labelprocess(test_y_force)
step = 1
acc_forces=[]
loss_forces=[]
acc_times=[]
loss_times=[]
dccaloss=[]
fuseloss=[]
out_force256=None
out_time256=None
tf.device('/gpu:0')
while step * batch_size < training_iters_force:
with tf.variable_scope("force_channel") as scope:
rf_batch_x_force, batch_y_force, rf_batch_y_force= next_batch(batch_size,train_x_force,train_y_force,newli_train_force,True)
batch_x_force = rf_batch_x_force.reshape((batch_size, n_steps, n_input_force))
_,out_force256=Sess_force.run([optimizer_force,out_force],
feed_dict={x_force: batch_x_force, y_force: batch_y_force})
if step % display_step == 0:
acc_force,loss_force= Sess_force.run([accuracy_force,cost_force],
feed_dict={x_force: batch_x_force, y_force: batch_y_force})
print("Iter " + str(step*batch_size) + ", Minibatch loss_force= " + \
"{:.6f}".format(loss_force) + ", Training Accuracy= " + \
"{:.5f}".format(acc_force))
acc_forces.append(acc_force)
loss_forces.append(loss_force)
with tf.variable_scope("time_channel") as scope:
rf_batch_x_time, batch_y_time, rf_batch_y_time= next_batch(batch_size,train_x_time,train_y_time,newli_train_time,False)
batch_x_time = rf_batch_x_time.reshape((batch_size, n_steps, n_input_time))
_,out_time256=Sess_time.run([optimizer_time,out_time],
feed_dict={x_time: batch_x_time, y_time: batch_y_time})
if step % display_step == 0:
acc_time,loss_time = Sess_time.run([accuracy_time,cost_time],
feed_dict={x_time: batch_x_time, y_time: batch_y_time})
print("Iter " + str(step*batch_size) + ", Minibatch loss_time= " + \
"{:.6f}".format(loss_time) + ", Training Accuracy= " + \
"{:.5f}".format(acc_time))
acc_times.append(acc_time)
loss_times.append(loss_time)
for force256,time256 in zip(out_force256,out_time256):
_, neg_corr_val,_,lossfuseprint,corvalue= Sess_dcca.run([train_op, neg_corr,optimizerfuse,lossfuse,value],
feed_dict={input_view1:force256,input_view2:time256,
label1:batch_y_force,
label2:batch_y_time})
if step % display_step == 0:
dccaloss.append(np.exp(neg_corr_val))
fuseloss.append(lossfuseprint)
step += 1
data_time=a_time.reshape((-1,n_steps, n_input_time))
out256_time=Sess_time.run(out_time,feed_dict={x_time: data_time, y_time: all_fea_time})
data_force=a_force.reshape((-1,n_steps, n_input_force))
out256_force=Sess_force.run(out_force,feed_dict={x_force: data_force, y_force: all_fea_force})
fusionfeature_data=np.c_[out256_time[-1],out256_force[-1]]
np.savetxt('./fusionfeature_Corrmnn_sdu.csv', fusionfeature_data, fmt='%.4f')
X1projlist=np.array([])
X2projlist=np.array([])
for eachnode_force,eachnode_time in zip(out256_force,out256_time):
X1proj, X2proj = Sess_dcca.run([hidden_view1, hidden_view2],
feed_dict={
input_view1: eachnode_force,
input_view2: eachnode_time})
X1projlist=np.c_[X1projlist,X1proj] if X1projlist.size else X1proj
X2projlist=np.c_[X2projlist,X2proj] if X2projlist.size else X2proj
ccafuse_data=np.c_[X2projlist,X1projlist]
print('----------ccafuse_data '+str(i)+'-----------')
np.savetxt('./ccafuse_sdu.csv', ccafuse_data, fmt='%.4f')
test_data=test_x_force.reshape((-1,n_steps, n_input_force))
test_label=newli_test_force
accuracy_force_out=Sess_force.run(accuracy_force, feed_dict={x_force: test_data, y_force: test_label})
print("Force Testing Accuracy:",accuracy_force_out)
test_data=test_x_time.reshape((-1,n_steps, n_input_time))
test_label=newli_test_time
accuracy_time_out=Sess_time.run(accuracy_time, feed_dict={x_time: test_data, y_time: test_label})
print("Time Testing Accuracy:",accuracy_time_out)
accuracys_force.append(accuracy_force_out)
accuracys_time.append(accuracy_time_out)
print(accuracys_force,accuracys_time)
print('accuracys_force_mean:',np.mean(accuracys_force))
print('accuracys_time_mean:',np.mean(accuracys_time))
accuracys_force.append(np.mean(accuracys_force))
accuracys_time.append(np.mean(accuracys_time))
| true
| true
|
f70ce91d5070d4971495752d0b2971a0ed5fb7f8
| 13,671
|
py
|
Python
|
workflow/workflow_inventory.py
|
kking423/digital_library
|
643c396991bbc9664312826e849d3b9baae98c0d
|
[
"MIT"
] | null | null | null |
workflow/workflow_inventory.py
|
kking423/digital_library
|
643c396991bbc9664312826e849d3b9baae98c0d
|
[
"MIT"
] | 2
|
2022-01-11T18:41:24.000Z
|
2022-01-11T19:33:09.000Z
|
workflow/workflow_inventory.py
|
kking423/digital_library
|
643c396991bbc9664312826e849d3b9baae98c0d
|
[
"MIT"
] | null | null | null |
import datetime
import shutil
import services.inventory
import workflow
import pandas as pd
import os
import file_system
import file_system.images as images
import json
from file_system.file_system_object import FileSystemObject
from services import inventory, library
from tabulate import tabulate
import cv2
TEMP_FOLDER = "tmp/eval"
RECYCLE_BIN = "tmp/recycle_bin"
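# Menu-driven workflow for adding, reconciling, de-duplicating, restoring and classifying library inventory.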
def inventory_menu():
library_id = prompt_for_library()
while True:
print("\n")
print("###############################################")
print("Digital Library Utility - Inventory Management ")
print("###############################################")
print("[0] Return to Main Menu")
print("[1] Add/Update (Refresh) Inventory")
print("[3] View Inventory")
print("[4] Reconcile (Library) Inventory")
print("[5] Update Inventory Compare Scores")
print("[6] Manage Duplicate Inventory")
print("[7] Restore files from Recycle Bin")
print("[8] Classify Inventory")
choice = input("> ")
if choice.isnumeric() and int(choice) in range(10):
if int(choice) == 0:
workflow.main_menu()
elif int(choice) == 1: # add/update inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 3: # view inventory by library
display_library_inventory(library_id)
elif int(choice) == 4: # reconcile inventory
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 5: # reconcile inventory with compare score calculation
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
elif int(choice) == 6: # manage duplicate inventory
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
get_comparable_inventory(library_id=library_id)
move_files_to_recycle_bin(library_id=library_id)
clear_eval_folder(TEMP_FOLDER)
refresh_inventory(library_id=library_id)
elif int(choice) == 7:
restore_from_recycle_bin()
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 8:
display_library_inventory(library_id)
update_classification(library_id)
else:
print("Selection not valid. Please try again.")
def refresh_inventory(library_id):
src = get_library_base_path(library_id)
exclusion_list = ['.map', 'venv', '.pyc', '__pycache__', '.DS_Store', 'ignore', '.idea', 'git']
restricted_list = []
data = file_system.search(search_path=src,
recursive=True,
exclusion_list=exclusion_list,
restricted_list=restricted_list)
for idx, item in enumerate(data):
data[idx]['library_id'] = library_id
if not data[idx]['is_hidden']:
inventory.refresh_inventory(**data[idx])
def prompt_for_library():
workflow.workflow_library.display_user_libraries()
prompt = input("Select Library ID: ")
if lib := services.library.get_library(prompt):
return lib.library_id
print(f"{prompt} is not a valid Library ID")
return prompt_for_library()  # retry until a valid library ID is entered
def get_library_base_path(library_id):
lib = library.get_library(library_id)
return lib.base_path
def update_inventory_compare_scores(inventory_id, full_path):
fso = FileSystemObject(full_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
fso['inventory_removed_date'] = None
inv = inventory.get_inventory_item(inventory_id)
if not inv.compare_score or inv.compare_score == 0 or inv.compare_score_dt < inv.modified_dt:
fso['compare_score'] = (update_compare_score(full_path, size=fso['size']))
fso['compare_score_dt'] = datetime.datetime.now()
inventory.update_inventory(inventory_id, **fso)
else:
data = {'inventory_removed_date': datetime.datetime.now()}
inventory.update_inventory(inventory_id, **data)
def update_compare_score(full_path, size):
return images.calculate_compare_score(full_path, size=size)
def get_inventory(library_id):
return inventory.get_library_inventory(library_id=library_id)
def display_all_inventory():
results = inventory.get_all_inventory()
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])  # sort_values is not in-place; keep the sorted frame
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
def display_library_inventory(library_id):
if results := inventory.get_library_inventory(library_id):
df = pd.DataFrame(results)
# df = df.drop(['_sa_instance_state'], axis=1)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])  # sort_values is not in-place; keep the sorted frame
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
else:
return None
def reconcile_inventory(library_id, calculate_compare_score: bool = False):
# Purpose: Identify files/folders that no longer exist and update DB accordingly
# library_id = prompt_for_library()
results = inventory.get_library_inventory(library_id)
for idx, item in enumerate(results):
if results[idx]['file']:
src_path = results[idx]['full_path']
inventory_id = results[idx]['inventory_id']
fso = FileSystemObject(src_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
data = {
'inventory_removed_date': None,
'inventory_removed_reason': None,
'is_missing': False
}
else:
data = {'inventory_removed_date': datetime.datetime.now(),
'is_missing': True
}
inventory.update_inventory(inventory_id, **data)
if calculate_compare_score:
update_inventory_compare_scores(inventory_id, src_path)
def restore_from_recycle_bin():
path = RECYCLE_BIN
for root, folders, files in os.walk(path, topdown=True):
for file in files:
recycled_file = os.path.splitext(file)[0]
src = os.path.join(root, file)
original_file = services.inventory.get_inventory_item(recycled_file)
dest = original_file.full_path
shutil.move(src, dest)
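# Duplicate management: rows sharing a compare_score are grouped, copied into TEMP_FOLDER for
# visual inspection, and the user chooses which inventory IDs to keep.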
def get_comparable_inventory(library_id):
try:
if data := inventory.get_comparable_inventory(library_id):
df = pd.DataFrame(data)
# df = df.drop(['_sa_instance_state'], axis=1)
df["file"] = df["file"].str.lower()
df['compare_score_frequency'] = df.groupby('compare_score')['compare_score'].transform('count')
df = df[df.groupby('compare_score')['compare_score'].transform('count') > 1]
df = df[['inventory_id', 'library_id', 'directory', 'full_path', 'file', 'file_extension',
'size', 'created_dt', 'modified_dt',
'compare_score_dt', 'compare_score', 'compare_score_frequency']]
# df.sort_values(by=['compare_score', 'size'])
# print(tabulate(df, headers='keys', tablefmt='psql'))
group_duplicates(df)
clear_eval_folder(TEMP_FOLDER)
else:
print("No duplicates were found.")
except Exception as e:
print(f"An unexpected error has occurred: {e}")
def group_duplicates(df: pd.DataFrame):
distinct_scores = list(df['compare_score'].unique())
count = len(distinct_scores)
for counter, score in enumerate(distinct_scores, 1):
sample = df[df["compare_score"] == score]
sample = pd.DataFrame(sample, columns=['inventory_id', 'file', 'file_extension', 'full_path', 'directory',
'size', 'created_dt', 'modified_dt'])
sample.reset_index(drop=True, inplace=True)
print("###############################################")
print(f"Potential Duplicate Group {counter} of {count}")
print(f"Compare Score: {score}")
print("###############################################")
evaluate_duplicates_by_group(sample)
def evaluate_duplicates_by_group(sample: pd.DataFrame):
clear_eval_folder(path=TEMP_FOLDER)
group = []
# print(tabulate(sample.head(), headers='keys', tablefmt='psql'))
for idx, row in sample.iterrows():
group.append(row['inventory_id'])
inventory_id = row['inventory_id']
created = row['created_dt']
modified = row['modified_dt']
size = row['size']
src = row['full_path']
dest = f'{TEMP_FOLDER}/' + inventory_id + row['file_extension']
print(f"InventoryID: {inventory_id} | File: {row['file']} | Created: {created} | "
f"Modified: {modified} | Size: {size}")
shutil.copy2(src, dest)
if retain := input("Enter Inventory IDs you wish to keep (separate by comma): ").split(","):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def move_files_to_recycle_bin(library_id):
reconcile_inventory(library_id, calculate_compare_score=False)
if data := inventory.get_removed_inventory(library_id):
for idx, item in enumerate(data):
src = data[idx]['full_path']
inventory_id = data[idx]['inventory_id']
file_extension = data[idx]['file_extension']
dest = f'{RECYCLE_BIN}/' + inventory_id + file_extension
try:
shutil.move(src, dest)
except FileNotFoundError:
print("A FileNotFound error has occurred.")
def remove_inventory(group: list, retain: list):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def clear_eval_folder(path: str):
mypath = path
for root, dirs, files in os.walk(mypath):
for file in files:
os.remove(os.path.join(root, file))
def select_inventory_item():
return input("Input Inventory ID: ")
def get_inventory_item(inventory_id):
return services.inventory.get_inventory_item(inventory_id=inventory_id)
def update_classification(library_id, incl_assignment: bool = False):
inv = workflow.workflow_inventory.get_inventory(library_id=library_id)
try:
for file in inv:
inventory_id = file['inventory_id']
if file['is_image']:
# inv = services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict()
cv2.imshow(file['file'], cv2.imread(file['full_path']))
cv2.waitKey(1)
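# Show the image while prompting for tags; waitKey(1) refreshes the window without blocking input.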
if file['classification']:
print(f"Current Tags: {file['classification']['tags']}")
tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
data = {
'inventory_id': inventory_id,
'classification': {'tags': tag_values},
'model_assignment': input("Model Assignment Name: ") if incl_assignment else file['model_assignment']
}
services.inventory.update_inventory_classification(**data)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
except:
raise
def update_classification_from_model(inventory_id, tags: str):
file = workflow.workflow_inventory.get_inventory_item(inventory_id).to_dict()
classification = file['classification']['tags'] if file['classification'] else []
classification.append(tags)
classification = list(set(classification))
data = {
'inventory_id': inventory_id,
'classification': {'tags': classification}
}
services.inventory.update_inventory_classification(**data)
# for image in inv:
# inventory_id = image['inventory_id']
#
# try:
# if inv := services.inventory.get_inventory_item(inventory_id=inventory_id).to_dict():
# cv2.imshow(image['file'], image['full_path'])
# # cv2.imwrite("tests/samples/ml/test/output.jpg", image)
# cv2.waitKey(0)
# # cv2.destroyAllWindows()
# if inv['classification']:
# print(f"Current Tags: {inv['classification']['tags']}")
#
# tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
# data = {
# 'inventory_id': inventory_id,
# 'classification': {'tags': tag_values},
# 'model_assignment': input("Model Assignment Name: ") if incl_assignment else inv['model_assignment']
# }
# services.inventory.update_inventory_classification(**data)
#
# cv2.destroyAllWindows()
# except:
# raise
#5351dd023ef1440393b81ec0acbe2f4a
| 37.975
| 122
| 0.616049
|
import datetime
import shutil
import services.inventory
import workflow
import pandas as pd
import os
import file_system
import file_system.images as images
import json
from file_system.file_system_object import FileSystemObject
from services import inventory, library
from tabulate import tabulate
import cv2
TEMP_FOLDER = "tmp/eval"
RECYCLE_BIN = "tmp/recycle_bin"
def inventory_menu():
library_id = prompt_for_library()
while True:
print("\n")
print("###############################################")
print("Digital Library Utility - Inventory Management ")
print("###############################################")
print("[0] Return to Main Menu")
print("[1] Add/Update (Refresh) Inventory")
print("[3] View Inventory")
print("[4] Reconcile (Library) Inventory")
print("[5] Update Inventory Compare Scores")
print("[6] Manage Duplicate Inventory")
print("[7] Restore files from Recycle Bin")
print("[8] Classify Inventory")
choice = input("> ")
if choice.isnumeric() and int(choice) in range(10):
if int(choice) == 0:
workflow.main_menu()
elif int(choice) == 1:
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 3:
display_library_inventory(library_id)
elif int(choice) == 4:
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 5:
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
elif int(choice) == 6:
refresh_inventory(library_id=library_id)
reconcile_inventory(library_id=library_id, calculate_compare_score=True)
get_comparable_inventory(library_id=library_id)
move_files_to_recycle_bin(library_id=library_id)
clear_eval_folder(TEMP_FOLDER)
refresh_inventory(library_id=library_id)
elif int(choice) == 7:
restore_from_recycle_bin()
reconcile_inventory(library_id=library_id, calculate_compare_score=False)
elif int(choice) == 8:
display_library_inventory(library_id)
update_classification(library_id)
else:
print("Selection not valid. Please try again.")
def refresh_inventory(library_id):
src = get_library_base_path(library_id)
exclusion_list = ['.map', 'venv', '.pyc', '__pycache__', '.DS_Store', 'ignore', '.idea', 'git']
restricted_list = []
data = file_system.search(search_path=src,
recursive=True,
exclusion_list=exclusion_list,
restricted_list=restricted_list)
for idx, item in enumerate(data):
data[idx]['library_id'] = library_id
if not data[idx]['is_hidden']:
inventory.refresh_inventory(**data[idx])
def prompt_for_library():
workflow.workflow_library.display_user_libraries()
prompt = input("Select Library ID: ")
if lib := services.library.get_library(prompt):
return lib.library_id
print(f"{prompt} is not a valid Library ID")
return prompt_for_library()
def get_library_base_path(library_id):
lib = library.get_library(library_id)
return lib.base_path
def update_inventory_compare_scores(inventory_id, full_path):
fso = FileSystemObject(full_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
fso['inventory_removed_date'] = None
inv = inventory.get_inventory_item(inventory_id)
if not inv.compare_score or inv.compare_score == 0 or inv.compare_score_dt < inv.modified_dt:
fso['compare_score'] = (update_compare_score(full_path, size=fso['size']))
fso['compare_score_dt'] = datetime.datetime.now()
inventory.update_inventory(inventory_id, **fso)
else:
data = {'inventory_removed_date': datetime.datetime.now()}
inventory.update_inventory(inventory_id, **data)
def update_compare_score(full_path, size):
return images.calculate_compare_score(full_path, size=size)
def get_inventory(library_id):
return inventory.get_library_inventory(library_id=library_id)
def display_all_inventory():
results = inventory.get_all_inventory()
df = pd.DataFrame(results)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
def display_library_inventory(library_id):
if results := inventory.get_library_inventory(library_id):
df = pd.DataFrame(results)
df = df.sort_values(by=['library_id', 'directory', 'full_path'])
print(tabulate(df.head(500), headers='keys', tablefmt='psql'))
else:
return None
def reconcile_inventory(library_id, calculate_compare_score: bool = False):
results = inventory.get_library_inventory(library_id)
for idx, item in enumerate(results):
if results[idx]['file']:
src_path = results[idx]['full_path']
inventory_id = results[idx]['inventory_id']
fso = FileSystemObject(src_path).to_dict()
if fso and fso['is_found'] and not fso['is_hidden']:
data = {
'inventory_removed_date': None,
'inventory_removed_reason': None,
'is_missing': False
}
else:
data = {'inventory_removed_date': datetime.datetime.now(),
'is_missing': True
}
inventory.update_inventory(inventory_id, **data)
if calculate_compare_score:
update_inventory_compare_scores(inventory_id, src_path)
def restore_from_recycle_bin():
path = RECYCLE_BIN
for root, folders, files in os.walk(path, topdown=True):
for file in files:
recycled_file = os.path.splitext(file)[0]
src = os.path.join(root, file)
original_file = services.inventory.get_inventory_item(recycled_file)
dest = original_file.full_path
shutil.move(src, dest)
def get_comparable_inventory(library_id):
try:
if data := inventory.get_comparable_inventory(library_id):
df = pd.DataFrame(data)
df["file"] = df["file"].str.lower()
df['compare_score_frequency'] = df.groupby('compare_score')['compare_score'].transform('count')
df = df[df.groupby('compare_score')['compare_score'].transform('count') > 1]
df = df[['inventory_id', 'library_id', 'directory', 'full_path', 'file', 'file_extension',
'size', 'created_dt', 'modified_dt',
'compare_score_dt', 'compare_score', 'compare_score_frequency']]
group_duplicates(df)
clear_eval_folder(TEMP_FOLDER)
else:
print("No duplicates were found.")
except Exception as e:
print(f"An unexpected error has occurred: {e}")
def group_duplicates(df: pd.DataFrame):
distinct_scores = list(df['compare_score'].unique())
count = len(distinct_scores)
for counter, score in enumerate(distinct_scores, 1):
sample = df[df["compare_score"] == score]
sample = pd.DataFrame(sample, columns=['inventory_id', 'file', 'file_extension', 'full_path', 'directory',
'size', 'created_dt', 'modified_dt'])
sample.reset_index(drop=True, inplace=True)
print("###############################################")
print(f"Potential Duplicate Group {counter} of {count}")
print(f"Compare Score: {score}")
print("###############################################")
evaluate_duplicates_by_group(sample)
def evaluate_duplicates_by_group(sample: pd.DataFrame):
clear_eval_folder(path=TEMP_FOLDER)
group = []
for idx, row in sample.iterrows():
group.append(row['inventory_id'])
inventory_id = row['inventory_id']
created = row['created_dt']
modified = row['modified_dt']
size = row['size']
src = row['full_path']
dest = f'{TEMP_FOLDER}/' + inventory_id + row['file_extension']
print(f"InventoryID: {inventory_id} | File: {row['file']} | Created: {created} | "
f"Modified: {modified} | Size: {size}")
shutil.copy2(src, dest)
if retain := input("Enter Inventory IDs you wish to keep (separate by comma): ").split(","):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def move_files_to_recycle_bin(library_id):
reconcile_inventory(library_id, calculate_compare_score=False)
if data := inventory.get_removed_inventory(library_id):
for idx, item in enumerate(data):
src = data[idx]['full_path']
inventory_id = data[idx]['inventory_id']
file_extension = data[idx]['file_extension']
dest = f'{RECYCLE_BIN}/' + inventory_id + file_extension
try:
shutil.move(src, dest)
except FileNotFoundError:
print("A FileNotFound error has occurred.")
def remove_inventory(group: list, retain: list):
for idx, item in enumerate(retain):
retain[idx] = item.strip()
for inv_id in group:
if inv_id not in retain:
reason = input(f"Enter reason for removal of {inv_id}: ")
services.inventory.remove_inventory_item(inv_id.strip(), reason.strip())
def clear_eval_folder(path: str):
mypath = path
for root, dirs, files in os.walk(mypath):
for file in files:
os.remove(os.path.join(root, file))
def select_inventory_item():
return input("Input Inventory ID: ")
def get_inventory_item(inventory_id):
return services.inventory.get_inventory_item(inventory_id=inventory_id)
def update_classification(library_id, incl_assignment: bool = False):
inv = workflow.workflow_inventory.get_inventory(library_id=library_id)
try:
for file in inv:
inventory_id = file['inventory_id']
if file['is_image']:
cv2.imshow(file['file'], cv2.imread(file['full_path']))
cv2.waitKey(1)
if file['classification']:
print(f"Current Tags: {file['classification']['tags']}")
tag_values = [item.strip() for item in input("Input Tags (separated by comma): ").split(',')]
data = {
'inventory_id': inventory_id,
'classification': {'tags': tag_values},
'model_assignment': input("Model Assignment Name: ") if incl_assignment else file['model_assignment']
}
services.inventory.update_inventory_classification(**data)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
except:
raise
def update_classification_from_model(inventory_id, tags: str):
file = workflow.workflow_inventory.get_inventory_item(inventory_id).to_dict()
classification = file['classification']['tags'] if file['classification'] else []
classification.append(tags)
classification = list(set(classification))
data = {
'inventory_id': inventory_id,
'classification': {'tags': classification}
}
services.inventory.update_inventory_classification(**data)
| true
| true
|
f70ce96a8fb69cd9133101e1c0f2f4391bb082bd
| 5,814
|
py
|
Python
|
tests/extension/thread_/slice/test_thread_slice.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
tests/extension/thread_/slice/test_thread_slice.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
tests/extension/thread_/slice/test_thread_slice.py
|
akmaru/veriloggen
|
74f998139e8cf613f7703fa4cffd571bbf069bbc
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_slice
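# The design generated by thread_slice.mkTest() is checked against the reference RTL below.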
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [8-1:0] LED;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.LED(LED)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [8-1:0] count;
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_times_0;
reg signed [32-1:0] _th_blink_i_1;
reg signed [32-1:0] _th_blink_x_2;
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
localparam th_blink_9 = 9;
localparam th_blink_10 = 10;
localparam th_blink_11 = 11;
localparam th_blink_12 = 12;
localparam th_blink_13 = 13;
localparam th_blink_14 = 14;
localparam th_blink_15 = 15;
localparam th_blink_16 = 16;
localparam th_blink_17 = 17;
localparam th_blink_18 = 18;
localparam th_blink_19 = 19;
localparam th_blink_20 = 20;
localparam th_blink_21 = 21;
localparam th_blink_22 = 22;
localparam th_blink_23 = 23;
localparam th_blink_24 = 24;
localparam th_blink_25 = 25;
localparam th_blink_26 = 26;
localparam th_blink_27 = 27;
localparam th_blink_28 = 28;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_times_0 <= 0;
LED <= 0;
count <= 0;
_th_blink_i_1 <= 0;
_th_blink_x_2 <= 0;
LED[_th_blink_x_2] <= (0 >> _th_blink_x_2) & 1'd1;
end else begin
case(th_blink)
th_blink_init: begin
_th_blink_times_0 <= 10;
th_blink <= th_blink_1;
end
th_blink_1: begin
LED <= 0;
th_blink <= th_blink_2;
end
th_blink_2: begin
count <= 0;
th_blink <= th_blink_3;
end
th_blink_3: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_4;
end
th_blink_4: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_5;
end else begin
th_blink <= th_blink_12;
end
end
th_blink_5: begin
_th_blink_x_2 <= 0;
th_blink <= th_blink_6;
end
th_blink_6: begin
if(_th_blink_x_2 < 8) begin
th_blink <= th_blink_7;
end else begin
th_blink <= th_blink_9;
end
end
th_blink_7: begin
LED[_th_blink_x_2] <= count[_th_blink_x_2];
th_blink <= th_blink_8;
end
th_blink_8: begin
_th_blink_x_2 <= _th_blink_x_2 + 1;
th_blink <= th_blink_6;
end
th_blink_9: begin
$display("led = %d", LED);
th_blink <= th_blink_10;
end
th_blink_10: begin
count <= count + 1;
th_blink <= th_blink_11;
end
th_blink_11: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_4;
end
th_blink_12: begin
LED <= 0;
th_blink <= th_blink_13;
end
th_blink_13: begin
count <= 0;
th_blink <= th_blink_14;
end
th_blink_14: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_15;
end
th_blink_15: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_16;
end else begin
th_blink <= th_blink_20;
end
end
th_blink_16: begin
LED <= count[1:0];
th_blink <= th_blink_17;
end
th_blink_17: begin
$display("led = %d", LED);
th_blink <= th_blink_18;
end
th_blink_18: begin
count <= count + 1;
th_blink <= th_blink_19;
end
th_blink_19: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_15;
end
th_blink_20: begin
LED <= 0;
th_blink <= th_blink_21;
end
th_blink_21: begin
count <= 0;
th_blink <= th_blink_22;
end
th_blink_22: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_23;
end
th_blink_23: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_24;
end else begin
th_blink <= th_blink_28;
end
end
th_blink_24: begin
LED <= { count[6], count[4], count[2], count[0] };
th_blink <= th_blink_25;
end
th_blink_25: begin
$display("led = %d", LED);
th_blink <= th_blink_26;
end
th_blink_26: begin
count <= count + 1;
th_blink <= th_blink_27;
end
th_blink_27: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_23;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = thread_slice.mkTest()
code = test_module.to_verilog()
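# Round-trip the reference RTL through pyverilog (parse + regenerate) so the comparison is insensitive to formatting differences.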
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
| 23.256
| 69
| 0.567595
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import thread_slice
expected_verilog = """
module test;
reg CLK;
reg RST;
wire [8-1:0] LED;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.LED(LED)
);
initial begin
$dumpfile("uut.vcd");
$dumpvars(0, uut);
end
initial begin
CLK = 0;
forever begin
#5 CLK = !CLK;
end
end
initial begin
RST = 0;
#100;
RST = 1;
#100;
RST = 0;
#10000;
$finish;
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [8-1:0] count;
reg [32-1:0] th_blink;
localparam th_blink_init = 0;
reg signed [32-1:0] _th_blink_times_0;
reg signed [32-1:0] _th_blink_i_1;
reg signed [32-1:0] _th_blink_x_2;
localparam th_blink_1 = 1;
localparam th_blink_2 = 2;
localparam th_blink_3 = 3;
localparam th_blink_4 = 4;
localparam th_blink_5 = 5;
localparam th_blink_6 = 6;
localparam th_blink_7 = 7;
localparam th_blink_8 = 8;
localparam th_blink_9 = 9;
localparam th_blink_10 = 10;
localparam th_blink_11 = 11;
localparam th_blink_12 = 12;
localparam th_blink_13 = 13;
localparam th_blink_14 = 14;
localparam th_blink_15 = 15;
localparam th_blink_16 = 16;
localparam th_blink_17 = 17;
localparam th_blink_18 = 18;
localparam th_blink_19 = 19;
localparam th_blink_20 = 20;
localparam th_blink_21 = 21;
localparam th_blink_22 = 22;
localparam th_blink_23 = 23;
localparam th_blink_24 = 24;
localparam th_blink_25 = 25;
localparam th_blink_26 = 26;
localparam th_blink_27 = 27;
localparam th_blink_28 = 28;
always @(posedge CLK) begin
if(RST) begin
th_blink <= th_blink_init;
_th_blink_times_0 <= 0;
LED <= 0;
count <= 0;
_th_blink_i_1 <= 0;
_th_blink_x_2 <= 0;
LED[_th_blink_x_2] <= (0 >> _th_blink_x_2) & 1'd1;
end else begin
case(th_blink)
th_blink_init: begin
_th_blink_times_0 <= 10;
th_blink <= th_blink_1;
end
th_blink_1: begin
LED <= 0;
th_blink <= th_blink_2;
end
th_blink_2: begin
count <= 0;
th_blink <= th_blink_3;
end
th_blink_3: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_4;
end
th_blink_4: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_5;
end else begin
th_blink <= th_blink_12;
end
end
th_blink_5: begin
_th_blink_x_2 <= 0;
th_blink <= th_blink_6;
end
th_blink_6: begin
if(_th_blink_x_2 < 8) begin
th_blink <= th_blink_7;
end else begin
th_blink <= th_blink_9;
end
end
th_blink_7: begin
LED[_th_blink_x_2] <= count[_th_blink_x_2];
th_blink <= th_blink_8;
end
th_blink_8: begin
_th_blink_x_2 <= _th_blink_x_2 + 1;
th_blink <= th_blink_6;
end
th_blink_9: begin
$display("led = %d", LED);
th_blink <= th_blink_10;
end
th_blink_10: begin
count <= count + 1;
th_blink <= th_blink_11;
end
th_blink_11: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_4;
end
th_blink_12: begin
LED <= 0;
th_blink <= th_blink_13;
end
th_blink_13: begin
count <= 0;
th_blink <= th_blink_14;
end
th_blink_14: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_15;
end
th_blink_15: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_16;
end else begin
th_blink <= th_blink_20;
end
end
th_blink_16: begin
LED <= count[1:0];
th_blink <= th_blink_17;
end
th_blink_17: begin
$display("led = %d", LED);
th_blink <= th_blink_18;
end
th_blink_18: begin
count <= count + 1;
th_blink <= th_blink_19;
end
th_blink_19: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_15;
end
th_blink_20: begin
LED <= 0;
th_blink <= th_blink_21;
end
th_blink_21: begin
count <= 0;
th_blink <= th_blink_22;
end
th_blink_22: begin
_th_blink_i_1 <= 0;
th_blink <= th_blink_23;
end
th_blink_23: begin
if(_th_blink_i_1 < _th_blink_times_0) begin
th_blink <= th_blink_24;
end else begin
th_blink <= th_blink_28;
end
end
th_blink_24: begin
LED <= { count[6], count[4], count[2], count[0] };
th_blink <= th_blink_25;
end
th_blink_25: begin
$display("led = %d", LED);
th_blink <= th_blink_26;
end
th_blink_26: begin
count <= count + 1;
th_blink <= th_blink_27;
end
th_blink_27: begin
_th_blink_i_1 <= _th_blink_i_1 + 1;
th_blink <= th_blink_23;
end
endcase
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = thread_slice.mkTest()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
| true
| true
|