function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def __init__(self, dim, depth, kernel_size=9, patch_size=7, in_chans=3, num_classes=1000, activation=nn.GELU, **kwargs):
    """ConvMixer-style network: a patch-embedding stem, `depth` repeated
    depthwise/pointwise conv blocks, global average pooling, and a linear head.

    Args:
        dim: embedding width (channel count, constant through the network).
        depth: number of mixer blocks.
        kernel_size: depthwise conv kernel size inside each block.
        patch_size: stem conv kernel size and stride (non-overlapping patches).
        in_chans: number of input image channels.
        num_classes: classifier output size; 0 replaces the head with Identity.
        activation: activation layer class, instantiated once per use site.
        **kwargs: ignored (accepted for factory-function compatibility).
    """
    super().__init__()
    self.num_classes = num_classes
    self.num_features = dim
    # Classifier head; Identity when num_classes == 0 (pure feature extractor).
    self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity()
    # Stem: patchify via strided conv, then activation + BatchNorm.
    self.stem = nn.Sequential(
        nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
        activation(),
        nn.BatchNorm2d(dim)
    )
    # Each block: residual depthwise conv ("same" padding keeps spatial size),
    # followed by a 1x1 pointwise conv; BN after each activation.
    # NOTE: module construction order defines state_dict keys - do not reorder.
    self.blocks = nn.Sequential(
        *[nn.Sequential(
            Residual(nn.Sequential(
                nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"),
                activation(),
                nn.BatchNorm2d(dim)
            )),
            nn.Conv2d(dim, dim, kernel_size=1),
            activation(),
            nn.BatchNorm2d(dim)
        ) for i in range(depth)]
    )
    # Global average pool to (1, 1), then flatten to (N, dim).
    self.pooling = nn.Sequential(
        nn.AdaptiveAvgPool2d((1, 1)),
        nn.Flatten()
    )
def reset_classifier(self, num_classes, global_pool=''):
    """Swap in a fresh classification head for `num_classes` classes.

    A non-positive `num_classes` installs an Identity head (feature mode).
    `global_pool` is accepted for API compatibility and is unused here.
    """
    self.num_classes = num_classes
    if num_classes > 0:
        new_head = nn.Linear(self.num_features, num_classes)
    else:
        new_head = nn.Identity()
    self.head = new_head
def forward_features(self, x):
    """Run the backbone (stem, mixer blocks, pooling); returns (N, dim) features."""
    for stage in (self.stem, self.blocks, self.pooling):
        x = stage(x)
    return x
def forward(self, x):
    """Full forward pass: backbone features followed by the classifier head."""
    return self.head(self.forward_features(x))
def convmixer_1536_20(pretrained=False, **kwargs):
    """ConvMixer-1536/20: width 1536, depth 20, 9x9 depthwise kernels, patch size 7."""
    defaults = dict(dim=1536, depth=20, kernel_size=9, patch_size=7)
    # Duplicate keys in kwargs raise TypeError, matching the original behavior.
    return _create_convmixer('convmixer_1536_20', pretrained, **defaults, **kwargs)
def convmixer_768_32(pretrained=False, **kwargs):
    """ConvMixer-768/32 with ReLU: width 768, depth 32, 7x7 kernels, patch size 7."""
    defaults = dict(dim=768, depth=32, kernel_size=7, patch_size=7, activation=nn.ReLU)
    # Duplicate keys in kwargs raise TypeError, matching the original behavior.
    return _create_convmixer('convmixer_768_32', pretrained, **defaults, **kwargs)
def _cfg(url=''):
    """Build the default pretrained-model config dict, with `url` filled in."""
    return dict(
        url=url,
        num_classes=1000,
        input_size=(3, 224, 224),
        pool_size=(7, 7),
        crop_pct=0.875,
        interpolation='bicubic',
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
        first_conv='features.conv0',
        classifier='classifier',
    )
def __init__(self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d,
             drop_rate=0., memory_efficient=False):
    """DenseNet bottleneck layer: norm1 -> 1x1 conv1 -> norm2 -> 3x3 conv2.

    Args:
        num_input_features: channels of the concatenated input features.
        growth_rate: output channels of conv2 (features added by this layer).
        bn_size: bottleneck width multiplier; conv1 emits bn_size * growth_rate.
        norm_layer: combined norm+activation layer class.
        drop_rate: dropout probability applied in forward().
        memory_efficient: if True, forward() uses gradient checkpointing.
    """
    super(DenseLayer, self).__init__()
    # NOTE: the trailing commas below turn each statement into a 1-tuple
    # expression; they are harmless no-ops preserved from the original.
    # Module registration order defines state_dict keys - do not reorder.
    self.add_module('norm1', norm_layer(num_input_features)),
    self.add_module('conv1', nn.Conv2d(
        num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)),
    self.add_module('norm2', norm_layer(bn_size * growth_rate)),
    self.add_module('conv2', nn.Conv2d(
        bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),
    self.drop_rate = float(drop_rate)
    self.memory_efficient = memory_efficient
def any_requires_grad(self, x):
    # type: (List[torch.Tensor]) -> bool
    """Return True if at least one tensor in `x` has requires_grad set."""
    # Explicit loop (no generator) kept for TorchScript compatibility.
    needs_grad = False
    for t in x:
        needs_grad = needs_grad or t.requires_grad
    return needs_grad
def call_checkpoint_bottleneck(self, x):
    # type: (List[torch.Tensor]) -> torch.Tensor
    """Run the bottleneck function under gradient checkpointing.

    The closure re-packs the splatted tensors into a tuple so that
    `cp.checkpoint` receives individual tensor arguments (needed for
    autograd tracking) while `bottleneck_fn` still gets a single sequence.
    NOTE(review): presumably `bottleneck_fn` is the norm1/conv1 pair of
    this layer - confirm against its definition elsewhere in the file.
    """
    def closure(*xs):
        return self.bottleneck_fn(xs)
    return cp.checkpoint(closure, *x)
def forward(self, x):
    # type: (List[torch.Tensor]) -> (torch.Tensor)
    # Typing stub: declares the list-of-tensors overload of forward().
    # The body is intentionally `pass`; the real implementation is the
    # final forward() definition below (marked `# noqa: F811`).
    # NOTE(review): presumably paired with torch.jit overload machinery -
    # confirm against the decorators in the full file.
    pass
def forward(self, x):
    # type: (torch.Tensor) -> (torch.Tensor)
    # Typing stub: declares the single-tensor overload of forward().
    # The body is intentionally `pass`; the real implementation is the
    # final forward() definition below (marked `# noqa: F811`).
    pass
def forward(self, x):  # noqa: F811
    """Compute this layer's new features from prior features.

    Accepts either a single tensor or a list of tensors (the previously
    produced feature maps); a lone tensor is wrapped into a 1-element list.
    Returns the `growth_rate`-channel feature tensor produced by conv2.
    """
    if isinstance(x, torch.Tensor):
        prev_features = [x]
    else:
        prev_features = x
    # Memory-efficient path: recompute the bottleneck during backward via
    # checkpointing. Only worthwhile (and only valid) when grads are needed;
    # checkpointing is unsupported under TorchScript, hence the guard.
    if self.memory_efficient and self.any_requires_grad(prev_features):
        if torch.jit.is_scripting():
            raise Exception("Memory Efficient not supported in JIT")
        bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
    else:
        bottleneck_output = self.bottleneck_fn(prev_features)
    new_features = self.conv2(self.norm2(bottleneck_output))
    if self.drop_rate > 0:
        new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
    return new_features
def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=nn.ReLU,
             drop_rate=0., memory_efficient=False):
    """Stack of `num_layers` DenseLayers with growing input widths.

    Layer i receives num_input_features + i * growth_rate channels, since
    each preceding layer contributes growth_rate new feature channels.

    Args:
        num_layers: number of DenseLayers in this block.
        num_input_features: channels entering the first layer.
        bn_size: bottleneck multiplier forwarded to each DenseLayer.
        growth_rate: channels added per layer.
        norm_layer: norm+activation class forwarded to each layer.
            NOTE(review): the default nn.ReLU looks odd for a "norm" layer;
            callers in this file pass BatchNormAct2d - confirm intent.
        drop_rate: dropout probability forwarded to each layer.
        memory_efficient: enable checkpointing in each layer.
    """
    super(DenseBlock, self).__init__()
    # Registration order defines state_dict keys ('denselayer1', ...).
    for i in range(num_layers):
        layer = DenseLayer(
            num_input_features + i * growth_rate,
            growth_rate=growth_rate,
            bn_size=bn_size,
            norm_layer=norm_layer,
            drop_rate=drop_rate,
            memory_efficient=memory_efficient,
        )
        self.add_module('denselayer%d' % (i + 1), layer)
def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None):
    """Transition between dense blocks: norm -> 1x1 conv -> 2x downsample.

    Args:
        num_input_features: channels entering the transition.
        num_output_features: channels after the 1x1 conv (typically halved).
        norm_layer: normalization layer class.
        aa_layer: optional anti-aliasing downsample layer; if None, a plain
            2x2 average pool performs the stride-2 reduction.
    """
    super(DenseTransition, self).__init__()
    # Registration order defines state_dict keys - do not reorder.
    self.add_module('norm', norm_layer(num_input_features))
    self.add_module('conv', nn.Conv2d(
        num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
    if aa_layer is not None:
        self.add_module('pool', aa_layer(num_output_features, stride=2))
    else:
        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), bn_size=4, stem_type='',
             num_classes=1000, in_chans=3, global_pool='avg',
             norm_layer=BatchNormAct2d, aa_layer=None, drop_rate=0, memory_efficient=False,
             aa_stem_only=True):
    """DenseNet backbone + classifier.

    Args:
        growth_rate: channels added per DenseLayer.
        block_config: number of layers in each of the four dense blocks.
        bn_size: bottleneck multiplier for each DenseLayer.
        stem_type: '' for classic 7x7 stem; contains 'deep' for a 3x3
            three-conv stem, optionally 'tiered'/'narrow' to shape widths.
        num_classes: classifier output size.
        in_chans: input image channels.
        global_pool: pooling type passed to create_classifier.
        norm_layer: norm+activation layer class used throughout.
        aa_layer: optional anti-aliased downsample layer.
        drop_rate: dropout rate forwarded to dense layers.
        memory_efficient: enable gradient checkpointing in dense layers.
        aa_stem_only: if True, use aa_layer only in the stem, not transitions.
    """
    # NOTE(review): these attributes are set before super().__init__();
    # safe for plain ints on nn.Module, but confirm if changing.
    self.num_classes = num_classes
    self.drop_rate = drop_rate
    super(DenseNet, self).__init__()
    # Stem
    deep_stem = 'deep' in stem_type  # 3x3 deep stem
    num_init_features = growth_rate * 2
    if aa_layer is None:
        stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    else:
        # Anti-aliased variant: stride-1 max pool, then blur-downsample.
        stem_pool = nn.Sequential(*[
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            aa_layer(channels=num_init_features, stride=2)])
    if deep_stem:
        stem_chs_1 = stem_chs_2 = growth_rate
        if 'tiered' in stem_type:
            stem_chs_1 = 3 * (growth_rate // 4)
            stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4)
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)),
            ('norm0', norm_layer(stem_chs_1)),
            ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)),
            ('norm1', norm_layer(stem_chs_2)),
            ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)),
            ('norm2', norm_layer(num_init_features)),
            ('pool0', stem_pool),
        ]))
    else:
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', norm_layer(num_init_features)),
            ('pool0', stem_pool),
        ]))
    # Feature-hook metadata; the stem's final norm is the first tap point.
    self.feature_info = [
        dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')]
    current_stride = 4
    # DenseBlocks
    num_features = num_init_features
    for i, num_layers in enumerate(block_config):
        block = DenseBlock(
            num_layers=num_layers,
            num_input_features=num_features,
            bn_size=bn_size,
            growth_rate=growth_rate,
            norm_layer=norm_layer,
            drop_rate=drop_rate,
            memory_efficient=memory_efficient
        )
        module_name = f'denseblock{(i + 1)}'
        self.features.add_module(module_name, block)
        num_features = num_features + num_layers * growth_rate
        transition_aa_layer = None if aa_stem_only else aa_layer
        # A transition (halving channels, 2x downsample) follows every
        # block except the last.
        if i != len(block_config) - 1:
            self.feature_info += [
                dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)]
            current_stride *= 2
            trans = DenseTransition(
                num_input_features=num_features, num_output_features=num_features // 2,
                norm_layer=norm_layer, aa_layer=transition_aa_layer)
            self.features.add_module(f'transition{i + 1}', trans)
            num_features = num_features // 2
    # Final batch norm
    self.features.add_module('norm5', norm_layer(num_features))
    self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')]
    self.num_features = num_features
    # Linear layer
    self.global_pool, self.classifier = create_classifier(
        self.num_features, self.num_classes, pool_type=global_pool)
    # Official init from torch repo.
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            # Only the bias is zeroed; Linear weights keep their default init.
            nn.init.constant_(m.bias, 0)
def reset_classifier(self, num_classes, global_pool='avg'):
    """Rebuild the pooling + classifier pair for a new class count."""
    self.num_classes = num_classes
    pool, fc = create_classifier(
        self.num_features, self.num_classes, pool_type=global_pool)
    self.global_pool = pool
    self.classifier = fc
def forward(self, x):
    """Backbone features -> global pool -> classifier logits."""
    features = self.forward_features(x)
    pooled = self.global_pool(features)
    # NOTE(review): classifier-level dropout is intentionally not applied
    # here; drop_rate is already used inside the dense layers - confirm
    # before adding it back.
    return self.classifier(pooled)
def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs):
    """Build a DenseNet variant through build_model_with_cfg.

    `growth_rate` and `block_config` are folded into kwargs so factory
    callers cannot accidentally diverge from the named variant.
    """
    kwargs.update(growth_rate=growth_rate, block_config=block_config)
    return build_model_with_cfg(
        DenseNet, variant, pretrained,
        default_cfg=default_cfgs[variant],
        feature_cfg=dict(flatten_sequential=True),
        pretrained_filter_fn=_filter_torchvision_pretrained,
        **kwargs)
def densenet121(pretrained=False, **kwargs):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    return _create_densenet(
        'densenet121', growth_rate=32, block_config=(6, 12, 24, 16),
        pretrained=pretrained, **kwargs)
def densenetblur121d(pretrained=False, **kwargs):
    r"""Densenet-121 with blur-pooling and a deep stem, from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    return _create_densenet(
        'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16),
        pretrained=pretrained, stem_type='deep', aa_layer=BlurPool2d, **kwargs)
def densenet121d(pretrained=False, **kwargs):
    r"""Densenet-121 with a deep stem, from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    return _create_densenet(
        'densenet121d', growth_rate=32, block_config=(6, 12, 24, 16),
        stem_type='deep', pretrained=pretrained, **kwargs)
def densenet169(pretrained=False, **kwargs):
    r"""Densenet-169 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    return _create_densenet(
        'densenet169', growth_rate=32, block_config=(6, 12, 32, 32),
        pretrained=pretrained, **kwargs)
def densenet201(pretrained=False, **kwargs):
    r"""Densenet-201 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    return _create_densenet(
        'densenet201', growth_rate=32, block_config=(6, 12, 48, 32),
        pretrained=pretrained, **kwargs)
def densenet161(pretrained=False, **kwargs):
    r"""Densenet-161 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    return _create_densenet(
        'densenet161', growth_rate=48, block_config=(6, 12, 36, 24),
        pretrained=pretrained, **kwargs)
def densenet264(pretrained=False, **kwargs):
    r"""Densenet-264 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    return _create_densenet(
        'densenet264', growth_rate=48, block_config=(6, 12, 64, 48),
        pretrained=pretrained, **kwargs)
def densenet264d_iabn(pretrained=False, **kwargs):
    r"""Densenet-264 model with deep stem and Inplace-ABN normalization."""
    def _iabn_layer(num_features, **norm_kwargs):
        # Factory producing an Inplace-ABN norm+act layer per call site.
        return create_norm_act('iabn', num_features, **norm_kwargs)
    return _create_densenet(
        'densenet264d_iabn', growth_rate=48, block_config=(6, 12, 64, 48),
        stem_type='deep', norm_layer=_iabn_layer, pretrained=pretrained, **kwargs)
def outsideLights(value):
    """Switch the outside lights via the Vera/MiOS HTTP request API.

    Args:
        value: 1 switches the lights on; any other value switches them off.
    """
    # Fixed invalid syntax: `if value = 1` (assignment) -> `== 1`, and the
    # missing colons on the `if`/`else` lines.
    if value == 1:
        BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=01")
    else:
        BareBonesBrowserLaunch.openURL("http://ip_address:3480/data_request?id=action&output_format=xml&DeviceNum=6&serviceId=urn:upnp-org:serviceId:SwitchPower1&action=SetTarget&newTargetValue=0")
def __init__(self, chrome_path, *args, **kwargs):
    """Creates Display instance.

    Args:
        chrome_path: path to chrome executable.
    """
    super(Display, self).__init__(*args, **kwargs)
    self._chrome_path = chrome_path
    self._temp_path = tempfile.gettempdir()
    # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be safer.
    # Left unchanged to preserve behavior.
    self._index_file = tempfile.mktemp(suffix='.html')
    chrome_arguments = [
        '--kiosk', self._index_file, '--new-window', '--incognito',
        '--noerrordialogs', '--user-data-dir={0}'.format(self._temp_path)
    ]
    self._chrome_app = app.Application(
        name='Browser',
        bin_path=chrome_path,
        arguments=chrome_arguments,
        restart_on_crash=True)
def show_message(self, message, template_path='./data/display_message.html'):
    """Shows a text message in full screen.

    Args:
        message: text to show.
        template_path: a html template to use. It should contain "{{ message }}".
    """
    context = {'message': message}
    self._generate_page(template_path=template_path, kwargs=context)
    self._relaunch()
def _generate_page(self, template_path, kwargs=None):
    """Render a jinja2 template into the kiosk index file.

    Args:
        template_path: path of the template file to read.
        kwargs: optional dict of template variables; defaults to no variables.
    """
    # Fixed mutable default argument (`kwargs={}`): a shared dict default can
    # leak state between calls if a caller ever mutates it.
    if kwargs is None:
        kwargs = {}
    with open(template_path, 'r') as f:
        template = jinja2.Template(f.read())
    with open(self._index_file, 'w') as f:
        f.write(template.render(**kwargs))
def fetch_arns_for_findings(inspector_client):
    """
    Fetch all ARNs for findings discovered in the latest run of all enabled Assessment Templates.
    :param inspector_client: boto3 Inspector client for the target region.
    :return: set of finding ARN strings.
    """
    # at: Assessment template
    # arn: Amazon resource name
    findings = set()
    # Get all assessment templates for current region
    at_arns = inspector_client.list_assessment_templates()['assessmentTemplateArns']
    if at_arns:
        at_details = inspector_client.describe_assessment_templates(assessmentTemplateArns=at_arns)
        # For each template, get the ARN for the latest run
        if "assessmentTemplates" in at_details:
            at_runs = [t['lastAssessmentRunArn'] for t in at_details['assessmentTemplates']]
            paginator = inspector_client.get_paginator('list_findings')
            for page in paginator.paginate(assessmentRunArns=at_runs, maxResults=500):
                # BUG FIX: the original kept only the first ARN of each page
                # (`findings.add(page['findingArns'][0])`), dropping the rest;
                # collect every finding ARN on the page.
                findings.update(page['findingArns'])
    return findings
def update_instances(findings):
    """
    For each finding build-up a dict keyed by instance ID with an array value of all applicable
    findings. Then create or update the aws_inspector_findings custom field for each
    corresponding CloudBolt server record.
    :param findings: describe_findings() response dict with a 'findings' list.
    :return: None
    """
    instances = {}
    # Group findings by instance.
    # BUG FIX: the original appended only in the `else` branch, silently
    # dropping the first finding seen for each instance.
    for finding in findings['findings']:
        instance_id = get_instance_id(finding)
        instances.setdefault(instance_id, []).append(finding)
    # For each instance, find its CloudBolt Server record and update aws_inspector_findings
    for instance_id, instance_findings in instances.items():
        try:
            s = Server.objects.get(resource_handler_svr_id=instance_id)
            cf, _ = CustomField.objects.get_or_create(
                name='aws_inspector_findings', type='TXT',
                label="AWS Inspector Findings")
            s.set_value_for_custom_field(cf.name, json.dumps(
                instance_findings, indent=True, cls=DjangoJSONEncoder))
        except Server.DoesNotExist:
            # Unable to locate and update the server, carry on
            pass
def run(job, *args, **kwargs):
    """CloudBolt job entry point: sync AWS Inspector findings for all handlers.

    Iterates every AWSHandler and every region its environments use, fetches
    the latest Inspector finding ARNs, resolves them to finding details, and
    writes them onto the matching CloudBolt server records.

    Returns:
        A ("", "", "") tuple (CloudBolt job success convention).
    """
    # Bare annotation for readability; assigned by the loop below.
    rh: AWSHandler
    for rh in AWSHandler.objects.all():
        # De-duplicate regions across this handler's environments.
        regions = set([env.aws_region for env in rh.environment_set.all()])
        # For each region currently used by the current AWSHandler
        for region in regions:
            inspector = rh.get_boto3_client(service_name='inspector', region_name=region)
            set_progress(f'Fetching findings for {rh.name} ({region}).')
            all_finding_arns = fetch_arns_for_findings(inspector)
            inspector_findings = describe_findings(inspector, all_finding_arns)
            set_progress(f'Updating CloudBolt instances in {region}.')
            if inspector_findings:
                update_instances(inspector_findings)
    return "", "", ""
def main(unused_argv):
    """Train a PixelCNN density model (TF1 graph/session style).

    Builds output directories from FLAGS, constructs input pipelines for the
    in-distribution train/valid splits, defines the PixelCNN log-likelihood
    loss with Adam + exponential LR decay, then runs the training loop with
    periodic checkpointing and TensorBoard summaries. Resumes from the latest
    checkpoint in the job's model dir if one exists.
    """
    out_dir = FLAGS.out_dir
    exp_dir = 'exp%s' % FLAGS.exp
    model_dir = 'rescale%s' % FLAGS.rescale_pixel_value
    param_dir = 'reg%.2f_mr%.2f' % (FLAGS.reg_weight, FLAGS.mutation_rate)
    job_dir = os.path.join(out_dir, exp_dir, model_dir, param_dir)
    print('job_dir={}'.format(job_dir))
    job_model_dir = os.path.join(job_dir, 'model')
    job_log_dir = os.path.join(job_dir, 'log')
    for sub_dir in out_dir, job_dir, job_model_dir, job_log_dir:
        tf.compat.v1.gfile.MakeDirs(sub_dir)
    # All hyperparameters gathered in one dict (also dumped to params.json).
    params = {
        'job_model_dir': job_model_dir,
        'job_log_dir': job_log_dir,
        'job_dir': job_dir,
        'dropout_p': FLAGS.dropout_p,
        'reg_weight': FLAGS.reg_weight,
        'num_resnet': FLAGS.num_resnet,
        'num_hierarchies': FLAGS.num_hierarchies,
        'num_filters': FLAGS.num_filters,
        'num_logistic_mix': FLAGS.num_logistic_mix,
        'use_weight_norm': FLAGS.use_weight_norm,
        'data_init': FLAGS.data_init,
        'mutation_rate': FLAGS.mutation_rate,
        'batch_size': FLAGS.batch_size,
        'learning_rate': FLAGS.learning_rate,
        'learning_rate_decay': FLAGS.learning_rate_decay,
        'momentum': FLAGS.momentum,
        'momentum2': FLAGS.momentum2,
        'eval_every': FLAGS.eval_every,
        'save_im': FLAGS.save_im,
        # Image geometry depends on the experiment: FashionMNIST is 28x28x1,
        # otherwise CIFAR 32x32x3.
        'n_dim': 28 if FLAGS.exp == 'fashion' else 32,
        'n_channel': 1 if FLAGS.exp == 'fashion' else 3,
        'exp': FLAGS.exp,
        'rescale_pixel_value': FLAGS.rescale_pixel_value,
    }
    # Print and write parameter settings
    with tf.io.gfile.GFile(
        os.path.join(params['job_model_dir'], 'params.json'), mode='w') as f:
        f.write(json.dumps(params, sort_keys=True))
    # Fix the random seed - easier to debug separate runs
    tf.compat.v1.set_random_seed(FLAGS.random_seed)
    tf.keras.backend.clear_session()
    sess = tf.compat.v1.Session()
    tf.compat.v1.keras.backend.set_session(sess)
    # Load the datasets
    if FLAGS.exp == 'fashion':
        datasets = utils.load_fmnist_datasets(FLAGS.data_dir)
    else:
        datasets = utils.load_cifar_datasets(FLAGS.data_dir)
    # Training pipeline: per-pixel mutation noise for regularization.
    # pylint: disable=g-long-lambda
    tr_in_ds = datasets['tr_in'].map(lambda x: utils.image_preprocess_add_noise(
        x, params['mutation_rate'])).batch(
            params['batch_size']).repeat().shuffle(1000).make_one_shot_iterator()
    tr_in_im = tr_in_ds.get_next()
    # repeat valid dataset because it will be used for training
    val_in_ds = datasets['val_in'].map(utils.image_preprocess).batch(
        params['batch_size']).repeat().make_one_shot_iterator()
    val_in_im = val_in_ds.get_next()
    # Define a Pixel CNN network
    input_shape = (params['n_dim'], params['n_dim'], params['n_channel'])
    dist = pixel_cnn.PixelCNN(
        image_shape=input_shape,
        dropout_p=params['dropout_p'],
        reg_weight=params['reg_weight'],
        num_resnet=params['num_resnet'],
        num_hierarchies=params['num_hierarchies'],
        num_filters=params['num_filters'],
        num_logistic_mix=params['num_logistic_mix'],
        use_weight_norm=params['use_weight_norm'],
        rescale_pixel_value=params['rescale_pixel_value'],
    )
    # Define the training loss and optimizer
    # Loss is the negative mean log-likelihood under the PixelCNN.
    log_prob_i = dist.log_prob(tr_in_im['image'], return_per_pixel=False)
    loss = -tf.reduce_mean(log_prob_i)
    log_prob_i_val_in = dist.log_prob(val_in_im['image'])
    loss_val_in = -tf.reduce_mean(log_prob_i_val_in)
    global_step = tf.compat.v1.train.get_or_create_global_step()
    learning_rate = tf.compat.v1.train.exponential_decay(
        params['learning_rate'], global_step, 1, params['learning_rate_decay'])
    opt = tf.compat.v1.train.AdamOptimizer(
        learning_rate=learning_rate,
        beta1=params['momentum'],
        beta2=params['momentum2'])
    tr_op = opt.minimize(loss, global_step=global_step)
    init_op = tf.compat.v1.global_variables_initializer()
    sess.run(init_op)
    # write tensorflow summaries
    saver = tf.compat.v1.train.Saver(max_to_keep=50000)
    merged_tr = tf.compat.v1.summary.merge([
        tf.compat.v1.summary.scalar('loss', loss),
        tf.compat.v1.summary.scalar('train/learning_rate', learning_rate)
    ])
    merged_val_in = tf.compat.v1.summary.merge(
        [tf.compat.v1.summary.scalar('loss', loss_val_in)])
    tr_writer = tf.compat.v1.summary.FileWriter(job_log_dir + '/tr_in',
                                                sess.graph)
    val_in_writer = tf.compat.v1.summary.FileWriter(job_log_dir + '/val_in',
                                                    sess.graph)
    # If previous ckpt exists, load ckpt
    ckpt_file = tf.compat.v2.train.latest_checkpoint(job_model_dir)
    if ckpt_file:
        # Step number is encoded in the checkpoint filename.
        prev_step = int(
            os.path.basename(ckpt_file).split('model_step')[1].split('.ckpt')[0])
        tf.compat.v1.logging.info(
            'previous ckpt exist, prev_step={}'.format(prev_step))
        saver.restore(sess, ckpt_file)
    else:
        prev_step = 0
    # Train the model
    with sess.as_default():  # this is a must otherwise localhost error
        for step in range(prev_step, FLAGS.total_steps + 1, 1):
            _, loss_tr_np, summary = sess.run([tr_op, loss, merged_tr])
            if step % params['eval_every'] == 0:
                ckpt_name = 'model_step%d.ckpt' % step
                ckpt_path = os.path.join(job_model_dir, ckpt_name)
                # Retry until the checkpoint index file is visible (guards
                # against eventually-consistent filesystems).
                while not tf.compat.v1.gfile.Exists(ckpt_path + '.index'):
                    _ = saver.save(sess, ckpt_path, write_meta_graph=False)
                    time.sleep(10)
                tr_writer.add_summary(summary, step)
                # Evaluate loss on the valid_in
                loss_val_in_np, summary_val_in = sess.run([loss_val_in, merged_val_in])
                val_in_writer.add_summary(summary_val_in, step)
                print('step=%d, tr_in_loss=%.4f, val_in_loss=%.4f' %
                      (step, loss_tr_np, loss_val_in_np))
                tr_writer.flush()
                val_in_writer.flush()
        tr_writer.close()
        val_in_writer.close()
def get_msvc_version_numeric(msvc_version):
    """Strip all non-numeric characters from a MSVC_VERSION string.

    For example, '14.0Exp' becomes '14.0', suitable for casting to float
    or other numeric comparisons.

    Args:
        msvc_version: str
            version string, possibly containing non-digit characters
    Returns:
        str: the value reduced to digits and dots only
    """
    allowed = set(string_digits + '.')
    return ''.join(ch for ch in msvc_version if ch in allowed)
def msvc_version_to_maj_min(msvc_version):
    """Split an MSVC_VERSION string into integer (major, minor).

    Args:
        msvc_version: version string, e.g. '14.1Exp'
    Returns:
        tuple(int, int): (major, minor) version numbers
    Raises:
        ValueError: if the string does not parse as a major.minor pair
    """
    msvc_version_numeric = get_msvc_version_numeric(msvc_version)
    parts = msvc_version_numeric.split(".")
    if len(parts) != 2:  # clearer than `not len(t) == 2`
        raise ValueError("Unrecognized version %s (%s)" % (msvc_version, msvc_version_numeric))
    try:
        # Renamed from maj/min: `min` shadowed the builtin of the same name.
        major = int(parts[0])
        minor = int(parts[1])
    except ValueError:
        raise ValueError("Unrecognized version %s (%s)" % (msvc_version, msvc_version_numeric))
    return major, minor
def find_vc_pdir_vswhere(msvc_version):
    """
    Find the MSVC product directory using the vswhere program.
    :param msvc_version: MSVC version to search for
    :return: MSVC install dir or None
    :raises UnsupportedVersion: if the version is not known by this file
    """
    try:
        vswhere_version = _VCVER_TO_VSWHERE_VER[msvc_version]
    except KeyError:
        debug("Unknown version of MSVC: %s" % msvc_version)
        raise UnsupportedVersion("Unknown version %s" % msvc_version)
    # For bug 3333 - support default location of vswhere for both 64 and 32 bit windows
    # installs.
    for pf in ['Program Files (x86)', 'Program Files']:
        vswhere_path = os.path.join(
            'C:\\',
            pf,
            'Microsoft Visual Studio',
            'Installer',
            'vswhere.exe'
        )
        if os.path.exists(vswhere_path):
            # If we found vswhere, then use it.
            break
    else:
        # for-else: runs only if the loop finished without `break`.
        # No vswhere on system, no install info available
        return None
    vswhere_cmd = [vswhere_path,
                   '-products', '*',
                   '-version', vswhere_version,
                   '-property', 'installationPath']
    #TODO PY27 cannot use Popen as context manager
    # try putting it back to the old way for now
    sp = subprocess.Popen(vswhere_cmd,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    vsdir, err = sp.communicate()
    if vsdir:
        # 'mbcs' is the Windows ANSI codec; this function is Windows-only.
        vsdir = vsdir.decode("mbcs").splitlines()
        # vswhere could easily return multiple lines
        # we could define a way to pick the one we prefer, but since
        # this data is currently only used to make a check for existence,
        # returning the first hit should be good enough for now.
        vc_pdir = os.path.join(vsdir[0], 'VC')
        return vc_pdir
    else:
        # No vswhere on system, no install info available
        return None
def find_batch_file(env,msvc_version,host_arch,target_arch):
    """
    Find the location of the batch script which should set up the compiler
    for any TARGET_ARCH whose compilers were installed by Visual Studio/VCExpress

    Returns a (batfilename, sdk_batfilename) tuple; either element may be
    None if the corresponding script was not found.
    :raises NoVersionFound: if no Visual Studio install directory exists
    """
    pdir = find_vc_pdir(msvc_version)
    if pdir is None:
        raise NoVersionFound("No version of Visual Studio found")
    debug('find_batch_file() in {}'.format(pdir))
    # filter out e.g. "Exp" from the version name
    msvc_ver_numeric = get_msvc_version_numeric(msvc_version)
    vernum = float(msvc_ver_numeric)
    # Batch-file name and location changed across major MSVC generations.
    if 7 <= vernum < 8:
        pdir = os.path.join(pdir, os.pardir, "Common7", "Tools")
        batfilename = os.path.join(pdir, "vsvars32.bat")
    elif vernum < 7:
        pdir = os.path.join(pdir, "Bin")
        batfilename = os.path.join(pdir, "vcvars32.bat")
    elif 8 <= vernum <= 14:
        batfilename = os.path.join(pdir, "vcvarsall.bat")
    else:  # vernum >= 14.1 VS2017 and above
        batfilename = os.path.join(pdir, "Auxiliary", "Build", "vcvarsall.bat")
    if not os.path.exists(batfilename):
        debug("Not found: %s" % batfilename)
        batfilename = None
    # Also look for an SDK-provided setup script for this host/target pair.
    installed_sdks = get_installed_sdks()
    for _sdk in installed_sdks:
        sdk_bat_file = _sdk.get_sdk_vc_script(host_arch,target_arch)
        if not sdk_bat_file:
            debug("find_batch_file() not found:%s"%_sdk)
        else:
            sdk_bat_file_path = os.path.join(pdir,sdk_bat_file)
            if os.path.exists(sdk_bat_file_path):
                debug('find_batch_file() sdk_bat_file_path:%s'%sdk_bat_file_path)
                # First matching SDK script wins.
                return (batfilename, sdk_bat_file_path)
    return (batfilename, None)
def _check_cl_exists_in_vc_dir(env, vc_dir, msvc_version):
    """Find the cl.exe on the filesystem in the vc_dir depending on
    TARGET_ARCH, HOST_ARCH and the msvc version. TARGET_ARCH and
    HOST_ARCH can be extracted from the passed env, unless its None,
    which then the native platform is assumed the host and target.

    Args:
        env: Environment
            a construction environment, usually if this is passed its
            because there is a desired TARGET_ARCH to be used when searching
            for a cl.exe
        vc_dir: str
            the path to the VC dir in the MSVC installation
        msvc_version: str
            msvc version (major.minor, e.g. 10.0)
    Returns:
        bool: True when a suitable cl.exe was found.
            NOTE(review): the >14 branch falls off the end (implicit None,
            falsy) when cl.exe is missing - confirm that is intended.
    """
    # determine if there is a specific target platform we want to build for and
    # use that to find a list of valid VCs, default is host platform == target platform
    # and same for if no env is specified to extract target platform from
    if env:
        (host_platform, target_platform, req_target_platform) = get_host_target(env)
    else:
        host_platform = platform.machine().lower()
        target_platform = host_platform
    # Normalize arch spellings (e.g. 'AMD64' -> 'amd64').
    host_platform = _ARCH_TO_CANONICAL[host_platform]
    target_platform = _ARCH_TO_CANONICAL[target_platform]
    debug('_check_cl_exists_in_vc_dir(): host platform %s, target platform %s for version %s' % (host_platform, target_platform, msvc_version))
    ver_num = float(get_msvc_version_numeric(msvc_version))
    # make sure the cl.exe exists meaning the tool is installed
    if ver_num > 14:
        # 2017 and newer allowed multiple versions of the VC toolset to be installed at the same time.
        # Just get the default tool version for now
        #TODO: support setting a specific minor VC version
        default_toolset_file = os.path.join(vc_dir, _VC_TOOLS_VERSION_FILE)
        try:
            with open(default_toolset_file) as f:
                vc_specific_version = f.readlines()[0].strip()
        except IOError:
            debug('_check_cl_exists_in_vc_dir(): failed to read ' + default_toolset_file)
            return False
        except IndexError:
            debug('_check_cl_exists_in_vc_dir(): failed to find MSVC version in ' + default_toolset_file)
            return False
        host_trgt_dir = _HOST_TARGET_TO_CL_DIR_GREATER_THAN_14.get((host_platform, target_platform), None)
        if host_trgt_dir is None:
            debug('_check_cl_exists_in_vc_dir(): unsupported host/target platform combo: (%s,%s)'%(host_platform, target_platform))
            return False
        cl_path = os.path.join(vc_dir, 'Tools','MSVC', vc_specific_version, 'bin', host_trgt_dir[0], host_trgt_dir[1], _CL_EXE_NAME)
        debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path)
        if os.path.exists(cl_path):
            debug('_check_cl_exists_in_vc_dir(): found ' + _CL_EXE_NAME + '!')
            return True
    elif ver_num <= 14 and ver_num >= 8:
        # Set default value to be -1 as "" which is the value for x86/x86 yields true when tested
        # if not host_trgt_dir
        host_trgt_dir = _HOST_TARGET_TO_CL_DIR.get((host_platform, target_platform), None)
        if host_trgt_dir is None:
            debug('_check_cl_exists_in_vc_dir(): unsupported host/target platform combo')
            return False
        cl_path = os.path.join(vc_dir, 'bin', host_trgt_dir, _CL_EXE_NAME)
        debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path)
        cl_path_exists = os.path.exists(cl_path)
        if not cl_path_exists and host_platform == 'amd64':
            # older versions of visual studio only had x86 binaries,
            # so if the host platform is amd64, we need to check cross
            # compile options (x86 binary compiles some other target on a 64 bit os)
            # Set default value to be -1 as "" which is the value for x86/x86 yields true when tested
            # if not host_trgt_dir
            host_trgt_dir = _HOST_TARGET_TO_CL_DIR.get(('x86', target_platform), None)
            if host_trgt_dir is None:
                return False
            cl_path = os.path.join(vc_dir, 'bin', host_trgt_dir, _CL_EXE_NAME)
            debug('_check_cl_exists_in_vc_dir(): checking for ' + _CL_EXE_NAME + ' at ' + cl_path)
            cl_path_exists = os.path.exists(cl_path)
        if cl_path_exists:
            debug('_check_cl_exists_in_vc_dir(): found ' + _CL_EXE_NAME + '!')
            return True
    elif ver_num < 8 and ver_num >= 6:
        # not sure about these versions so if a walk the VC dir (could be slow)
        for root, _, files in os.walk(vc_dir):
            if _CL_EXE_NAME in files:
                debug('get_installed_vcs ' + _CL_EXE_NAME + ' found %s' % os.path.join(root, _CL_EXE_NAME))
                return True
        return False
    else:
        # version not support return false
        debug('_check_cl_exists_in_vc_dir(): unsupported MSVC version: ' + str(ver_num))
    return False
def get_installed_vcs(env=None):
    """Return the list of MSVC version strings installed on this machine.

    Iterates every known version in _VCVER, locates its product directory,
    and keeps the version only if a matching cl.exe exists for the
    host/target architecture implied by `env` (or the native platform).

    Args:
        env: optional construction environment carrying TARGET_ARCH.
    Returns:
        list of installed version strings, in _VCVER order.
    """
    installed_versions = []
    for ver in _VCVER:
        debug('trying to find VC %s' % ver)
        try:
            VC_DIR = find_vc_pdir(ver)
            if VC_DIR:
                debug('found VC %s' % ver)
                if _check_cl_exists_in_vc_dir(env, VC_DIR, ver):
                    installed_versions.append(ver)
                else:
                    debug('find_vc_pdir no compiler found %s' % ver)
            else:
                debug('find_vc_pdir return None for ver %s' % ver)
        except (MSVCUnsupportedTargetArch, MSVCUnsupportedHostArch):
            # Allow this exception to propagate further as it should cause
            # SCons to exit with an error code
            raise
        except VisualCException as e:
            # Any other probe failure just means this version isn't usable.
            debug('did not find VC %s: caught exception %s' % (ver, str(e)))
    return installed_versions
def script_env(script, args=None):
    """Run an MSVC setup batch script and return its resulting environment.

    Results are memoized in the module-level `script_env_cache` (persisted
    via `common.read/write_script_env_cache`) keyed on (script, args).

    Args:
        script: path to the vcvars batch file to execute.
        args: optional argument string appended to the script invocation.
    Returns:
        dict mapping environment variable names to values.
    Raises:
        BatchFileExecutionError: if the script output signals failure.
    """
    global script_env_cache
    if script_env_cache is None:
        script_env_cache = common.read_script_env_cache()
    cache_key = "{}--{}".format(script, args)
    cache_data = script_env_cache.get(cache_key, None)
    if cache_data is None:
        stdout = common.get_output(script, args)
        # Stupid batch files do not set return code: we take a look at the
        # beginning of the output for an error message instead
        # NOTE(review): olines[0] raises IndexError on empty output - confirm
        # get_output never returns an empty string.
        olines = stdout.splitlines()
        if olines[0].startswith("The specified configuration type is missing"):
            raise BatchFileExecutionError("\n".join(olines[:2]))
        cache_data = common.parse_output(stdout)
        script_env_cache[cache_key] = cache_data
        # once we updated cache, give a chance to write out if user wanted
        common.write_script_env_cache(script_env_cache)
    else:
        #TODO: Python 2 cleanup
        # If we "hit" data from the json file, we have a Py2 problem:
        # keys & values will be unicode. don't detect, just convert.
        if sys.version_info[0] == 2:
            def convert(data):
                # Recursively convert unicode strings to str in any
                # nested mapping/iterable structure.
                if isinstance(data, basestring):
                    return str(data)
                elif isinstance(data, collections.Mapping):
                    return dict(map(convert, data.iteritems()))
                elif isinstance(data, collections.Iterable):
                    return type(data)(map(convert, data))
                else:
                    return data
            cache_data = convert(cache_data)
    return cache_data
def msvc_setup_env_once(env):
    """Run msvc_setup_env() for *env* only once, guarded by an env flag."""
    try:
        already_done = env["MSVC_SETUP_RUN"]
    except KeyError:
        already_done = False
    if already_done:
        return
    msvc_setup_env(env)
    env["MSVC_SETUP_RUN"] = True
8411,
456,
8411,
240,
1366731633
] |
def msvc_setup_env(env):
    """Set up *env* for building with the selected MSVC version.

    Determines the MSVC version, runs the corresponding vcvars batch
    script (or a user-supplied one via MSVC_USE_SCRIPT), and merges the
    resulting variables into the construction environment. Returns None
    when no compiler is found or when setup is deliberately skipped.
    """
    debug('msvc_setup_env()')
    version = get_default_version(env)
    if version is None:
        warn_msg = "No version of Visual Studio compiler found - C/C++ " \
                   "compilers most likely not set correctly"
        # Nuitka: Useless warning for us.
        # SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
        return None
    debug('msvc_setup_env: using specified MSVC version %s' % repr(version))
    # XXX: we set-up both MSVS version for backward
    # compatibility with the msvs tool
    env['MSVC_VERSION'] = version
    env['MSVS_VERSION'] = version
    env['MSVS'] = {}
    # MSVC_USE_SCRIPT may be a string (explicit batch file to run), a
    # truthy value (auto-detect the matching vcvars script), or False
    # (trust the current process environment as-is).
    use_script = env.get('MSVC_USE_SCRIPT', True)
    if SCons.Util.is_String(use_script):
        debug('msvc_setup_env() use_script 1 %s' % repr(use_script))
        d = script_env(use_script)
    elif use_script:
        d = msvc_find_valid_batch_script(env,version)
        debug('msvc_setup_env() use_script 2 %s' % d)
        if not d:
            return d
    else:
        debug('MSVC_USE_SCRIPT set to False')
        warn_msg = "MSVC_USE_SCRIPT set to False, assuming environment " \
                   "set correctly."
        # Nuitka: We use this on purpose.
        # SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning, warn_msg)
        return None
    for k, v in d.items():
        # Nuitka: Make the Windows SDK version visible in environment.
        if k == "WindowsSDKVersion":
            # Always just a single version if any.
            if len(v) == 1:
                env["WindowsSDKVersion"] = v[0].rstrip('\\')
            elif len(v) == 0:
                env["WindowsSDKVersion"] = None
            else:
                assert False, v
            continue
        debug('msvc_setup_env() env:%s -> %s'%(k,v))
        # Prepend so the chosen toolchain takes precedence over any
        # pre-existing PATH/INCLUDE/LIB entries.
        env.PrependENVPath(k, v, delete_existing=True)
    # final check to issue a warning if the compiler is not present
    msvc_cl = find_program_path(env, 'cl')
    if not msvc_cl:
        SCons.Warnings.warn(SCons.Warnings.VisualCMissingWarning,
            "Could not find MSVC compiler 'cl', it may need to be installed separately with Visual Studio")
8411,
456,
8411,
240,
1366731633
] |
def extractDellstoriesWordpressCom(item):
    '''
    Parser for 'dellstories.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items with no chapter/volume information.
    if "preview" in item['title'].lower() or not (chp or vol):
        return None
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, series_name, tl_type in known_tags:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
191,
16,
191,
3,
1437712243
] |
def respond_to_message(self,message,response_msg,poll):
    """Send response_msg as the reply to an incoming poll message.

    When the response is the poll's default response, try to queue the
    outgoing Message on the MessageBatch whose name is the poll's pk so
    it is sent as part of that batch; if no such batch exists (or the
    response is non-default), reply directly via message.respond().
    """
    if response_msg == poll.default_response:
        try:
            # Batches are keyed by the poll's primary key.
            batch=MessageBatch.objects.get(name=str(poll.pk))
            batch.status="Q"
            batch.save()
            # Create the outgoing message already queued ("Q") and link it
            # back to the inbound message it answers.
            msg=Message.objects.create(text=response_msg,status="Q",connection=message.connection,direction="O",in_response_to=message.db_message)
            batch.messages.add(msg)
        except MessageBatch.DoesNotExist:
            message.respond(response_msg)
    else:
        message.respond(response_msg)
7,
3,
7,
3,
1324013652
] |
def latest_platform_fallback_path(cls):
    """Return the baseline fallback path for the newest supported version."""
    newest_version = cls.SUPPORTED_VERSIONS[-1]
    return cls.FALLBACK_PATHS[newest_version]
131,
27,
131,
3,
1490828945
] |
def determine_full_port_name(cls, host, options, port_name):
    """Return a fully-specified port name that can be used to construct objects."""
    # Subclasses will usually override this.
    # The base implementation only validates that the requested name
    # belongs to this port class, then returns it unchanged.
    assert port_name.startswith(cls.port_name)
    return port_name
131,
27,
131,
3,
1490828945
] |
def __str__(self):
    """Return a debug-friendly summary of this port's configuration."""
    identifying_fields = (self._name, self._version, self._architecture,
                          self._test_configuration)
    return 'Port{name=%s, version=%s, architecture=%s, test_configuration=%s}' % identifying_fields
131,
27,
131,
3,
1490828945
] |
def flag_specific_config_name(self):
    """Returns the name of the flag-specific configuration which best matches
    self._specified_additional_driver_flags(), or the first specified flag
    with leading '-'s stripped if no match in the configuration is found.
    """
    flags = self._specified_additional_driver_flags()
    if not flags:
        return None
    configs = self._flag_specific_configs()
    best_name = None
    for config_name, config_args in configs.items():
        # A config matches only when the specified flags begin with all of
        # its args.
        if flags[:len(config_args)] != config_args:
            continue
        # Prefer the config consuming the most flags; ties keep the first.
        if not best_name or len(configs[best_name]) < len(config_args):
            best_name = config_name
    if best_name:
        return best_name
    # No configured match: fall back to the first flag, minus leading '-'s.
    return flags[0].lstrip('-')
131,
27,
131,
3,
1490828945
] |
def _flag_specific_configs(self):
    """Reads configuration from FlagSpecificConfig and returns a dictionary from name to args."""
    config_file = self._filesystem.join(self.web_tests_dir(),
                                        'FlagSpecificConfig')
    if not self._filesystem.exists(config_file):
        return {}
    try:
        raw_entries = json.loads(
            self._filesystem.read_text_file(config_file))
    except ValueError as error:
        raise ValueError('{} is not a valid JSON file: {}'.format(
            config_file, error))
    parsed = {}
    for entry in raw_entries:
        name = entry['name']
        args = entry['args']
        # Names become file-system paths, so restrict the character set.
        if not VALID_FILE_NAME_REGEX.match(name):
            raise ValueError(
                '{}: name "{}" contains invalid characters'.format(
                    config_file, name))
        # Reject both duplicate names and duplicate arg lists.
        if name in parsed:
            raise ValueError('{} contains duplicated name {}.'.format(
                config_file, name))
        if args in parsed.values():
            raise ValueError(
                '{}: name "{}" has the same args as another entry.'.format(
                    config_file, name))
        parsed[name] = args
    return parsed
131,
27,
131,
3,
1490828945
] |
def additional_driver_flags(self):
    """Return driver command-line flags: user-specified plus built-in ones."""
    flags = self._specified_additional_driver_flags()
    if self.driver_name() == self.CONTENT_SHELL_NAME:
        spki_list = ','.join(
            [WPT_FINGERPRINT, SXG_FINGERPRINT, SXG_WPT_FINGERPRINT])
        flags += [
            '--run-web-tests',
            '--ignore-certificate-errors-spki-list=' + spki_list,
            # Required for WebTransport tests.
            '--origin-to-force-quic-on=web-platform.test:11000',
            '--user-data-dir'
        ]
        if self.get_option('nocheck_sys_deps', False):
            flags.append('--disable-system-font-check')
    repeat_each = self.get_option('repeat_each')
    iterations = self.get_option('iterations')
    # If we're already repeating the tests more than once, then we're not
    # particularly concerned with speed. Resetting the shell between tests
    # increases test run time by 2-5X, but provides more consistent results
    # [less state leaks between tests].
    if (self.get_option('reset_shell_between_tests')
            or (repeat_each and repeat_each > 1)
            or (iterations and iterations > 1)):
        flags += ['--reset-shell-between-tests']
    return flags
131,
27,
131,
3,
1490828945
] |
def default_smoke_test_only(self):
    # Ports that should run only the smoke-test subset by default
    # override this to return True.
    return False
131,
27,
131,
3,
1490828945
] |
def timeout_ms(self):
    """Per-test timeout in milliseconds, scaled up for slower builds."""
    base_timeout = self._default_timeout_ms()
    if self.get_option('configuration') == 'Debug':
        # Debug is about 5x slower than Release.
        return base_timeout * 5
    if self._build_has_dcheck_always_on():
        # Release with DCHECK is also slower than pure Release.
        return base_timeout * 2
    return base_timeout
131,
27,
131,
3,
1490828945
] |
def _build_has_dcheck_always_on(self):
args_gn_file = self._build_path('args.gn')
if not self._filesystem.exists(args_gn_file):
_log.error('Unable to find %s', args_gn_file)
return False
contents = self._filesystem.read_text_file(args_gn_file)
return bool(
re.search(r'^\s*dcheck_always_on\s*=\s*true\s*(#.*)?$', contents,
re.MULTILINE)) | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def default_batch_size(self):
    """Returns the default batch size to use for this port."""
    # ASAN/MSAN/TSAN use more memory than regular content_shell, and usage
    # may grow over time; relaunching the driver periodically keeps it
    # under control.
    if self.get_option('enable_sanitizer'):
        return 40
    # 0 means an unlimited (infinite) batch size.
    return 0
131,
27,
131,
3,
1490828945
] |
def default_max_locked_shards(self):
    """Returns the number of "locked" shards to run in parallel (like the http tests)."""
    locked_shards = int(self.default_child_processes()) // 4
    # Always allow at least one locked shard.
    return locked_shards or 1
131,
27,
131,
3,
1490828945
] |
def baseline_flag_specific_dir(self):
    """If --additional-driver-flag is specified, returns the absolute path to the flag-specific
    platform-independent results. Otherwise returns None."""
    search_path = self._flag_specific_baseline_search_path()
    if not search_path:
        return None
    return search_path[-1]
131,
27,
131,
3,
1490828945
] |
def default_baseline_search_path(self):
    """Returns a list of absolute paths to directories to search under for baselines.
    The directories are searched in order.
    """
    # NOTE: this returns a lazy map object, which callers iterate once.
    fallback_names = self.FALLBACK_PATHS[self.version()]
    return map(self._absolute_baseline_path, fallback_names)
131,
27,
131,
3,
1490828945
] |
def _compare_baseline(self):
    """Baseline search path of the --compare-port port, or [] if unset."""
    factory = PortFactory(self.host)
    target_port_name = self.get_option('compare_port')
    if not target_port_name:
        return []
    return factory.get(target_port_name).default_baseline_search_path()
131,
27,
131,
3,
1490828945
] |
def check_build(self, needs_http, printer):
    """Verify the build products needed to run web tests are present.

    Returns exit_codes.OK_EXIT_STATUS on success, otherwise
    exit_codes.UNEXPECTED_ERROR_EXIT_STATUS.
    """
    # 'and' keeps the original short-circuit: later checks are skipped
    # as soon as one fails.
    build_ok = (
        self._check_file_exists(self._path_to_driver(), 'test driver')
        and self._check_driver_build_up_to_date(
            self.get_option('configuration'))
        and self._check_file_exists(self._path_to_image_diff(),
                                    'image_diff'))
    if not build_ok:
        return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
    if self._dump_reader and not self._dump_reader.check_is_functional():
        return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
    if needs_http and not self.check_httpd():
        return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
    return exit_codes.OK_EXIT_STATUS
131,
27,
131,
3,
1490828945
] |
def check_httpd(self):
    """Return True when an Apache httpd usable for http tests is found."""
    httpd_path = self.path_to_apache()
    if httpd_path:
        try:
            server_env = self.setup_environ_for_server()
            exit_code = self._executive.run_command(
                [httpd_path, '-v'], env=server_env, return_exit_code=True)
            if exit_code == 0:
                return True
            _log.error('httpd seems broken. Cannot run http tests.')
            return False
        except OSError as e:
            _log.error('httpd launch error: ' + repr(e))
    _log.error('No httpd found. Cannot run http tests.')
    return False
131,
27,
131,
3,
1490828945
] |
def do_audio_results_differ(self, expected_audio, actual_audio):
    """True when the actual audio output does not match the baseline."""
    return not (expected_audio == actual_audio)
131,
27,
131,
3,
1490828945
] |
def driver_name(self):
    """Name of the driver binary; --driver-name overrides the default."""
    override = self.get_option('driver_name')
    return override if override else self.CONTENT_SHELL_NAME
131,
27,
131,
3,
1490828945
] |
def output_filename(self, test_name, suffix, extension):
    """Generates the output filename for a test.
    This method gives a proper filename for various outputs of a test,
    including baselines and actual results. Usually, the output filename
    follows the pattern: test_name_without_ext+suffix+extension, but when
    the test name contains query strings, e.g. external/wpt/foo.html?wss,
    test_name_without_ext is mangled to be external/wpt/foo_wss.
    It is encouraged to use this method instead of writing another mangling.
    Args:
        test_name: The name of a test.
        suffix: A suffix string to add before the extension
            (e.g. "-expected").
        extension: The extension of the output file (starting with .).
    Returns:
        A string, the output filename.
    """
    # WPT names may carry query strings (external/wpt/foo.html?wss); the
    # query part is sanitized into the filename so it stays a valid path.
    query_start = test_name.find('?')
    if query_start == -1:
        root, _ = self._filesystem.splitext(test_name)
    else:
        root, _ = self._filesystem.splitext(test_name[:query_start])
        root += self._filesystem.sanitize_filename(test_name[query_start:])
    return root + suffix + extension
131,
27,
131,
3,
1490828945
] |
def expected_filename(self,
                      test_name,
                      extension,
                      return_default=True,
                      fallback_base_for_virtual=True,
                      match=True):
    """Given a test name, returns an absolute path to its expected results.
    If no expected results are found in any of the searched directories,
    the directory in which the test itself is located will be returned.
    The return value is in the format appropriate for the platform
    (e.g., "\\" for path separators on windows).
    This routine is generic but is implemented here to live alongside
    the other baseline and filename manipulation routines.
    Args:
        test_name: Name of test file (usually a relative path under web_tests/)
        extension: File extension of the expected results, including dot;
            e.g. '.txt' or '.png'. This should not be None, but may be an
            empty string.
        return_default: If True, returns the path to the generic expectation
            if nothing else is found; if False, returns None.
        fallback_base_for_virtual: For virtual test only. When no virtual
            specific baseline is found, if this parameter is True, fallback
            to find baselines of the base test; if False, depending on
            |return_default|, returns the generic virtual baseline or None.
        match: Whether the baseline is a match or a mismatch.
    Returns:
        An absolute path to its expected results, or None if not found.
    """
    # The [0] means the first expected baseline (which is the one to be
    # used) in the fallback paths.
    platform_dir, baseline_filename = self.expected_baselines(
        test_name, extension, match=match)[0]
    if platform_dir:
        return self._filesystem.join(platform_dir, baseline_filename)
    # No platform-specific baseline: for virtual tests, optionally recurse
    # into the base (non-virtual) test's baselines.
    if fallback_base_for_virtual:
        actual_test_name = self.lookup_virtual_test_base(test_name)
        if actual_test_name:
            return self.expected_filename(
                actual_test_name, extension, return_default, match=match)
    if return_default:
        # Fall back to the generic (web_tests-root) baseline location.
        return self._filesystem.join(self.web_tests_dir(),
                                     baseline_filename)
    return None
131,
27,
131,
3,
1490828945
] |
def expected_checksum(self, test_name):
    """Returns the checksum of the image we expect the test to produce,
    or None if it is a text-only test.
    """
    png_path = self.expected_filename(test_name, '.png')
    if not self._filesystem.exists(png_path):
        return None
    with self._filesystem.open_binary_file_for_reading(png_path) as png_file:
        return read_checksum_from_png.read_checksum(png_file)
131,
27,
131,
3,
1490828945
] |
def expected_audio(self, test_name):
    """Contents of the test's .wav baseline, or None if there is none."""
    wav_path = self.expected_filename(test_name, '.wav')
    if self._filesystem.exists(wav_path):
        return self._filesystem.read_binary_file(wav_path)
    return None
131,
27,
131,
3,
1490828945
] |
def expected_subtest_failure(self, test_name):
    """True when the text baseline records a FAIL/NOTRUN/TIMEOUT subtest."""
    baseline = self.expected_text(test_name)
    if not baseline:
        return False
    decoded = baseline.decode('utf8', 'replace')
    return re.search(r"^(FAIL|NOTRUN|TIMEOUT)", decoded, re.MULTILINE) is not None
131,
27,
131,
3,
1490828945
] |
def reference_files(self, test_name):
    """Returns a list of expectation (== or !=) and filename pairs"""
    # First look for -expected.* / -expected-mismatch.* next to the test.
    refs = []
    for op in ('==', '!='):
        for ext in Port.supported_file_extensions:
            candidate = self.expected_filename(
                test_name, ext, match=(op == '=='))
            if self._filesystem.exists(candidate):
                refs.append((op, candidate))
    if refs:
        return refs
    # Otherwise consult MANIFEST.json, if this is a WPT test.
    m = self.WPT_REGEX.match(test_name)
    if not m:
        return []
    wpt_path = m.group(1)
    path_in_wpt = m.group(2)
    for op, ref_in_wpt in self.wpt_manifest(
            wpt_path).extract_reference_list(path_in_wpt):
        ref_absolute_path = self._filesystem.join(
            self.web_tests_dir(), wpt_path + ref_in_wpt)
        refs.append((op, ref_absolute_path))
    return refs
131,
27,
131,
3,
1490828945
] |
def real_tests_from_dict(self, paths, tests_by_dir):
    """Find all real tests in paths, using results saved in dict."""
    found = []
    for requested in paths:
        # A path with a supported extension is itself a test file.
        if self._has_supported_extension_for_all(requested):
            found.append(requested)
            continue
        # Otherwise treat it as a directory prefix (normalized to end
        # with '/') and collect every cached directory under it.
        prefix = requested + '/' if requested[-1] != '/' else requested
        for directory, tests in tests_by_dir.items():
            if directory.startswith(prefix):
                found.extend(tests)
    return found
131,
27,
131,
3,
1490828945
] |
def is_reference_html_file(filesystem, dirname, filename):
    """True when *filename* looks like a reftest reference, not a test."""
    # TODO(robertma): We probably do not need prefixes/suffixes other than
    # -expected{-mismatch} any more. Or worse, there might be actual tests
    # with these prefixes/suffixes.
    if filename.startswith(('ref-', 'notref-')):
        return True
    stem, _ = filesystem.splitext(filename)
    return stem.endswith(('-expected', '-expected-mismatch', '-ref', '-notref'))
131,
27,
131,
3,
1490828945
] |
def _has_supported_extension_for_all(self, filename):
extension = self._filesystem.splitext(filename)[1]
if 'inspector-protocol' in filename and extension == '.js':
return True
if 'devtools' in filename and extension == '.js':
return True
return extension in self.supported_file_extensions | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def is_non_wpt_test_file(self, dirname, filename):
    """True when dirname/filename is a runnable test outside the WPT dirs."""
    # Relative path to web_tests, slash-normalized, with trailing slash.
    normalized_test_dir = self.relative_test_filename(
        dirname) + self.TEST_PATH_SEPARATOR
    for wpt_dir in self.WPT_DIRS:
        if normalized_test_dir.startswith(wpt_dir + self.TEST_PATH_SEPARATOR):
            return False
    extension = self._filesystem.splitext(filename)[1]
    if extension == '.js' and ('inspector-protocol' in dirname
                               or 'devtools' in dirname):
        return True
    return (self._has_supported_extension(filename)
            and not Port.is_reference_html_file(self._filesystem, dirname,
                                                filename))
131,
27,
131,
3,
1490828945
] |
def wpt_manifest(self, path):
    """Return a WPTManifest for one of the WPT roots, regenerating
    MANIFEST.json when it is missing or --manifest-update is set."""
    assert path in self.WPT_DIRS
    # Convert '/' to the platform-specific separator.
    path = self._filesystem.normpath(path)
    manifest_path = self._filesystem.join(self.web_tests_dir(), path,
                                          MANIFEST_NAME)
    needs_update = (not self._filesystem.exists(manifest_path)
                    or self.get_option('manifest_update', False))
    if needs_update:
        _log.debug('Generating MANIFEST.json for %s...', path)
        WPTManifest.ensure_manifest(self, path)
    return WPTManifest(self.host, manifest_path)
131,
27,
131,
3,
1490828945
] |
def is_slow_wpt_test(self, test_name):
    """True when a WPT test is expected to run slowly."""
    # When DCHECK is enabled, idlharness tests run 5-6x slower due to the
    # amount of JavaScript they use (most web_tests run very little JS).
    # This causes flaky timeouts for a lot of them, as a 0.5-1s test becomes
    # close to the default 6s timeout.
    if (self.is_wpt_idlharness_test(test_name)
            and self._build_has_dcheck_always_on()):
        return True
    m = self.WPT_REGEX.match(test_name)
    if not m:
        return False
    return self.wpt_manifest(m.group(1)).is_slow_test(m.group(2))
131,
27,
131,
3,
1490828945
] |
def get_file_path_for_wpt_test(self, test_name):
    """Returns the real file path for the given WPT test.
    Or None if the test is not a WPT.
    """
    m = self.WPT_REGEX.match(test_name)
    if not m:
        return None
    wpt_path = m.group(1)
    file_in_wpt = self.wpt_manifest(wpt_path).file_path_for_test_url(
        m.group(2))
    if not file_in_wpt:
        return None
    return self._filesystem.join(wpt_path, file_in_wpt)
131,
27,
131,
3,
1490828945
] |
def _natural_sort_key(self, string_to_split):
"""Turns a string into a list of string and number chunks.
For example: "z23a" -> ["z", 23, "a"]
This can be used to implement "natural sort" order. See:
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(val):
try:
return int(val)
except ValueError:
return val
return [tryint(chunk) for chunk in re.split(r'(\d+)', string_to_split)] | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def test_isfile(self, test_name):
"""Returns True if the test name refers to an existing test file."""
# Used by test_expectations.py to apply rules to a file.
if self._filesystem.isfile(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isfile(self.abspath_for_test(base)) | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def test_isdir(self, test_name):
"""Returns True if the test name refers to an existing directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isdir(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isdir(self.abspath_for_test(base)) | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def test_exists(self, test_name):
"""Returns True if the test name refers to an existing test directory or file."""
# Used by lint_test_expectations.py to determine if an entry refers to a
# valid test.
if self.is_wpt_test(test_name):
# A virtual WPT test must have valid virtual prefix and base.
if test_name.startswith('virtual/'):
return bool(self.lookup_virtual_test_base(test_name))
# Otherwise treat any WPT test as existing regardless of their real
# existence on the file system.
# TODO(crbug.com/959958): Actually check existence of WPT tests.
return True
return self.test_isfile(test_name) or self.test_isdir(test_name) | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def normalize_test_name(self, test_name):
    """Returns a normalized version of the test name or test directory."""
    # Directories get a trailing slash; file names pass through unchanged.
    if test_name.endswith('/'):
        return test_name
    return test_name + '/' if self.test_isdir(test_name) else test_name
131,
27,
131,
3,
1490828945
] |
def update_baseline(self, baseline_path, data):
    """Write *data* as the new baseline contents at *baseline_path*.

    Args:
        baseline_path: the actual path to use for baseline, not the path to
            the test. This is used for both generic and platform-specific
            baselines; we cannot infer which here.
        data: contents of the baseline.
    """
    self._filesystem.write_binary_file(baseline_path, data)
131,
27,
131,
3,
1490828945
] |
def _perf_tests_dir(self):
    # Absolute path of the performance-tests directory, via the path finder.
    return self._path_finder.perf_tests_dir()
131,
27,
131,
3,
1490828945
] |
def skips_test(self, test):
    """Checks whether the given test is skipped for this port.
    Returns True if the test is skipped because the port runs smoke tests
    only or because the test is marked as Skip in NeverFixTest (otherwise
    the test is only marked as Skip indicating a temporary skip).
    """
    smoke_only_skip = self.skipped_due_to_smoke_tests(test)
    return smoke_only_skip or self.skipped_in_never_fix_tests(test)
131,
27,
131,
3,
1490828945
] |
def _tests_from_file(self, filename):
tests = set()
file_contents = self._filesystem.read_text_file(filename)
for line in file_contents.splitlines():
line = line.strip()
if line.startswith('#') or not line:
continue
tests.add(line)
return tests | ric2b/Vivaldi-browser | [
131,
27,
131,
3,
1490828945
] |
def path_to_smoke_tests_file(self):
    """Absolute path of the SmokeTests file under web_tests."""
    web_tests_root = self.web_tests_dir()
    return self._filesystem.join(web_tests_root, 'SmokeTests')
131,
27,
131,
3,
1490828945
] |
def path_to_never_fix_tests_file(self):
    """Absolute path of the NeverFixTests file under web_tests."""
    web_tests_root = self.web_tests_dir()
    return self._filesystem.join(web_tests_root, 'NeverFixTests')
131,
27,
131,
3,
1490828945
] |
def operating_system(self):
    # Abstract: concrete port subclasses must return their OS identifier.
    raise NotImplementedError
131,
27,
131,
3,
1490828945
] |
def architecture(self):
    # Returns the configured architecture identifier for this port.
    return self._architecture
131,
27,
131,
3,
1490828945
] |
def get_option(self, name, default_value=None):
    """Look up a command-line option by name, with a default fallback."""
    try:
        return getattr(self._options, name)
    except AttributeError:
        return default_value
131,
27,
131,
3,
1490828945
] |
def relative_test_filename(self, filename):
    """Returns a Unix-style path for a filename relative to web_tests.
    Ports may legitimately return absolute paths here if no relative path
    makes sense.
    """
    # Ports that run on windows need to override this method to deal with
    # filenames with backslashes in them.
    web_tests_root = self.web_tests_dir()
    if not filename.startswith(web_tests_root):
        return self.host.filesystem.abspath(filename)
    return self.host.filesystem.relpath(filename, web_tests_root)
131,
27,
131,
3,
1490828945
] |
def abspath_for_test(self, test_name):
    """Returns the full path to the file for a given test name.
    This is the inverse of relative_test_filename().
    """
    web_tests_root = self.web_tests_dir()
    return self._filesystem.join(web_tests_root, test_name)
131,
27,
131,
3,
1490828945
] |
def args_for_test(self, test_name):
    """Per-test driver arguments: virtual-suite args plus optional tracing."""
    args = self._lookup_virtual_test_args(test_name)
    tracing_categories = self.get_option('enable_tracing')
    if not tracing_categories:
        return args
    # The timestamp in the file name keeps later repetitions of the test
    # from overwriting earlier trace files.
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
    trace_file = 'trace_layout_test_{}_{}.json'.format(
        self._filesystem.sanitize_filename(test_name), timestamp)
    args += [
        '--trace-startup=' + tracing_categories,
        # Do not finish the trace until the test is finished.
        '--trace-startup-duration=0',
        '--trace-startup-file=' + trace_file,
    ]
    return args
131,
27,
131,
3,
1490828945
] |
def name_for_test(self, test_name):
    """Map a virtual test to its base name when the virtual file is absent."""
    base_test = self.lookup_virtual_test_base(test_name)
    if not base_test:
        return test_name
    if self._filesystem.exists(self.abspath_for_test(test_name)):
        return test_name
    return base_test
131,
27,
131,
3,
1490828945
] |
def results_directory(self):
    """Returns the absolute path directory which will store all web tests outputted
    files. It may include a sub directory for artifacts and it may store performance test results."""
    if self._results_directory:
        return self._results_directory
    chosen = (self.get_option('results_directory')
              or self.default_results_directory())
    assert self._filesystem.basename(chosen) != 'layout-test-results', (
        'crbug.com/1026494, crbug.com/1027708: The layout-test-results sub directory should '
        'not be passed as part of the --results-directory command line argument.')
    # Cache the absolute form for subsequent calls.
    self._results_directory = self._filesystem.abspath(chosen)
    return self._results_directory
131,
27,
131,
3,
1490828945
] |
def perf_results_directory(self):
    # Performance results are written to the same directory as all other
    # test results.
    return self.results_directory()
131,
27,
131,
3,
1490828945
] |
def generated_sources_directory(self):
    # Absolute path of the build's generated-sources ('gen') directory.
    return self._build_path('gen')
131,
27,
131,
3,
1490828945
] |
def default_results_directory(self):
    """Returns the absolute path to the build directory."""
    return self._build_path()
131,
27,
131,
3,
1490828945
] |
def num_workers(self, requested_num_workers):
    """Returns the number of available workers (possibly less than the number requested)."""
    # The base implementation imposes no cap; subclasses may lower it.
    return requested_num_workers
131,
27,
131,
3,
1490828945
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.