hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72d4d256b09e12e5ab72b4beb08439fff8b274f | 3,811 | py | Python | networkx/algorithms/simple_paths.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | 6 | 2017-08-18T07:30:40.000Z | 2021-07-08T02:57:36.000Z | networkx/algorithms/simple_paths.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | 2 | 2016-10-06T13:07:05.000Z | 2017-12-20T09:47:08.000Z | networkx/algorithms/simple_paths.py | tempcyc/networkx | cae83ba501c242567cb2454f97f851898276f06e | [
"BSD-3-Clause"
] | 4 | 2016-04-25T22:15:40.000Z | 2017-12-18T14:40:58.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2012 by
# Sergio Nery Simoes <sergionery@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Sérgio Nery Simões <sergionery@gmail.com>',
'Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['all_simple_paths']
def all_simple_paths(G, source, target, cutoff=None):
    """Generate lists of nodes for every simple path from ``source`` to
    ``target`` in ``G``.

    A simple path repeats no node.  Only paths with at most ``cutoff``
    edges are produced; when ``cutoff`` is None it defaults to
    ``len(G) - 1``, the longest possible simple path.

    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node for path.
    target : node
        Ending node for path.
    cutoff : integer, optional
        Depth to stop the search.  Only paths of length <= cutoff are
        returned.

    Returns
    -------
    path_generator : generator
        Yields lists of simple paths.  If there is no path between the
        source and target within the given cutoff, nothing is produced.

    Notes
    -----
    Uses a modified depth-first search.  A single path can be found in
    `O(V+E)` time but the number of simple paths can be very large, e.g.
    `O(n!)` in the complete graph of order n.

    See Also
    --------
    all_shortest_paths, shortest_path
    """
    if source not in G:
        raise nx.NetworkXError('source node %s not in graph' % source)
    if target not in G:
        raise nx.NetworkXError('target node %s not in graph' % target)
    max_edges = len(G) - 1 if cutoff is None else cutoff
    # Multigraphs need the variant that yields one path per parallel edge.
    finder = (_all_simple_paths_multigraph if G.is_multigraph()
              else _all_simple_paths_graph)
    return finder(G, source, target, cutoff=max_edges)
def _all_simple_paths_graph(G, source, target, cutoff=None):
    """Yield every simple path from ``source`` to ``target`` using an
    iterative depth-first search.

    ``cutoff`` is the maximum number of edges in a path.  BUG FIX: a
    ``cutoff`` of None previously hit ``None < 1`` (a TypeError on
    Python 3); it now defaults to ``len(G) - 1`` like the public wrapper.
    """
    if cutoff is None:
        cutoff = len(G) - 1
    if cutoff < 1:
        return
    visited = [source]
    stack = [iter(G[source])]
    while stack:
        children = stack[-1]
        child = next(children, None)
        if child is None:
            # Current node exhausted: backtrack.
            stack.pop()
            visited.pop()
        elif len(visited) < cutoff:
            if child == target:
                yield visited + [target]
            elif child not in visited:
                visited.append(child)
                stack.append(iter(G[child]))
        else:  # len(visited) == cutoff
            # At maximum depth only a direct edge to the target can
            # complete a path; then backtrack unconditionally.
            if child == target or target in children:
                yield visited + [target]
            stack.pop()
            visited.pop()
def _all_simple_paths_multigraph(G, source, target, cutoff=None):
    """Yield every simple path from ``source`` to ``target`` in a
    multigraph; parallel edges produce one path each.

    BUG FIX: a ``cutoff`` of None previously hit ``None < 1`` (a
    TypeError on Python 3); it now defaults to ``len(G) - 1`` like the
    public wrapper.
    """
    if cutoff is None:
        cutoff = len(G) - 1
    if cutoff < 1:
        return
    visited = [source]
    stack = [(v for u, v in G.edges(source))]
    while stack:
        children = stack[-1]
        child = next(children, None)
        if child is None:
            # Current node exhausted: backtrack.
            stack.pop()
            visited.pop()
        elif len(visited) < cutoff:
            if child == target:
                yield visited + [target]
            elif child not in visited:
                visited.append(child)
                stack.append((v for u, v in G.edges(child)))
        else:  # len(visited) == cutoff
            # At maximum depth, yield one path per remaining parallel
            # edge into the target, then backtrack.
            count = ([child] + list(children)).count(target)
            for i in range(count):
                yield visited + [target]
            stack.pop()
            visited.pop()
| 30.488 | 78 | 0.568617 |
import networkx as nx
__author__ = """\n""".join(['Sérgio Nery Simões <sergionery@gmail.com>',
'Aric Hagberg <aric.hagberg@gmail.com>'])
__all__ = ['all_simple_paths']
def all_simple_paths(G, source, target, cutoff=None):
    """Generate all simple paths in ``G`` from ``source`` to ``target``
    (comment-stripped duplicate column of the dataset row)."""
    if source not in G:
        raise nx.NetworkXError('source node %s not in graph' % source)
    if target not in G:
        raise nx.NetworkXError('target node %s not in graph' % target)
    if cutoff is None:
        cutoff = len(G) - 1
    finder = (_all_simple_paths_multigraph if G.is_multigraph()
              else _all_simple_paths_graph)
    return finder(G, source, target, cutoff=finder and cutoff)
def _all_simple_paths_graph(G, source, target, cutoff=None):
    """Iterative DFS generator for simple paths in a plain (di)graph."""
    if cutoff < 1:
        return
    path = [source]
    frontier = [iter(G[source])]
    while frontier:
        neighbours = frontier[-1]
        node = next(neighbours, None)
        if node is None:
            # Exhausted this node's neighbours: backtrack.
            frontier.pop()
            path.pop()
        elif len(path) < cutoff:
            if node == target:
                yield path + [target]
            elif node not in path:
                path.append(node)
                frontier.append(iter(G[node]))
        else:
            # At maximum depth only a direct edge to the target counts.
            if node == target or target in neighbours:
                yield path + [target]
            frontier.pop()
            path.pop()
def _all_simple_paths_multigraph(G, source, target, cutoff=None):
    """Iterative DFS generator for simple paths in a multigraph; each
    parallel edge yields its own path."""
    if cutoff < 1:
        return
    path = [source]
    frontier = [(v for u, v in G.edges(source))]
    while frontier:
        neighbours = frontier[-1]
        node = next(neighbours, None)
        if node is None:
            frontier.pop()
            path.pop()
        elif len(path) < cutoff:
            if node == target:
                yield path + [target]
            elif node not in path:
                path.append(node)
                frontier.append((v for u, v in G.edges(node)))
        else:
            # One path per remaining parallel edge into the target.
            remaining = [node] + list(neighbours)
            for _ in range(remaining.count(target)):
                yield path + [target]
            frontier.pop()
            path.pop()
| true | true |
f72d4d2fdf905ec644db0080a322da124ae18054 | 3,700 | py | Python | mlp/src/mlp_numpy.py | akashrajkn/sarcastic-gradients | 5a995ab7822dfd49cdc88855c631dcc8f1b0532f | [
"Apache-2.0"
] | null | null | null | mlp/src/mlp_numpy.py | akashrajkn/sarcastic-gradients | 5a995ab7822dfd49cdc88855c631dcc8f1b0532f | [
"Apache-2.0"
] | null | null | null | mlp/src/mlp_numpy.py | akashrajkn/sarcastic-gradients | 5a995ab7822dfd49cdc88855c631dcc8f1b0532f | [
"Apache-2.0"
] | null | null | null | """
This module implements a multi-layer perceptron (MLP) in NumPy.
You should fill in code into indicated sections.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from modules import *
class MLP(object):
    """
    Multi-layer perceptron implemented in NumPy.

    Holds an input linear layer, an optional stack of hidden linear layers
    (each followed by a ReLU), an output linear layer, and a softmax.
    With an empty ``n_hidden`` the model reduces to multinomial logistic
    regression (one linear map plus softmax), as the original docstring
    promised but the code did not implement.
    """

    def __init__(self, n_inputs, n_hidden, n_classes):
        """
        Initializes MLP object.

        Args:
          n_inputs: number of inputs.
          n_hidden: list of ints, number of units per hidden linear layer.
                    May be empty (logistic-regression case).
          n_classes: number of classes of the classification problem.
        """
        if len(n_hidden) == 0:
            # Logistic regression: map inputs straight to class scores.
            self.input_layer = LinearModule(n_inputs, n_classes)
            self.hidden_layers = []
            self.output_layer = None
        else:
            self.input_layer = LinearModule(n_inputs, n_hidden[0])
            self.hidden_layers = [LinearModule(n_hidden[i - 1], n_hidden[i])
                                  for i in range(1, len(n_hidden))]
            self.output_layer = LinearModule(n_hidden[-1], n_classes)
        self.n_hidden = n_hidden
        self.n_inputs = n_inputs
        self.n_classes = n_classes
        # NOTE(review): a single ReLU instance is shared by every layer; if
        # ReLUModule caches its forward input for use in backward(), only
        # the most recent activation survives -- confirm it is stateless.
        self.relu = ReLUModule()
        self.softmax = SoftMaxModule()

    def forward(self, x):
        """
        Forward pass: input layer, (ReLU + hidden layers), output layer,
        softmax.

        Args:
          x: input to the network.
        Returns:
          out: softmax class probabilities.
        """
        out = self.input_layer.forward(x)
        if self.output_layer is not None:
            out = self.relu.forward(out)
            for layer in self.hidden_layers:
                out = self.relu.forward(layer.forward(out))
            out = self.output_layer.forward(out)
        return self.softmax.forward(out)

    def backward(self, dout):
        """
        Backward pass, mirroring forward() exactly.

        Args:
          dout: gradients of the loss w.r.t. the network output.
        """
        dout = self.softmax.backward(dout)
        if self.output_layer is not None:
            dout = self.output_layer.backward(dout)
            # BUG FIX: previously this iterated hidden_layers[:-1], which
            # silently skipped the gradient of the last hidden layer.
            for layer in reversed(self.hidden_layers):
                dout = layer.backward(self.relu.backward(dout))
            dout = self.relu.backward(dout)
        self.input_layer.backward(dout)
        return

    def step(self, learning_rate):
        """Apply one SGD update to every layer's weights and biases."""
        layers = [self.input_layer] + self.hidden_layers
        if self.output_layer is not None:
            layers.append(self.output_layer)
        for layer in layers:
            layer.params['weight'] -= learning_rate * layer.grads['weight']
            layer.params['bias'] -= learning_rate * layer.grads['bias']
        return
| 31.355932 | 95 | 0.609459 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from modules import *
class MLP(object):
    """NumPy multi-layer perceptron (comment-stripped duplicate column of
    the dataset row)."""

    def __init__(self, n_inputs, n_hidden, n_classes):
        """Build the linear layers for the given layout."""
        if len(n_hidden) == 0:
            # Degenerate case: a single linear map straight to the
            # classes; hidden_layers / output_layer are not created.
            self.input_layer = LinearModule(n_inputs, n_classes)
        else:
            self.input_layer = LinearModule(n_inputs, n_hidden[0])
            self.hidden_layers = [
                LinearModule(n_hidden[k - 1], n_hidden[k])
                for k in range(1, len(n_hidden))
            ]
            self.output_layer = LinearModule(n_hidden[-1], n_classes)
        self.n_hidden = n_hidden
        self.n_inputs = n_inputs
        self.n_classes = n_classes
        self.relu = ReLUModule()
        self.softmax = SoftMaxModule()

    def forward(self, x):
        """Input -> linear/ReLU stack -> softmax probabilities."""
        if len(self.n_hidden) == 0:
            pass  # kept from the original: no dedicated no-hidden path
        hidden = self.relu.forward(self.input_layer.forward(x))
        for module in self.hidden_layers:
            hidden = self.relu.forward(module.forward(hidden))
        return self.softmax.forward(self.output_layer.forward(hidden))

    def backward(self, dout):
        """Backpropagate the loss gradient, mirroring forward().

        NOTE(review): the ``[:-1]`` slice skips the hidden layer adjacent
        to the output, so its gradient is never propagated -- looks like a
        bug; preserved here for behavioural parity.
        """
        grad = self.output_layer.backward(self.softmax.backward(dout))
        for module in reversed(self.hidden_layers[:-1]):
            grad = module.backward(self.relu.backward(grad))
        self.input_layer.backward(self.relu.backward(grad))
        return

    def step(self, learning_rate):
        """Vanilla SGD update on every layer's weight and bias."""
        modules = [self.input_layer] + self.hidden_layers + [self.output_layer]
        for module in modules:
            module.params['weight'] -= learning_rate * module.grads['weight']
            module.params['bias'] -= learning_rate * module.grads['bias']
        return
| true | true |
f72d4ea900979ff121f13afed4e4fdf730a65273 | 5,203 | py | Python | disf_gen_coarse2fine/table/Loss.py | GT-SALT/Disfluency-Generation-and-Detection | 72126172b466aa74277f3cf0f73b915e5dbeefbb | [
"MIT"
] | 11 | 2020-10-19T21:52:58.000Z | 2022-02-23T02:28:57.000Z | disf_gen_coarse2fine/table/Loss.py | JingfengYang/Disfluency-Generation-and-Detection-1 | 72126172b466aa74277f3cf0f73b915e5dbeefbb | [
"MIT"
] | 4 | 2021-07-09T11:59:03.000Z | 2022-02-10T00:58:25.000Z | disf_gen_coarse2fine/table/Loss.py | JingfengYang/Disfluency-Generation-and-Detection-1 | 72126172b466aa74277f3cf0f73b915e5dbeefbb | [
"MIT"
] | 5 | 2021-06-18T08:05:51.000Z | 2022-02-18T04:09:17.000Z | """
This file handles the details of the loss function during training.
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
from itertools import count
import torch
import torch.nn as nn
import random as rnd
import table
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CopyGeneratorLoss(nn.Module):
    """Negative log-likelihood criterion for a copy-generator decoder.

    ``scores`` concatenates the generation vocabulary with the source copy
    slots: column ``i < vocab_size`` is the probability of generating token
    ``i``; column ``vocab_size + j`` is the probability of copying source
    position ``j``.
    """

    def __init__(self, vocab_size, force_copy, only_disf_loss, unk_index=0,
                 ignore_index=-100, eps=1e-20):
        super(CopyGeneratorLoss, self).__init__()
        self.force_copy = force_copy          # never fall back to generation
        self.eps = eps                        # guards against log(0)
        self.vocab_size = vocab_size
        self.ignore_index = ignore_index      # target id excluded from the loss
        self.unk_index = unk_index
        self.only_disf_loss = only_disf_loss  # mask loss by tgt_loss_mask instead

    def forward(self, scores, tgt):
        """
        Args:
            scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab
                size; each row sums to at most 1 (softmaxed).
            tgt: tuple ``(target, align, tgt_loss_mask)`` of LongTensors,
                each ``(tgt_len, batch_size)``.
        Returns:
            Scalar tensor: summed NLL over unmasked positions.
        """
        target = tgt[0].view(-1)
        align = tgt[1].view(-1)
        tgt_loss_mask = tgt[2].view(-1)
        # Probability assigned to the gold token by the generator head.
        vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)
        # Probability of copying the gold token from the source.
        copy_ix = align.unsqueeze(1) + self.vocab_size
        copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)
        # A gold alignment of <unk> means "not copyable": zero its copy prob.
        copy_tok_probs[align == self.unk_index] = 0
        copy_tok_probs += self.eps  # avoid -inf logs
        # Positions where the copy mechanism is not used exclusively.
        non_copy = align == self.unk_index
        if not self.force_copy:
            non_copy = non_copy | (target != self.unk_index)
        probs = torch.where(
            non_copy, copy_tok_probs + vocab_probs, copy_tok_probs
        )
        loss = -probs.log()
        if self.only_disf_loss:
            # Zero out positions flagged by the mask.
            loss[tgt_loss_mask == 1] = 0
        else:
            # BUG FIX: the padding mask must come from the flattened
            # ``target`` tensor; the old code compared the input *tuple*
            # ``tgt`` against ignore_index, which never matched, so padded
            # positions wrongly contributed to the loss.
            loss[target == self.ignore_index] = 0
        return loss.sum()
class LossCompute(nn.Module):
    """Bundle of the layout ('lay') and token ('tgt') training criteria.

    ``smooth_eps`` is accepted for signature compatibility but unused.
    """

    def __init__(self, vocab, opt, fields, unk_index=0,
                 ignore_index=-100, smooth_eps=0):
        super(LossCompute, self).__init__()
        # Per-class weights for the layout tagger: boost the
        # begin-of-disfluency label, ignore UNK and PAD entirely.
        label_vocab = fields['src_label'].vocab
        weights = torch.ones(len(label_vocab), dtype=torch.float,
                             requires_grad=False, device=device)
        weights[label_vocab.stoi[table.IO.BOD_LABEL]] = opt.disf_label_weight
        weights[label_vocab.stoi[table.IO.UNK_WORD]] = 0
        weights[label_vocab.stoi[table.IO.PAD_WORD]] = 0
        self.label_weights = weights
        self.criterion = {
            'lay': nn.NLLLoss(weight=weights, reduction='sum',
                              ignore_index=ignore_index),
        }
        # Token loss: plain NLL unless attention+copy are both enabled.
        if opt.no_attention or opt.no_copy:
            self.criterion['tgt'] = nn.NLLLoss(reduction='sum',
                                               ignore_index=ignore_index)
        else:
            self.criterion['tgt'] = CopyGeneratorLoss(
                len(vocab), opt.copy_attn_force, opt.only_disf_loss,
                unk_index=unk_index, ignore_index=ignore_index)

    def compute_loss(self, pred, gold):
        """Return ([per-name losses in ('lay','tgt') order, names missing
        from ``gold`` skipped], total layout label weight)."""
        loss_list = [self.criterion[name](pred[name], gold[name])
                     for name in ('lay', 'tgt') if name in gold]
        return loss_list, self.label_weights[gold['lay']].sum()
class SegLossCompute(nn.Module):
    """Plain summed-NLL criterion for the segmentation model.

    ``vocab``, ``opt``, ``fields`` and ``smooth_eps`` are accepted for
    signature parity with :class:`LossCompute` but are not used.
    """

    def __init__(self, vocab, opt, fields, unk_index=0,
                 ignore_index=-100, smooth_eps=0):
        super(SegLossCompute, self).__init__()
        self.criterion = nn.NLLLoss(reduction='sum',
                                    ignore_index=ignore_index)

    def compute_loss(self, pred, gold):
        """Summed negative log-likelihood of ``gold`` under ``pred``."""
        return self.criterion(pred, gold)
| 38.540741 | 121 | 0.610994 | from __future__ import division
from itertools import count
import torch
import torch.nn as nn
import random as rnd
import table
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CopyGeneratorLoss(nn.Module):
    """Copy-generator NLL criterion (comment-stripped duplicate column of
    the dataset row)."""

    def __init__(self, vocab_size, force_copy, only_disf_loss, unk_index=0,
                 ignore_index=-100, eps=1e-20):
        super(CopyGeneratorLoss, self).__init__()
        self.vocab_size = vocab_size
        self.force_copy = force_copy
        self.only_disf_loss = only_disf_loss
        self.unk_index = unk_index
        self.ignore_index = ignore_index
        self.eps = eps

    def forward(self, scores, tgt):
        """scores: (steps*batch, vocab + src); tgt: (target, align, mask)."""
        target = tgt[0].view(-1)
        align = tgt[1].view(-1)
        loss_mask = tgt[2].view(-1)
        # Generation probability of each gold token.
        gen_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)
        # Copy probability: columns past the vocabulary hold source slots.
        copy_probs = scores.gather(1, align.unsqueeze(1) + self.vocab_size).squeeze(1)
        copy_probs[align == self.unk_index] = 0
        copy_probs = copy_probs + self.eps
        mixed = align == self.unk_index
        if not self.force_copy:
            mixed = mixed | (target != self.unk_index)
        probs = torch.where(mixed, copy_probs + gen_probs, copy_probs)
        loss = -probs.log()
        if self.only_disf_loss:
            loss[loss_mask == 1] = 0
        else:
            # NOTE(review): ``tgt`` is the input *tuple* here, so this
            # comparison is always False and padding is never zeroed --
            # probably meant ``target``; preserved as-is for parity.
            loss[tgt == self.ignore_index] = 0
        return loss.sum()


class LossCompute(nn.Module):
    """Layout + token criteria bundle (comment-stripped duplicate)."""

    def __init__(self, vocab, opt, fields, unk_index=0,
                 ignore_index=-100, smooth_eps=0):
        super(LossCompute, self).__init__()
        label_vocab = fields['src_label'].vocab
        w = torch.ones(len(label_vocab), dtype=torch.float,
                       requires_grad=False, device=device)
        w[label_vocab.stoi[table.IO.BOD_LABEL]] = opt.disf_label_weight
        w[label_vocab.stoi[table.IO.UNK_WORD]] = 0
        w[label_vocab.stoi[table.IO.PAD_WORD]] = 0
        self.label_weights = w
        self.criterion = {'lay': nn.NLLLoss(weight=w, reduction='sum',
                                            ignore_index=ignore_index)}
        if opt.no_attention or opt.no_copy:
            self.criterion['tgt'] = nn.NLLLoss(reduction='sum',
                                               ignore_index=ignore_index)
        else:
            self.criterion['tgt'] = CopyGeneratorLoss(
                len(vocab), opt.copy_attn_force, opt.only_disf_loss,
                unk_index=unk_index, ignore_index=ignore_index)

    def compute_loss(self, pred, gold):
        losses = [self.criterion[k](pred[k], gold[k])
                  for k in ('lay', 'tgt') if k in gold]
        return losses, self.label_weights[gold['lay']].sum()


class SegLossCompute(nn.Module):
    """Plain NLL criterion for segmentation (comment-stripped duplicate);
    vocab/opt/fields/smooth_eps are unused."""

    def __init__(self, vocab, opt, fields, unk_index=0,
                 ignore_index=-100, smooth_eps=0):
        super(SegLossCompute, self).__init__()
        self.criterion = nn.NLLLoss(reduction='sum',
                                    ignore_index=ignore_index)

    def compute_loss(self, pred, gold):
        return self.criterion(pred, gold)
| true | true |
f72d4f5f379c640e9958f7aad5d2a4526ab22302 | 1,325 | py | Python | apps/rss_feeds/management/commands/mark_read.py | Paul3MK/NewsBlur | f912d100c2867e5366fca92abadc50d4253a41d8 | [
"MIT"
] | 3,073 | 2015-01-01T07:20:18.000Z | 2022-03-31T20:33:41.000Z | apps/rss_feeds/management/commands/mark_read.py | Paul3MK/NewsBlur | f912d100c2867e5366fca92abadc50d4253a41d8 | [
"MIT"
] | 1,054 | 2015-01-02T13:32:35.000Z | 2022-03-30T04:21:21.000Z | apps/rss_feeds/management/commands/mark_read.py | Paul3MK/NewsBlur | f912d100c2867e5366fca92abadc50d4253a41d8 | [
"MIT"
] | 676 | 2015-01-03T16:40:29.000Z | 2022-03-30T14:00:40.000Z | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from apps.reader.models import UserSubscription
import datetime
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("-d", "--days", dest="days", nargs=1, default=1, help="Days of unread")
parser.add_argument("-u", "--username", dest="username", nargs=1, help="Specify user id or username")
parser.add_argument("-U", "--userid", dest="userid", nargs=1, help="Specify user id or username")
def handle(self, *args, **options):
if options['userid']:
user = User.objects.filter(pk=options['userid'])[0]
elif options['username']:
user = User.objects.get(username__icontains=options['username'])
else:
raise Exception("Need username or user id.")
user.profile.last_seen_on = datetime.datetime.utcnow()
user.profile.save()
feeds = UserSubscription.objects.filter(user=user)
for sub in feeds:
if options['days'] == 0:
sub.mark_feed_read()
else:
sub.mark_read_date = datetime.datetime.utcnow() - datetime.timedelta(days=int(options['days']))
sub.needs_unread_recalc = True
sub.save() | 44.166667 | 111 | 0.626415 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from apps.reader.models import UserSubscription
import datetime
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("-d", "--days", dest="days", nargs=1, default=1, help="Days of unread")
parser.add_argument("-u", "--username", dest="username", nargs=1, help="Specify user id or username")
parser.add_argument("-U", "--userid", dest="userid", nargs=1, help="Specify user id or username")
def handle(self, *args, **options):
if options['userid']:
user = User.objects.filter(pk=options['userid'])[0]
elif options['username']:
user = User.objects.get(username__icontains=options['username'])
else:
raise Exception("Need username or user id.")
user.profile.last_seen_on = datetime.datetime.utcnow()
user.profile.save()
feeds = UserSubscription.objects.filter(user=user)
for sub in feeds:
if options['days'] == 0:
sub.mark_feed_read()
else:
sub.mark_read_date = datetime.datetime.utcnow() - datetime.timedelta(days=int(options['days']))
sub.needs_unread_recalc = True
sub.save() | true | true |
f72d4f7e7d807286e5c6688975bbbba1e0d587a4 | 3,185 | py | Python | Lab/adventofcode2020/day12.py | hscspring/TheAlgorithms-Python | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | [
"MIT"
] | 10 | 2020-07-06T11:00:58.000Z | 2022-01-29T09:25:24.000Z | Lab/adventofcode2020/day12.py | hscspring/TheAlgorithms-Python | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | [
"MIT"
] | null | null | null | Lab/adventofcode2020/day12.py | hscspring/TheAlgorithms-Python | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | [
"MIT"
] | 3 | 2020-07-13T06:39:23.000Z | 2020-08-15T16:29:48.000Z | import pnlp
# Compass directions in clockwise order; rotating right moves forward
# through this list, rotating left moves backward.
DIRECTS = ["E", "S", "W", "N"]
# Quadrant sign pairs "<sign of x><sign of y>" ordered by successive
# 90-degree clockwise rotations, so get_direct can rotate them too.
SIGNS = ["++", "+-", "--", "-+"]
def get_direct(directs: list, direct: str, num: int, rotate: str):
    """Rotate ``direct`` within the cyclic sequence ``directs``.

    ``num`` is the rotation in degrees (a multiple of 90); ``rotate`` is
    "R" (clockwise, forward in the list) or "L" (counter-clockwise).

    BUG FIX: left rotations are now normalised modulo the list length, so
    turns beyond 360 degrees no longer raise IndexError (the right branch
    already wrapped with a modulo).
    """
    steps = num // 90
    idx = directs.index(direct)
    if rotate == "R":
        idx += steps
    else:
        idx -= steps
    return directs[idx % len(directs)]
# Spot checks for get_direct: right/left turns by multiples of 90 degrees.
assert get_direct(DIRECTS, "E", 90, "R") == "S"
assert get_direct(DIRECTS, "E", 90, "L") == "N"
assert get_direct(DIRECTS, "S", 270, "R") == "E"
assert get_direct(DIRECTS, "S", 270, "L") == "W"
# Puzzle input path (NOTE: ``file`` shadows the Python 2 builtin).
file = "data/day12Test.txt"
# file = "data/day12.txt"
def get_dist(file):
    """Part 1: execute heading-based moves read from ``file`` and return
    the ship's Manhattan distance from the origin.

    Instruction letters: R/L turn the heading, N/S/E/W translate directly,
    and F moves along the current heading.
    """
    deltas = {"N": (0, 1), "S": (0, -1), "W": (-1, 0), "E": (1, 0)}
    x, y = 0, 0
    direct = "E"
    for line in pnlp.read_lines(file):
        action = line[0]
        num = int(line[1:])
        if action in ("R", "L"):
            direct = get_direct(DIRECTS, direct, num, action)
        else:
            move = direct if action == "F" else action
            step_x, step_y = deltas[move]
            x += step_x * num
            y += step_y * num
    return abs(x) + abs(y)
def get_sign(dx, dy):
    """Return the quadrant signature of (dx, dy) as the two-character
    string '<sign of x><sign of y>'; zero counts as positive."""
    horizontal = "+" if dx >= 0 else "-"
    vertical = "+" if dy >= 0 else "-"
    return horizontal + vertical
def get_waypoint(dx, dy, num, rotate):
    """Rotate the waypoint offset (dx, dy) about the ship by ``num``
    degrees in direction ``rotate`` ('R' or 'L'); return the new offset."""
    # Determine the quadrant (signs of x and y), then rotate that sign
    # pair the same way a compass heading rotates.
    quadrant = get_sign(dx, dy)
    sign_x, sign_y = get_direct(SIGNS, quadrant, num, rotate)
    # Decide whether the x and y magnitudes must be swapped: a quarter
    # turn (90 or 270 degrees) swaps the axes, a half/full turn does not.
    if num in (90, 270):
        mag_x, mag_y = abs(dy), abs(dx)
    else:
        mag_x, mag_y = abs(dx), abs(dy)
    new_dx = mag_x if sign_x == "+" else -mag_x
    new_dy = mag_y if sign_y == "+" else -mag_y
    return new_dx, new_dy
# Spot checks for get_waypoint: every rotation amount, both rotation
# directions, and zero components.
assert get_waypoint(1, 5, 0, "R") == (1, 5)
assert get_waypoint(1, 5, 360, "L") == (1, 5)
assert get_waypoint(1, 5, 90, "R") == (5, -1)
assert get_waypoint(1, 5, 270, "L") == (5, -1)
assert get_waypoint(1, 5, 180, "R") == (-1, -5)
assert get_waypoint(1, 5, 180, "L") == (-1, -5)
assert get_waypoint(1, 0, 0, "R") == (1, 0)
assert get_waypoint(1, 0, 360, "L") == (1, 0)
assert get_waypoint(1, 0, 90, "R") == (0, -1)
assert get_waypoint(1, 0, 270, "L") == (0, -1)
assert get_waypoint(1, 0, 180, "R") == (-1, 0)
assert get_waypoint(1, 0, 180, "L") == (-1, 0)
assert get_waypoint(2, -15, 180, "R") == (-2, 15)
assert get_waypoint(-2, 15, 90, "L") == (-15, -2)
assert get_waypoint(2, -15, 270, "R") == (15, 2)
# Part 2: the ship moves toward a waypoint held relative to itself.
lines = pnlp.read_lines(file)
x, y = 0, 0  # ship position
dx, dy = 10, 1  # waypoint offset relative to the ship
for line in lines:
    nvi = line[0]  # instruction letter
    num = int(line[1:])  # instruction magnitude
    if nvi == "R":
        dx, dy = get_waypoint(dx, dy, num, "R")
    elif nvi == "L":
        dx, dy = get_waypoint(dx, dy, num, "L")
    elif nvi in DIRECTS:
        # N/S/E/W translate the waypoint, not the ship.
        if nvi == "N":
            dy += num
        elif nvi == "S":
            dy -= num
        elif nvi == "W":
            dx -= num
        elif nvi == "E":
            dx += num
    elif nvi == "F":
        # Move the ship ``num`` times toward the waypoint.
        x += num * dx
        y += num * dy
    # print(line, dx, dy, x, y)
print(abs(x) + abs(y))
| 25.277778 | 66 | 0.475039 | import pnlp
# --- Comment-stripped duplicate of the day-12 solution above (the
# dataset's "content_no_comment" column); code is unchanged. ---
DIRECTS = ["E", "S", "W", "N"]
SIGNS = ["++", "+-", "--", "-+"]
def get_direct(directs: list, direct: str, num: int, rotate: str):
    """Rotate ``direct`` inside the cyclic list ``directs`` by ``num``
    degrees (a multiple of 90), right ('R') or left ('L')."""
    count = num // 90
    idx = directs.index(direct)
    if rotate == "R":
        new_idx = idx + count
        return directs[new_idx % 4]
    else:
        # NOTE(review): no modulo here -- left turns beyond 360 degrees
        # would raise IndexError.
        new_idx = idx - count
        return directs[new_idx]
assert get_direct(DIRECTS, "E", 90, "R") == "S"
assert get_direct(DIRECTS, "E", 90, "L") == "N"
assert get_direct(DIRECTS, "S", 270, "R") == "E"
assert get_direct(DIRECTS, "S", 270, "L") == "W"
file = "data/day12Test.txt"
def get_dist(file):
    """Part 1: run the heading-based moves and return the Manhattan
    distance of the ship from the origin."""
    lines = pnlp.read_lines(file)
    x, y = 0, 0
    direct = "E"
    for line in lines:
        nvi = line[0]
        num = int(line[1:])
        if nvi == "R":
            direct = get_direct(DIRECTS, direct, num, "R")
        elif nvi == "L":
            direct = get_direct(DIRECTS, direct, num, "L")
        else:
            if nvi == "F":
                tmp = direct
            else:
                tmp = nvi
            if tmp == "N":
                y += num
            elif tmp == "S":
                y -= num
            elif tmp == "W":
                x -= num
            elif tmp == "E":
                x += num
    return abs(x) + abs(y)
def get_sign(dx, dy):
    """Quadrant signature of (dx, dy): '<sign of x><sign of y>'."""
    if dx >= 0 and dy >= 0:
        return "++"
    elif dx >= 0 and dy <= 0:
        return "+-"
    elif dx <= 0 and dy >= 0:
        return "-+"
    elif dx <= 0 and dy <= 0:
        return "--"
def get_waypoint(dx, dy, num, rotate):
    """Rotate the waypoint offset (dx, dy) about the ship by ``num``
    degrees in direction ``rotate``."""
    sign = get_sign(dx, dy)
    new_sign = get_direct(SIGNS, sign, num, rotate)
    sg1 = new_sign[0]
    sg2 = new_sign[1]
    if num == 90 or num == 270:
        dx, dy = abs(dy), abs(dx)
    else:
        dx, dy = abs(dx), abs(dy)
    return int(sg1 + str(dx)), int(sg2 + str(dy))
assert get_waypoint(1, 5, 0, "R") == (1, 5)
assert get_waypoint(1, 5, 360, "L") == (1, 5)
assert get_waypoint(1, 5, 90, "R") == (5, -1)
assert get_waypoint(1, 5, 270, "L") == (5, -1)
assert get_waypoint(1, 5, 180, "R") == (-1, -5)
assert get_waypoint(1, 5, 180, "L") == (-1, -5)
assert get_waypoint(1, 0, 0, "R") == (1, 0)
assert get_waypoint(1, 0, 360, "L") == (1, 0)
assert get_waypoint(1, 0, 90, "R") == (0, -1)
assert get_waypoint(1, 0, 270, "L") == (0, -1)
assert get_waypoint(1, 0, 180, "R") == (-1, 0)
assert get_waypoint(1, 0, 180, "L") == (-1, 0)
assert get_waypoint(2, -15, 180, "R") == (-2, 15)
assert get_waypoint(-2, 15, 90, "L") == (-15, -2)
assert get_waypoint(2, -15, 270, "R") == (15, 2)
# Part 2 driver: waypoint-relative movement.
lines = pnlp.read_lines(file)
x, y = 0, 0
dx, dy = 10, 1
for line in lines:
    nvi = line[0]
    num = int(line[1:])
    if nvi == "R":
        dx, dy = get_waypoint(dx, dy, num, "R")
    elif nvi == "L":
        dx, dy = get_waypoint(dx, dy, num, "L")
    elif nvi in DIRECTS:
        if nvi == "N":
            dy += num
        elif nvi == "S":
            dy -= num
        elif nvi == "W":
            dx -= num
        elif nvi == "E":
            dx += num
    elif nvi == "F":
        x += num * dx
        y += num * dy
print(abs(x) + abs(y))
| true | true |
f72d4fd3cbefd1e5e02930d93c37bbd0e45283f5 | 13,700 | py | Python | openconcept/components/splitter.py | vrsub/openconcept | 459aa24269cf54122ee4cfb3edf173c79c880be9 | [
"MIT"
] | null | null | null | openconcept/components/splitter.py | vrsub/openconcept | 459aa24269cf54122ee4cfb3edf173c79c880be9 | [
"MIT"
] | null | null | null | openconcept/components/splitter.py | vrsub/openconcept | 459aa24269cf54122ee4cfb3edf173c79c880be9 | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from openmdao.api import ExplicitComponent
from openmdao.api import Group
class PowerSplit(ExplicitComponent):
"""
A power split mechanism for mechanical or electrical power.
Inputs
------
power_in : float
Power fed to the splitter. (vector, W)
power_rating : float
Maximum rated power of the split mechanism. (scalar, W)
power_split_fraction:
If ``'rule'`` is set to ``'fraction'``, sets percentage of input power directed
to Output A (minus losses). (vector, dimensionless)
power_split_amount:
If ``'rule'`` is set to ``'fixed'``, sets amount of input power to Output A (minus
losses). (vector, W)
Outputs
-------
power_out_A : float
Power sent to first output (vector, W)
power_out_B : float
Power sent to second output (vector, W)
heat_out : float
Waste heat produced (vector, W)
component_cost : float
Nonrecurring cost of the component (scalar, USD)
component_weight : float
Weight of the component (scalar, kg)
component_sizing_margin : float
Equal to 1 when fed full rated power (vector, dimensionless)
Options
-------
num_nodes : int
Number of analysis points to run (sets vec length; default 1)
rule : str
Power split control rule to use; either ``'fixed'`` where a set
amount of power is sent to Output A or ``'fraction'`` where a
fraction of the total power is sent to Output A
efficiency : float
Component efficiency (default 1)
weight_inc : float
Weight per unit rated power
(default 0, kg/W)
weight_base : float
Base weight
(default 0, kg)
cost_inc : float
Nonrecurring cost per unit power
(default 0, USD/W)
cost_base : float
Base cost
(default 0 USD)
"""
def initialize(self):
# define control rules
self.options.declare('num_nodes', default=1, desc='Number of flight/control conditions')
self.options.declare('rule', default='fraction',
desc='Control strategy - fraction or fixed power')
self.options.declare('efficiency', default=1., desc='Efficiency (dimensionless)')
self.options.declare('weight_inc', default=0., desc='kg per input watt')
self.options.declare('weight_base', default=0., desc='kg base weight')
self.options.declare('cost_inc', default=0., desc='$ cost per input watt')
self.options.declare('cost_base', default=0., desc='$ cost base')
def setup(self):
nn = self.options['num_nodes']
self.add_input('power_in', units='W',
desc='Input shaft power or incoming electrical load', shape=(nn,))
self.add_input('power_rating', val=99999999, units='W', desc='Split mechanism power rating')
rule = self.options['rule']
if rule == 'fraction':
self.add_input('power_split_fraction', val=0.5,
desc='Fraction of power to output A', shape=(nn,))
elif rule == 'fixed':
self.add_input('power_split_amount', units='W',
desc='Raw amount of power to output A', shape=(nn,))
else:
msg = 'Specify either "fraction" or "fixed" as power split control rule'
raise ValueError(msg)
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
self.add_output('power_out_A', units='W', desc='Output power or load to A', shape=(nn,))
self.add_output('power_out_B', units='W', desc='Output power or load to B', shape=(nn,))
self.add_output('heat_out', units='W', desc='Waste heat out', shape=(nn,))
self.add_output('component_cost', units='USD', desc='Splitter component cost')
self.add_output('component_weight', units='kg', desc='Splitter component weight')
self.add_output('component_sizing_margin', desc='Fraction of rated power', shape=(nn,))
if rule == 'fraction':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_fraction'],
rows=range(nn), cols=range(nn))
elif rule == 'fixed':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_amount'],
rows=range(nn), cols=range(nn))
self.declare_partials('heat_out', 'power_in', val=(1 - eta) * np.ones(nn),
rows=range(nn), cols=range(nn))
self.declare_partials('component_cost', 'power_rating', val=cost_inc)
self.declare_partials('component_weight', 'power_rating', val=weight_inc)
self.declare_partials('component_sizing_margin', 'power_in',
rows=range(nn), cols=range(nn))
self.declare_partials('component_sizing_margin', 'power_rating')
def compute(self, inputs, outputs):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
if rule == 'fraction':
outputs['power_out_A'] = inputs['power_in'] * inputs['power_split_fraction'] * eta
outputs['power_out_B'] = inputs['power_in'] * (1 - inputs['power_split_fraction']) * eta
elif rule == 'fixed':
# check to make sure enough power is available
# if inputs['power_in'] < inputs['power_split_amount']:
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
po_A = np.zeros(nn)
po_B = np.zeros(nn)
po_A[not_enough_idx] = inputs['power_in'][not_enough_idx] * eta
po_B[not_enough_idx] = np.zeros(nn)[not_enough_idx]
# else:
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
po_A[enough_idx] = inputs['power_split_amount'][enough_idx] * eta
po_B[enough_idx] = (inputs['power_in'][enough_idx] -
inputs['power_split_amount'][enough_idx]) * eta
outputs['power_out_A'] = po_A
outputs['power_out_B'] = po_B
outputs['heat_out'] = inputs['power_in'] * (1 - eta)
outputs['component_cost'] = inputs['power_rating'] * cost_inc + cost_base
outputs['component_weight'] = inputs['power_rating'] * weight_inc + weight_base
outputs['component_sizing_margin'] = inputs['power_in'] / inputs['power_rating']
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
if rule == 'fraction':
J['power_out_A', 'power_in'] = inputs['power_split_fraction'] * eta
J['power_out_A', 'power_split_fraction'] = inputs['power_in'] * eta
J['power_out_B', 'power_in'] = (1 - inputs['power_split_fraction']) * eta
J['power_out_B', 'power_split_fraction'] = -inputs['power_in'] * eta
elif rule == 'fixed':
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
# if inputs['power_in'] < inputs['power_split_amount']:
Jpo_A_pi = np.zeros(nn)
Jpo_A_ps = np.zeros(nn)
Jpo_B_pi = np.zeros(nn)
Jpo_B_ps = np.zeros(nn)
Jpo_A_pi[not_enough_idx] = eta * np.ones(nn)[not_enough_idx]
Jpo_A_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_pi[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
# else:
Jpo_A_ps[enough_idx] = eta * np.ones(nn)[enough_idx]
Jpo_A_pi[enough_idx] = np.zeros(nn)[enough_idx]
Jpo_B_ps[enough_idx] = -eta * np.ones(nn)[enough_idx]
Jpo_B_pi[enough_idx] = eta * np.ones(nn)[enough_idx]
J['power_out_A', 'power_in'] = Jpo_A_pi
J['power_out_A', 'power_split_amount'] = Jpo_A_ps
J['power_out_B', 'power_in'] = Jpo_B_pi
J['power_out_B', 'power_split_amount'] = Jpo_B_ps
J['component_sizing_margin', 'power_in'] = 1 / inputs['power_rating']
J['component_sizing_margin', 'power_rating'] = - (inputs['power_in'] /
inputs['power_rating'] ** 2)
class FlowSplit(ExplicitComponent):
"""
Split incoming flow from one inlet into two outlets at a fractional ratio.
Inputs
------
mdot_in : float
Mass flow rate of incoming fluid (vector, kg/s)
mdot_split_fraction : float
Fraction of incoming mass flow directed to output A, must be in
range 0-1 inclusive (vector, dimensionless)
Outputs
-------
mdot_out_A : float
Mass flow rate directed to first output (vector, kg/s)
mdot_out_B : float
Mass flow rate directed to second output (vector, kg/s)
Options
-------
num_nodes : int
Number of analysis points to run (sets vec length; default 1)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in', units='kg/s', shape=(nn,))
self.add_input('mdot_split_fraction', units=None, shape=(nn,), val=0.5)
self.add_output('mdot_out_A', units='kg/s', shape=(nn,))
self.add_output('mdot_out_B', units='kg/s', shape=(nn,))
self.declare_partials(['mdot_out_A'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
self.declare_partials(['mdot_out_B'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
if np.any(inputs['mdot_split_fraction'] < 0) or np.any(inputs['mdot_split_fraction'] > 1):
raise RuntimeWarning(f"mdot_split_fraction of {inputs['mdot_split_fraction']} has at least one element out of range [0, 1]")
outputs['mdot_out_A'] = inputs['mdot_in'] * inputs['mdot_split_fraction']
outputs['mdot_out_B'] = inputs['mdot_in'] * (1 - inputs['mdot_split_fraction'])
def compute_partials(self, inputs, J):
J['mdot_out_A', 'mdot_in'] = inputs['mdot_split_fraction']
J['mdot_out_A', 'mdot_split_fraction'] = inputs['mdot_in']
J['mdot_out_B', 'mdot_in'] = 1 - inputs['mdot_split_fraction']
J['mdot_out_B', 'mdot_split_fraction'] = - inputs['mdot_in']
class FlowCombine(ExplicitComponent):
"""
Combines two incoming flows into a single outgoing flow and does a weighted average
of their temperatures based on the mass flow rate of each to compute the outlet temp.
Inputs
------
mdot_in_A : float
Mass flow rate of fluid from first inlet, should be nonegative (vector, kg/s)
mdot_in_B : float
Mass flow rate of fluid from second inlet, should be nonnegative (vector, kg/s)
T_in_A : float
Temperature of fluid from first inlet (vector, K)
T_in_B : float
Temperature of fluid from second inlet (vector, K)
Outputs
-------
mdot_out : float
Outgoing fluid mass flow rate (vector, kg/s)
T_out : float
Outgoing fluid temperature (vector, K)
Options
-------
num_nodes : int
Number of analysis points (scalar, default 1)
"""
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in_A', units='kg/s', shape=(nn,))
self.add_input('mdot_in_B', units='kg/s', shape=(nn,))
self.add_input('T_in_A', units='K', shape=(nn,))
self.add_input('T_in_B', units='K', shape=(nn,))
self.add_output('mdot_out', units='kg/s', shape=(nn,))
self.add_output('T_out', units='K', shape=(nn,))
self.declare_partials(['mdot_out'], ['mdot_in_A', 'mdot_in_B'], rows=rng, cols=rng)
self.declare_partials(['T_out'], ['mdot_in_A', 'mdot_in_B', 'T_in_A', 'T_in_B'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
outputs['mdot_out'] = mdot_A + mdot_B
# Weighted average of temperatures for output temperature
outputs['T_out'] = (mdot_A * inputs['T_in_A'] + mdot_B * inputs['T_in_B']) / (mdot_A + mdot_B)
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
J['mdot_out', 'mdot_in_A'] = np.ones((nn,))
J['mdot_out', 'mdot_in_B'] = np.ones((nn,))
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
mdot = mdot_A + mdot_B
T_A = inputs['T_in_A']
T_B = inputs['T_in_B']
J['T_out', 'mdot_in_A'] = (mdot * T_A - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'mdot_in_B'] = (mdot * T_B - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'T_in_A'] = mdot_A / mdot
J['T_out', 'T_in_B'] = mdot_B / mdot | 44.625407 | 136 | 0.606788 | from __future__ import division
import numpy as np
from openmdao.api import ExplicitComponent
from openmdao.api import Group
class PowerSplit(ExplicitComponent):
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of flight/control conditions')
self.options.declare('rule', default='fraction',
desc='Control strategy - fraction or fixed power')
self.options.declare('efficiency', default=1., desc='Efficiency (dimensionless)')
self.options.declare('weight_inc', default=0., desc='kg per input watt')
self.options.declare('weight_base', default=0., desc='kg base weight')
self.options.declare('cost_inc', default=0., desc='$ cost per input watt')
self.options.declare('cost_base', default=0., desc='$ cost base')
def setup(self):
nn = self.options['num_nodes']
self.add_input('power_in', units='W',
desc='Input shaft power or incoming electrical load', shape=(nn,))
self.add_input('power_rating', val=99999999, units='W', desc='Split mechanism power rating')
rule = self.options['rule']
if rule == 'fraction':
self.add_input('power_split_fraction', val=0.5,
desc='Fraction of power to output A', shape=(nn,))
elif rule == 'fixed':
self.add_input('power_split_amount', units='W',
desc='Raw amount of power to output A', shape=(nn,))
else:
msg = 'Specify either "fraction" or "fixed" as power split control rule'
raise ValueError(msg)
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
self.add_output('power_out_A', units='W', desc='Output power or load to A', shape=(nn,))
self.add_output('power_out_B', units='W', desc='Output power or load to B', shape=(nn,))
self.add_output('heat_out', units='W', desc='Waste heat out', shape=(nn,))
self.add_output('component_cost', units='USD', desc='Splitter component cost')
self.add_output('component_weight', units='kg', desc='Splitter component weight')
self.add_output('component_sizing_margin', desc='Fraction of rated power', shape=(nn,))
if rule == 'fraction':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_fraction'],
rows=range(nn), cols=range(nn))
elif rule == 'fixed':
self.declare_partials(['power_out_A', 'power_out_B'],
['power_in', 'power_split_amount'],
rows=range(nn), cols=range(nn))
self.declare_partials('heat_out', 'power_in', val=(1 - eta) * np.ones(nn),
rows=range(nn), cols=range(nn))
self.declare_partials('component_cost', 'power_rating', val=cost_inc)
self.declare_partials('component_weight', 'power_rating', val=weight_inc)
self.declare_partials('component_sizing_margin', 'power_in',
rows=range(nn), cols=range(nn))
self.declare_partials('component_sizing_margin', 'power_rating')
def compute(self, inputs, outputs):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
weight_inc = self.options['weight_inc']
weight_base = self.options['weight_base']
cost_inc = self.options['cost_inc']
cost_base = self.options['cost_base']
if rule == 'fraction':
outputs['power_out_A'] = inputs['power_in'] * inputs['power_split_fraction'] * eta
outputs['power_out_B'] = inputs['power_in'] * (1 - inputs['power_split_fraction']) * eta
elif rule == 'fixed':
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
po_A = np.zeros(nn)
po_B = np.zeros(nn)
po_A[not_enough_idx] = inputs['power_in'][not_enough_idx] * eta
po_B[not_enough_idx] = np.zeros(nn)[not_enough_idx]
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
po_A[enough_idx] = inputs['power_split_amount'][enough_idx] * eta
po_B[enough_idx] = (inputs['power_in'][enough_idx] -
inputs['power_split_amount'][enough_idx]) * eta
outputs['power_out_A'] = po_A
outputs['power_out_B'] = po_B
outputs['heat_out'] = inputs['power_in'] * (1 - eta)
outputs['component_cost'] = inputs['power_rating'] * cost_inc + cost_base
outputs['component_weight'] = inputs['power_rating'] * weight_inc + weight_base
outputs['component_sizing_margin'] = inputs['power_in'] / inputs['power_rating']
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
rule = self.options['rule']
eta = self.options['efficiency']
if rule == 'fraction':
J['power_out_A', 'power_in'] = inputs['power_split_fraction'] * eta
J['power_out_A', 'power_split_fraction'] = inputs['power_in'] * eta
J['power_out_B', 'power_in'] = (1 - inputs['power_split_fraction']) * eta
J['power_out_B', 'power_split_fraction'] = -inputs['power_in'] * eta
elif rule == 'fixed':
not_enough_idx = np.where(inputs['power_in'] < inputs['power_split_amount'])
enough_idx = np.where(inputs['power_in'] >= inputs['power_split_amount'])
Jpo_A_pi = np.zeros(nn)
Jpo_A_ps = np.zeros(nn)
Jpo_B_pi = np.zeros(nn)
Jpo_B_ps = np.zeros(nn)
Jpo_A_pi[not_enough_idx] = eta * np.ones(nn)[not_enough_idx]
Jpo_A_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_pi[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_B_ps[not_enough_idx] = np.zeros(nn)[not_enough_idx]
Jpo_A_ps[enough_idx] = eta * np.ones(nn)[enough_idx]
Jpo_A_pi[enough_idx] = np.zeros(nn)[enough_idx]
Jpo_B_ps[enough_idx] = -eta * np.ones(nn)[enough_idx]
Jpo_B_pi[enough_idx] = eta * np.ones(nn)[enough_idx]
J['power_out_A', 'power_in'] = Jpo_A_pi
J['power_out_A', 'power_split_amount'] = Jpo_A_ps
J['power_out_B', 'power_in'] = Jpo_B_pi
J['power_out_B', 'power_split_amount'] = Jpo_B_ps
J['component_sizing_margin', 'power_in'] = 1 / inputs['power_rating']
J['component_sizing_margin', 'power_rating'] = - (inputs['power_in'] /
inputs['power_rating'] ** 2)
class FlowSplit(ExplicitComponent):
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in', units='kg/s', shape=(nn,))
self.add_input('mdot_split_fraction', units=None, shape=(nn,), val=0.5)
self.add_output('mdot_out_A', units='kg/s', shape=(nn,))
self.add_output('mdot_out_B', units='kg/s', shape=(nn,))
self.declare_partials(['mdot_out_A'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
self.declare_partials(['mdot_out_B'], ['mdot_in', 'mdot_split_fraction'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
if np.any(inputs['mdot_split_fraction'] < 0) or np.any(inputs['mdot_split_fraction'] > 1):
raise RuntimeWarning(f"mdot_split_fraction of {inputs['mdot_split_fraction']} has at least one element out of range [0, 1]")
outputs['mdot_out_A'] = inputs['mdot_in'] * inputs['mdot_split_fraction']
outputs['mdot_out_B'] = inputs['mdot_in'] * (1 - inputs['mdot_split_fraction'])
def compute_partials(self, inputs, J):
J['mdot_out_A', 'mdot_in'] = inputs['mdot_split_fraction']
J['mdot_out_A', 'mdot_split_fraction'] = inputs['mdot_in']
J['mdot_out_B', 'mdot_in'] = 1 - inputs['mdot_split_fraction']
J['mdot_out_B', 'mdot_split_fraction'] = - inputs['mdot_in']
class FlowCombine(ExplicitComponent):
def initialize(self):
self.options.declare('num_nodes', default=1, desc='Number of analysis points')
def setup(self):
nn = self.options['num_nodes']
rng = np.arange(0, nn)
self.add_input('mdot_in_A', units='kg/s', shape=(nn,))
self.add_input('mdot_in_B', units='kg/s', shape=(nn,))
self.add_input('T_in_A', units='K', shape=(nn,))
self.add_input('T_in_B', units='K', shape=(nn,))
self.add_output('mdot_out', units='kg/s', shape=(nn,))
self.add_output('T_out', units='K', shape=(nn,))
self.declare_partials(['mdot_out'], ['mdot_in_A', 'mdot_in_B'], rows=rng, cols=rng)
self.declare_partials(['T_out'], ['mdot_in_A', 'mdot_in_B', 'T_in_A', 'T_in_B'], rows=rng, cols=rng)
def compute(self, inputs, outputs):
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
outputs['mdot_out'] = mdot_A + mdot_B
outputs['T_out'] = (mdot_A * inputs['T_in_A'] + mdot_B * inputs['T_in_B']) / (mdot_A + mdot_B)
def compute_partials(self, inputs, J):
nn = self.options['num_nodes']
J['mdot_out', 'mdot_in_A'] = np.ones((nn,))
J['mdot_out', 'mdot_in_B'] = np.ones((nn,))
mdot_A = inputs['mdot_in_A']
mdot_B = inputs['mdot_in_B']
mdot = mdot_A + mdot_B
T_A = inputs['T_in_A']
T_B = inputs['T_in_B']
J['T_out', 'mdot_in_A'] = (mdot * T_A - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'mdot_in_B'] = (mdot * T_B - mdot_A * T_A - mdot_B * T_B) / (mdot**2)
J['T_out', 'T_in_A'] = mdot_A / mdot
J['T_out', 'T_in_B'] = mdot_B / mdot | true | true |
f72d50746d095eb7757591129699b6a2bf403e6e | 1,956 | py | Python | 56_interactive_watershed.py | amit-bohra/Interactive-Image-Segmentation-with-OpenCV-Watershed-Algorithm-in-Python3 | 9fd6e2551fe19af76f1c91c714ba029d2d8599ca | [
"MIT"
] | null | null | null | 56_interactive_watershed.py | amit-bohra/Interactive-Image-Segmentation-with-OpenCV-Watershed-Algorithm-in-Python3 | 9fd6e2551fe19af76f1c91c714ba029d2d8599ca | [
"MIT"
] | null | null | null | 56_interactive_watershed.py | amit-bohra/Interactive-Image-Segmentation-with-OpenCV-Watershed-Algorithm-in-Python3 | 9fd6e2551fe19af76f1c91c714ba029d2d8599ca | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from copy import deepcopy as dp
aqua=(255,255,0)
marine=(116,139,69)
banana=(87,207,277)
blue=(255,0,0)
almond=(205,235,255)
brown=(64,64,255)
blue1=(255,245,152)
green=(0,100,0)
orange=(0,140,255)
orchid=(139,34,104)
pink=(147,20,255)
gold=(0,215,255)
gray=(127,127,127)
indigo=(130,0,75)
colors=[aqua,marine,banana,blue,almond,brown,blue1,green,orange,orchid,
pink,gold,gray,indigo]
size=0
color=0
def draw(event,x,y,flags,param):
global color,colors,img,marker,segment,tmg,size
mark=color+1
if event==cv2.EVENT_LBUTTONDOWN:
cv2.circle(marker,(x,y),size,mark,-1)
cv2.circle(tmg,(x,y),size,colors[color],-1)
marker_copy=dp(marker)
cv2.watershed(img,marker_copy)
segment=np.zeros(img.shape,np.uint8)
for i in range(1,len(colors)+1):
segment[marker_copy==i]=colors[i-1]
def func(x):
pass
a=0
a=int(input('Enter 1 for VideoCam else 0 '))
if a==1:
cap=cv2.VideoCapture(0)
if cap.isOpened():
ret,img=cap.read()
else:
ret=False
else:
img=cv2.imread('a.jpg')
img=cv2.GaussianBlur(img,(1,1),0)
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.createTrackbar('color','image',0,len(colors)-1,func)
cv2.createTrackbar('size','image',10,200,func)
cv2.setMouseCallback('image',draw)
marker=np.zeros(img.shape[:2],np.int32)
segment=np.zeros(img.shape,np.uint8)
tmg=dp(img)
if a==1:
cap.release()
while True:
color=cv2.getTrackbarPos('color','image')
size=cv2.getTrackbarPos('size','image')
cv2.imshow('image',tmg)
cv2.imshow('segment',segment)
if cv2.waitKey(1)==27:
break
if cv2.waitKey(1)==ord('p'):
print()
if cv2.waitKey(1)==ord('c'):
tmg=dp(img)
marker=np.zeros(img.shape[:2],np.int32)
segment=np.zeros(img.shape,np.uint8)
color=0
cv2.destroyAllWindows()
| 25.076923 | 72 | 0.625767 | import cv2
import numpy as np
from copy import deepcopy as dp
aqua=(255,255,0)
marine=(116,139,69)
banana=(87,207,277)
blue=(255,0,0)
almond=(205,235,255)
brown=(64,64,255)
blue1=(255,245,152)
green=(0,100,0)
orange=(0,140,255)
orchid=(139,34,104)
pink=(147,20,255)
gold=(0,215,255)
gray=(127,127,127)
indigo=(130,0,75)
colors=[aqua,marine,banana,blue,almond,brown,blue1,green,orange,orchid,
pink,gold,gray,indigo]
size=0
color=0
def draw(event,x,y,flags,param):
global color,colors,img,marker,segment,tmg,size
mark=color+1
if event==cv2.EVENT_LBUTTONDOWN:
cv2.circle(marker,(x,y),size,mark,-1)
cv2.circle(tmg,(x,y),size,colors[color],-1)
marker_copy=dp(marker)
cv2.watershed(img,marker_copy)
segment=np.zeros(img.shape,np.uint8)
for i in range(1,len(colors)+1):
segment[marker_copy==i]=colors[i-1]
def func(x):
pass
a=0
a=int(input('Enter 1 for VideoCam else 0 '))
if a==1:
cap=cv2.VideoCapture(0)
if cap.isOpened():
ret,img=cap.read()
else:
ret=False
else:
img=cv2.imread('a.jpg')
img=cv2.GaussianBlur(img,(1,1),0)
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.createTrackbar('color','image',0,len(colors)-1,func)
cv2.createTrackbar('size','image',10,200,func)
cv2.setMouseCallback('image',draw)
marker=np.zeros(img.shape[:2],np.int32)
segment=np.zeros(img.shape,np.uint8)
tmg=dp(img)
if a==1:
cap.release()
while True:
color=cv2.getTrackbarPos('color','image')
size=cv2.getTrackbarPos('size','image')
cv2.imshow('image',tmg)
cv2.imshow('segment',segment)
if cv2.waitKey(1)==27:
break
if cv2.waitKey(1)==ord('p'):
print()
if cv2.waitKey(1)==ord('c'):
tmg=dp(img)
marker=np.zeros(img.shape[:2],np.int32)
segment=np.zeros(img.shape,np.uint8)
color=0
cv2.destroyAllWindows()
| true | true |
f72d5149c0def6142cc248614e148e566f00322a | 2,120 | py | Python | rllib/examples/serving/cartpole_server.py | brechtmann/ray | 0c76ebd676f794847ea990aecced22b88717d09e | [
"Apache-2.0"
] | 4 | 2019-10-18T17:44:58.000Z | 2021-04-14T14:37:21.000Z | rllib/examples/serving/cartpole_server.py | Eric2Hamel/ray | bfaee49880611a65d16a4561c94c60851573b6f2 | [
"Apache-2.0"
] | 1 | 2022-03-30T17:52:44.000Z | 2022-03-30T17:52:44.000Z | rllib/examples/serving/cartpole_server.py | Eric2Hamel/ray | bfaee49880611a65d16a4561c94c60851573b6f2 | [
"Apache-2.0"
] | 1 | 2020-06-26T07:54:25.000Z | 2020-06-26T07:54:25.000Z | """Example of running a policy server. Copy this file for your use case.
To try this out, in two separate shells run:
$ python cartpole_server.py
$ python cartpole_client.py
"""
import os
from gym import spaces
import numpy as np
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.utils.policy_server import PolicyServer
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
SERVER_ADDRESS = "localhost"
SERVER_PORT = 9900
CHECKPOINT_FILE = "last_checkpoint.out"
class CartpoleServing(ExternalEnv):
def __init__(self):
ExternalEnv.__init__(
self, spaces.Discrete(2),
spaces.Box(low=-10, high=10, shape=(4, ), dtype=np.float32))
def run(self):
print("Starting policy server at {}:{}".format(SERVER_ADDRESS,
SERVER_PORT))
server = PolicyServer(self, SERVER_ADDRESS, SERVER_PORT)
server.serve_forever()
if __name__ == "__main__":
ray.init()
register_env("srv", lambda _: CartpoleServing())
# We use DQN since it supports off-policy actions, but you can choose and
# configure any agent.
dqn = DQNTrainer(
env="srv",
config={
# Use a single process to avoid needing to set up a load balancer
"num_workers": 0,
# Configure the agent to run short iterations for debugging
"exploration_fraction": 0.01,
"learning_starts": 100,
"timesteps_per_iteration": 200,
})
# Attempt to restore from checkpoint if possible.
if os.path.exists(CHECKPOINT_FILE):
checkpoint_path = open(CHECKPOINT_FILE).read()
print("Restoring from checkpoint path", checkpoint_path)
dqn.restore(checkpoint_path)
# Serving and training loop
while True:
print(pretty_print(dqn.train()))
checkpoint_path = dqn.save()
print("Last checkpoint", checkpoint_path)
with open(CHECKPOINT_FILE, "w") as f:
f.write(checkpoint_path)
| 31.641791 | 77 | 0.658019 |
import os
from gym import spaces
import numpy as np
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.utils.policy_server import PolicyServer
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
SERVER_ADDRESS = "localhost"
SERVER_PORT = 9900
CHECKPOINT_FILE = "last_checkpoint.out"
class CartpoleServing(ExternalEnv):
def __init__(self):
ExternalEnv.__init__(
self, spaces.Discrete(2),
spaces.Box(low=-10, high=10, shape=(4, ), dtype=np.float32))
def run(self):
print("Starting policy server at {}:{}".format(SERVER_ADDRESS,
SERVER_PORT))
server = PolicyServer(self, SERVER_ADDRESS, SERVER_PORT)
server.serve_forever()
if __name__ == "__main__":
ray.init()
register_env("srv", lambda _: CartpoleServing())
dqn = DQNTrainer(
env="srv",
config={
"num_workers": 0,
"exploration_fraction": 0.01,
"learning_starts": 100,
"timesteps_per_iteration": 200,
})
if os.path.exists(CHECKPOINT_FILE):
checkpoint_path = open(CHECKPOINT_FILE).read()
print("Restoring from checkpoint path", checkpoint_path)
dqn.restore(checkpoint_path)
while True:
print(pretty_print(dqn.train()))
checkpoint_path = dqn.save()
print("Last checkpoint", checkpoint_path)
with open(CHECKPOINT_FILE, "w") as f:
f.write(checkpoint_path)
| true | true |
f72d5366075c57378dcb950830cdf43d266193ee | 5,681 | py | Python | neurst/data/datasets/parallel_text_dataset.py | emailandxu/neurst | 235bddfc93b7784df01eddccec6791e1281651cf | [
"Apache-2.0"
] | null | null | null | neurst/data/datasets/parallel_text_dataset.py | emailandxu/neurst | 235bddfc93b7784df01eddccec6791e1281651cf | [
"Apache-2.0"
] | null | null | null | neurst/data/datasets/parallel_text_dataset.py | emailandxu/neurst | 235bddfc93b7784df01eddccec6791e1281651cf | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from absl import logging
from neurst.data.datasets import register_dataset
from neurst.data.datasets.dataset import TFRecordDataset
from neurst.data.datasets.text_gen_dataset import TextGenDataset
from neurst.utils.compat import DataStatus
from neurst.utils.flags_core import Flag
@six.add_metaclass(ABCMeta)
class AbstractParallelDataset(TextGenDataset):
""" The abstract dataset for parallel text.
The element spec must be
{
'feature': tf.TensorSpec(shape=(None,), dtype=tf.int64),
'label': tf.TensorSpec(shape=(None,), dtype=tf.int64)
}
"""
def __init__(self):
self._sources = None
super(AbstractParallelDataset, self).__init__()
@property
@abstractmethod
def status(self) -> str:
raise NotImplementedError
@abstractmethod
def build_iterator(self, map_func=None, shard_id=0, total_shards=1):
""" Returns the iterator of the dataset.
Args:
map_func: A function mapping a dataset element to another dataset element.
shard_id: Generator yields on the `shard_id`-th shard of the whole dataset.
total_shards: The number of total shards.
"""
raise NotImplementedError
@property
def sources(self):
""" Returns a list of source texts. """
return self._sources
@register_dataset("parallel_text")
class ParallelTextDataset(AbstractParallelDataset):
def __init__(self, args):
""" Initializes the dataset. """
super(ParallelTextDataset, self).__init__()
self._src_file = args["src_file"]
assert self._src_file, "`src_file` must be provided for ParallelTextDataset."
self._trg_file = args["trg_file"]
self._data_is_processed = args["data_is_processed"]
@staticmethod
def class_or_method_args():
return [
Flag("src_file", dtype=Flag.TYPE.STRING, help="The source text file"),
Flag("trg_file", dtype=Flag.TYPE.STRING, help="The target text file"),
Flag("data_is_processed", dtype=Flag.TYPE.BOOLEAN,
help="Whether the text data is already processed."),
]
@property
def status(self):
if self._data_is_processed:
return DataStatus.PROCESSED
return DataStatus.RAW
def build_iterator(self, map_func=None, shard_id=0, total_shards=1):
""" Reads data from files and returns the iterator.
Args:
map_func: A function mapping a dataset element to another dataset element.
shard_id: Generator yields on the `shard_id`-th shard of the whole dataset.
total_shards: The number of total shards.
"""
if total_shards > 1:
total_samples = self.num_samples
samples_per_part = total_samples // total_shards
range_begin = samples_per_part * shard_id
if shard_id == total_shards - 1:
range_end = total_samples + 1
logging.info(f"Iterate on dataset from {range_begin} "
f"to the end (total {total_samples}).")
else:
range_end = range_begin + samples_per_part
logging.info(f"Iterate on dataset from {range_begin} "
f"to {range_end} (total {total_samples}).")
def gen():
fsrc = tf.io.gfile.GFile(self._src_file)
ftrg = None if self._trg_file is None else tf.io.gfile.GFile(self._trg_file)
n = 0
for src in fsrc:
n += 1
data = {"feature": src.strip()}
if ftrg is not None:
data["label"] = ftrg.readline().strip()
if total_shards > 1:
if n < range_begin:
continue
if n >= range_end:
break
if map_func is not None:
data = map_func(data)
yield data
fsrc.close()
if ftrg is not None:
ftrg.close()
return gen
@property
def sources(self):
""" Returns a list of sources. """
if self._sources is None and self._src_file:
with tf.io.gfile.GFile(self._src_file) as fp:
self._sources = [line.strip() for line in fp]
return self._sources
@property
def targets(self):
""" Returns a list of targets. """
if self._targets is None and self._trg_file:
with tf.io.gfile.GFile(self._trg_file) as fp:
self._targets = [line.strip() for line in fp]
return self._targets
@register_dataset("parallel_tfrecord")
class ParallelTFRecordDataset(TFRecordDataset, AbstractParallelDataset):
@property
def status(self):
return DataStatus.PROJECTED
@property
def fields(self):
return {"feature": tf.io.VarLenFeature(tf.int64),
"label": tf.io.VarLenFeature(tf.int64)}
| 35.285714 | 88 | 0.620313 |
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from absl import logging
from neurst.data.datasets import register_dataset
from neurst.data.datasets.dataset import TFRecordDataset
from neurst.data.datasets.text_gen_dataset import TextGenDataset
from neurst.utils.compat import DataStatus
from neurst.utils.flags_core import Flag
@six.add_metaclass(ABCMeta)
class AbstractParallelDataset(TextGenDataset):
def __init__(self):
self._sources = None
super(AbstractParallelDataset, self).__init__()
@property
@abstractmethod
def status(self) -> str:
raise NotImplementedError
@abstractmethod
def build_iterator(self, map_func=None, shard_id=0, total_shards=1):
raise NotImplementedError
@property
def sources(self):
return self._sources
@register_dataset("parallel_text")
class ParallelTextDataset(AbstractParallelDataset):
def __init__(self, args):
super(ParallelTextDataset, self).__init__()
self._src_file = args["src_file"]
assert self._src_file, "`src_file` must be provided for ParallelTextDataset."
self._trg_file = args["trg_file"]
self._data_is_processed = args["data_is_processed"]
@staticmethod
def class_or_method_args():
return [
Flag("src_file", dtype=Flag.TYPE.STRING, help="The source text file"),
Flag("trg_file", dtype=Flag.TYPE.STRING, help="The target text file"),
Flag("data_is_processed", dtype=Flag.TYPE.BOOLEAN,
help="Whether the text data is already processed."),
]
@property
def status(self):
if self._data_is_processed:
return DataStatus.PROCESSED
return DataStatus.RAW
def build_iterator(self, map_func=None, shard_id=0, total_shards=1):
if total_shards > 1:
total_samples = self.num_samples
samples_per_part = total_samples // total_shards
range_begin = samples_per_part * shard_id
if shard_id == total_shards - 1:
range_end = total_samples + 1
logging.info(f"Iterate on dataset from {range_begin} "
f"to the end (total {total_samples}).")
else:
range_end = range_begin + samples_per_part
logging.info(f"Iterate on dataset from {range_begin} "
f"to {range_end} (total {total_samples}).")
def gen():
fsrc = tf.io.gfile.GFile(self._src_file)
ftrg = None if self._trg_file is None else tf.io.gfile.GFile(self._trg_file)
n = 0
for src in fsrc:
n += 1
data = {"feature": src.strip()}
if ftrg is not None:
data["label"] = ftrg.readline().strip()
if total_shards > 1:
if n < range_begin:
continue
if n >= range_end:
break
if map_func is not None:
data = map_func(data)
yield data
fsrc.close()
if ftrg is not None:
ftrg.close()
return gen
@property
def sources(self):
if self._sources is None and self._src_file:
with tf.io.gfile.GFile(self._src_file) as fp:
self._sources = [line.strip() for line in fp]
return self._sources
@property
def targets(self):
if self._targets is None and self._trg_file:
with tf.io.gfile.GFile(self._trg_file) as fp:
self._targets = [line.strip() for line in fp]
return self._targets
@register_dataset("parallel_tfrecord")
class ParallelTFRecordDataset(TFRecordDataset, AbstractParallelDataset):
    """Parallel dataset backed by TFRecord files.

    The records already contain token IDs, so the data status is
    PROJECTED and each example is parsed as two variable-length int64
    features.
    """

    @property
    def status(self):
        """The stored data is already projected to token IDs."""
        return DataStatus.PROJECTED

    @property
    def fields(self):
        """Feature spec used to parse each serialized example."""
        return {name: tf.io.VarLenFeature(tf.int64)
                for name in ("feature", "label")}
| true | true |
f72d53847ce5a7ccd7789359de36e77c4eea6916 | 6,583 | py | Python | tests/providers/airbyte/hooks/test_airbyte.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | tests/providers/airbyte/hooks/test_airbyte.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | tests/providers/airbyte/hooks/test_airbyte.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest import mock
import pytest
import requests_mock
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.airbyte.hooks.airbyte import AirbyteHook
from airflow.utils import db
class TestAirbyteHook(unittest.TestCase):
    """
    Test all functions from Airbyte Hook
    """

    airbyte_conn_id = 'airbyte_conn_id_test'
    connection_id = 'conn_test_sync'
    job_id = 1
    sync_connection_endpoint = 'http://test-airbyte:8001/api/v1/connections/sync'
    get_job_endpoint = 'http://test-airbyte:8001/api/v1/jobs/get'
    health_endpoint = 'http://test-airbyte:8001/api/v1/health'
    _mock_sync_conn_success_response_body = {'job': {'id': 1}}
    _mock_job_status_success_response_body = {'job': {'status': 'succeeded'}}

    def setUp(self):
        """Register an Airbyte connection and instantiate the hook under test."""
        db.merge_conn(
            Connection(
                conn_id='airbyte_conn_id_test', conn_type='airbyte', host='http://test-airbyte', port=8001
            )
        )
        self.hook = AirbyteHook(airbyte_conn_id=self.airbyte_conn_id)

    def return_value_get_job(self, status):
        """Build a mock HTTP response whose JSON payload carries *status*."""
        response = mock.Mock()
        response.json.return_value = {'job': {'status': status}}
        return response

    @requests_mock.mock()
    def test_submit_sync_connection(self, m):
        """Triggering a sync returns the job description from the API."""
        m.post(
            self.sync_connection_endpoint, status_code=200, json=self._mock_sync_conn_success_response_body
        )
        resp = self.hook.submit_sync_connection(connection_id=self.connection_id)
        assert resp.status_code == 200
        assert resp.json() == self._mock_sync_conn_success_response_body

    @requests_mock.mock()
    def test_get_job_status(self, m):
        """Fetching a job returns its status payload from the API."""
        m.post(self.get_job_endpoint, status_code=200, json=self._mock_job_status_success_response_body)
        resp = self.hook.get_job(job_id=self.job_id)
        assert resp.status_code == 200
        assert resp.json() == self._mock_job_status_success_response_body

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_succeeded(self, mock_get_job):
        """wait_for_job returns immediately when the job already succeeded."""
        mock_get_job.side_effect = [self.return_value_get_job(self.hook.SUCCEEDED)]
        self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)
        mock_get_job.assert_called_once_with(job_id=self.job_id)

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_error(self, mock_get_job):
        """An ERROR job state raises AirflowException."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job(self.hook.ERROR),
        ]
        with pytest.raises(AirflowException, match="Job failed"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)

        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        # Mock auto-creates unknown attributes, so the original
        # `assert mock_get_job.has_calls(calls)` returned a truthy child
        # Mock and always passed; `assert_has_calls` is the real check.
        mock_get_job.assert_has_calls(calls)

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_incomplete_succeeded(self, mock_get_job):
        """An INCOMPLETE state is retried until the job succeeds."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.INCOMPLETE),
            self.return_value_get_job(self.hook.SUCCEEDED),
        ]
        self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)

        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_timeout(self, mock_get_job):
        """Exceeding *timeout* while the job keeps running raises Timeout."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.PENDING),
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job(self.hook.RUNNING),
        ]
        with pytest.raises(AirflowException, match="Timeout"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=2, timeout=1)

        calls = [mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_state_unrecognized(self, mock_get_job):
        """An unknown job state raises with an 'unexpected state' message."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job("UNRECOGNIZED"),
        ]
        with pytest.raises(Exception, match="unexpected state"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)

        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_cancelled(self, mock_get_job):
        """A CANCELLED job state raises AirflowException."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job(self.hook.CANCELLED),
        ]
        with pytest.raises(AirflowException, match="Job was cancelled"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)

        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @requests_mock.mock()
    def test_connection_success(self, m):
        """A 200 from the health endpoint reports a successful connection."""
        m.get(
            self.health_endpoint,
            status_code=200,
        )

        status, msg = self.hook.test_connection()
        assert status is True
        assert msg == 'Connection successfully tested'

    @requests_mock.mock()
    def test_connection_failure(self, m):
        """A 500 from the health endpoint reports failure with the body text."""
        m.get(self.health_endpoint, status_code=500, json={"message": "internal server error"})

        status, msg = self.hook.test_connection()
        assert status is False
        assert msg == '{"message": "internal server error"}'
| 41.664557 | 109 | 0.705453 |
import unittest
from unittest import mock
import pytest
import requests_mock
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.airbyte.hooks.airbyte import AirbyteHook
from airflow.utils import db
class TestAirbyteHook(unittest.TestCase):
    """Unit tests for AirbyteHook (sync trigger, job polling, health check)."""

    airbyte_conn_id = 'airbyte_conn_id_test'
    connection_id = 'conn_test_sync'
    job_id = 1
    sync_connection_endpoint = 'http://test-airbyte:8001/api/v1/connections/sync'
    get_job_endpoint = 'http://test-airbyte:8001/api/v1/jobs/get'
    health_endpoint = 'http://test-airbyte:8001/api/v1/health'
    _mock_sync_conn_success_response_body = {'job': {'id': 1}}
    _mock_job_status_success_response_body = {'job': {'status': 'succeeded'}}

    def setUp(self):
        """Register an Airbyte connection and instantiate the hook under test."""
        db.merge_conn(
            Connection(
                conn_id='airbyte_conn_id_test', conn_type='airbyte', host='http://test-airbyte', port=8001
            )
        )
        self.hook = AirbyteHook(airbyte_conn_id=self.airbyte_conn_id)

    def return_value_get_job(self, status):
        """Build a mock HTTP response whose JSON payload carries *status*."""
        response = mock.Mock()
        response.json.return_value = {'job': {'status': status}}
        return response

    @requests_mock.mock()
    def test_submit_sync_connection(self, m):
        """Triggering a sync returns the job description from the API."""
        m.post(
            self.sync_connection_endpoint, status_code=200, json=self._mock_sync_conn_success_response_body
        )
        resp = self.hook.submit_sync_connection(connection_id=self.connection_id)
        assert resp.status_code == 200
        assert resp.json() == self._mock_sync_conn_success_response_body

    @requests_mock.mock()
    def test_get_job_status(self, m):
        """Fetching a job returns its status payload from the API."""
        m.post(self.get_job_endpoint, status_code=200, json=self._mock_job_status_success_response_body)
        resp = self.hook.get_job(job_id=self.job_id)
        assert resp.status_code == 200
        assert resp.json() == self._mock_job_status_success_response_body

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_succeeded(self, mock_get_job):
        """wait_for_job returns immediately when the job already succeeded."""
        mock_get_job.side_effect = [self.return_value_get_job(self.hook.SUCCEEDED)]
        self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)
        mock_get_job.assert_called_once_with(job_id=self.job_id)

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_error(self, mock_get_job):
        """An ERROR job state raises AirflowException."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job(self.hook.ERROR),
        ]
        with pytest.raises(AirflowException, match="Job failed"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)
        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        # Mock auto-creates unknown attributes, so the original
        # `assert mock_get_job.has_calls(calls)` returned a truthy child
        # Mock and always passed; `assert_has_calls` is the real check.
        mock_get_job.assert_has_calls(calls)

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_incomplete_succeeded(self, mock_get_job):
        """An INCOMPLETE state is retried until the job succeeds."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.INCOMPLETE),
            self.return_value_get_job(self.hook.SUCCEEDED),
        ]
        self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)
        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_timeout(self, mock_get_job):
        """Exceeding *timeout* while the job keeps running raises Timeout."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.PENDING),
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job(self.hook.RUNNING),
        ]
        with pytest.raises(AirflowException, match="Timeout"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=2, timeout=1)
        calls = [mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_state_unrecognized(self, mock_get_job):
        """An unknown job state raises with an 'unexpected state' message."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job("UNRECOGNIZED"),
        ]
        with pytest.raises(Exception, match="unexpected state"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)
        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @mock.patch('airflow.providers.airbyte.hooks.airbyte.AirbyteHook.get_job')
    def test_wait_for_job_cancelled(self, mock_get_job):
        """A CANCELLED job state raises AirflowException."""
        mock_get_job.side_effect = [
            self.return_value_get_job(self.hook.RUNNING),
            self.return_value_get_job(self.hook.CANCELLED),
        ]
        with pytest.raises(AirflowException, match="Job was cancelled"):
            self.hook.wait_for_job(job_id=self.job_id, wait_seconds=0)
        calls = [mock.call(job_id=self.job_id), mock.call(job_id=self.job_id)]
        mock_get_job.assert_has_calls(calls)  # was no-op has_calls(); see above

    @requests_mock.mock()
    def test_connection_success(self, m):
        """A 200 from the health endpoint reports a successful connection."""
        m.get(
            self.health_endpoint,
            status_code=200,
        )
        status, msg = self.hook.test_connection()
        assert status is True
        assert msg == 'Connection successfully tested'

    @requests_mock.mock()
    def test_connection_failure(self, m):
        """A 500 from the health endpoint reports failure with the body text."""
        m.get(self.health_endpoint, status_code=500, json={"message": "internal server error"})
        status, msg = self.hook.test_connection()
        assert status is False
        assert msg == '{"message": "internal server error"}'
| true | true |
f72d5492165937b6bde071a810e477e645ff83f7 | 68,601 | py | Python | scripts/sampleOutputs/bkup/cmp_bwavesgcccactusADMgromacs/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | scripts/sampleOutputs/bkup/cmp_bwavesgcccactusADMgromacs/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | scripts/sampleOutputs/bkup/cmp_bwavesgcccactusADMgromacs/power.py | TugberkArkose/MLScheduler | e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061 | [
"Unlicense"
] | null | null | null | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.241748,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.392568,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.37997,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.612382,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.06042,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.608183,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.28099,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.393745,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 8.09114,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.260706,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0221993,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.248008,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.164178,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.508715,
'Execution Unit/Register Files/Runtime Dynamic': 0.186377,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.665519,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.51034,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.77565,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00121522,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00121522,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00105964,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000410847,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00235842,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0058485,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0116093,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.157828,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.340866,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.536055,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.05221,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0758306,
'L2/Runtime Dynamic': 0.0118073,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.90447,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.24899,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.151,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.151,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 6.62043,
'Load Store Unit/Runtime Dynamic': 3.14467,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.37234,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.74468,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.132145,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.133247,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0559887,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.786946,
'Memory Management Unit/Runtime Dynamic': 0.189236,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 29.1048,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.909545,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0422586,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.302307,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.25411,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4277,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0822693,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.267307,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.496919,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.190956,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.308005,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.15547,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.654431,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.142213,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.94653,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0938787,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00800955,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0865788,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0592355,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.180458,
'Execution Unit/Register Files/Runtime Dynamic': 0.0672451,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.202969,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.510051,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.90441,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000525514,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000525514,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000469959,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000188622,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000850923,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00237191,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00460138,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0569446,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.62217,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.142437,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.19341,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.01647,
'Instruction Fetch Unit/Runtime Dynamic': 0.399764,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0442566,
'L2/Runtime Dynamic': 0.00803669,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.35548,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.02608,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.068534,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.068534,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.67911,
'Load Store Unit/Runtime Dynamic': 1.4326,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.168993,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.337986,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0599762,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.060618,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.225213,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0234183,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.484351,
'Memory Management Unit/Runtime Dynamic': 0.0840363,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.7602,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.246952,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0116208,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0926929,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.351266,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.18011,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0484701,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.240759,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.291723,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.169908,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.274055,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.138334,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.582297,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.1496,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.61241,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0551128,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0071267,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0684628,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0527063,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.123576,
'Execution Unit/Register Files/Runtime Dynamic': 0.059833,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.156352,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.394108,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.68237,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00112751,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00112751,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00101429,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000410278,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00075713,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00402645,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00965889,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0506679,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.22291,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.149545,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.172091,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.59784,
'Instruction Fetch Unit/Runtime Dynamic': 0.38599,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0379051,
'L2/Runtime Dynamic': 0.00782997,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.99314,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.85287,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0568115,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0568115,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.26142,
'Load Store Unit/Runtime Dynamic': 1.18986,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.140088,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.280175,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0497175,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0502125,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.200389,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0247355,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.441904,
'Memory Management Unit/Runtime Dynamic': 0.0749481,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.541,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.144977,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00943011,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0837861,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.238193,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.57919,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0368155,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.231605,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.215713,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.133657,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.215584,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.10882,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.458061,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.119793,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.4276,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0407529,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00560618,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0536358,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0414612,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0943887,
'Execution Unit/Register Files/Runtime Dynamic': 0.0470674,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.122202,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.311069,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.45318,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000925427,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000925427,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000834595,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000338699,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000595593,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00328104,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00785291,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0398577,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.53529,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.116541,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.135375,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.87685,
'Instruction Fetch Unit/Runtime Dynamic': 0.302907,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0382003,
'L2/Runtime Dynamic': 0.00819286,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.63979,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.684348,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0453798,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0453798,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.85409,
'Load Store Unit/Runtime Dynamic': 0.953526,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.111899,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.223798,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0397133,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0402344,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.157635,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.019261,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.381964,
'Memory Management Unit/Runtime Dynamic': 0.0594954,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.1682,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.107203,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00733488,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0660964,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.180634,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.95794,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 4.360682056640931,
'Runtime Dynamic': 4.360682056640931,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.234386,
'Runtime Dynamic': 0.064797,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 81.8085,
'Peak Power': 114.921,
'Runtime Dynamic': 21.2097,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.5741,
'Total Cores/Runtime Dynamic': 21.1449,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.234386,
'Total L3s/Runtime Dynamic': 0.064797,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | 75.055799 | 124 | 0.682045 | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.241748,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.392568,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 1.37997,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.612382,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.06042,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.608183,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.28099,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.393745,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 8.09114,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.260706,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0221993,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.248008,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.164178,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.508715,
'Execution Unit/Register Files/Runtime Dynamic': 0.186377,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.665519,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.51034,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.77565,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00121522,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00121522,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00105964,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000410847,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00235842,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0058485,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0116093,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.157828,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.340866,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.536055,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.05221,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0758306,
'L2/Runtime Dynamic': 0.0118073,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 5.90447,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.24899,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.151,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.151,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 6.62043,
'Load Store Unit/Runtime Dynamic': 3.14467,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.37234,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.74468,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.132145,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.133247,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0559887,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.786946,
'Memory Management Unit/Runtime Dynamic': 0.189236,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 29.1048,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.909545,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0422586,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.302307,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.25411,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.4277,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0822693,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.267307,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.496919,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.190956,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.308005,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.15547,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.654431,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.142213,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.94653,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0938787,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00800955,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0865788,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0592355,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.180458,
'Execution Unit/Register Files/Runtime Dynamic': 0.0672451,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.202969,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.510051,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.90441,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000525514,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000525514,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000469959,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000188622,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000850923,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00237191,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00460138,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0569446,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.62217,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.142437,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.19341,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.01647,
'Instruction Fetch Unit/Runtime Dynamic': 0.399764,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0442566,
'L2/Runtime Dynamic': 0.00803669,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.35548,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.02608,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.068534,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.068534,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.67911,
'Load Store Unit/Runtime Dynamic': 1.4326,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.168993,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.337986,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0599762,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.060618,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.225213,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0234183,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.484351,
'Memory Management Unit/Runtime Dynamic': 0.0840363,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.7602,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.246952,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0116208,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0926929,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.351266,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.18011,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0484701,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.240759,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.291723,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.169908,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.274055,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.138334,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.582297,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.1496,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.61241,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0551128,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0071267,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0684628,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0527063,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.123576,
'Execution Unit/Register Files/Runtime Dynamic': 0.059833,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.156352,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.394108,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.68237,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00112751,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00112751,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00101429,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000410278,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00075713,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00402645,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00965889,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0506679,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.22291,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.149545,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.172091,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.59784,
'Instruction Fetch Unit/Runtime Dynamic': 0.38599,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0379051,
'L2/Runtime Dynamic': 0.00782997,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.99314,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.85287,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0568115,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0568115,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.26142,
'Load Store Unit/Runtime Dynamic': 1.18986,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.140088,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.280175,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0497175,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0502125,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.200389,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0247355,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.441904,
'Memory Management Unit/Runtime Dynamic': 0.0749481,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.541,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.144977,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00943011,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0837861,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.238193,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.57919,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0368155,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.231605,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.215713,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.133657,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.215584,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.10882,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.458061,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.119793,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.4276,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0407529,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00560618,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0536358,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0414612,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0943887,
'Execution Unit/Register Files/Runtime Dynamic': 0.0470674,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.122202,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.311069,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.45318,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000925427,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000925427,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000834595,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000338699,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000595593,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00328104,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00785291,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0398577,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.53529,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.116541,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.135375,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.87685,
'Instruction Fetch Unit/Runtime Dynamic': 0.302907,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0382003,
'L2/Runtime Dynamic': 0.00819286,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.63979,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.684348,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0453798,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0453798,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.85409,
'Load Store Unit/Runtime Dynamic': 0.953526,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.111899,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.223798,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0397133,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0402344,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.157635,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.019261,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.381964,
'Memory Management Unit/Runtime Dynamic': 0.0594954,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.1682,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.107203,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00733488,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0660964,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.180634,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.95794,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 4.360682056640931,
'Runtime Dynamic': 4.360682056640931,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.234386,
'Runtime Dynamic': 0.064797,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 81.8085,
'Peak Power': 114.921,
'Runtime Dynamic': 21.2097,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 81.5741,
'Total Cores/Runtime Dynamic': 21.1449,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.234386,
'Total L3s/Runtime Dynamic': 0.064797,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | true | true |
f72d54cc192e42e146e0bea3d8aa115af59dd401 | 128,875 | py | Python | FWCore/ParameterSet/python/SequenceTypes.py | menglu21/cmssw | c3d6cb102c0aaddf652805743370c28044d53da6 | [
"Apache-2.0"
] | null | null | null | FWCore/ParameterSet/python/SequenceTypes.py | menglu21/cmssw | c3d6cb102c0aaddf652805743370c28044d53da6 | [
"Apache-2.0"
] | null | null | null | FWCore/ParameterSet/python/SequenceTypes.py | menglu21/cmssw | c3d6cb102c0aaddf652805743370c28044d53da6 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import sys
from builtins import range
from .Mixins import _ConfigureComponent, PrintOptions
from .Mixins import _Labelable, _Unlabelable
from .Mixins import _ValidatingParameterListBase
from .ExceptionHandling import *
from .OrderedSet import OrderedSet
class _HardDependency(object):
"""Information relevant for when a hard dependency,
which uses the * operator, is found"""
def __init__(self, sequenceName, depSet):
self.sequenceName = sequenceName
self.depSet = depSet
class _Sequenceable(object):
"""Denotes an object which can be placed in a sequence"""
def __init__(self):
pass
def __mul__(self,rhs):
return _SequenceCollection(self,rhs)
def __add__(self,rhs):
return _SequenceCollection(self,rhs)
def __invert__(self):
return _SequenceNegation(self)
def _clonesequence(self, lookuptable):
try:
return lookuptable[id(self)]
except:
raise KeyError("no "+str(type(self))+" with id "+str(id(self))+" found")
def resolve(self, processDict,keepIfCannotResolve=False):
return self
def isOperation(self):
"""Returns True if the object is an operator (e.g. *,+ or !) type"""
return False
def isLeaf(self):
return False
def _visitSubNodes(self,visitor):
pass
def visitNode(self,visitor):
visitor.enter(self)
self._visitSubNodes(visitor)
visitor.leave(self)
def _appendToCollection(self,collection):
collection.append(self)
def _errorstr(self):
return "A Sequenceable type"
def _checkIfSequenceable(caller, v):
    """Raise a TypeError (with caller context) when 'v' is not allowed in a sequence."""
    if isinstance(v, _Sequenceable):
        return
    typename = format_typename(caller)
    msg = format_outerframe(2)
    msg += "%s only takes arguments of types which are allowed in a sequence, but was given:\n" %typename
    msg += format_typename(v)
    msg += "\nPlease remove the problematic object from the argument list"
    raise TypeError(msg)
def _checkIfBooleanLogicSequenceable(caller, v):
    """Raise a TypeError (with caller context) when 'v' may not appear in a boolean logic expression."""
    if isinstance(v, _BooleanLogicSequenceable):
        return
    typename = format_typename(caller)
    msg = format_outerframe(2)
    msg += "%s only takes arguments of types which are allowed in a boolean logic sequence, but was given:\n" %typename
    msg += format_typename(v)
    msg += "\nPlease remove the problematic object from the argument list"
    raise TypeError(msg)
class _BooleanLogicSequenceable(_Sequenceable):
    """Denotes an object which can be used in a boolean logic sequence."""
    def __init__(self):
        super(_BooleanLogicSequenceable, self).__init__()
    def __or__(self, other):
        # 'a | b' -> OR expression node
        return _BooleanLogicExpression(_BooleanLogicExpression.OR, self, other)
    def __and__(self, other):
        # 'a & b' -> AND expression node
        return _BooleanLogicExpression(_BooleanLogicExpression.AND, self, other)
class _BooleanLogicExpression(_BooleanLogicSequenceable):
    """Contains the operation of a boolean logic expression"""
    # operation codes stored in self._op
    OR = 0
    AND = 1
    def __init__(self,op,left,right):
        _checkIfBooleanLogicSequenceable(self,left)
        _checkIfBooleanLogicSequenceable(self,right)
        self._op = op
        self._items = list()
        #if either the left or right side are the same kind of boolean expression
        # then we can just add their items to our own. This keeps the expression
        # tree more compact
        if isinstance(left,_BooleanLogicExpression) and left._op == self._op:
            self._items.extend(left._items)
        else:
            self._items.append(left)
        if isinstance(right,_BooleanLogicExpression) and right._op == self._op:
            self._items.extend(right._items)
        else:
            self._items.append(right)
    def isOperation(self):
        # expression nodes are operators, never leaves
        return True
    def _visitSubNodes(self,visitor):
        for i in self._items:
            i.visitNode(visitor)
    def dumpSequencePython(self, options=PrintOptions()):
        """Return the python expression string, joining items with '|' or '&'."""
        returnValue = ''
        join = ''
        operatorJoin =self.operatorString()
        for m in self._items:
            returnValue +=join
            join = operatorJoin
            # parenthesize nested sub-expressions so precedence survives a re-parse
            if not isinstance(m,_BooleanLogicSequenceLeaf):
                returnValue += '('+m.dumpSequencePython(options)+')'
            else:
                returnValue += m.dumpSequencePython(options)
        return returnValue
    def operatorString(self):
        # '|' for OR (the default), '&' for AND
        returnValue ='|'
        if self._op == self.AND:
            returnValue = '&'
        return returnValue
class _SequenceLeaf(_Sequenceable):
    """A terminal (module-like) node of a sequence expression tree."""
    def __init__(self):
        pass
    def isLeaf(self):
        # leaves carry no sub-nodes
        return True
class _BooleanLogicSequenceLeaf(_BooleanLogicSequenceable):
    """A terminal node of a boolean logic expression tree."""
    def __init__(self):
        pass
    def isLeaf(self):
        # leaves carry no sub-nodes
        return True
class _SequenceCollection(_Sequenceable):
    """Holds representation of the operations without having to use recursion.
    Operations are added to the beginning of the list and their operands are
    added to the end of the list, with the left added before the right
    """
    def __init__(self,*seqList):
        self._collection = list()
        for s in seqList:
            _checkIfSequenceable(self,s)
            s._appendToCollection(self._collection)
    def __mul__(self,rhs):
        # in-place append: '*' and '+' are represented identically here
        _checkIfSequenceable(self,rhs)
        rhs._appendToCollection(self._collection)
        return self
    def __add__(self,rhs):
        _checkIfSequenceable(self,rhs)
        rhs._appendToCollection(self._collection)
        return self
    def __str__(self):
        # items removed by replacement may be None; skip them
        sep = ''
        returnValue = ''
        for m in self._collection:
            if m is not None:
                returnValue += sep+str(m)
                sep = '+'
        return returnValue
    def _appendToCollection(self,collection):
        # flatten: contribute our items directly rather than nesting
        collection.extend(self._collection)
    def dumpSequencePython(self, options=PrintOptions()):
        """Return the '+'-joined python representation of all items."""
        returnValue = ''
        separator = ''
        for item in self._collection:
            itemDump = item.dumpSequencePython(options)
            if itemDump:
                returnValue += (separator + itemDump)
                separator = '+'
        return returnValue
    def dumpSequenceConfig(self):
        """Return the '&'-joined old-config-language representation."""
        returnValue = self._collection[0].dumpSequenceConfig()
        for m in self._collection[1:]:
            returnValue += '&'+m.dumpSequenceConfig()
        return returnValue
    def directDependencies(self,sortByType=True):
        return findDirectDependencies(self, self._collection,sortByType=sortByType)
    def visitNode(self,visitor):
        # a collection is transparent: visit items without enter/leave for itself
        for m in self._collection:
            m.visitNode(visitor)
    def resolve(self, processDict,keepIfCannotResolve=False):
        # resolve placeholders item by item, keeping list order
        self._collection = [x.resolve(processDict,keepIfCannotResolve) for x in self._collection]
        return self
    def index(self,item):
        return self._collection.index(item)
    def insert(self,index,item):
        self._collection.insert(index,item)
    def _replaceIfHeldDirectly(self,original,replacement):
        # Replace direct occurrences of 'original'; for unary operators wrapping
        # it, rewrap 'replacement' in the same operator type. A None replacement
        # marks slots for removal, compacted at the end.
        didReplace = False
        for i in self._collection:
            if original == i:
                self._collection[self._collection.index(original)] = replacement
                didReplace = True
            elif isinstance(i,_UnarySequenceOperator) and i._has(original):
                didReplace = True
                if replacement is None:
                    self._collection[self._collection.index(i)] = None
                else:
                    self._collection[self._collection.index(i)] = type(i)(replacement)
        if replacement is None:
            self._collection = [ i for i in self._collection if i is not None]
        return didReplace
def findDirectDependencies(element, collection,sortByType=True):
    """Return the (kind, label) pairs of entities directly used by 'collection'.

    Unlabeled containers are recursed into; labeled ones contribute their own
    label. With sortByType the result is a de-duplicated, case-insensitively
    sorted list; otherwise pairs are returned in encounter order (with
    duplicates).
    """
    dependencies = []
    for item in collection:
        # skip null items
        if item is None:
            continue
        # EDFilter, EDProducer, EDAnalyzer, OutputModule
        # should check for Modules._Module, but that doesn't seem to work
        elif isinstance(item, _SequenceLeaf):
            t = 'modules'
        # cms.ignore(module), ~(module)
        elif isinstance(item, (_SequenceIgnore, _SequenceNegation)):
            if isinstance(item._operand, _SequenceCollection):
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'modules'
        # _SequenceCollection
        elif isinstance(item, _SequenceCollection):
            dependencies += item.directDependencies(sortByType)
            continue
        # cms.Sequence
        elif isinstance(item, Sequence):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'sequences'
        # cms.Task
        elif isinstance(item, Task):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'tasks'
        # cms.ConditionalTask
        elif isinstance(item, ConditionalTask):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'conditionaltasks'
        # SequencePlaceholder and TaskPlaceholder do not add an explicit dependency
        elif isinstance(item, (SequencePlaceholder, TaskPlaceholder, ConditionalTaskPlaceholder)):
            continue
        # unsupported elements
        else:
            sys.stderr.write("Warning: unsupported element '%s' in %s '%s'\n" % (str(item), type(element).__name__, element.label_()))
            continue
        dependencies.append((t, item.label_()))
    if sortByType:
        # sort by (kind, label), ignoring case and any '_cfi' suffix in labels
        return sorted(set(dependencies), key = lambda t_item: (t_item[0].lower(), t_item[1].lower().replace('_cfi', '')))
    else:
        return dependencies
class _ModuleSequenceType(_ConfigureComponent, _Labelable):
    """Base class for classes which define a sequence of modules"""
    def __init__(self,*arg, **argv):
        # set via __dict__ to bypass the restrictive __setattr__ below
        self.__dict__["_isFrozen"] = False
        self._seq = None
        # accept: zero or one sequenceable first argument, then only Tasks
        if (len(arg) > 1 and not isinstance(arg[1], _TaskBase)) or (len(arg) > 0 and not isinstance(arg[0],_Sequenceable) and not isinstance(arg[0],_TaskBase)):
            typename = format_typename(self)
            msg = format_outerframe(2)
            msg += "The %s constructor takes zero or one sequenceable argument followed by zero or more arguments of type Task. But the following types are given:\n" %typename
            # only the first 19 offending arguments are listed in the message
            for item,i in zip(arg, range(1,20)):
                try:
                    msg += " %i) %s \n" %(i, item._errorstr())
                except:
                    msg += " %i) Not sequenceable and not a Task\n" %(i)
            if len(arg) > 1 and isinstance(arg[0],_Sequenceable) and isinstance(arg[1], _Sequenceable):
                msg += "Maybe you forgot to combine the sequenceable arguments via '*' or '+'."
            raise TypeError(msg)
        tasks = arg
        if len(arg) > 0 and isinstance(arg[0], _Sequenceable):
            self._seq = _SequenceCollection()
            arg[0]._appendToCollection(self._seq._collection)
            tasks = arg[1:]
        self._isModified = False
        self._tasks = OrderedSet()
        if len(tasks) > 0:
            self.associate(*tasks)
    def associate(self,*tasks):
        """Attach one or more Tasks to this sequence type."""
        for task in tasks:
            if not isinstance(task, _TaskBase):
                raise TypeError("associate only works with objects of type Task")
            self._tasks.add(task)
    def isFrozen(self):
        return self._isFrozen
    def setIsFrozen(self):
        self._isFrozen = True
    def _place(self,name,proc):
        # delegate to the subclass-specific placement (Path/EndPath/Sequence/...)
        self._placeImpl(name,proc)
    def __imul__(self,rhs):
        _checkIfSequenceable(self, rhs)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq+=rhs
        return self
    def __iadd__(self,rhs):
        _checkIfSequenceable(self, rhs)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq += rhs
        return self
    def __str__(self):
        # expand contained sequences/tasks so the string shows the leaves
        v = ExpandVisitor(type(self))
        self.visit(v)
        return v.resultString()
    def dumpConfig(self, options):
        # old config language representation, e.g. '{a&b}'
        s = ''
        if self._seq is not None:
            s = self._seq.dumpSequenceConfig()
        return '{'+s+'}\n'
    def dumpPython(self, options=PrintOptions()):
        """Returns a string which is the python representation of the object"""
        s = self.dumpPythonNoNewline(options)
        return s + "\n"
    def dumpPythonNoNewline(self, options=PrintOptions()):
        """Python representation without a trailing newline."""
        s=''
        if self._seq is not None:
            s =self._seq.dumpSequencePython(options)
        associationContents = set()
        for task in self._tasks:
            if task.hasLabel_():
                associationContents.add(_Labelable.dumpSequencePython(task, options))
            else:
                associationContents.add(task.dumpPythonNoNewline(options))
        for iString in sorted(associationContents):
            if s:
                s += ", "
            s += iString
        # CPython limits explicit call arguments to 255; switch to *[] form
        if len(associationContents) > 254:
            return 'cms.'+type(self).__name__+'(*['+s+'])'
        return 'cms.'+type(self).__name__+'('+s+')'
    def dumpSequencePython(self, options=PrintOptions()):
        """Returns a string which contains the python representation of just the internal sequence"""
        # only dump the label, if possible
        if self.hasLabel_():
            return _Labelable.dumpSequencePython(self, options)
        elif len(self._tasks) == 0:
            if self._seq is None:
                return ''
            s = self._seq.dumpSequencePython(options)
            if s:
                return '('+s+')'
            return ''
        return self.dumpPythonNoNewline(options)
    def dumpSequenceConfig(self):
        """Returns a string which contains the old config language representation of just the internal sequence"""
        # only dump the label, if possible
        if self.hasLabel_():
            return _Labelable.dumpSequenceConfig(self)
        else:
            # dump it verbose
            if self._seq is None:
                return ''
            return '('+self._seq.dumpSequenceConfig()+')'
    def __repr__(self):
        s = ''
        if self._seq is not None:
            s = str(self._seq)
        return "cms."+type(self).__name__+'('+s+')\n'
    def directDependencies(self,sortByType=True):
        """Returns the list of modules and other entities that are directly used"""
        result = []
        if self._seq:
            result += self._seq.directDependencies(sortByType=sortByType)
        if self._tasks:
            result += findDirectDependencies(self, self._tasks,sortByType=sortByType)
        return result
    def moduleNames(self):
        """Returns a set containing the names of all modules being used"""
        result = set()
        visitor = NodeNameVisitor(result)
        self.visit(visitor)
        return result
    def contains(self, mod):
        """True if 'mod' is held directly or indirectly (identity comparison)."""
        visitor = ContainsModuleVisitor(mod)
        self.visit(visitor)
        return visitor.result()
    def copy(self):
        # shallow copy: the sequence expression is shared, tasks set is rebuilt
        returnValue =_ModuleSequenceType.__new__(type(self))
        if self._seq is not None:
            returnValue.__init__(self._seq)
        else:
            returnValue.__init__()
        returnValue._tasks = OrderedSet(self._tasks)
        return returnValue
    def copyAndExclude(self,listOfModulesToExclude):
        """Returns a copy of the sequence which excludes those module in 'listOfModulesToExclude'"""
        # You can exclude instances of these types EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESSource, ESProducer, Service, Sequence, SequencePlaceholder, Task,
        # _SequenceNegation, and _SequenceIgnore.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        v = _CopyAndExcludeSequenceVisitor(listOfModulesToExclude)
        self.visit(v)
        result = self.__new__(type(self))
        result.__init__(v.result(self)[0], *v.result(self)[1])
        return result
    def expandAndClone(self):
        # Name of this function is not very good. It makes a shallow copy with all
        # the subTasks and subSequences flattened out (removed), but keeping all the
        # modules that were in those subSequences and subTasks as well as the top level
        # ones. Note this will also remove placeholders so one should probably
        # call resolve before using this if the sequence contains any placeholders.
        visitor = ExpandVisitor(type(self))
        self.visit(visitor)
        return visitor.result()
    def _postProcessFixup(self,lookuptable):
        self._seq = self._seq._clonesequence(lookuptable)
        return self
    def replace(self, original, replacement):
        """Finds all instances of 'original' and substitutes 'replacement' for them.
           Returns 'True' if a replacement occurs."""
        # This works for either argument being of type EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESProducer, ESSource, Service, Sequence, SequencePlaceHolder,
        # Task, _SequenceNegation, _SequenceIgnore. Although it will fail with a
        # raised exception if the replacement actually hits a case where a
        # non-Sequenceable object is placed in the sequenced part of a Sequence
        # or a type not allowed on a Task is put on a Task.
        # There is one special case where we need an explicit check to prevent
        # the algorithm from getting confused, either both or neither can be Tasks
        #
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        if (isinstance(original,Task) != isinstance(replacement,Task)):
            raise TypeError("replace only works if both arguments are Tasks or neither")
        if (isinstance(original,ConditionalTask) != isinstance(replacement,ConditionalTask)):
            raise TypeError("replace only works if both arguments are ConditionalTasks or neither")
        v = _CopyAndReplaceSequenceVisitor(original,replacement)
        self.visit(v)
        if v.didReplace():
            self._seq = v.result(self)[0]
            if v.result(self)[1]:
                self._tasks.clear()
                self.associate(*v.result(self)[1])
        return v.didReplace()
    def _replaceIfHeldDirectly(self,original,replacement):
        """Only replaces an 'original' with 'replacement' if 'original' is directly held.
        If another Sequence or Task holds 'original' it will not be replaced."""
        didReplace = False
        if original in self._tasks:
            self._tasks.remove(original)
            if replacement is not None:
                self._tasks.add(replacement)
            didReplace = True
        if self._seq is not None:
            didReplace |= self._seq._replaceIfHeldDirectly(original,replacement)
        return didReplace
    def index(self,item):
        """Returns the index at which the item is found or raises an exception"""
        if self._seq is not None:
            return self._seq.index(item)
        raise ValueError(str(item)+" is not in the sequence")
    def insert(self,index,item):
        """Inserts the item at the index specified"""
        _checkIfSequenceable(self, item)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq.insert(index,item)
    def remove(self, something):
        """Remove the first occurrence of 'something' (a sequence or a module)
        Returns 'True' if the module has been removed, False if it was not found"""
        # You can remove instances of these types EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESSource, ESProducer, Service, Sequence, SequencePlaceholder, Task,
        # _SequenceNegation, and _SequenceIgnore.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        #
        # Works very similar to copyAndExclude, there are 2 differences. This changes
        # the object itself instead of making a copy and second it only removes
        # the first instance of the argument instead of all of them.
        v = _CopyAndRemoveFirstSequenceVisitor(something)
        self.visit(v)
        if v.didRemove():
            self._seq = v.result(self)[0]
            if v.result(self)[1]:
                self._tasks.clear()
                self.associate(*v.result(self)[1])
        return v.didRemove()
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Replace contained placeholders with the objects they name."""
        if self._seq is not None:
            self._seq = self._seq.resolve(processDict,keepIfCannotResolve)
        for task in self._tasks:
            task.resolve(processDict,keepIfCannotResolve)
        return self
    def __setattr__(self,name,value):
        # sequence-like objects carry no user parameters; only private state may be set
        if not name.startswith("_"):
            raise AttributeError("You cannot set parameters for sequence like objects.")
        else:
            self.__dict__[name] = value
    #def replace(self,old,new):
    #"""Find all instances of old and replace with new"""
    #def insertAfter(self,which,new):
    #"""new will depend on which but nothing after which will depend on new"""
    #((a*b)*c) >> insertAfter(b,N) >> ((a*b)*(N+c))
    #def insertBefore(self,which,new):
    #"""new will be independent of which"""
    #((a*b)*c) >> insertBefore(b,N) >> ((a*(N+b))*c)
    #def __contains__(self,item):
    #"""returns whether or not 'item' is in the sequence"""
    #def modules_(self):
    def nameInProcessDesc_(self, myname):
        return myname
    def insertInto(self, parameterSet, myname, decoratedList):
        parameterSet.addVString(True, myname, decoratedList)
    def visit(self,visitor):
        """Passes to visitor's 'enter' and 'leave' method each item describing the module sequence.
        If the item contains 'sub' items then visitor will see those 'sub' items between the
        item's 'enter' and 'leave' calls.
        """
        if self._seq is not None:
            self._seq.visitNode(visitor)
        for item in self._tasks:
            visitor.enter(item)
            item.visit(visitor)
            visitor.leave(item)
class _UnarySequenceOperator(_BooleanLogicSequenceable):
    """For ~ and - operators"""
    def __init__(self, operand):
        self._operand = operand
        if isinstance(operand, _ModuleSequenceType):
            raise RuntimeError("This operator cannot accept a sequence")
        if not isinstance(operand, _Sequenceable):
            raise RuntimeError("This operator cannot accept a non sequenceable type")
    def __eq__(self, other):
        # allows replace(~a, b)
        return type(self) is type(other) and self._operand==other._operand
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # this definition implies that self._operand MUST NOT be changed after the construction
        return hash((type(self), self._operand))
    def _findDependencies(self,knownDeps, presentDeps):
        self._operand._findDependencies(knownDeps, presentDeps)
    def _clonesequence(self, lookuptable):
        # rewrap the cloned operand in the same operator type
        return type(self)(self._operand._clonesequence(lookuptable))
    def _has(self, op):
        return self._operand == op
    def resolve(self, processDict,keepIfCannotResolve=False):
        return type(self)(self._operand.resolve(processDict,keepIfCannotResolve))
    def isOperation(self):
        return True
    def _visitSubNodes(self,visitor):
        self._operand.visitNode(visitor)
    def decoration(self):
        # Fix: the original was missing 'return' and always yielded None.
        # Every concrete subclass overrides decoration(), so this is only a
        # safety net, but it should propagate the operand's decoration.
        return self._operand.decoration()
    def directDependencies(self,sortByType=True):
        return self._operand.directDependencies(sortByType=sortByType)
    def label_(self):
        return self._operand.label_()
class _SequenceNegation(_UnarySequenceOperator):
    """Stand-in for the '!' (negation) operator in a sequence expression tree."""
    def __init__(self, operand):
        super(_SequenceNegation, self).__init__(operand)
    def __str__(self):
        return '~%s' % self._operand
    def dumpSequenceConfig(self):
        return '!%s' % self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        # parenthesize compound operands so the dump re-parses with the right precedence
        operandDump = self._operand.dumpSequencePython(options)
        if self._operand.isOperation():
            return '~(%s)' % operandDump
        return '~%s' % operandDump
    def decoration(self):
        return '!'
class _SequenceIgnore(_UnarySequenceOperator):
    """Stand-in for the '-' (ignore) operator in a sequence expression tree."""
    def __init__(self, operand):
        super(_SequenceIgnore, self).__init__(operand)
    def __str__(self):
        return 'ignore(%s)' % self._operand
    def dumpSequenceConfig(self):
        return '-%s' % self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.ignore(%s)' % self._operand.dumpSequencePython(options)
    def decoration(self):
        return '-'
class _SequenceWait(_UnarySequenceOperator):
    """Stand-in for the '|' (wait) operator in a sequence expression tree."""
    def __init__(self, operand):
        super(_SequenceWait, self).__init__(operand)
    def __str__(self):
        return 'wait(%s)' % self._operand
    def dumpSequenceConfig(self):
        return '|%s' % self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.wait(%s)' % self._operand.dumpSequencePython(options)
    def decoration(self):
        return '|'
class _SequenceWaitAndIgnore(_UnarySequenceOperator):
    """Stand-in for the '+' (wait-and-ignore) operator in a sequence expression tree."""
    def __init__(self, operand):
        super(_SequenceWaitAndIgnore, self).__init__(operand)
    def __str__(self):
        return 'wait(ignore(%s))' % self._operand
    def dumpSequenceConfig(self):
        return '+%s' % self._operand.dumpSequenceConfig()
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.wait(cms.ignore(%s))' % self._operand.dumpSequencePython(options)
    def decoration(self):
        return '+'
def ignore(seq):
    """The EDFilter passed as an argument will be run but its filter value will be ignored
    """
    # ignoring an already-waited-on module collapses the two operators
    # into the single combined wait-and-ignore node
    if isinstance(seq, _SequenceWait):
        return _SequenceWaitAndIgnore(seq._operand)
    return _SequenceIgnore(seq)
def wait(seq):
    """All modules after this module in the sequence will wait for this module to finish before being scheduled to run.
    """
    # waiting on an already-ignored module collapses the two operators
    # into the single combined wait-and-ignore node
    if isinstance(seq, _SequenceIgnore):
        return _SequenceWaitAndIgnore(seq._operand)
    return _SequenceWait(seq)
class Path(_ModuleSequenceType):
    """A sequence of modules that is registered with a Process via _placePath."""
    def __init__(self, *arg, **argv):
        super(Path, self).__init__(*arg, **argv)
    def _placeImpl(self, name, proc):
        proc._placePath(name, self)
class EndPath(_ModuleSequenceType):
    """A sequence of modules that is registered with a Process via _placeEndPath."""
    def __init__(self, *arg, **argv):
        super(EndPath, self).__init__(*arg, **argv)
    def _placeImpl(self, name, proc):
        proc._placeEndPath(name, self)
class FinalPath(_ModuleSequenceType):
    """A sequence of modules registered via _placeFinalPath; Task association is forbidden."""
    def __init__(self, *arg, **argv):
        super(FinalPath, self).__init__(*arg, **argv)
    def _placeImpl(self, name, proc):
        proc._placeFinalPath(name, self)
    def associate(self, task):
        # FinalPaths may not carry Tasks; reject any association attempt
        raise TypeError("FinalPath does not allow associations with Tasks")
class Sequence(_ModuleSequenceType,_Sequenceable):
    """A reusable, possibly labeled group of modules that can itself be placed in other sequences."""
    def __init__(self,*arg,**argv):
        super(Sequence,self).__init__(*arg,**argv)
    def _placeImpl(self,name,proc):
        proc._placeSequence(name,self)
    def _clonesequence(self, lookuptable):
        if id(self) not in lookuptable:
            #for sequences held by sequences we need to clone
            # on the first reference
            if self._seq is not None:
                clone = type(self)(self._seq._clonesequence(lookuptable))
            else:
                clone = type(self)()
            # register under both ids so the clone maps to itself as well
            lookuptable[id(self)]=clone
            lookuptable[id(clone)]=clone
        return lookuptable[id(self)]
    def _visitSubNodes(self,visitor):
        # when embedded in another sequence, expose our own contents to the visitor
        self.visit(visitor)
class SequencePlaceholder(_Sequenceable):
    """Stands in for a named sequence that will be supplied later via resolve()."""
    def __init__(self, name):
        self._name = name
    def _placeImpl(self,name,proc):
        pass
    def __str__(self):
        return self._name
    def insertInto(self, parameterSet, myname):
        # a placeholder surviving to insertion time is a configuration error
        raise RuntimeError("The SequencePlaceholder "+self._name
                           +" was never overridden")
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Look up the named object in 'processDict' and resolve it in turn."""
        if not self._name in processDict:
            #print str(processDict.keys())
            if keepIfCannotResolve:
                return self
            raise RuntimeError("The SequencePlaceholder "+self._name+ " cannot be resolved.\n Known keys are:"+str(processDict.keys()))
        o = processDict[self._name]
        if not isinstance(o,_Sequenceable):
            raise RuntimeError("The SequencePlaceholder "+self._name+ " refers to an object type which is not allowed to be on a sequence: "+str(type(o)))
        return o.resolve(processDict)
    def _clonesequence(self, lookuptable):
        if id(self) not in lookuptable:
            #for sequences held by sequences we need to clone
            # on the first reference
            clone = type(self)(self._name)
            lookuptable[id(self)]=clone
            lookuptable[id(clone)]=clone
        return lookuptable[id(self)]
    def copy(self):
        returnValue =SequencePlaceholder.__new__(type(self))
        returnValue.__init__(self._name)
        return returnValue
    def dumpSequenceConfig(self):
        # NOTE(review): emits python-style text even in the old-config dump — confirm intended
        return 'cms.SequencePlaceholder("%s")' %self._name
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.SequencePlaceholder("%s")'%self._name
    def dumpPython(self, options=PrintOptions()):
        result = 'cms.SequencePlaceholder(\"'
        if options.isCfg:
            result += 'process.'
        result += self._name+'\")\n'
        return result
class Schedule(_ValidatingParameterListBase,_ConfigureComponent,_Unlabelable):
    """An ordered list of Paths/EndPaths/FinalPaths, optionally with associated Tasks."""
    def __init__(self,*arg,**argv):
        super(Schedule,self).__init__(*arg)
        self._tasks = OrderedSet()
        theKeys = list(argv.keys())
        if theKeys:
            # 'tasks' is the only keyword argument accepted
            if len(theKeys) > 1 or theKeys[0] != "tasks":
                raise RuntimeError("The Schedule constructor can only have one keyword argument after its Path and\nEndPath arguments and it must use the keyword 'tasks'")
            taskList = argv["tasks"]
            # Normally we want a list of tasks, but we let it also work if the value is one Task
            if isinstance(taskList,Task):
                self.associate(taskList)
            else:
                try:
                    # Call this just to check that taskList is a list or other iterable object
                    self.__dummy(*taskList)
                except:
                    raise RuntimeError("The Schedule constructor argument with keyword 'tasks' must have a\nlist (or other iterable object) as its value")
                if taskList:
                    self.associate(*taskList)
    def __dummy(self, *args):
        # exists only so __init__ can probe whether 'tasks' is iterable
        pass
    def associate(self,*tasks):
        """Attach one or more Tasks to the Schedule."""
        for task in tasks:
            if not isinstance(task, Task):
                raise TypeError("The associate function in the class Schedule only works with arguments of type Task")
            self._tasks.add(task)
    @staticmethod
    def _itemIsValid(item):
        # only path-like objects may appear in the list itself
        return isinstance(item,Path) or isinstance(item,EndPath) or isinstance(item,FinalPath)
    def copy(self):
        import copy
        # shallow copy of the list; the task set is rebuilt so it is independent
        aCopy = copy.copy(self)
        aCopy._tasks = OrderedSet(self._tasks)
        return aCopy
    def _place(self,label,process):
        process.setPartialSchedule_(self,label)
    def _replaceIfHeldDirectly(self,original,replacement):
        """Only replaces an 'original' with 'replacement' if 'original' is directly held.
        If a contained Path or Task holds 'original' it will not be replaced."""
        didReplace = False
        if original in self._tasks:
            self._tasks.remove(original)
            if replacement is not None:
                self._tasks.add(replacement)
            didReplace = True
        indices = []
        for i, e in enumerate(self):
            if original == e:
                indices.append(i)
        # walk backwards so earlier indices stay valid while popping
        for i in reversed(indices):
            self.pop(i)
            if replacement is not None:
                self.insert(i, replacement)
            didReplace = True
        return didReplace
    def moduleNames(self):
        """Return the set of names of all modules reachable from the Schedule."""
        result = set()
        visitor = NodeNameVisitor(result)
        for seq in self:
            seq.visit(visitor)
        for t in self._tasks:
            t.visit(visitor)
        return result
    def contains(self, mod):
        """True if 'mod' is held (directly or indirectly) by any path or task."""
        visitor = ContainsModuleVisitor(mod)
        for seq in self:
            seq.visit(visitor)
            if visitor.result():
                return True
        for t in self._tasks:
            t.visit(visitor)
            if visitor.result():
                return True
        return visitor.result()
    def tasks(self):
        """Returns the list of Tasks (that may contain other Tasks) that are associated directly to the Schedule."""
        return self._tasks
    def dumpPython(self, options=PrintOptions()):
        """Return the python configuration string for this Schedule."""
        pathNames = ['process.'+p.label_() for p in self]
        if pathNames:
            s=', '.join(pathNames)
        else:
            s = ''
        associationContents = set()
        for task in self._tasks:
            if task.hasLabel_():
                associationContents.add(_Labelable.dumpSequencePython(task, options))
            else:
                associationContents.add(task.dumpPythonNoNewline(options))
        taskStrings = list()
        for iString in sorted(associationContents):
            taskStrings.append(iString)
        if taskStrings and s:
            return 'cms.Schedule(*[ ' + s + ' ], tasks=[' + ', '.join(taskStrings) + '])\n'
        elif s:
            return 'cms.Schedule(*[ ' + s + ' ])\n'
        elif taskStrings:
            return 'cms.Schedule(tasks=[' + ', '.join(taskStrings) + '])\n'
        else:
            return 'cms.Schedule()\n'
    def __str__(self):
        return self.dumpPython()
# Fills a list of all Sequences visited
# Can visit a Sequence, Path, or EndPath
class SequenceVisitor(object):
    """Appends every Sequence encountered during a visit to the supplied list.
    Usable when visiting a Sequence, Path, or EndPath."""
    def __init__(self, d):
        self.deps = d
    def enter(self, visitee):
        if isinstance(visitee, Sequence):
            self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all Tasks visited
# Can visit a Task, Sequence, Path, or EndPath
class TaskVisitor(object):
    """Appends every Task encountered during a visit to the supplied list.
    Usable when visiting a Task, Sequence, Path, or EndPath."""
    def __init__(self, d):
        self.deps = d
    def enter(self, visitee):
        if isinstance(visitee, Task):
            self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all ConditionalTasks visited
# Can visit a ConditionalTask, Sequence, Path, or EndPath
class ConditionalTaskVisitor(object):
    """Appends every ConditionalTask encountered during a visit to the supplied list.
    Usable when visiting a ConditionalTask, Sequence, Path, or EndPath."""
    def __init__(self, d):
        self.deps = d
    def enter(self, visitee):
        if isinstance(visitee, ConditionalTask):
            self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all modules visited.
# Can visit a Sequence, Path, EndPath, or Task
# For purposes of this visitor, a module is considered
# to be an object that is one of these types: EDProducer,
# EDFilter, EDAnalyzer, OutputModule, ESProducer, ESSource,
# Service. The last three of these can only appear on a
# Task, they are not sequenceable. An object of one
# of these types is also called a leaf.
class ModuleNodeVisitor(object):
    """Appends every leaf node (module-like object) encountered to the supplied list."""
    def __init__(self, l):
        self.l = l
    def enter(self, visitee):
        if visitee.isLeaf():
            self.l.append(visitee)
    def leave(self, visitee):
        pass
# Should not be used on Tasks.
# Similar to ModuleNodeVisitor with the following
# differences. It only lists the modules that were
# contained inside a Task. It should only be used
# on Sequences, Paths, and EndPaths.
class ModuleNodeOnTaskVisitor(object):
    """Collects only the leaf nodes that are visited while inside a Task.
    For use on Sequences, Paths, and EndPaths — not on Tasks themselves."""
    def __init__(self, l):
        self.l = l
        self._levelInTasks = 0
    def enter(self, visitee):
        # track Task nesting depth; only record leaves seen at depth > 0
        if isinstance(visitee, Task):
            self._levelInTasks += 1
        if self._levelInTasks > 0 and visitee.isLeaf():
            self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0 and isinstance(visitee, Task):
            self._levelInTasks -= 1
class ModuleNodeOnConditionalTaskVisitor(object):
    """Collects leaf nodes visited while inside a ConditionalTask (plus
    modules contributed by SwitchProducers even outside one)."""
    def __init__(self,l):
        self.l = l
        self._levelInTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, ConditionalTask):
            self._levelInTasks += 1
        # This block gets the modules contained by SwitchProducer. It
        # needs to be before the "levelInTasks == 0" check because the
        # contained modules need to be treated like in ConditionalTask
        # also when the SwitchProducer itself is in the Path.
        if hasattr(visitee, "modulesForConditionalTask_"):
            self.l.extend(visitee.modulesForConditionalTask_())
        if self._levelInTasks == 0:
            return
        if visitee.isLeaf():
            self.l.append(visitee)
        pass
    def leave(self,visitee):
        if self._levelInTasks > 0:
            if isinstance(visitee, ConditionalTask):
                self._levelInTasks -= 1
# Should not be used on Tasks.
# Similar to ModuleNodeVisitor with the following
# differences. It only lists the modules that were
# outside a Task, in the sequenced part of the sequence.
# It should only be used on Sequences, Paths, and
# EndPaths.
class ModuleNodeNotOnTaskVisitor(object):
    """Collects only the leaf nodes visited outside of any Task, i.e. in the
    sequenced part. For use on Sequences, Paths, and EndPaths."""
    def __init__(self, l):
        self.l = l
        self._levelInTasks = 0
    def enter(self, visitee):
        # track Task nesting depth; only record leaves seen at depth 0
        if isinstance(visitee, Task):
            self._levelInTasks += 1
        if self._levelInTasks == 0 and visitee.isLeaf():
            self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0 and isinstance(visitee, Task):
            self._levelInTasks -= 1
# Can visit Tasks, Sequences, Paths, and EndPaths
# result will be set to True if and only if
# the module is in the object directly or
# indirectly through contained Sequences or
# associated Tasks.
class ContainsModuleVisitor(object):
    """Sets its result to True when the watched object is visited.
    Comparison is by identity, not equality."""
    def __init__(self, mod):
        self._mod = mod
        self._result = False
    def result(self):
        return self._result
    def enter(self, visitee):
        if visitee is self._mod:
            self._result = True
    def leave(self, visitee):
        pass
# Can visit Tasks, Sequences, Paths, and EndPaths
# Fills a set of the names of the visited leaves.
# For the labelable ones the name is the label.
# For a Service the name is the type.
# It raises an exception if a labelable object
# does not have a label at all. It will return
# 'None' if the label attribute exists but was set
# to None. If a Service is not attached to the process
# it will also raise an exception.
class NodeNameVisitor(object):
    """Fills a set (passed to the constructor) with the names of all visited leaves.

    For labelable leaves the name is the label (which may be None if the
    label attribute exists but was set to None); otherwise the leaf is
    treated as a Service and its type is used.  Raises RuntimeError for a
    Service that is not attached to the process.
    """
    def __init__(self, l):
        self.l = l
    def enter(self, visitee):
        if not visitee.isLeaf():
            return
        if isinstance(visitee, _Labelable):
            self.l.add(visitee.label_())
        elif visitee._inProcess:
            self.l.add(visitee.type_())
        else:
            raise RuntimeError("Service not attached to process: {}".format(visitee.dumpPython()))
    def leave(self, visitee):
        pass
# This visitor works only with Sequences, Paths and EndPaths
# It will not work on Tasks
class ExpandVisitor(object):
    """Expands the visited sequence into leaves and UnaryOperators.

    After visiting, result() builds a new object of the type passed to the
    constructor, containing the flattened sequenced leaves followed by a
    Task and/or ConditionalTask that holds the leaves found on associated
    tasks.  Works only with Sequences, Paths and EndPaths, not Tasks.
    """
    def __init__(self, type):
        # type: the class (e.g. Path, EndPath, Sequence) used to build result()
        self._type = type
        # leaves found in the sequenced part
        self.l = []
        # leaves found on associated Tasks (outside ConditionalTasks)
        self.taskLeaves = []
        # leaves on Tasks that are nested inside ConditionalTasks
        self.taskLeavesInConditionalTasks = []
        # alias that switches between the two task-leaf lists above,
        # depending on whether we are currently inside a ConditionalTask
        self.presentTaskLeaves = self.taskLeaves
        self._levelInTasks = 0
        # leaves found directly on ConditionalTasks
        self.conditionaltaskLeaves = []
        self._levelInConditionalTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
            return
        if isinstance(visitee, ConditionalTask):
            # Tasks seen from here on belong to the ConditionalTask
            self.presentTaskLeaves = self.taskLeavesInConditionalTasks
            self._levelInConditionalTasks += 1
            return
        if visitee.isLeaf():
            if self._levelInTasks > 0:
                self.presentTaskLeaves.append(visitee)
            elif self._levelInConditionalTasks > 0:
                self.conditionaltaskLeaves.append(visitee)
            else:
                self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0:
            if isinstance(visitee, Task):
                self._levelInTasks -= 1
            return
        if self._levelInConditionalTasks > 0:
            if isinstance(visitee, ConditionalTask):
                self._levelInConditionalTasks -= 1
                if 0 == self._levelInConditionalTasks:
                    # back outside all ConditionalTasks: plain Tasks feed
                    # taskLeaves again
                    self.presentTaskLeaves = self.taskLeaves
            return
        if isinstance(visitee,_UnarySequenceOperator):
            # replace the just-appended operand with the operator wrapping it
            self.l[-1] = visitee
    def result(self):
        tsks = []
        if self.taskLeaves:
            tsks.append(Task(*self.taskLeaves))
        if self.conditionaltaskLeaves:
            ct = ConditionalTask(*self.conditionaltaskLeaves)
            if self.taskLeavesInConditionalTasks:
                ct.append(*self.taskLeavesInConditionalTasks)
            tsks.append(ct)
        if len(self.l) > 0:
            # sum(self.l) is not usable here: sum starts from 0, which
            # cannot be added to sequence elements
            seq = self.l[0]
            for el in self.l[1:]:
                seq += el
            return self._type(seq, *tsks)
        else:
            return self._type(*tsks)
    def resultString(self):
        # '+'-joined sequenced leaves, then ','-joined task leaves
        sep = ''
        returnValue = ''
        for m in self.l:
            if m is not None:
                returnValue += sep+str(m)
                sep = '+'
        if returnValue:
            sep = ','
        for n in self.taskLeaves:
            if n is not None:
                returnValue += sep+str(n)
                sep = ','
        return returnValue
# This visitor is only meant to run on Sequences, Paths, and EndPaths
# It intentionally ignores nodes on Tasks when it does this.
class DecoratedNodeNameVisitor(object):
    """Fills a list with the decorated names of the visited leaves.

    A leaf preceded by a _UnarySequenceOperator gets that operator's
    decoration (e.g. '!' or '-') prepended to its label.  Boolean logic
    operators contribute their operator string, and '@' marks where the
    visitor backs out of a boolean expression subtree.  Leaves inside
    Tasks are ignored.  Only meant to run on Sequences, Paths, and
    EndPaths.  Takes a list.
    """
    def __init__(self,l):
        self.l = l
        # decoration to prepend to the next leaf's label; set when a
        # _UnarySequenceOperator is entered, cleared otherwise
        self._decoration =''
        self._levelInTasks = 0
    def initialize(self):
        # reset all state so the visitor can be reused
        self.l[:] = []
        self._decoration =''
        self._levelInTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, _TaskBase):
            self._levelInTasks += 1
        if self._levelInTasks > 0:
            return
        if visitee.isLeaf():
            if hasattr(visitee, "_Labelable__label"):
                self.l.append(self._decoration+visitee.label_())
            else:
                error = "An object in a sequence was not found in the process\n"
                if hasattr(visitee, "_filename"):
                    error += "From file " + visitee._filename
                else:
                    error += "Dump follows\n" + repr(visitee)
                raise RuntimeError(error)
        if isinstance(visitee,_BooleanLogicExpression):
            self.l.append(self._decoration+visitee.operatorString())
        # the decoration of a unary operator applies to the operand that
        # will be entered next; any other visitee clears it
        if isinstance(visitee,_UnarySequenceOperator):
            self._decoration=visitee.decoration()
        else:
            self._decoration=''
    def leave(self,visitee):
        # Ignore if this visitee is inside a Task
        if self._levelInTasks > 0:
            if isinstance(visitee, _TaskBase):
                self._levelInTasks -= 1
            return
        if isinstance(visitee,_BooleanLogicExpression):
            #need to add the 'go back' command to keep track of where we are in the tree
            self.l.append('@')
# This visitor is only meant to run on Sequences, Paths, and EndPaths
# Similar to DecoratedNodeNameVisitor. The only difference
# is it also builds a separate list of leaves on Tasks.
class DecoratedNodeNamePlusVisitor(object):
    """Fills a list with the decorated names ('!' or '-' prepended as
    needed) of the visited leaves, like DecoratedNodeNameVisitor, and in
    addition builds a separate list of the leaves found on Tasks.
    Takes a list.
    """
    def __init__(self,l):
        self.l = l
        # decoration to prepend to the next leaf's label
        self._decoration =''
        self._levelInTasks = 0
        # leaves encountered while inside a Task
        self._leavesOnTasks = []
    def initialize(self):
        # reset all state so the visitor can be reused
        self.l[:] = []
        self._decoration =''
        self._levelInTasks = 0
        self._leavesOnTasks[:] = []
    def enter(self,visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
        if self._levelInTasks > 0:
            # inside a Task: record the leaf separately, undecorated
            if visitee.isLeaf():
                self._leavesOnTasks.append(visitee)
            return
        if visitee.isLeaf():
            if hasattr(visitee, "_Labelable__label"):
                self.l.append(self._decoration+visitee.label_())
            else:
                error = "An object in a sequence was not found in the process\n"
                if hasattr(visitee, "_filename"):
                    error += "From file " + visitee._filename
                else:
                    error += "Dump follows\n" + repr(visitee)
                raise RuntimeError(error)
        if isinstance(visitee,_BooleanLogicExpression):
            self.l.append(self._decoration+visitee.operatorString())
        # the decoration of a unary operator applies to the operand that
        # will be entered next; any other visitee clears it
        if isinstance(visitee,_UnarySequenceOperator):
            self._decoration=visitee.decoration()
        else:
            self._decoration=''
    def leave(self,visitee):
        # Ignore if this visitee is inside a Task
        if self._levelInTasks > 0:
            if isinstance(visitee, Task):
                self._levelInTasks -= 1
            return
        if isinstance(visitee,_BooleanLogicExpression):
            #need to add the 'go back' command to keep track of where we are in the tree
            self.l.append('@')
    def leavesOnTasks(self):
        # leaves collected from Tasks during the visitation
        return self._leavesOnTasks
class _CopyAndExcludeSequenceVisitorOld(object):
    """Traverses a Sequence and constructs a new sequence which does not contain modules from the specified list"""
    # Older stack-based implementation, kept alongside the
    # _MutatingSequenceVisitor-based version further below.
    def __init__(self,modulesToRemove):
        self.__modulesToIgnore = modulesToRemove
        # __stack holds one list per nesting level; each element of such a
        # list is a pair [node, changed] where node is None if it was removed
        self.__stack = list()
        self.__stack.append(list())
        self.__result = None
        self.__didExclude = False
    def enter(self,visitee):
        if len(self.__stack) > 0:
            #add visitee to its parent's stack entry
            self.__stack[-1].append([visitee,False])
        if visitee.isLeaf():
            if visitee in self.__modulesToIgnore:
                self.__didExclude = True
                self.__stack[-1][-1]=[None,True]
        elif isinstance(visitee, Sequence):
            if visitee in self.__modulesToIgnore:
                self.__didExclude = True
                self.__stack[-1][-1]=[None,True]
            # open a new level to collect this sequence's children
            self.__stack.append(list())
        else:
            #need to add a stack entry to keep track of children
            self.__stack.append(list())
    def leave(self,visitee):
        node = visitee
        if not visitee.isLeaf():
            #were any children changed?
            l = self.__stack[-1]
            changed = False
            countNulls = 0
            nonNulls = list()
            for c in l:
                if c[1] == True:
                    changed = True
                if c[0] is None:
                    countNulls +=1
                else:
                    nonNulls.append(c[0])
            if changed:
                self.__didExclude = True
                if countNulls != 0:
                    #this node must go away
                    if len(nonNulls) == 0:
                        #all subnodes went away
                        node = None
                    else:
                        # chain the surviving children together with '+'
                        node = nonNulls[0]
                        for n in nonNulls[1:]:
                            node = node+n
                else:
                    #some child was changed so we need to clone
                    # this node and replace it with one that holds
                    # the new child(ren)
                    children = [x[0] for x in l ]
                    if not isinstance(visitee,Sequence):
                        node = visitee.__new__(type(visitee))
                        node.__init__(*children)
                    else:
                        node = nonNulls[0]
            if node != visitee:
                #we had to replace this node so now we need to
                # change parent's stack entry as well
                if len(self.__stack) > 1:
                    p = self.__stack[-2]
                    #find visitee and replace
                    for i,c in enumerate(p):
                        if c[0]==visitee:
                            c[0]=node
                            c[1]=True
                            break
        if not visitee.isLeaf():
            # drop this level's child list
            self.__stack = self.__stack[:-1]
    def result(self):
        # join the surviving top-level nodes with '+'; None if all removed
        result = None
        for n in (x[0] for x in self.__stack[0]):
            if n is None:
                continue
            if result is None:
                result = n
            else:
                result = result+n
        return result
    def didExclude(self):
        # True if at least one module was actually removed
        return self.__didExclude
# This visitor can also be used on Tasks.
class _MutatingSequenceVisitor(object):
    """Traverses a Sequence and constructs a new sequence by applying the operator to each element of the sequence"""
    # In many cases this operates in an intuitive manner that needs
    # no explanation, but there are some complex cases and I will try to
    # explain these in the following comments.
    #
    # First of all the top level Sequence or Task being visited may contain
    # many objects of different types. These contained objects are never
    # modified. If they are not left the same, they are instead replaced
    # by other instances, replaced by new instances or removed.
    # Contained objects are only replaced or removed when they were directly
    # modified or if they contain something that was modified.
    # If all the contents of a Sequence, Task, _SequenceNegation or _SequenceIgnore
    # object that is not at the top level are removed, then the containing
    # object is also removed.
    # If the contents of a Sequence other than the top level sequence are
    # modified, then the sequence elements and Task objects it contains get
    # passed up to be included in the top level sequence. If the contents of
    # a Task are modified, a new Task object is created and passed up to be
    # included in the top level Sequence or Task. If it is a _SequenceNegation
    # or _SequenceIgnore instance it will simply be removed completely if its
    # operand is removed. If the operand is replaced then a new object of the
    # same type will be constructed replacing the old.
    #
    # Note that if a Sequence contains a SequencePlaceholder, the future contents
    # of that placeholder are not affected by the changes. If that is an issue,
    # then you probably want to resolve the placeholders before using this
    # class.
    #
    # If this is used multiple times on the same sequence or task, the consequences
    # might interfere with one another in unusual cases.
    #
    # One example, the matching to find objects to modify is based on instances
    # (the python id) being the same. So if you modify the contents of a Task or
    # Sequence and then subsequently try to modify that Sequence or Task, then
    # it will either no longer exist or be a different instance and so nothing
    # would get modified. Note that the one exception to this matching by instance
    # is _SequenceIgnore and _SequenceNegation. In that case, two objects are
    # recognized as matching if the contained module is the same instance instead
    # of requiring the _SequenceNegation or _SequenceIgnore object to be the same
    # instance.
    #
    # Another example. There is an input operator that removes the first instance
    # of an object. Applying this visitor with that operation might give unexpected
    # results if another operation previously changed the number of times the
    # that instance appears or the order it appears in the visitation. This
    # should only be an issue if the item is on a Task and even then only in
    # unusual circumstances.
    def __init__(self,operator):
        # operator: callable applied to every visited element; it returns the
        # element unchanged, a replacement object, or None to remove it
        self.__operator = operator
        # You add a list to the __stack when entering any non-Leaf object
        # and pop the last element when leaving any non-Leaf object
        self.__stack = list()
        self.__stack.append(list())
        self.__didApply = False
        self.__levelInModifiedNonLeaf = 0
    def enter(self,visitee):
        # Ignore the content of replaced or removed Sequences,
        # Tasks, and operators.
        if self.__levelInModifiedNonLeaf > 0:
            if not visitee.isLeaf():
                self.__levelInModifiedNonLeaf += 1
            return
        # Just a sanity check
        if not len(self.__stack) > 0:
            raise RuntimeError("LogicError Empty stack in MutatingSequenceVisitor.\n"
                               "This should never happen. Contact a Framework developer.")
        # The most important part.
        # Apply the operator that might change things, The rest
        # of the class is just dealing with side effects of these changes.
        v = self.__operator(visitee)
        if v is visitee:
            # the operator did not change the visitee
            # The 3 element list being appended has the following contents
            # element 0 - either the unmodified object, the modified object, or
            #   a sequence collection when it is a Sequence whose contents have
            #   been modified.
            # element 1 - Indicates whether the object was modified.
            # element 2 - None or a list of tasks for a Sequence
            #   whose contents have been modified.
            self.__stack[-1].append([visitee, False, None])
            if not visitee.isLeaf():
                # need to add a list to keep track of the contents
                # of the Sequence, Task, or operator we just entered.
                self.__stack.append(list())
        else:
            # the operator changed the visitee
            self.__didApply = True
            self.__stack[-1].append([v, True, None])
            if not visitee.isLeaf():
                # Set flag to indicate modified Sequence, Task, or operator
                self.__levelInModifiedNonLeaf = 1
    def leave(self,visitee):
        # nothing to do for leaf types because they do not have contents
        if visitee.isLeaf():
            return
        # Ignore if this visitee is inside something that was already removed
        # or replaced.
        if self.__levelInModifiedNonLeaf > 0:
            self.__levelInModifiedNonLeaf -= 1
            return
        # Deal with visitees which have contents (Sequence, Task, _SequenceIgnore,
        # or _SequenceNegation) and although we know the visitee itself did not get
        # changed by the operator, the contents of the visitee might have been changed.
        # did any object inside the visitee change?
        contents = self.__stack[-1]
        changed = False
        allNull = True
        for c in contents:
            if c[1] == True:
                changed = True
            if c[0] is not None:
                allNull = False
        if changed:
            if allNull:
                # everything inside was removed, so remove the visitee too
                self.__stack[-2][-1] = [None, True, None]
            elif isinstance(visitee, _UnarySequenceOperator):
                # rebuild the operator around its (replaced) operand
                node = visitee.__new__(type(visitee))
                node.__init__(contents[0][0])
                self.__stack[-2][-1] = [node, True, None]
            elif isinstance(visitee, _TaskBase):
                # build a new task holding the surviving contents
                nonNull = []
                for c in contents:
                    if c[0] is not None:
                        nonNull.append(c[0])
                self.__stack[-2][-1] = [visitee._makeInstance(*nonNull), True, None]
            elif isinstance(visitee, Sequence):
                # flatten the modified Sequence: sequence elements go into a
                # _SequenceCollection, contained Tasks are passed up separately
                seq = _SequenceCollection()
                tasks = list()
                for c in contents:
                    if c[0] is None:
                        continue
                    if isinstance(c[0], _TaskBase):
                        tasks.append(c[0])
                    else:
                        seq = seq + c[0]
                    if c[2] is not None:
                        tasks.extend(c[2])
                self.__stack[-2][-1] = [seq, True, tasks]
        # When you exit the Sequence, Task, or operator,
        # drop the list which holds information about
        # its contents.
        if not visitee.isLeaf():
            self.__stack = self.__stack[:-1]
    def result(self, visitedContainer):
        # For a Task input, return the list of surviving contents.
        # For a Sequence input, return [sequenceCollection, tasks].
        if isinstance(visitedContainer, _TaskBase):
            result = list()
            for n in (x[0] for x in self.__stack[0]):
                if n is not None:
                    result.append(n)
            return result
        seq = _SequenceCollection()
        tasks = list()
        for c in self.__stack[0]:
            if c[0] is None:
                continue
            if isinstance(c[0], _TaskBase):
                tasks.append(c[0])
            else:
                seq = seq + c[0]
            if c[2] is not None:
                tasks.extend(c[2])
        return [seq, tasks]
    def _didApply(self):
        # True if the operator modified at least one element
        return self.__didApply
# This visitor can also be used on Tasks.
class _CopyAndRemoveFirstSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a Sequence or Task and constructs a copy which omits the
    first occurrence of the specified module."""
    def __init__(self,moduleToRemove):
        class _RemoveFirstOperator(object):
            """Operator returning None (i.e. remove) for the first identity
            match of the target module, and the unchanged item otherwise."""
            def __init__(self,moduleToRemove):
                self.__moduleToRemove = moduleToRemove
                self.__found = False
            def __call__(self,test):
                if not self.__found and test is self.__moduleToRemove:
                    self.__found = True
                    return None
                return test
        # super(type(self), self) would recurse infinitely if this class were
        # ever subclassed; the zero-argument form binds correctly.
        super().__init__(_RemoveFirstOperator(moduleToRemove))
    def didRemove(self):
        """Returns True if the module was found and removed."""
        return self._didApply()
# This visitor can also be used on Tasks.
class _CopyAndExcludeSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a Sequence or Task and constructs a copy which omits all
    modules in the specified list."""
    def __init__(self,modulesToRemove):
        class _ExcludeOperator(object):
            """Operator returning None (i.e. remove) for every item found in
            the exclusion list, and the unchanged item otherwise."""
            def __init__(self,modulesToRemove):
                self.__modulesToIgnore = modulesToRemove
            def __call__(self,test):
                # Use the stored attribute rather than the enclosing-scope
                # variable so the operator object is self-contained.
                if test in self.__modulesToIgnore:
                    return None
                return test
        # zero-argument super() is safe under subclassing, unlike
        # super(type(self), self)
        super().__init__(_ExcludeOperator(modulesToRemove))
    def didExclude(self):
        """Returns True if at least one module was excluded."""
        return self._didApply()
# This visitor can also be used on Tasks.
class _CopyAndReplaceSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a Sequence or Task and constructs a copy in which a
    specified module is replaced by a different module."""
    def __init__(self,target,replace):
        class _ReplaceOperator(object):
            """Operator returning the replacement for items equal to the
            target, and the unchanged item otherwise."""
            def __init__(self,target,replace):
                self.__target = target
                self.__replace = replace
            def __call__(self,test):
                # NOTE: equality (==) is used here, unlike the identity
                # test used by _RemoveFirstOperator.
                if test == self.__target:
                    return self.__replace
                return test
        # zero-argument super() is safe under subclassing, unlike
        # super(type(self), self)
        super().__init__(_ReplaceOperator(target,replace))
    def didReplace(self):
        """Returns True if at least one replacement was made."""
        return self._didApply()
class _TaskBase(_ConfigureComponent, _Labelable) :
    """Base class implementing the behavior shared by Task and ConditionalTask.

    Holds the contained items in an OrderedSet and provides the common
    container operations (add, visit, copy, replace, remove, expand, dump).
    Subclasses supply _taskType, _makeInstance, _allowedInTask and
    _mustResolve.
    """
    def __init__(self, *items):
        self._collection = OrderedSet()
        self.add(*items)
    def __setattr__(self,name,value):
        # Only private (underscore-prefixed) attributes may be set;
        # tasks do not carry parameters.
        if not name.startswith("_"):
            raise AttributeError("You cannot set parameters for {} objects.".format(self._taskType()))
        else:
            self.__dict__[name] = value
    def add(self, *items):
        # Add items, rejecting any type not allowed on this kind of task.
        for item in items:
            if not self._allowedInTask(item):
                raise RuntimeError("Adding an entry of type '{0}' to a {1}.\n"
                                   "It is illegal to add this type to a {1}.".format(type(item).__name__, self._taskType()))
            self._collection.add(item)
    def fillContents(self, taskContents, options=PrintOptions()):
        # Adds the dump strings of the contents to the taskContents set.
        # only dump the label, if possible
        if self.hasLabel_():
            taskContents.add(_Labelable.dumpSequencePython(self, options))
        else:
            for i in self._collection:
                if isinstance(i, _TaskBase):
                    i.fillContents(taskContents, options)
                else:
                    taskContents.add(i.dumpSequencePython(options))
    def dumpPython(self, options=PrintOptions()):
        s = self.dumpPythonNoNewline(options)
        return s + "\n"
    def dumpPythonNoNewline(self, options=PrintOptions()):
        """Returns a string which is the python representation of the object"""
        # collect the dump strings of all contents (sub-tasks flattened)
        taskContents = set()
        for i in self._collection:
            if isinstance(i, _TaskBase):
                i.fillContents(taskContents, options)
            else:
                taskContents.add(i.dumpSequencePython(options))
        s=''
        iFirst = True
        for item in sorted(taskContents):
            if not iFirst:
                s += ", "
            iFirst = False
            s += item
        if len(taskContents) > 255:
            # NOTE(review): presumably works around the historical 255-argument
            # call limit by passing one unpacked list instead -- confirm
            s = "*[" + s + "]"
        return "cms.{}({})".format(self._taskType(),s)
    def directDependencies(self,sortByType=True):
        return findDirectDependencies(self, self._collection,sortByType=sortByType)
    def _isTaskComponent(self):
        return False
    def isLeaf(self):
        return False
    def visit(self,visitor):
        # Depth-first visitation: enter each item, recurse into non-leaves,
        # then leave.
        for i in self._collection:
            visitor.enter(i)
            if not i.isLeaf():
                i.visit(visitor)
            visitor.leave(i)
    def _errorstr(self):
        # NOTE(review): taskType_ is not defined in this class (only
        # _taskType is) -- verify this method is actually exercised
        return "{}(...)".format(self.taskType_())
    def __iter__(self):
        for key in self._collection:
            yield key
    def __str__(self):
        # comma-separated string of all leaf modules (sub-tasks flattened)
        l = []
        v = ModuleNodeVisitor(l)
        self.visit(v)
        s = ''
        for i in l:
            if s:
                s += ', '
            s += str (i)
        return s
    def __repr__(self):
        s = str(self)
        return "cms."+type(self).__name__+'('+s+')\n'
    def moduleNames(self):
        """Returns a set containing the names of all modules being used"""
        result = set()
        visitor = NodeNameVisitor(result)
        self.visit(visitor)
        return result
    def contains(self, mod):
        # True if mod is in this task directly or via contained tasks
        visitor = ContainsModuleVisitor(mod)
        self.visit(visitor)
        return visitor.result()
    def copy(self):
        # shallow copy: the contained items are shared with the original
        return self._makeInstance(*self._collection)
    def copyAndExclude(self,listOfModulesToExclude):
        """Returns a copy of the sequence which excludes those module in 'listOfModulesToExclude'"""
        # You can exclude instances of these types EDProducer, EDFilter, ESSource, ESProducer,
        # Service, or Task.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        for i in listOfModulesToExclude:
            if not i._isTaskComponent():
                raise TypeError("copyAndExclude can only exclude objects that can be placed on a Task")
        v = _CopyAndExcludeSequenceVisitor(listOfModulesToExclude)
        self.visit(v)
        return self._makeInstance(*v.result(self))
    def copyAndAdd(self, *modulesToAdd):
        """Returns a copy of the Task adding modules/tasks"""
        t = self.copy()
        t.add(*modulesToAdd)
        return t
    def expandAndClone(self):
        # Name of this function is not very good. It makes a shallow copy with all
        # the subTasks flattened out (removed), but keeping all the
        # modules that were in those subTasks as well as the top level
        # ones.
        l = []
        v = ModuleNodeVisitor(l)
        self.visit(v)
        return self._makeInstance(*l)
    def replace(self, original, replacement):
        """Finds all instances of 'original' and substitutes 'replacement' for them.
           Returns 'True' if a replacement occurs."""
        # This works for either argument being of type EDProducer, EDFilter, ESProducer,
        # ESSource, Service, or Task.
        #
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        if not self._allowedInTask(original) or (not replacement is None and not self._allowedInTask(replacement)):
            raise TypeError("The {0} replace function only works with objects that can be placed on a {0}\n".format(self._taskType()) + \
                            " replace was called with original type = {}\n".format(str(type(original))) + \
                            " and replacement type = {}\n".format(str(type(replacement))))
        else:
            v = _CopyAndReplaceSequenceVisitor(original,replacement)
            self.visit(v)
            if v.didReplace():
                self._collection.clear()
                self.add(*v.result(self))
            return v.didReplace()
    def remove(self, something):
        """Remove the first occurrence of a module
           Returns 'True' if the module has been removed, False if it was not found"""
        # You can remove instances of these types EDProducer, EDFilter, ESSource,
        # ESProducer, Service, or Task.
        #
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        #
        # Works very similar to copyAndExclude, there are 2 differences. This changes
        # the object itself instead of making a copy and second it only removes
        # the first instance of the argument instead of all of them.
        if not self._allowedInTask(something):
            raise TypeError("remove only works with objects that can be placed on a Task")
        v = _CopyAndRemoveFirstSequenceVisitor(something)
        self.visit(v)
        if v.didRemove():
            self._collection.clear()
            self.add(*v.result(self))
        return v.didRemove()
    def resolve(self, processDict,keepIfCannotResolve=False):
        # Replace any contained placeholders (and recurse into sub-tasks)
        # using the process dictionary.
        temp = OrderedSet()
        for i in self._collection:
            if self._mustResolve(i):
                temp.add(i.resolve(processDict,keepIfCannotResolve))
            else:
                temp.add(i)
        self._collection = temp
        return self
class _TaskBasePlaceholder(object):
    """Common implementation for TaskPlaceholder and ConditionalTaskPlaceholder.

    Stands in for a task that will be attached to the process later; the
    placeholder is swapped for the real object when resolve() is called
    with the process dictionary.
    """
    def __init__(self, name):
        self._name = name
    def _isTaskComponent(self):
        return False
    def isLeaf(self):
        return False
    def visit(self, visitor):
        # a placeholder has no contents to visit
        pass
    def __str__(self):
        return self._name
    def insertInto(self, parameterSet, myname):
        raise RuntimeError("The {} {} was never overridden".format(self._typeName(), self._name))
    def resolve(self, processDict, keepIfCannotResolve=False):
        if self._name not in processDict:
            if keepIfCannotResolve:
                return self
            raise RuntimeError("The {} {} cannot be resolved.\n Known keys are: {}".format(self._typeName(), self._name,str(processDict.keys())))
        resolved = processDict[self._name]
        if not self._allowedInTask(resolved):
            raise RuntimeError("The {} {} refers to an object type which is not allowed to be on a task: {}".format(self._typeName(), self._name, str(type(resolved))))
        # a resolved task may itself contain placeholders
        if isinstance(resolved, self._taskClass()):
            return resolved.resolve(processDict)
        return resolved
    def copy(self):
        return self._makeInstance(self._name)
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.{}("{}")'.format(self._typeName(), self._name)
    def dumpPython(self, options=PrintOptions()):
        prefix = 'process.' if options.isCfg else ''
        return 'cms.{}(\"'.format(self._typeName()) + prefix + self._name + '\")\n'
class Task(_TaskBase) :
    """Unordered collection of EDProducers, EDFilters, ESProducers, ESSources,
    Services, and other Tasks.

    A Task can be associated with Sequences, Paths, EndPaths,
    ConditionalTasks and the Schedule.  An EDProducer or EDFilter on a
    Task associated with the Schedule or a scheduled Path or EndPath
    (directly or indirectly through Sequences) is enabled to run
    unscheduled, provided it is not itself on any scheduled Path or
    EndPath.  ESSources, ESProducers, and Services on such a Task are
    enabled to run; in other cases they run if and only if they are not
    on any Task attached to the process.
    """
    @staticmethod
    def _taskType():
        return "Task"
    def _place(self, name, proc):
        proc._placeTask(name,self)
    def _isTaskComponent(self):
        return True
    @staticmethod
    def _makeInstance(*items):
        return Task(*items)
    @staticmethod
    def _allowedInTask(item ):
        # task components and placeholders are both acceptable
        if isinstance(item, TaskPlaceholder):
            return True
        return isinstance(item, _ConfigureComponent) and item._isTaskComponent()
    @staticmethod
    def _mustResolve(item):
        return isinstance(item, (Task, TaskPlaceholder))
class TaskPlaceholder(_TaskBasePlaceholder):
    """Placeholder for a Task that will be attached to the process later."""
    def _isTaskComponent(self):
        return True
    @staticmethod
    def _typeName():
        return "TaskPlaceholder"
    @staticmethod
    def _makeInstance(name):
        return TaskPlaceholder(name)
    @staticmethod
    def _allowedInTask(obj):
        # same acceptance rule as Task itself
        return Task._allowedInTask(obj)
    @staticmethod
    def _taskClass():
        return Task
class ConditionalTask(_TaskBase) :
    """Unordered collection of EDProducers, EDFilters, ESProducers, ESSources,
    Services, Tasks and ConditionalTasks.

    A ConditionalTask can be associated with Sequences, Paths, and
    EndPaths.  An EDProducer or EDFilter on the ConditionalTask is added
    to a Path or EndPath based on which other modules on the Path consume
    its data products.  If that module ends up placed after an EDFilter,
    it only runs when the EDFilter passes; if no module on the Path needs
    its products, it is treated as if it were on a Task.
    """
    @staticmethod
    def _taskType():
        return "ConditionalTask"
    def _place(self, name, proc):
        proc._placeConditionalTask(name,self)
    def _isTaskComponent(self):
        return False
    @staticmethod
    def _makeInstance(*items):
        return ConditionalTask(*items)
    @staticmethod
    def _allowedInTask(item):
        # ConditionalTasks accept everything a Task does, plus nested
        # ConditionalTasks and their placeholders
        if isinstance(item, (ConditionalTask, ConditionalTaskPlaceholder)):
            return True
        return Task._allowedInTask(item)
    @staticmethod
    def _mustResolve(item):
        if isinstance(item, (ConditionalTask, ConditionalTaskPlaceholder)):
            return True
        return Task._mustResolve(item)
class ConditionalTaskPlaceholder(_TaskBasePlaceholder):
    """Placeholder for a ConditionalTask that will be attached to the process later."""
    def _isTaskComponent(self):
        return False
    @staticmethod
    def _typeName():
        return "ConditionalTaskPlaceholder"
    @staticmethod
    def _makeInstance(name):
        return ConditionalTaskPlaceholder(name)
    @staticmethod
    def _allowedInTask(obj):
        # accepts anything allowed on either a Task or a ConditionalTask
        return Task._allowedInTask(obj) or ConditionalTask._allowedInTask(obj)
    @staticmethod
    def _taskClass():
        return ConditionalTask
if __name__=="__main__":
import unittest
    class DummyModule(_Labelable, _SequenceLeaf, _ConfigureComponent):
        """Minimal stand-in for a module; used only by the tests below."""
        def __init__(self,name):
            self.setLabel(name)
        def _isTaskComponent(self):
            # allow DummyModule instances to be placed on Tasks
            return True
        def __repr__(self):
            return self.label_()
    class DummyBooleanModule(_Labelable, _BooleanLogicSequenceLeaf):
        """Minimal stand-in for a module usable in boolean logic expressions."""
        def __init__(self,name):
            self.setLabel(name)
class TestModuleCommand(unittest.TestCase):
        def setUp(self):
            """Nothing to do; no per-test fixtures are needed."""
            pass
        def testBoolean(self):
            # Exercises boolean expressions (&, |, ~, ignore, wait) in Paths,
            # checking both the python dump and the decorated-name visitation.
            a = DummyBooleanModule("a")
            b = DummyBooleanModule("b")
            p = Path( a & b)
            self.assertEqual(p.dumpPython(),"cms.Path(process.a&process.b)\n")
            l = list()
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            self.assertEqual(l,['&','a','b','@'])
            p2 = Path( a | b)
            self.assertEqual(p2.dumpPython(),"cms.Path(process.a|process.b)\n")
            l[:]=[]
            p2.visit(namesVisitor)
            self.assertEqual(l,['|','a','b','@'])
            c = DummyBooleanModule("c")
            d = DummyBooleanModule("d")
            p3 = Path(a & b & c & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            l[:]=[]
            p3.visit(namesVisitor)
            self.assertEqual(l,['&','a','b','c','d','@'])
            # different parenthesizations of an all-'&' expression are equivalent
            p3 = Path(((a & b) & c) & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path(a & (b & (c & d)))
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path((a & b) & (c & d))
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path(a & (b & c) & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p4 = Path(a | b | c | d)
            self.assertEqual(p4.dumpPython(),"cms.Path(process.a|process.b|process.c|process.d)\n")
            # mixed '&'/'|' expressions respect python operator precedence
            p5 = Path(a | b & c & d )
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a|(process.b&process.c&process.d))\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','a','&','b','c','d','@','@'])
            p5 = Path(a & b | c & d )
            self.assertEqual(p5.dumpPython(),"cms.Path((process.a&process.b)|(process.c&process.d))\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','&','a','b','@','&','c','d','@','@'])
            p5 = Path(a & (b | c) & d )
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a&(process.b|process.c)&process.d)\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['&','a','|','b','c','@','d','@'])
            p5 = Path(a & b & c | d )
            self.assertEqual(p5.dumpPython(),"cms.Path((process.a&process.b&process.c)|process.d)\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','&','a','b','c','@','d','@'])
            # unary decorations: '!' for ~, '-' for ignore, '|' for wait,
            # '+' for wait+ignore combined (in either order)
            p6 = Path( a & ~b)
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(~process.b))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','!b','@'])
            p6 = Path( a & ignore(b))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.ignore(process.b)))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','-b','@'])
            p6 = Path( a & wait(b))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(process.b)))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','|b','@'])
            p6 = Path( a & wait(ignore(b)))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(cms.ignore(process.b))))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','+b','@'])
            p6 = Path( a & ignore(wait(b)))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(cms.ignore(process.b))))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','+b','@'])
            p6 = Path(~(a&b))
            self.assertEqual(p6.dumpPython(),"cms.Path(~(process.a&process.b))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['!&','a','b','@'])
        def testTaskConstructor(self):
            # A ConditionalTask may not be placed inside a Task.
            a = DummyModule("a")
            self.assertRaises(RuntimeError, lambda : Task(ConditionalTask(a)) )
        def testDumpPython(self):
            """dumpPython() renders paths, sequences, tasks and placeholders as the
            cms.* Python configuration syntax; '*' and '+' both dump as '+'."""
            a = DummyModule("a")
            b = DummyModule('b')
            p = Path((a*b))
            #print p.dumpConfig('')
            self.assertEqual(p.dumpPython(),"cms.Path(process.a+process.b)\n")
            p2 = Path((b+a))
            #print p2.dumpConfig('')
            self.assertEqual(p2.dumpPython(),"cms.Path(process.b+process.a)\n")
            c = DummyModule('c')
            p3 = Path(c*(a+b))
            #print p3.dumpConfig('')
            self.assertEqual(p3.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p4 = Path(c*a+b)
            #print p4.dumpConfig('')
            self.assertEqual(p4.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            # decorated leaves dump with their cms.* wrapper; wait+ignore always
            # dumps in cms.wait(cms.ignore(...)) order regardless of nesting order
            p5 = Path(a+ignore(b))
            #print p5.dumpConfig('')
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a+cms.ignore(process.b))\n")
            p5a = Path(a+wait(b))
            self.assertEqual(p5a.dumpPython(),"cms.Path(process.a+cms.wait(process.b))\n")
            p5b = Path(a+ignore(wait(b)))
            self.assertEqual(p5b.dumpPython(),"cms.Path(process.a+cms.wait(cms.ignore(process.b)))\n")
            p5c = Path(a+wait(ignore(b)))
            self.assertEqual(p5c.dumpPython(),"cms.Path(process.a+cms.wait(cms.ignore(process.b)))\n")
            p6 = Path(c+a*b)
            #print p6.dumpConfig('')
            self.assertEqual(p6.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p7 = Path(a+~b)
            self.assertEqual(p7.dumpPython(),"cms.Path(process.a+~process.b)\n")
            p8 = Path((a+b)*c)
            self.assertEqual(p8.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
            # Task arguments dump after the schedulable part, sorted by content
            t1 = Task(a)
            t2 = Task(c, b)
            t3 = Task()
            p9 = Path((a+b)*c, t1)
            self.assertEqual(p9.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.Task(process.a))\n")
            p10 = Path((a+b)*c, t2, t1)
            self.assertEqual(p10.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.Task(process.a), cms.Task(process.b, process.c))\n")
            p11 = Path(t1, t2, t3)
            self.assertEqual(p11.dumpPython(),"cms.Path(cms.Task(), cms.Task(process.a), cms.Task(process.b, process.c))\n")
            d = DummyModule("d")
            e = DummyModule('e')
            f = DummyModule('f')
            t4 = Task(d, Task(f))
            s = Sequence(e, t4)
            p12 = Path(a+b+s+c,t1)
            self.assertEqual(p12.dumpPython(),"cms.Path(process.a+process.b+cms.Sequence(process.e, cms.Task(process.d, process.f))+process.c, cms.Task(process.a))\n")
            # same set of checks for ConditionalTask
            ct1 = ConditionalTask(a)
            ct2 = ConditionalTask(c, b)
            ct3 = ConditionalTask()
            p13 = Path((a+b)*c, ct1)
            self.assertEqual(p13.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.ConditionalTask(process.a))\n")
            p14 = Path((a+b)*c, ct2, ct1)
            self.assertEqual(p14.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.ConditionalTask(process.a), cms.ConditionalTask(process.b, process.c))\n")
            p15 = Path(ct1, ct2, ct3)
            self.assertEqual(p15.dumpPython(),"cms.Path(cms.ConditionalTask(), cms.ConditionalTask(process.a), cms.ConditionalTask(process.b, process.c))\n")
            ct4 = ConditionalTask(d, Task(f))
            s = Sequence(e, ct4)
            p16 = Path(a+b+s+c,ct1)
            self.assertEqual(p16.dumpPython(),"cms.Path(process.a+process.b+cms.Sequence(process.e, cms.ConditionalTask(process.d, process.f))+process.c, cms.ConditionalTask(process.a))\n")
            # with many members the dump switches to the cms.Task(*[...]) list
            # form, with the member labels sorted alphabetically
            n = 260
            mods = []
            labels = []
            for i in range(0, n):
                l = "a{}".format(i)
                labels.append("process."+l)
                mods.append(DummyModule(l))
            labels.sort()
            task = Task(*mods)
            self.assertEqual(task.dumpPython(), "cms.Task(*[" + ", ".join(labels) + "])\n")
            conditionalTask = ConditionalTask(*mods)
            self.assertEqual(conditionalTask.dumpPython(), "cms.ConditionalTask(*[" + ", ".join(labels) + "])\n")
            # cross-check the visit order and leaf decorations for the paths above
            l = list()
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b'])
            l[:] = []
            p5.visit(namesVisitor)
            self.assertEqual(l, ['a', '-b'])
            l[:] = []
            p5a.visit(namesVisitor)
            self.assertEqual(l, ['a', '|b'])
            l[:] = []
            p5b.visit(namesVisitor)
            self.assertEqual(l, ['a', '+b'])
            l[:] = []
            p5c.visit(namesVisitor)
            self.assertEqual(l, ['a', '+b'])
            l[:] = []
            p7.visit(namesVisitor)
            self.assertEqual(l, ['a', '!b'])
            l[:] = []
            p10.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'c'])
            l[:] = []
            p12.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'e', 'c'])
            l[:] = []
            p16.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'e', 'c'])
            l[:] = []
            moduleVisitor = ModuleNodeVisitor(l)
            p8.visit(moduleVisitor)
            names = [m.label_() for m in l]
            self.assertEqual(names, ['a', 'b', 'c'])
            # placeholders dump with their label quoted
            tph = TaskPlaceholder('a')
            self.assertEqual(tph.dumpPython(), 'cms.TaskPlaceholder("process.a")\n')
            sph = SequencePlaceholder('a')
            self.assertEqual(sph.dumpPython(), 'cms.SequencePlaceholder("process.a")\n')
            ctph = ConditionalTaskPlaceholder('a')
            self.assertEqual(ctph.dumpPython(), 'cms.ConditionalTaskPlaceholder("process.a")\n')
def testDumpConfig(self):
a = DummyModule("a")
b = DummyModule('b')
p = Path((a*b))
#print p.dumpConfig('')
self.assertEqual(p.dumpConfig(None),"{a&b}\n")
p2 = Path((b+a))
#print p2.dumpConfig('')
self.assertEqual(p2.dumpConfig(None),"{b&a}\n")
c = DummyModule('c')
p3 = Path(c*(a+b))
#print p3.dumpConfig('')
self.assertEqual(p3.dumpConfig(None),"{c&a&b}\n")
p4 = Path(c*a+b)
#print p4.dumpConfig('')
self.assertEqual(p4.dumpConfig(None),"{c&a&b}\n")
p5 = Path(a+ignore(b))
#print p5.dumpConfig('')
self.assertEqual(p5.dumpConfig(None),"{a&-b}\n")
p6 = Path(c+a*b)
#print p6.dumpConfig('')
self.assertEqual(p6.dumpConfig(None),"{c&a&b}\n")
p7 = Path(a+~b)
self.assertEqual(p7.dumpConfig(None),"{a&!b}\n")
p8 = Path((a+b)*c)
self.assertEqual(p8.dumpConfig(None),"{a&b&c}\n")
        def testVisitor(self):
            """Visitors see enter/leave calls for every node in declaration order;
            modules held by (Conditional)Tasks are included and may repeat."""
            class TestVisitor(object):
                """Asserts that enter/leave are invoked with the expected nodes in order."""
                def __init__(self, enters, leaves):
                    # expected visit orders, consumed from the front as nodes arrive
                    self._enters = enters
                    self._leaves = leaves
                def enter(self,visitee):
                    #print visitee.dumpSequencePython()
                    if self._enters[0] != visitee:
                        raise RuntimeError("wrong node ("+str(visitee)+") on 'enter'")
                    else:
                        self._enters = self._enters[1:]
                def leave(self,visitee):
                    if self._leaves[0] != visitee:
                        raise RuntimeError("wrong node ("+str(visitee)+") on 'leave'\n expected ("+str(self._leaves[0])+")")
                    else:
                        self._leaves = self._leaves[1:]
            a = DummyModule("a")
            b = DummyModule('b')
            multAB = a*b
            p = Path(multAB)
            t = TestVisitor(enters=[a,b],
                            leaves=[a,b])
            p.visit(t)
            plusAB = a+b
            p = Path(plusAB)
            t = TestVisitor(enters=[a,b],
                            leaves=[a,b])
            p.visit(t)
            c=DummyModule("c")
            d=DummyModule("d")
            e=DummyModule("e")
            f=DummyModule("f")
            g=DummyModule("g")
            # nested ConditionalTasks: modules reachable through several tasks
            # are visited once per reference
            ct1 = ConditionalTask(d)
            ct2 = ConditionalTask(e, ct1)
            ct3 = ConditionalTask(f, g, ct2)
            s=Sequence(plusAB, ct3, ct2)
            multSC = s*c
            p=Path(multSC, ct1, ct2)
            l = []
            v = ModuleNodeVisitor(l)
            p.visit(v)
            expected = [a,b,f,g,e,d,e,d,c,d,e,d]
            self.assertEqual(expected,l)
            # the same structure built with Task yields the same module order
            t1 = Task(d)
            t2 = Task(e, t1)
            t3 = Task(f, g, t2)
            s=Sequence(plusAB, t3, t2)
            multSC = s*c
            p=Path(multSC, t1, t2)
            l = []
            v = ModuleNodeVisitor(l)
            p.visit(v)
            expected = [a,b,f,g,e,d,e,d,c,d,e,d]
            self.assertEqual(expected,l)
            l[:] = []
            # only modules that sit on tasks
            v = ModuleNodeOnTaskVisitor(l)
            p.visit(v)
            expected = [f,g,e,d,e,d,d,e,d]
            self.assertEqual(expected,l)
            l[:] = []
            # only modules scheduled directly in the sequence/path
            v = ModuleNodeNotOnTaskVisitor(l)
            p.visit(v)
            expected = [a,b,c]
            self.assertEqual(expected,l)
            # composite nodes (sequences, tasks) are entered before and left
            # after their children
            t=TestVisitor(enters=[s,a,b,t3,f,g,t2,e,t1,d,t2,e,t1,d,c,t1,d,t2,e,t1,d],
                          leaves=[a,b,f,g,e,d,t1,t2,t3,e,d,t1,t2,s,c,d,t1,e,d,t1,t2])
            p.visit(t)
            # operator nodes (~) wrap their operand in the enter/leave order
            notA= ~a
            p=Path(notA)
            t=TestVisitor(enters=[notA,a],leaves=[a,notA])
            p.visit(t)
def testResolve(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
s1 = Sequence(m1)
s2 = SequencePlaceholder("s3")
s3 = Sequence(m2)
p = Path(s1*s2)
l = list()
#resolver = ResolveVisitor(d)
#p.visit(resolver)
namesVisitor = DecoratedNodeNameVisitor(l)
p.visit(namesVisitor)
self.assertEqual(l, ['m1'])
p.resolve(dict(s1=s1, s2=s2, s3=s3))
l[:] = []
p.visit(namesVisitor)
self.assertEqual(l, ['m1', 'm2'])
l[:]=[]
s1 = Sequence(m1)
s2 = SequencePlaceholder("s3")
s3 = Sequence(m2)
s4 = SequencePlaceholder("s2")
p=Path(s1+s4)
p.resolve(dict(s1=s1, s2=s2, s3=s3, s4=s4))
p.visit(namesVisitor)
self.assertEqual(l, ['m1', 'm2'])
l[:]=[]
m3 = DummyModule("m3")
m4 = DummyModule("m4")
s1 = Sequence(~m1)
s2 = SequencePlaceholder("s3")
s3 = Sequence(ignore(m2))
s4 = Sequence(wait(m3) + ignore(wait(m4)))
d = dict(s1=s1, s2=s2, s3=s3, s4=s4)
p = Path(s1*s2*s4)
p.resolve(dict(s1=s1, s2=s2, s3=s3, s4=s4))
p.visit(namesVisitor)
self.assertEqual(l, ['!m1', '-m2', '|m3', '+m4'])
def testReplace(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
m3 = DummyModule("m3")
m4 = DummyModule("m4")
m5 = DummyModule("m5")
s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
s2 = Sequence(m1*m2)
l = []
namesVisitor = DecoratedNodeNameVisitor(l)
s1.visit(namesVisitor)
self.assertEqual(l,['m1', '!m2', 'm1', 'm2', '-m2'])
s3 = Sequence(~m1*s2)
s3.replace(~m1, m2)
l[:] = []
s3.visit(namesVisitor)
self.assertEqual(l, ['m2', 'm1', 'm2'])
s3.replace(m2, ~m1)
l[:] = []
s3.visit(namesVisitor)
self.assertEqual(l, ['!m1', 'm1', '!m1'])
s3 = Sequence(ignore(m1)*s2)
s3.replace(ignore(m1), m2)
l[:] = []
s3.visit(namesVisitor)
self.assertEqual(l, ['m2', 'm1', 'm2'])
s3.replace(m2, ignore(m1))
l[:] = []
s3.visit(namesVisitor)
self.assertEqual(l, ['-m1', 'm1', '-m1'])
ph = SequencePlaceholder('x')
s4 = Sequence(Sequence(ph))
s4.replace(ph,m2)
self.assertEqual(s4.dumpPython(), "cms.Sequence(process.m2)\n")
s1.replace(m2,m3)
l[:] = []
s1.visit(namesVisitor)
self.assertEqual(l,['m1', '!m3', 'm1', 'm3', '-m3'])
s2 = Sequence(m1*m2)
s3 = Sequence(~m1*s2)
l[:] = []
s3.visit(namesVisitor)
self.assertEqual(l,['!m1', 'm1', 'm2'])
l[:] = []
s3.replace(s2,m1)
s3.visit(namesVisitor)
self.assertEqual(l,['!m1', 'm1'])
s1 = Sequence(m1+m2)
s2 = Sequence(m3+m4)
s3 = Sequence(s1+s2)
s3.replace(m3,m5)
l[:] = []
s3.visit(namesVisitor)
self.assertEqual(l,['m1','m2','m5','m4'])
m6 = DummyModule("m6")
m7 = DummyModule("m7")
m8 = DummyModule("m8")
m9 = DummyModule("m9")
#Task
t6 = Task(m6)
t7 = Task(m7)
t89 = Task(m8, m9)
s1 = Sequence(m1+m2, t6)
s2 = Sequence(m3+m4, t7)
s3 = Sequence(s1+s2, t89)
s3.replace(m3,m5)
l[:] = []
s3.visit(namesVisitor)
self.assertEqual(l,['m1','m2','m5','m4'])
s3.replace(m8,m1)
self.assertTrue(s3.dumpPython() == "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+process.m5+process.m4, cms.Task(process.m1, process.m9), cms.Task(process.m7))\n")
s3.replace(m1,m7)
self.assertTrue(s3.dumpPython() == "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.Task(process.m6), cms.Task(process.m7), cms.Task(process.m7, process.m9))\n")
result = s3.replace(t7, t89)
self.assertTrue(s3.dumpPython() == "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.Task(process.m6), cms.Task(process.m7, process.m9), cms.Task(process.m8, process.m9))\n")
self.assertTrue(result)
result = s3.replace(t7, t89)
self.assertFalse(result)
t1 = Task()
t1.replace(m1,m2)
self.assertTrue(t1.dumpPython() == "cms.Task()\n")
t1 = Task(m1)
t1.replace(m1,m2)
self.assertTrue(t1.dumpPython() == "cms.Task(process.m2)\n")
t1 = Task(m1,m2, m2)
t1.replace(m2,m3)
self.assertTrue(t1.dumpPython() == "cms.Task(process.m1, process.m3)\n")
t1 = Task(m1,m2)
t2 = Task(m1,m3,t1)
t2.replace(m1,m4)
self.assertTrue(t2.dumpPython() == "cms.Task(process.m2, process.m3, process.m4)\n")
t1 = Task(m2)
t2 = Task(m1,m3,t1)
t2.replace(m1,m4)
self.assertTrue(t2.dumpPython() == "cms.Task(process.m2, process.m3, process.m4)\n")
t1 = Task(m2)
t2 = Task(m1,m3,t1)
t2.replace(t1,m4)
self.assertTrue(t2.dumpPython() == "cms.Task(process.m1, process.m3, process.m4)\n")
t1 = Task(m2)
t2 = Task(m1,m3,t1)
t3 = Task(m5)
t2.replace(m2,t3)
self.assertTrue(t2.dumpPython() == "cms.Task(process.m1, process.m3, process.m5)\n")
#ConditionalTask
ct6 = ConditionalTask(m6)
ct7 = ConditionalTask(m7)
ct89 = ConditionalTask(m8, m9)
cs1 = Sequence(m1+m2, ct6)
cs2 = Sequence(m3+m4, ct7)
cs3 = Sequence(cs1+cs2, ct89)
cs3.replace(m3,m5)
l[:] = []
cs3.visit(namesVisitor)
self.assertEqual(l,['m1','m2','m5','m4'])
cs3.replace(m8,m1)
self.assertEqual(cs3.dumpPython(), "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+process.m5+process.m4, cms.ConditionalTask(process.m1, process.m9), cms.ConditionalTask(process.m7))\n")
cs3.replace(m1,m7)
self.assertEqual(cs3.dumpPython(), "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.ConditionalTask(process.m6), cms.ConditionalTask(process.m7), cms.ConditionalTask(process.m7, process.m9))\n")
result = cs3.replace(ct7, ct89)
self.assertEqual(cs3.dumpPython(), "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.ConditionalTask(process.m6), cms.ConditionalTask(process.m7, process.m9), cms.ConditionalTask(process.m8, process.m9))\n")
self.assertTrue(result)
result = cs3.replace(ct7, ct89)
self.assertFalse(result)
ct1 = ConditionalTask()
ct1.replace(m1,m2)
self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask()\n")
ct1 = ConditionalTask(m1)
ct1.replace(m1,m2)
self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask(process.m2)\n")
ct1 = ConditionalTask(m1,m2, m2)
ct1.replace(m2,m3)
self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask(process.m1, process.m3)\n")
ct1 = ConditionalTask(m1,m2)
ct2 = ConditionalTask(m1,m3,ct1)
ct2.replace(m1,m4)
self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m2, process.m3, process.m4)\n")
ct1 = ConditionalTask(m2)
ct2 = ConditionalTask(m1,m3,ct1)
ct2.replace(m1,m4)
self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m2, process.m3, process.m4)\n")
ct1 = ConditionalTask(m2)
ct2 = ConditionalTask(m1,m3,ct1)
ct2.replace(ct1,m4)
self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m1, process.m3, process.m4)\n")
ct1 = ConditionalTask(m2)
ct2 = ConditionalTask(m1,m3,ct1)
ct3 = ConditionalTask(m5)
ct2.replace(m2,ct3)
self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m1, process.m3, process.m5)\n")
#FinalPath
fp = FinalPath()
fp.replace(m1,m2)
self.assertEqual(fp.dumpPython(), "cms.FinalPath()\n")
fp = FinalPath(m1)
fp.replace(m1,m2)
self.assertEqual(fp.dumpPython(), "cms.FinalPath(process.m2)\n")
        def testReplaceIfHeldDirectly(self):
            """_replaceIfHeldDirectly only swaps nodes held directly by this
            sequence; occurrences inside nested sequences are left untouched."""
            m1 = DummyModule("m1")
            m2 = DummyModule("m2")
            m3 = DummyModule("m3")
            m4 = DummyModule("m4")
            m5 = DummyModule("m5")
            s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
            s1._replaceIfHeldDirectly(m2,m3)
            self.assertEqual(s1.dumpPython()[:-1],
                             "cms.Sequence(process.m1+~process.m3+process.m1+process.m3+cms.ignore(process.m3))")
            s2 = Sequence(m1*m2)
            l = []
            s3 = Sequence(~m1*s2)
            # ~m1 is held directly by s3 and is replaced; m1/m2 inside s2 are not
            s3._replaceIfHeldDirectly(~m1, m2)
            self.assertEqual(s3.dumpPython()[:-1],
                             "cms.Sequence(process.m2+(process.m1+process.m2))")
            #Task
            m6 = DummyModule("m6")
            m7 = DummyModule("m7")
            m8 = DummyModule("m8")
            m9 = DummyModule("m9")
            t6 = Task(m6)
            t7 = Task(m7)
            t89 = Task(m8, m9)
            s1 = Sequence(m1+m2, t6)
            s2 = Sequence(m3+m4, t7)
            s3 = Sequence(s1+s2, t89)
            # m3 lives inside nested s2, not directly in s3, so this is a no-op
            s3._replaceIfHeldDirectly(m3,m5)
            self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+cms.Sequence(process.m3+process.m4, cms.Task(process.m7)), cms.Task(process.m8, process.m9))")
            # replacing inside s2 itself is visible through s3 (held by reference)
            s2._replaceIfHeldDirectly(m3,m5)
            self.assertEqual(s2.dumpPython()[:-1],"cms.Sequence(process.m5+process.m4, cms.Task(process.m7))")
            self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+cms.Sequence(process.m5+process.m4, cms.Task(process.m7)), cms.Task(process.m8, process.m9))")
            # directly-held tasks can be replaced too
            s1 = Sequence(t6)
            s1._replaceIfHeldDirectly(t6,t7)
            self.assertEqual(s1.dumpPython()[:-1],"cms.Sequence(cms.Task(process.m7))")
            #ConditionalTask
            ct6 = ConditionalTask(m6)
            ct7 = ConditionalTask(m7)
            ct89 = ConditionalTask(m8, m9)
            s1 = Sequence(m1+m2, ct6)
            s2 = Sequence(m3+m4, ct7)
            s3 = Sequence(s1+s2, ct89)
            s3._replaceIfHeldDirectly(m3,m5)
            self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+cms.Sequence(process.m3+process.m4, cms.ConditionalTask(process.m7)), cms.ConditionalTask(process.m8, process.m9))")
            s2._replaceIfHeldDirectly(m3,m5)
            self.assertEqual(s2.dumpPython()[:-1],"cms.Sequence(process.m5+process.m4, cms.ConditionalTask(process.m7))")
            self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+cms.Sequence(process.m5+process.m4, cms.ConditionalTask(process.m7)), cms.ConditionalTask(process.m8, process.m9))")
            s1 = Sequence(ct6)
            s1._replaceIfHeldDirectly(ct6,ct7)
            self.assertEqual(s1.dumpPython()[:-1],"cms.Sequence(cms.ConditionalTask(process.m7))")
def testIndex(self):
m1 = DummyModule("a")
m2 = DummyModule("b")
m3 = DummyModule("c")
s = Sequence(m1+m2+m3)
self.assertEqual(s.index(m1),0)
self.assertEqual(s.index(m2),1)
self.assertEqual(s.index(m3),2)
def testInsert(self):
m1 = DummyModule("a")
m2 = DummyModule("b")
m3 = DummyModule("c")
s = Sequence(m1+m3)
s.insert(1,m2)
self.assertEqual(s.index(m1),0)
self.assertEqual(s.index(m2),1)
self.assertEqual(s.index(m3),2)
s = Sequence()
s.insert(0, m1)
self.assertEqual(s.index(m1),0)
p = Path()
p.insert(0, m1)
self.assertEqual(s.index(m1),0)
def testExpandAndClone(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
m3 = DummyModule("m3")
m4 = DummyModule("m4")
m5 = DummyModule("m5")
s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
s2 = Sequence(m1*m2)
s3 = Sequence(~m1*s2)
p = Path(s1+s3)
p2 = p.expandAndClone()
l = []
namesVisitor = DecoratedNodeNameVisitor(l)
p2.visit(namesVisitor)
self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
#Task
m6 = DummyModule("m6")
m7 = DummyModule("m7")
m8 = DummyModule("m8")
m9 = DummyModule("m9")
p = Path(s1+s3, Task(m6))
p2 = p.expandAndClone()
l[:] = []
p2.visit(namesVisitor)
self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.Task(process.m6))\n")
s2 = Sequence(m1*m2, Task(m9))
s3 = Sequence(~m1*s2)
t8 = Task(m8)
t8.setLabel("t8")
p = Path(s1+s3, Task(m6, Task(m7, t8)))
p2 = p.expandAndClone()
l[:] = []
p2.visit(namesVisitor)
self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
self.assertTrue(p2.dumpPython() == "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.Task(process.m6, process.m7, process.m8, process.m9))\n")
t1 = Task(m1,m2,m3)
s1 = Sequence(t1)
s2 = s1.expandAndClone()
l[:] = []
s2.visit(namesVisitor)
self.assertEqual(l, [])
self.assertTrue(s2.dumpPython() == "cms.Sequence(cms.Task(process.m1, process.m2, process.m3))\n")
t1 = Task(m1,m2)
t2 = Task(m1,m3,t1)
t3 = t2.expandAndClone()
self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.m2, process.m3)\n")
t4 = Task()
t5 = t4.expandAndClone()
self.assertTrue(t5.dumpPython() == "cms.Task()\n")
#ConditionalTask
s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
s2 = Sequence(m1*m2)
s3 = Sequence(~m1*s2)
p = Path(s1+s3, ConditionalTask(m6))
p2 = p.expandAndClone()
l[:] = []
p2.visit(namesVisitor)
self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.ConditionalTask(process.m6))\n")
s2 = Sequence(m1*m2, ConditionalTask(m9))
s3 = Sequence(~m1*s2)
ct8 = ConditionalTask(m8)
ct8.setLabel("ct8")
p = Path(s1+s3, ConditionalTask(m6, ConditionalTask(m7, ct8)))
p2 = p.expandAndClone()
l[:] = []
p2.visit(namesVisitor)
self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.ConditionalTask(process.m6, process.m7, process.m8, process.m9))\n")
t1 = ConditionalTask(m1,m2,m3)
s1 = Sequence(t1)
s2 = s1.expandAndClone()
l[:] = []
s2.visit(namesVisitor)
self.assertEqual(l, [])
self.assertEqual(s2.dumpPython(), "cms.Sequence(cms.ConditionalTask(process.m1, process.m2, process.m3))\n")
t1 = ConditionalTask(m1,m2)
t2 = ConditionalTask(m1,m3,t1)
t3 = t2.expandAndClone()
self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.m2, process.m3)\n")
t4 = ConditionalTask()
t5 = t4.expandAndClone()
self.assertTrue(t5.dumpPython() == "cms.ConditionalTask()\n")
def testAdd(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
m3 = DummyModule("m3")
m4 = DummyModule("m4")
s1 = Sequence(m1)
s3 = Sequence(m3+ignore(m4))
p = Path(s1)
p += ~m2
p *= s3
l = []
namesVisitor = DecoratedNodeNameVisitor(l)
p.visit(namesVisitor)
self.assertEqual(l, ['m1', '!m2', 'm3', '-m4'])
s4 = Sequence()
s4 +=m1
l[:]=[]; s1.visit(namesVisitor); self.assertEqual(l,['m1'])
self.assertEqual(s4.dumpPython(),"cms.Sequence(process.m1)\n")
s4 = Sequence()
s4 *=m1
l[:]=[]; s1.visit(namesVisitor); self.assertEqual(l,['m1'])
self.assertEqual(s4.dumpPython(),"cms.Sequence(process.m1)\n")
def testRemove(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
m3 = DummyModule("m3")
m4 = DummyModule("m4")
s1 = Sequence(m1*m2+~m3)
s2 = Sequence(m1*s1)
l = []
namesVisitor = DecoratedNodeNameVisitor(l)
d = {'m1':m1 ,'m2':m2, 'm3':m3,'s1':s1, 's2':s2}
l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm2', '!m3'])
l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', 'm2', '!m3'])
s1.remove(m2)
l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', '!m3'])
l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', '!m3'])
s2.remove(m3)
l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', '!m3'])
l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1'])
s1 = Sequence( m1 + m2 + m1 + m2 )
l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm2', 'm1', 'm2'])
s1.remove(m2)
l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', 'm2'])
s1 = Sequence( m1 + m3 )
s2 = Sequence( m2 + ignore(m3) + s1 + m3 )
l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2', '-m3', 'm1', 'm3', 'm3'])
s2.remove(s1)
l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2', '-m3', 'm3'])
s2.remove(m3)
l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2','m3'])
s1 = Sequence(m1*m2*m3)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
s1.remove(m2)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
s1 = Sequence(m1+m2+m3)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
s1.remove(m2)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
s1 = Sequence(m1*m2+m3)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
s1.remove(m2)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
s1 = Sequence(m1+m2*m3)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
s1.remove(m2)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
s1.remove(m1)
s1.remove(m3)
l[:]=[]; s1.visit(namesVisitor); self.assertEqual(l,[])
self.assertEqual(s1.dumpPython(), "cms.Sequence()\n")
s3 = Sequence(m1)
s3.remove(m1)
l[:]=[]; s3.visit(namesVisitor); self.assertEqual(l,[])
self.assertEqual(s3.dumpPython(), "cms.Sequence()\n")
s3 = Sequence(m1)
s4 = Sequence(s3)
s4.remove(m1)
l[:]=[]; s4.visit(namesVisitor); self.assertEqual(l,[])
self.assertEqual(s4.dumpPython(), "cms.Sequence()\n")
#Task
s1 = Sequence(m1+m2, Task(m3), Task(m4))
s1.remove(m4)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.Task(process.m3))\n")
s1 = Sequence(m1+m2+Sequence(Task(m3,m4), Task(m3), Task(m4)))
s1.remove(m4)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.Task(process.m3), cms.Task(process.m4))\n")
t1 = Task(m1)
t1.setLabel("t1")
t2 = Task(m2,t1)
t2.setLabel("t2")
t3 = Task(t1,t2,m1)
t3.remove(m1)
self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.t2)\n")
t3.remove(m1)
self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.m2)\n")
t3.remove(m1)
self.assertTrue(t3.dumpPython() == "cms.Task(process.m2)\n")
t3.remove(m2)
self.assertTrue(t3.dumpPython() == "cms.Task()\n")
#ConditionalTask
s1 = Sequence(m1+m2, ConditionalTask(m3), ConditionalTask(m4))
s1.remove(m4)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m3))\n")
s1 = Sequence(m1+m2+Sequence(ConditionalTask(m3,m4), ConditionalTask(m3), ConditionalTask(m4)))
s1.remove(m4)
self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m3), cms.ConditionalTask(process.m4))\n")
t1 = ConditionalTask(m1)
t1.setLabel("t1")
t2 = ConditionalTask(m2,t1)
t2.setLabel("t2")
t3 = ConditionalTask(t1,t2,m1)
t3.remove(m1)
self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.t2)\n")
t3.remove(m1)
self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.m2)\n")
t3.remove(m1)
self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m2)\n")
t3.remove(m2)
self.assertEqual(t3.dumpPython(), "cms.ConditionalTask()\n")
#FinalPath
fp = FinalPath(m1+m2)
fp.remove(m1)
self.assertEqual(fp.dumpPython(), "cms.FinalPath(process.m2)\n")
fp = FinalPath(m1)
fp.remove(m1)
self.assertEqual(fp.dumpPython(), "cms.FinalPath()\n")
def testCopyAndExclude(self):
a = DummyModule("a")
b = DummyModule("b")
c = DummyModule("c")
d = DummyModule("d")
s = Sequence(a+b+c)
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
s = Sequence(a+b+c+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
s=Sequence(a*b+c+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
s = Sequence(a+b*c+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
s2 = Sequence(a+b)
s = Sequence(c+s2+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.c+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.c+process.a+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence((process.a+process.b)+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.c+(process.a+process.b))\n")
self.assertEqual(s.copyAndExclude([a,b]).dumpPython(),"cms.Sequence(process.c+process.d)\n")
s3 = s.copyAndExclude([c])
s2.remove(a)
self.assertEqual(s3.dumpPython(),"cms.Sequence((process.b)+process.d)\n")
s4 = s.copyAndExclude([a,b])
seqs = []
sequenceVisitor = SequenceVisitor(seqs)
s.visit(sequenceVisitor)
self.assertEqual(len(seqs),1)
seqs[:] = []
s4.visit(sequenceVisitor)
self.assertEqual(len(seqs),0)
self.assertEqual(s4.dumpPython(),"cms.Sequence(process.c+process.d)\n")
holder = SequencePlaceholder("x")
s3 = Sequence(b+d,Task(a))
s2 = Sequence(a+b+holder+s3)
s = Sequence(c+s2+d)
seqs[:] = []
s.visit(sequenceVisitor)
self.assertTrue(seqs == [s2,s3])
s2 = Sequence(a+b+holder)
s = Sequence(c+s2+d)
self.assertEqual(s.copyAndExclude([holder]).dumpPython(),"cms.Sequence(process.c+process.a+process.b+process.d)\n")
s2 = Sequence(a+b+c)
s = Sequence(s2+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence((process.a+process.b+process.c))\n")
self.assertEqual(s.copyAndExclude([s2]).dumpPython(),"cms.Sequence(process.d)\n")
s2 = Sequence(a+b+c)
s = Sequence(s2*d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence((process.a+process.b+process.c))\n")
self.assertEqual(s.copyAndExclude([a,b,c]).dumpPython(),"cms.Sequence(process.d)\n")
s = Sequence(ignore(a)+b+c+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([ignore(a)]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.b+process.c)\n")
s = Sequence(a+ignore(b)+c+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(cms.ignore(process.b)+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+cms.ignore(process.b)+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+cms.ignore(process.b)+process.c)\n")
s = Sequence(a+b+c+ignore(d))
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+cms.ignore(process.d))\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+cms.ignore(process.d))\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+cms.ignore(process.d))\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
s = Sequence(~a+b+c+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(~process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(~process.a+process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(~process.a+process.b+process.c)\n")
s = Sequence(a+~b+c+d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(~process.b+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([~b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+~process.b+process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+~process.b+process.c)\n")
s = Sequence(a+b+c+~d)
self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+~process.d)\n")
self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+~process.d)\n")
self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence()\n")
#Task
e = DummyModule("e")
f = DummyModule("f")
g = DummyModule("g")
h = DummyModule("h")
t1 = Task(h)
s = Sequence(a+b+c+~d, Task(e,f,Task(g,t1)))
self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
self.assertEqual(s.copyAndExclude([a,e,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.f, process.g))\n")
self.assertEqual(s.copyAndExclude([a,e,f,g,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence(cms.Task(process.e, process.f, process.g, process.h))\n")
self.assertEqual(s.copyAndExclude([t1]).dumpPython(),"cms.Sequence(process.a+process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
taskList = []
taskVisitor = TaskVisitor(taskList)
s.visit(taskVisitor)
self.assertEqual(len(taskList),3)
s2 = s.copyAndExclude([g,h])
taskList[:] = []
s2.visit(taskVisitor)
self.assertEqual(len(taskList),1)
t2 = Task(t1)
taskList[:] = []
t2.visit(taskVisitor)
self.assertEqual(taskList[0],t1)
s3 = Sequence(s)
self.assertEqual(s3.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
s4 = Sequence(s)
self.assertEqual(s4.copyAndExclude([a,b,c,d,e,f,g,h]).dumpPython(),"cms.Sequence()\n")
t1 = Task(e,f)
t11 = Task(a)
t11.setLabel("t11")
t2 = Task(g,t1,h,t11)
t3 = t2.copyAndExclude([e,h])
self.assertTrue(t3.dumpPython() == "cms.Task(process.f, process.g, process.t11)\n")
t4 = t2.copyAndExclude([e,f,g,h,a])
self.assertTrue(t4.dumpPython() == "cms.Task()\n")
#ConditionalTask
t1 = ConditionalTask(h)
s = Sequence(a+b+c+~d, ConditionalTask(e,f,ConditionalTask(g,t1)))
self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
self.assertEqual(s.copyAndExclude([a,e,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.f, process.g))\n")
self.assertEqual(s.copyAndExclude([a,e,f,g,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence(cms.ConditionalTask(process.e, process.f, process.g, process.h))\n")
self.assertEqual(s.copyAndExclude([t1]).dumpPython(),"cms.Sequence(process.a+process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
taskList = []
taskVisitor = ConditionalTaskVisitor(taskList)
s.visit(taskVisitor)
self.assertEqual(len(taskList),3)
s2 = s.copyAndExclude([g,h])
taskList[:] = []
s2.visit(taskVisitor)
self.assertEqual(len(taskList),1)
t2 = ConditionalTask(t1)
taskList[:] = []
t2.visit(taskVisitor)
self.assertEqual(taskList[0],t1)
s3 = Sequence(s)
self.assertEqual(s3.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
s4 = Sequence(s)
self.assertEqual(s4.copyAndExclude([a,b,c,d,e,f,g,h]).dumpPython(),"cms.Sequence()\n")
t1 = ConditionalTask(e,f)
t11 = ConditionalTask(a)
t11.setLabel("t11")
t2 = ConditionalTask(g,t1,h,t11)
t3 = t2.copyAndExclude([e,h])
self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.f, process.g, process.t11)\n")
t4 = t2.copyAndExclude([e,f,g,h,a])
self.assertEqual(t4.dumpPython(), "cms.ConditionalTask()\n")
def testSequenceTypeChecks(self):
m1 = DummyModule("m1")
m2 = DummyModule("m2")
s1 = Sequence(m1*m2)
def testRaise():
s1.something = 1
self.assertRaises(AttributeError,testRaise)
def testRaise2():
s2 = Sequence(m1*None)
self.assertRaises(TypeError,testRaise2)
def testCopy(self):
a = DummyModule("a")
b = DummyModule("b")
c = DummyModule("c")
p1 = Path(a+b+c)
p2 = p1.copy()
e = DummyModule("e")
p2.replace(b,e)
self.assertEqual(p1.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
self.assertEqual(p2.dumpPython(),"cms.Path(process.a+process.e+process.c)\n")
p1 = Path(a+b+c)
p2 = p1.copy()
p1 += e
self.assertEqual(p1.dumpPython(),"cms.Path(process.a+process.b+process.c+process.e)\n")
self.assertEqual(p2.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
#Task
t1 = Task(a, b)
t2 = t1.copy()
self.assertTrue(t1.dumpPython() == t2.dumpPython())
t1Contents = list(t1._collection)
t2Contents = list(t2._collection)
self.assertTrue(id(t1Contents[0]) == id(t2Contents[0]))
self.assertTrue(id(t1Contents[1]) == id(t2Contents[1]))
self.assertTrue(id(t1._collection) != id(t2._collection))
#ConditionalTask
t1 = ConditionalTask(a, b)
t2 = t1.copy()
self.assertTrue(t1.dumpPython() == t2.dumpPython())
t1Contents = list(t1._collection)
t2Contents = list(t2._collection)
self.assertTrue(id(t1Contents[0]) == id(t2Contents[0]))
self.assertTrue(id(t1Contents[1]) == id(t2Contents[1]))
self.assertTrue(id(t1._collection) != id(t2._collection))
def testCopyAndAdd(self):
a = DummyModule("a")
b = DummyModule("b")
c = DummyModule("c")
d = DummyModule("d")
e = DummyModule("e")
#Task
t1 = Task(a, b, c)
self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
t2 = t1.copyAndAdd(d, e)
self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
self.assertEqual(t2.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d, process.e)\n")
t3 = t2.copyAndExclude([b])
self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
self.assertEqual(t2.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d, process.e)\n")
self.assertEqual(t3.dumpPython(), "cms.Task(process.a, process.c, process.d, process.e)\n")
t4 = t1.copyAndExclude([b]).copyAndAdd(d)
self.assertEqual(t4.dumpPython(), "cms.Task(process.a, process.c, process.d)\n")
t5 = t2.copyAndExclude([b]).copyAndAdd(d)
self.assertEqual(t5.dumpPython(), "cms.Task(process.a, process.c, process.d, process.e)\n")
t6 = t4.copyAndAdd(Task(b))
self.assertEqual(t6.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d)\n")
#ConditionalTask
t1 = ConditionalTask(a, b, c)
self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
t2 = t1.copyAndAdd(d, e)
self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
self.assertEqual(t2.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d, process.e)\n")
t3 = t2.copyAndExclude([b])
self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
self.assertEqual(t2.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d, process.e)\n")
self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d, process.e)\n")
t4 = t1.copyAndExclude([b]).copyAndAdd(d)
self.assertEqual(t4.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d)\n")
t5 = t2.copyAndExclude([b]).copyAndAdd(d)
self.assertEqual(t5.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d, process.e)\n")
t6 = t4.copyAndAdd(Task(b))
self.assertEqual(t6.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d)\n")
def testInsertInto(self):
from FWCore.ParameterSet.Types import vstring
class TestPSet(object):
def __init__(self):
self._dict = dict()
def addVString(self,isTracked,label,value):
self._dict[label]=value
a = DummyModule("a")
b = DummyModule("b")
c = DummyModule("c")
d = DummyModule("d")
p = Path(a+b+c+d)
decoratedList = []
lister = DecoratedNodeNameVisitor(decoratedList)
p.visit(lister)
ps = TestPSet()
p.insertInto(ps,"p",decoratedList)
self.assertEqual(ps._dict, {"p":vstring("a","b","c","d")})
s = Sequence(b+c)
p = Path(a+s+d)
decoratedList[:] = []
p.visit(lister)
ps = TestPSet()
p.insertInto(ps,"p",decoratedList)
self.assertEqual(ps._dict, {"p":vstring("a","b","c","d")})
unittest.main()
from __future__ import absolute_import
import sys
from builtins import range
from .Mixins import _ConfigureComponent, PrintOptions
from .Mixins import _Labelable, _Unlabelable
from .Mixins import _ValidatingParameterListBase
from .ExceptionHandling import *
from .OrderedSet import OrderedSet
class _HardDependency(object):
    """Records that the sequence named *sequenceName* hard-depends on the
    items in *depSet*. Plain data holder; no behavior of its own."""
    def __init__(self, sequenceName, depSet):
        self.sequenceName = sequenceName
        self.depSet = depSet
class _Sequenceable(object):
def __init__(self):
pass
def __mul__(self,rhs):
return _SequenceCollection(self,rhs)
def __add__(self,rhs):
return _SequenceCollection(self,rhs)
def __invert__(self):
return _SequenceNegation(self)
def _clonesequence(self, lookuptable):
try:
return lookuptable[id(self)]
except:
raise KeyError("no "+str(type(self))+" with id "+str(id(self))+" found")
def resolve(self, processDict,keepIfCannotResolve=False):
return self
def isOperation(self):
return False
def isLeaf(self):
return False
def _visitSubNodes(self,visitor):
pass
def visitNode(self,visitor):
visitor.enter(self)
self._visitSubNodes(visitor)
visitor.leave(self)
def _appendToCollection(self,collection):
collection.append(self)
def _errorstr(self):
return "A Sequenceable type"
def _checkIfSequenceable(caller, v):
    """Raise TypeError (with a descriptive message) unless *v* is allowed in a sequence."""
    if isinstance(v, _Sequenceable):
        return
    parts = [
        format_outerframe(2),
        "%s only takes arguments of types which are allowed in a sequence, but was given:\n" % format_typename(caller),
        format_typename(v),
        "\nPlease remove the problematic object from the argument list",
    ]
    raise TypeError(''.join(parts))
def _checkIfBooleanLogicSequenceable(caller, v):
    """Raise TypeError (with a descriptive message) unless *v* is allowed in a boolean logic sequence."""
    if isinstance(v, _BooleanLogicSequenceable):
        return
    parts = [
        format_outerframe(2),
        "%s only takes arguments of types which are allowed in a boolean logic sequence, but was given:\n" % format_typename(caller),
        format_typename(v),
        "\nPlease remove the problematic object from the argument list",
    ]
    raise TypeError(''.join(parts))
class _BooleanLogicSequenceable(_Sequenceable):
    """A sequenceable which may additionally be combined with '|' (OR) and '&' (AND)
    to form boolean logic expressions."""
    def __init__(self):
        super(_BooleanLogicSequenceable,self).__init__()
    def __or__(self,other):
        # self | other -> OR expression
        return _BooleanLogicExpression(_BooleanLogicExpression.OR,self,other)
    def __and__(self,other):
        # self & other -> AND expression
        return _BooleanLogicExpression(_BooleanLogicExpression.AND,self,other)
class _BooleanLogicExpression(_BooleanLogicSequenceable):
    """The boolean combination (AND or OR) of two or more sequenceable operands."""
    OR = 0
    AND = 1
    def __init__(self, op, left, right):
        _checkIfBooleanLogicSequenceable(self, left)
        _checkIfBooleanLogicSequenceable(self, right)
        self._op = op
        self._items = list()
        # Operands which use the same operator are flattened into this node,
        # so (a|b)|c is stored as a single OR over [a, b, c].
        for operand in (left, right):
            if isinstance(operand, _BooleanLogicExpression) and operand._op == self._op:
                self._items.extend(operand._items)
            else:
                self._items.append(operand)
    def isOperation(self):
        return True
    def _visitSubNodes(self, visitor):
        for operand in self._items:
            operand.visitNode(visitor)
    def dumpSequencePython(self, options=PrintOptions()):
        """Join the operands with this node's operator; non-leaf operands are parenthesized."""
        pieces = []
        for operand in self._items:
            dumped = operand.dumpSequencePython(options)
            if isinstance(operand, _BooleanLogicSequenceLeaf):
                pieces.append(dumped)
            else:
                pieces.append('(' + dumped + ')')
        return self.operatorString().join(pieces)
    def operatorString(self):
        """'&' for AND, '|' for OR."""
        return '&' if self._op == self.AND else '|'
class _SequenceLeaf(_Sequenceable):
    """A terminal (module-like) node of a sequence; has no sub-nodes."""
    def __init__(self):
        pass
    def isLeaf(self):
        return True
class _BooleanLogicSequenceLeaf(_BooleanLogicSequenceable):
    """A terminal node which may also be used in boolean logic expressions."""
    def __init__(self):
        pass
    def isLeaf(self):
        return True
class _SequenceCollection(_Sequenceable):
    """Holds a flattened list of sequenceables joined by '+' or '*'.

    Nested _SequenceCollection arguments are expanded on construction and on
    append (via _appendToCollection), so self._collection never nests.
    """
    def __init__(self,*seqList):
        self._collection = list()
        for s in seqList:
            _checkIfSequenceable(self,s)
            s._appendToCollection(self._collection)
    def __mul__(self,rhs):
        # '*' and '+' behave identically here: append rhs in place
        _checkIfSequenceable(self,rhs)
        rhs._appendToCollection(self._collection)
        return self
    def __add__(self,rhs):
        _checkIfSequenceable(self,rhs)
        rhs._appendToCollection(self._collection)
        return self
    def __str__(self):
        # '+'-joined representation, skipping None entries left by removals
        sep = ''
        returnValue = ''
        for m in self._collection:
            if m is not None:
                returnValue += sep+str(m)
                sep = '+'
        return returnValue
    def _appendToCollection(self,collection):
        # flatten: contribute our operands rather than ourselves
        collection.extend(self._collection)
    def dumpSequencePython(self, options=PrintOptions()):
        """Python representation: the operands' dumps joined with '+'."""
        returnValue = ''
        separator = ''
        for item in self._collection:
            itemDump = item.dumpSequencePython(options)
            if itemDump:
                returnValue += (separator + itemDump)
                separator = '+'
        return returnValue
    def dumpSequenceConfig(self):
        """Old-style config representation: the operands' dumps joined with '&'."""
        returnValue = self._collection[0].dumpSequenceConfig()
        for m in self._collection[1:]:
            returnValue += '&'+m.dumpSequenceConfig()
        return returnValue
    def directDependencies(self,sortByType=True):
        return findDirectDependencies(self, self._collection,sortByType=sortByType)
    def visitNode(self,visitor):
        # a collection is transparent to visitors: only its members are visited
        for m in self._collection:
            m.visitNode(visitor)
    def resolve(self, processDict,keepIfCannotResolve=False):
        # resolve placeholders in place
        self._collection = [x.resolve(processDict,keepIfCannotResolve) for x in self._collection]
        return self
    def index(self,item):
        return self._collection.index(item)
    def insert(self,index,item):
        self._collection.insert(index,item)
    def _replaceIfHeldDirectly(self,original,replacement):
        """Replace each directly held 'original' (including ones wrapped by a
        unary operator such as '~' or cms.ignore). A replacement of None marks
        the slot None and the Nones are dropped afterwards. Returns True if
        anything was replaced."""
        didReplace = False
        for i in self._collection:
            if original == i:
                self._collection[self._collection.index(original)] = replacement
                didReplace = True
            elif isinstance(i,_UnarySequenceOperator) and i._has(original):
                didReplace = True
                if replacement is None:
                    self._collection[self._collection.index(i)] = None
                else:
                    # keep the same wrapper type around the new operand
                    self._collection[self._collection.index(i)] = type(i)(replacement)
        if replacement is None:
            self._collection = [ i for i in self._collection if i is not None]
        return didReplace
def findDirectDependencies(element, collection,sortByType=True):
    """Return (category, label) pairs for the labeled items of *collection*.

    Unlabeled containers (Sequence/Task/ConditionalTask without a label, and
    nested collections) are recursed into; placeholders and None entries are
    skipped. The order of the isinstance checks below matters. When
    *sortByType* is True the result is deduplicated and sorted by
    (category, label) with any '_cfi' suffix ignored for ordering.
    """
    dependencies = []
    for item in collection:
        if item is None:
            continue
        elif isinstance(item, _SequenceLeaf):
            t = 'modules'
        # cms.ignore(module), ~(module)
        elif isinstance(item, (_SequenceIgnore, _SequenceNegation)):
            if isinstance(item._operand, _SequenceCollection):
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'modules'
        # _SequenceCollection
        elif isinstance(item, _SequenceCollection):
            dependencies += item.directDependencies(sortByType)
            continue
        # cms.Sequence
        elif isinstance(item, Sequence):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'sequences'
        # cms.Task
        elif isinstance(item, Task):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'tasks'
        # cms.ConditionalTask
        elif isinstance(item, ConditionalTask):
            if not item.hasLabel_():
                dependencies += item.directDependencies(sortByType)
                continue
            t = 'conditionaltasks'
        # SequencePlaceholder and TaskPlaceholder do not add an explicit dependency
        elif isinstance(item, (SequencePlaceholder, TaskPlaceholder, ConditionalTaskPlaceholder)):
            continue
        # unsupported elements
        else:
            sys.stderr.write("Warning: unsupported element '%s' in %s '%s'\n" % (str(item), type(element).__name__, element.label_()))
            continue
        dependencies.append((t, item.label_()))
    if sortByType:
        return sorted(set(dependencies), key = lambda t_item: (t_item[0].lower(), t_item[1].lower().replace('_cfi', '')))
    else:
        return dependencies
class _ModuleSequenceType(_ConfigureComponent, _Labelable):
    """Base class for classes which define a sequence of modules.

    Holds an optional sequenced part in self._seq (a _SequenceCollection)
    plus a set of associated Tasks in self._tasks (an OrderedSet).
    """
    def __init__(self,*arg, **argv):
        # Accepts zero or one sequenceable argument followed by zero or more
        # Task arguments; anything else produces a descriptive TypeError.
        self.__dict__["_isFrozen"] = False
        self._seq = None
        if (len(arg) > 1 and not isinstance(arg[1], _TaskBase)) or (len(arg) > 0 and not isinstance(arg[0],_Sequenceable) and not isinstance(arg[0],_TaskBase)):
            typename = format_typename(self)
            msg = format_outerframe(2)
            msg += "The %s constructor takes zero or one sequenceable argument followed by zero or more arguments of type Task. But the following types are given:\n" %typename
            for item,i in zip(arg, range(1,20)):
                try:
                    msg += "  %i) %s \n" %(i, item._errorstr())
                except:
                    msg += "  %i) Not sequenceable and not a Task\n" %(i)
            if len(arg) > 1 and isinstance(arg[0],_Sequenceable) and isinstance(arg[1], _Sequenceable):
                msg += "Maybe you forgot to combine the sequenceable arguments via '*' or '+'."
            raise TypeError(msg)
        tasks = arg
        if len(arg) > 0 and isinstance(arg[0], _Sequenceable):
            self._seq = _SequenceCollection()
            arg[0]._appendToCollection(self._seq._collection)
            tasks = arg[1:]
        self._isModified = False
        self._tasks = OrderedSet()
        if len(tasks) > 0:
            self.associate(*tasks)
    def associate(self,*tasks):
        """Add the given Tasks to this sequence's associated tasks."""
        for task in tasks:
            if not isinstance(task, _TaskBase):
                raise TypeError("associate only works with objects of type Task")
            self._tasks.add(task)
    def isFrozen(self):
        return self._isFrozen
    def setIsFrozen(self):
        self._isFrozen = True
    def _place(self,name,proc):
        self._placeImpl(name,proc)
    def __imul__(self,rhs):
        # 'seq *= x' appends x to the sequenced part
        _checkIfSequenceable(self, rhs)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq+=rhs
        return self
    def __iadd__(self,rhs):
        # 'seq += x' appends x to the sequenced part
        _checkIfSequenceable(self, rhs)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq += rhs
        return self
    def __str__(self):
        v = ExpandVisitor(type(self))
        self.visit(v)
        return v.resultString()
    def dumpConfig(self, options):
        """Old-style config dump of the sequenced part."""
        s = ''
        if self._seq is not None:
            s = self._seq.dumpSequenceConfig()
        return '{'+s+'}\n'
    def dumpPython(self, options=PrintOptions()):
        """Returns the python representation of this object, newline terminated."""
        s = self.dumpPythonNoNewline(options)
        return s + "\n"
    def dumpPythonNoNewline(self, options=PrintOptions()):
        """Python representation: sequenced part first, then the associated
        tasks (labeled ones by label, unlabeled ones expanded), sorted."""
        s=''
        if self._seq is not None:
            s =self._seq.dumpSequencePython(options)
        associationContents = set()
        for task in self._tasks:
            if task.hasLabel_():
                associationContents.add(_Labelable.dumpSequencePython(task, options))
            else:
                associationContents.add(task.dumpPythonNoNewline(options))
        for iString in sorted(associationContents):
            if s:
                s += ", "
            s += iString
        if len(associationContents) > 254:
            # too many positional arguments for a python call: pass a list instead
            return 'cms.'+type(self).__name__+'(*['+s+'])'
        return 'cms.'+type(self).__name__+'('+s+')'
    def dumpSequencePython(self, options=PrintOptions()):
        """Representation used when this object appears inside another sequence."""
        # only dump the label, if possible
        if self.hasLabel_():
            return _Labelable.dumpSequencePython(self, options)
        elif len(self._tasks) == 0:
            if self._seq is None:
                return ''
            s = self._seq.dumpSequencePython(options)
            if s:
                return '('+s+')'
            return ''
        return self.dumpPythonNoNewline(options)
    def dumpSequenceConfig(self):
        """Old-style config representation used when nested in another sequence."""
        # only dump the label, if possible
        if self.hasLabel_():
            return _Labelable.dumpSequenceConfig(self)
        else:
            # dump it verbose
            if self._seq is None:
                return ''
            return '('+self._seq.dumpSequenceConfig()+')'
    def __repr__(self):
        s = ''
        if self._seq is not None:
            s = str(self._seq)
        return "cms."+type(self).__name__+'('+s+')\n'
    def directDependencies(self,sortByType=True):
        """(category, label) pairs of the items this object directly depends on."""
        result = []
        if self._seq:
            result += self._seq.directDependencies(sortByType=sortByType)
        if self._tasks:
            result += findDirectDependencies(self, self._tasks,sortByType=sortByType)
        return result
    def moduleNames(self):
        """Returns the set of names of all modules reachable from this object."""
        result = set()
        visitor = NodeNameVisitor(result)
        self.visit(visitor)
        return result
    def contains(self, mod):
        """True if 'mod' is reachable from this object."""
        visitor = ContainsModuleVisitor(mod)
        self.visit(visitor)
        return visitor.result()
    def copy(self):
        """Shallow copy: same members, new container and task set."""
        returnValue =_ModuleSequenceType.__new__(type(self))
        if self._seq is not None:
            returnValue.__init__(self._seq)
        else:
            returnValue.__init__()
        returnValue._tasks = OrderedSet(self._tasks)
        return returnValue
    def copyAndExclude(self,listOfModulesToExclude):
        """Returns a copy which omits every occurrence of the listed objects."""
        # You can exclude instances of these types EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESSource, ESProducer, Service, Sequence, SequencePlaceholder, Task,
        # _SequenceNegation, and _SequenceIgnore.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        v = _CopyAndExcludeSequenceVisitor(listOfModulesToExclude)
        self.visit(v)
        result = self.__new__(type(self))
        result.__init__(v.result(self)[0], *v.result(self)[1])
        return result
    def expandAndClone(self):
        # Name of this function is not very good. It makes a shallow copy with all
        # the subTasks and subSequences flattened out (removed), but keeping all the
        # modules that were in those subSequences and subTasks as well as the top level
        # ones. Note this will also remove placeholders so one should probably
        # call resolve before using this if the sequence contains any placeholders.
        visitor = ExpandVisitor(type(self))
        self.visit(visitor)
        return visitor.result()
    def _postProcessFixup(self,lookuptable):
        self._seq = self._seq._clonesequence(lookuptable)
        return self
    def replace(self, original, replacement):
        """Replace every occurrence of 'original' with 'replacement'; returns
        True if at least one replacement was made."""
        # This works for either argument being of type EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESProducer, ESSource, Service, Sequence, SequencePlaceHolder,
        # Task, _SequenceNegation, _SequenceIgnore. Although it will fail with a
        # raised exception if the replacement actually hits a case where a
        # non-Sequenceable object is placed in the sequenced part of a Sequence
        # or a type not allowed on a Task is put on a Task.
        # There is one special case where we need an explicit check to prevent
        # the algorithm from getting confused, either both or neither can be Tasks
        #
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        if (isinstance(original,Task) != isinstance(replacement,Task)):
            raise TypeError("replace only works if both arguments are Tasks or neither")
        if (isinstance(original,ConditionalTask) != isinstance(replacement,ConditionalTask)):
            raise TypeError("replace only works if both arguments are ConditionalTasks or neither")
        v = _CopyAndReplaceSequenceVisitor(original,replacement)
        self.visit(v)
        if v.didReplace():
            self._seq = v.result(self)[0]
            if v.result(self)[1]:
                self._tasks.clear()
                self.associate(*v.result(self)[1])
        return v.didReplace()
    def _replaceIfHeldDirectly(self,original,replacement):
        """Only replaces an occurrence held directly by this object (not ones
        nested inside held Sequences or Tasks)."""
        didReplace = False
        if original in self._tasks:
            self._tasks.remove(original)
            if replacement is not None:
                self._tasks.add(replacement)
            didReplace = True
        if self._seq is not None:
            didReplace |= self._seq._replaceIfHeldDirectly(original,replacement)
        return didReplace
    def index(self,item):
        """Returns the index of 'item' in the sequenced part; raises if absent."""
        if self._seq is not None:
            return self._seq.index(item)
        raise ValueError(str(item)+" is not in the sequence")
    def insert(self,index,item):
        """Insert 'item' into the sequenced part at position 'index'."""
        _checkIfSequenceable(self, item)
        if self._seq is None:
            self.__dict__["_seq"] = _SequenceCollection()
        self._seq.insert(index,item)
    def remove(self, something):
        """Remove the first occurrence of 'something'; returns True on success."""
        # You can remove instances of these types EDProducer, EDFilter, OutputModule,
        # EDAnalyzer, ESSource, ESProducer, Service, Sequence, SequencePlaceholder, Task,
        # _SequenceNegation, and _SequenceIgnore.
        # Mostly this is very intuitive, but there are some complications in cases
        # where objects that contain other objects are involved. See the comments
        # for the _MutatingSequenceVisitor.
        #
        # Works very similar to copyAndExclude, there are 2 differences. This changes
        # the object itself instead of making a copy and second it only removes
        # the first instance of the argument instead of all of them.
        v = _CopyAndRemoveFirstSequenceVisitor(something)
        self.visit(v)
        if v.didRemove():
            self._seq = v.result(self)[0]
            if v.result(self)[1]:
                self._tasks.clear()
                self.associate(*v.result(self)[1])
        return v.didRemove()
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Resolve any placeholders held by the sequenced part or the tasks."""
        if self._seq is not None:
            self._seq = self._seq.resolve(processDict,keepIfCannotResolve)
        for task in self._tasks:
            task.resolve(processDict,keepIfCannotResolve)
        return self
    def __setattr__(self,name,value):
        # sequences carry no user-settable parameters; only private state allowed
        if not name.startswith("_"):
            raise AttributeError("You cannot set parameters for sequence like objects.")
        else:
            self.__dict__[name] = value
    #def replace(self,old,new):
    #"""Find all instances of old and replace with new"""
    #def insertAfter(self,which,new):
    #"""new will depend on which but nothing after which will depend on new"""
    #((a*b)*c) >> insertAfter(b,N) >> ((a*b)*(N+c))
    #def insertBefore(self,which,new):
    #"""new will be independent of which"""
    #((a*b)*c) >> insertBefore(b,N) >> ((a*(N+b))*c)
    #def __contains__(self,item):
    #"""returns whether or not 'item' is in the sequence"""
    #def modules_(self):
    def nameInProcessDesc_(self, myname):
        return myname
    def insertInto(self, parameterSet, myname, decoratedList):
        # the C++ side receives the decorated module names as a vstring
        parameterSet.addVString(True, myname, decoratedList)
    def visit(self,visitor):
        """Pass to the visitor the sequenced nodes, then each associated task."""
        if self._seq is not None:
            self._seq.visitNode(visitor)
        for item in self._tasks:
            visitor.enter(item)
            item.visit(visitor)
            visitor.leave(item)
class _UnarySequenceOperator(_BooleanLogicSequenceable):
    """Base class for unary operators (e.g. '~', cms.ignore) wrapping one sequenceable operand."""
    def __init__(self, operand):
        self._operand = operand
        if isinstance(operand, _ModuleSequenceType):
            raise RuntimeError("This operator cannot accept a sequence")
        if not isinstance(operand, _Sequenceable):
            raise RuntimeError("This operator cannot accept a non sequenceable type")
    def __eq__(self, other):
        # allows replace(~a, b)
        return type(self) is type(other) and self._operand==other._operand
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        # this definition implies that self._operand MUST NOT be changed after the construction
        return hash((type(self), self._operand))
    def _findDependencies(self,knownDeps, presentDeps):
        self._operand._findDependencies(knownDeps, presentDeps)
    def _clonesequence(self, lookuptable):
        # clone keeps the same wrapper type around the cloned operand
        return type(self)(self._operand._clonesequence(lookuptable))
    def _has(self, op):
        return self._operand == op
    def resolve(self, processDict,keepIfCannotResolve=False):
        return type(self)(self._operand.resolve(processDict,keepIfCannotResolve))
    def isOperation(self):
        return True
    def _visitSubNodes(self,visitor):
        self._operand.visitNode(visitor)
    def decoration(self):
        # bug fix: the operand's decoration was computed but never returned,
        # so this method always yielded None
        return self._operand.decoration()
    def directDependencies(self,sortByType=True):
        return self._operand.directDependencies(sortByType=sortByType)
    def label_(self):
        return self._operand.label_()
class _SequenceNegation(_UnarySequenceOperator):
    """Used in the expression tree for a sequence as a stand in for the '!' operator."""
    def __init__(self, operand):
        super(_SequenceNegation, self).__init__(operand)
    def __str__(self):
        return '~{0}'.format(self._operand)
    def dumpSequenceConfig(self):
        return '!{0}'.format(self._operand.dumpSequenceConfig())
    def dumpSequencePython(self, options=PrintOptions()):
        # parenthesize operator operands so '~' applies to the whole expression
        inner = self._operand.dumpSequencePython(options)
        if self._operand.isOperation():
            return '~({0})'.format(inner)
        return '~{0}'.format(inner)
    def decoration(self):
        return '!'
class _SequenceIgnore(_UnarySequenceOperator):
    """Used in the expression tree for a sequence as a stand in for the '-' operator."""
    def __init__(self, operand):
        super(_SequenceIgnore, self).__init__(operand)
    def __str__(self):
        return 'ignore({0})'.format(self._operand)
    def dumpSequenceConfig(self):
        return '-{0}'.format(self._operand.dumpSequenceConfig())
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.ignore({0})'.format(self._operand.dumpSequencePython(options))
    def decoration(self):
        return '-'
class _SequenceWait(_UnarySequenceOperator):
    """Used in the expression tree for a sequence as a stand in for the '|' operator."""
    def __init__(self, operand):
        super(_SequenceWait, self).__init__(operand)
    def __str__(self):
        return 'wait({0})'.format(self._operand)
    def dumpSequenceConfig(self):
        return '|{0}'.format(self._operand.dumpSequenceConfig())
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.wait({0})'.format(self._operand.dumpSequencePython(options))
    def decoration(self):
        return '|'
class _SequenceWaitAndIgnore(_UnarySequenceOperator):
    """Used in the expression tree for a sequence as a stand in for the '+' operator."""
    def __init__(self, operand):
        super(_SequenceWaitAndIgnore, self).__init__(operand)
    def __str__(self):
        return 'wait(ignore({0}))'.format(self._operand)
    def dumpSequenceConfig(self):
        return '+{0}'.format(self._operand.dumpSequenceConfig())
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.wait(cms.ignore({0}))'.format(self._operand.dumpSequencePython(options))
    def decoration(self):
        return '+'
def ignore(seq):
    """Wrap *seq* so its result is ignored; ignoring a wait() merges into one wrapper."""
    if not isinstance(seq, _SequenceWait):
        return _SequenceIgnore(seq)
    return _SequenceWaitAndIgnore(seq._operand)
def wait(seq):
    """Wrap *seq* in a wait; waiting on an ignore() merges into one wrapper."""
    if not isinstance(seq, _SequenceIgnore):
        return _SequenceWait(seq)
    return _SequenceWaitAndIgnore(seq._operand)
class Path(_ModuleSequenceType):
    """A sequence-like container registered into the process as a Path."""
    def __init__(self,*arg,**argv):
        super(Path,self).__init__(*arg,**argv)
    def _placeImpl(self,name,proc):
        # register under 'name' via the process's Path placement hook
        proc._placePath(name,self)
class EndPath(_ModuleSequenceType):
    """A sequence-like container registered into the process as an EndPath."""
    def __init__(self,*arg,**argv):
        super(EndPath,self).__init__(*arg,**argv)
    def _placeImpl(self,name,proc):
        proc._placeEndPath(name,self)
class FinalPath(_ModuleSequenceType):
    """A sequence-like container registered into the process as a FinalPath.

    Unlike the base class, it refuses Task associations.
    """
    def __init__(self,*arg,**argv):
        super(FinalPath,self).__init__(*arg,**argv)
    def _placeImpl(self,name,proc):
        proc._placeFinalPath(name,self)
    def associate(self,task):
        # override: Tasks may not be associated to a FinalPath
        raise TypeError("FinalPath does not allow associations with Tasks")
class Sequence(_ModuleSequenceType,_Sequenceable):
    """An ordered container of modules which may itself be nested inside
    Paths, EndPaths, or other Sequences."""
    def __init__(self,*arg,**argv):
        super(Sequence,self).__init__(*arg,**argv)
    def _placeImpl(self,name,proc):
        proc._placeSequence(name,self)
    def _clonesequence(self, lookuptable):
        """Clone once and memoize in 'lookuptable' so shared references stay shared."""
        if id(self) not in lookuptable:
            #for sequences held by sequences we need to clone
            # on the first reference
            if self._seq is not None:
                clone = type(self)(self._seq._clonesequence(lookuptable))
            else:
                clone = type(self)()
            lookuptable[id(self)]=clone
            lookuptable[id(clone)]=clone
        return lookuptable[id(self)]
    def _visitSubNodes(self,visitor):
        self.visit(visitor)
class SequencePlaceholder(_Sequenceable):
    """Stands in, by name, for a Sequence not yet available; resolve() swaps in
    the real object found in the process dictionary."""
    def __init__(self, name):
        self._name = name
    def _placeImpl(self,name,proc):
        # placeholders are never placed into the process themselves
        pass
    def __str__(self):
        return self._name
    def insertInto(self, parameterSet, myname):
        # reaching here means resolve() was never called (or failed silently)
        raise RuntimeError("The SequencePlaceholder "+self._name
                           +" was never overridden")
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Look up the real object by name; keep or raise when it is absent."""
        if not self._name in processDict:
            #print str(processDict.keys())
            if keepIfCannotResolve:
                return self
            raise RuntimeError("The SequencePlaceholder "+self._name+ " cannot be resolved.\n Known keys are:"+str(processDict.keys()))
        o = processDict[self._name]
        if not isinstance(o,_Sequenceable):
            raise RuntimeError("The SequencePlaceholder "+self._name+ " refers to an object type which is not allowed to be on a sequence: "+str(type(o)))
        return o.resolve(processDict)
    def _clonesequence(self, lookuptable):
        if id(self) not in lookuptable:
            #for sequences held by sequences we need to clone
            # on the first reference
            clone = type(self)(self._name)
            lookuptable[id(self)]=clone
            lookuptable[id(clone)]=clone
        return lookuptable[id(self)]
    def copy(self):
        returnValue =SequencePlaceholder.__new__(type(self))
        returnValue.__init__(self._name)
        return returnValue
    def dumpSequenceConfig(self):
        return 'cms.SequencePlaceholder("%s")' %self._name
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.SequencePlaceholder("%s")'%self._name
    def dumpPython(self, options=PrintOptions()):
        result = 'cms.SequencePlaceholder(\"'
        if options.isCfg:
            result += 'process.'
        result += self._name+'\")\n'
        return result
class Schedule(_ValidatingParameterListBase,_ConfigureComponent,_Unlabelable):
    """An ordered list of Paths, EndPaths and FinalPaths, optionally with
    associated Tasks (passed via the keyword argument 'tasks')."""
    def __init__(self,*arg,**argv):
        super(Schedule,self).__init__(*arg)
        self._tasks = OrderedSet()
        theKeys = list(argv.keys())
        if theKeys:
            if len(theKeys) > 1 or theKeys[0] != "tasks":
                raise RuntimeError("The Schedule constructor can only have one keyword argument after its Path and\nEndPath arguments and it must use the keyword 'tasks'")
            taskList = argv["tasks"]
            # Normally we want a list of tasks, but we let it also work if the value is one Task
            if isinstance(taskList,Task):
                self.associate(taskList)
            else:
                try:
                    # Call this just to check that taskList is a list or other iterable object
                    self.__dummy(*taskList)
                except:
                    raise RuntimeError("The Schedule constructor argument with keyword 'tasks' must have a\nlist (or other iterable object) as its value")
                if taskList:
                    self.associate(*taskList)
    def __dummy(self, *args):
        # only exists to probe whether the 'tasks' argument can be unpacked
        pass
    def associate(self,*tasks):
        """Add the given Tasks to this schedule."""
        for task in tasks:
            if not isinstance(task, Task):
                raise TypeError("The associate function in the class Schedule only works with arguments of type Task")
            self._tasks.add(task)
    @staticmethod
    def _itemIsValid(item):
        # only path-like objects may be list members
        return isinstance(item,Path) or isinstance(item,EndPath) or isinstance(item,FinalPath)
    def copy(self):
        """Shallow copy with an independent task set."""
        import copy
        aCopy = copy.copy(self)
        aCopy._tasks = OrderedSet(self._tasks)
        return aCopy
    def _place(self,label,process):
        process.setPartialSchedule_(self,label)
    def _replaceIfHeldDirectly(self,original,replacement):
        """Replace 'original' in the associated tasks and in the path list;
        replacement of None removes it. Returns True if anything changed."""
        didReplace = False
        if original in self._tasks:
            self._tasks.remove(original)
            if replacement is not None:
                self._tasks.add(replacement)
            didReplace = True
        indices = []
        for i, e in enumerate(self):
            if original == e:
                indices.append(i)
        # iterate in reverse so earlier indices stay valid while popping
        for i in reversed(indices):
            self.pop(i)
            if replacement is not None:
                self.insert(i, replacement)
            didReplace = True
        return didReplace
    def moduleNames(self):
        """Names of all modules reachable from the scheduled paths and tasks."""
        result = set()
        visitor = NodeNameVisitor(result)
        for seq in self:
            seq.visit(visitor)
        for t in self._tasks:
            t.visit(visitor)
        return result
    def contains(self, mod):
        """True if 'mod' is reachable from any scheduled path or associated task."""
        visitor = ContainsModuleVisitor(mod)
        for seq in self:
            seq.visit(visitor)
            if visitor.result():
                return True
        for t in self._tasks:
            t.visit(visitor)
            if visitor.result():
                return True
        return visitor.result()
    def tasks(self):
        """Returns the set of associated Tasks."""
        return self._tasks
    def dumpPython(self, options=PrintOptions()):
        """Python representation: paths in schedule order, tasks sorted by dump."""
        pathNames = ['process.'+p.label_() for p in self]
        if pathNames:
            s=', '.join(pathNames)
        else:
            s = ''
        associationContents = set()
        for task in self._tasks:
            if task.hasLabel_():
                associationContents.add(_Labelable.dumpSequencePython(task, options))
            else:
                associationContents.add(task.dumpPythonNoNewline(options))
        taskStrings = list()
        for iString in sorted(associationContents):
            taskStrings.append(iString)
        if taskStrings and s:
            return 'cms.Schedule(*[ ' + s + ' ], tasks=[' + ', '.join(taskStrings) + '])\n'
        elif s:
            return 'cms.Schedule(*[ ' + s + ' ])\n'
        elif taskStrings:
            return 'cms.Schedule(tasks=[' + ', '.join(taskStrings) + '])\n'
        else:
            return 'cms.Schedule()\n'
    def __str__(self):
        return self.dumpPython()
# Fills a list of all Sequences visited
# Can visit a Sequence, Path, or EndPath
class SequenceVisitor(object):
    """Appends every Sequence encountered during a visit to the supplied list."""
    def __init__(self, d):
        self.deps = d
    def enter(self, visitee):
        if not isinstance(visitee, Sequence):
            return
        self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all Tasks visited
# Can visit a Task, Sequence, Path, or EndPath
class TaskVisitor(object):
    """Appends every Task encountered during a visit to the supplied list."""
    def __init__(self, d):
        self.deps = d
    def enter(self, visitee):
        if not isinstance(visitee, Task):
            return
        self.deps.append(visitee)
    def leave(self, visitee):
        pass
# Fills a list of all ConditionalTasks visited
# Can visit a ConditionalTask, Sequence, Path, or EndPath
class ConditionalTaskVisitor(object):
    """Visitor that appends every ConditionalTask it encounters to the
    supplied list.  Usable on a ConditionalTask, Sequence, Path, or EndPath."""
    def __init__(self,d):
        self.deps = d
    def enter(self,visitee):
        # Only ConditionalTask nodes are recorded; everything else is ignored.
        if isinstance(visitee,ConditionalTask):
            self.deps.append(visitee)
    def leave(self,visitee):
        pass
# Fills a list of all modules visited.
# Can visit a Sequence, Path, EndPath, or Task
# For purposes of this visitor, a module is considered
# to be an object that is one of these types: EDProducer,
# EDFilter, EDAnalyzer, OutputModule, ESProducer, ESSource,
# Service. The last three of these can only appear on a
# Task, they are not sequenceable. An object of one
# of these types is also called a leaf.
class ModuleNodeVisitor(object):
    """Visitor that collects every leaf node (module) into the supplied
    list, in visit order.  Usable on a Sequence, Path, EndPath, or Task."""
    def __init__(self,l):
        self.l = l
    def enter(self,visitee):
        if not visitee.isLeaf():
            return
        self.l.append(visitee)
    def leave(self,visitee):
        pass
# Should not be used on Tasks.
# Similar to ModuleNodeVisitor with the following
# differences. It only lists the modules that were
# contained inside a Task. It should only be used
# on Sequences, Paths, and EndPaths.
class ModuleNodeOnTaskVisitor(object):
    """Collects only the leaf modules that sit inside a Task.  Intended for
    Sequences, Paths, and EndPaths; do not use directly on a Task."""
    def __init__(self,l):
        self.l = l
        self._levelInTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, Task):
            # Entering a (possibly nested) Task; remember the depth.
            self._levelInTasks += 1
            return
        if self._levelInTasks > 0 and visitee.isLeaf():
            self.l.append(visitee)
    def leave(self,visitee):
        if isinstance(visitee, Task) and self._levelInTasks > 0:
            self._levelInTasks -= 1
class ModuleNodeOnConditionalTaskVisitor(object):
    """Collects the leaf modules held inside ConditionalTasks, plus the
    case modules of any SwitchProducer encountered (those behave like
    ConditionalTask contents even when the SwitchProducer is on the Path)."""
    def __init__(self,l):
        self.l = l
        self._levelInTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, ConditionalTask):
            self._levelInTasks += 1
        # SwitchProducer-contained modules are gathered before the depth
        # check so they are picked up even outside any ConditionalTask.
        if hasattr(visitee, "modulesForConditionalTask_"):
            self.l.extend(visitee.modulesForConditionalTask_())
        if self._levelInTasks > 0 and visitee.isLeaf():
            self.l.append(visitee)
    def leave(self,visitee):
        if isinstance(visitee, ConditionalTask) and self._levelInTasks > 0:
            self._levelInTasks -= 1
# Should not be used on Tasks.
# Similar to ModuleNodeVisitor with the following
# differences. It only lists the modules that were
# outside a Task, in the sequenced part of the sequence.
# It should only be used on Sequences, Paths, and
# EndPaths.
class ModuleNodeNotOnTaskVisitor(object):
    """Collects only the leaf modules in the sequenced part, skipping
    anything held on a Task.  Intended for Sequences, Paths, and EndPaths;
    do not use directly on a Task."""
    def __init__(self,l):
        self.l = l
        self._levelInTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
            return
        if self._levelInTasks == 0 and visitee.isLeaf():
            self.l.append(visitee)
    def leave(self,visitee):
        if isinstance(visitee, Task) and self._levelInTasks > 0:
            self._levelInTasks -= 1
# Can visit Tasks, Sequences, Paths, and EndPaths
# result will be set to True if and only if
# the module is in the object directly or
# indirectly through contained Sequences or
# associated Tasks.
class ContainsModuleVisitor(object):
    """Visitor whose result() becomes (and stays) True once the target
    object is visited.  Uses identity, not equality.  Usable on Tasks,
    Sequences, Paths, and EndPaths."""
    def __init__(self,mod):
        self._mod = mod
        self._result = False
    def result(self):
        return self._result
    def enter(self,visitee):
        # We look for this exact object, not one that compares equal.
        self._result = self._result or self._mod is visitee
    def leave(self,visitee):
        pass
# Can visit Tasks, Sequences, Paths, and EndPaths
# Fills a set of the names of the visited leaves.
# For the labelable ones the name is the label.
# For a Service the name is the type.
# It raises an exception if a labelable object
# does not have a label at all. It will return
# 'None' if the label attribute exists but was set
# to None. If a Service is not attached to the process
# it will also raise an exception.
class NodeNameVisitor(object):
    """Adds the name of each visited leaf to a set: the label for labelable
    objects, the type for a Service.  Raises if a Service is not attached
    to the process.  Usable on Tasks, Sequences, Paths, and EndPaths."""
    def __init__(self,l):
        self.l = l
    def enter(self,visitee):
        if not visitee.isLeaf():
            return
        if isinstance(visitee, _Labelable):
            self.l.add(visitee.label_())
        elif visitee._inProcess:
            # Non-labelable leaf attached to the process: use its type.
            self.l.add(visitee.type_())
        else:
            raise RuntimeError("Service not attached to process: {}".format(visitee.dumpPython()))
    def leave(self,visitee):
        pass
# This visitor works only with Sequences, Paths and EndPaths
# It will not work on Tasks
class ExpandVisitor(object):
    """Flattens a Sequence, Path, or EndPath: gathers the sequenced leaves
    in order plus the leaves held on associated Tasks/ConditionalTasks, so
    result() can build an equivalent single-level object of type 'type'.
    Do not use on Tasks."""
    def __init__(self, type):
        # The class (e.g. Sequence, Path, EndPath) used to build the result.
        self._type = type
        # Leaves appearing in the ordered (sequenced) part.
        self.l = []
        # Leaves found on Tasks outside any ConditionalTask.
        self.taskLeaves = []
        # Leaves found on Tasks nested inside ConditionalTasks.
        self.taskLeavesInConditionalTasks = []
        # Points at whichever task-leaf list is currently being filled.
        self.presentTaskLeaves = self.taskLeaves
        self._levelInTasks = 0
        # Leaves held directly by ConditionalTasks.
        self.conditionaltaskLeaves = []
        self._levelInConditionalTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
            return
        if isinstance(visitee, ConditionalTask):
            # Tasks below this point belong to the ConditionalTask.
            self.presentTaskLeaves = self.taskLeavesInConditionalTasks
            self._levelInConditionalTasks += 1
            return
        if visitee.isLeaf():
            if self._levelInTasks > 0:
                self.presentTaskLeaves.append(visitee)
            elif self._levelInConditionalTasks > 0:
                self.conditionaltaskLeaves.append(visitee)
            else:
                self.l.append(visitee)
    def leave(self, visitee):
        if self._levelInTasks > 0:
            if isinstance(visitee, Task):
                self._levelInTasks -= 1
            return
        if self._levelInConditionalTasks > 0:
            if isinstance(visitee, ConditionalTask):
                self._levelInConditionalTasks -= 1
                if 0 == self._levelInConditionalTasks:
                    # Back outside all ConditionalTasks: plain Tasks again.
                    self.presentTaskLeaves = self.taskLeaves
            return
        if isinstance(visitee,_UnarySequenceOperator):
            # Re-wrap the preceding leaf with its operator (~, ignore, wait).
            self.l[-1] = visitee
    def result(self):
        """Build and return the flattened object of type self._type."""
        tsks = []
        if self.taskLeaves:
            tsks.append(Task(*self.taskLeaves))
        if self.conditionaltaskLeaves:
            ct = ConditionalTask(*self.conditionaltaskLeaves)
            if self.taskLeavesInConditionalTasks:
                # Fixed: was ct.append(...), but _TaskBase provides add(),
                # not append(), so that call raised AttributeError.
                ct.add(*self.taskLeavesInConditionalTasks)
            tsks.append(ct)
        if len(self.l) > 0:
            # why doesn't sum(self.l) work?
            seq = self.l[0]
            for el in self.l[1:]:
                seq += el
            return self._type(seq, *tsks)
        else:
            return self._type(*tsks)
    def resultString(self):
        """Return the flattened contents as a '+'/',' separated string."""
        sep = ''
        returnValue = ''
        for m in self.l:
            if m is not None:
                returnValue += sep+str(m)
                sep = '+'
        if returnValue:
            sep = ','
        for n in self.taskLeaves:
            if n is not None:
                returnValue += sep+str(n)
                sep = ','
        return returnValue
class DecoratedNodeNameVisitor(object):
    """Fills a list with the 'decorated' names of the visited leaves: the
    label prefixed by the decoration of an enclosing unary operator
    (e.g. '!', '-', '|', '+'), with boolean-logic operators emitted before
    their operands and '@' marking the end of each boolean group.
    Contents held on Tasks/ConditionalTasks are skipped."""
    def __init__(self,l):
        self.l = l
        self._decoration =''
        self._levelInTasks = 0
    def initialize(self):
        """Reset all state so the visitor instance can be reused."""
        self.l[:] = []
        self._decoration =''
        self._levelInTasks = 0
    def enter(self,visitee):
        if isinstance(visitee, _TaskBase):
            self._levelInTasks += 1
        if self._levelInTasks > 0:
            # Ignore everything held on a Task or ConditionalTask.
            return
        if visitee.isLeaf():
            if hasattr(visitee, "_Labelable__label"):
                self.l.append(self._decoration+visitee.label_())
            else:
                # A leaf without a label was never attached to the process.
                error = "An object in a sequence was not found in the process\n"
                if hasattr(visitee, "_filename"):
                    error += "From file " + visitee._filename
                else:
                    error += "Dump follows\n" + repr(visitee)
                raise RuntimeError(error)
        if isinstance(visitee,_BooleanLogicExpression):
            self.l.append(self._decoration+visitee.operatorString())
        # A unary operator decorates the node entered immediately after it;
        # any other node clears the pending decoration.
        if isinstance(visitee,_UnarySequenceOperator):
            self._decoration=visitee.decoration()
        else:
            self._decoration=''
    def leave(self,visitee):
        if self._levelInTasks > 0:
            if isinstance(visitee, _TaskBase):
                self._levelInTasks -= 1
            return
        if isinstance(visitee,_BooleanLogicExpression):
            # Mark the end of the boolean-logic group.
            self.l.append('@')
class DecoratedNodeNamePlusVisitor(object):
    """Like DecoratedNodeNameVisitor, but additionally records the leaves
    found on Tasks (retrievable via leavesOnTasks()) instead of discarding
    them entirely."""
    def __init__(self,l):
        self.l = l
        self._decoration =''
        self._levelInTasks = 0
        self._leavesOnTasks = []
    def initialize(self):
        """Reset all state so the visitor instance can be reused."""
        self.l[:] = []
        self._decoration =''
        self._levelInTasks = 0
        self._leavesOnTasks[:] = []
    def enter(self,visitee):
        if isinstance(visitee, Task):
            self._levelInTasks += 1
        if self._levelInTasks > 0:
            # Inside a Task: collect the leaf separately, no decoration.
            if visitee.isLeaf():
                self._leavesOnTasks.append(visitee)
            return
        if visitee.isLeaf():
            if hasattr(visitee, "_Labelable__label"):
                self.l.append(self._decoration+visitee.label_())
            else:
                # A leaf without a label was never attached to the process.
                error = "An object in a sequence was not found in the process\n"
                if hasattr(visitee, "_filename"):
                    error += "From file " + visitee._filename
                else:
                    error += "Dump follows\n" + repr(visitee)
                raise RuntimeError(error)
        if isinstance(visitee,_BooleanLogicExpression):
            self.l.append(self._decoration+visitee.operatorString())
        # A unary operator decorates the node entered immediately after it;
        # any other node clears the pending decoration.
        if isinstance(visitee,_UnarySequenceOperator):
            self._decoration=visitee.decoration()
        else:
            self._decoration=''
    def leave(self,visitee):
        if self._levelInTasks > 0:
            if isinstance(visitee, Task):
                self._levelInTasks -= 1
            return
        if isinstance(visitee,_BooleanLogicExpression):
            # Mark the end of the boolean-logic group.
            self.l.append('@')
    def leavesOnTasks(self):
        """Return the leaves that were found on Tasks during the visit."""
        return self._leavesOnTasks
class _CopyAndExcludeSequenceVisitorOld(object):
    """Builds a copy of the visited sequence with the given modules (and
    Sequences) excluded; didExclude() reports whether anything was removed.
    Older implementation, superseded by _CopyAndExcludeSequenceVisitor."""
    def __init__(self,modulesToRemove):
        self.__modulesToIgnore = modulesToRemove
        # Stack of per-nesting-level child lists; each child entry is a
        # two-element list [node, wasChanged].
        self.__stack = list()
        self.__stack.append(list())
        self.__result = None
        self.__didExclude = False
    def enter(self,visitee):
        if len(self.__stack) > 0:
            # Record visitee in its parent's child list, unchanged for now.
            self.__stack[-1].append([visitee,False])
        if visitee.isLeaf():
            if visitee in self.__modulesToIgnore:
                self.__didExclude = True
                self.__stack[-1][-1]=[None,True]
        elif isinstance(visitee, Sequence):
            if visitee in self.__modulesToIgnore:
                self.__didExclude = True
                self.__stack[-1][-1]=[None,True]
            self.__stack.append(list())
        else:
            #need to add a stack entry to keep track of children
            self.__stack.append(list())
    def leave(self,visitee):
        node = visitee
        if not visitee.isLeaf():
            #were any children changed?
            l = self.__stack[-1]
            changed = False
            countNulls = 0
            nonNulls = list()
            for c in l:
                if c[1] == True:
                    changed = True
                if c[0] is None:
                    countNulls +=1
                else:
                    nonNulls.append(c[0])
            if changed:
                self.__didExclude = True
                if countNulls != 0:
                    #this node must go away
                    if len(nonNulls) == 0:
                        #all subnodes went away
                        node = None
                    else:
                        # Rebuild from the surviving children by chaining
                        # them with '+'.
                        node = nonNulls[0]
                        for n in nonNulls[1:]:
                            node = node+n
                else:
                    #some child was changed so we need to clone
                    # this node and replace it with one that holds
                    # the new child(ren)
                    children = [x[0] for x in l ]
                    if not isinstance(visitee,Sequence):
                        node = visitee.__new__(type(visitee))
                        node.__init__(*children)
                    else:
                        node = nonNulls[0]
        if node != visitee:
            #we had to replace this node so now we need to
            # change parent's stack entry as well
            if len(self.__stack) > 1:
                p = self.__stack[-2]
                for i,c in enumerate(p):
                    if c[0]==visitee:
                        c[0]=node
                        c[1]=True
                        break
        if not visitee.isLeaf():
            # Done with this level's children.
            self.__stack = self.__stack[:-1]
    def result(self):
        """Return the rebuilt top-level node chain (None if all excluded)."""
        result = None
        for n in (x[0] for x in self.__stack[0]):
            if n is None:
                continue
            if result is None:
                result = n
            else:
                result = result+n
        return result
    def didExclude(self):
        """True if any module or Sequence was excluded during the visit."""
        return self.__didExclude
class _MutatingSequenceVisitor(object):
    """Traverses a sequence or task and applies an operator to each node,
    accumulating the pieces of a modified copy.  The operator maps a node
    to itself (keep), to None (remove), or to a replacement node."""
    def __init__(self,operator):
        self.__operator = operator
        # Stack of per-nesting-level child lists; each child entry is
        # [node, wasModified, tasksFromFlattenedSequence].
        self.__stack = list()
        self.__stack.append(list())
        self.__didApply = False
        # Depth inside a non-leaf node that the operator replaced; its
        # children must not be processed again.
        self.__levelInModifiedNonLeaf = 0
    def enter(self,visitee):
        if self.__levelInModifiedNonLeaf > 0:
            # Inside an already-replaced subtree: just track the depth.
            if not visitee.isLeaf():
                self.__levelInModifiedNonLeaf += 1
            return
        if not len(self.__stack) > 0:
            raise RuntimeError("LogicError Empty stack in MutatingSequenceVisitor.\n"
                "This should never happen. Contact a Framework developer.")
        v = self.__operator(visitee)
        if v is visitee:
            # Unmodified: record it and open a child list for non-leaves.
            self.__stack[-1].append([visitee, False, None])
            if not visitee.isLeaf():
                self.__stack.append(list())
        else:
            # Modified (replaced or removed); skip its children.
            self.__didApply = True
            self.__stack[-1].append([v, True, None])
            if not visitee.isLeaf():
                self.__levelInModifiedNonLeaf = 1
    def leave(self,visitee):
        if visitee.isLeaf():
            return
        if self.__levelInModifiedNonLeaf > 0:
            self.__levelInModifiedNonLeaf -= 1
            return
        contents = self.__stack[-1]
        changed = False
        allNull = True
        for c in contents:
            if c[1] == True:
                changed = True
            if c[0] is not None:
                allNull = False
        if changed:
            if allNull:
                # Every child vanished, so this node vanishes too.
                self.__stack[-2][-1] = [None, True, None]
            elif isinstance(visitee, _UnarySequenceOperator):
                # Re-wrap the (single) surviving child in the operator.
                node = visitee.__new__(type(visitee))
                node.__init__(contents[0][0])
                self.__stack[-2][-1] = [node, True, None]
            elif isinstance(visitee, _TaskBase):
                # Rebuild the task from its surviving children.
                nonNull = []
                for c in contents:
                    if c[0] is not None:
                        nonNull.append(c[0])
                self.__stack[-2][-1] = [visitee._makeInstance(*nonNull), True, None]
            elif isinstance(visitee, Sequence):
                # Flatten the sequence, separating tasks from sequenced items.
                seq = _SequenceCollection()
                tasks = list()
                for c in contents:
                    if c[0] is None:
                        continue
                    if isinstance(c[0], _TaskBase):
                        tasks.append(c[0])
                    else:
                        seq = seq + c[0]
                    if c[2] is not None:
                        tasks.extend(c[2])
                self.__stack[-2][-1] = [seq, True, tasks]
        if not visitee.isLeaf():
            # Done with this level's children.
            self.__stack = self.__stack[:-1]
    def result(self, visitedContainer):
        """Return the rebuilt top level: a list of items for a task, or a
        [sequenceCollection, tasks] pair for a sequence."""
        if isinstance(visitedContainer, _TaskBase):
            result = list()
            for n in (x[0] for x in self.__stack[0]):
                if n is not None:
                    result.append(n)
            return result
        seq = _SequenceCollection()
        tasks = list()
        for c in self.__stack[0]:
            if c[0] is None:
                continue
            if isinstance(c[0], _TaskBase):
                tasks.append(c[0])
            else:
                seq = seq + c[0]
            if c[2] is not None:
                tasks.extend(c[2])
        return [seq, tasks]
    def _didApply(self):
        """True if the operator modified at least one node."""
        return self.__didApply
class _CopyAndRemoveFirstSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a sequence/task and builds a copy with the first
    occurrence of a given module removed."""
    def __init__(self,moduleToRemove):
        class _RemoveFirstOperator(object):
            # Maps the first sighting of moduleToRemove (by identity) to
            # None and leaves every other node untouched.
            def __init__(self,moduleToRemove):
                self.__moduleToRemove = moduleToRemove
                self.__found = False
            def __call__(self,test):
                if not self.__found and test is self.__moduleToRemove:
                    self.__found = True
                    return None
                return test
        # Fixed: was super(type(self),self).__init__(...), which recurses
        # infinitely if this class is ever subclassed; name the class
        # explicitly instead.
        super(_CopyAndRemoveFirstSequenceVisitor,self).__init__(_RemoveFirstOperator(moduleToRemove))
    def didRemove(self):
        """True if a module was removed during the traversal."""
        return self._didApply()
class _CopyAndExcludeSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a sequence/task and builds a copy with every occurrence
    of the given modules excluded."""
    def __init__(self,modulesToRemove):
        class _ExcludeOperator(object):
            # Maps any node found in modulesToRemove to None (exclude it).
            def __init__(self,modulesToRemove):
                self.__modulesToIgnore = modulesToRemove
            def __call__(self,test):
                # Fixed: use the stored container; the original closed over
                # the outer argument and never used this attribute.
                if test in self.__modulesToIgnore:
                    return None
                return test
        # Fixed: was super(type(self),self).__init__(...), which recurses
        # infinitely under subclassing; name the class explicitly.
        super(_CopyAndExcludeSequenceVisitor,self).__init__(_ExcludeOperator(modulesToRemove))
    def didExclude(self):
        """True if any module was excluded during the traversal."""
        return self._didApply()
class _CopyAndReplaceSequenceVisitor(_MutatingSequenceVisitor):
    """Traverses a sequence/task and builds a copy with every occurrence
    of 'target' replaced by 'replace'."""
    def __init__(self,target,replace):
        class _ReplaceOperator(object):
            # Maps nodes equal to the target to the replacement.
            def __init__(self,target,replace):
                self.__target = target
                self.__replace = replace
            def __call__(self,test):
                if test == self.__target:
                    return self.__replace
                return test
        # Fixed: was super(type(self),self).__init__(...), which recurses
        # infinitely under subclassing; name the class explicitly.
        super(_CopyAndReplaceSequenceVisitor,self).__init__(_ReplaceOperator(target,replace))
    def didReplace(self):
        """True if any node was replaced during the traversal."""
        return self._didApply()
class _TaskBase(_ConfigureComponent, _Labelable) :
    """Base class for Task and ConditionalTask: an unordered collection of
    components (modules, ESSources, ESProducers, Services, nested tasks).
    Subclasses customize behavior through the _taskType/_makeInstance/
    _allowedInTask/_mustResolve static hooks."""
    def __init__(self, *items):
        """Create the task holding 'items'; duplicates are collapsed."""
        self._collection = OrderedSet()
        self.add(*items)
    def __setattr__(self,name,value):
        # Tasks hold no parameters; only private attributes may be set.
        if not name.startswith("_"):
            raise AttributeError("You cannot set parameters for {} objects.".format(self._taskType()))
        else:
            self.__dict__[name] = value
    def add(self, *items):
        """Add items to the collection, verifying each is an allowed type."""
        for item in items:
            if not self._allowedInTask(item):
                raise RuntimeError("Adding an entry of type '{0}' to a {1}.\n"
                    "It is illegal to add this type to a {1}.".format(type(item).__name__, self._taskType()))
            self._collection.add(item)
    def fillContents(self, taskContents, options=PrintOptions()):
        """Add the python dump of this task's contents to the set
        'taskContents'; a labeled task contributes only its label."""
        if self.hasLabel_():
            taskContents.add(_Labelable.dumpSequencePython(self, options))
        else:
            for i in self._collection:
                if isinstance(i, _TaskBase):
                    i.fillContents(taskContents, options)
                else:
                    taskContents.add(i.dumpSequencePython(options))
    def dumpPython(self, options=PrintOptions()):
        """Return the python configuration syntax, newline terminated."""
        s = self.dumpPythonNoNewline(options)
        return s + "\n"
    def dumpPythonNoNewline(self, options=PrintOptions()):
        """Return e.g. 'cms.Task(process.a, process.b)'.  Contents are
        sorted for a reproducible dump; beyond 255 entries the *[...]
        argument-list form is used."""
        taskContents = set()
        for i in self._collection:
            if isinstance(i, _TaskBase):
                i.fillContents(taskContents, options)
            else:
                taskContents.add(i.dumpSequencePython(options))
        s=''
        iFirst = True
        for item in sorted(taskContents):
            if not iFirst:
                s += ", "
            iFirst = False
            s += item
        if len(taskContents) > 255:
            s = "*[" + s + "]"
        return "cms.{}({})".format(self._taskType(),s)
    def directDependencies(self,sortByType=True):
        """Return the direct dependencies of the held collection."""
        return findDirectDependencies(self, self._collection,sortByType=sortByType)
    def _isTaskComponent(self):
        return False
    def isLeaf(self):
        return False
    def visit(self,visitor):
        """Call visitor.enter/leave for each held item, recursing into
        non-leaf items (nested tasks)."""
        for i in self._collection:
            visitor.enter(i)
            if not i.isLeaf():
                i.visit(visitor)
            visitor.leave(i)
    def _errorstr(self):
        # Fixed: was self.taskType_(), a method that is not defined
        # anywhere; the subclass hook used throughout this class is
        # _taskType().
        return "{}(...)".format(self._taskType())
    def __iter__(self):
        for key in self._collection:
            yield key
    def __str__(self):
        """Comma separated string of all contained leaf modules."""
        l = []
        v = ModuleNodeVisitor(l)
        self.visit(v)
        s = ''
        for i in l:
            if s:
                s += ', '
            s += str (i)
        return s
    def __repr__(self):
        s = str(self)
        return "cms."+type(self).__name__+'('+s+')\n'
    def moduleNames(self):
        """Return the set of names of all modules held (recursively)."""
        result = set()
        visitor = NodeNameVisitor(result)
        self.visit(visitor)
        return result
    def contains(self, mod):
        """True if 'mod' is held directly or inside a nested task."""
        visitor = ContainsModuleVisitor(mod)
        self.visit(visitor)
        return visitor.result()
    def copy(self):
        """Return a new task holding the same items (shallow copy)."""
        return self._makeInstance(*self._collection)
    def copyAndExclude(self,listOfModulesToExclude):
        """Return a copy with all occurrences of the listed modules
        removed (including inside nested tasks)."""
        for i in listOfModulesToExclude:
            if not i._isTaskComponent():
                raise TypeError("copyAndExclude can only exclude objects that can be placed on a Task")
        v = _CopyAndExcludeSequenceVisitor(listOfModulesToExclude)
        self.visit(v)
        return self._makeInstance(*v.result(self))
    def copyAndAdd(self, *modulesToAdd):
        """Return a copy with the given modules added."""
        t = self.copy()
        t.add(*modulesToAdd)
        return t
    def expandAndClone(self):
        """Return a flat copy holding every leaf, nesting removed."""
        l = []
        v = ModuleNodeVisitor(l)
        self.visit(v)
        return self._makeInstance(*l)
    def replace(self, original, replacement):
        """Replace every occurrence of 'original' with 'replacement' (a
        replacement of None removes it).  Returns True if anything was
        replaced."""
        if not self._allowedInTask(original) or (not replacement is None and not self._allowedInTask(replacement)):
            raise TypeError("The {0} replace function only works with objects that can be placed on a {0}\n".format(self._taskType()) + \
                            "  replace was called with original type = {}\n".format(str(type(original))) + \
                            "  and replacement type = {}\n".format(str(type(replacement))))
        else:
            v = _CopyAndReplaceSequenceVisitor(original,replacement)
            self.visit(v)
            if v.didReplace():
                self._collection.clear()
                self.add(*v.result(self))
            return v.didReplace()
    def remove(self, something):
        """Remove the first occurrence of 'something'; returns True if a
        removal happened."""
        if not self._allowedInTask(something):
            raise TypeError("remove only works with objects that can be placed on a Task")
        v = _CopyAndRemoveFirstSequenceVisitor(something)
        self.visit(v)
        if v.didRemove():
            self._collection.clear()
            self.add(*v.result(self))
        return v.didRemove()
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Replace contained placeholders (and resolve nested tasks) using
        the objects found in 'processDict'.  Returns self."""
        temp = OrderedSet()
        for i in self._collection:
            if self._mustResolve(i):
                temp.add(i.resolve(processDict,keepIfCannotResolve))
            else:
                temp.add(i)
        self._collection = temp
        return self
class _TaskBasePlaceholder(object):
    """Base for TaskPlaceholder/ConditionalTaskPlaceholder: stands in for a
    task by name until resolve() swaps in the real object looked up in the
    process.  Subclasses supply the _typeName/_makeInstance/_allowedInTask/
    _taskClass hooks."""
    def __init__(self, name):
        self._name = name
    def _isTaskComponent(self):
        return False
    def isLeaf(self):
        return False
    def visit(self,visitor):
        # Nothing to visit until the placeholder has been resolved.
        pass
    def __str__(self):
        return self._name
    def insertInto(self, parameterSet, myname):
        # A placeholder must be resolved before configuration insertion.
        raise RuntimeError("The {} {} was never overridden".format(self._typeName(), self._name))
    def resolve(self, processDict,keepIfCannotResolve=False):
        """Look up the real object by name in 'processDict', resolving it
        recursively if it is itself a task of the expected class."""
        if not self._name in processDict:
            if keepIfCannotResolve:
                return self
            raise RuntimeError("The {} {} cannot be resolved.\n Known keys are: {}".format(self._typeName(), self._name,str(processDict.keys())))
        o = processDict[self._name]
        if not self._allowedInTask(o):
            raise RuntimeError("The {} {} refers to an object type which is not allowed to be on a task: {}".format(self._typeName(), self._name, str(type(o))))
        if isinstance(o, self._taskClass()):
            return o.resolve(processDict)
        return o
    def copy(self):
        """Return a new placeholder for the same name."""
        return self._makeInstance(self._name)
    def dumpSequencePython(self, options=PrintOptions()):
        return 'cms.{}("{}")'.format(self._typeName(), self._name)
    def dumpPython(self, options=PrintOptions()):
        """Python dump; the name is prefixed by 'process.' in cfg mode."""
        result = 'cms.{}(\"'.format(self._typeName())
        if options.isCfg:
            result += 'process.'
        result += self._name+'\")\n'
        return result
class Task(_TaskBase) :
    """An unordered collection of task components (modules, ESSources,
    ESProducers, Services, other Tasks, TaskPlaceholders) that can be
    associated with Sequences, Paths, EndPaths and Schedules."""
    @staticmethod
    def _taskType():
        return "Task"
    def _place(self, name, proc):
        # Called when this Task is attached to a Process under 'name'.
        proc._placeTask(name,self)
    def _isTaskComponent(self):
        # A Task may itself be placed on another Task.
        return True
    @staticmethod
    def _makeInstance(*items):
        return Task(*items)
    @staticmethod
    def _allowedInTask(item ):
        # Anything declaring itself a task component, or a placeholder.
        return (isinstance(item, _ConfigureComponent) and item._isTaskComponent()) or isinstance(item, TaskPlaceholder)
    @staticmethod
    def _mustResolve(item):
        # Nested Tasks and placeholders need resolving; leaves do not.
        return isinstance(item, Task) or isinstance(item, TaskPlaceholder)
class TaskPlaceholder(_TaskBasePlaceholder):
    """Placeholder that is replaced by the Task of the same name when
    resolved against the process."""
    def _isTaskComponent(self):
        # Like a Task, a TaskPlaceholder may be placed on a Task.
        return True
    @staticmethod
    def _typeName():
        return "TaskPlaceholder"
    @staticmethod
    def _makeInstance(name):
        return TaskPlaceholder(name)
    @staticmethod
    def _allowedInTask(obj):
        return Task._allowedInTask(obj)
    @staticmethod
    def _taskClass():
        return Task
class ConditionalTask(_TaskBase) :
    """An unordered collection like Task, but additionally allowed to hold
    ConditionalTasks and their placeholders.  A ConditionalTask itself may
    not be placed on a Task (see testTaskConstructor)."""
    @staticmethod
    def _taskType():
        return "ConditionalTask"
    def _place(self, name, proc):
        # Called when this ConditionalTask is attached to a Process.
        proc._placeConditionalTask(name,self)
    def _isTaskComponent(self):
        # Not a Task component: a ConditionalTask cannot sit on a Task.
        return False
    @staticmethod
    def _makeInstance(*items):
        return ConditionalTask(*items)
    @staticmethod
    def _allowedInTask(item):
        # Everything a Task allows, plus ConditionalTasks and their
        # placeholders.
        return isinstance(item, ConditionalTask) or isinstance(item, ConditionalTaskPlaceholder) or Task._allowedInTask(item)
    @staticmethod
    def _mustResolve(item):
        return Task._mustResolve(item) or isinstance(item, ConditionalTask) or isinstance(item, ConditionalTaskPlaceholder)
class ConditionalTaskPlaceholder(_TaskBasePlaceholder):
    """Placeholder that is replaced by the ConditionalTask of the same
    name when resolved against the process."""
    def _isTaskComponent(self):
        # Like a ConditionalTask, it cannot sit on a plain Task.
        return False
    @staticmethod
    def _typeName():
        return "ConditionalTaskPlaceholder"
    @staticmethod
    def _makeInstance(name):
        return ConditionalTaskPlaceholder(name)
    @staticmethod
    def _allowedInTask(obj):
        return Task._allowedInTask(obj) or ConditionalTask._allowedInTask(obj)
    @staticmethod
    def _taskClass():
        return ConditionalTask
if __name__=="__main__":
import unittest
    class DummyModule(_Labelable, _SequenceLeaf, _ConfigureComponent):
        """Minimal labelable leaf usable in sequences and on Tasks;
        stands in for an EDModule in the tests below."""
        def __init__(self,name):
            self.setLabel(name)
        def _isTaskComponent(self):
            return True
        def __repr__(self):
            return self.label_()
    class DummyBooleanModule(_Labelable, _BooleanLogicSequenceLeaf):
        """Minimal labelable leaf usable in boolean (&, |) expressions;
        stands in for a filter in the tests below."""
        def __init__(self,name):
            self.setLabel(name)
class TestModuleCommand(unittest.TestCase):
        def setUp(self):
            """No shared fixtures; each test builds its own objects."""
            pass
        def testBoolean(self):
            """Checks &, |, ~, cms.ignore and cms.wait in Paths: the
            dumpPython output and the decorated node names produced by
            DecoratedNodeNameVisitor."""
            a = DummyBooleanModule("a")
            b = DummyBooleanModule("b")
            p = Path( a & b)
            self.assertEqual(p.dumpPython(),"cms.Path(process.a&process.b)\n")
            l = list()
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            self.assertEqual(l,['&','a','b','@'])
            p2 = Path( a | b)
            self.assertEqual(p2.dumpPython(),"cms.Path(process.a|process.b)\n")
            l[:]=[]
            p2.visit(namesVisitor)
            self.assertEqual(l,['|','a','b','@'])
            c = DummyBooleanModule("c")
            d = DummyBooleanModule("d")
            p3 = Path(a & b & c & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            l[:]=[]
            p3.visit(namesVisitor)
            self.assertEqual(l,['&','a','b','c','d','@'])
            # Parenthesized groupings of the same operator flatten to the
            # same dump.
            p3 = Path(((a & b) & c) & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path(a & (b & (c & d)))
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path((a & b) & (c & d))
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p3 = Path(a & (b & c) & d)
            self.assertEqual(p3.dumpPython(),"cms.Path(process.a&process.b&process.c&process.d)\n")
            p4 = Path(a | b | c | d)
            self.assertEqual(p4.dumpPython(),"cms.Path(process.a|process.b|process.c|process.d)\n")
            # Mixed operators: & binds tighter than | (python precedence).
            p5 = Path(a | b & c & d )
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a|(process.b&process.c&process.d))\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','a','&','b','c','d','@','@'])
            p5 = Path(a & b | c & d )
            self.assertEqual(p5.dumpPython(),"cms.Path((process.a&process.b)|(process.c&process.d))\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','&','a','b','@','&','c','d','@','@'])
            p5 = Path(a & (b | c) & d )
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a&(process.b|process.c)&process.d)\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['&','a','|','b','c','@','d','@'])
            p5 = Path(a & b & c | d )
            self.assertEqual(p5.dumpPython(),"cms.Path((process.a&process.b&process.c)|process.d)\n")
            l[:]=[]
            p5.visit(namesVisitor)
            self.assertEqual(l,['|','&','a','b','c','@','d','@'])
            # Unary operators inside boolean expressions: ~, ignore, wait
            # decorate the node names with '!', '-', '|', '+'.
            p6 = Path( a & ~b)
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(~process.b))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','!b','@'])
            p6 = Path( a & ignore(b))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.ignore(process.b)))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','-b','@'])
            p6 = Path( a & wait(b))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(process.b)))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','|b','@'])
            p6 = Path( a & wait(ignore(b)))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(cms.ignore(process.b))))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','+b','@'])
            p6 = Path( a & ignore(wait(b)))
            self.assertEqual(p6.dumpPython(),"cms.Path(process.a&(cms.wait(cms.ignore(process.b))))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['&','a','+b','@'])
            p6 = Path(~(a&b))
            self.assertEqual(p6.dumpPython(),"cms.Path(~(process.a&process.b))\n")
            l[:]=[]
            p6.visit(namesVisitor)
            self.assertEqual(l,['!&','a','b','@'])
        def testTaskConstructor(self):
            """A ConditionalTask is not allowed on a plain Task."""
            a = DummyModule("a")
            self.assertRaises(RuntimeError, lambda : Task(ConditionalTask(a)) )
        def testDumpPython(self):
            """Checks dumpPython output for Paths/Sequences combined with
            Tasks, ConditionalTasks and placeholders, plus the decorated
            node names and module lists of the same objects."""
            a = DummyModule("a")
            b = DummyModule('b')
            p = Path((a*b))
            self.assertEqual(p.dumpPython(),"cms.Path(process.a+process.b)\n")
            p2 = Path((b+a))
            self.assertEqual(p2.dumpPython(),"cms.Path(process.b+process.a)\n")
            c = DummyModule('c')
            p3 = Path(c*(a+b))
            self.assertEqual(p3.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p4 = Path(c*a+b)
            self.assertEqual(p4.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p5 = Path(a+ignore(b))
            self.assertEqual(p5.dumpPython(),"cms.Path(process.a+cms.ignore(process.b))\n")
            p5a = Path(a+wait(b))
            self.assertEqual(p5a.dumpPython(),"cms.Path(process.a+cms.wait(process.b))\n")
            p5b = Path(a+ignore(wait(b)))
            self.assertEqual(p5b.dumpPython(),"cms.Path(process.a+cms.wait(cms.ignore(process.b)))\n")
            p5c = Path(a+wait(ignore(b)))
            self.assertEqual(p5c.dumpPython(),"cms.Path(process.a+cms.wait(cms.ignore(process.b)))\n")
            p6 = Path(c+a*b)
            self.assertEqual(p6.dumpPython(),"cms.Path(process.c+process.a+process.b)\n")
            p7 = Path(a+~b)
            self.assertEqual(p7.dumpPython(),"cms.Path(process.a+~process.b)\n")
            p8 = Path((a+b)*c)
            self.assertEqual(p8.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
            # Paths with associated Tasks: tasks are dumped sorted.
            t1 = Task(a)
            t2 = Task(c, b)
            t3 = Task()
            p9 = Path((a+b)*c, t1)
            self.assertEqual(p9.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.Task(process.a))\n")
            p10 = Path((a+b)*c, t2, t1)
            self.assertEqual(p10.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.Task(process.a), cms.Task(process.b, process.c))\n")
            p11 = Path(t1, t2, t3)
            self.assertEqual(p11.dumpPython(),"cms.Path(cms.Task(), cms.Task(process.a), cms.Task(process.b, process.c))\n")
            d = DummyModule("d")
            e = DummyModule('e')
            f = DummyModule('f')
            t4 = Task(d, Task(f))
            s = Sequence(e, t4)
            p12 = Path(a+b+s+c,t1)
            self.assertEqual(p12.dumpPython(),"cms.Path(process.a+process.b+cms.Sequence(process.e, cms.Task(process.d, process.f))+process.c, cms.Task(process.a))\n")
            # Same with ConditionalTasks.
            ct1 = ConditionalTask(a)
            ct2 = ConditionalTask(c, b)
            ct3 = ConditionalTask()
            p13 = Path((a+b)*c, ct1)
            self.assertEqual(p13.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.ConditionalTask(process.a))\n")
            p14 = Path((a+b)*c, ct2, ct1)
            self.assertEqual(p14.dumpPython(),"cms.Path(process.a+process.b+process.c, cms.ConditionalTask(process.a), cms.ConditionalTask(process.b, process.c))\n")
            p15 = Path(ct1, ct2, ct3)
            self.assertEqual(p15.dumpPython(),"cms.Path(cms.ConditionalTask(), cms.ConditionalTask(process.a), cms.ConditionalTask(process.b, process.c))\n")
            ct4 = ConditionalTask(d, Task(f))
            s = Sequence(e, ct4)
            p16 = Path(a+b+s+c,ct1)
            self.assertEqual(p16.dumpPython(),"cms.Path(process.a+process.b+cms.Sequence(process.e, cms.ConditionalTask(process.d, process.f))+process.c, cms.ConditionalTask(process.a))\n")
            # More than 255 entries must dump via the *[...] list form.
            n = 260
            mods = []
            labels = []
            for i in range(0, n):
                l = "a{}".format(i)
                labels.append("process."+l)
                mods.append(DummyModule(l))
            labels.sort()
            task = Task(*mods)
            self.assertEqual(task.dumpPython(), "cms.Task(*[" + ", ".join(labels) + "])\n")
            conditionalTask = ConditionalTask(*mods)
            self.assertEqual(conditionalTask.dumpPython(), "cms.ConditionalTask(*[" + ", ".join(labels) + "])\n")
            # Decorated names skip task contents entirely.
            l = list()
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b'])
            l[:] = []
            p5.visit(namesVisitor)
            self.assertEqual(l, ['a', '-b'])
            l[:] = []
            p5a.visit(namesVisitor)
            self.assertEqual(l, ['a', '|b'])
            l[:] = []
            p5b.visit(namesVisitor)
            self.assertEqual(l, ['a', '+b'])
            l[:] = []
            p5c.visit(namesVisitor)
            self.assertEqual(l, ['a', '+b'])
            l[:] = []
            p7.visit(namesVisitor)
            self.assertEqual(l, ['a', '!b'])
            l[:] = []
            p10.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'c'])
            l[:] = []
            p12.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'e', 'c'])
            l[:] = []
            p16.visit(namesVisitor)
            self.assertEqual(l, ['a', 'b', 'e', 'c'])
            l[:] = []
            moduleVisitor = ModuleNodeVisitor(l)
            p8.visit(moduleVisitor)
            names = [m.label_() for m in l]
            self.assertEqual(names, ['a', 'b', 'c'])
            # Placeholders dump with the process. prefix in cfg mode.
            tph = TaskPlaceholder('a')
            self.assertEqual(tph.dumpPython(), 'cms.TaskPlaceholder("process.a")\n')
            sph = SequencePlaceholder('a')
            self.assertEqual(sph.dumpPython(), 'cms.SequencePlaceholder("process.a")\n')
            ctph = ConditionalTaskPlaceholder('a')
            self.assertEqual(ctph.dumpPython(), 'cms.ConditionalTaskPlaceholder("process.a")\n')
        def testDumpConfig(self):
            """Checks the legacy config-language dump of Paths; operators
            and ignore/negate render as '&', '-', '!'."""
            a = DummyModule("a")
            b = DummyModule('b')
            p = Path((a*b))
            self.assertEqual(p.dumpConfig(None),"{a&b}\n")
            p2 = Path((b+a))
            self.assertEqual(p2.dumpConfig(None),"{b&a}\n")
            c = DummyModule('c')
            p3 = Path(c*(a+b))
            self.assertEqual(p3.dumpConfig(None),"{c&a&b}\n")
            p4 = Path(c*a+b)
            self.assertEqual(p4.dumpConfig(None),"{c&a&b}\n")
            p5 = Path(a+ignore(b))
            self.assertEqual(p5.dumpConfig(None),"{a&-b}\n")
            p6 = Path(c+a*b)
            self.assertEqual(p6.dumpConfig(None),"{c&a&b}\n")
            p7 = Path(a+~b)
            self.assertEqual(p7.dumpConfig(None),"{a&!b}\n")
            p8 = Path((a+b)*c)
            self.assertEqual(p8.dumpConfig(None),"{a&b&c}\n")
        def testVisitor(self):
            """Checks enter/leave call order during visitation and the
            module-collecting visitors on paths that hold Tasks and
            ConditionalTasks."""
            class TestVisitor(object):
                # Verifies that enter()/leave() receive the expected nodes
                # in the expected order, raising on any mismatch.
                def __init__(self, enters, leaves):
                    self._enters = enters
                    self._leaves = leaves
                def enter(self,visitee):
                    if self._enters[0] != visitee:
                        raise RuntimeError("wrong node ("+str(visitee)+") on 'enter'")
                    else:
                        self._enters = self._enters[1:]
                def leave(self,visitee):
                    if self._leaves[0] != visitee:
                        raise RuntimeError("wrong node ("+str(visitee)+") on 'leave'\n expected ("+str(self._leaves[0])+")")
                    else:
                        self._leaves = self._leaves[1:]
            a = DummyModule("a")
            b = DummyModule('b')
            multAB = a*b
            p = Path(multAB)
            t = TestVisitor(enters=[a,b],
                            leaves=[a,b])
            p.visit(t)
            plusAB = a+b
            p = Path(plusAB)
            t = TestVisitor(enters=[a,b],
                            leaves=[a,b])
            p.visit(t)
            # Nested ConditionalTasks: visitation descends into each task,
            # so shared tasks are visited multiple times.
            c=DummyModule("c")
            d=DummyModule("d")
            e=DummyModule("e")
            f=DummyModule("f")
            g=DummyModule("g")
            ct1 = ConditionalTask(d)
            ct2 = ConditionalTask(e, ct1)
            ct3 = ConditionalTask(f, g, ct2)
            s=Sequence(plusAB, ct3, ct2)
            multSC = s*c
            p=Path(multSC, ct1, ct2)
            l = []
            v = ModuleNodeVisitor(l)
            p.visit(v)
            expected = [a,b,f,g,e,d,e,d,c,d,e,d]
            self.assertEqual(expected,l)
            # Same structure built with plain Tasks.
            t1 = Task(d)
            t2 = Task(e, t1)
            t3 = Task(f, g, t2)
            s=Sequence(plusAB, t3, t2)
            multSC = s*c
            p=Path(multSC, t1, t2)
            l = []
            v = ModuleNodeVisitor(l)
            p.visit(v)
            expected = [a,b,f,g,e,d,e,d,c,d,e,d]
            self.assertEqual(expected,l)
            l[:] = []
            v = ModuleNodeOnTaskVisitor(l)
            p.visit(v)
            expected = [f,g,e,d,e,d,d,e,d]
            self.assertEqual(expected,l)
            l[:] = []
            v = ModuleNodeNotOnTaskVisitor(l)
            p.visit(v)
            expected = [a,b,c]
            self.assertEqual(expected,l)
            t=TestVisitor(enters=[s,a,b,t3,f,g,t2,e,t1,d,t2,e,t1,d,c,t1,d,t2,e,t1,d],
                          leaves=[a,b,f,g,e,d,t1,t2,t3,e,d,t1,t2,s,c,d,t1,e,d,t1,t2])
            p.visit(t)
            notA= ~a
            p=Path(notA)
            t=TestVisitor(enters=[notA,a],leaves=[a,notA])
            p.visit(t)
        def testResolve(self):
            """Checks SequencePlaceholder resolution: placeholders are
            invisible to visitors until resolve() swaps in the real
            sequences, including chained placeholders and decorated leaves."""
            m1 = DummyModule("m1")
            m2 = DummyModule("m2")
            s1 = Sequence(m1)
            s2 = SequencePlaceholder("s3")
            s3 = Sequence(m2)
            p = Path(s1*s2)
            l = list()
            namesVisitor = DecoratedNodeNameVisitor(l)
            p.visit(namesVisitor)
            # Before resolving, the placeholder contributes nothing.
            self.assertEqual(l, ['m1'])
            p.resolve(dict(s1=s1, s2=s2, s3=s3))
            l[:] = []
            p.visit(namesVisitor)
            self.assertEqual(l, ['m1', 'm2'])
            l[:]=[]
            # A placeholder referring to another placeholder resolves
            # through to the final sequence.
            s1 = Sequence(m1)
            s2 = SequencePlaceholder("s3")
            s3 = Sequence(m2)
            s4 = SequencePlaceholder("s2")
            p=Path(s1+s4)
            p.resolve(dict(s1=s1, s2=s2, s3=s3, s4=s4))
            p.visit(namesVisitor)
            self.assertEqual(l, ['m1', 'm2'])
            l[:]=[]
            # Unary operators survive resolution with their decorations.
            m3 = DummyModule("m3")
            m4 = DummyModule("m4")
            s1 = Sequence(~m1)
            s2 = SequencePlaceholder("s3")
            s3 = Sequence(ignore(m2))
            s4 = Sequence(wait(m3) + ignore(wait(m4)))
            d = dict(s1=s1, s2=s2, s3=s3, s4=s4)
            p = Path(s1*s2*s4)
            p.resolve(dict(s1=s1, s2=s2, s3=s3, s4=s4))
            p.visit(namesVisitor)
            self.assertEqual(l, ['!m1', '-m2', '|m3', '+m4'])
def testReplace(self):
    """Exercise replace() on Sequences, Tasks, ConditionalTasks and FinalPaths.

    replace() substitutes a module (possibly decorated with ~ or ignore),
    a nested Sequence, or a Task/ConditionalTask everywhere it occurs.
    """
    m1 = DummyModule("m1")
    m2 = DummyModule("m2")
    m3 = DummyModule("m3")
    m4 = DummyModule("m4")
    m5 = DummyModule("m5")
    s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
    s2 = Sequence(m1*m2)
    l = []
    namesVisitor = DecoratedNodeNameVisitor(l)
    s1.visit(namesVisitor)
    # Visitor encoding: '!' = negated (~), '-' = ignored.
    self.assertEqual(l,['m1', '!m2', 'm1', 'm2', '-m2'])
    # Replacing a decorated operand (~m1) matches only the decorated form.
    s3 = Sequence(~m1*s2)
    s3.replace(~m1, m2)
    l[:] = []
    s3.visit(namesVisitor)
    self.assertEqual(l, ['m2', 'm1', 'm2'])
    s3.replace(m2, ~m1)
    l[:] = []
    s3.visit(namesVisitor)
    self.assertEqual(l, ['!m1', 'm1', '!m1'])
    # Same round-trip with ignore() decoration.
    s3 = Sequence(ignore(m1)*s2)
    s3.replace(ignore(m1), m2)
    l[:] = []
    s3.visit(namesVisitor)
    self.assertEqual(l, ['m2', 'm1', 'm2'])
    s3.replace(m2, ignore(m1))
    l[:] = []
    s3.visit(namesVisitor)
    self.assertEqual(l, ['-m1', 'm1', '-m1'])
    # A SequencePlaceholder can be replaced by a concrete module.
    ph = SequencePlaceholder('x')
    s4 = Sequence(Sequence(ph))
    s4.replace(ph,m2)
    self.assertEqual(s4.dumpPython(), "cms.Sequence(process.m2)\n")
    # Replacement applies to every occurrence, decorated or not.
    s1.replace(m2,m3)
    l[:] = []
    s1.visit(namesVisitor)
    self.assertEqual(l,['m1', '!m3', 'm1', 'm3', '-m3'])
    # A whole nested Sequence can be replaced by a module.
    s2 = Sequence(m1*m2)
    s3 = Sequence(~m1*s2)
    l[:] = []
    s3.visit(namesVisitor)
    self.assertEqual(l,['!m1', 'm1', 'm2'])
    l[:] = []
    s3.replace(s2,m1)
    s3.visit(namesVisitor)
    self.assertEqual(l,['!m1', 'm1'])
    # Replacement recurses into nested Sequences.
    s1 = Sequence(m1+m2)
    s2 = Sequence(m3+m4)
    s3 = Sequence(s1+s2)
    s3.replace(m3,m5)
    l[:] = []
    s3.visit(namesVisitor)
    self.assertEqual(l,['m1','m2','m5','m4'])
    # --- replace() involving associated Tasks ---
    m6 = DummyModule("m6")
    m7 = DummyModule("m7")
    m8 = DummyModule("m8")
    m9 = DummyModule("m9")
    t6 = Task(m6)
    t7 = Task(m7)
    t89 = Task(m8, m9)
    s1 = Sequence(m1+m2, t6)
    s2 = Sequence(m3+m4, t7)
    s3 = Sequence(s1+s2, t89)
    s3.replace(m3,m5)
    l[:] = []
    s3.visit(namesVisitor)
    # Task contents do not appear in the decorated-name walk.
    self.assertEqual(l,['m1','m2','m5','m4'])
    s3.replace(m8,m1)
    self.assertTrue(s3.dumpPython() == "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+process.m5+process.m4, cms.Task(process.m1, process.m9), cms.Task(process.m7))\n")
    s3.replace(m1,m7)
    self.assertTrue(s3.dumpPython() == "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.Task(process.m6), cms.Task(process.m7), cms.Task(process.m7, process.m9))\n")
    # replace() returns True when something was replaced, False otherwise.
    result = s3.replace(t7, t89)
    self.assertTrue(s3.dumpPython() == "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.Task(process.m6), cms.Task(process.m7, process.m9), cms.Task(process.m8, process.m9))\n")
    self.assertTrue(result)
    result = s3.replace(t7, t89)
    self.assertFalse(result)
    # --- Task.replace() ---
    t1 = Task()
    t1.replace(m1,m2)
    self.assertTrue(t1.dumpPython() == "cms.Task()\n")
    t1 = Task(m1)
    t1.replace(m1,m2)
    self.assertTrue(t1.dumpPython() == "cms.Task(process.m2)\n")
    # Duplicates collapse: Task holds a set of members.
    t1 = Task(m1,m2, m2)
    t1.replace(m2,m3)
    self.assertTrue(t1.dumpPython() == "cms.Task(process.m1, process.m3)\n")
    t1 = Task(m1,m2)
    t2 = Task(m1,m3,t1)
    t2.replace(m1,m4)
    self.assertTrue(t2.dumpPython() == "cms.Task(process.m2, process.m3, process.m4)\n")
    t1 = Task(m2)
    t2 = Task(m1,m3,t1)
    t2.replace(m1,m4)
    self.assertTrue(t2.dumpPython() == "cms.Task(process.m2, process.m3, process.m4)\n")
    # Replacing a nested Task by a module.
    t1 = Task(m2)
    t2 = Task(m1,m3,t1)
    t2.replace(t1,m4)
    self.assertTrue(t2.dumpPython() == "cms.Task(process.m1, process.m3, process.m4)\n")
    # Replacing a module by a Task.
    t1 = Task(m2)
    t2 = Task(m1,m3,t1)
    t3 = Task(m5)
    t2.replace(m2,t3)
    self.assertTrue(t2.dumpPython() == "cms.Task(process.m1, process.m3, process.m5)\n")
    # --- Same scenarios for ConditionalTask ---
    ct6 = ConditionalTask(m6)
    ct7 = ConditionalTask(m7)
    ct89 = ConditionalTask(m8, m9)
    cs1 = Sequence(m1+m2, ct6)
    cs2 = Sequence(m3+m4, ct7)
    cs3 = Sequence(cs1+cs2, ct89)
    cs3.replace(m3,m5)
    l[:] = []
    cs3.visit(namesVisitor)
    self.assertEqual(l,['m1','m2','m5','m4'])
    cs3.replace(m8,m1)
    self.assertEqual(cs3.dumpPython(), "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+process.m5+process.m4, cms.ConditionalTask(process.m1, process.m9), cms.ConditionalTask(process.m7))\n")
    cs3.replace(m1,m7)
    self.assertEqual(cs3.dumpPython(), "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.ConditionalTask(process.m6), cms.ConditionalTask(process.m7), cms.ConditionalTask(process.m7, process.m9))\n")
    result = cs3.replace(ct7, ct89)
    self.assertEqual(cs3.dumpPython(), "cms.Sequence(process.m7+process.m2+process.m5+process.m4, cms.ConditionalTask(process.m6), cms.ConditionalTask(process.m7, process.m9), cms.ConditionalTask(process.m8, process.m9))\n")
    self.assertTrue(result)
    result = cs3.replace(ct7, ct89)
    self.assertFalse(result)
    ct1 = ConditionalTask()
    ct1.replace(m1,m2)
    self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask()\n")
    ct1 = ConditionalTask(m1)
    ct1.replace(m1,m2)
    self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask(process.m2)\n")
    ct1 = ConditionalTask(m1,m2, m2)
    ct1.replace(m2,m3)
    self.assertEqual(ct1.dumpPython(), "cms.ConditionalTask(process.m1, process.m3)\n")
    ct1 = ConditionalTask(m1,m2)
    ct2 = ConditionalTask(m1,m3,ct1)
    ct2.replace(m1,m4)
    self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m2, process.m3, process.m4)\n")
    ct1 = ConditionalTask(m2)
    ct2 = ConditionalTask(m1,m3,ct1)
    ct2.replace(m1,m4)
    self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m2, process.m3, process.m4)\n")
    ct1 = ConditionalTask(m2)
    ct2 = ConditionalTask(m1,m3,ct1)
    ct2.replace(ct1,m4)
    self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m1, process.m3, process.m4)\n")
    ct1 = ConditionalTask(m2)
    ct2 = ConditionalTask(m1,m3,ct1)
    ct3 = ConditionalTask(m5)
    ct2.replace(m2,ct3)
    self.assertEqual(ct2.dumpPython(), "cms.ConditionalTask(process.m1, process.m3, process.m5)\n")
    # --- FinalPath.replace() ---
    fp = FinalPath()
    fp.replace(m1,m2)
    self.assertEqual(fp.dumpPython(), "cms.FinalPath()\n")
    fp = FinalPath(m1)
    fp.replace(m1,m2)
    self.assertEqual(fp.dumpPython(), "cms.FinalPath(process.m2)\n")
def testReplaceIfHeldDirectly(self):
    """_replaceIfHeldDirectly replaces only direct members — it must not
    recurse into nested Sequences or Tasks held by the container."""
    m1 = DummyModule("m1")
    m2 = DummyModule("m2")
    m3 = DummyModule("m3")
    m4 = DummyModule("m4")
    m5 = DummyModule("m5")
    # Direct members, including decorated (~ / ignore) forms, are replaced.
    s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
    s1._replaceIfHeldDirectly(m2,m3)
    self.assertEqual(s1.dumpPython()[:-1],
                     "cms.Sequence(process.m1+~process.m3+process.m1+process.m3+cms.ignore(process.m3))")
    s2 = Sequence(m1*m2)
    l = []
    s3 = Sequence(~m1*s2)
    s3._replaceIfHeldDirectly(~m1, m2)
    self.assertEqual(s3.dumpPython()[:-1],
                     "cms.Sequence(process.m2+(process.m1+process.m2))")
    m6 = DummyModule("m6")
    m7 = DummyModule("m7")
    m8 = DummyModule("m8")
    m9 = DummyModule("m9")
    t6 = Task(m6)
    t7 = Task(m7)
    t89 = Task(m8, m9)
    s1 = Sequence(m1+m2, t6)
    s2 = Sequence(m3+m4, t7)
    s3 = Sequence(s1+s2, t89)
    # m3 is held by the nested s2, not by s3 directly, so s3 is unchanged.
    s3._replaceIfHeldDirectly(m3,m5)
    self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+cms.Sequence(process.m3+process.m4, cms.Task(process.m7)), cms.Task(process.m8, process.m9))")
    # Replacing within s2 itself is reflected in s3's dump (shared object).
    s2._replaceIfHeldDirectly(m3,m5)
    self.assertEqual(s2.dumpPython()[:-1],"cms.Sequence(process.m5+process.m4, cms.Task(process.m7))")
    self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.Task(process.m6))+cms.Sequence(process.m5+process.m4, cms.Task(process.m7)), cms.Task(process.m8, process.m9))")
    # A directly-held Task can be swapped for another Task.
    s1 = Sequence(t6)
    s1._replaceIfHeldDirectly(t6,t7)
    self.assertEqual(s1.dumpPython()[:-1],"cms.Sequence(cms.Task(process.m7))")
    # --- Same scenarios with ConditionalTask ---
    ct6 = ConditionalTask(m6)
    ct7 = ConditionalTask(m7)
    ct89 = ConditionalTask(m8, m9)
    s1 = Sequence(m1+m2, ct6)
    s2 = Sequence(m3+m4, ct7)
    s3 = Sequence(s1+s2, ct89)
    s3._replaceIfHeldDirectly(m3,m5)
    self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+cms.Sequence(process.m3+process.m4, cms.ConditionalTask(process.m7)), cms.ConditionalTask(process.m8, process.m9))")
    s2._replaceIfHeldDirectly(m3,m5)
    self.assertEqual(s2.dumpPython()[:-1],"cms.Sequence(process.m5+process.m4, cms.ConditionalTask(process.m7))")
    self.assertEqual(s3.dumpPython()[:-1], "cms.Sequence(cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m6))+cms.Sequence(process.m5+process.m4, cms.ConditionalTask(process.m7)), cms.ConditionalTask(process.m8, process.m9))")
    s1 = Sequence(ct6)
    s1._replaceIfHeldDirectly(ct6,ct7)
    self.assertEqual(s1.dumpPython()[:-1],"cms.Sequence(cms.ConditionalTask(process.m7))")
def testIndex(self):
    """Sequence.index reports each module's position in insertion order."""
    modA = DummyModule("a")
    modB = DummyModule("b")
    modC = DummyModule("c")
    seq = Sequence(modA + modB + modC)
    for position, module in enumerate((modA, modB, modC)):
        self.assertEqual(seq.index(module), position)
def testInsert(self):
    """insert() places a module at a given position in a Sequence or Path."""
    m1 = DummyModule("a")
    m2 = DummyModule("b")
    m3 = DummyModule("c")
    # Insert into the middle of an existing Sequence.
    s = Sequence(m1+m3)
    s.insert(1,m2)
    self.assertEqual(s.index(m1),0)
    self.assertEqual(s.index(m2),1)
    self.assertEqual(s.index(m3),2)
    # Insert into an empty Sequence.
    s = Sequence()
    s.insert(0, m1)
    self.assertEqual(s.index(m1),0)
    # Insert into an empty Path.
    p = Path()
    p.insert(0, m1)
    # Bug fix: the original asserted on 's' (already verified above), which
    # left the Path insertion untested; check the Path 'p' that was modified.
    self.assertEqual(p.index(m1),0)
def testExpandAndClone(self):
    """expandAndClone flattens nested Sequences/Tasks into a single-level clone."""
    m1 = DummyModule("m1")
    m2 = DummyModule("m2")
    m3 = DummyModule("m3")
    m4 = DummyModule("m4")
    m5 = DummyModule("m5")
    s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
    s2 = Sequence(m1*m2)
    s3 = Sequence(~m1*s2)
    p = Path(s1+s3)
    p2 = p.expandAndClone()
    l = []
    namesVisitor = DecoratedNodeNameVisitor(l)
    p2.visit(namesVisitor)
    # Nested Sequence structure is gone; decorations (!, -) are preserved.
    self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
    m6 = DummyModule("m6")
    m7 = DummyModule("m7")
    m8 = DummyModule("m8")
    m9 = DummyModule("m9")
    # An associated Task survives expansion as a single flat Task.
    p = Path(s1+s3, Task(m6))
    p2 = p.expandAndClone()
    l[:] = []
    p2.visit(namesVisitor)
    self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
    self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.Task(process.m6))\n")
    # Tasks nested inside Sequences and inside other Tasks are merged
    # (and sorted) into one Task on the expanded Path.
    s2 = Sequence(m1*m2, Task(m9))
    s3 = Sequence(~m1*s2)
    t8 = Task(m8)
    t8.setLabel("t8")
    p = Path(s1+s3, Task(m6, Task(m7, t8)))
    p2 = p.expandAndClone()
    l[:] = []
    p2.visit(namesVisitor)
    self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
    self.assertTrue(p2.dumpPython() == "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.Task(process.m6, process.m7, process.m8, process.m9))\n")
    # A Sequence holding only a Task expands to the same shape.
    t1 = Task(m1,m2,m3)
    s1 = Sequence(t1)
    s2 = s1.expandAndClone()
    l[:] = []
    s2.visit(namesVisitor)
    self.assertEqual(l, [])
    self.assertTrue(s2.dumpPython() == "cms.Sequence(cms.Task(process.m1, process.m2, process.m3))\n")
    # Task.expandAndClone flattens nested Tasks and removes duplicates.
    t1 = Task(m1,m2)
    t2 = Task(m1,m3,t1)
    t3 = t2.expandAndClone()
    self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.m2, process.m3)\n")
    t4 = Task()
    t5 = t4.expandAndClone()
    self.assertTrue(t5.dumpPython() == "cms.Task()\n")
    # --- Same scenarios with ConditionalTask ---
    s1 = Sequence(m1*~m2*m1*m2*ignore(m2))
    s2 = Sequence(m1*m2)
    s3 = Sequence(~m1*s2)
    p = Path(s1+s3, ConditionalTask(m6))
    p2 = p.expandAndClone()
    l[:] = []
    p2.visit(namesVisitor)
    self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
    self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.ConditionalTask(process.m6))\n")
    s2 = Sequence(m1*m2, ConditionalTask(m9))
    s3 = Sequence(~m1*s2)
    ct8 = ConditionalTask(m8)
    ct8.setLabel("ct8")
    p = Path(s1+s3, ConditionalTask(m6, ConditionalTask(m7, ct8)))
    p2 = p.expandAndClone()
    l[:] = []
    p2.visit(namesVisitor)
    self.assertEqual(l, ['m1', '!m2', 'm1', 'm2', '-m2', '!m1', 'm1', 'm2'])
    self.assertEqual(p2.dumpPython(), "cms.Path(process.m1+~process.m2+process.m1+process.m2+cms.ignore(process.m2)+~process.m1+process.m1+process.m2, cms.ConditionalTask(process.m6, process.m7, process.m8, process.m9))\n")
    t1 = ConditionalTask(m1,m2,m3)
    s1 = Sequence(t1)
    s2 = s1.expandAndClone()
    l[:] = []
    s2.visit(namesVisitor)
    self.assertEqual(l, [])
    self.assertEqual(s2.dumpPython(), "cms.Sequence(cms.ConditionalTask(process.m1, process.m2, process.m3))\n")
    t1 = ConditionalTask(m1,m2)
    t2 = ConditionalTask(m1,m3,t1)
    t3 = t2.expandAndClone()
    self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.m2, process.m3)\n")
    t4 = ConditionalTask()
    t5 = t4.expandAndClone()
    self.assertTrue(t5.dumpPython() == "cms.ConditionalTask()\n")
def testAdd(self):
    """+= and *= append operands to Paths and Sequences in place."""
    m1 = DummyModule("m1")
    m2 = DummyModule("m2")
    m3 = DummyModule("m3")
    m4 = DummyModule("m4")
    s1 = Sequence(m1)
    s3 = Sequence(m3+ignore(m4))
    p = Path(s1)
    p += ~m2
    p *= s3
    l = []
    namesVisitor = DecoratedNodeNameVisitor(l)
    p.visit(namesVisitor)
    self.assertEqual(l, ['m1', '!m2', 'm3', '-m4'])
    # += on an empty Sequence.
    s4 = Sequence()
    s4 +=m1
    # Bug fix: the original visited 's1' (which trivially yields ['m1'])
    # instead of the freshly built 's4'; visit the object under test.
    l[:]=[]; s4.visit(namesVisitor); self.assertEqual(l,['m1'])
    self.assertEqual(s4.dumpPython(),"cms.Sequence(process.m1)\n")
    # *= on an empty Sequence behaves the same way.
    s4 = Sequence()
    s4 *=m1
    l[:]=[]; s4.visit(namesVisitor); self.assertEqual(l,['m1'])
    self.assertEqual(s4.dumpPython(),"cms.Sequence(process.m1)\n")
def testRemove(self):
    """remove() deletes the first matching occurrence from a container,
    recursing into nested Sequences/Tasks when needed."""
    m1 = DummyModule("m1")
    m2 = DummyModule("m2")
    m3 = DummyModule("m3")
    m4 = DummyModule("m4")
    s1 = Sequence(m1*m2+~m3)
    s2 = Sequence(m1*s1)
    l = []
    namesVisitor = DecoratedNodeNameVisitor(l)
    d = {'m1':m1 ,'m2':m2, 'm3':m3,'s1':s1, 's2':s2}
    l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm2', '!m3'])
    l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', 'm2', '!m3'])
    # Removing from s1 is visible through s2 (shared nested object).
    s1.remove(m2)
    l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', '!m3'])
    l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', '!m3'])
    # Removing via the outer Sequence recurses into the nested one.
    s2.remove(m3)
    l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', '!m3'])
    l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m1', 'm1'])
    # Only the FIRST occurrence is removed.
    s1 = Sequence( m1 + m2 + m1 + m2 )
    l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm2', 'm1', 'm2'])
    s1.remove(m2)
    l[:] = []; s1.visit(namesVisitor); self.assertEqual(l,['m1', 'm1', 'm2'])
    # A whole nested Sequence can be removed; decorated duplicates stay.
    s1 = Sequence( m1 + m3 )
    s2 = Sequence( m2 + ignore(m3) + s1 + m3 )
    l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2', '-m3', 'm1', 'm3', 'm3'])
    s2.remove(s1)
    l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2', '-m3', 'm3'])
    s2.remove(m3)
    l[:] = []; s2.visit(namesVisitor); self.assertEqual(l,['m2','m3'])
    # Removal from the middle works the same for * and + construction.
    s1 = Sequence(m1*m2*m3)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
    s1.remove(m2)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
    s1 = Sequence(m1+m2+m3)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
    s1.remove(m2)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
    s1 = Sequence(m1*m2+m3)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
    s1.remove(m2)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
    s1 = Sequence(m1+m2*m3)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2+process.m3)\n")
    s1.remove(m2)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m3)\n")
    # Removing the last members leaves an empty Sequence.
    s1.remove(m1)
    s1.remove(m3)
    l[:]=[]; s1.visit(namesVisitor); self.assertEqual(l,[])
    self.assertEqual(s1.dumpPython(), "cms.Sequence()\n")
    s3 = Sequence(m1)
    s3.remove(m1)
    l[:]=[]; s3.visit(namesVisitor); self.assertEqual(l,[])
    self.assertEqual(s3.dumpPython(), "cms.Sequence()\n")
    s3 = Sequence(m1)
    s4 = Sequence(s3)
    s4.remove(m1)
    l[:]=[]; s4.visit(namesVisitor); self.assertEqual(l,[])
    self.assertEqual(s4.dumpPython(), "cms.Sequence()\n")
    # --- remove() on associated Tasks ---
    s1 = Sequence(m1+m2, Task(m3), Task(m4))
    s1.remove(m4)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.Task(process.m3))\n")
    # Only the first matching Task member is removed.
    s1 = Sequence(m1+m2+Sequence(Task(m3,m4), Task(m3), Task(m4)))
    s1.remove(m4)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.Task(process.m3), cms.Task(process.m4))\n")
    # Repeated removal digs into nested/labeled Tasks one match at a time.
    t1 = Task(m1)
    t1.setLabel("t1")
    t2 = Task(m2,t1)
    t2.setLabel("t2")
    t3 = Task(t1,t2,m1)
    t3.remove(m1)
    self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.t2)\n")
    t3.remove(m1)
    self.assertTrue(t3.dumpPython() == "cms.Task(process.m1, process.m2)\n")
    t3.remove(m1)
    self.assertTrue(t3.dumpPython() == "cms.Task(process.m2)\n")
    t3.remove(m2)
    self.assertTrue(t3.dumpPython() == "cms.Task()\n")
    # --- Same scenarios with ConditionalTask ---
    s1 = Sequence(m1+m2, ConditionalTask(m3), ConditionalTask(m4))
    s1.remove(m4)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m3))\n")
    s1 = Sequence(m1+m2+Sequence(ConditionalTask(m3,m4), ConditionalTask(m3), ConditionalTask(m4)))
    s1.remove(m4)
    self.assertEqual(s1.dumpPython(), "cms.Sequence(process.m1+process.m2, cms.ConditionalTask(process.m3), cms.ConditionalTask(process.m4))\n")
    t1 = ConditionalTask(m1)
    t1.setLabel("t1")
    t2 = ConditionalTask(m2,t1)
    t2.setLabel("t2")
    t3 = ConditionalTask(t1,t2,m1)
    t3.remove(m1)
    self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.t2)\n")
    t3.remove(m1)
    self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m1, process.m2)\n")
    t3.remove(m1)
    self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.m2)\n")
    t3.remove(m2)
    self.assertEqual(t3.dumpPython(), "cms.ConditionalTask()\n")
    # --- FinalPath.remove() ---
    fp = FinalPath(m1+m2)
    fp.remove(m1)
    self.assertEqual(fp.dumpPython(), "cms.FinalPath(process.m2)\n")
    fp = FinalPath(m1)
    fp.remove(m1)
    self.assertEqual(fp.dumpPython(), "cms.FinalPath()\n")
def testCopyAndExclude(self):
    """copyAndExclude returns a clone with the listed nodes removed,
    leaving the original container untouched."""
    a = DummyModule("a")
    b = DummyModule("b")
    c = DummyModule("c")
    d = DummyModule("d")
    # Excluding a module not in the Sequence is a no-op copy.
    s = Sequence(a+b+c)
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
    # Exclusion works at any position, for + and * construction alike.
    s = Sequence(a+b+c+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
    s=Sequence(a*b+c+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
    s = Sequence(a+b*c+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
    # Excluding a member of a nested Sequence flattens that Sequence in
    # the copy; excluding outer members keeps the nested grouping.
    s2 = Sequence(a+b)
    s = Sequence(c+s2+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.c+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.c+process.a+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence((process.a+process.b)+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.c+(process.a+process.b))\n")
    self.assertEqual(s.copyAndExclude([a,b]).dumpPython(),"cms.Sequence(process.c+process.d)\n")
    # The copy still shares the untouched nested Sequence object, so
    # mutating s2 afterwards is visible in the copy.
    s3 = s.copyAndExclude([c])
    s2.remove(a)
    self.assertEqual(s3.dumpPython(),"cms.Sequence((process.b)+process.d)\n")
    # When every member of a nested Sequence is excluded, the copy holds
    # no inner Sequence at all.
    s4 = s.copyAndExclude([a,b])
    seqs = []
    sequenceVisitor = SequenceVisitor(seqs)
    s.visit(sequenceVisitor)
    self.assertEqual(len(seqs),1)
    seqs[:] = []
    s4.visit(sequenceVisitor)
    self.assertEqual(len(seqs),0)
    self.assertEqual(s4.dumpPython(),"cms.Sequence(process.c+process.d)\n")
    # SequenceVisitor also sees Sequences nested behind placeholders.
    holder = SequencePlaceholder("x")
    s3 = Sequence(b+d,Task(a))
    s2 = Sequence(a+b+holder+s3)
    s = Sequence(c+s2+d)
    seqs[:] = []
    s.visit(sequenceVisitor)
    self.assertTrue(seqs == [s2,s3])
    # A SequencePlaceholder itself can be excluded.
    s2 = Sequence(a+b+holder)
    s = Sequence(c+s2+d)
    self.assertEqual(s.copyAndExclude([holder]).dumpPython(),"cms.Sequence(process.c+process.a+process.b+process.d)\n")
    # Excluding a whole nested Sequence drops all of its contents.
    s2 = Sequence(a+b+c)
    s = Sequence(s2+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence((process.a+process.b+process.c))\n")
    self.assertEqual(s.copyAndExclude([s2]).dumpPython(),"cms.Sequence(process.d)\n")
    s2 = Sequence(a+b+c)
    s = Sequence(s2*d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence((process.a+process.b+process.c))\n")
    self.assertEqual(s.copyAndExclude([a,b,c]).dumpPython(),"cms.Sequence(process.d)\n")
    # Decorated modules are excluded by either the bare or decorated form.
    s = Sequence(ignore(a)+b+c+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([ignore(a)]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(cms.ignore(process.a)+process.b+process.c)\n")
    s = Sequence(a+ignore(b)+c+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(cms.ignore(process.b)+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+cms.ignore(process.b)+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+cms.ignore(process.b)+process.c)\n")
    s = Sequence(a+b+c+ignore(d))
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+cms.ignore(process.d))\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+cms.ignore(process.d))\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+cms.ignore(process.d))\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
    # Same for negated (~) modules.
    s = Sequence(~a+b+c+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(~process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(~process.a+process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(~process.a+process.b+process.c)\n")
    s = Sequence(a+~b+c+d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(~process.b+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([~b]).dumpPython(),"cms.Sequence(process.a+process.c+process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+~process.b+process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+~process.b+process.c)\n")
    s = Sequence(a+b+c+~d)
    self.assertEqual(s.copyAndExclude([a]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
    self.assertEqual(s.copyAndExclude([b]).dumpPython(),"cms.Sequence(process.a+process.c+~process.d)\n")
    self.assertEqual(s.copyAndExclude([c]).dumpPython(),"cms.Sequence(process.a+process.b+~process.d)\n")
    self.assertEqual(s.copyAndExclude([d]).dumpPython(),"cms.Sequence(process.a+process.b+process.c)\n")
    self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence()\n")
    # --- exclusion inside associated Tasks ---
    e = DummyModule("e")
    f = DummyModule("f")
    g = DummyModule("g")
    h = DummyModule("h")
    t1 = Task(h)
    s = Sequence(a+b+c+~d, Task(e,f,Task(g,t1)))
    self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
    self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
    self.assertEqual(s.copyAndExclude([a,e,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.f, process.g))\n")
    self.assertEqual(s.copyAndExclude([a,e,f,g,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
    self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence(cms.Task(process.e, process.f, process.g, process.h))\n")
    self.assertEqual(s.copyAndExclude([t1]).dumpPython(),"cms.Sequence(process.a+process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
    # Emptied Tasks disappear from the copy.
    taskList = []
    taskVisitor = TaskVisitor(taskList)
    s.visit(taskVisitor)
    self.assertEqual(len(taskList),3)
    s2 = s.copyAndExclude([g,h])
    taskList[:] = []
    s2.visit(taskVisitor)
    self.assertEqual(len(taskList),1)
    t2 = Task(t1)
    taskList[:] = []
    t2.visit(taskVisitor)
    self.assertEqual(taskList[0],t1)
    s3 = Sequence(s)
    self.assertEqual(s3.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.Task(process.e, process.f, process.g))\n")
    s4 = Sequence(s)
    self.assertEqual(s4.copyAndExclude([a,b,c,d,e,f,g,h]).dumpPython(),"cms.Sequence()\n")
    # Task.copyAndExclude keeps labeled sub-Tasks by reference.
    t1 = Task(e,f)
    t11 = Task(a)
    t11.setLabel("t11")
    t2 = Task(g,t1,h,t11)
    t3 = t2.copyAndExclude([e,h])
    self.assertTrue(t3.dumpPython() == "cms.Task(process.f, process.g, process.t11)\n")
    t4 = t2.copyAndExclude([e,f,g,h,a])
    self.assertTrue(t4.dumpPython() == "cms.Task()\n")
    # --- same scenarios with ConditionalTask ---
    t1 = ConditionalTask(h)
    s = Sequence(a+b+c+~d, ConditionalTask(e,f,ConditionalTask(g,t1)))
    self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
    self.assertEqual(s.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
    self.assertEqual(s.copyAndExclude([a,e,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.f, process.g))\n")
    self.assertEqual(s.copyAndExclude([a,e,f,g,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d)\n")
    self.assertEqual(s.copyAndExclude([a,b,c,d]).dumpPython(),"cms.Sequence(cms.ConditionalTask(process.e, process.f, process.g, process.h))\n")
    self.assertEqual(s.copyAndExclude([t1]).dumpPython(),"cms.Sequence(process.a+process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
    taskList = []
    taskVisitor = ConditionalTaskVisitor(taskList)
    s.visit(taskVisitor)
    self.assertEqual(len(taskList),3)
    s2 = s.copyAndExclude([g,h])
    taskList[:] = []
    s2.visit(taskVisitor)
    self.assertEqual(len(taskList),1)
    t2 = ConditionalTask(t1)
    taskList[:] = []
    t2.visit(taskVisitor)
    self.assertEqual(taskList[0],t1)
    s3 = Sequence(s)
    self.assertEqual(s3.copyAndExclude([a,h]).dumpPython(),"cms.Sequence(process.b+process.c+~process.d, cms.ConditionalTask(process.e, process.f, process.g))\n")
    s4 = Sequence(s)
    self.assertEqual(s4.copyAndExclude([a,b,c,d,e,f,g,h]).dumpPython(),"cms.Sequence()\n")
    t1 = ConditionalTask(e,f)
    t11 = ConditionalTask(a)
    t11.setLabel("t11")
    t2 = ConditionalTask(g,t1,h,t11)
    t3 = t2.copyAndExclude([e,h])
    self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.f, process.g, process.t11)\n")
    t4 = t2.copyAndExclude([e,f,g,h,a])
    self.assertEqual(t4.dumpPython(), "cms.ConditionalTask()\n")
def testSequenceTypeChecks(self):
    """Sequences reject attribute assignment and non-sequenceable operands."""
    m1 = DummyModule("m1")
    m2 = DummyModule("m2")
    s1 = Sequence(m1*m2)
    # Arbitrary attributes cannot be set on a Sequence.
    with self.assertRaises(AttributeError):
        s1.something = 1
    # Combining a module with a non-sequenceable object is rejected.
    with self.assertRaises(TypeError):
        Sequence(m1*None)
def testCopy(self):
    """copy() produces an independent container that shares its members."""
    a = DummyModule("a")
    b = DummyModule("b")
    c = DummyModule("c")
    p1 = Path(a+b+c)
    p2 = p1.copy()
    e = DummyModule("e")
    # Mutating the copy must not affect the original.
    p2.replace(b,e)
    self.assertEqual(p1.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
    self.assertEqual(p2.dumpPython(),"cms.Path(process.a+process.e+process.c)\n")
    p1 = Path(a+b+c)
    p2 = p1.copy()
    # ...and mutating the original must not affect the copy.
    p1 += e
    self.assertEqual(p1.dumpPython(),"cms.Path(process.a+process.b+process.c+process.e)\n")
    self.assertEqual(p2.dumpPython(),"cms.Path(process.a+process.b+process.c)\n")
    # Task.copy is shallow: the member modules are the same objects,
    # but the containing collection is new.
    t1 = Task(a, b)
    t2 = t1.copy()
    self.assertTrue(t1.dumpPython() == t2.dumpPython())
    t1Contents = list(t1._collection)
    t2Contents = list(t2._collection)
    self.assertTrue(id(t1Contents[0]) == id(t2Contents[0]))
    self.assertTrue(id(t1Contents[1]) == id(t2Contents[1]))
    self.assertTrue(id(t1._collection) != id(t2._collection))
    # ConditionalTask.copy behaves the same way.
    t1 = ConditionalTask(a, b)
    t2 = t1.copy()
    self.assertTrue(t1.dumpPython() == t2.dumpPython())
    t1Contents = list(t1._collection)
    t2Contents = list(t2._collection)
    self.assertTrue(id(t1Contents[0]) == id(t2Contents[0]))
    self.assertTrue(id(t1Contents[1]) == id(t2Contents[1]))
    self.assertTrue(id(t1._collection) != id(t2._collection))
def testCopyAndAdd(self):
    """copyAndAdd returns a new Task/ConditionalTask with extra members,
    leaving the source object unchanged."""
    a = DummyModule("a")
    b = DummyModule("b")
    c = DummyModule("c")
    d = DummyModule("d")
    e = DummyModule("e")
    t1 = Task(a, b, c)
    self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
    t2 = t1.copyAndAdd(d, e)
    # The original is untouched; the copy has the extra modules.
    self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
    self.assertEqual(t2.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d, process.e)\n")
    t3 = t2.copyAndExclude([b])
    self.assertEqual(t1.dumpPython(), "cms.Task(process.a, process.b, process.c)\n")
    self.assertEqual(t2.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d, process.e)\n")
    self.assertEqual(t3.dumpPython(), "cms.Task(process.a, process.c, process.d, process.e)\n")
    # copyAndExclude/copyAndAdd compose.
    t4 = t1.copyAndExclude([b]).copyAndAdd(d)
    self.assertEqual(t4.dumpPython(), "cms.Task(process.a, process.c, process.d)\n")
    t5 = t2.copyAndExclude([b]).copyAndAdd(d)
    self.assertEqual(t5.dumpPython(), "cms.Task(process.a, process.c, process.d, process.e)\n")
    # A Task argument's contents are merged in.
    t6 = t4.copyAndAdd(Task(b))
    self.assertEqual(t6.dumpPython(), "cms.Task(process.a, process.b, process.c, process.d)\n")
    # --- same scenarios with ConditionalTask ---
    t1 = ConditionalTask(a, b, c)
    self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
    t2 = t1.copyAndAdd(d, e)
    self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
    self.assertEqual(t2.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d, process.e)\n")
    t3 = t2.copyAndExclude([b])
    self.assertEqual(t1.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c)\n")
    self.assertEqual(t2.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d, process.e)\n")
    self.assertEqual(t3.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d, process.e)\n")
    t4 = t1.copyAndExclude([b]).copyAndAdd(d)
    self.assertEqual(t4.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d)\n")
    t5 = t2.copyAndExclude([b]).copyAndAdd(d)
    self.assertEqual(t5.dumpPython(), "cms.ConditionalTask(process.a, process.c, process.d, process.e)\n")
    t6 = t4.copyAndAdd(Task(b))
    self.assertEqual(t6.dumpPython(), "cms.ConditionalTask(process.a, process.b, process.c, process.d)\n")
def testInsertInto(self):
    """Path.insertInto registers the path's module names as a vstring
    parameter on the target PSet, flattening nested Sequences."""
    from FWCore.ParameterSet.Types import vstring
    class TestPSet(object):
        # Minimal PSet stand-in that records addVString calls by label.
        def __init__(self):
            self._dict = dict()
        def addVString(self,isTracked,label,value):
            self._dict[label]=value
    a = DummyModule("a")
    b = DummyModule("b")
    c = DummyModule("c")
    d = DummyModule("d")
    p = Path(a+b+c+d)
    decoratedList = []
    lister = DecoratedNodeNameVisitor(decoratedList)
    p.visit(lister)
    ps = TestPSet()
    p.insertInto(ps,"p",decoratedList)
    self.assertEqual(ps._dict, {"p":vstring("a","b","c","d")})
    # Nested Sequences are flattened into the same vstring.
    s = Sequence(b+c)
    p = Path(a+s+d)
    decoratedList[:] = []
    p.visit(lister)
    ps = TestPSet()
    p.insertInto(ps,"p",decoratedList)
    self.assertEqual(ps._dict, {"p":vstring("a","b","c","d")})
# NOTE(review): if this line is not under an `if __name__ == "__main__":` guard
# in the full file, the whole suite runs on import — confirm against the original.
unittest.main()
| true | true |
f72d55398fb004b998a56639bb271afa5e8e1c9c | 167 | py | Python | eswar4.py | Eswar12345/training-kit | 294e622eed175144faead24206d4241e25fbda35 | [
"CC-BY-4.0"
] | null | null | null | eswar4.py | Eswar12345/training-kit | 294e622eed175144faead24206d4241e25fbda35 | [
"CC-BY-4.0"
] | null | null | null | eswar4.py | Eswar12345/training-kit | 294e622eed175144faead24206d4241e25fbda35 | [
"CC-BY-4.0"
from collections import Counter


def find_singletons(items):
    """Return the values in *items* that occur exactly once, in input order."""
    counts = Counter(items)
    return [value for value in items if counts[value] == 1]


def main():
    """Read n values from stdin and print those that appear exactly once."""
    n = int(input("enter the size"))
    # input() returns str, so all comparisons must stay string-based.
    a = [input("enter") for _ in range(n)]
    # BUG in original: it counted the integer loop index `i` in a list of
    # strings, so the count was always 0 and nothing was ever printed.
    # Count the entered values themselves instead.
    for value in find_singletons(a):
        print(value)


if __name__ == '__main__':
    main()
| 16.7 | 30 | 0.54491 | n=int(input("enter the size"))
a=[]
for i in range(n):
b=input("enter")
a.append(b)
for i in range(n):
count=a.count(i)
if(count==1):
print(i)
| true | true |
f72d562143f7bbb140a16c6d0887b2f083f9e29c | 342 | py | Python | utility.py | sahabi/opt | 79f82bd9e0db77dada9554950b0b95d4e1435ca0 | [
"MIT"
] | 26 | 2018-12-30T20:32:45.000Z | 2022-03-15T06:11:40.000Z | utility.py | sahabi/opt | 79f82bd9e0db77dada9554950b0b95d4e1435ca0 | [
"MIT"
] | 20 | 2018-08-29T10:34:48.000Z | 2022-03-11T23:16:24.000Z | utility.py | sahabi/opt | 79f82bd9e0db77dada9554950b0b95d4e1435ca0 | [
"MIT"
] | 13 | 2019-05-11T01:59:58.000Z | 2022-03-15T14:12:40.000Z | #!/usr/bin/python
# -*- encoding=utf-8 -*-
# author: Ian
# e-mail: stmayue@gmail.com
# description:
def list_to_dict(in_list):
    """Map each index of *in_list* to its element: ['a', 'b'] -> {0: 'a', 1: 'b'}."""
    return dict(enumerate(in_list))
def exchange_key_value(in_dict):
    """Invert *in_dict* (values become keys); duplicate values keep the last key seen."""
    return {value: key for key, value in in_dict.items()}
def main():
    """Placeholder entry point; this module is used as a helper library."""
    pass


if __name__ == '__main__':
    # Original line had dataset statistics fused onto it ("main() | 16.28..."),
    # which is not valid Python; restored to a plain call.
    main()
def list_to_dict(in_list):
return dict((i, in_list[i]) for i in range(0, len(in_list)))
def exchange_key_value(in_dict):
return dict((in_dict[i], i) for i in in_dict)
def main():
pass
if __name__ == '__main__':
main() | true | true |
f72d5641bc821b12e049ec960ca9abbb3155f191 | 18,824 | py | Python | ddf_utils/model/package.py | semio/ddf_utils | e10c4cb6dc7722415a5863579a552cc7b7e3668d | [
"MIT"
] | 2 | 2016-11-23T12:28:15.000Z | 2019-03-04T16:06:25.000Z | ddf_utils/model/package.py | semio/ddf_utils | e10c4cb6dc7722415a5863579a552cc7b7e3668d | [
"MIT"
] | 124 | 2016-07-14T13:39:41.000Z | 2021-12-24T01:45:23.000Z | ddf_utils/model/package.py | semio/ddf_utils | e10c4cb6dc7722415a5863579a552cc7b7e3668d | [
"MIT"
] | 1 | 2016-11-30T23:42:56.000Z | 2016-11-30T23:42:56.000Z | # -*- coding: utf-8 -*-
"""datapackage model"""
import os.path as osp
from typing import List, Tuple, Dict, Union, Callable
import attr
import json
from itertools import product
from collections import OrderedDict
from tqdm import tqdm
import pandas as pd
from .ddf import DDF, Concept, EntityDomain, Entity, DaskDataPoint, Synonym
from .utils import absolute_path
import logging
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True, repr=False)
class TableSchema:
    """Schema of one tabular resource: its field list and primary key."""

    fields: List[dict]
    primaryKey: Union[List[str], str]

    @classmethod
    def from_dict(cls, d: dict):
        """Build a TableSchema from a plain ``schema`` mapping."""
        return cls(d['fields'], d['primaryKey'])

    @property
    def field_names(self):
        """Names of all fields declared in the schema."""
        return [field['name'] for field in self.fields]

    @property
    def common_fields(self):
        """Field names that are not part of the primary key."""
        key = self.primaryKey
        key_members = [key] if isinstance(key, str) else key
        return [name for name in self.field_names if name not in key_members]

    def __repr__(self):
        return "TableSchema(primaryKey: {}, fields: {})".format(self.primaryKey, self.common_fields)
@attr.s(auto_attribs=True)
class Resource:
    """A datapackage resource: a named file plus its table schema."""

    name: str
    path: str
    schema: TableSchema

    @classmethod
    def from_dict(cls, d: dict):
        """Build a Resource from a plain resource mapping."""
        return cls(d['name'], d['path'], TableSchema.from_dict(d['schema']))

    def to_dict(self):
        """Serialize back to a plain dict; the nested schema is expanded too."""
        out = vars(self).copy()
        if 'schema' in out:
            out['schema'] = vars(out['schema']).copy()
        return out
@attr.s(auto_attribs=True)
class DDFSchema:
    """One ddfSchema entry: a (primaryKey, value) pair and the resources providing it."""

    primaryKey: List[str]
    value: str
    resources: List[str]  # names of the resources that provide this pair

    @classmethod
    def from_dict(cls, d: dict):
        """Build a DDFSchema entry from a plain mapping."""
        return cls(primaryKey=d['primaryKey'],
                   value=d['value'],
                   resources=d['resources'])
@attr.s(auto_attribs=True, repr=False)
class DataPackage:
    """In-memory representation of a datapackage.json plus its base directory."""

    base_path: str
    resources: List[Resource]
    props: dict = attr.ib(factory=dict)

    def __attrs_post_init__(self):
        # Normalize immediately so later path joins are absolute.
        self.base_path = absolute_path(self.base_path)

    def __repr__(self):
        return f"DataPackage({self.base_path})"

    @classmethod
    def from_dict(cls, d_: dict, base_path='./'):
        """Build from an already-parsed datapackage mapping.

        ``resources`` is popped out and parsed; everything else stays in ``props``.
        """
        d = d_.copy()
        resources = list(map(Resource.from_dict, d.pop('resources')))
        return cls(base_path=base_path, resources=resources, props=d)

    @classmethod
    def from_json(cls, json_path):
        """Build from the path of a datapackage.json file."""
        json_path = absolute_path(json_path)
        base_path = osp.dirname(json_path)
        # FIX: the original used json.load(open(json_path)) and leaked the file
        # handle; close it deterministically with a context manager.
        with open(json_path) as f:
            d = json.load(f)
        return cls.from_dict(d, base_path)

    @classmethod
    def from_path(cls, path):
        """Build from a dataset directory that contains a datapackage.json."""
        path = absolute_path(path)
        json_path = osp.join(path, 'datapackage.json')
        return cls.from_json(json_path)

    def to_dict(self):
        """Dump the datapackage to a plain dict; implemented by subclasses."""
        raise NotImplementedError
@attr.s(repr=False)
class DDFcsv(DataPackage):
    """DDFCSV datapackage: a DataPackage whose resources follow the DDF csv layout."""
    ddfSchema: Dict[str, List[DDFSchema]] = attr.ib(factory=dict)
    ddf: DDF = attr.ib(init=False)
    concepts_resources: List[Resource] = attr.ib(init=False)
    entities_resources: List[Resource] = attr.ib(init=False)
    datapoints_resources: List[Resource] = attr.ib(init=False)
    synonyms_resources: List[Resource] = attr.ib(init=False)

    # config for read_csv: treat only the empty string as NA
    _default_reader_options = {'keep_default_na': False, 'na_values': ['']}
    _default_dask_reader_options = {'keep_default_na': False,
                                    'na_values': [''],
                                    'sample_rows': 1000000}

    def __attrs_post_init__(self):
        """Partition resources by kind (concepts/entities/datapoints/synonyms), then load the DDF."""
        super(DDFcsv, self).__attrs_post_init__()
        conc = list()
        ent = list()
        dp = list()
        syn = list()
        for r in self.resources:
            pkey = r.schema.primaryKey
            if isinstance(pkey, str):
                # string primary key: the concepts table or an entity table
                if pkey == 'concept':
                    conc.append(r)
                else:
                    ent.append(r)
            else:  # TODO: datapoints key might be one column, not list of columns?
                if 'synonym' in pkey:
                    syn.append(r)
                else:
                    dp.append(r)
        self.concepts_resources = conc
        self.entities_resources = ent
        self.datapoints_resources = dp
        self.synonyms_resources = syn
        self.ddf = self.load_ddf()

    @classmethod
    def from_dict(cls, d_: dict, base_path='./'):
        """Build a DDFcsv from a parsed datapackage mapping, including ddfSchema if present."""
        d = d_.copy()
        resources = list(map(Resource.from_dict, d.pop('resources')))
        if 'ddfSchema' in d:
            ddf_schema_ = d.pop('ddfSchema')
            ddf_schema = dict()
            for k, v in ddf_schema_.items():
                # renamed the loop variable (was `d`, shadowing the metadata dict
                # used as `props=d` below — harmless in py3 comprehension scope,
                # but confusing to read)
                ddf_schema[k] = [DDFSchema.from_dict(sch) for sch in v]
        else:
            ddf_schema = {}
        return cls(base_path=base_path, resources=resources, ddfSchema=ddf_schema, props=d)

    def to_dict(self):
        """Serialize back to a plain (ordered) dict, including the ddfSchema section."""
        res = OrderedDict(self.props.copy())
        res['resources'] = [r.to_dict() for r in self.resources]
        if self.ddfSchema:
            res['ddfSchema'] = dict()
            for k, v in self.ddfSchema.items():
                res['ddfSchema'][k] = [vars(sch).copy() for sch in v]
        return res

    def _gen_concepts(self):
        """Yield (concept_id, Concept) pairs from all concepts resources."""
        concepts_paths = [osp.join(self.base_path, r.path) for r in self.concepts_resources]
        for p in concepts_paths:
            df = pd.read_csv(p, index_col='concept', dtype=str, **self._default_reader_options)
            for concept, row in df.iterrows():
                concept_type = row['concept_type']
                props = row.drop('concept_type').to_dict()
                yield (concept, Concept(id=concept, concept_type=concept_type, props=props))

    def _gen_entities(self, concepts: Dict[str, Concept]):
        """Yield (domain_id, Entity) pairs from all entity resources.

        The primary key is either an entity domain or an entity set; in the
        latter case the owning domain comes from the concept's ``domain`` prop.
        """
        for r in self.entities_resources:
            pkey = r.schema.primaryKey
            if concepts[pkey].concept_type == 'entity_domain':
                domain = concepts[pkey].id
            else:
                domain = concepts[pkey].props['domain']

            df = pd.read_csv(osp.join(self.base_path, r.path), dtype=str,  # TODO: is it okay to use str for all?
                             **self._default_reader_options)
            df = df.set_index(pkey)
            is_cols = list(filter(lambda x: x.startswith('is--'), df.columns.values))
            for ent, row in df.iterrows():
                sets = list()
                for c in is_cols:
                    if row[c] == 'TRUE' and c[4:] != domain:
                        sets.append(c[4:])  # strip the 'is--' part, only keep set name
                yield (domain, Entity(id=ent, domain=domain, sets=sets, props=row.drop(is_cols).to_dict()))

    def _gen_datapoints(self):
        """Yield (indicator, primaryKey, csv_path) triples from all datapoints resources."""
        for r in self.datapoints_resources:
            fields = r.schema.common_fields
            pkey = r.schema.primaryKey
            for f in fields:
                yield (f, pkey, osp.join(self.base_path, r.path))

    def _gen_synonyms(self):
        """Yield (concept_id, Synonym) pairs from all synonym resources."""
        for r in self.synonyms_resources:
            # there should be only two columns: 'synonym' and the concept
            pkey = r.schema.primaryKey
            if pkey[0] == 'synonym':
                concept = pkey[1]
            else:
                concept = pkey[0]
            df = pd.read_csv(osp.join(self.base_path, r.path), **self._default_reader_options)
            sym = Synonym(concept_id=concept, synonyms=df.set_index('synonym')[concept].to_dict())
            yield (concept, sym)

    @staticmethod
    def entity_domain_to_categorical(domain: EntityDomain):
        """Categorical dtype whose categories are all entity ids of *domain*."""
        entities = [e.id for e in domain.entities]
        return pd.api.types.CategoricalDtype(entities)

    @staticmethod
    def entity_set_to_categorical(domain: EntityDomain, s: str):
        """Categorical dtype whose categories are the entity ids of set *s* in *domain*."""
        entity_set = domain.get_entity_set(s)
        entities = [e.id for e in entity_set]
        return pd.api.types.CategoricalDtype(entities)

    def load_ddf(self):
        """Assemble and return the full DDF object from this datapackage's resources."""
        # load concepts
        concepts = dict(self._gen_concepts())

        # load entities
        entities = list(self._gen_entities(concepts))
        domains = dict()
        domains_tmp = dict()
        for domain, entity in entities:
            if domain not in domains_tmp.keys():
                domains_tmp[domain] = list()
            domains_tmp[domain].append(entity)

        for domain, entities_ in domains_tmp.items():
            # TODO: maybe get properties from concepts table
            # Allow duplicated entity because they may be defined in multiple resources
            # i.e. multiple entity sets in separated files.
            domains[domain] = EntityDomain.from_entity_list(domain_id=domain, entities=entities_, allow_duplicated=True)

        # load datapoints. Here we will use Dask for all
        # 1. create categories for entity domains
        dtypes = dict()
        concept_types = dict()
        for domain_name, domain in domains.items():
            dtypes[domain_name] = self.entity_domain_to_categorical(domain)
            for eset in domain.entity_sets:
                dtypes[eset] = self.entity_set_to_categorical(domain, eset)
        # 2. get all concept types, update dtypes for time concepts
        for c_id, c in concepts.items():
            concept_types[c_id] = c.concept_type
            if c.concept_type == 'time':
                dtypes[c_id] = 'str'
        # 3. group files for same indicator together
        indicators = dict()
        for field, pkey, path in self._gen_datapoints():
            indicator = field
            pkey = tuple(sorted(pkey))
            if indicator not in indicators:
                indicators.setdefault(indicator, dict([(pkey, [path])]))
            else:
                if pkey not in indicators[indicator]:
                    indicators[indicator][pkey] = [path]
                else:
                    indicators[indicator][pkey].append(path)
        datapoints = dict()
        for i, v in indicators.items():
            datapoints[i] = dict()
            # TODO: supporting string/float datatypes, not just float
            # fresh copy per indicator so option dicts are never shared
            read_csv_options = self._default_dask_reader_options.copy()
            read_csv_options.update(dict(dtype=dtypes))
            for k, paths in v.items():
                dp = DaskDataPoint(id=i, dimensions=k, path=paths, concept_types=concept_types,
                                   read_csv_options=read_csv_options)
                datapoints[i][k] = dp

        # load synonyms
        synonyms = dict(self._gen_synonyms())

        # return complete DDF object
        return DDF(concepts=concepts, entities=domains, datapoints=datapoints, synonyms=synonyms, props=self.props)

    def generate_ddf_schema(self, progress_bar=False):
        """generate ddf schema from all resources.

        Parameters
        ----------
        progress_bar : bool
            whether progress bar should be shown when generating ddfSchema.
        """
        hash_table = {}
        ddf_schema = {'concepts': [], 'entities': [], 'datapoints': [], 'synonyms': []}
        entity_value_cache = dict()
        dtypes = dict()

        # check if we need progress bar
        if progress_bar:
            if logger.getEffectiveLevel() == 10:  # debug: force not showing progress bar
                logger.warning("progress bar will be disabled in debugging mode.")
                progress_bar = False

        # generate set-membership details for every single entity in dataset
        # also create dtypes for later use
        for domain_id, domain in self.ddf.entities.items():
            dtypes[domain_id] = self.entity_domain_to_categorical(domain)
            for s in self.ddf.entities[domain_id].entity_sets:
                dtypes[s] = self.entity_set_to_categorical(domain, s)
            entity_value_cache[domain_id] = dict()
            for ent in domain.entities:
                sets = set()
                sets.add(domain_id)
                for s in ent.sets:
                    sets.add(s)
                entity_value_cache[domain_id][ent.id] = tuple(sets)

        def _which_sets(entity_, domain_):
            # look up every set/domain an entity belongs to; KeyError means the
            # entity was never defined in the domain's entity files
            try:
                return entity_value_cache[domain_][entity_]
            except KeyError:
                logger.debug('entity {} is not in {} domain!'.format(entity_, domain_))
                raise

        def _gen_key_value_object(resource: Resource):
            # emit one {primaryKey, value, resource} object per (key-permutation, indicator)
            logger.debug('working on: {}'.format(resource.path))
            if isinstance(resource.schema.primaryKey, str):
                pkeys = [resource.schema.primaryKey]
            else:
                pkeys = resource.schema.primaryKey

            entity_cols = [x for x in pkeys
                           if x in self.ddf.concepts
                           and self.ddf.concepts[x].concept_type in ['entity_domain', 'entity_set']]
            value_cols = resource.schema.common_fields

            data = pd.read_csv(osp.join(self.base_path, resource.path), dtype=dtypes,
                               **self._default_reader_options)
            # check if entity columns data match entity defined in entity files
            # (a value outside the categorical dtype shows up as NaN)
            for c in entity_cols:
                if data[c].hasnans:
                    data_ = pd.read_csv(osp.join(self.base_path, resource.path), dtype={c: str}, **self._default_reader_options)
                    ents = dtypes[c].categories.values
                    ents_ = data_[c].unique()
                    diff = set(ents_) - set(ents)
                    logger.critical("in file {}:".format(resource.path))
                    logger.critical("{} column contains entity which does not belong to {} domain/set: {}".format(c, c, list(diff)))
                    raise ValueError("entity mismatch")

            # for resources that have entity_columns: only consider all permutations on entity columns
            if len(entity_cols) > 0:
                data = data[entity_cols].drop_duplicates()

            pkeys_prop = dict()
            for c in pkeys:
                if c == 'concept':  # FIX: was misspelled 'cocnept', leaving this branch unreachable
                    pkeys_prop[c] = {'type': 'concept'}
                elif c not in self.ddf.concepts:
                    pkeys_prop[c] = {'type': 'non_concept'}
                else:
                    concept = self.ddf.concepts[c]
                    if concept.concept_type == 'entity_set':
                        pkeys_prop[c] = {'type': 'entity_set',
                                         'domain': concept.props['domain']}
                    elif concept.concept_type == 'entity_domain':
                        pkeys_prop[c] = {'type': 'entity_domain'}
                    else:
                        pkeys_prop[c] = {'type': 'others'}

            all_permutations = set()
            for _, r in data.iterrows():
                perm = list()
                for c in pkeys:
                    if pkeys_prop[c]['type'] == 'entity_set':
                        domain = pkeys_prop[c]['domain']
                        perm.append(_which_sets(r[c], domain))
                    elif pkeys_prop[c]['type'] == 'entity_domain':
                        perm.append(_which_sets(r[c], c))
                    else:
                        perm.append(tuple([c]))

                all_permutations.add(tuple(perm))

            # if data is empty. Just emit an object with primarykey and null value
            if len(all_permutations) == 0:
                obj = {'primaryKey': pkeys, 'value': None, 'resource': resource.name}
                logger.debug('yielding: {}'.format(str(obj)))
                yield obj

            for row in all_permutations:
                for perm in product(*row):
                    if len(value_cols) > 0:
                        for c in value_cols:
                            obj = {'primaryKey': list(perm), 'value': c, 'resource': resource.name}
                            logger.debug('yielding: {}'.format(str(obj)))
                            yield obj
                    else:
                        obj = {'primaryKey': list(perm), 'value': None, 'resource': resource.name}
                        logger.debug('yielding: {}'.format(str(obj)))
                        yield obj

        def _add_to_schema(resource_schema):
            """handle objects generated by ``_gen_key_value_object``"""
            key = '-'.join(sorted(resource_schema['primaryKey']))
            if not pd.isnull(resource_schema['value']):
                hash_val = key + '--' + resource_schema['value']
            else:
                hash_val = key + '--' + 'nan'
            if hash_val not in hash_table.keys():
                hash_table[hash_val] = {
                    'primaryKey': sorted(resource_schema['primaryKey']),
                    'value': resource_schema['value'],
                    'resources': {resource_schema['resource']}
                }
            else:
                hash_table[hash_val]['resources'].add(resource_schema['resource'])

        # make progressbar and run the process to generate schema
        if progress_bar:
            pbar = tqdm(total=len(self.resources))

        for g in map(_gen_key_value_object, self.resources):
            if progress_bar:
                pbar.update(1)
            for kvo in g:
                # FIX: was logging.debug (root logger); use the module logger
                # consistently with every other message in this class
                logger.debug("adding kvo {}".format(str(kvo)))
                _add_to_schema(kvo)

        if progress_bar:
            pbar.close()

        for sch in hash_table.values():
            sch['resources'] = list(sch['resources'])  # convert set to list
            sch_object = DDFSchema.from_dict(sch)
            if len(sch['primaryKey']) == 1:
                if sch['primaryKey'][0] == 'concept':
                    ddf_schema['concepts'].append(sch_object)
                else:
                    ddf_schema['entities'].append(sch_object)
            else:
                if 'synonym' in sch['primaryKey']:
                    ddf_schema['synonyms'].append(sch_object)
                else:
                    ddf_schema['datapoints'].append(sch_object)

        return ddf_schema

    def get_ddf_schema(self, update=False):
        """Return the cached ddfSchema, regenerating it when ``update`` is True.

        NOTE(review): ``ddfSchema`` defaults to {} (attrs factory), so the
        ``is None`` branch below is unreachable and an empty dict is returned
        as-is — confirm whether a truthiness check was intended.
        """
        if not update and self.ddfSchema is not None:
            return self.ddfSchema
        elif not update and self.ddfSchema is None:
            raise ValueError('No ddfSchema, please use update=True to generate one')
        else:
            self.ddfSchema = self.generate_ddf_schema()
            return self.ddfSchema
| 39.216667 | 132 | 0.565555 |
import os.path as osp
from typing import List, Tuple, Dict, Union, Callable
import attr
import json
from itertools import product
from collections import OrderedDict
from tqdm import tqdm
import pandas as pd
from .ddf import DDF, Concept, EntityDomain, Entity, DaskDataPoint, Synonym
from .utils import absolute_path
import logging
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True, repr=False)
class TableSchema:
fields: List[dict]
primaryKey: Union[List[str], str]
@classmethod
def from_dict(cls, d: dict):
fields = d['fields']
primaryKey = d['primaryKey']
return cls(fields, primaryKey)
@property
def field_names(self):
return [f['name'] for f in self.fields]
@property
def common_fields(self):
field_names = self.field_names
pkey = self.primaryKey
if isinstance(pkey, str):
common_fields = list(filter(lambda x: x != pkey, field_names))
else:
common_fields = list(filter(lambda x: x not in pkey, field_names))
return common_fields
def __repr__(self):
return "TableSchema(primaryKey: {}, fields: {})".format(self.primaryKey, self.common_fields)
@attr.s(auto_attribs=True)
class Resource:
name: str
path: str
schema: TableSchema
@classmethod
def from_dict(cls, d: dict):
path = d['path']
name = d['name']
schema = TableSchema.from_dict(d['schema'])
return cls(name, path, schema)
def to_dict(self):
res = vars(self).copy()
if 'schema' in res:
res['schema'] = vars(res['schema']).copy()
return res
@attr.s(auto_attribs=True)
class DDFSchema:
primaryKey: List[str]
value: str
resources: List[str]
@classmethod
def from_dict(cls, d: dict):
primaryKey = d['primaryKey']
value = d['value']
resources = d['resources']
return cls(primaryKey=primaryKey, value=value, resources=resources)
@attr.s(auto_attribs=True, repr=False)
class DataPackage:
base_path: str
resources: List[Resource]
props: dict = attr.ib(factory=dict)
def __attrs_post_init__(self):
self.base_path = absolute_path(self.base_path)
def __repr__(self):
return f"DataPackage({self.base_path})"
@classmethod
def from_dict(cls, d_: dict, base_path='./'):
d = d_.copy()
resources = list(map(Resource.from_dict, d.pop('resources')))
return cls(base_path=base_path, resources=resources, props=d)
@classmethod
def from_json(cls, json_path):
json_path = absolute_path(json_path)
base_path = osp.dirname(json_path)
d = json.load(open(json_path))
return cls.from_dict(d, base_path)
@classmethod
def from_path(cls, path):
path = absolute_path(path)
json_path = osp.join(path, 'datapackage.json')
return cls.from_json(json_path)
def to_dict(self):
raise NotImplementedError
@attr.s(repr=False)
class DDFcsv(DataPackage):
ddfSchema: Dict[str, List[DDFSchema]] = attr.ib(factory=dict)
ddf: DDF = attr.ib(init=False)
concepts_resources: List[Resource] = attr.ib(init=False)
entities_resources: List[Resource] = attr.ib(init=False)
datapoints_resources: List[Resource] = attr.ib(init=False)
synonyms_resources: List[Resource] = attr.ib(init=False)
_default_reader_options = {'keep_default_na': False, 'na_values': ['']}
_default_dask_reader_options = {'keep_default_na': False,
'na_values': [''],
'sample_rows': 1000000}
def __attrs_post_init__(self):
super(DDFcsv, self).__attrs_post_init__()
conc = list()
ent = list()
dp = list()
syn = list()
for r in self.resources:
pkey = r.schema.primaryKey
if isinstance(pkey, str):
if pkey == 'concept':
conc.append(r)
else:
ent.append(r)
else:
if 'synonym' in pkey:
syn.append(r)
else:
dp.append(r)
self.concepts_resources = conc
self.entities_resources = ent
self.datapoints_resources = dp
self.synonyms_resources = syn
self.ddf = self.load_ddf()
@classmethod
def from_dict(cls, d_: dict, base_path='./'):
d = d_.copy()
resources = list(map(Resource.from_dict, d.pop('resources')))
if 'ddfSchema' in d.keys():
ddf_schema_ = d.pop('ddfSchema')
ddf_schema = dict()
for k, v in ddf_schema_.items():
ddf_schema[k] = [DDFSchema.from_dict(d) for d in v]
else:
ddf_schema = {}
return cls(base_path=base_path, resources=resources, ddfSchema=ddf_schema, props=d)
def to_dict(self):
res = OrderedDict(self.props.copy())
res['resources'] = [r.to_dict() for r in self.resources]
if self.ddfSchema:
res['ddfSchema'] = dict()
for k, v in self.ddfSchema.items():
res['ddfSchema'][k] = [vars(sch).copy() for sch in v]
return res
def _gen_concepts(self):
concepts_paths = [osp.join(self.base_path, r.path) for r in self.concepts_resources]
for p in concepts_paths:
df = pd.read_csv(p, index_col='concept', dtype=str, **self._default_reader_options)
for concept, row in df.iterrows():
concept_type = row['concept_type']
props = row.drop('concept_type').to_dict()
yield (concept, Concept(id=concept, concept_type=concept_type, props=props))
def _gen_entities(self, concepts: Dict[str, Concept]):
for r in self.entities_resources:
pkey = r.schema.primaryKey
if concepts[pkey].concept_type == 'entity_domain':
domain = concepts[pkey].id
else:
domain = concepts[pkey].props['domain']
df = pd.read_csv(osp.join(self.base_path, r.path), dtype=str,
**self._default_reader_options)
df = df.set_index(pkey)
is_cols = list(filter(lambda x: x.startswith('is--'), df.columns.values))
for ent, row in df.iterrows():
sets = list()
for c in is_cols:
if row[c] == 'TRUE' and c[4:] != domain:
sets.append(c[4:])
yield (domain, Entity(id=ent, domain=domain, sets=sets, props=row.drop(is_cols).to_dict()))
def _gen_datapoints(self):
for r in self.datapoints_resources:
fields = r.schema.common_fields
pkey = r.schema.primaryKey
for f in fields:
yield (f, pkey, osp.join(self.base_path, r.path))
def _gen_synonyms(self):
for r in self.synonyms_resources:
pkey = r.schema.primaryKey
if pkey[0] == 'synonym':
concept = pkey[1]
else:
concept = pkey[0]
df = pd.read_csv(osp.join(self.base_path, r.path), **self._default_reader_options)
sym = Synonym(concept_id=concept, synonyms=df.set_index('synonym')[concept].to_dict())
yield (concept, sym)
@staticmethod
def entity_domain_to_categorical(domain: EntityDomain):
entities = [e.id for e in domain.entities]
return pd.api.types.CategoricalDtype(entities)
@staticmethod
def entity_set_to_categorical(domain: EntityDomain, s: str):
entity_set = domain.get_entity_set(s)
entities = [e.id for e in entity_set]
return pd.api.types.CategoricalDtype(entities)
def load_ddf(self):
concepts = dict(self._gen_concepts())
entities = list(self._gen_entities(concepts))
domains = dict()
domains_tmp = dict()
for domain, entity in entities:
if domain not in domains_tmp.keys():
domains_tmp[domain] = list()
domains_tmp[domain].append(entity)
for domain, entities_ in domains_tmp.items():
domains[domain] = EntityDomain.from_entity_list(domain_id=domain, entities=entities_, allow_duplicated=True)
dtypes = dict()
concept_types = dict()
for domain_name, domain in domains.items():
dtypes[domain_name] = self.entity_domain_to_categorical(domain)
for eset in domain.entity_sets:
dtypes[eset] = self.entity_set_to_categorical(domain, eset)
for c_id, c in concepts.items():
concept_types[c_id] = c.concept_type
if c.concept_type == 'time':
dtypes[c_id] = 'str'
indicators = dict()
for field, pkey, path in self._gen_datapoints():
indicator = field
pkey = tuple(sorted(pkey))
if indicator not in indicators:
indicators.setdefault(indicator, dict([(pkey, [path])]))
else:
if pkey not in indicators[indicator]:
indicators[indicator][pkey] = [path]
else:
indicators[indicator][pkey].append(path)
datapoints = dict()
for i, v in indicators.items():
datapoints[i] = dict()
_options.copy()
read_csv_options.update(dict(dtype=dtypes))
for k, paths in v.items():
dp = DaskDataPoint(id=i, dimensions=k, path=paths, concept_types=concept_types,
read_csv_options=read_csv_options)
datapoints[i][k] = dp
synonyms = dict(self._gen_synonyms())
return DDF(concepts=concepts, entities=domains, datapoints=datapoints, synonyms=synonyms, props=self.props)
def generate_ddf_schema(self, progress_bar=False):
hash_table = {}
ddf_schema = {'concepts': [], 'entities': [], 'datapoints': [], 'synonyms': []}
entity_value_cache = dict()
dtypes = dict()
if progress_bar:
if logger.getEffectiveLevel() == 10:
logger.warning("progress bar will be disabled in debugging mode.")
progress_bar = False
for domain_id, domain in self.ddf.entities.items():
dtypes[domain_id] = self.entity_domain_to_categorical(domain)
for s in self.ddf.entities[domain_id].entity_sets:
dtypes[s] = self.entity_set_to_categorical(domain, s)
entity_value_cache[domain_id] = dict()
for ent in domain.entities:
sets = set()
sets.add(domain_id)
for s in ent.sets:
sets.add(s)
entity_value_cache[domain_id][ent.id] = tuple(sets)
def _which_sets(entity_, domain_):
try:
return entity_value_cache[domain_][entity_]
except KeyError:
logger.debug('entity {} is not in {} domain!'.format(entity_, domain_))
raise
def _gen_key_value_object(resource: Resource):
logger.debug('working on: {}'.format(resource.path))
if isinstance(resource.schema.primaryKey, str):
pkeys = [resource.schema.primaryKey]
else:
pkeys = resource.schema.primaryKey
entity_cols = [x for x in pkeys
if x in self.ddf.concepts
and self.ddf.concepts[x].concept_type in ['entity_domain', 'entity_set']]
value_cols = resource.schema.common_fields
data = pd.read_csv(osp.join(self.base_path, resource.path), dtype=dtypes,
**self._default_reader_options)
for c in entity_cols:
if data[c].hasnans:
data_ = pd.read_csv(osp.join(self.base_path, resource.path), dtype={c: str}, **self._default_reader_options)
ents = dtypes[c].categories.values
ents_ = data_[c].unique()
diff = set(ents_) - set(ents)
logger.critical("in file {}:".format(resource.path))
logger.critical("{} column contains entity which does not belong to {} domain/set: {}".format(c, c, list(diff)))
raise ValueError("entity mismatch")
if len(entity_cols) > 0:
data = data[entity_cols].drop_duplicates()
pkeys_prop = dict()
for c in pkeys:
if c == 'cocnept':
pkeys_prop[c] = {'type': 'concept'}
elif c not in self.ddf.concepts:
pkeys_prop[c] = {'type': 'non_concept'}
else:
concept = self.ddf.concepts[c]
if concept.concept_type == 'entity_set':
pkeys_prop[c] = {'type': 'entity_set',
'domain': concept.props['domain']}
elif concept.concept_type == 'entity_domain':
pkeys_prop[c] = {'type': 'entity_domain'}
else:
pkeys_prop[c] = {'type': 'others'}
all_permutations = set()
for _, r in data.iterrows():
perm = list()
for c in pkeys:
if pkeys_prop[c]['type'] == 'entity_set':
domain = pkeys_prop[c]['domain']
perm.append(_which_sets(r[c], domain))
elif pkeys_prop[c]['type'] == 'entity_domain':
perm.append(_which_sets(r[c], c))
else:
perm.append(tuple([c]))
all_permutations.add(tuple(perm))
if len(all_permutations) == 0:
obj = {'primaryKey': pkeys, 'value': None, 'resource': resource.name}
logger.debug('yielding: {}'.format(str(obj)))
yield obj
for row in all_permutations:
for perm in product(*row):
if len(value_cols) > 0:
for c in value_cols:
obj = {'primaryKey': list(perm), 'value': c, 'resource': resource.name}
logger.debug('yielding: {}'.format(str(obj)))
yield obj
else:
obj = {'primaryKey': list(perm), 'value': None, 'resource': resource.name}
logger.debug('yielding: {}'.format(str(obj)))
yield obj
def _add_to_schema(resource_schema):
key = '-'.join(sorted(resource_schema['primaryKey']))
if not pd.isnull(resource_schema['value']):
hash_val = key + '--' + resource_schema['value']
else:
hash_val = key + '--' + 'nan'
if hash_val not in hash_table.keys():
hash_table[hash_val] = {
'primaryKey': sorted(resource_schema['primaryKey']),
'value': resource_schema['value'],
'resources': {resource_schema['resource']}
}
else:
hash_table[hash_val]['resources'].add(resource_schema['resource'])
if progress_bar:
pbar = tqdm(total=len(self.resources))
for g in map(_gen_key_value_object, self.resources):
if progress_bar:
pbar.update(1)
for kvo in g:
logging.debug("adding kvo {}".format(str(kvo)))
_add_to_schema(kvo)
if progress_bar:
pbar.close()
for sch in hash_table.values():
sch['resources'] = list(sch['resources'])
sch_object = DDFSchema.from_dict(sch)
if len(sch['primaryKey']) == 1:
if sch['primaryKey'][0] == 'concept':
ddf_schema['concepts'].append(sch_object)
else:
ddf_schema['entities'].append(sch_object)
else:
if 'synonym' in sch['primaryKey']:
ddf_schema['synonyms'].append(sch_object)
else:
ddf_schema['datapoints'].append(sch_object)
return ddf_schema
def get_ddf_schema(self, update=False):
if not update and self.ddfSchema is not None:
return self.ddfSchema
elif not update and self.ddfSchema is None:
raise ValueError('No ddfSchema, please use update=True to generate one')
else:
self.ddfSchema = self.generate_ddf_schema()
return self.ddfSchema
| true | true |
f72d5748bd487ceab701dc4cc272207445379be2 | 2,211 | py | Python | app/views/main.py | ybqdren/BygjRace-DataShowChart | eb903aaf0dd8682e1c12eb182e9f0b7f4eef2daf | [
"Apache-2.0"
] | null | null | null | app/views/main.py | ybqdren/BygjRace-DataShowChart | eb903aaf0dd8682e1c12eb182e9f0b7f4eef2daf | [
"Apache-2.0"
] | null | null | null | app/views/main.py | ybqdren/BygjRace-DataShowChart | eb903aaf0dd8682e1c12eb182e9f0b7f4eef2daf | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
#@Time : 2020/9/20 14:48
#@Author: 赵雯
#@File : main.py
from flask import Blueprint
from flask import abort
from flask import render_template

from app.models import Tbl_Video_Game_Sales
from app import create_app
from app.extensions import db
# Create the blueprint holding all main chart views (original comment: "创建蓝图").
main_print = Blueprint('main_print',__name__)
# Chart-switching page (original comment: "图像切换").
@main_print.route('/charts/changeView')
def change_View():
    """Render the view-switching chart with the single best-selling game."""
    top_seller = (Tbl_Video_Game_Sales.query
                  .order_by(Tbl_Video_Game_Sales.Global_Sales.desc())
                  .first())
    return render_template('/main/changeView-chart.html', game_sale=top_seller)
# Word-cloud page (original comment: "字符云" = word cloud).
@main_print.route('/charts/wordCount')
def word_Count():
    """Render the word cloud of platforms from the 100 best-selling rows."""
    # NOTE(review): creating the app and calling db.init_app on every request
    # looks like leftover bootstrap code — confirm it isn't needed per request.
    db.init_app(create_app('app'))
    # NOTE(review): list(set(...)) deduplicates but randomizes order, and the
    # items are SQLAlchemy row tuples, not plain strings — verify the template
    # handles both.
    game_name = list(set(db.session.query(Tbl_Video_Game_Sales.Platform).order_by(Tbl_Video_Game_Sales.Global_Sales.desc()).limit(100)))
    return render_template('/main/wordCount-chart.html',game_name = game_name)
# Rose (Nightingale) pie page (original comment: "玫瑰饼图").
@main_print.route('/charts/rosePie')
def charts_rosePie():
    """Render a rose pie of the ten best-selling PS3 games."""
    ps3_query = Tbl_Video_Game_Sales.query.filter(Tbl_Video_Game_Sales.Platform == 'PS3')
    top_ten = ps3_query.order_by(Tbl_Video_Game_Sales.Global_Sales.desc()).all()[:10]
    return render_template('/main/rosePie-chart.html', game=top_ten)
# Pie chart page (original comment: "饼图").
@main_print.route('/charts/pie')
def charts_pie():
    """Render a pie chart of the ten best-selling PS3 games."""
    ps3_query = Tbl_Video_Game_Sales.query.filter(Tbl_Video_Game_Sales.Platform == 'PS3')
    top_ten = ps3_query.order_by(Tbl_Video_Game_Sales.Global_Sales.desc()).all()[:10]
    return render_template('/main/pie-chart.html', game=top_ten)
# Radar chart page (original comment: "雷达图").
@main_print.route('/charts/radar')
def charts_radar():
    """Render a radar chart comparing the first three Wii games."""
    wii_games = Tbl_Video_Game_Sales.query.filter(Tbl_Video_Game_Sales.Platform == 'Wii').all()
    return render_template('/main/radar-chart.html', game=wii_games[:3])
# Line chart page (original comment: "折线图").
# Bar chart page (original comment: "柱状图").
@main_print.route('/charts/bar')
def charts_bar():
    """Render a bar chart for the first game row; 404 when the table is empty.

    The original fell through and returned None when no row existed, which
    Flask turns into a 500 error; fail with an explicit 404 instead.
    """
    game_sale = Tbl_Video_Game_Sales.query.first()
    if game_sale is None:
        abort(404)
    return render_template('/main/bar-chart.html', game_sale=game_sale)
@main_print.route('/')
def index():
return render_template('/main/index.html') | 34.015385 | 146 | 0.736318 |
from flask import Blueprint
from flask import render_template
from app.models import Tbl_Video_Game_Sales
from app import create_app
from app.extensions import db
main_print = Blueprint('main_print',__name__)
@main_print.route('/charts/changeView')
def change_View():
game_sale = Tbl_Video_Game_Sales.query.order_by(Tbl_Video_Game_Sales.Global_Sales.desc()).first()
return render_template('/main/changeView-chart.html',game_sale = game_sale)
@main_print.route('/charts/wordCount')
def word_Count():
db.init_app(create_app('app'))
game_name = list(set(db.session.query(Tbl_Video_Game_Sales.Platform).order_by(Tbl_Video_Game_Sales.Global_Sales.desc()).limit(100)))
return render_template('/main/wordCount-chart.html',game_name = game_name)
@main_print.route('/charts/rosePie')
def charts_rosePie():
game = Tbl_Video_Game_Sales.query.filter(Tbl_Video_Game_Sales.Platform == 'PS3').order_by(Tbl_Video_Game_Sales.Global_Sales.desc()).all()[:10]
return render_template('/main/rosePie-chart.html',game = game)
@main_print.route('/charts/pie')
def charts_pie():
game = Tbl_Video_Game_Sales.query.filter(Tbl_Video_Game_Sales.Platform == 'PS3').order_by(Tbl_Video_Game_Sales.Global_Sales.desc()).all()[:10]
return render_template('/main/pie-chart.html',game = game)
@main_print.route('/charts/radar')
def charts_radar():
game = Tbl_Video_Game_Sales.query.filter(Tbl_Video_Game_Sales.Platform == 'Wii').all()[:3]
return render_template('/main/radar-chart.html',game = game)
@main_print.route('/charts/line')
def charts_line():
game = dict()
for y in range(1999,2011,1):
game[y] = Tbl_Video_Game_Sales.query.filter(Tbl_Video_Game_Sales.Year == y).count()
return render_template('/main/line-chart.html',game = game)
@main_print.route('/charts/bar')
def charts_bar():
game_sale = Tbl_Video_Game_Sales.query.first()
if( game_sale != None):
return render_template('/main/bar-chart.html',game_sale = game_sale)
@main_print.route('/')
def index():
return render_template('/main/index.html') | true | true |
f72d577bcfe3d24380f86ed18bbd6aa06bd74d8b | 457 | py | Python | app/migrations/0011_signature_status.py | leonolan2020/phoenix | b5956a7003e548f01255cbd5d0d76cfd0ac77a81 | [
"MIT"
] | 1 | 2020-09-19T21:56:40.000Z | 2020-09-19T21:56:40.000Z | app/migrations/0011_signature_status.py | leonolan2020/phoenix | b5956a7003e548f01255cbd5d0d76cfd0ac77a81 | [
"MIT"
] | null | null | null | app/migrations/0011_signature_status.py | leonolan2020/phoenix | b5956a7003e548f01255cbd5d0d76cfd0ac77a81 | [
"MIT"
] | 5 | 2020-09-18T18:53:03.000Z | 2020-10-21T14:42:00.000Z | # Generated by Django 3.1 on 2020-09-25 16:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0010_auto_20200923_0246'),
]
operations = [
migrations.AddField(
model_name='signature',
name='status',
field=models.CharField(default='sdsd', max_length=200, verbose_name='status'),
preserve_default=False,
),
]
| 22.85 | 90 | 0.608315 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0010_auto_20200923_0246'),
]
operations = [
migrations.AddField(
model_name='signature',
name='status',
field=models.CharField(default='sdsd', max_length=200, verbose_name='status'),
preserve_default=False,
),
]
| true | true |
f72d57b4c59f86945e4272585aaa430b4ea5075e | 1,343 | py | Python | pymata-aio/blink.py | hevangel/arduino_examples | 06c717ff87eab1b0fb0a7f17bf3a1e824fe59b6a | [
"MIT"
] | null | null | null | pymata-aio/blink.py | hevangel/arduino_examples | 06c717ff87eab1b0fb0a7f17bf3a1e824fe59b6a | [
"MIT"
] | null | null | null | pymata-aio/blink.py | hevangel/arduino_examples | 06c717ff87eab1b0fb0a7f17bf3a1e824fe59b6a | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
Turns on an LED on for one second, then off for one second, repeatedly.
Most Arduinos have an on-board LED you can control. On the Uno and
Leonardo, it is attached to digital pin 13. If you're unsure what
pin the on-board LED is connected to on your Arduino model, check
the documentation at http://www.arduino.cc
"""
from pymata_aio.pymata3 import PyMata3
from pymata_aio.constants import Constants
# Arduino LED is on pin 13
BOARD_LED = 13
# If you are having problems connecting, you may
# wish to add some time the arduino_wait parameter.
# replace:
# board = PyMata3()
# with:
# board = PyMata3(arduino_wait=5)
# adjust the arduino_wait value to meet the needs
# of your computer
# instantiate PyMata3
board = PyMata3()
def setup():
"""
Set the Arduino BOARD_LED pin as an output
:return:
"""
board.set_pin_mode(BOARD_LED, Constants.OUTPUT)
def loop():
"""
Toggle the LED by alternating the values written
to the LED pin. Wait 1 second between writes.
Also note the use of board.sleep and not
time.sleep.
:return:
"""
print("LED On")
board.digital_write(BOARD_LED, 1)
board.sleep(1.0)
print("LED Off")
board.digital_write(BOARD_LED, 0)
board.sleep(1.0)
if __name__ == "__main__":
setup()
while True:
loop() | 23.155172 | 73 | 0.689501 |
from pymata_aio.pymata3 import PyMata3
from pymata_aio.constants import Constants
BOARD_LED = 13
board = PyMata3()
def setup():
board.set_pin_mode(BOARD_LED, Constants.OUTPUT)
def loop():
print("LED On")
board.digital_write(BOARD_LED, 1)
board.sleep(1.0)
print("LED Off")
board.digital_write(BOARD_LED, 0)
board.sleep(1.0)
if __name__ == "__main__":
setup()
while True:
loop() | true | true |
f72d57dd8a2accb925b58023a61affc55a11c045 | 946 | py | Python | insta/forms.py | WaMungai/InstaClone | 68279dfbf93801c1b5355b91c9e03e3b469cd6d0 | [
"Unlicense"
] | null | null | null | insta/forms.py | WaMungai/InstaClone | 68279dfbf93801c1b5355b91c9e03e3b469cd6d0 | [
"Unlicense"
] | 5 | 2021-06-08T20:55:05.000Z | 2022-03-12T00:14:51.000Z | insta/forms.py | WaMungai/InstaClone | 68279dfbf93801c1b5355b91c9e03e3b469cd6d0 | [
"Unlicense"
] | null | null | null | from django import forms
from .models import Image,Profile,Comments
class NewsLetterForm(forms.Form):
your_name=forms.CharField(label='First Name',max_length=30)
email=forms.EmailField(label='Email')
class NewImageForm(forms.ModelForm):
class Meta:
model= Image
exclude =['editor','pub_date','profile','likes','comments','followers']
widgets={
'tags':forms.CheckboxSelectMultiple(),
}
class NewProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude =['editor']
widget={
'tags':forms.CheckboxSelectMultiple(),
}
class UpdateProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['editor']
widgets={
'tags':forms.CheckboxSelectMultiple(),
}
class NewCommentForm(forms.ModelForm):
class Meta:
model=Comments
exclude=['editor'] | 27.028571 | 79 | 0.615222 | from django import forms
from .models import Image,Profile,Comments
class NewsLetterForm(forms.Form):
your_name=forms.CharField(label='First Name',max_length=30)
email=forms.EmailField(label='Email')
class NewImageForm(forms.ModelForm):
class Meta:
model= Image
exclude =['editor','pub_date','profile','likes','comments','followers']
widgets={
'tags':forms.CheckboxSelectMultiple(),
}
class NewProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude =['editor']
widget={
'tags':forms.CheckboxSelectMultiple(),
}
class UpdateProfileForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['editor']
widgets={
'tags':forms.CheckboxSelectMultiple(),
}
class NewCommentForm(forms.ModelForm):
class Meta:
model=Comments
exclude=['editor'] | true | true |
f72d5872f699dd0d0180714ec301adb7cf7026cd | 2,963 | py | Python | fid-judaica/compactmemory/Scripts/CM-uni-name-filter.py | judaicalink/judaicalink-generators | 845dbd6886fa82ec45adf16ba08fad9d26169419 | [
"MIT"
] | 1 | 2020-09-20T17:00:05.000Z | 2020-09-20T17:00:05.000Z | fid-judaica/compactmemory/Scripts/CM-uni-name-filter.py | wisslab/judaicalink-generators | 845dbd6886fa82ec45adf16ba08fad9d26169419 | [
"MIT"
] | null | null | null | fid-judaica/compactmemory/Scripts/CM-uni-name-filter.py | wisslab/judaicalink-generators | 845dbd6886fa82ec45adf16ba08fad9d26169419 | [
"MIT"
] | null | null | null | #Maral Dadvar
#09/01/2019
#This script filters the names with initialls.
import unicodedata
import os , glob
import rdflib
from rdflib import Namespace, URIRef, Graph , Literal , OWL, RDFS , RDF
from SPARQLWrapper import SPARQLWrapper2, XML , JSON , TURTLE
import re
import pprint
os.chdir('C:\\Users\\Maral\\Desktop')
graphout = Graph()
foaf = Namespace("http://xmlns.com/foaf/0.1/")
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
jl = Namespace("http://data.judaicalink.org/ontology/")
gndo = Namespace("http://d-nb.info/standards/elementset/gnd#")
skos = Namespace("http://www.w3.org/2004/02/skos/core#")
dc = Namespace ("http://purl.org/dc/elements/1.1/")
edm = Namespace("http://www.europeana.eu/schemas/edm/")
graphout.bind('jl', jl)
graphout.bind('rdfs',RDFS)
graphout.bind('foaf',foaf)
graphout.bind('skos',skos)
graphout.bind('owl',OWL)
graphout.bind('gndo',gndo)
graphout.bind('dc',dc)
graphout.bind('edm',edm)
graph = Graph()
graph.parse('C:\\Users\\Maral\\Desktop\\cm-authors-context-GND-uni-02.rdf', format="turtle")
spar1= """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX gndo: <http://d-nb.info/standards/elementset/gnd#>
PREFIX pro: <http://purl.org/hpi/patchr#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX edm: <http://www.europeana.eu/schemas/edm/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dblp: <http://dblp.org/rdf/schema-2015-01-26#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX dbpedia: <http://dbpedia.org/resource/>
PREFIX jl: <http://data.judaicalink.org/ontology/>
SELECT ?x ?label ?id ?desc ?title ?gnd
where {
?x a edm:ProvidedCHO.
?x dc:creator ?label.
?x dc:identifier ?id.
?x dc:description ?desc.
?x dc:title ?title.
?x gndo:gndIdentifier ?gnd.
}
"""
result = graph.query(spar1)
for item in result:
labels = item[1].value
print (labels)
if re.search(r'\w{1}\.\s*\w{1}\.',labels):
print ('not valid')
elif re.search(r'\w{1}\.',labels):
print ('not valid')
else:
graphout.add((URIRef(item[0]), RDF.type , edm.ProvidedCHO ))
graphout.add( (URIRef(item[0]), dc.creator , Literal(item[1].value) ) )
graphout.add( (URIRef(item[0]), dc.identifier , Literal(item[2].value) ) )
graphout.add( (URIRef(item[0]), gndo.gndIdentifier , URIRef(item[5]) ) )
graphout.add ((URIRef(item[0]) , dc.description , Literal((item[3].value))))
graphout.add ((URIRef(item[0]) , dc.title , Literal((item[4]))))
graphout.serialize(destination = 'cm-uni-names-filtered.ttl' , format="turtle")
| 29.929293 | 92 | 0.616605 |
import unicodedata
import os , glob
import rdflib
from rdflib import Namespace, URIRef, Graph , Literal , OWL, RDFS , RDF
from SPARQLWrapper import SPARQLWrapper2, XML , JSON , TURTLE
import re
import pprint
os.chdir('C:\\Users\\Maral\\Desktop')
graphout = Graph()
foaf = Namespace("http://xmlns.com/foaf/0.1/")
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
jl = Namespace("http://data.judaicalink.org/ontology/")
gndo = Namespace("http://d-nb.info/standards/elementset/gnd#")
skos = Namespace("http://www.w3.org/2004/02/skos/core#")
dc = Namespace ("http://purl.org/dc/elements/1.1/")
edm = Namespace("http://www.europeana.eu/schemas/edm/")
graphout.bind('jl', jl)
graphout.bind('rdfs',RDFS)
graphout.bind('foaf',foaf)
graphout.bind('skos',skos)
graphout.bind('owl',OWL)
graphout.bind('gndo',gndo)
graphout.bind('dc',dc)
graphout.bind('edm',edm)
graph = Graph()
graph.parse('C:\\Users\\Maral\\Desktop\\cm-authors-context-GND-uni-02.rdf', format="turtle")
spar1= """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX gndo: <http://d-nb.info/standards/elementset/gnd#>
PREFIX pro: <http://purl.org/hpi/patchr#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX edm: <http://www.europeana.eu/schemas/edm/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dblp: <http://dblp.org/rdf/schema-2015-01-26#>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX dbpedia: <http://dbpedia.org/resource/>
PREFIX jl: <http://data.judaicalink.org/ontology/>
SELECT ?x ?label ?id ?desc ?title ?gnd
where {
?x a edm:ProvidedCHO.
?x dc:creator ?label.
?x dc:identifier ?id.
?x dc:description ?desc.
?x dc:title ?title.
?x gndo:gndIdentifier ?gnd.
}
"""
result = graph.query(spar1)
for item in result:
labels = item[1].value
print (labels)
if re.search(r'\w{1}\.\s*\w{1}\.',labels):
print ('not valid')
elif re.search(r'\w{1}\.',labels):
print ('not valid')
else:
graphout.add((URIRef(item[0]), RDF.type , edm.ProvidedCHO ))
graphout.add( (URIRef(item[0]), dc.creator , Literal(item[1].value) ) )
graphout.add( (URIRef(item[0]), dc.identifier , Literal(item[2].value) ) )
graphout.add( (URIRef(item[0]), gndo.gndIdentifier , URIRef(item[5]) ) )
graphout.add ((URIRef(item[0]) , dc.description , Literal((item[3].value))))
graphout.add ((URIRef(item[0]) , dc.title , Literal((item[4]))))
graphout.serialize(destination = 'cm-uni-names-filtered.ttl' , format="turtle")
| true | true |
f72d59225445029c6a86a5bdbd4987fd50a20da0 | 106 | py | Python | code/spavanac.py | kkirigaya/Kattis | 29b7180aef70f51ea5b4d064204f70fc6d29d312 | [
"MIT"
] | 1 | 2021-06-05T20:52:43.000Z | 2021-06-05T20:52:43.000Z | code/spavanac.py | kkirigaya/Kattis | 29b7180aef70f51ea5b4d064204f70fc6d29d312 | [
"MIT"
] | null | null | null | code/spavanac.py | kkirigaya/Kattis | 29b7180aef70f51ea5b4d064204f70fc6d29d312 | [
"MIT"
] | null | null | null | h,m = map(int, input().split())
mins = (24+h) * 60 + m
mins -= 45
mins %= 24*60
print(mins//60, mins%60)
| 15.142857 | 31 | 0.556604 | h,m = map(int, input().split())
mins = (24+h) * 60 + m
mins -= 45
mins %= 24*60
print(mins//60, mins%60)
| true | true |
f72d5cc69110919bd4a78eb7980484c39ec6a0ac | 2,256 | py | Python | Neural Networks and Deep Learning/Week 3/Planar data classification with one hidden layer/planar_utils.py | 837278709/Deep-Learning-Coursera-1 | 2498a90d3f61ec0876752205066ec95323f83161 | [
"MIT"
] | 2 | 2020-05-08T21:18:08.000Z | 2020-07-18T22:13:22.000Z | Neural Networks and Deep Learning/Week 3/Planar data classification with one hidden layer/planar_utils.py | 837278709/Deep-Learning-Coursera-1 | 2498a90d3f61ec0876752205066ec95323f83161 | [
"MIT"
] | null | null | null | Neural Networks and Deep Learning/Week 3/Planar data classification with one hidden layer/planar_utils.py | 837278709/Deep-Learning-Coursera-1 | 2498a90d3f61ec0876752205066ec95323f83161 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y[0], cmap=plt.cm.Spectral)
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1/(1+np.exp(-x))
return s
def load_planar_dataset():
np.random.seed(1)
m = 400 # number of examples
N = int(m/2) # number of points per class
D = 2 # dimensionality
X = np.zeros((m,D)) # data matrix where each row is a single example
Y = np.zeros((m,1), dtype='uint8') # labels vector (0 for red, 1 for blue)
a = 4 # maximum ray of the flower
for j in range(2):
ix = range(N*j,N*(j+1))
t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2 # theta
r = a*np.sin(4*t) + np.random.randn(N)*0.2 # radius
X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def load_extra_datasets():
N = 200
noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)
no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure | 34.181818 | 158 | 0.626773 | import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
def plot_decision_boundary(model, X, y):
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=y[0], cmap=plt.cm.Spectral)
def sigmoid(x):
s = 1/(1+np.exp(-x))
return s
def load_planar_dataset():
np.random.seed(1)
m = 400
N = int(m/2)
D = 2
X = np.zeros((m,D))
Y = np.zeros((m,1), dtype='uint8')
a = 4
for j in range(2):
ix = range(N*j,N*(j+1))
t = np.linspace(j*3.12,(j+1)*3.12,N) + np.random.randn(N)*0.2
r = a*np.sin(4*t) + np.random.randn(N)*0.2
X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
Y[ix] = j
X = X.T
Y = Y.T
return X, Y
def load_extra_datasets():
N = 200
noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2, n_classes=2, shuffle=True, random_state=None)
no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure | true | true |
f72d5e0130a982d18538ffc607524187f38dc74a | 71,614 | py | Python | PV_ICE/main.py | NREL/PV-DEMICE | 6e2938950ff10c37f176f46aeb76c78de609f535 | [
"BSD-3-Clause"
] | 3 | 2020-05-11T15:19:47.000Z | 2020-09-10T16:53:10.000Z | PV_ICE/main.py | NREL/PV_DEMICE | 6e2938950ff10c37f176f46aeb76c78de609f535 | [
"BSD-3-Clause"
] | 2 | 2020-04-09T17:41:54.000Z | 2020-07-20T17:25:26.000Z | PV_ICE/main.py | NREL/PV_DEMICE | 6e2938950ff10c37f176f46aeb76c78de609f535 | [
"BSD-3-Clause"
] | 1 | 2020-04-09T17:36:28.000Z | 2020-04-09T17:36:28.000Z | # -*- coding: utf-8 -*-
"""
Main.py contains the functions to calculate the different quantities of materials
in each step of the process. Refer to the diagram in the Package Overview for the
steps considered. 
Support functions include Weibull functions for reliability and failure; also,
functions to modify baseline values and evaluate sensitivity to the parameters.
"""
import numpy as np
import pandas as pd
import datetime
import os
import matplotlib.pyplot as plt
def read_baseline_material(scenario, material='None', file=None):
    """
    Resolve a baseline file path, prompting interactively when none is given.

    Parameters
    ----------
    scenario : object
        Scenario the baseline belongs to (unused in the visible body).
    material : str
        Material name (unused in the visible body).
    file : str, optional
        Path to the baseline file. When None, a Tk file picker is opened.

    NOTE(review): the selected path is bound to ``file`` but never returned
    or stored, so the function currently has no visible effect beyond the
    dialog — looks incomplete; confirm intended behavior.
    """
    if file is None:
        try:
            file = _interactive_load('Select baseline file')
        except Exception:
            # Fix: the original concatenated 'supported'+'on this', which
            # rendered as "supportedon this system" in the error message.
            raise Exception('Interactive load failed. Tkinter not supported '
                            'on this system. Try installing X-Quartz and reloading')
def _interactive_load(title=None):
    """Open a Tk file-picker dialog and return the selected file path."""
    import tkinter
    from tkinter import filedialog

    window = tkinter.Tk()
    # Hide the bare root window; only the file dialog should be visible.
    window.withdraw()
    # Keep the dialog above other windows so the prompt is not missed.
    window.attributes("-topmost", True)
    return filedialog.askopenfilename(parent=window, title=title)
def _unitReferences(keyword):
'''
Specify units for variable in scenario or materials
Parameters
----------
keyword : str
String of scenario or material column label
Returns
-------
yunits : str
Unit specific to the keyword provided
'''
moduleDictionary = {'year': {'unit': 'Years', 'source': 'input'},
'new_Installed_Capacity_[MW]': {'unit': 'Power [MW]', 'source':'input'},
'mod_eff': {'unit': 'Efficiency $\eta$ [%]', 'source':'input'},
'mod_reliability_t50': {'unit': 'Years' , 'source':'input'},
'mod_reliability_t90': {'unit': 'Years', 'source':'input'},
'mod_degradation': {'unit': 'Percentage [%]', 'source':'input'},
'mod_lifetime': {'unit': 'Years', 'source':'input'},
'mod_MFG_eff': {'unit': 'Efficiency $\eta$ [%]', 'source':'input'},
'mod_EOL_collection_eff': {'unit': 'Efficiency $\eta$ [%]', 'source':'input'},
'mod_EOL_collected_recycled': {'unit': 'Percentage [%]', 'source':'input'},
'mod_Repair': {'unit': 'Percentage [%]', 'source':'input'},
'mod_MerchantTail': {'unit': 'Percentage [%]', 'source':'input'},
'mod_Reuse': {'unit': 'Percentage [%]', 'source':'input'},
'Area': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Area_disposedby_Failure': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Area_disposedby_ProjectLifetime': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Area_disposed': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Active_Area': {'unit': 'm$^2$', 'source': 'generated'},
'Installed_Capacity_[W]': {'unit': 'Power [W]', 'source': 'generated'},
'EOL_on_Year_0': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_1': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_2': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_3': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_4': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_5': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_6': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_7': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_8': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_9': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_10': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_11': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_12': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_13': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_14': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_15': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_16': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_17': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_18': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_19': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_20': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_21': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_22': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_23': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_24': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_25': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_26': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_27': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_28': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_29': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_30': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_31': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_32': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_33': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_34': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_35': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_36': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_37': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_38': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_39': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_40': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_41': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_42': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_43': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_44': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_45': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_46': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_47': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_48': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_49': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_50': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_51': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_52': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_53': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_54': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_55': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_Collected': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_NotCollected': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_Recycled': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_NotRecycled_Landfilled': {'unit': 'm$^2$', 'source': 'generated'}
}
materialDictionary={'year': {'unit': 'Years', 'source': 'input'},
'mat_virgin_eff': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_massperm2': {'unit': 'Mass [g]', 'source': 'input'},
'mat_MFG_eff': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_MFG_scrap_recycled': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_MFG_scrap_Recycled': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_MFG_scrap_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_EOL_collected_Recycled': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_EOL_Recycling_eff': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_EOL_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_EOL_RecycledHQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_modules_NotRecycled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_modules_NotCollected': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_sento_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_NotRecycled_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_2_HQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_2_OQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EoL_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_UsedinManufacturing': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Manufacturing_Input': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Sentto_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Recycled_Successfully': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_into_HQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_into_OQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Virgin_Stock': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_EOL_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_MFG_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_Recycled_OU': {'unit': 'Mass [g]', 'source': 'generated'}
}
if keyword in moduleDictionary.keys():
yunits = moduleDictionary[keyword]['unit']
elif keyword in materialDictionary.keys():
yunits = materialDictionary[keyword]['unit']
else:
print("Warning: Keyword / Units not Found")
yunits = 'UNITS'
return yunits
def distance(s_lat, s_lng, e_lat, e_lng):
    """
    Haversine great-circle distance in kilometres between start and end
    points given in decimal degrees. Works on scalars or numpy arrays
    (broadcast over the end coordinates).

    Example
    -------
    >>> distance(45, -110, [33, 44], [-115, -140])
    array([1402.24996689, 2369.0150434 ])
    """
    earth_radius_km = 6373.0  # approximate radius of Earth

    lat1 = np.radians(s_lat)
    lng1 = np.radians(s_lng)
    lat2 = np.radians(e_lat)
    lng2 = np.radians(e_lng)

    # Haversine term: sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlng/2)
    hav = np.sin((lat2 - lat1) / 2) ** 2 \
        + np.cos(lat1) * np.cos(lat2) * np.sin((lng2 - lng1) / 2) ** 2

    return 2 * earth_radius_km * np.arcsin(np.sqrt(hav))
def drivingdistance(origin, destination, APIkey):
    """
    Build the Google Maps Directions API (XML) request URL for driving
    directions between two points.

    Parameters
    ----------
    origin : tuple
        (lat, lon) of the start point.
    destination : tuple
        (lat, lon) of the end point.
    APIkey : str
        Google Maps API key appended to the request.

    Returns
    -------
    str
        Fully formed request URL.
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    return ('https://maps.googleapis.com/maps/api/directions/xml?'
            'origin={},{}&destination={},{}&key={}'
            .format(lat1, lon1, lat2, lon2, APIkey))
class Simulation:
"""
The ScenarioObj top level class is used to work on Circular Economy scenario objects,
keep track of filenames, data for module and materials, operations modifying
the baselines, etc.
Parameters
----------
name : text to append to output files
nowstr : current date/time string
path : working directory with circular economy results
Methods
-------
__init__ : initialize the object
_setPath : change the working directory
"""
def __init__(self, name=None, path=None):
'''
initialize ScenarioObj with path of Scenario's baseline of module and materials
as well as a basename to append to
Parameters
----------
name: string, append temporary and output files with this value
path: location of Radiance materials and objects
Returns
-------
none
'''
self.path = "" # path of working directory
self.name = "" # basename to append
now = datetime.datetime.now()
self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)
if path is None:
self._setPath(os.getcwd())
else:
self._setPath(path)
if name is None:
self.name = self.nowstr # set default filename for output files
else:
self.name = name
self.scenario={}
def _setPath(self, path):
"""
setPath - move path and working directory
"""
self.path = os.path.abspath(path)
print('path = '+ path)
try:
os.chdir(self.path)
except OSError as exc:
LOGGER.error('Path doesn''t exist: %s' % (path))
LOGGER.exception(exc)
raise(exc)
    # Helper: create the output folder structure under the working directory if missing.
def _checkPath(path): # create the file structure if it doesn't exist
if not os.path.exists(path):
os.makedirs(path)
print('Making path: '+path)
    def createScenario(self, name, file=None):
        """
        Create a Scenario object and register it in ``self.scenario`` under *name*.

        Parameters
        ----------
        name : str
            Dictionary key for the new scenario; an existing entry with the
            same key is silently replaced.
        file : str, optional
            Forwarded to the ``Scenario`` constructor (defined elsewhere in
            this module); presumably the module baseline file to load --
            confirm against ``Scenario.__init__``.
        """
        self.scenario[name] = Scenario(name, file)
def modifyScenario(self, scenarios, stage, value, start_year=None):
if start_year is None:
start_year = int(datetime.datetime.now().year)
if scenarios is None:
scenarios = list(self.scenario.keys())
else:
if isinstance(scenarios, str):
scenarios = [scenarios]
selectyears = self.scenario[scenarios[0]].data['year']>start_year
for scen in scenarios:
self.scenario[scen].data.loc[selectyears, stage] = value
def calculateMassFlow(self, scenarios = None, materials=None, weibullInputParams = None,
                      bifacialityfactors = None, reducecapacity = True, debugflag=False):
    '''
    Run the module- and material-level mass-flow calculation.

    Takes each selected scenario's baseline dataframe (already imported,
    with the right number of columns and content) and adds the calculated
    columns: installed/active/disposed area, repaired area, power, and the
    collection / recycling / landfill streams for each material.

    Parameters
    ------------
    scenarios : None, str or list
        Scenario name(s) to loop over; None selects all scenarios on this
        object. Must exist on the PV ICE object.
    materials : None, str or list
        Material name(s) to loop over; None selects all materials of the
        first scenario. Must exist on the modeled scenario(s).
    weibullInputParams : dict or None
        Dictionary with 'alpha' and 'beta' values shaping the Weibull
        curve (beta is sometimes exchanged with lifetime, e.g. Irena 2016
        uses beta = 30). If None, alpha and beta are calculated from the
        t50 and t90 columns of the module baseline (or taken from
        'weibull_alpha'/'weibull_beta' columns when present).
    bifacialityfactors : str or None
        Path to a CSV with a 'bifi' bifaciality factor per year.
    reducecapacity : bool
        If True, area is derived using the (possibly bifacial-boosted)
        'irradiance_stc'; otherwise a flat 1000 W/m^2 is used.
    debugflag : bool
        Currently unused in this routine.

    Returns
    --------
    None. Results are written back onto ``self.scenario[scen].data`` and
    each ``material[mat].materialdata`` dataframe.
    '''
    if scenarios is None:
        scenarios = list(self.scenario.keys())
    else:
        if isinstance(scenarios, str):
            scenarios = [scenarios]

    for scen in scenarios:
        print("Working on Scenario: ", scen)
        print("********************")
        df = self.scenario[scen].data

        # Irradiance constant used to convert capacity to area.
        if bifacialityfactors is not None:
            bf = pd.read_csv(bifacialityfactors)
            df['irradiance_stc'] = 1000.0 + bf['bifi']*100.0 # W/m^2 (min. Bifacial STC Increase)
        else:
            df['irradiance_stc'] = 1000.0 # W/m^2

        # Short aliases for the reliability columns (dropped again at the end).
        df['t50'] = df['mod_reliability_t50']
        df['t90'] = df['mod_reliability_t90']

        # Calculating Area and Mass
        if 'Mass_[MetricTonnes]' in df:
            # Special debugging path: drive the flow by mass, not capacity.
            df['new_Installed_Capacity_[W]'] = 0
            df['new_Installed_Capacity_[MW]'] = 0
            df['Area'] = df['Mass_[MetricTonnes]']
            print("Warning, this is for special debuging of Wambach Procedure."+
                  "Make sure to use Wambach Module")
        else:
            df['new_Installed_Capacity_[W]'] = df['new_Installed_Capacity_[MW]']*1e6
            if reducecapacity:
                df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/df['irradiance_stc'] # m^2
            else:
                df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/1000.0 # m^2

        df['Area'] = df['Area'].fillna(0) # change NA's to 0s

        # Waste by generation (deployment year) and cumulative waste by year.
        Generation_Disposed_byYear = []
        Generation_Active_byYear= []
        Generation_Power_byYear = []
        weibullParamList = []

        # Accumulators, summed over all generations in the loop below.
        df['Cumulative_Area_disposedby_Failure'] = 0
        df['Cumulative_Area_disposedby_ProjectLifetime'] = 0
        df['Cumulative_Area_disposed'] = 0
        df['Repaired_[W]'] = 0
        df['Repaired_Area'] = 0
        df['Cumulative_Active_Area'] = 0
        df['Installed_Capacity_[W]'] = 0

        for generation, row in df.iterrows():
            # generation is an int 0, 1, 2, ... (positional cohort index).

            # Pick Weibull parameters by priority: explicit argument,
            # per-row columns, else fit from t50/t90.
            if weibullInputParams:
                weibullIParams = weibullInputParams
            elif 'weibull_alpha' in row:
                # Weibull input params passed internally as columns.
                weibullIParams = {'alpha': row['weibull_alpha'], 'beta': row['weibull_beta']}
            else:
                # Calculate Weibull params from this module's t50 and t90.
                t50, t90 = row['t50'], row['t90']
                weibullIParams = weibull_params({t50: 0.50, t90: 0.90})

            f = weibull_cdf(weibullIParams['alpha'], weibullIParams['beta'])

            weibullParamList.append(weibullIParams)

            # Age of this cohort in each simulation year (0 before install).
            x = np.clip(df.index - generation, 0, np.inf)
            cdf = list(map(f, x))
            # Year-over-year failure fraction from the CDF differences.
            pdf = [0] + [j - i for i, j in zip(cdf[: -1], cdf[1 :])]

            activearea = row['Area']
            if np.isnan(activearea):
                activearea=0

            activeareacount = []
            areadisposed_failure = []
            areadisposed_projectlifetime = []
            arearepaired = []
            arearepaired_powergen = []
            areapowergen = []
            active=0
            disposed_projectlifetime=0

            for age in range(len(cdf)):
                disposed_projectlifetime=0
                if x[age] == 0.0:
                    # Cohort not deployed yet in this year: all streams zero.
                    activeareacount.append(0)
                    areadisposed_failure.append(0)
                    areadisposed_projectlifetime.append(0)
                    areapowergen.append(0)
                    arearepaired.append(0)
                    arearepaired_powergen.append(0)
                else:
                    active += 1
                    activeareaprev = activearea
                    # Remove this year's failures, then add back the fraction
                    # of failures that get repaired (mod_Repair is a percent).
                    activearea = activearea-row['Area']*pdf[age]+row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01
                    arearepaired_failure = row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01
                    arearepaired.append(arearepaired_failure)
                    # Power of repaired area, degraded by age (compounded).
                    arearepaired_powergen.append(arearepaired_failure*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active)

                    areadisposed_failure.append(activeareaprev-activearea)
                    # End of project lifetime for this cohort: decommission,
                    # keeping the merchant-tail and reuse fractions active.
                    if age == int(row['mod_lifetime']+generation):
                        activearea_temp = activearea
                        activearea = 0+activearea*(df.iloc[age]['mod_MerchantTail']*0.01)
                        disposed_projectlifetime = activearea_temp-activearea
                        activearea2 = 0+disposed_projectlifetime*(df.iloc[age]['mod_Reuse']*0.01)
                        activearea = activearea + activearea2
                        disposed_projectlifetime = disposed_projectlifetime - activearea2
                    areadisposed_projectlifetime.append(disposed_projectlifetime)
                    activeareacount.append(activearea)
                    areapowergen.append(activearea*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active)

            try:
                # Because the clipped ages start at 0 in the installation year,
                # find that year and add the initial installed area/power to it.
                fixinitialareacount = next((i for i, e in enumerate(x) if e), None) - 1
                activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area']
                areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] +
                                                     row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc'])
            except:
                # The last generation has no nonzero clipped age, so `next`
                # returns None and the subtraction raises — meaning the loop
                # over lifetimes has finished; patch the final position.
                fixinitialareacount = len(cdf)-1
                activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area']
                areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] +
                                                     row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc'])
                print("Finished Area+Power Generation Calculations")

            # Accumulate this generation's streams into the per-year totals.
            df['Cumulative_Area_disposedby_Failure'] += areadisposed_failure
            df['Cumulative_Area_disposedby_ProjectLifetime'] += areadisposed_projectlifetime
            df['Cumulative_Area_disposed'] += areadisposed_failure
            df['Cumulative_Area_disposed'] += areadisposed_projectlifetime
            df['Repaired_[W]'] += arearepaired_powergen
            df['Repaired_Area'] += arearepaired
            df['Cumulative_Active_Area'] += activeareacount
            df['Installed_Capacity_[W]'] += areapowergen
            Generation_Disposed_byYear.append([x + y for x, y in zip(areadisposed_failure, areadisposed_projectlifetime)])
            Generation_Active_byYear.append(activeareacount)
            Generation_Power_byYear.append(areapowergen)

        df['WeibullParams'] = weibullParamList
        # Upper-triangular matrix: row = generation, column = disposal year.
        MatrixDisposalbyYear = pd.DataFrame(Generation_Disposed_byYear, columns = df.index, index = df.index)
        MatrixDisposalbyYear = MatrixDisposalbyYear.add_prefix("EOL_on_Year_")

        # Drop EOL_on_Year_* columns from a previous run before re-joining.
        try:
            df = df[df.columns.drop(list(df.filter(regex='EOL_on_Year_')))]
        except:
            print("Warning: Issue dropping EOL columns generated by " \
                  "calculateMFC routine to overwrite")

        df = df.join(MatrixDisposalbyYear)

        ## Start of EOL processes.
        filter_col = [col for col in df if col.startswith('EOL_on_Year_')]
        EOL = df[filter_col]

        # This multiplication pattern goes through module and then material.
        # It is for processes that improve by calendar year, e.g. collection
        # efficiency: each generation-by-year matrix column is scaled by that
        # year's efficiency N:
        #
        #   [ G1_1 G1_2 G1_3 ...]        [N1
        #   [ 0    G2_1 G2_2 ...]   X     N2
        #   [ 0    0    G3_1 ...]         N3]
        #
        EOL_Collected = EOL.mul(df['mod_EOL_collection_eff'].values*0.01)
        df['EoL_Collected'] = list(EOL_Collected.sum())
        landfill_Collection = EOL.mul(1-(df['mod_EOL_collection_eff'].values*0.01))
        df['EoL_NotCollected'] = list(landfill_Collection.sum())
        EOL_Recycled = EOL_Collected.mul(df['mod_EOL_collected_recycled'].values*0.01)
        df['EoL_Recycled'] = list(EOL_Recycled.sum())
        EOL_NotRecycled_Landfilled = EOL_Collected.mul((1-df['mod_EOL_collected_recycled'].values*0.01))
        df['EoL_NotRecycled_Landfilled'] = list(EOL_NotRecycled_Landfilled.sum())

        # Cleanup of internal renaming and internal-use columns.
        df.drop(['new_Installed_Capacity_[W]', 't50', 't90'], axis = 1, inplace=True)

        # Total module area entering manufacturing (mod_MFG_eff is a percent).
        df['ModuleTotal_MFG']=df['Area']*100/df['mod_MFG_eff']

        self.scenario[scen].data = df

        ################
        # Material Loop#
        ################

        if materials is None:
            materials = list(self.scenario[scenarios[0]].material.keys())
        else:
            if isinstance(materials, str):
                materials = [materials]

        for mat in materials:

            print("==> Working on Material : ", mat)

            dm = self.scenario[scen].material[mat].materialdata

            # Switch to mass units for the material now. This multiplication
            # differs from the one above: it depends on the ORIGINAL mass of
            # each module when installed, so rows are scaled by the
            # generation's mass-per-m2 M:
            #
            #   [M1 *  [ G1_1 G1_2 G1_3 ...]
            #    M2     [ 0    G2_1 G2_2 ...]
            #    M3]    [ 0    0    G3_1 ...]
            #
            mat_modules_EOL_sentoRecycling = EOL_Recycled.multiply(dm['mat_massperm2'], axis=0)
            dm['mat_modules_Collected'] = list(EOL_Collected.multiply(dm['mat_massperm2'], axis=0).sum())
            dm['mat_modules_NotCollected'] = list(landfill_Collection.multiply(dm['mat_massperm2'], axis=0).sum())
            dm['mat_modules_Recycled'] = list(EOL_Recycled.multiply(dm['mat_massperm2'], axis=0).sum())
            dm['mat_modules_NotRecycled'] = list(EOL_NotRecycled_Landfilled.multiply(dm['mat_massperm2'], axis=0).sum())

            # Split module mass sent to recycling by the material's own
            # targeted-recycling fraction, then by recycling yield, then by
            # high-quality (HQ) vs other-quality (OQ) and closed-loop reuse.
            mat_EOL_sento_Recycling = mat_modules_EOL_sentoRecycling.mul(dm['mat_EOL_collected_Recycled'].values*0.01)
            dm['mat_EOL_sento_Recycling'] = list(mat_EOL_sento_Recycling.sum())
            landfill_material_EOL_NotRecycled_Landfilled = mat_modules_EOL_sentoRecycling.mul(1-(dm['mat_EOL_collected_Recycled'].values*0.01))
            dm['mat_EOL_NotRecycled_Landfilled'] = list(landfill_material_EOL_NotRecycled_Landfilled.sum())
            mat_EOL_Recycled_Succesfully = mat_EOL_sento_Recycling.mul(dm['mat_EOL_Recycling_eff'].values*0.01)
            dm['mat_EOL_Recycled'] = list(mat_EOL_Recycled_Succesfully.sum())
            landfill_material_EOL_Recyled_Losses_Landfilled = mat_EOL_sento_Recycling.mul(1-(dm['mat_EOL_Recycling_eff'].values*0.01))
            dm['mat_EOL_Recycled_Losses_Landfilled'] = list(landfill_material_EOL_Recyled_Losses_Landfilled.sum())
            mat_EOL_Recycled_HQ = mat_EOL_Recycled_Succesfully.mul(dm['mat_EOL_Recycled_into_HQ'].values*0.01)
            dm['mat_EOL_Recycled_2_HQ'] = list(mat_EOL_Recycled_HQ.sum())
            mat_EOL_Recycled_OQ = mat_EOL_Recycled_Succesfully.mul(1-(dm['mat_EOL_Recycled_into_HQ'].values*0.01))
            dm['mat_EOL_Recycled_2_OQ'] = list(mat_EOL_Recycled_OQ.sum())
            mat_EOL_Recycled_HQ_into_MFG = mat_EOL_Recycled_HQ.mul(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01)
            dm['mat_EoL_Recycled_HQ_into_MFG'] = list(mat_EOL_Recycled_HQ_into_MFG.sum())
            mat_EOL_Recycled_HQ_into_OU = mat_EOL_Recycled_HQ.mul(1-(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01))
            dm['mat_EOL_Recycled_HQ_into_OU'] = list(mat_EOL_Recycled_HQ_into_OU.sum())

            # Bulk manufacturing-side calculations (efficiencies are percents).
            dm['mat_UsedSuccessfullyinModuleManufacturing'] = (df['Area'] * dm['mat_massperm2'])
            dm['mat_EnteringModuleManufacturing'] = (df['Area'] * dm['mat_massperm2']*100/df['mod_MFG_eff'])
            dm['mat_LostinModuleManufacturing'] = dm['mat_EnteringModuleManufacturing'] - dm['mat_UsedSuccessfullyinModuleManufacturing']

            dm['mat_Manufacturing_Input'] = dm['mat_EnteringModuleManufacturing'] / (dm['mat_MFG_eff'] * 0.01)

            # Scrap = material-manufacturing losses + module-manufacturing losses.
            dm['mat_MFG_Scrap'] = (dm['mat_Manufacturing_Input'] - dm['mat_EnteringModuleManufacturing'] +
                                   dm['mat_LostinModuleManufacturing'])
            dm['mat_MFG_Scrap_Sentto_Recycling'] = dm['mat_MFG_Scrap'] * dm['mat_MFG_scrap_Recycled'] * 0.01

            dm['mat_MFG_Scrap_Landfilled'] = dm['mat_MFG_Scrap'] - dm['mat_MFG_Scrap_Sentto_Recycling']
            dm['mat_MFG_Scrap_Recycled_Successfully'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] *
                                                         dm['mat_MFG_scrap_Recycling_eff'] * 0.01)
            dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] -
                                                              dm['mat_MFG_Scrap_Recycled_Successfully'])
            dm['mat_MFG_Recycled_into_HQ'] = (dm['mat_MFG_Scrap_Recycled_Successfully'] *
                                              dm['mat_MFG_scrap_Recycled_into_HQ'] * 0.01)
            dm['mat_MFG_Recycled_into_OQ'] = dm['mat_MFG_Scrap_Recycled_Successfully'] - dm['mat_MFG_Recycled_into_HQ']
            dm['mat_MFG_Recycled_HQ_into_MFG'] = (dm['mat_MFG_Recycled_into_HQ'] *
                                                  dm['mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'] * 0.01)
            dm['mat_MFG_Recycled_HQ_into_OU'] = dm['mat_MFG_Recycled_into_HQ'] - dm['mat_MFG_Recycled_HQ_into_MFG']

            # Virgin demand = manufacturing input minus both closed loops.
            dm['mat_Virgin_Stock'] = dm['mat_Manufacturing_Input'] - dm['mat_EoL_Recycled_HQ_into_MFG'] - dm['mat_MFG_Recycled_HQ_into_MFG']

            # Raw virgin needs before mining and refining efficiency losses.
            dm['mat_Virgin_Stock_Raw'] = (dm['mat_Virgin_Stock'] * 100 / dm['mat_virgin_eff'])

            # Waste totals.
            dm['mat_Total_EOL_Landfilled'] = (dm['mat_modules_NotCollected'] +
                                              dm['mat_modules_NotRecycled'] +
                                              dm['mat_EOL_NotRecycled_Landfilled'] +
                                              dm['mat_EOL_Recycled_Losses_Landfilled'])

            dm['mat_Total_MFG_Landfilled'] = (dm['mat_MFG_Scrap_Landfilled'] +
                                              dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'])

            dm['mat_Total_Landfilled'] = (dm['mat_Total_EOL_Landfilled'] +
                                          dm['mat_Total_MFG_Landfilled'])

            dm['mat_Total_Recycled_OU'] = (dm['mat_EOL_Recycled_2_OQ'] +
                                           dm['mat_EOL_Recycled_HQ_into_OU'] +
                                           dm['mat_MFG_Recycled_into_OQ'] +
                                           dm['mat_MFG_Recycled_HQ_into_OU'])

            self.scenario[scen].material[mat].materialdata = dm
def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):
    """
    Overwrite scenario assumptions with IRENA 2016 loss-model inputs:
    fixed Weibull alpha/beta, 40-year module lifetime, lossless module
    manufacturing, and no MFG-scrap recycling for every material.

    ELorRL : 'RL' (regular loss) or 'EL' (early loss).
    """
    # NOTE(review): DEAD CODE — an identical method of the same name is
    # defined again later in this class; Python keeps only the last
    # definition, so this copy is never the one that runs.
    if ELorRL == 'RL':
        weibullInputParams = {'alpha': 5.3759, 'beta': 30} # Regular-loss scenario IRENA
        print("Using Irena Regular Loss Assumptions")
    if ELorRL == 'EL':
        weibullInputParams = {'alpha': 2.4928, 'beta': 30} # Early-loss scenario IRENA
        print("Using Irena Early Loss Assumptions")
    # NOTE(review): any other ELorRL value leaves weibullInputParams unbound,
    # so the loop below raises NameError.

    if scenarios is None:
        scenarios = list(self.scenario.keys())
    else:
        if isinstance(scenarios, str):
            scenarios = [scenarios]

    for scen in scenarios:
        # Fixed Weibull parameters override the t50/t90-derived ones.
        self.scenario[scen].data['weibull_alpha'] = weibullInputParams['alpha']
        self.scenario[scen].data['weibull_beta'] = weibullInputParams['beta']
        self.scenario[scen].data['mod_lifetime'] = 40.0
        self.scenario[scen].data['mod_MFG_eff'] = 100.0

        for mat in self.scenario[scen].material:
            self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0
            self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0

    return
def check_Years_dataandMaterials(self, scenarios=None, materials=None):
    """
    Placeholder: intended to verify that module data and material data
    cover consistent year ranges. Not implemented yet.
    """
    print("Not Done")
def trim_Years( self, startYear=None, endYear=None, aggregateInstalls=False,
               averageEfficiency=False, averageMaterialData = False, methodAddedYears='repeat',
               scenarios=None, materials=None):
    '''
    Trim every scenario's module and material dataframes to the
    [startYear, endYear] range.

    Parameters
    ----------
    startYear, endYear : int or None
        Range to keep; default to the first scenario's data range.
        Extending beyond the existing range is not implemented yet
        (only prints a warning).
    aggregateInstalls : bool
        If True, sum 'new_Installed_Capacity_[MW]' from the trimmed-off
        earlier years into the startYear row.
    averageEfficiency : bool
        If True, write the pre-startYear mean of the module efficiency
        into the startYear row.
    averageMaterialData : bool or str
        NOTE(review): compared against the string 'average' below, so
        passing True has no effect — confirm the intended sentinel.
    methodAddedYears : str
        'repeat' or 'zeroes' — intended behavior for padding added end
        years ('repeat' repeats the last value, 'zeroes' places zeroes);
        currently unused since extending is not implemented.
    scenarios, materials : selection, None = all.
    '''
    if scenarios is None:
        scenarios = list(self.scenario.keys())
    else:
        if isinstance(scenarios, str):
            scenarios = [scenarios]

    # Data range is taken from the FIRST selected scenario only.
    scen0 = scenarios[0]
    dataStartYear = int(self.scenario[scen0].data.iloc[0]['year'])
    dataEndYear = int(self.scenario[scen0].data.iloc[-1]['year'])

    if startYear is None:
        startYear = dataStartYear
        print("startYear not provided. Setting to start year of Module data", startYear)
    if endYear is None:
        endYear = dataEndYear
        print("endYear not provided. Setting to end year of Module data", endYear)

    # NOTE(review): the next two assignments are no-ops.
    startYear = startYear
    endYear = endYear

    for scen in scenarios:
        baseline = self.scenario[scen].data

        if int(startYear) < int(dataStartYear):
            print("ADD YEARS HERE. not done yet")

        if int(endYear) > int(dataEndYear):
            print("ADD YEARS HERE. not done yet")

        # TODO: skip the copy when no reduction is actually needed.
        reduced = baseline.loc[(baseline['year']>=startYear) & (baseline['year']<=endYear)].copy()

        if aggregateInstalls:
            prev = baseline.loc[(baseline['year']<startYear)].sum()
            reduced.loc[reduced['year'] == startYear, 'new_Installed_Capacity_[MW]'] = prev['new_Installed_Capacity_[MW]']

        if averageEfficiency:
            prev = baseline.loc[(baseline['year']<startYear)].mean()
            # NOTE(review): 'mod_eff ' has a trailing space — if the baseline
            # column is named 'mod_eff', this silently creates a new column.
            reduced.loc[reduced['year'] == startYear, 'mod_eff '] = prev['mod_eff ']

        reduced.reset_index(drop=True, inplace=True)
        self.scenario[scen].data = reduced #reassign the module data to the simulation

        for mat in self.scenario[scen].material:
            if int(startYear) < int(dataStartYear):
                print("ADD YEARS HERE. not done yet")

            if int(endYear) > int(dataEndYear):
                print("ADD YEARS HERE. not done yet")

            matdf = self.scenario[scen].material[mat].materialdata #pull out the df
            reduced = matdf.loc[(matdf['year']>=startYear) & (matdf['year']<=endYear)].copy()

            if averageMaterialData == 'average':
                # NOTE(review): mask is built from `baseline` (module df), not
                # `matdf` — correct only when both share the same year index.
                prev = matdf.loc[(baseline['year']<startYear)].mean()
                # Columns 1..11 only: skip 'year' (0) and any columns that were
                # appended by the mass-flow calculation.
                matkeys = list(reduced.keys())[1:12]
                for matkey in matkeys:
                    reduced.loc[reduced['year'] == startYear, matkey] = prev[matkey]

            reduced.reset_index(drop=True, inplace=True)
            self.scenario[scen].material[mat].materialdata = reduced #reassign the material data to the simulation
def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):
    """
    Overwrite scenario assumptions with IRENA 2016 loss-model inputs:
    fixed Weibull alpha/beta, 40-year module lifetime, lossless module
    manufacturing, and no MFG-scrap recycling for every material.

    Parameters
    ----------
    scenarios : None, str or list
        Scenario name(s); None selects all scenarios.
    ELorRL : str
        'RL' for the IRENA regular-loss Weibull, 'EL' for early-loss.

    Raises
    ------
    ValueError
        If ELorRL is neither 'RL' nor 'EL'.
    """
    if ELorRL == 'RL':
        weibullInputParams = {'alpha': 5.3759, 'beta': 30}  # IRENA regular-loss
        print("Using Irena Regular Loss Assumptions")
    elif ELorRL == 'EL':
        weibullInputParams = {'alpha': 2.4928, 'beta': 30}  # IRENA early-loss
        print("Using Irena Early Loss Assumptions")
    else:
        # BUG FIX: previously any other value left weibullInputParams
        # unbound, raising an opaque NameError below.
        raise ValueError("ELorRL must be 'RL' or 'EL', got %r" % (ELorRL,))

    if scenarios is None:
        scenarios = list(self.scenario.keys())
    elif isinstance(scenarios, str):
        scenarios = [scenarios]

    for scen in scenarios:
        data = self.scenario[scen].data
        # Fixed Weibull parameters override the t50/t90-derived ones.
        data['weibull_alpha'] = weibullInputParams['alpha']
        data['weibull_beta'] = weibullInputParams['beta']
        data['mod_lifetime'] = 40.0
        data['mod_MFG_eff'] = 100.0

        for mat in self.scenario[scen].material:
            mdata = self.scenario[scen].material[mat].materialdata
            mdata['mat_MFG_eff'] = 100.0
            mdata['mat_MFG_scrap_Recycled'] = 0.0

    return
def scenMod_PerfectManufacturing(self, scenarios=None):
    """
    Make manufacturing lossless for the selected scenarios: module
    'mod_MFG_eff' and every material's 'mat_virgin_eff' and 'mat_MFG_eff'
    are all set to 100%.

    scenarios : None (all scenarios), a scenario name, or a list of names.
    """
    if scenarios is None:
        scenarios = list(self.scenario.keys())
    elif isinstance(scenarios, str):
        scenarios = [scenarios]

    for scen in scenarios:
        scen_obj = self.scenario[scen]
        scen_obj.data['mod_MFG_eff'] = 100.0
        for mat in scen_obj.material:
            mat_data = scen_obj.material[mat].materialdata
            mat_data['mat_virgin_eff'] = 100.0
            mat_data['mat_MFG_eff'] = 100.0
    return
def scenMod_noCircularity(self, scenarios=None):
    """
    Zero out every circular-economy pathway (repair, merchant tail, reuse,
    collection and all recycling stages) for the selected scenarios and
    all of their materials.

    scenarios : None (all scenarios), a scenario name, or a list of names.
    """
    # NOTE(review): 'mod_EOL_collection_eff ' carries a trailing space; it is
    # preserved byte-for-byte here, but verify it matches the actual baseline
    # column name.
    module_cols = ['mod_EOL_collection_eff ', 'mod_EOL_collected_recycled',
                   'mod_Repair', 'mod_MerchantTail', 'mod_Reuse']
    material_cols = ['mat_MFG_scrap_Recycled', 'mat_MFG_scrap_Recycling_eff',
                     'mat_MFG_scrap_Recycled_into_HQ',
                     'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG',
                     'mat_EOL_collected_Recycled', 'mat_EOL_Recycling_eff',
                     'mat_EOL_Recycled_into_HQ', 'mat_EOL_RecycledHQ_Reused4MFG']

    if scenarios is None:
        scenarios = list(self.scenario.keys())
    elif isinstance(scenarios, str):
        scenarios = [scenarios]

    for scen in scenarios:
        scen_obj = self.scenario[scen]
        for col in module_cols:
            scen_obj.data[col] = 0.0
        for mat in scen_obj.material:
            mat_data = scen_obj.material[mat].materialdata
            for col in material_cols:
                mat_data[col] = 0.0
    return
def aggregateResults(self, scenarios=None, materials=None):
    """
    Aggregate per-material mass-flow results into two summary DataFrames.

    For each scenario and material, builds yearly columns for virgin stock
    and the three waste streams (all / EOL / MFG), converted to metric
    tonnes, plus per-metric module sums, new installed capacity, active
    capacity and decommissioned capacity.

    Requires calculateMassFlow() to have been run first (reads the
    'mat_Virgin_Stock' / 'mat_Total_*' material columns and the
    'Installed_Capacity_[W]' module column).

    Parameters
    ----------
    scenarios : None, str or list
        Scenario name(s); None aggregates all scenarios.
    materials : None, str or list
        Material name(s); None aggregates all materials of the first scenario.

    Returns
    -------
    USyearly : DataFrame
        Yearly values, indexed by calendar year; also stored as self.USyearly.
    UScum : DataFrame
        Column-wise cumulative sums of USyearly; also stored as self.UScum.
    """
    if scenarios is None:
        scenarios = list(self.scenario.keys())
    else:
        if isinstance(scenarios, str):
            scenarios = [scenarios]

    if materials is None:
        materials = list(self.scenario[scenarios[0]].material.keys())
    else:
        if isinstance(materials, str):
            materials = [materials]

    # Material-data columns to aggregate, and their short output prefixes.
    keywds = ['mat_Virgin_Stock', 'mat_Total_Landfilled', 'mat_Total_EOL_Landfilled', 'mat_Total_MFG_Landfilled']
    nice_keywds = ['VirginStock', 'WasteAll', 'WasteEOL', 'WasteMFG']

    USyearly=pd.DataFrame()

    for scen in scenarios:
        for ii in range(len(keywds)):
            keywd = keywds[ii]
            nicekey = nice_keywds[ii]

            for mat in materials:
                # One column per (metric, material, simulation name, scenario).
                USyearly[nicekey+'_'+mat+'_'+self.name+'_'+scen] = self.scenario[scen].material[mat].materialdata[keywd]

            # Module total for this metric = sum over this scenario's materials.
            filter_col = [col for col in USyearly if (col.startswith(nicekey) and col.endswith(self.name+'_'+scen)) ]
            USyearly[nicekey+'_Module_'+self.name+'_'+scen] = USyearly[filter_col].sum(axis=1)

    # 2DO: Add multiple objects option
    USyearly = USyearly/1000000  # presumably grams -> metric tonnes; suffix below says Tonnes
    USyearly = USyearly.add_suffix('_[Tonnes]')

    # Capacity is in MW — different units, so no tonnes conversion here.
    keywd1='new_Installed_Capacity_[MW]'
    for scen in scenarios:
        USyearly['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd1]

    # Creating cumulative results.
    UScum = USyearly.copy()
    UScum = UScum.cumsum()

    # 'Installed_Capacity_[W]' is already 'cumulative' (active capacity), so it
    # is added to USyearly only, renamed to ActiveCapacity; decommissioned
    # capacity = cumulative new installs - active capacity.
    # TODO: Rename Installed_Capacity to ActiveCapacity throughout.
    keywd='Installed_Capacity_[W]'
    for scen in scenarios:
        USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd]/1e6
        USyearly['DecommisionedCapacity_'+self.name+'_'+scen+'_[MW]'] = (
            UScum['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]']-
            USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'])

    # Reindex both frames by calendar year.
    # NOTE(review): uses `scen` left over from the loop above, i.e. the LAST
    # scenario's year column — fine only if all scenarios share the same years.
    USyearly.index = self.scenario[scen].data['year']
    UScum.index = self.scenario[scen].data['year']

    self.USyearly = USyearly
    self.UScum = UScum

    return USyearly, UScum
def plotScenariosComparison(self, keyword=None, scenarios=None):
    """
    Plot one module-data column versus year for several scenarios on a
    single matplotlib figure.

    keyword : str
        Column of ``scenario.data`` to plot. If None, the available
        column names are printed and nothing is plotted.
    scenarios : None (all scenarios), a scenario name, or a list of names.
    """
    if scenarios is None:
        scenarios = list(self.scenario.keys())
    elif isinstance(scenarios, str):
        scenarios = [scenarios]

    if keyword is None:
        first_scen = list(self.scenario.keys())[0]
        print("Choose one of the keywords: ",
              list(self.scenario[first_scen].data.keys()))
        return

    yunits = _unitReferences(keyword)

    plt.figure()
    for scen in scenarios:
        scen_data = self.scenario[scen].data
        plt.plot(scen_data['year'], scen_data[keyword], label=scen)
    plt.legend()
    plt.xlabel('Year')
    plt.title(keyword.replace('_', " "))
    plt.ylabel(yunits)
def plotMetricResults(self):
    """
    Convenience wrapper: generate the standard set of plotly plots —
    yearly and cumulative VirginStock / WasteAll / WasteEOL / WasteMFG,
    plus the installed-capacity figure. Requires aggregateResults() to
    have been run first.
    """
    # Kept from the original: verifies plotly is importable before plotting.
    from plotly.subplots import make_subplots

    for timeframe in ('yearly', 'cumulative'):
        for metric in ('VirginStock', 'WasteAll', 'WasteEOL', 'WasteMFG'):
            self.plotMaterialResults(keyword=metric, yearlyorcumulative=timeframe)
    self.plotInstalledCapacityResults()
def plotMaterialResults(self, keyword, yearlyorcumulative='yearly', cumplot=False):
    """
    Plot aggregated material results with plotly (requires
    aggregateResults() to have been run first).

    keyword : one of 'VirginStock', 'WasteAll', 'WasteEOL', 'WasteMFG';
        None prints the options and returns.
    yearlyorcumulative : 'yearly' plots self.USyearly, anything else
        plots self.UScum.
    cumplot : if True, return the figure instead of showing it.
    """
    import plotly.express as px
    import re

    data = self.USyearly if yearlyorcumulative == 'yearly' else self.UScum

    if keyword is None:
        print("keyword options are :" 'VirginStock', 'WasteALL', 'WasteEOL', 'WasteMFG')
        return
    #TODO: add a split to first bracket and print unique values option and return.

    matching = [col for col in data if col.startswith(keyword)]

    # Human-readable title, y-axis label and legend entries.
    titlekeyword = str.capitalize(yearlyorcumulative) + re.sub(r"([A-Z])", r" \1", keyword)
    units = matching[0].split('_')[-1]

    legend_labels = []
    for col in matching:
        pieces = col.split('_')[1:-1]   # drop the keyword prefix and unit suffix
        legend_labels.append(str.capitalize(' '.join(pieces)))

    fig = px.line(data[matching], template="plotly_white")
    fig.update_layout(
        title=titlekeyword,
        xaxis_title="Year",
        yaxis_title=units
    )
    for idx, label in enumerate(legend_labels):
        fig.data[idx].name = label
        fig.data[idx].hovertemplate = label

    if cumplot:
        return fig
    fig.show()
    return
def plotInstalledCapacityResults(self, cumplot=False):
    """
    Plot the capacity columns from self.USyearly together with the
    cumulative new-install columns from self.UScum using plotly.
    Requires aggregateResults() to have been run first.

    cumplot : if True, return the figure instead of showing it.
    """
    # TODO: Add scenarios input to subselect which ones to plot.
    import plotly.express as px

    datay = self.USyearly
    datac = self.UScum
    filter_colc = [col for col in datac if col.startswith('newInstalledCapacity')]
    filter_coly = [col for col in datay if col.startswith('Capacity')]

    def _labels(frame):
        # Reproduce the original label mangling: drop the metric prefix and
        # unit suffix, then strip the list repr's brackets/quotes.
        out = []
        for col in frame:
            pieces = col.split('_')[1:]
            pieces = pieces[:-1]
            out.append(str(pieces)[2:-2])
        return out

    datay = datay[filter_coly].copy()
    labels_y = _labels(datay)
    # NOTE(review): labels appear swapped relative to the data sources
    # (yearly Capacity columns labeled 'Cumulative New Installs' and vice
    # versa) — preserved as-is; confirm intent before changing.
    mylegendy = ['Cumulative New Installs, ' + lab for lab in labels_y]
    print(labels_y)

    datac = datac[filter_colc].copy()
    mylegendc = ['Capacity, ' + lab for lab in _labels(datac)]

    merged = datay.join(datac)
    all_labels = mylegendy + mylegendc
    titlekeyword = 'Installed Capacity and Cumulative new Installs'
    units = filter_colc[0].split('_')[-1]

    fig = px.line(merged, template="plotly_white")
    fig.update_layout(
        title=titlekeyword,
        xaxis_title="Year",
        yaxis_title=units
    )
    for idx, label in enumerate(all_labels):
        fig.data[idx].name = label
        fig.data[idx].hovertemplate = label

    if cumplot:
        return fig
    fig.show()
    return
def plotMaterialComparisonAcrossScenarios(self, keyword=None, scenarios=None, material = None):
    """
    Plot one material-data column for a single material across several
    scenarios with matplotlib.

    keyword : str
        Column of the material data to plot; None prints the available
        column names and returns.
    scenarios : None (all scenarios), a scenario name, or a list of names.
    material : str
        Single material name; None (or a non-str value) prints the
        available material names and returns.
    """
    if scenarios is None:
        scenarios = list(self.scenario.keys())
    else:
        if isinstance(scenarios, str):
            scenarios = [scenarios]

    if keyword is None:
        scens = list(self.scenario.keys())[0]
        mats = list(self.scenario[scens].material.keys())[0]
        print("Choose one of the keywords: ", list(self.scenario[scens].material[mats].materialdata.keys()))
        return

    if material is None:
        scens = list(self.scenario.keys())[0]
        mats = list(self.scenario[scens].material.keys())
        print("Choose one of the Materials: ", mats)
        return
    else:
        if isinstance(material, str) is False:
            # BUG FIX: `scens` was referenced here without being defined,
            # raising NameError instead of the intended guidance message.
            scens = list(self.scenario.keys())[0]
            mats = list(self.scenario[scens].material.keys())
            print("Can only pass one material name (str). Choose one of the Materials: ", mats)
            return

    yunits = _unitReferences(keyword)

    plt.figure()
    for scen in scenarios:
        plt.plot(self.scenario[scen].data['year'], self.scenario[scen].material[material].materialdata[keyword], label=scen)
    plt.legend()
    plt.xlabel('Year')
    plt.title((material + ' ' + keyword.replace('_', " ")))
    plt.ylabel(yunits)
class Scenario(Simulation):
    """
    A single scenario inside a Simulation: holds the module baseline data
    (self.data, a DataFrame) and a dict of Material objects (self.material).
    """

    def __init__(self, name, file=None):
        """
        name : str — scenario label.
        file : str or None — path to the module baseline CSV (header row of
            column names, second row of metadata/units, then data rows). If
            None, an interactive file picker is opened.
        """
        self.name = name
        self.material = {}

        if file is None:
            try:
                file = _interactive_load('Select module baseline file')
            except Exception:
                # BUG FIX: the two literals concatenated without a space,
                # producing "supportedon this".
                raise Exception('Interactive load failed. Tkinter not supported '
                                'on this system. Try installing X-Quartz and reloading')

        # BUG FIX: the file was previously opened twice (utf-8, then
        # utf-8-sig), leaking the first handle and never closing the second.
        # Open once with a context manager; utf-8-sig also handles a BOM.
        with open(str(file), 'r', encoding="UTF-8-sig") as csvdata:
            firstline = csvdata.readline()
            secondline = csvdata.readline()
            head = firstline.rstrip('\n').split(",")
            meta = dict(zip(head, secondline.rstrip('\n').split(",")))
            data = pd.read_csv(csvdata, names=head)

        # Everything except the 'year' column is numeric.
        data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)
        self.baselinefile = file
        # BUG FIX: was `self.metdata = meta,` — the stray trailing comma
        # wrapped the metadata dict in a 1-tuple (Material stores a plain dict).
        self.metdata = meta
        self.data = data

    def addMaterial(self, materialname, file=None):
        """Load one material baseline CSV and register it under `materialname`."""
        self.material[materialname] = Material(materialname, file)

    def addMaterials(self, materials, baselinefolder=None, nameformat=None):
        """
        Load several material baselines at once.

        materials : list of material names; each resolves to
            baselinefolder + nameformat.format(name).
        baselinefolder, nameformat : defaults point at the repo's
            ..\\..\\baselines\\baseline_material_<name>.csv layout.
        """
        if baselinefolder is None:
            baselinefolder = r'..\..\baselines'
        if nameformat is None:
            nameformat = r'\baseline_material_{}.csv'
        for mat in materials:
            filemat = baselinefolder + nameformat.format(mat)
            self.material[mat] = Material(mat, filemat)

    def modifyMaterials(self, materials, stage, value, start_year=None):
        """
        Set `stage` to `value` for years strictly after `start_year`
        (default: current year) on the selected materials' data.
        """
        if start_year is None:
            start_year = int(datetime.datetime.now().year)
        if materials is None:
            materials = list(self.material.keys())
        elif isinstance(materials, str):
            materials = [materials]
        selectyears = self.data['year'] > start_year
        for mat in materials:
            self.material[mat].materialdata.loc[selectyears, stage] = value

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        # BUG FIX: previously declared as __setitem__(self, key) with no
        # value parameter, so item assignment always raised TypeError.
        return setattr(self, key, value)
class Material:
    """
    Baseline data for one material: self.materialdata (DataFrame) and
    self.materialmetdata (dict mapping column name -> metadata/units).
    """

    def __init__(self, materialname, file):
        """
        materialname : str — material label.
        file : str or None — path to the material baseline CSV (header row
            of column names, second row of metadata/units, then data rows).
            If None, an interactive file picker is opened.
        """
        self.materialname = materialname

        if file is None:
            try:
                file = _interactive_load('Select material baseline file')
            except Exception:
                # BUG FIX: the two literals concatenated without a space,
                # producing "supportedon this".
                raise Exception('Interactive load failed. Tkinter not supported '
                                'on this system. Try installing X-Quartz and reloading')

        # BUG FIX: the file was previously opened twice (utf-8, then
        # utf-8-sig), leaking the first handle and never closing the second.
        # Open once with a context manager; utf-8-sig also handles a BOM.
        with open(str(file), 'r', encoding="UTF-8-sig") as csvdata:
            firstline = csvdata.readline()
            secondline = csvdata.readline()
            head = firstline.rstrip('\n').split(",")
            meta = dict(zip(head, secondline.rstrip('\n').split(",")))
            data = pd.read_csv(csvdata, names=head)

        # Everything except the 'year' column is numeric.
        data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)
        self.materialfile = file
        self.materialmetdata = meta
        self.materialdata = data
def weibull_params(keypoints):
    r'''
    Fit Weibull shape (`alpha`) and scale (`beta`) parameters so that the
    CDF passes through two {time: probability} keypoints.

    Parameters
    ----------
    keypoints : dict
        Two entries, e.g. {t50: 0.50, t90: 0.90}, where t50/t90 are the
        years since deployment at which the cohort has lost 50% / 90% of
        its originally installed modules.

    Returns
    -------
    dict
        {'alpha': shape, 'beta': scale}. beta is comparable to a lifetime
        (e.g. beta = 30 in IRENA 2016).
    '''
    t1, t2 = tuple(keypoints.keys())
    cdf1, cdf2 = tuple(keypoints.values())

    # Work in the complex plane: log(log(1 - cdf)) has a negative inner
    # log, so +0j keeps numpy from returning nan; the imaginary parts
    # cancel in the final alpha and vanish under abs() for beta.
    loglog1 = np.log(np.log(1 - cdf1) + 0j)
    loglog2 = np.log(np.log(1 - cdf2) + 0j)
    logt1 = np.log(t1)
    logt2 = np.log(t2)

    alpha = np.ndarray.item(np.real_if_close(
        (loglog1 - loglog2) / (logt1 - logt2)
    ))
    beta = np.abs(np.exp(
        (logt2 * (1j * np.pi + loglog1) + logt1 * (-1j * np.pi - loglog2))
        / (loglog1 - loglog2)
    ))
    return {'alpha': alpha, 'beta': beta}
def weibull_cdf(alpha, beta):
    '''
    Build the CDF of a Weibull distribution.

    Parameters
    ----------
    alpha : float
        Shape parameter.
    beta : float
        Scale parameter; comparable to a lifetime (e.g. beta = 30 in
        IRENA 2016).

    Returns
    -------
    callable
        Maps x (scalar or array-like, years) to F(x) = 1 - exp(-(x/beta)**alpha).
    '''
    def cdf(x):
        scaled = np.array(x) / beta
        return 1 - np.exp(-(scaled ** alpha))
    return cdf
def weibull_pdf(alpha, beta):
    r'''
    Build the PDF of a Weibull distribution.

    Parameters
    ----------
    alpha : float
        Shape parameter.
    beta : float
        Scale parameter; comparable to a lifetime (e.g. beta = 30 in
        IRENA 2016).

    Returns
    -------
    callable
        Maps x (scalar or array-like, years) to
        f(x) = (alpha/x) * (x/beta)**alpha * exp(-(x/beta)**alpha).
    '''
    def pdf(x):
        xv = np.array(x)
        z = (xv / beta) ** alpha
        return alpha * z / xv * np.exp(-z)
    return pdf
def weibull_pdf_vis(alpha, beta, xlim=56):
    r'''
    Evaluate the Weibull PDF of one generation over years 0..xlim-1, for
    plotting.

    Parameters
    ----------
    alpha : float
        Shape parameter.
    beta : float
        Scale parameter; comparable to a lifetime (e.g. beta = 30 in
        IRENA 2016).
    xlim : int
        Number of years to evaluate, i.e. the x-axis limit.

    Returns
    -------
    idf : list
        PDF values for years 0 through xlim-1.
    '''
    years = np.clip(pd.RangeIndex(0, xlim, 1) - 0, 0, np.inf)
    if alpha and beta:
        pdf_fn = weibull_pdf(alpha, beta)
        idf = [pdf_fn(year) for year in years]
    # NOTE(review): if alpha or beta is falsy, `idf` is never bound and the
    # return below raises NameError — pre-existing behavior, preserved.
    return idf
def weibull_cdf_vis(alpha, beta, xlim=56):
    r'''Return the Weibull cumulative distribution values for one generation,
    evaluated at yearly steps so it can be plotted.

    Parameters
    ----------
    alpha : float
        Shape parameter `alpha` for weibull distribution.
    beta : float
        Scale parameter `beta` for weibull distribution. Often exchanged with
        ``lifetime`` like in Irena 2016, beta = 30.
    xlim : int
        Number of years to calculate the distribution for. i.e. x-axis limit.

    Returns
    -------
    idf : list
        List of weibull cumulative distribution values for year 0 until xlim.
    '''
    years = pd.RangeIndex(0, xlim, 1)
    x = np.clip(years - 0, 0, np.inf)
    if alpha and beta:
        cdf = weibull_cdf(alpha, beta)
        idf = [cdf(year) for year in x]
    return idf
def sens_StageImprovement(df, stage, improvement=1.3, start_year=None):
    '''
    Modify a baseline scenario to evaluate sensitivity of a stage parameter.

    All values of column ``stage`` for years strictly greater than
    ``start_year`` are multiplied by ``improvement``. The dataframe is
    modified in place and also returned.

    Parameters
    ----------
    df : dataframe
        Dataframe to be modified, indexed by year.
    stage : str
        Stage that wants to be modified. This can be any of the module or
        material specified values, for example: 'MFG_Material_eff',
        'mat_MFG_scrap_recycled', 'mat_MFG_scrap_Recycled',
        'mat_MFG_scrap_Recycled_into_HQ', 'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG',
        'mod_EOL_collection_losses', 'mod_EOL_collected_recycled',
        'mat_EOL_Recycling_eff', 'mat_EOL_Recycled_into_HQ',
        'mat_EOL_RecycledHQ_Reused4MFG', 'mod_Repair',
        'mod_MerchantTail', 'mod_Reuse', 'mod_eff', etc.
    improvement : decimal
        Percent increase in decimal (i.e. "1.3" for 30% increase in value)
        or percent decrease (i.e. "0.3") relative to values in df.
    start_year : int, optional
        The year after which the improvement occurs. Defaults to the
        current calendar year.

    Returns
    -------
    df : dataframe
        Dataframe with ``stage`` scaled for all years > ``start_year``.
    '''
    if start_year is None:
        start_year = int(datetime.datetime.now().year)
    df[stage] = df[stage].astype(float)
    # Single .loc selection scales in place; avoids the chained
    # select-then-apply of the previous implementation.
    df.loc[df.index > start_year, stage] *= improvement
    return df
def sens_StageEfficiency(df, stage, target_eff = 95.0, start_year = None,
                         goal_year = 2030, plotflag = False):
    '''
    Modify a baseline scenario to evaluate sensitivity to a stage efficiency.

    Interpolates column ``stage`` from its value at ``start_year`` up (or
    down) to ``target_eff`` by ``goal_year``; years at or beyond
    ``goal_year`` are pinned to ``target_eff``. The input dataframe is not
    modified; a modified copy is returned.

    Parameters
    ----------
    df : dataframe
        Dataframe to be modified, indexed by year.
    stage : str
        Column to modify, e.g. 'MFG_Material_eff', 'mat_EOL_Recycling_eff',
        'mod_Repair', 'mod_Reuse', 'mod_eff', etc.
    target_eff : float
        Target efficiency value in percentage to be reached, i.e. 95.0 %.
    start_year : int, optional
        Year whose existing value anchors the interpolation. Defaults to
        the current calendar year.
    goal_year : int
        Year by which the target efficiency is reached; must not precede
        ``start_year``.
    plotflag : bool
        When True, plot original vs. modified values.

    Returns
    -------
    dataframe or None
        Modified copy of ``df``, or None on invalid input (a message is
        printed).
    '''
    if start_year is None:
        start_year = int(datetime.datetime.now().year)
    # Guard clauses: bail out early on invalid input, matching the
    # original printed messages exactly.
    if start_year > goal_year:
        print("Error. Goal Year is before start year")
        return
    if 0 < abs(target_eff) < 1:  # looks like a decimal fraction, not a %
        print("Warning: target_eff value is between 0 and 1; it has been"
              "multiplied by 100% assuming it was a percentage in decimal form.")
        target_eff = target_eff*100
    if target_eff > 100 or target_eff < 0:
        print("Warning: target_eff is out of range. Input value between"
              "0 and 100")
        return
    if stage not in df.columns:
        print("Stage name incorrect.")
        return
    updated = df.copy()
    updated[stage] = updated[stage].astype(float)
    # Blank out the transition years, pin the goal, then fill linearly.
    between = (updated.index > start_year) & (updated.index < goal_year)
    updated.loc[between, stage] = np.nan
    updated.loc[updated.index >= goal_year, stage] = target_eff
    updated[stage] = updated[stage].interpolate()
    if plotflag:
        plt.plot(df[stage], label='Original')
        plt.plot(updated[stage], label='Modified')
        plt.title('Updated values for '+stage)
        plt.legend()
    return updated
def _modDict(originaldict, moddict):
'''
Compares keys in originaldict with moddict and updates values of
originaldict to moddict if existing.
Parameters
----------
originaldict : dictionary
Original dictionary calculated, for example frontscan or backscan dictionaries.
moddict : dictionary
Modified dictinoary, for example modscan['x'] = 0 to change position of x.
Returns
-------
originaldict : dictionary
Updated original dictionary with values from moddict.
'''
for key in moddict:
try:
originaldict[key] = moddict[key]
except:
print("Wrong key in modified dictionary")
return originaldict
def calculateLCA(PVarea, modified_impacts=None, printflag = False):
    '''
    Scale TRACI 2.1 life-cycle impact results for silicon PV panels by a
    PV area.

    Each per-m2 impact result is multiplied by ``PVarea``. (Previously this
    function had an empty docstring and ten near-identical compute/print
    lines; they are now driven by one explicit key order.)

    Parameters
    ----------
    PVarea : float
        PV area in m2 to scale the per-unit impact results by.
    modified_impacts : dict, optional
        Same structure as the built-in TRACI 2.1 table below
        ({name: {'UUID', 'Result', 'Unit'}}); overrides matching entries.
    printflag : bool
        When True, print the impact table and the scaled results.

    Returns
    -------
    tuple of float
        (acidification, carcinogenics, ecotoxicity, eutrophication,
        fossil_fuel_depletion, global_warming, non_carcinogenics,
        ozone_depletion, respiratory_effects, smog), each scaled by PVarea.
    '''
    if printflag:
        print("Doing calculations of LCA analysis for Silicon Photovoltaic Panels")
    # TRACI 2.1 default impacts per m2 of silicon PV panel.
    impacts = {'Acidification':{'UUID':  '75d0c8a2-e466-3bd7-813b-5beef2209330',
                                'Result':  1.29374135667815,
                                'Unit': 'kg SO2' },
            'Carcinogenics':{'UUID':  'a6e5e5d8-a1e5-3c77-8170-586c4fe37514',
                                'Result':  0.0000231966690476102,
                                'Unit': 'CTUh' },
            'Ecotoxicity':{'UUID':  '338e9370-ceb0-3d18-9d87-5f91feb7829c',
                                'Result':  5933.77859696668,
                                'Unit': 'CTUe' },
            'Eutrophication':{'UUID':  '45b8cd56-498a-3c6f-9488-134e951d8c02',
                                'Result':  1.34026194777363,
                                'Unit': 'kg N eq' },
            'Fossil fuel depletion':{'UUID':  '0e45786f-67fa-3b8a-b8a3-73a7c316434c',
                                'Result':  249.642261689385,
                                'Unit': 'MJ surplus' },
            'Global warming':{'UUID':  '31967441-d687-313d-9910-13da3a584ab7',
                                'Result':  268.548841324818,
                                'Unit': 'kg CO2 eq' },
            'Non carcinogenics':{'UUID':  'd4827ae3-c873-3ea4-85fb-860b7f3f2dee',
                                'Result':  0.000135331806321799,
                                'Unit': 'CTUh' },
            'Ozone depletion':{'UUID':  '6c05dad1-6661-35f2-82aa-6e8e6a498aec',
                                'Result':  0.0000310937628622019,
                                'Unit': 'kg CFC-11 eq' },
            'Respiratory effects':{'UUID':  'e0916d62-7fbd-3d0a-a4a5-52659b0ac9c1',
                                'Result':  0.373415542664206,
                                'Unit': 'kg PM2.5 eq' },
            'Smog':{'UUID':  '7a149078-e2fd-3e07-a5a3-79035c60e7c3',
                                'Result':  15.35483065,
                                'Unit': 'kg O3 eq' },
            }
    if modified_impacts is not None:
        impacts = _modDict(impacts, modified_impacts)
        if printflag:
            print("Following Modified impacts provided instead of TRACI 2.1 default")
            print(impacts)
            print("")
    else:
        if printflag:
            print("Following TRACI 2.1")
    # Explicit key order fixes the return-tuple order independently of any
    # keys a modified_impacts dict might (incorrectly) add.
    order = ('Acidification', 'Carcinogenics', 'Ecotoxicity', 'Eutrophication',
             'Fossil fuel depletion', 'Global warming', 'Non carcinogenics',
             'Ozone depletion', 'Respiratory effects', 'Smog')
    scaled = {name: impacts[name]['Result']*PVarea for name in order}
    if printflag:
        print("RESULTS FOR PV AREA ", PVarea, " m2 ")
        print("****************************************")
        for name in order:
            print(name + ': ', round(scaled[name], 2), ' ', impacts[name]['Unit'])
    return tuple(scaled[name] for name in order)
import numpy as np
import pandas as pd
import datetime
import os
import matplotlib.pyplot as plt
def read_baseline_material(scenario, material='None', file=None):
    """Resolve the baseline material file path, prompting with a file dialog
    when no path is given.

    NOTE(review): `scenario` and `material` are accepted but unused in this
    body, and the resolved `file` is never returned or read here — the
    function appears truncated in this copy; confirm against the full source.
    """
    if file is None:
        try:
            file = _interactive_load('Select baseline file')
        except:
            raise Exception('Interactive load failed. Tkinter not supported'+
                            'on this system. Try installing X-Quartz and reloading')
def _interactive_load(title=None):
    """Open a Tkinter file-open dialog (kept on top of other windows) and
    return the selected file path."""
    import tkinter
    from tkinter import filedialog
    window = tkinter.Tk()
    window.withdraw()                       # hide the empty root window
    window.attributes("-topmost", True)     # keep the dialog in front
    return filedialog.askopenfilename(parent=window, title=title)
def _unitReferences(keyword):
moduleDictionary = {'year': {'unit': 'Years', 'source': 'input'},
'new_Installed_Capacity_[MW]': {'unit': 'Power [MW]', 'source':'input'},
'mod_eff': {'unit': 'Efficiency $\eta$ [%]', 'source':'input'},
'mod_reliability_t50': {'unit': 'Years' , 'source':'input'},
'mod_reliability_t90': {'unit': 'Years', 'source':'input'},
'mod_degradation': {'unit': 'Percentage [%]', 'source':'input'},
'mod_lifetime': {'unit': 'Years', 'source':'input'},
'mod_MFG_eff': {'unit': 'Efficiency $\eta$ [%]', 'source':'input'},
'mod_EOL_collection_eff': {'unit': 'Efficiency $\eta$ [%]', 'source':'input'},
'mod_EOL_collected_recycled': {'unit': 'Percentage [%]', 'source':'input'},
'mod_Repair': {'unit': 'Percentage [%]', 'source':'input'},
'mod_MerchantTail': {'unit': 'Percentage [%]', 'source':'input'},
'mod_Reuse': {'unit': 'Percentage [%]', 'source':'input'},
'Area': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Area_disposedby_Failure': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Area_disposedby_ProjectLifetime': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Area_disposed': {'unit': 'm$^2$', 'source': 'generated'},
'Cumulative_Active_Area': {'unit': 'm$^2$', 'source': 'generated'},
'Installed_Capacity_[W]': {'unit': 'Power [W]', 'source': 'generated'},
'EOL_on_Year_0': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_1': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_2': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_3': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_4': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_5': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_6': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_7': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_8': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_9': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_10': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_11': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_12': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_13': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_14': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_15': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_16': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_17': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_18': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_19': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_20': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_21': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_22': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_23': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_24': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_25': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_26': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_27': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_28': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_29': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_30': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_31': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_32': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_33': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_34': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_35': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_36': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_37': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_38': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_39': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_40': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_41': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_42': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_43': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_44': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_45': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_46': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_47': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_48': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_49': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_50': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_51': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_52': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_53': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_54': {'unit': 'm$^2$', 'source': 'generated'},
'EOL_on_Year_55': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_Collected': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_NotCollected': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_Recycled': {'unit': 'm$^2$', 'source': 'generated'},
'EoL_NotRecycled_Landfilled': {'unit': 'm$^2$', 'source': 'generated'}
}
materialDictionary={'year': {'unit': 'Years', 'source': 'input'},
'mat_virgin_eff': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_massperm2': {'unit': 'Mass [g]', 'source': 'input'},
'mat_MFG_eff': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_MFG_scrap_recycled': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_MFG_scrap_Recycled': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_MFG_scrap_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_MFG_scrap_Recycled_into_HQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_EOL_collected_Recycled': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_EOL_Recycling_eff': {'unit': 'Efficiency $\eta$ [%]', 'source': 'input'},
'mat_EOL_Recycled_into_HQ': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_EOL_RecycledHQ_Reused4MFG': {'unit': 'Percentage [%]', 'source': 'input'},
'mat_modules_NotRecycled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_modules_NotCollected': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_sento_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_NotRecycled_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_2_HQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_2_OQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EoL_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_EOL_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_UsedinManufacturing': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Manufacturing_Input': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Sentto_Recycling': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Recycled_Successfully': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Scrap_Recycled_Losses_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_into_HQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_into_OQ': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_HQ_into_MFG': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_MFG_Recycled_HQ_into_OU': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Virgin_Stock': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_EOL_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_MFG_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_Landfilled': {'unit': 'Mass [g]', 'source': 'generated'},
'mat_Total_Recycled_OU': {'unit': 'Mass [g]', 'source': 'generated'}
}
if keyword in moduleDictionary.keys():
yunits = moduleDictionary[keyword]['unit']
elif keyword in materialDictionary.keys():
yunits = materialDictionary[keyword]['unit']
else:
print("Warning: Keyword / Units not Found")
yunits = 'UNITS'
return yunits
def distance(s_lat, s_lng, e_lat, e_lng):
    """Great-circle (haversine) distance in kilometres between two points
    given in decimal degrees.

    Parameters
    ----------
    s_lat, s_lng : float or array-like
        Start latitude / longitude in degrees.
    e_lat, e_lng : float or array-like
        End latitude / longitude in degrees.

    Returns
    -------
    float or ndarray
        Distance(s) along the Earth's surface in kilometres.
    """
    R = 6373.0  # approximate Earth radius [km]
    s_lat = np.deg2rad(s_lat)
    s_lng = np.deg2rad(s_lng)
    e_lat = np.deg2rad(e_lat)
    e_lng = np.deg2rad(e_lng)
    # Haversine of the central angle between the two points.
    d = np.sin((e_lat - s_lat)/2)**2 + np.cos(s_lat)*np.cos(e_lat) * np.sin((e_lng - s_lng)/2)**2
    # Renamed from `distance` so the result no longer shadows the function.
    dist_km = 2 * R * np.arcsin(np.sqrt(d))
    return dist_km
def drivingdistance(origin, destination, APIkey):
    """Build a Google Maps Directions API request URL (XML output) for a
    route between two (lat, lon) pairs.

    Parameters
    ----------
    origin, destination : tuple of float
        (latitude, longitude) of the route endpoints.
    APIkey : str
        Google Maps API key.

    Returns
    -------
    str
        The request URL (not fetched here).
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    return ('https://maps.googleapis.com/maps/api/directions/xml?'
            f'origin={lat1},{lon1}'
            f'&destination={lat2},{lon2}'
            f'&key={APIkey}')
class Simulation:
def __init__(self, name=None, path=None):
self.path = ""
self.name = ""
now = datetime.datetime.now()
self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)
if path is None:
self._setPath(os.getcwd())
else:
self._setPath(path)
if name is None:
self.name = self.nowstr
else:
self.name = name
self.scenario={}
    def _setPath(self, path):
        """Normalize ``path``, store it on the instance, and chdir into it.

        NOTE(review): the except branch logs via ``LOGGER``, which is not
        defined in this portion of the file — confirm it exists at module
        level, otherwise failure here raises NameError instead of OSError.
        """
        self.path = os.path.abspath(path)
        print('path = '+ path)
        try:
            os.chdir(self.path)
        except OSError as exc:
            # NOTE(review): adjacent string literals concatenate, so this
            # message reads "Path doesnt exist: ..." (missing apostrophe).
            LOGGER.error('Path doesn''t exist: %s' % (path))
            LOGGER.exception(exc)
            raise(exc)
def _checkPath(path):
if not os.path.exists(path):
os.makedirs(path)
print('Making path: '+path)
    def createScenario(self, name, file=None):
        """Register a new ``Scenario`` (a class defined elsewhere in this
        module) under ``name``; ``file`` is forwarded to its loader."""
        self.scenario[name] = Scenario(name, file)
    def modifyScenario(self, scenarios, stage, value, start_year=None):
        """Set column ``stage`` to ``value`` for all rows with
        ``year > start_year`` in the selected scenarios.

        ``scenarios`` may be None (all scenarios), a single scenario name,
        or a list of names. ``start_year`` defaults to the current year.
        """
        if start_year is None:
            start_year = int(datetime.datetime.now().year)
        if scenarios is None:
            scenarios = list(self.scenario.keys())
        else:
            if isinstance(scenarios, str):
                scenarios = [scenarios]
        # NOTE(review): the year mask is built from the FIRST scenario's data
        # and reused for every scenario; if scenarios have different row
        # indices the .loc assignment may misalign — confirm all scenarios
        # share the same years.
        selectyears = self.scenario[scenarios[0]].data['year']>start_year
        for scen in scenarios:
            self.scenario[scen].data.loc[selectyears, stage] = value
    def calculateMassFlow(self, scenarios = None, materials=None, weibullInputParams = None,
                          bifacialityfactors = None, reducecapacity = True, debugflag=False):
        """Run the yearly module-area / power / end-of-life mass-flow
        calculation for the selected scenarios and their materials.

        WARNING(review): this copy of the method contains two truncated,
        syntactically invalid lines (flagged below) where source text was
        lost — restore them from the original file before running.

        Parameters
        ----------
        scenarios : None, str or list
            Scenario name(s); None selects all registered scenarios.
        materials : list, optional
            Material names per scenario (selection code is in the corrupted
            region — confirm against the full source).
        weibullInputParams : dict, optional
            {'alpha': ..., 'beta': ...}; when given, overrides per-row
            weibull columns and the t50/t90-derived parameters.
        bifacialityfactors : str, optional
            Path to a CSV with a 'bifi' column used to raise the STC
            irradiance above 1000 W/m^2.
        reducecapacity : bool
            When True, module area is computed against the (possibly
            bifacial-boosted) 'irradiance_stc' instead of a flat 1000 W/m^2.
        debugflag : bool
            Unused in the visible body.
        """
        if scenarios is None:
            scenarios = list(self.scenario.keys())
        else:
            if isinstance(scenarios, str):
                scenarios = [scenarios]
        for scen in scenarios:
            print("Working on Scenario: ", scen)
            print("********************")
            df = self.scenario[scen].data
            # Constant
            if bifacialityfactors is not None:
                bf = pd.read_csv(bifacialityfactors)
                df['irradiance_stc'] = 1000.0 + bf['bifi']*100.0 # W/m^2 (min. Bifacial STC Increase)
            else:
                df['irradiance_stc'] = 1000.0 # W/m^2
            # Renaming and re-scaling
            df['t50'] = df['mod_reliability_t50']
            df['t90'] = df['mod_reliability_t90']
            # Calculating Area and Mass
            if 'Mass_[MetricTonnes]' in df:
                df['new_Installed_Capacity_[W]'] = 0
                df['new_Installed_Capacity_[MW]'] = 0
                df['Area'] = df['Mass_[MetricTonnes]']
                print("Warning, this is for special debuging of Wambach Procedure."+
                      "Make sure to use Wambach Module")
            else:
                df['new_Installed_Capacity_[W]'] = df['new_Installed_Capacity_[MW]']*1e6
                if reducecapacity:
                    df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/df['irradiance_stc'] # m^2
                else:
                    df['Area'] = df['new_Installed_Capacity_[W]']/(df['mod_eff']*0.01)/1000.0 # m^2
            df['Area'] = df['Area'].fillna(0) # Chagne na's to 0s.
            # Per-generation accumulators (one list entry per install year).
            Generation_Disposed_byYear = []
            Generation_Active_byYear= []
            Generation_Power_byYear = []
            weibullParamList = []
            df['Cumulative_Area_disposedby_Failure'] = 0
            df['Cumulative_Area_disposedby_ProjectLifetime'] = 0
            df['Cumulative_Area_disposed'] = 0
            df['Repaired_[W]'] = 0
            df['Repaired_Area'] = 0
            df['Cumulative_Active_Area'] = 0
            df['Installed_Capacity_[W]'] = 0
            for generation, row in df.iterrows():
                # Weibull parameter precedence: explicit argument >
                # per-row weibull columns > fit from t50/t90.
                if weibullInputParams:
                    weibullIParams = weibullInputParams
                elif 'weibull_alpha' in row:
                    weibullIParams = {'alpha': row['weibull_alpha'], 'beta': row['weibull_beta']}
                else:
                    t50, t90 = row['t50'], row['t90']
                    weibullIParams = weibull_params({t50: 0.50, t90: 0.90})
                f = weibull_cdf(weibullIParams['alpha'], weibullIParams['beta'])
                weibullParamList.append(weibullIParams)
                # Age of this generation in each data year (0 before install).
                x = np.clip(df.index - generation, 0, np.inf)
                cdf = list(map(f, x))
                # Yearly failure fraction as the CDF increment.
                pdf = [0] + [j - i for i, j in zip(cdf[: -1], cdf[1 :])]
                activearea = row['Area']
                if np.isnan(activearea):
                    activearea=0
                activeareacount = []
                areadisposed_failure = []
                areadisposed_projectlifetime = []
                arearepaired = []
                arearepaired_powergen = []
                areapowergen = []
                active=0
                disposed_projectlifetime=0
                for age in range(len(cdf)):
                    disposed_projectlifetime=0
                    if x[age] == 0.0:
                        # Generation not installed yet: everything is zero.
                        activeareacount.append(0)
                        areadisposed_failure.append(0)
                        areadisposed_projectlifetime.append(0)
                        areapowergen.append(0)
                        arearepaired.append(0)
                        arearepaired_powergen.append(0)
                    else:
                        active += 1
                        activeareaprev = activearea
                        # Remove failed area, add back the repaired share.
                        activearea = activearea-row['Area']*pdf[age]+row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01
                        arearepaired_failure = row['Area']*pdf[age]*df.iloc[age]['mod_Repair']*0.01
                        arearepaired.append(arearepaired_failure)
                        arearepaired_powergen.append(arearepaired_failure*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active)
                        areadisposed_failure.append(activeareaprev-activearea)
                        if age == int(row['mod_lifetime']+generation):
                            # Project lifetime reached: keep merchant-tail and
                            # reused shares active, dispose of the rest.
                            activearea_temp = activearea
                            activearea = 0+activearea*(df.iloc[age]['mod_MerchantTail']*0.01)
                            disposed_projectlifetime = activearea_temp-activearea
                            activearea2 = 0+disposed_projectlifetime*(df.iloc[age]['mod_Reuse']*0.01)
                            activearea = activearea + activearea2
                            disposed_projectlifetime = disposed_projectlifetime - activearea2
                        areadisposed_projectlifetime.append(disposed_projectlifetime)
                        activeareacount.append(activearea)
                        areapowergen.append(activearea*row['mod_eff']*0.01*row['irradiance_stc']*(1-row['mod_degradation']*0.01)**active)
                try:
                    # Credit the install-year area/power in the year before
                    # the first nonzero age.
                    fixinitialareacount = next((i for i, e in enumerate(x) if e), None) - 1
                    activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area']
                    areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] +
                                                         row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc'])
                except:
                    # Generation installed on the last data year.
                    fixinitialareacount = len(cdf)-1
                    activeareacount[fixinitialareacount] = activeareacount[fixinitialareacount]+row['Area']
                    areapowergen[fixinitialareacount] = (areapowergen[fixinitialareacount] +
                                                         row['Area'] * row['mod_eff'] *0.01 * row['irradiance_stc'])
                print("Finished Area+Power Generation Calculations")
                df['Cumulative_Area_disposedby_Failure'] += areadisposed_failure
                df['Cumulative_Area_disposedby_ProjectLifetime'] += areadisposed_projectlifetime
                df['Cumulative_Area_disposed'] += areadisposed_failure
                df['Cumulative_Area_disposed'] += areadisposed_projectlifetime
                df['Repaired_[W]'] += arearepaired_powergen
                df['Repaired_Area'] += arearepaired
                df['Cumulative_Active_Area'] += activeareacount
                df['Installed_Capacity_[W]'] += areapowergen
                Generation_Disposed_byYear.append([x + y for x, y in zip(areadisposed_failure, areadisposed_projectlifetime)])
                Generation_Active_byYear.append(activeareacount)
                Generation_Power_byYear.append(areapowergen)
            df['WeibullParams'] = weibullParamList
            MatrixDisposalbyYear = pd.DataFrame(Generation_Disposed_byYear, columns = df.index, index = df.index)
            MatrixDisposalbyYear = MatrixDisposalbyYear.add_prefix("EOL_on_Year_")
            try:
                df = df[df.columns.drop(list(df.filter(regex='EOL_on_Year_')))]
            except:
                print("Warning: Issue dropping EOL columns generated by " \
                      "calculateMFC routine to overwrite")
            df = df.join(MatrixDisposalbyYear)
            # WARNING(review): the next line is a truncated fragment (its
            # beginning — likely "EOL_Collected = MatrixDisposalbyYear.mul(df['mod_E" —
            # was lost); restore from the original source.
        OL_collection_eff'].values*0.01)
            df['EoL_Collected'] = list(EOL_Collected.sum())
            landfill_Collection = EOL.mul(1-(df['mod_EOL_collection_eff'].values*0.01))
            df['EoL_NotCollected'] = list(landfill_Collection.sum())
            EOL_Recycled = EOL_Collected.mul(df['mod_EOL_collected_recycled'].values*0.01)
            df['EoL_Recycled'] = list(EOL_Recycled.sum())
            EOL_NotRecycled_Landfilled = EOL_Collected.mul((1-df['mod_EOL_collected_recycled'].values*0.01))
            df['EoL_NotRecycled_Landfilled'] = list(EOL_NotRecycled_Landfilled.sum())
            df.drop(['new_Installed_Capacity_[W]', 't50', 't90'], axis = 1, inplace=True)
            df['ModuleTotal_MFG']=df['Area']*100/df['mod_MFG_eff']
            self.scenario[scen].data = df
            # WARNING(review): the next line is another truncated fragment
            # (the materials-selection code that preceded it was lost);
            # restore from the original source.
        erials]
            for mat in materials:
                print("==> Working on Material : ", mat)
                dm = self.scenario[scen].material[mat].materialdata
                # Mass (g) of this material in end-of-life modules per year.
                mat_modules_EOL_sentoRecycling = EOL_Recycled.multiply(dm['mat_massperm2'], axis=0)
                dm['mat_modules_Collected'] = list(EOL_Collected.multiply(dm['mat_massperm2'], axis=0).sum())
                dm['mat_modules_NotCollected'] = list(landfill_Collection.multiply(dm['mat_massperm2'], axis=0).sum())
                dm['mat_modules_Recycled'] = list(EOL_Recycled.multiply(dm['mat_massperm2'], axis=0).sum())
                dm['mat_modules_NotRecycled'] = list(EOL_NotRecycled_Landfilled.multiply(dm['mat_massperm2'], axis=0).sum())
                mat_EOL_sento_Recycling = mat_modules_EOL_sentoRecycling.mul(dm['mat_EOL_collected_Recycled'].values*0.01)
                dm['mat_EOL_sento_Recycling'] = list(mat_EOL_sento_Recycling.sum())
                landfill_material_EOL_NotRecycled_Landfilled = mat_modules_EOL_sentoRecycling.mul(1-(dm['mat_EOL_collected_Recycled'].values*0.01))
                dm['mat_EOL_NotRecycled_Landfilled'] = list(landfill_material_EOL_NotRecycled_Landfilled.sum())
                mat_EOL_Recycled_Succesfully = mat_EOL_sento_Recycling.mul(dm['mat_EOL_Recycling_eff'].values*0.01)
                dm['mat_EOL_Recycled'] = list(mat_EOL_Recycled_Succesfully.sum())
                landfill_material_EOL_Recyled_Losses_Landfilled = mat_EOL_sento_Recycling.mul(1-(dm['mat_EOL_Recycling_eff'].values*0.01))
                dm['mat_EOL_Recycled_Losses_Landfilled'] = list(landfill_material_EOL_Recyled_Losses_Landfilled.sum())
                mat_EOL_Recycled_HQ = mat_EOL_Recycled_Succesfully.mul(dm['mat_EOL_Recycled_into_HQ'].values*0.01)
                dm['mat_EOL_Recycled_2_HQ'] = list(mat_EOL_Recycled_HQ.sum())
                mat_EOL_Recycled_OQ = mat_EOL_Recycled_Succesfully.mul(1-(dm['mat_EOL_Recycled_into_HQ'].values*0.01))
                dm['mat_EOL_Recycled_2_OQ'] = list(mat_EOL_Recycled_OQ.sum())
                mat_EOL_Recycled_HQ_into_MFG = mat_EOL_Recycled_HQ.mul(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01)
                dm['mat_EoL_Recycled_HQ_into_MFG'] = list(mat_EOL_Recycled_HQ_into_MFG.sum())
                mat_EOL_Recycled_HQ_into_OU = mat_EOL_Recycled_HQ.mul(1-(dm['mat_EOL_RecycledHQ_Reused4MFG'].values*0.01))
                dm['mat_EOL_Recycled_HQ_into_OU'] = list(mat_EOL_Recycled_HQ_into_OU.sum())
                # Manufacturing-side mass balance.
                dm['mat_UsedSuccessfullyinModuleManufacturing'] = (df['Area'] * dm['mat_massperm2'])
                dm['mat_EnteringModuleManufacturing'] = (df['Area'] * dm['mat_massperm2']*100/df['mod_MFG_eff'])
                dm['mat_LostinModuleManufacturing'] = dm['mat_EnteringModuleManufacturing'] - dm['mat_UsedSuccessfullyinModuleManufacturing']
                dm['mat_Manufacturing_Input'] = dm['mat_EnteringModuleManufacturing'] / (dm['mat_MFG_eff'] * 0.01)
                dm['mat_MFG_Scrap'] = (dm['mat_Manufacturing_Input'] - dm['mat_EnteringModuleManufacturing'] +
                                       dm['mat_LostinModuleManufacturing'])
                dm['mat_MFG_Scrap_Sentto_Recycling'] = dm['mat_MFG_Scrap'] * dm['mat_MFG_scrap_Recycled'] * 0.01
                dm['mat_MFG_Scrap_Landfilled'] = dm['mat_MFG_Scrap'] - dm['mat_MFG_Scrap_Sentto_Recycling']
                dm['mat_MFG_Scrap_Recycled_Successfully'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] *
                                                            dm['mat_MFG_scrap_Recycling_eff'] * 0.01)
                dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'] = (dm['mat_MFG_Scrap_Sentto_Recycling'] -
                                                                  dm['mat_MFG_Scrap_Recycled_Successfully'])
                dm['mat_MFG_Recycled_into_HQ'] = (dm['mat_MFG_Scrap_Recycled_Successfully'] *
                                                  dm['mat_MFG_scrap_Recycled_into_HQ'] * 0.01)
                dm['mat_MFG_Recycled_into_OQ'] = dm['mat_MFG_Scrap_Recycled_Successfully'] - dm['mat_MFG_Recycled_into_HQ']
                dm['mat_MFG_Recycled_HQ_into_MFG'] = (dm['mat_MFG_Recycled_into_HQ'] *
                                                      dm['mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'] * 0.01)
                dm['mat_MFG_Recycled_HQ_into_OU'] = dm['mat_MFG_Recycled_into_HQ'] - dm['mat_MFG_Recycled_HQ_into_MFG']
                # Virgin material demand net of closed-loop recycling.
                dm['mat_Virgin_Stock'] = dm['mat_Manufacturing_Input'] - dm['mat_EoL_Recycled_HQ_into_MFG'] - dm['mat_MFG_Recycled_HQ_into_MFG']
                dm['mat_Virgin_Stock_Raw'] = (dm['mat_Virgin_Stock'] * 100 / dm['mat_virgin_eff'])
                # Landfill and open-loop totals.
                dm['mat_Total_EOL_Landfilled'] = (dm['mat_modules_NotCollected'] +
                                                  dm['mat_modules_NotRecycled'] +
                                                  dm['mat_EOL_NotRecycled_Landfilled'] +
                                                  dm['mat_EOL_Recycled_Losses_Landfilled'])
                dm['mat_Total_MFG_Landfilled'] = (dm['mat_MFG_Scrap_Landfilled'] +
                                                  dm['mat_MFG_Scrap_Recycled_Losses_Landfilled'])
                dm['mat_Total_Landfilled'] = (dm['mat_Total_EOL_Landfilled'] +
                                              dm['mat_Total_MFG_Landfilled'])
                dm['mat_Total_Recycled_OU'] = (dm['mat_EOL_Recycled_2_OQ'] +
                                               dm['mat_EOL_Recycled_HQ_into_OU'] +
                                               dm['mat_MFG_Recycled_into_OQ'] +
                                               dm['mat_MFG_Recycled_HQ_into_OU'])
                self.scenario[scen].material[mat].materialdata = dm
    def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):
        """Apply IRENA 2016 lifetime assumptions to the selected scenarios.

        NOTE(review): an identical ``scenMod_IRENIFY`` is defined again
        later in this class; that later definition silently shadows this
        one. Remove one of the two.

        NOTE(review): if ``ELorRL`` is neither 'RL' nor 'EL',
        ``weibullInputParams`` is never bound and the assignments below
        raise NameError.
        """
        if ELorRL == 'RL':
            weibullInputParams = {'alpha': 5.3759, 'beta': 30}  # Regular-loss fit
            print("Using Irena Regular Loss Assumptions")
        if ELorRL == 'EL':
            weibullInputParams = {'alpha': 2.4928, 'beta': 30}  # Early-loss fit
            print("Using Irena Early Loss Assumptions")
        if scenarios is None:
            scenarios = list(self.scenario.keys())
        else:
            if isinstance(scenarios, str):
                scenarios = [scenarios]
        for scen in scenarios:
            self.scenario[scen].data['weibull_alpha'] = weibullInputParams['alpha']
            self.scenario[scen].data['weibull_beta'] = weibullInputParams['beta']
            self.scenario[scen].data['mod_lifetime'] = 40.0
            self.scenario[scen].data['mod_MFG_eff'] = 100.0
            for mat in self.scenario[scen].material:
                self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0
                self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0
        return
    def check_Years_dataandMaterials(self, scenarios=None, materials=None):
        """Placeholder: intended to verify that scenario data and material
        data cover the same year ranges. Not implemented yet."""
        print ("Not Done")
    def trim_Years( self, startYear=None, endYear=None, aggregateInstalls=False,
                   averageEfficiency=False, averageMaterialData = False, methodAddedYears='repeat',
                   scenarios=None, materials=None):
        """Trim (or eventually extend) scenario and material dataframes to
        the [startYear, endYear] range.

        Optionally folds pre-startYear installs into the first kept year
        (``aggregateInstalls``) and replaces first-year efficiencies /
        material values with the pre-startYear average
        (``averageEfficiency`` / ``averageMaterialData``).

        NOTE(review): ``methodAddedYears`` and ``materials`` are unused in
        the visible body; extending beyond the data range only prints
        "ADD YEARS HERE. not done yet".
        """
        if scenarios is None:
            scenarios = list(self.scenario.keys())
        else:
            if isinstance(scenarios, str):
                scenarios = [scenarios]
        # Year bounds are read from the FIRST scenario only.
        scen0 = scenarios[0]
        dataStartYear = int(self.scenario[scen0].data.iloc[0]['year'])
        dataEndYear = int(self.scenario[scen0].data.iloc[-1]['year'])
        if startYear is None:
            startYear = dataStartYear
            print("startYear not provided. Setting to start year of Module data", startYear)
        if endYear is None:
            endYear = dataEndYear
            print("endYear not provided. Setting to end year of Module data", endYear)
        # NOTE(review): the next two assignments are no-ops.
        startYear = startYear
        endYear = endYear
        for scen in scenarios:
            baseline = self.scenario[scen].data
            if int(startYear) < int(dataStartYear):
                print("ADD YEARS HERE. not done yet")
            if int(endYear) > int(dataEndYear):
                print("ADD YEARS HERE. not done yet")
            reduced = baseline.loc[(baseline['year']>=startYear) & (baseline['year']<=endYear)].copy()
            if aggregateInstalls:
                # Fold all pre-startYear installs into the first kept year.
                prev = baseline.loc[(baseline['year']<startYear)].sum()
                reduced.loc[reduced['year'] == startYear, 'new_Installed_Capacity_[MW]'] = prev['new_Installed_Capacity_[MW]']
            if averageEfficiency:
                prev = baseline.loc[(baseline['year']<startYear)].mean()
                # NOTE(review): column key 'mod_eff ' has a trailing space —
                # confirm against the baseline column names ('mod_eff'
                # elsewhere in this module has none).
                reduced.loc[reduced['year'] == startYear, 'mod_eff '] = prev['mod_eff ']
            reduced.reset_index(drop=True, inplace=True)
            self.scenario[scen].data = reduced
            for mat in self.scenario[scen].material:
                if int(startYear) < int(dataStartYear):
                    print("ADD YEARS HERE. not done yet")
                if int(endYear) > int(dataEndYear):
                    print("ADD YEARS HERE. not done yet")
                matdf = self.scenario[scen].material[mat].materialdata
                reduced = matdf.loc[(matdf['year']>=startYear) & (matdf['year']<=endYear)].copy()
                # NOTE(review): compares to the string 'average' although the
                # parameter default is the boolean False — passing True does
                # NOT trigger this branch; confirm the intended API.
                if averageMaterialData == 'average':
                    # NOTE(review): mask uses `baseline['year']` (module df)
                    # against `matdf` rows — confirm both share an index.
                    prev = matdf.loc[(baseline['year']<startYear)].mean()
                    matkeys = list(reduced.keys())[1:12]
                    for matkey in matkeys:
                        reduced.loc[reduced['year'] == startYear, matkey] = prev[matkey]
                reduced.reset_index(drop=True, inplace=True)
                self.scenario[scen].material[mat].materialdata = reduced
def scenMod_IRENIFY(self, scenarios=None, ELorRL='RL'):
if ELorRL == 'RL':
weibullInputParams = {'alpha': 5.3759, 'beta': 30}
print("Using Irena Regular Loss Assumptions")
if ELorRL == 'EL':
weibullInputParams = {'alpha': 2.4928, 'beta': 30}
print("Using Irena Early Loss Assumptions")
if scenarios is None:
scenarios = list(self.scenario.keys())
else:
if isinstance(scenarios, str):
scenarios = [scenarios]
for scen in scenarios:
self.scenario[scen].data['weibull_alpha'] = weibullInputParams['alpha']
self.scenario[scen].data['weibull_beta'] = weibullInputParams['beta']
self.scenario[scen].data['mod_lifetime'] = 40.0
self.scenario[scen].data['mod_MFG_eff'] = 100.0
for mat in self.scenario[scen].material:
self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0
self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0
return
def scenMod_PerfectManufacturing(self, scenarios=None):
if scenarios is None:
scenarios = list(self.scenario.keys())
else:
if isinstance(scenarios, str):
scenarios = [scenarios]
for scen in scenarios:
self.scenario[scen].data['mod_MFG_eff'] = 100.0
for mat in self.scenario[scen].material:
self.scenario[scen].material[mat].materialdata['mat_virgin_eff'] = 100.0
self.scenario[scen].material[mat].materialdata['mat_MFG_eff'] = 100.0
return
def scenMod_noCircularity(self, scenarios=None):
if scenarios is None:
scenarios = list(self.scenario.keys())
else:
if isinstance(scenarios, str):
scenarios = [scenarios]
for scen in scenarios:
self.scenario[scen].data['mod_EOL_collection_eff '] = 0.0
self.scenario[scen].data['mod_EOL_collected_recycled'] = 0.0
self.scenario[scen].data['mod_Repair'] = 0.0
self.scenario[scen].data['mod_MerchantTail'] = 0.0
self.scenario[scen].data['mod_Reuse'] = 0.0
for mat in self.scenario[scen].material:
self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled'] = 0.0
self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycling_eff'] = 0.0
self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled_into_HQ'] = 0.0
self.scenario[scen].material[mat].materialdata['mat_MFG_scrap_Recycled_into_HQ_Reused4MFG'] = 0.0
self.scenario[scen].material[mat].materialdata['mat_EOL_collected_Recycled'] = 0.0
self.scenario[scen].material[mat].materialdata['mat_EOL_Recycling_eff'] = 0.0
self.scenario[scen].material[mat].materialdata['mat_EOL_Recycled_into_HQ'] = 0.0
self.scenario[scen].material[mat].materialdata['mat_EOL_RecycledHQ_Reused4MFG'] = 0.0
return
    def aggregateResults(self, scenarios=None, materials=None):
        """Aggregate per-material results into yearly and cumulative frames.

        Builds VirginStock/WasteAll/WasteEOL/WasteMFG columns per material
        and scenario (converted g -> metric tonnes), plus module-level sums
        and capacity columns, then stores them on self.USyearly/self.UScum.

        Returns:
            (USyearly, UScum) pandas DataFrames indexed by year.
        """
        if scenarios is None:
            scenarios = list(self.scenario.keys())
        else:
            if isinstance(scenarios, str):
                scenarios = [scenarios]
        if materials is None:
            materials = list(self.scenario[scenarios[0]].material.keys())
        else:
            if isinstance(materials, str):
                materials = [materials]
        # materialdata columns to aggregate, and their display names.
        keywds = ['mat_Virgin_Stock', 'mat_Total_Landfilled', 'mat_Total_EOL_Landfilled', 'mat_Total_MFG_Landfilled']
        nice_keywds = ['VirginStock', 'WasteAll', 'WasteEOL', 'WasteMFG']
        USyearly=pd.DataFrame()
        for scen in scenarios:
            for ii in range(len(keywds)):
                keywd = keywds[ii]
                nicekey = nice_keywds[ii]
                for mat in materials:
                    USyearly[nicekey+'_'+mat+'_'+self.name+'_'+scen] = self.scenario[scen].material[mat].materialdata[keywd]
                # Module total = sum over this metric's material columns for
                # this scenario.  NOTE(review): the endswith filter can also
                # match another scenario whose name is a suffix of this one
                # (e.g. 'a' vs 'base_a') — confirm scenario names are
                # suffix-free.
                filter_col = [col for col in USyearly if (col.startswith(nicekey) and col.endswith(self.name+'_'+scen)) ]
                USyearly[nicekey+'_Module_'+self.name+'_'+scen] = USyearly[filter_col].sum(axis=1)
        # Unit conversion (presumably grams -> metric tonnes — TODO confirm
        # against the baseline units row).
        USyearly = USyearly/1000000
        USyearly = USyearly.add_suffix('_[Tonnes]')
        keywd1='new_Installed_Capacity_[MW]'
        for scen in scenarios:
            USyearly['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd1]
        # Cumulative frame is snapshotted BEFORE the capacity columns below
        # are added, so UScum intentionally lacks Active/Decommisioned
        # capacity columns; Decommisioned relies on the cumulative installs.
        UScum = USyearly.copy()
        UScum = UScum.cumsum()
        keywd='Installed_Capacity_[W]'
        for scen in scenarios:
            # Convert W -> MW for the active capacity.
            USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'] = self.scenario[scen].data[keywd]/1e6
            USyearly['DecommisionedCapacity_'+self.name+'_'+scen+'_[MW]'] = (
                UScum['newInstalledCapacity_'+self.name+'_'+scen+'_[MW]']-
                USyearly['ActiveCapacity_'+self.name+'_'+scen+'_[MW]'])
        # NOTE(review): uses the loop variable left over from the last
        # scenario — assumes every scenario shares the same year column.
        USyearly.index = self.scenario[scen].data['year']
        UScum.index = self.scenario[scen].data['year']
        self.USyearly = USyearly
        self.UScum = UScum
        return USyearly, UScum
def plotScenariosComparison(self, keyword=None, scenarios=None):
if scenarios is None:
scenarios = list(self.scenario.keys())
else:
if isinstance(scenarios, str):
scenarios = [scenarios]
if keyword is None:
scens = list(self.scenario.keys())[0]
print("Choose one of the keywords: ", list(self.scenario[scens].data.keys()))
return
yunits = _unitReferences(keyword)
plt.figure()
for scen in scenarios:
plt.plot(self.scenario[scen].data['year'],self.scenario[scen].data[keyword], label=scen)
plt.legend()
plt.xlabel('Year')
plt.title(keyword.replace('_', " "))
plt.ylabel(yunits)
def plotMetricResults(self):
from plotly.subplots import make_subplots
y1 = self.plotMaterialResults(keyword='VirginStock', yearlyorcumulative='yearly')
y2 = self.plotMaterialResults(keyword='WasteAll', yearlyorcumulative='yearly')
y3 = self.plotMaterialResults(keyword='WasteEOL', yearlyorcumulative='yearly')
y4 = self.plotMaterialResults(keyword='WasteMFG', yearlyorcumulative='yearly')
c1 = self.plotMaterialResults(keyword='VirginStock', yearlyorcumulative='cumulative')
c2 = self.plotMaterialResults(keyword='WasteAll', yearlyorcumulative='cumulative')
c3 = self.plotMaterialResults(keyword='WasteEOL', yearlyorcumulative='cumulative')
c4 = self.plotMaterialResults(keyword='WasteMFG', yearlyorcumulative='cumulative')
ic = self.plotInstalledCapacityResults()
    def plotMaterialResults(self, keyword, yearlyorcumulative='yearly', cumplot=False):
        """Plotly line chart of one aggregated metric per material/scenario.

        Args:
            keyword: metric prefix, e.g. 'VirginStock' or 'WasteAll'.
            yearlyorcumulative: 'yearly' plots self.USyearly, anything else
                plots self.UScum.
            cumplot: when True, return the figure instead of showing it.
        """
        import plotly.express as px
        import re
        if yearlyorcumulative == 'yearly':
            data = self.USyearly
        else:
            data = self.UScum
        if keyword is None:
            print("keyword options are :" 'VirginStock', 'WasteALL', 'WasteEOL', 'WasteMFG')
            return
        # Columns like 'VirginStock_glass_<sim>_<scen>_[Tonnes]'.
        filter_col = [col for col in data if col.startswith(keyword)]
        # 'WasteAll' -> ' Waste All' for a readable title.
        titlekeyword = str.capitalize(yearlyorcumulative) + re.sub( r"([A-Z])", r" \1", keyword)
        # Units token is the last underscore-separated piece, e.g. '[Tonnes]'.
        units = filter_col[0].split('_')[-1]
        # Legend: drop the metric prefix and the trailing units token.
        mylegend = [col.split('_')[1:] for col in filter_col]
        mylegend = [col[:-1] for col in mylegend]
        mylegend = [' '.join(col) for col in mylegend]
        mylegend = [str.capitalize(col) for col in mylegend]
        fig = px.line(data[filter_col], template="plotly_white")
        fig.update_layout(
            title=titlekeyword,
            xaxis_title="Year",
            yaxis_title=units
        )
        # Rename each trace (and its hover text) to the cleaned legend label.
        for idx, name in enumerate(mylegend):
            fig.data[idx].name = name
            fig.data[idx].hovertemplate = name
        if cumplot:
            return fig
        else:
            fig.show()
            return
def plotInstalledCapacityResults(self, cumplot=False):
import plotly.express as px
datay = self.USyearly
datac = self.UScum
filter_colc = [col for col in datac if col.startswith('newInstalledCapacity')]
filter_coly = [col for col in datay if col.startswith('Capacity')]
datay = datay[filter_coly].copy()
mylegend = [col.split('_')[1:] for col in datay]
mylegend = [col[:-1] for col in mylegend]
mylegend = [str(col)[2:-2] for col in mylegend]
mylegendy = ['Cumulative New Installs, '+col for col in mylegend]
print(mylegend)
datac = datac[filter_colc].copy()
mylegend = [col.split('_')[1:] for col in datac]
mylegend = [col[:-1] for col in mylegend]
mylegend = [str(col)[2:-2] for col in mylegend]
mylegendc = ['Capacity, '+col for col in mylegend]
data = datay.join(datac)
mylegend = mylegendy + mylegendc
titlekeyword = 'Installed Capacity and Cumulative new Installs'
units = filter_colc[0].split('_')[-1]
fig = px.line(data, template="plotly_white")
fig.update_layout(
title=titlekeyword,
xaxis_title="Year",
yaxis_title=units
)
for idx, name in enumerate(mylegend):
fig.data[idx].name = name
fig.data[idx].hovertemplate = name
if cumplot:
return fig
else:
fig.show()
return
def plotMaterialComparisonAcrossScenarios(self, keyword=None, scenarios=None, material = None):
if scenarios is None:
scenarios = list(self.scenario.keys())
else:
if isinstance(scenarios, str):
scenarios = [scenarios]
if keyword is None:
scens = list(self.scenario.keys())[0]
mats = list(self.scenario[scens].material.keys())[0]
print("Choose one of the keywords: ", list(self.scenario[scens].material[mats].materialdata.keys()))
return
if material is None:
scens = list(self.scenario.keys())[0]
mats = list(self.scenario[scens].material.keys())
print("Choose one of the Materials: ", mats)
return
else:
if isinstance(material, str) is False:
mats = list(self.scenario[scens].material.keys())
print("Can only pass one material name (str). Choose one of the Materials: ", mats)
return
yunits = _unitReferences(keyword)
plt.figure()
for scen in scenarios:
plt.plot(self.scenario[scen].data['year'], self.scenario[scen].material[material].materialdata[keyword], label=scen)
plt.legend()
plt.xlabel('Year')
plt.title((material + ' ' + keyword.replace('_', " ")))
plt.ylabel(yunits)
class Scenario(Simulation):
    """One modeling scenario: a module baseline frame plus its materials.

    Attributes:
        name: scenario identifier.
        material: dict mapping material name -> Material.
        baselinefile: path the module baseline was loaded from.
        metdata: dict of per-column metadata (second CSV row, e.g. units).
        data: pandas DataFrame of the module baseline, one row per year.
    """
    def __init__(self, name, file=None):
        self.name = name
        self.material = {}
        if file is None:
            try:
                file = _interactive_load('Select module baseline file')
            except Exception:
                raise Exception('Interactive load failed. Tkinter not supported '
                                'on this system. Try installing X-Quartz and reloading')
        # Open once with utf-8-sig (strips a BOM if present).  The previous
        # code opened the file twice, leaking the first handle.
        with open(str(file), 'r', encoding="UTF-8-sig") as csvdata:
            firstline = csvdata.readline()
            secondline = csvdata.readline()
            head = firstline.rstrip('\n').split(",")
            # Second CSV row carries per-column metadata (e.g. units).
            meta = dict(zip(head, secondline.rstrip('\n').split(",")))
            data = pd.read_csv(csvdata, names=head)
        data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)
        self.baselinefile = file
        # Store the metadata dict directly; a stray trailing comma used to
        # wrap it into a 1-tuple, unlike Material.materialmetdata.
        self.metdata = meta
        self.data = data
    def addMaterial(self, materialname, file=None):
        """Attach a single Material loaded from `file`."""
        self.material[materialname] = Material(materialname, file)
    def addMaterials(self, materials, baselinefolder=None, nameformat=None):
        """Attach several materials from baseline CSVs named per `nameformat`.

        NOTE(review): the defaults use Windows path separators; on POSIX,
        pass explicit baselinefolder/nameformat.
        """
        if baselinefolder is None:
            baselinefolder = r'..\..\baselines'
        if nameformat is None:
            nameformat = r'\baseline_material_{}.csv'
        for mat in materials:
            filemat = baselinefolder + nameformat.format(mat)
            self.material[mat] = Material(mat, filemat)
    def modifyMaterials(self, materials, stage, value, start_year=None):
        """Set column `stage` to `value` for all years strictly after
        start_year (default: the current year) in each listed material."""
        if start_year is None:
            start_year = int(datetime.datetime.now().year)
        if materials is None:
            materials = list(self.material.keys())
        elif isinstance(materials, str):
            materials = [materials]
        selectyears = self.data['year'] > start_year
        for mat in materials:
            self.material[mat].materialdata.loc[selectyears, stage] = value
    def __getitem__(self, key):
        return getattr(self, key)
    def __setitem__(self, key, value):
        # Previously declared without `value`, so item assignment always
        # raised TypeError; fixed to match the __setitem__ protocol.
        return setattr(self, key, value)
class Material:
    """A material baseline loaded from a two-header-row CSV.

    Attributes:
        materialname: material identifier (e.g. 'glass').
        materialfile: path the baseline was loaded from.
        materialmetdata: dict of per-column metadata (second CSV row).
        materialdata: pandas DataFrame, one row per year; every column
            except 'year' is cast to float.
    """
    def __init__(self, materialname, file):
        self.materialname = materialname
        if file is None:
            try:
                file = _interactive_load('Select material baseline file')
            except Exception:
                raise Exception('Interactive load failed. Tkinter not supported '
                                'on this system. Try installing X-Quartz and reloading')
        # Open once with utf-8-sig (strips a BOM if present).  The previous
        # code opened the file twice, leaking the first handle.
        with open(str(file), 'r', encoding="UTF-8-sig") as csvdata:
            firstline = csvdata.readline()
            secondline = csvdata.readline()
            head = firstline.rstrip('\n').split(",")
            meta = dict(zip(head, secondline.rstrip('\n').split(",")))
            data = pd.read_csv(csvdata, names=head)
        data.loc[:, data.columns != 'year'] = data.loc[:, data.columns != 'year'].astype(float)
        self.materialfile = file
        self.materialmetdata = meta
        self.materialdata = data
def weibull_params(keypoints):
    """Solve for Weibull shape/scale from two (lifetime, CDF) keypoints.

    `keypoints` maps two lifetimes t1, t2 to the cumulative failure
    fractions observed at those times.  The algebra runs through complex
    logarithms so it stays well-defined for any 0 < cdf < 1; alpha is
    reduced back to a real scalar and beta via the modulus.

    Returns:
        dict with keys 'alpha' (shape) and 'beta' (scale).
    """
    (t1, cdf1), (t2, cdf2) = keypoints.items()
    # u = log(log(1 - cdf)) evaluated in the complex plane.
    u1 = np.log(np.log(1 - cdf1) + 0j)
    u2 = np.log(np.log(1 - cdf2) + 0j)
    lt1 = np.log(t1)
    lt2 = np.log(t2)
    alpha = np.ndarray.item(np.real_if_close((u1 - u2) / (lt1 - lt2)))
    beta = np.abs(np.exp(
        (lt2 * (1j * np.pi + u1) + lt1 * (-1j * np.pi - u2)) / (u1 - u2)
    ))
    return {'alpha': alpha, 'beta': beta}
def weibull_cdf(alpha, beta):
    """Return a callable F(x) = 1 - exp(-(x/beta)**alpha), the Weibull CDF
    with shape `alpha` and scale `beta`.  Accepts scalars or array-likes."""
    def cdf(x):
        scaled = np.array(x) / beta
        return 1 - np.exp(-scaled ** alpha)
    return cdf
def weibull_pdf(alpha, beta):
    """Return a callable for the Weibull PDF with shape `alpha` and scale
    `beta`, written as (alpha/x) * (x/beta)**alpha * exp(-(x/beta)**alpha)
    (algebraically equal to the textbook form).  x=0 hits the 1/x factor."""
    def pdf(x):
        xs = np.array(x)
        scaled = (xs / beta) ** alpha
        return (alpha / xs) * scaled * np.exp(-scaled)
    return pdf
def weibull_pdf_vis(alpha, beta, xlim=56):
    """Sample the Weibull PDF at integer ages 0..xlim-1 (for plotting).

    Returns a list of PDF values.  NOTE: age 0 hits the 1/x factor of the
    PDF, so the first element is nan/inf — unchanged from the original.

    Raises:
        ValueError: if alpha or beta is zero/None.  The previous version
            silently fell through to an UnboundLocalError on `return idf`.
    """
    if not (alpha and beta):
        raise ValueError("alpha and beta must both be non-zero")
    ages = np.clip(pd.RangeIndex(0, xlim, 1), 0, np.inf)
    pdf = weibull_pdf(alpha, beta)
    return list(map(pdf, ages))
def weibull_cdf_vis(alpha, beta, xlim=56):
    """Sample the Weibull CDF at integer ages 0..xlim-1 (for plotting).

    Returns a list of CDF values.

    Raises:
        ValueError: if alpha or beta is zero/None.  The previous version
            silently fell through to an UnboundLocalError on `return idf`.
    """
    if not (alpha and beta):
        raise ValueError("alpha and beta must both be non-zero")
    ages = np.clip(pd.RangeIndex(0, xlim, 1), 0, np.inf)
    cdf = weibull_cdf(alpha, beta)
    return list(map(cdf, ages))
def sens_StageImprovement(df, stage, improvement=1.3, start_year=None):
    """Scale column `stage` by `improvement` for every row whose index is
    strictly greater than start_year (default: the current year).

    Mutates `df` in place and also returns it.
    """
    if start_year is None:
        start_year = int(datetime.datetime.now().year)
    df[stage] = df[stage].astype(float)
    after = df.index > start_year
    df.loc[after, stage] = df.loc[after, stage] * improvement
    return df
def sens_StageEfficiency(df, stage, target_eff=95.0, start_year=None,
                         goal_year=2030, plotflag=False):
    """Linearly ramp column `stage` from its start_year value to
    `target_eff` at goal_year; years >= goal_year hold `target_eff`.

    Returns a modified copy of `df`, or None (after printing a message)
    when the inputs are invalid or `stage` is not a column.
    """
    if start_year is None:
        start_year = int(datetime.datetime.now().year)
    if start_year > goal_year:
        print("Error. Goal Year is before start year")
        return
    if 0 < abs(target_eff) < 1:
        # Decimal fractions are interpreted as percentages.
        print("Warning: target_eff value is between 0 and 1; it has been"
              "multiplied by 100% assuming it was a percentage in decimal form.")
        target_eff = target_eff * 100
    if not 0 <= target_eff <= 100:
        print("Warning: target_eff is out of range. Input value between"
              "0 and 100")
        return
    if stage not in df.columns:
        print("Stage name incorrect.")
        return
    updated = df.copy()
    updated[stage] = updated[stage].astype(float)
    # Blank the ramp years, pin the goal value, then fill linearly.
    ramp = (updated.index > start_year) & (updated.index < goal_year)
    updated.loc[ramp, stage] = np.nan
    updated.loc[updated.index >= goal_year, stage] = target_eff
    updated[stage] = updated[stage].interpolate()
    if plotflag:
        plt.plot(df[stage], label='Original')
        plt.plot(updated[stage], label='Modified')
        plt.title('Updated values for ' + stage)
        plt.legend()
    return updated
def _modDict(originaldict, moddict):
for key in moddict:
try:
originaldict[key] = moddict[key]
except:
print("Wrong key in modified dictionary")
return originaldict
def calculateLCA(PVarea, modified_impacts=None, printflag=False):
    """TRACI 2.1 life-cycle impacts for `PVarea` m2 of silicon PV panels.

    Args:
        PVarea: panel area in m2; each per-m2 impact factor is scaled by it.
        modified_impacts: optional dict merged over the TRACI 2.1 defaults
            via _modDict (same nested {'UUID', 'Result', 'Unit'} shape).
        printflag: when True, print a per-category results table.

    Returns:
        10-tuple of impact totals in the fixed order: acidification,
        carcinogenics, ecotoxicity, eutrophication, fossil fuel depletion,
        global warming, non carcinogenics, ozone depletion, respiratory
        effects, smog.
    """
    if printflag:
        print("Doing calculations of LCA analysis for Silicon Photovoltaic Panels")
    # TRACI 2.1 default per-m2 impact factors.
    impacts = {'Acidification':{'UUID':  '75d0c8a2-e466-3bd7-813b-5beef2209330',
                                'Result':  1.29374135667815,
                                'Unit': 'kg SO2' },
                'Carcinogenics':{'UUID':  'a6e5e5d8-a1e5-3c77-8170-586c4fe37514',
                                    'Result':  0.0000231966690476102,
                                    'Unit': 'CTUh' },
                'Ecotoxicity':{'UUID':  '338e9370-ceb0-3d18-9d87-5f91feb7829c',
                                    'Result':  5933.77859696668,
                                    'Unit': 'CTUe' },
                'Eutrophication':{'UUID':  '45b8cd56-498a-3c6f-9488-134e951d8c02',
                                'Result':  1.34026194777363,
                                'Unit': 'kg N eq' },
                'Fossil fuel depletion':{'UUID':  '0e45786f-67fa-3b8a-b8a3-73a7c316434c',
                                'Result':  249.642261689385,
                                'Unit': 'MJ surplus' },
                'Global warming':{'UUID':  '31967441-d687-313d-9910-13da3a584ab7',
                                'Result':  268.548841324818,
                                'Unit': 'kg CO2 eq' },
                'Non carcinogenics':{'UUID':  'd4827ae3-c873-3ea4-85fb-860b7f3f2dee',
                                'Result':  0.000135331806321799,
                                'Unit': 'CTUh' },
                'Ozone depletion':{'UUID':  '6c05dad1-6661-35f2-82aa-6e8e6a498aec',
                                'Result':  0.0000310937628622019,
                                'Unit': 'kg CFC-11 eq' },
                'Respiratory effects':{'UUID':  'e0916d62-7fbd-3d0a-a4a5-52659b0ac9c1',
                                'Result':  0.373415542664206,
                                'Unit': 'kg PM2.5 eq' },
                'Smog':{'UUID':  '7a149078-e2fd-3e07-a5a3-79035c60e7c3',
                                'Result':  15.35483065,
                                'Unit': 'kg O3 eq' },
                }
    if modified_impacts is not None:
        impacts = _modDict(impacts, modified_impacts)
        if printflag:
            print("Following Modified impacts provided instead of TRACI 2.1 default")
            print(impacts)
            print("")
    elif printflag:
        print("Following TRACI 2.1")
    # Fixed category order: defines both the return tuple and the printout.
    categories = ('Acidification', 'Carcinogenics', 'Ecotoxicity',
                  'Eutrophication', 'Fossil fuel depletion', 'Global warming',
                  'Non carcinogenics', 'Ozone depletion',
                  'Respiratory effects', 'Smog')
    results = tuple(impacts[cat]['Result'] * PVarea for cat in categories)
    if printflag:
        print("RESULTS FOR PV AREA ", PVarea, " m2 ")
        print("****************************************")
        for cat, value in zip(categories, results):
            print(cat + ': ', round(value, 2), ' ', impacts[cat]['Unit'])
    return results
f72d609c4df4a3f3c318b99781086c3d6d4d85e2 | 4,134 | py | Python | bot/helper/mirror_utils/download_utils/aria2_download.py | styloxyash1/mybot | 285efe23d8fa429738ff2198da684d846fe2bf6f | [
"MIT"
] | null | null | null | bot/helper/mirror_utils/download_utils/aria2_download.py | styloxyash1/mybot | 285efe23d8fa429738ff2198da684d846fe2bf6f | [
"MIT"
] | null | null | null | bot/helper/mirror_utils/download_utils/aria2_download.py | styloxyash1/mybot | 285efe23d8fa429738ff2198da684d846fe2bf6f | [
"MIT"
] | null | null | null | from bot import aria2, download_dict_lock, STOP_DUPLICATE_MIRROR
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.bot_utils import *
from .download_helper import DownloadHelper
from bot.helper.mirror_utils.status_utils.aria_download_status import AriaDownloadStatus
from bot.helper.telegram_helper.message_utils import *
import threading
from aria2p import API
from time import sleep
class AriaDownloadHelper(DownloadHelper):
    """Bridges aria2's RPC notification stream to the bot's mirror listeners.

    Each __onDownload* callback resolves the bot-side status object for the
    aria2 gid and forwards the event to its attached listener, keeping the
    shared download_dict registry in sync.
    """
    def __init__(self):
        super().__init__()
    @new_thread
    def __onDownloadStarted(self, api, gid):
        """aria2 'download started' hook; may cancel Drive duplicates."""
        # Brief pause so aria2 has populated the download's metadata.
        sleep(1)
        LOGGER.info(f"onDownloadStart: {gid}")
        dl = getDownloadByGid(gid)
        download = api.get_download(gid)
        self.name = download.name
        sname = download.name
        gdrive = GoogleDriveHelper(None)
        # Search Google Drive for an existing file with the same name.
        smsg, button = gdrive.drive_list(sname)
        if STOP_DUPLICATE_MIRROR:
            if smsg:
                # Duplicate found: notify the user and cancel the download.
                dl.getListener().onDownloadError(f'😡 𝑭𝒊𝒍𝒆 𝒊𝒔 𝒂𝒍𝒓𝒆𝒂𝒅𝒚 𝒂𝒗𝒂𝒊𝒍𝒂𝒃𝒍𝒆 𝒊𝒏 𝑫𝒓𝒊𝒗𝒆\n𝑭𝒊𝒔𝒓𝒕 𝒔𝒆𝒂𝒓𝒄𝒉 𝑩𝒆𝒇𝒐𝒓𝒆 𝑴𝒊𝒓𝒓𝒐𝒓𝒊𝒏𝒈 𝒂𝒏𝒚𝒕𝒉𝒊𝒏𝒈 😡\n𝑰𝒇 𝒚𝒐𝒖 𝒅𝒐 𝒕𝒉𝒊𝒔 𝒂𝒈𝒂𝒊𝒏❗ 𝒀𝒐𝒖 𝒘𝒊𝒍𝒍 𝒃𝒆 𝑩𝒂𝒏 😐.\n\n')
                print(dl.getListener())
                sendMarkup(" 𝐇𝐞𝐫𝐞 𝐚𝐫𝐞 𝐭𝐡𝐞 𝐒𝐞𝐚𝐫𝐜𝐡 🔍 𝐑𝐞𝐬𝐮𝐥𝐭𝐬:👇👇", dl.getListener().bot, dl.getListener().update, button)
                aria2.remove([download])
                return
        update_all_messages()
    def __onDownloadComplete(self, api: API, gid):
        """aria2 'download complete' hook.

        A finished metadata/torrent download spawns a follow-up gid; in that
        case re-point the status entry at the new gid instead of finishing.
        """
        LOGGER.info(f"onDownloadComplete: {gid}")
        dl = getDownloadByGid(gid)
        download = api.get_download(gid)
        if download.followed_by_ids:
            # The real payload continues under a new gid.
            new_gid = download.followed_by_ids[0]
            new_download = api.get_download(new_gid)
            with download_dict_lock:
                download_dict[dl.uid()] = AriaDownloadStatus(new_gid, dl.getListener())
                if new_download.is_torrent:
                    download_dict[dl.uid()].is_torrent = True
            update_all_messages()
            LOGGER.info(f'Changed gid from {gid} to {new_gid}')
        else:
            # Run the listener callback off-thread; it may block on uploads.
            if dl: threading.Thread(target=dl.getListener().onDownloadComplete).start()
    @new_thread
    def __onDownloadPause(self, api, gid):
        """aria2 'download paused' hook — treated as a user cancel."""
        LOGGER.info(f"onDownloadPause: {gid}")
        dl = getDownloadByGid(gid)
        dl.getListener().onDownloadError('Download stopped by user!🌜🌛')
    @new_thread
    def __onDownloadStopped(self, api, gid):
        """aria2 'download stopped' hook (e.g. dead/seedless torrent)."""
        LOGGER.info(f"onDownloadStop: {gid}")
        dl = getDownloadByGid(gid)
        if dl: dl.getListener().onDownloadError('𝐘𝐨𝐮𝐫 𝐋𝐢𝐧𝐤 𝐢𝐬 𝐃𝐄𝐀𝐃 ❗ 😒 𝐃𝐨𝐧❜𝐭 𝐮𝐬𝐞 𝐋𝐨𝐰 𝐒𝐞𝐞𝐝𝐬 𝐓𝐨𝐫𝐫𝐞𝐧𝐭')
    @new_thread
    def __onDownloadError(self, api, gid):
        """aria2 'download error' hook: forward the error to the listener."""
        sleep(0.5) #sleep for split second to ensure proper dl gid update from onDownloadComplete
        LOGGER.info(f"onDownloadError: {gid}")
        dl = getDownloadByGid(gid)
        download = api.get_download(gid)
        error = download.error_message
        LOGGER.info(f"Download Error: {error}")
        if dl: dl.getListener().onDownloadError(error)
    def start_listener(self):
        """Subscribe all callbacks to aria2's notification stream (threaded)."""
        aria2.listen_to_notifications(threaded=True, on_download_start=self.__onDownloadStarted,
                                      on_download_error=self.__onDownloadError,
                                      on_download_pause=self.__onDownloadPause,
                                      on_download_stop=self.__onDownloadStopped,
                                      on_download_complete=self.__onDownloadComplete)
    def add_download(self, link: str, path,listener):
        """Queue `link` (magnet or direct URL) into aria2 under `path` and
        register a status entry for the listener's uid."""
        if is_magnet(link):
            download = aria2.add_magnet(link, {'dir': path})
        else:
            download = aria2.add_uris([link], {'dir': path})
        if download.error_message: #no need to proceed further at this point
            listener.onDownloadError(download.error_message)
            return
        with download_dict_lock:
            download_dict[listener.uid] = AriaDownloadStatus(download.gid,listener)
        LOGGER.info(f"Started: {download.gid} DIR:{download.dir} ")
| 43.515789 | 178 | 0.631834 | from bot import aria2, download_dict_lock, STOP_DUPLICATE_MIRROR
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.bot_utils import *
from .download_helper import DownloadHelper
from bot.helper.mirror_utils.status_utils.aria_download_status import AriaDownloadStatus
from bot.helper.telegram_helper.message_utils import *
import threading
from aria2p import API
from time import sleep
class AriaDownloadHelper(DownloadHelper):
    """Bridges aria2's RPC notification stream to the bot's mirror listeners.

    Each __onDownload* callback resolves the bot-side status object for the
    aria2 gid and forwards the event to its attached listener, keeping the
    shared download_dict registry in sync.
    """
    def __init__(self):
        super().__init__()
    @new_thread
    def __onDownloadStarted(self, api, gid):
        """aria2 'download started' hook; may cancel Drive duplicates."""
        # Brief pause so aria2 has populated the download's metadata.
        sleep(1)
        LOGGER.info(f"onDownloadStart: {gid}")
        dl = getDownloadByGid(gid)
        download = api.get_download(gid)
        self.name = download.name
        sname = download.name
        gdrive = GoogleDriveHelper(None)
        # Search Google Drive for an existing file with the same name.
        smsg, button = gdrive.drive_list(sname)
        if STOP_DUPLICATE_MIRROR:
            if smsg:
                # Duplicate found: notify the user and cancel the download.
                dl.getListener().onDownloadError(f'😡 𝑭𝒊𝒍𝒆 𝒊𝒔 𝒂𝒍𝒓𝒆𝒂𝒅𝒚 𝒂𝒗𝒂𝒊𝒍𝒂𝒃𝒍𝒆 𝒊𝒏 𝑫𝒓𝒊𝒗𝒆\n𝑭𝒊𝒔𝒓𝒕 𝒔𝒆𝒂𝒓𝒄𝒉 𝑩𝒆𝒇𝒐𝒓𝒆 𝑴𝒊𝒓𝒓𝒐𝒓𝒊𝒏𝒈 𝒂𝒏𝒚𝒕𝒉𝒊𝒏𝒈 😡\n𝑰𝒇 𝒚𝒐𝒖 𝒅𝒐 𝒕𝒉𝒊𝒔 𝒂𝒈𝒂𝒊𝒏❗ 𝒀𝒐𝒖 𝒘𝒊𝒍𝒍 𝒃𝒆 𝑩𝒂𝒏 😐.\n\n')
                print(dl.getListener())
                sendMarkup(" 𝐇𝐞𝐫𝐞 𝐚𝐫𝐞 𝐭𝐡𝐞 𝐒𝐞𝐚𝐫𝐜𝐡 🔍 𝐑𝐞𝐬𝐮𝐥𝐭𝐬:👇👇", dl.getListener().bot, dl.getListener().update, button)
                aria2.remove([download])
                return
        update_all_messages()
    def __onDownloadComplete(self, api: API, gid):
        """aria2 'download complete' hook.

        A finished metadata/torrent download spawns a follow-up gid; in that
        case re-point the status entry at the new gid instead of finishing.
        """
        LOGGER.info(f"onDownloadComplete: {gid}")
        dl = getDownloadByGid(gid)
        download = api.get_download(gid)
        if download.followed_by_ids:
            # The real payload continues under a new gid.
            new_gid = download.followed_by_ids[0]
            new_download = api.get_download(new_gid)
            with download_dict_lock:
                download_dict[dl.uid()] = AriaDownloadStatus(new_gid, dl.getListener())
                if new_download.is_torrent:
                    download_dict[dl.uid()].is_torrent = True
            update_all_messages()
            LOGGER.info(f'Changed gid from {gid} to {new_gid}')
        else:
            # Run the listener callback off-thread; it may block on uploads.
            if dl: threading.Thread(target=dl.getListener().onDownloadComplete).start()
    @new_thread
    def __onDownloadPause(self, api, gid):
        """aria2 'download paused' hook — treated as a user cancel."""
        LOGGER.info(f"onDownloadPause: {gid}")
        dl = getDownloadByGid(gid)
        dl.getListener().onDownloadError('Download stopped by user!🌜🌛')
    @new_thread
    def __onDownloadStopped(self, api, gid):
        """aria2 'download stopped' hook (e.g. dead/seedless torrent)."""
        LOGGER.info(f"onDownloadStop: {gid}")
        dl = getDownloadByGid(gid)
        if dl: dl.getListener().onDownloadError('𝐘𝐨𝐮𝐫 𝐋𝐢𝐧𝐤 𝐢𝐬 𝐃𝐄𝐀𝐃 ❗ 😒 𝐃𝐨𝐧❜𝐭 𝐮𝐬𝐞 𝐋𝐨𝐰 𝐒𝐞𝐞𝐝𝐬 𝐓𝐨𝐫𝐫𝐞𝐧𝐭')
    @new_thread
    def __onDownloadError(self, api, gid):
        """aria2 'download error' hook: forward the error to the listener."""
        # Brief pause so any gid hand-off in onDownloadComplete lands first.
        sleep(0.5)
        LOGGER.info(f"onDownloadError: {gid}")
        dl = getDownloadByGid(gid)
        download = api.get_download(gid)
        error = download.error_message
        LOGGER.info(f"Download Error: {error}")
        if dl: dl.getListener().onDownloadError(error)
    def start_listener(self):
        """Subscribe all callbacks to aria2's notification stream (threaded)."""
        aria2.listen_to_notifications(threaded=True, on_download_start=self.__onDownloadStarted,
                                      on_download_error=self.__onDownloadError,
                                      on_download_pause=self.__onDownloadPause,
                                      on_download_stop=self.__onDownloadStopped,
                                      on_download_complete=self.__onDownloadComplete)
    def add_download(self, link: str, path,listener):
        """Queue `link` (magnet or direct URL) into aria2 under `path` and
        register a status entry for the listener's uid."""
        if is_magnet(link):
            download = aria2.add_magnet(link, {'dir': path})
        else:
            download = aria2.add_uris([link], {'dir': path})
        # No point continuing if aria2 rejected the link outright.
        if download.error_message:
            listener.onDownloadError(download.error_message)
            return
        with download_dict_lock:
            download_dict[listener.uid] = AriaDownloadStatus(download.gid,listener)
        LOGGER.info(f"Started: {download.gid} DIR:{download.dir} ")
| true | true |
f72d61d379f5848b7d7637715d23e9aa174497d6 | 4,399 | py | Python | djangocms_installer/config/ini.py | michalnik/djangocms-installer | 5f825c02b1c324a2c9c3d0662913a3a2fdf798dd | [
"BSD-3-Clause"
] | 145 | 2015-01-17T12:03:48.000Z | 2022-03-09T16:54:27.000Z | djangocms_installer/config/ini.py | michalnik/djangocms-installer | 5f825c02b1c324a2c9c3d0662913a3a2fdf798dd | [
"BSD-3-Clause"
] | 204 | 2015-01-04T23:19:03.000Z | 2022-03-23T12:28:14.000Z | djangocms_installer/config/ini.py | michalnik/djangocms-installer | 5f825c02b1c324a2c9c3d0662913a3a2fdf798dd | [
"BSD-3-Clause"
] | 88 | 2015-01-11T09:41:28.000Z | 2022-03-05T15:29:47.000Z | import sys
from configparser import ConfigParser
from .data import CMS_VERSION_MATRIX, DJANGO_VERSION_MATRIX
SECTION = "djangocms_installer"
def parse_config_file(parser, stdin_args):
    """Parse the --config-file option out of `stdin_args`.

    Returns a list of additional argv-style args derived from the config
    file (empty when no config file was given).  Exits with status 7 when
    the named config file does not exist.
    """
    extra_args = []
    # Temporarily relax `required` so parsing succeeds even when only
    # --config-file is supplied; remember which actions to restore.
    relaxed = [action for action in parser._actions if action.required]
    for action in relaxed:
        action.required = False
    parsed = parser.parse_args(stdin_args)
    for action in relaxed:
        action.required = True
    if not parsed.config_file:
        return extra_args
    config = ConfigParser()
    if not config.read(parsed.config_file):
        sys.stderr.write('Config file "{}" doesn\'t exists\n'.format(parsed.config_file))
        sys.exit(7)  # exit code 7: config file missing
    extra_args = _convert_config_to_stdin(config, parser)
    return extra_args
def dump_config_file(filename, args, parser=None):
    """Dump args to config file.

    With a parser, every known option is serialized using the parser's
    action metadata; without one, the namespace's attributes are written
    verbatim.
    """
    config = ConfigParser()
    config.add_section(SECTION)
    if parser is None:
        # Iterate the namespace's attribute dict; the previous code iterated
        # the namespace object itself and always read the literal attribute
        # "attr", which raised at runtime.
        for attr, value in vars(args).items():
            config.set(SECTION, attr, str(value))
    else:
        # Options whose empty value means "use the default" on re-parse.
        keys_empty_values_not_pass = (
            "--extra-settings",
            "--languages",
            "--requirements",
            "--template",
            "--timezone",
        )
        for action in parser._actions:
            # Meta options are never round-tripped through the config file.
            if action.dest in ("help", "config_file", "config_dump", "project_name"):
                continue
            keyp = action.option_strings[0]
            option_name = keyp.lstrip("-")
            option_value = getattr(args, action.dest)
            if any(i for i in keys_empty_values_not_pass if i in action.option_strings):
                if action.dest == "languages":
                    # A bare default of ['en'] round-trips as "empty".
                    if len(option_value) == 1 and option_value[0] == "en":
                        config.set(SECTION, option_name, "")
                    else:
                        config.set(SECTION, option_name, ",".join(option_value))
                else:
                    config.set(SECTION, option_name, option_value if option_value else "")
            elif action.choices == ("yes", "no"):
                config.set(SECTION, option_name, "yes" if option_value else "no")
            elif action.dest == "templates":
                config.set(SECTION, option_name, option_value if option_value else "no")
            elif action.dest == "cms_version":
                # Resolved stable versions are written back as the keyword.
                version = "stable" if option_value == CMS_VERSION_MATRIX["stable"] else option_value
                config.set(SECTION, option_name, version)
            elif action.dest == "django_version":
                version = "stable" if option_value == DJANGO_VERSION_MATRIX["stable"] else option_value
                config.set(SECTION, option_name, version)
            elif action.const:
                # Flag-style actions serialize as booleans.
                config.set(SECTION, option_name, "true" if option_value else "false")
            else:
                config.set(SECTION, option_name, str(option_value))
    with open(filename, "w") as fp:
        config.write(fp)
def _convert_config_to_stdin(config, parser):
    """Convert config options to stdin args.

    Especially boolean values, for more information
    @see https://docs.python.org/3.4/library/configparser.html#supported-datatypes
    """
    # Options whose empty config value means "keep the parser default".
    skip_when_empty = (
        "--extra-settings",
        "--languages",
        "--requirements",
        "--template",
        "--timezone",
    )
    args = []
    for key, val in config.items(SECTION):
        option = "--{}".format(key)
        action = parser._option_string_actions[option]
        if action.const:
            # Flag-style option: emit the bare flag when truthy; a
            # non-boolean value is passed through so argparse reports it.
            try:
                if config.getboolean(SECTION, key):
                    args.append(option)
            except ValueError:
                args.extend([option, val])
        elif any(opt in action.option_strings for opt in skip_when_empty):
            if val != "":
                args.extend([option, val])
        else:
            args.extend([option, val])
    return args
| 35.475806 | 103 | 0.597409 | import sys
from configparser import ConfigParser
from .data import CMS_VERSION_MATRIX, DJANGO_VERSION_MATRIX
SECTION = "djangocms_installer"
def parse_config_file(parser, stdin_args):
    """Parse the --config-file option out of `stdin_args`.

    Returns a list of additional argv-style args derived from the config
    file (empty when no config file was given).  Exits with status 7 when
    the named config file does not exist.
    """
    extra_args = []
    # Temporarily relax `required` so parsing succeeds even when only
    # --config-file is supplied; remember which actions to restore.
    relaxed = [action for action in parser._actions if action.required]
    for action in relaxed:
        action.required = False
    parsed = parser.parse_args(stdin_args)
    for action in relaxed:
        action.required = True
    if not parsed.config_file:
        return extra_args
    config = ConfigParser()
    if not config.read(parsed.config_file):
        sys.stderr.write('Config file "{}" doesn\'t exists\n'.format(parsed.config_file))
        sys.exit(7)  # exit code 7: config file missing
    extra_args = _convert_config_to_stdin(config, parser)
    return extra_args
def dump_config_file(filename, args, parser=None):
    """Dump args to config file.

    With a parser, every known option is serialized using the parser's
    action metadata; without one, the namespace's attributes are written
    verbatim.
    """
    config = ConfigParser()
    config.add_section(SECTION)
    if parser is None:
        # Iterate the namespace's attribute dict; the previous code iterated
        # the namespace object itself and always read the literal attribute
        # "attr", which raised at runtime.
        for attr, value in vars(args).items():
            config.set(SECTION, attr, str(value))
    else:
        # Options whose empty value means "use the default" on re-parse.
        keys_empty_values_not_pass = (
            "--extra-settings",
            "--languages",
            "--requirements",
            "--template",
            "--timezone",
        )
        for action in parser._actions:
            # Meta options are never round-tripped through the config file.
            if action.dest in ("help", "config_file", "config_dump", "project_name"):
                continue
            keyp = action.option_strings[0]
            option_name = keyp.lstrip("-")
            option_value = getattr(args, action.dest)
            if any(i for i in keys_empty_values_not_pass if i in action.option_strings):
                if action.dest == "languages":
                    # A bare default of ['en'] round-trips as "empty".
                    if len(option_value) == 1 and option_value[0] == "en":
                        config.set(SECTION, option_name, "")
                    else:
                        config.set(SECTION, option_name, ",".join(option_value))
                else:
                    config.set(SECTION, option_name, option_value if option_value else "")
            elif action.choices == ("yes", "no"):
                config.set(SECTION, option_name, "yes" if option_value else "no")
            elif action.dest == "templates":
                config.set(SECTION, option_name, option_value if option_value else "no")
            elif action.dest == "cms_version":
                # Resolved stable versions are written back as the keyword.
                version = "stable" if option_value == CMS_VERSION_MATRIX["stable"] else option_value
                config.set(SECTION, option_name, version)
            elif action.dest == "django_version":
                version = "stable" if option_value == DJANGO_VERSION_MATRIX["stable"] else option_value
                config.set(SECTION, option_name, version)
            elif action.const:
                # Flag-style actions serialize as booleans.
                config.set(SECTION, option_name, "true" if option_value else "false")
            else:
                config.set(SECTION, option_name, str(option_value))
    with open(filename, "w") as fp:
        config.write(fp)
def _convert_config_to_stdin(config, parser):
    """Convert the [djangocms_installer] config section into argv-style args.

    Boolean flag options are emitted as bare flags when truthy; options in
    the skip list are dropped entirely when their value is empty so the
    parser defaults apply.
    """
    # Options whose empty config value means "keep the parser default".
    skip_when_empty = (
        "--extra-settings",
        "--languages",
        "--requirements",
        "--template",
        "--timezone",
    )
    args = []
    for key, val in config.items(SECTION):
        option = "--{}".format(key)
        action = parser._option_string_actions[option]
        if action.const:
            # Flag-style option: emit the bare flag when truthy; a
            # non-boolean value is passed through so argparse reports it.
            try:
                if config.getboolean(SECTION, key):
                    args.append(option)
            except ValueError:
                args.extend([option, val])
        elif any(opt in action.option_strings for opt in skip_when_empty):
            if val != "":
                args.extend([option, val])
        else:
            args.extend([option, val])
    return args
f72d627417aaa695ea5bcada408ff82f6f850efa | 11,113 | py | Python | trajectory_generator.py | keshaviyengar/rl-baselines-zoo | 6e39f5c7c6c2d30873297308ed064551bffaa52d | [
"MIT"
] | null | null | null | trajectory_generator.py | keshaviyengar/rl-baselines-zoo | 6e39f5c7c6c2d30873297308ed064551bffaa52d | [
"MIT"
] | null | null | null | trajectory_generator.py | keshaviyengar/rl-baselines-zoo | 6e39f5c7c6c2d30873297308ed064551bffaa52d | [
"MIT"
] | null | null | null | import rospy
from geometry_msgs.msg import Pose, Point
from std_msgs.msg import Bool
import numpy as np
import os
# This script creates a square trajectory for a robot to follow.
# Will output errors as well.
class CircleTrajectory(object):
def __init__(self, x_offset, y_offset, z_height, radius, theta_step):
self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
self._current_pose = Pose()
# Create a timer to update the desired trajectory
self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)
self.traj_finish = False
# For now set initial current pose as 0
self._desired_pose = Pose()
self.x_offset = x_offset
self.y_offset = y_offset
self.radius = radius
self.thetas = np.arange(0, 2 * np.pi, np.deg2rad(theta_step))
self.thetas_counter = 0
self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])
self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])
self._desired_pose.position.z = z_height
self._desired_pose.orientation.x = 0
self._desired_pose.orientation.y = 0
self._desired_pose.orientation.z = 0
self._desired_pose.orientation.w = 1
self.speed = 1
def _trajectory_callback(self, event):
self.thetas_counter += 1
if self.thetas_counter == self.thetas.size - 1:
self.traj_finish = True
print("Trajectory is complete.")
self.trajectory_finish_pub.publish(True)
self.trajectory_timer.shutdown()
if not self.traj_finish:
self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])
self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])
# Publish new pose
self.trajectory_pub.publish(self._desired_pose)
class TriangleTrajectory(object):
    """Publish a pose stream tracing the triangle A -> B -> C -> A.

    The desired pose moves at constant velocity along each edge; when it
    comes within 0.5 units of the next vertex the direction switches to the
    following edge.  Poses are published on ``desired_goal`` and completion
    is announced on ``trajectory_finish``.
    """

    def __init__(self, point_a, point_b, point_c, z_height):
        self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
        self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
        self._current_pose = Pose()
        # Triangle vertices in traversal order.
        self.points = np.array([point_a, point_b, point_c])
        self._turn_count = 0
        # Velocity vector of the current edge; start on A -> B.
        self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]
        self._done_trajectory = False
        self._desired_pose = Pose()
        self._desired_pose.position.x = point_a[0]
        self._desired_pose.position.y = point_a[1]
        self._desired_pose.position.z = z_height
        self._desired_pose.orientation.x = 0
        self._desired_pose.orientation.y = 0
        self._desired_pose.orientation.z = 0
        self._desired_pose.orientation.w = 1
        # Publish the start pose a few times so subscribers pick it up.
        for _ in range(10):
            self.trajectory_pub.publish(self._desired_pose)
            rospy.sleep(0.1)
        self.prev_time = rospy.get_time()
        self.traj_finish = False
        # Timer drives the pose updates at 100 Hz.
        self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)

    def _change_direction(self):
        """Switch to the next edge whenever the current target vertex is reached."""
        if self._turn_count == 0:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached B: follow the B -> C edge.  (Bug fix: the original
                # re-assigned the A -> B vector here, so the pose overshot B
                # and never turned towards C.)
                self.del_vector = [(self.points[2][0] - self.points[1][0]),
                                   (self.points[2][1] - self.points[1][1])]
        if self._turn_count == 1:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached C: head back along C -> A.
                self.del_vector = [(self.points[0][0] - self.points[2][0]),
                                   (self.points[0][1] - self.points[2][1])]
        if self._turn_count == 2:
            if np.linalg.norm(self.points[0] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                # Back at A: trajectory complete, no further direction needed.
                self._turn_count += 1
        if self._turn_count == 3:
            print("Trajectory is complete.")
            self.traj_finish = True
            self.trajectory_finish_pub.publish(True)
            self.trajectory_timer.shutdown()

    def _trajectory_callback(self, event):
        """Timer callback: integrate the edge velocity and publish the pose."""
        if not self.traj_finish:
            current_time = rospy.get_time()
            delta_t = current_time - self.prev_time
            self.prev_time = current_time
            self._change_direction()
            self._desired_pose.position.x += self.del_vector[0] * delta_t
            self._desired_pose.position.y += self.del_vector[1] * delta_t
            self.trajectory_pub.publish(self._desired_pose)
class SquareTrajectory2(object):
    """Publish a pose stream tracing the square A -> B -> C -> D -> A.

    The desired pose moves at constant velocity along each edge; when it
    comes within 0.5 units of the next corner the direction switches to the
    following edge.  Poses are published on ``desired_goal`` and completion
    is announced on ``trajectory_finish``.
    """

    def __init__(self, point_a, point_b, point_c, point_d, z_height):
        self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
        self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
        self._current_pose = Pose()
        # Square corners in traversal order.
        self.points = [point_a, point_b, point_c, point_d]
        self._turn_count = 0
        # Velocity vector of the current edge; start on A -> B.
        self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]
        self._desired_pose = Pose()
        self._desired_pose.position.x = point_a[0]
        self._desired_pose.position.y = point_a[1]
        self._desired_pose.position.z = z_height
        self._desired_pose.orientation.x = 0
        self._desired_pose.orientation.y = 0
        self._desired_pose.orientation.z = 0
        self._desired_pose.orientation.w = 1
        # Publish the start pose a few times so subscribers pick it up.
        for _ in range(10):
            self.trajectory_pub.publish(self._desired_pose)
            rospy.sleep(0.1)
        self.prev_time = rospy.get_time()
        self.traj_finish = False
        # Timer drives the pose updates at 100 Hz.
        self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)

    def _change_direction(self):
        """Switch to the next edge whenever the current target corner is reached."""
        # Bug fix: every branch previously assigned the vector of the edge
        # *just travelled* instead of the next one, so after reaching B the
        # pose kept flying along A -> B and never turned.
        if self._turn_count == 0:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached B: follow the B -> C edge.
                self.del_vector = [(self.points[2][0] - self.points[1][0]),
                                   (self.points[2][1] - self.points[1][1])]
        if self._turn_count == 1:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached C: follow the C -> D edge.
                self.del_vector = [(self.points[3][0] - self.points[2][0]),
                                   (self.points[3][1] - self.points[2][1])]
        if self._turn_count == 2:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached D: head back along D -> A.
                self.del_vector = [(self.points[0][0] - self.points[3][0]),
                                   (self.points[0][1] - self.points[3][1])]
        if self._turn_count == 3:
            if np.linalg.norm(self.points[0] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                # Back at A: trajectory complete.
                self._turn_count += 1
        if self._turn_count == 4:
            print("Trajectory is complete.")
            self.traj_finish = True
            self.trajectory_finish_pub.publish(True)
            self.trajectory_timer.shutdown()

    def _trajectory_callback(self, event):
        """Timer callback: integrate the edge velocity and publish the pose."""
        if not self.traj_finish:
            current_time = rospy.get_time()
            delta_t = current_time - self.prev_time
            self.prev_time = current_time
            self._change_direction()
            self._desired_pose.position.x += self.del_vector[0] * delta_t
            self._desired_pose.position.y += self.del_vector[1] * delta_t
            self.trajectory_pub.publish(self._desired_pose)
if __name__ == '__main__':
    rospy.init_node("trajectory_generator")
    # Each experiment id selects a Z height for the trajectory plane.
    experiments = [7]
    for exp in experiments:
        x_offset = 5
        y_offset = 5
        # NOTE(review): the first two branches both yield 100; presumably one
        # was meant to differ -- confirm against the experiment plan.
        if exp in [1, 2, 3, 4, 5]:
            z_height = 100
        elif exp in [6, 7, 8, 9, 10]:
            z_height = 100
        else:
            z_height = 125
        radius = 2.0
        theta_step = 0.5
        print("Circle trajectory")
        circle_trajectory = CircleTrajectory(x_offset, y_offset, z_height, radius, theta_step)
        # Sleep between checks instead of busy-spinning at 100% CPU while the
        # timer-driven trajectory runs to completion (the original loop body
        # was a redundant `if ...: break` that never yielded the CPU).
        while not circle_trajectory.traj_finish:
            rospy.sleep(0.1)
    # point_a = [20, 20]
    # point_b = [20, 30]
    # point_c = [30, 20]
    # point_a = [-5, 0]
    # point_b = [-10, -5]
    # point_c = [5, 0]
    # if exp in [1, 2, 3, 4, 5]:
    #     z_height = 100
    # elif exp in [6, 7, 8, 9, 10]:
    #     z_height = 125
    # else:
    #     z_height = 125
    # print("Triangle trajectory")
    # triangle_trajectory = TriangleTrajectory(point_a, point_b, point_c, z_height)
    # while not triangle_trajectory.traj_finish:
    #     pass
    # point_a = [5, 0]
    # point_b = [-5, 0]
    # point_c = [-5, -5]
    # point_d = [5, -5]
    # if exp in [1, 2, 3, 4, 5]:
    #     z_height = 100
    # elif exp in [6, 7, 8, 9, 10]:
    #     z_height = 125
    # else:
    #     z_height = 125
    # print("Square trajectory")
    # square_trajectory = SquareTrajectory2(point_a, point_b, point_c, point_d, z_height)
    # while not square_trajectory.traj_finish:
    #     pass
| 41.778195 | 114 | 0.602268 | import rospy
from geometry_msgs.msg import Pose, Point
from std_msgs.msg import Bool
import numpy as np
import os
class CircleTrajectory(object):
    """Publish a circular stream of desired poses for a robot to follow.

    The poses lie on a circle of the given radius centred at
    ``(x_offset, y_offset)`` in the plane ``z = z_height``.  A 100 Hz ROS
    timer steps through pre-computed angles, publishing one pose per tick on
    the ``desired_goal`` topic and announcing completion on
    ``trajectory_finish``.
    """

    def __init__(self, x_offset, y_offset, z_height, radius, theta_step):
        # theta_step is the angular increment between poses, in degrees.
        self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
        self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
        self._current_pose = Pose()
        # Timer drives the pose updates every 10 ms.
        self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)
        self.traj_finish = False
        self._desired_pose = Pose()
        self.x_offset = x_offset
        self.y_offset = y_offset
        self.radius = radius
        # Pre-compute all angles around the circle.
        self.thetas = np.arange(0, 2 * np.pi, np.deg2rad(theta_step))
        self.thetas_counter = 0
        self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])
        self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])
        self._desired_pose.position.z = z_height
        # Identity orientation; only the position traces the circle.
        self._desired_pose.orientation.x = 0
        self._desired_pose.orientation.y = 0
        self._desired_pose.orientation.z = 0
        self._desired_pose.orientation.w = 1
        self.speed = 1

    def _trajectory_callback(self, event):
        # NOTE(review): the counter is incremented before publishing, so the
        # pose for thetas[0] (set in __init__) is never published by this
        # timer, and the run stops at size-1 so the last angle is skipped
        # too -- confirm whether that is intended.
        self.thetas_counter += 1
        if self.thetas_counter == self.thetas.size - 1:
            self.traj_finish = True
            print("Trajectory is complete.")
            self.trajectory_finish_pub.publish(True)
            self.trajectory_timer.shutdown()
        if not self.traj_finish:
            self._desired_pose.position.x = self.x_offset + self.radius * np.cos(self.thetas[self.thetas_counter])
            self._desired_pose.position.y = self.y_offset + self.radius * np.sin(self.thetas[self.thetas_counter])
            self.trajectory_pub.publish(self._desired_pose)
class TriangleTrajectory(object):
    """Publish a pose stream tracing the triangle A -> B -> C -> A.

    The desired pose moves at constant velocity along each edge; when it
    comes within 0.5 units of the next vertex the direction switches to the
    following edge.  Poses are published on ``desired_goal`` and completion
    is announced on ``trajectory_finish``.
    """

    def __init__(self, point_a, point_b, point_c, z_height):
        self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
        self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
        self._current_pose = Pose()
        # Triangle vertices in traversal order.
        self.points = np.array([point_a, point_b, point_c])
        self._turn_count = 0
        # Velocity vector of the current edge; start on A -> B.
        self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]
        self._done_trajectory = False
        self._desired_pose = Pose()
        self._desired_pose.position.x = point_a[0]
        self._desired_pose.position.y = point_a[1]
        self._desired_pose.position.z = z_height
        self._desired_pose.orientation.x = 0
        self._desired_pose.orientation.y = 0
        self._desired_pose.orientation.z = 0
        self._desired_pose.orientation.w = 1
        # Publish the start pose a few times so subscribers pick it up.
        for _ in range(10):
            self.trajectory_pub.publish(self._desired_pose)
            rospy.sleep(0.1)
        self.prev_time = rospy.get_time()
        self.traj_finish = False
        # Timer drives the pose updates at 100 Hz.
        self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)

    def _change_direction(self):
        """Switch to the next edge whenever the current target vertex is reached."""
        if self._turn_count == 0:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached B: follow the B -> C edge.  (Bug fix: the original
                # re-assigned the A -> B vector here, so the pose overshot B
                # and never turned towards C.)
                self.del_vector = [(self.points[2][0] - self.points[1][0]),
                                   (self.points[2][1] - self.points[1][1])]
        if self._turn_count == 1:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached C: head back along C -> A.
                self.del_vector = [(self.points[0][0] - self.points[2][0]),
                                   (self.points[0][1] - self.points[2][1])]
        if self._turn_count == 2:
            if np.linalg.norm(self.points[0] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                # Back at A: trajectory complete, no further direction needed.
                self._turn_count += 1
        if self._turn_count == 3:
            print("Trajectory is complete.")
            self.traj_finish = True
            self.trajectory_finish_pub.publish(True)
            self.trajectory_timer.shutdown()

    def _trajectory_callback(self, event):
        """Timer callback: integrate the edge velocity and publish the pose."""
        if not self.traj_finish:
            current_time = rospy.get_time()
            delta_t = current_time - self.prev_time
            self.prev_time = current_time
            self._change_direction()
            self._desired_pose.position.x += self.del_vector[0] * delta_t
            self._desired_pose.position.y += self.del_vector[1] * delta_t
            self.trajectory_pub.publish(self._desired_pose)
class SquareTrajectory2(object):
    """Publish a pose stream tracing the square A -> B -> C -> D -> A.

    The desired pose moves at constant velocity along each edge; when it
    comes within 0.5 units of the next corner the direction switches to the
    following edge.  Poses are published on ``desired_goal`` and completion
    is announced on ``trajectory_finish``.
    """

    def __init__(self, point_a, point_b, point_c, point_d, z_height):
        self.trajectory_pub = rospy.Publisher("desired_goal", Pose, queue_size=10)
        self.trajectory_finish_pub = rospy.Publisher("trajectory_finish", Bool, queue_size=10)
        self._current_pose = Pose()
        # Square corners in traversal order.
        self.points = [point_a, point_b, point_c, point_d]
        self._turn_count = 0
        # Velocity vector of the current edge; start on A -> B.
        self.del_vector = [(self.points[1][0] - self.points[0][0]), (self.points[1][1] - self.points[0][1])]
        self._desired_pose = Pose()
        self._desired_pose.position.x = point_a[0]
        self._desired_pose.position.y = point_a[1]
        self._desired_pose.position.z = z_height
        self._desired_pose.orientation.x = 0
        self._desired_pose.orientation.y = 0
        self._desired_pose.orientation.z = 0
        self._desired_pose.orientation.w = 1
        # Publish the start pose a few times so subscribers pick it up.
        for _ in range(10):
            self.trajectory_pub.publish(self._desired_pose)
            rospy.sleep(0.1)
        self.prev_time = rospy.get_time()
        self.traj_finish = False
        # Timer drives the pose updates at 100 Hz.
        self.trajectory_timer = rospy.Timer(rospy.Duration(0.01), self._trajectory_callback)

    def _change_direction(self):
        """Switch to the next edge whenever the current target corner is reached."""
        # Bug fix: every branch previously assigned the vector of the edge
        # *just travelled* instead of the next one, so after reaching B the
        # pose kept flying along A -> B and never turned.
        if self._turn_count == 0:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached B: follow the B -> C edge.
                self.del_vector = [(self.points[2][0] - self.points[1][0]),
                                   (self.points[2][1] - self.points[1][1])]
        if self._turn_count == 1:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached C: follow the C -> D edge.
                self.del_vector = [(self.points[3][0] - self.points[2][0]),
                                   (self.points[3][1] - self.points[2][1])]
        if self._turn_count == 2:
            if np.linalg.norm(self.points[self._turn_count + 1] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                self._turn_count += 1
                # Reached D: head back along D -> A.
                self.del_vector = [(self.points[0][0] - self.points[3][0]),
                                   (self.points[0][1] - self.points[3][1])]
        if self._turn_count == 3:
            if np.linalg.norm(self.points[0] - np.array(
                    [self._desired_pose.position.x, self._desired_pose.position.y])) < 0.5:
                # Back at A: trajectory complete.
                self._turn_count += 1
        if self._turn_count == 4:
            print("Trajectory is complete.")
            self.traj_finish = True
            self.trajectory_finish_pub.publish(True)
            self.trajectory_timer.shutdown()

    def _trajectory_callback(self, event):
        """Timer callback: integrate the edge velocity and publish the pose."""
        if not self.traj_finish:
            current_time = rospy.get_time()
            delta_t = current_time - self.prev_time
            self.prev_time = current_time
            self._change_direction()
            self._desired_pose.position.x += self.del_vector[0] * delta_t
            self._desired_pose.position.y += self.del_vector[1] * delta_t
            self.trajectory_pub.publish(self._desired_pose)
if __name__ == '__main__':
    rospy.init_node("trajectory_generator")
    # Each experiment id selects a Z height for the trajectory plane.
    experiments = [7]
    for exp in experiments:
        x_offset = 5
        y_offset = 5
        # NOTE(review): the first two branches both yield 100; presumably one
        # was meant to differ -- confirm against the experiment plan.
        if exp in [1, 2, 3, 4, 5]:
            z_height = 100
        elif exp in [6, 7, 8, 9, 10]:
            z_height = 100
        else:
            z_height = 125
        radius = 2.0
        theta_step = 0.5
        print("Circle trajectory")
        circle_trajectory = CircleTrajectory(x_offset, y_offset, z_height, radius, theta_step)
        # Sleep between checks instead of busy-spinning at 100% CPU while the
        # timer-driven trajectory runs to completion (the original loop body
        # was a redundant `if ...: break` that never yielded the CPU).
        while not circle_trajectory.traj_finish:
            rospy.sleep(0.1)
f72d6285375c24c78245dbcf07e15d8c189eb8b6 | 59 | py | Python | app/repository/services.py | maestro-server/data-app | cde6479cc84fe410220b34742772d5017571e3d3 | [
"Apache-2.0"
] | null | null | null | app/repository/services.py | maestro-server/data-app | cde6479cc84fe410220b34742772d5017571e3d3 | [
"Apache-2.0"
] | 1 | 2019-11-21T17:06:31.000Z | 2019-11-21T17:06:31.000Z | app/repository/services.py | maestro-server/data-app | cde6479cc84fe410220b34742772d5017571e3d3 | [
"Apache-2.0"
] | null | null | null | from .model import Model
class Services(Model):
    """Repository for service records; all behaviour is inherited from Model."""
    pass
| 9.833333 | 24 | 0.711864 | from .model import Model
class Services(Model):
    """Repository for service records; all behaviour is inherited from Model."""
    pass
| true | true |
f72d629fff0e039793bbb803e5a71873269a33db | 2,269 | py | Python | shadowray/core/server.py | shunf4/Shadowray | 3ec2e69a9b079e051983f7d84252ba787ce933a2 | [
"MIT"
] | 30 | 2019-02-25T23:20:20.000Z | 2021-06-29T02:31:39.000Z | shadowray/core/server.py | shunf4/Shadowray | 3ec2e69a9b079e051983f7d84252ba787ce933a2 | [
"MIT"
] | 4 | 2019-06-15T02:15:37.000Z | 2020-02-19T08:05:43.000Z | shadowray/core/server.py | shunf4/Shadowray | 3ec2e69a9b079e051983f7d84252ba787ce933a2 | [
"MIT"
] | 7 | 2019-06-14T13:04:27.000Z | 2021-06-11T02:28:52.000Z | import json
from shadowray.config.v2ray import SERVER_FILE
from shadowray.config.v2ray import SERVER_KEY_FROM_SUBSCRIBE, SERVER_KEY_FROM_ORIGINAL
class Server:
def __init__(self, filename=None):
self.__servers = json.loads('{"servers_subscribe": [] ,"servers_original": []}')
self.__filename = SERVER_FILE
if filename is not None:
f = open(filename, 'r')
self.__servers = json.load(f)
f.close()
self.__filename = filename
def save(self, filename=None):
if filename is None:
filename = self.__filename
f = open(filename, 'w')
f.write(json.dumps(self.__servers))
f.close()
def add(self, protocol, config, ps, key, host):
self.__servers[key].append({
"protocol": protocol,
"config": config,
"ps": ps,
"host": host
})
def get(self, index):
if self.__servers is None:
return None
return self.__servers[index]
def get_servers(self):
return self.__servers
@property
def original_servers_number(self):
return len(self.__servers[SERVER_KEY_FROM_ORIGINAL])
@property
def subscribe_servers_number(self):
return len(self.__servers[SERVER_KEY_FROM_SUBSCRIBE])
@property
def servers_number(self):
return self.subscribe_servers_number + self.original_servers_number
def get_server(self, index):
if index >= self.servers_number:
print("Index out of range.")
return None
if index < self.original_servers_number:
return self.__servers[SERVER_KEY_FROM_ORIGINAL][index]
else:
return self.__servers[SERVER_KEY_FROM_SUBSCRIBE][index - self.original_servers_number]
def get_config(self, index):
if index >= self.servers_number:
print("Index out of range.")
return None
if index < self.original_servers_number:
return self.__servers[SERVER_KEY_FROM_ORIGINAL][index]['config']
else:
return self.__servers[SERVER_KEY_FROM_SUBSCRIBE][index - self.original_servers_number]['config']
def clear(self, key):
self.__servers[key].clear()
| 30.253333 | 108 | 0.629352 | import json
from shadowray.config.v2ray import SERVER_FILE
from shadowray.config.v2ray import SERVER_KEY_FROM_SUBSCRIBE, SERVER_KEY_FROM_ORIGINAL
class Server:
def __init__(self, filename=None):
self.__servers = json.loads('{"servers_subscribe": [] ,"servers_original": []}')
self.__filename = SERVER_FILE
if filename is not None:
f = open(filename, 'r')
self.__servers = json.load(f)
f.close()
self.__filename = filename
def save(self, filename=None):
if filename is None:
filename = self.__filename
f = open(filename, 'w')
f.write(json.dumps(self.__servers))
f.close()
def add(self, protocol, config, ps, key, host):
self.__servers[key].append({
"protocol": protocol,
"config": config,
"ps": ps,
"host": host
})
def get(self, index):
if self.__servers is None:
return None
return self.__servers[index]
def get_servers(self):
return self.__servers
@property
def original_servers_number(self):
return len(self.__servers[SERVER_KEY_FROM_ORIGINAL])
@property
def subscribe_servers_number(self):
return len(self.__servers[SERVER_KEY_FROM_SUBSCRIBE])
@property
def servers_number(self):
return self.subscribe_servers_number + self.original_servers_number
def get_server(self, index):
if index >= self.servers_number:
print("Index out of range.")
return None
if index < self.original_servers_number:
return self.__servers[SERVER_KEY_FROM_ORIGINAL][index]
else:
return self.__servers[SERVER_KEY_FROM_SUBSCRIBE][index - self.original_servers_number]
def get_config(self, index):
if index >= self.servers_number:
print("Index out of range.")
return None
if index < self.original_servers_number:
return self.__servers[SERVER_KEY_FROM_ORIGINAL][index]['config']
else:
return self.__servers[SERVER_KEY_FROM_SUBSCRIBE][index - self.original_servers_number]['config']
def clear(self, key):
self.__servers[key].clear()
| true | true |
f72d6339cd3ded535fd21c00b1fa0263e5217447 | 15,849 | py | Python | napari/layers/surface/surface.py | truatpasteurdotfr/napari | 06ba5f3ebc964d83169f786f734b1b1c9609592e | [
"BSD-3-Clause"
] | 1 | 2021-12-14T14:07:40.000Z | 2021-12-14T14:07:40.000Z | napari/layers/surface/surface.py | maweigert/napari | 48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0 | [
"BSD-3-Clause"
] | null | null | null | napari/layers/surface/surface.py | maweigert/napari | 48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0 | [
"BSD-3-Clause"
] | 1 | 2019-01-12T21:04:14.000Z | 2019-01-12T21:04:14.000Z | import warnings
import numpy as np
from ...utils.colormaps import AVAILABLE_COLORMAPS
from ...utils.events import Event
from ...utils.translations import trans
from ..base import Layer
from ..intensity_mixin import IntensityVisualizationMixin
from ..utils.layer_utils import calc_data_range
from ._surface_constants import Shading
from .normals import SurfaceNormals
from .wireframe import SurfaceWireframe
# Mixin must come before Layer
class Surface(IntensityVisualizationMixin, Layer):
"""
Surface layer renders meshes onto the canvas.
Parameters
----------
data : 2-tuple or 3-tuple of array
The first element of the tuple is an (N, D) array of vertices of
mesh triangles. The second is an (M, 3) array of int of indices
of the mesh triangles. The optional third element is the
(K0, ..., KL, N) array of values used to color vertices where the
additional L dimensions are used to color the same mesh with
different values. If not provided, it defaults to ones.
colormap : str, napari.utils.Colormap, tuple, dict
Colormap to use for luminance images. If a string must be the name
of a supported colormap from vispy or matplotlib. If a tuple the
first value must be a string to assign as a name to a colormap and
the second item must be a Colormap. If a dict the key must be a
string to assign as a name to a colormap and the value must be a
Colormap.
contrast_limits : list (2,)
Color limits to be used for determining the colormap bounds for
luminance images. If not passed is calculated as the min and max of
the image.
gamma : float
Gamma correction for determining colormap linearity. Defaults to 1.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
rotate : float, 3-tuple of float, or n-D array.
If a float convert into a 2D rotation matrix using that value as an
angle. If 3-tuple convert into a 3D rotation matrix, using a yaw,
pitch, roll convention. Otherwise assume an nD rotation. Angles are
assumed to be in degrees. They can be converted from radians with
np.degrees if needed.
shear : 1-D array or n-D array
Either a vector of upper triangular values, or an nD shear matrix with
ones along the main diagonal.
affine : n-D array or napari.utils.transforms.Affine
(N+1, N+1) affine transformation matrix in homogeneous coordinates.
The first (N, N) entries correspond to a linear transform and
the final column is a length N translation vector and a 1 or a napari
`Affine` transform object. Applied as an extra transform on top of the
provided scale, rotate, and shear values.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
shading : str, Shading
One of a list of preset shading modes that determine the lighting model
using when rendering the surface in 3D.
* ``Shading.NONE``
Corresponds to ``shading='none'``.
* ``Shading.FLAT``
Corresponds to ``shading='flat'``.
* ``Shading.SMOOTH``
Corresponds to ``shading='smooth'``.
visible : bool
Whether the layer visual is currently being displayed.
cache : bool
Whether slices of out-of-core datasets should be cached upon retrieval.
Currently, this only applies to dask arrays.
wireframe : dict or SurfaceWireframe
Whether and how to display the edges of the surface mesh with a wireframe.
normals : dict or SurfaceNormals
Whether and how to display the face and vertex normals of the surface mesh.
Attributes
----------
data : 3-tuple of array
The first element of the tuple is an (N, D) array of vertices of
mesh triangles. The second is an (M, 3) array of int of indices
of the mesh triangles. The third element is the (K0, ..., KL, N)
array of values used to color vertices where the additional L
dimensions are used to color the same mesh with different values.
vertices : (N, D) array
Vertices of mesh triangles.
faces : (M, 3) array of int
Indices of mesh triangles.
vertex_values : (K0, ..., KL, N) array
Values used to color vertices.
colormap : str, napari.utils.Colormap, tuple, dict
Colormap to use for luminance images. If a string must be the name
of a supported colormap from vispy or matplotlib. If a tuple the
first value must be a string to assign as a name to a colormap and
the second item must be a Colormap. If a dict the key must be a
string to assign as a name to a colormap and the value must be a
Colormap.
contrast_limits : list (2,)
Color limits to be used for determining the colormap bounds for
luminance images. If not passed is calculated as the min and max of
the image.
shading: str
One of a list of preset shading modes that determine the lighting model
using when rendering the surface.
* ``'none'``
* ``'flat'``
* ``'smooth'``
gamma : float
Gamma correction for determining colormap linearity.
wireframe : SurfaceWireframe
Whether and how to display the edges of the surface mesh with a wireframe.
normals : SurfaceNormals
Whether and how to display the face and vertex normals of the surface mesh.
Notes
-----
_data_view : (M, 2) or (M, 3) array
The coordinates of the vertices given the viewed dimensions.
_view_faces : (P, 3) array
The integer indices of the vertices that form the triangles
in the currently viewed slice.
_colorbar : array
Colorbar for current colormap.
"""
_colormaps = AVAILABLE_COLORMAPS
def __init__(
self,
data,
*,
colormap='gray',
contrast_limits=None,
gamma=1,
name=None,
metadata=None,
scale=None,
translate=None,
rotate=None,
shear=None,
affine=None,
opacity=1,
blending='translucent',
shading='flat',
visible=True,
cache=True,
experimental_clipping_planes=None,
wireframe=None,
normals=None,
):
ndim = data[0].shape[1]
super().__init__(
data,
ndim,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
rotate=rotate,
shear=shear,
affine=affine,
opacity=opacity,
blending=blending,
visible=visible,
cache=cache,
experimental_clipping_planes=experimental_clipping_planes,
)
self.events.add(
interpolation=Event,
rendering=Event,
shading=Event,
)
# assign mesh data and establish default behavior
if len(data) not in (2, 3):
raise ValueError(
trans._(
'Surface data tuple must be 2 or 3, specifying verictes, faces, and optionally vertex values, instead got length {length}.',
deferred=True,
length=len(data),
)
)
self._vertices = data[0]
self._faces = data[1]
if len(data) == 3:
self._vertex_values = data[2]
else:
self._vertex_values = np.ones(len(self._vertices))
# Set contrast_limits and colormaps
self._gamma = gamma
if contrast_limits is None:
self._contrast_limits_range = calc_data_range(self._vertex_values)
else:
self._contrast_limits_range = contrast_limits
self._contrast_limits = tuple(self._contrast_limits_range)
self.colormap = colormap
self.contrast_limits = self._contrast_limits
# Data containing vectors in the currently viewed slice
self._data_view = np.zeros((0, self._ndisplay))
self._view_faces = np.zeros((0, 3))
self._view_vertex_values = []
# Trigger generation of view slice and thumbnail
self._update_dims()
# Shading mode
self._shading = shading
self.wireframe = wireframe or SurfaceWireframe()
self.normals = normals or SurfaceNormals()
    def _calc_data_range(self, mode='data'):
        # Contrast limits are derived from the per-vertex scalar values; the
        # ``mode`` argument is accepted for interface compatibility but unused.
        return calc_data_range(self.vertex_values)
    @property
    def dtype(self):
        # dtype of the per-vertex scalar values (what gets colormapped).
        return self.vertex_values.dtype
    @property
    def data(self):
        """(vertices, faces, vertex_values) triple describing the mesh."""
        return (self.vertices, self.faces, self.vertex_values)

    @data.setter
    def data(self, data):
        # Accept a 2-tuple (vertices, faces) or a 3-tuple that additionally
        # carries per-vertex values.
        if len(data) not in (2, 3):
            raise ValueError(
                trans._(
                    'Surface data tuple must be 2 or 3, specifying vertices, faces, and optionally vertex values, instead got length {data_length}.',
                    deferred=True,
                    data_length=len(data),
                )
            )
        self._vertices = data[0]
        self._faces = data[1]
        if len(data) == 3:
            self._vertex_values = data[2]
        else:
            # Default to uniform values when none are provided.
            self._vertex_values = np.ones(len(self._vertices))

        self._update_dims()
        # Notify listeners after all three arrays are in place.
        self.events.data(value=self.data)
        if self._keep_auto_contrast:
            self.reset_contrast_limits()
    @property
    def vertices(self):
        """(N, D) array of mesh vertex coordinates."""
        return self._vertices

    @vertices.setter
    def vertices(self, vertices):
        """Array of vertices of mesh triangles."""
        self._vertices = vertices
        # Vertices define the layer extent, so dims must be recomputed
        # before refreshing the view.
        self._update_dims()
        self.refresh()
        self.events.data(value=self.data)
        self._set_editable()
    @property
    def vertex_values(self) -> np.ndarray:
        """(K0, ..., KL, N) array of scalar values used to color vertices."""
        return self._vertex_values

    @vertex_values.setter
    def vertex_values(self, vertex_values: np.ndarray):
        """Array of values used to color vertices."""
        self._vertex_values = vertex_values
        # NOTE(review): unlike the vertices setter this does not call
        # _update_dims(), even though extra leading value dimensions
        # contribute to the layer's dims -- confirm this is intended.
        self.refresh()
        self.events.data(value=self.data)
        self._set_editable()
@property
def faces(self) -> np.ndarray:
return self._faces
@faces.setter
def faces(self, faces: np.ndarray):
"""Array of indices of mesh triangles.."""
self.faces = faces
self.refresh()
self.events.data(value=self.data)
self._set_editable()
    def _get_ndim(self):
        """Determine number of dimensions of the layer."""
        # Spatial dims from the vertex coordinates plus any leading
        # vertex_values dimensions used to color the same mesh with
        # multiple value sets.
        return self.vertices.shape[1] + (self.vertex_values.ndim - 1)
    @property
    def _extent_data(self) -> np.ndarray:
        """Extent of layer in data coordinates.

        Returns
        -------
        extent_data : array, shape (2, D)
        """
        if len(self.vertices) == 0:
            # No vertices: extent is undefined, signalled with NaNs.
            extrema = np.full((2, self.ndim), np.nan)
        else:
            maxs = np.max(self.vertices, axis=0)
            mins = np.min(self.vertices, axis=0)

            # The full dimensionality and shape of the layer is determined by
            # the number of additional vertex value dimensions and the
            # dimensionality of the vertices themselves
            if self.vertex_values.ndim > 1:
                # Leading value dimensions span [0, shape) in each axis.
                mins = [0] * (self.vertex_values.ndim - 1) + list(mins)
                maxs = list(self.vertex_values.shape[:-1]) + list(maxs)

            extrema = np.vstack([mins, maxs])
        return extrema
    @property
    def shading(self):
        # Returned as a plain string (e.g. 'flat') rather than the enum member.
        return str(self._shading)

    @shading.setter
    def shading(self, shading):
        # Accept either a Shading enum member or its string value.
        if isinstance(shading, Shading):
            self._shading = shading
        else:
            self._shading = Shading(shading)
        self.events.shading(value=self._shading)
    def _get_state(self):
        """Get dictionary of layer state.

        Returns
        -------
        state : dict
            Dictionary of layer state.
        """
        state = self._get_base_state()
        state.update(
            {
                'colormap': self.colormap.name,
                'contrast_limits': self.contrast_limits,
                'gamma': self.gamma,
                'shading': self.shading,
                'data': self.data,
                # wireframe/normals are serialised via their .dict() form.
                'wireframe': self.wireframe.dict(),
                'normals': self.normals.dict(),
            }
        )
        return state
    def _set_view_slice(self):
        """Sets the view given the indices to slice with.

        Populates ``_data_view`` (displayed vertex coordinates),
        ``_view_faces`` (faces visible in the current slice) and
        ``_view_vertex_values`` (values for the current slice).
        """
        N, vertex_ndim = self.vertices.shape
        # Number of extra leading axes carried by vertex_values.
        values_ndim = self.vertex_values.ndim - 1
        # Take vertex_values dimensionality into account if more than one value
        # is provided per vertex.
        if values_ndim > 0:
            # Get indices for axes corresponding to values dimensions
            values_indices = self._slice_indices[:-vertex_ndim]
            values = self.vertex_values[values_indices]
            if values.ndim > 1:
                # More than one value per vertex remains after slicing:
                # warn and blank the view rather than render ambiguously.
                warnings.warn(
                    trans._(
                        "Assigning multiple values per vertex after slicing is not allowed. All dimensions corresponding to vertex_values must be non-displayed dimensions. Data will not be visible.",
                        deferred=True,
                    )
                )
                self._data_view = np.zeros((0, self._ndisplay))
                self._view_faces = np.zeros((0, 3))
                self._view_vertex_values = []
                return
            self._view_vertex_values = values
            # Determine which axes of the vertices data are being displayed
            # and not displayed, ignoring the additional dimensions
            # corresponding to the vertex_values.
            indices = np.array(self._slice_indices[-vertex_ndim:])
            disp = [
                d
                for d in np.subtract(self._dims_displayed, values_ndim)
                if d >= 0
            ]
            not_disp = [
                d
                for d in np.subtract(self._dims_not_displayed, values_ndim)
                if d >= 0
            ]
        else:
            # One value per vertex: slice indices map to vertex axes directly.
            self._view_vertex_values = self.vertex_values
            indices = np.array(self._slice_indices)
            not_disp = list(self._dims_not_displayed)
            disp = list(self._dims_displayed)
        self._data_view = self.vertices[:, disp]
        if len(self.vertices) == 0:
            self._view_faces = np.zeros((0, 3))
        elif vertex_ndim > self._ndisplay:
            # Keep only faces whose every vertex lies exactly in the current
            # slice along the non-displayed axes.
            vertices = self.vertices[:, not_disp].astype('int')
            triangles = vertices[self.faces]
            matches = np.all(triangles == indices[not_disp], axis=(1, 2))
            matches = np.where(matches)[0]
            if len(matches) == 0:
                self._view_faces = np.zeros((0, 3))
            else:
                self._view_faces = self.faces[matches]
        else:
            self._view_faces = self.faces
        if self._keep_auto_contrast:
            self.reset_contrast_limits()
    def _update_thumbnail(self):
        """Update thumbnail with current surface.

        Intentionally a no-op: no thumbnail is generated for this layer.
        """
        pass
def _get_value(self, position):
"""Value of the data at a position in data coordinates.
Parameters
----------
position : tuple
Position in data coordinates.
Returns
-------
value : None
Value of the data at the coord.
"""
return None
| 35.141907 | 199 | 0.600921 | import warnings
import numpy as np
from ...utils.colormaps import AVAILABLE_COLORMAPS
from ...utils.events import Event
from ...utils.translations import trans
from ..base import Layer
from ..intensity_mixin import IntensityVisualizationMixin
from ..utils.layer_utils import calc_data_range
from ._surface_constants import Shading
from .normals import SurfaceNormals
from .wireframe import SurfaceWireframe
class Surface(IntensityVisualizationMixin, Layer):
_colormaps = AVAILABLE_COLORMAPS
def __init__(
self,
data,
*,
colormap='gray',
contrast_limits=None,
gamma=1,
name=None,
metadata=None,
scale=None,
translate=None,
rotate=None,
shear=None,
affine=None,
opacity=1,
blending='translucent',
shading='flat',
visible=True,
cache=True,
experimental_clipping_planes=None,
wireframe=None,
normals=None,
):
ndim = data[0].shape[1]
super().__init__(
data,
ndim,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
rotate=rotate,
shear=shear,
affine=affine,
opacity=opacity,
blending=blending,
visible=visible,
cache=cache,
experimental_clipping_planes=experimental_clipping_planes,
)
self.events.add(
interpolation=Event,
rendering=Event,
shading=Event,
)
if len(data) not in (2, 3):
raise ValueError(
trans._(
'Surface data tuple must be 2 or 3, specifying verictes, faces, and optionally vertex values, instead got length {length}.',
deferred=True,
length=len(data),
)
)
self._vertices = data[0]
self._faces = data[1]
if len(data) == 3:
self._vertex_values = data[2]
else:
self._vertex_values = np.ones(len(self._vertices))
self._gamma = gamma
if contrast_limits is None:
self._contrast_limits_range = calc_data_range(self._vertex_values)
else:
self._contrast_limits_range = contrast_limits
self._contrast_limits = tuple(self._contrast_limits_range)
self.colormap = colormap
self.contrast_limits = self._contrast_limits
self._data_view = np.zeros((0, self._ndisplay))
self._view_faces = np.zeros((0, 3))
self._view_vertex_values = []
self._update_dims()
self._shading = shading
self.wireframe = wireframe or SurfaceWireframe()
self.normals = normals or SurfaceNormals()
def _calc_data_range(self, mode='data'):
return calc_data_range(self.vertex_values)
@property
def dtype(self):
return self.vertex_values.dtype
@property
def data(self):
return (self.vertices, self.faces, self.vertex_values)
@data.setter
def data(self, data):
if len(data) not in (2, 3):
raise ValueError(
trans._(
'Surface data tuple must be 2 or 3, specifying vertices, faces, and optionally vertex values, instead got length {data_length}.',
deferred=True,
data_length=len(data),
)
)
self._vertices = data[0]
self._faces = data[1]
if len(data) == 3:
self._vertex_values = data[2]
else:
self._vertex_values = np.ones(len(self._vertices))
self._update_dims()
self.events.data(value=self.data)
if self._keep_auto_contrast:
self.reset_contrast_limits()
@property
def vertices(self):
return self._vertices
@vertices.setter
def vertices(self, vertices):
self._vertices = vertices
self._update_dims()
self.refresh()
self.events.data(value=self.data)
self._set_editable()
@property
def vertex_values(self) -> np.ndarray:
return self._vertex_values
@vertex_values.setter
def vertex_values(self, vertex_values: np.ndarray):
self._vertex_values = vertex_values
self.refresh()
self.events.data(value=self.data)
self._set_editable()
@property
def faces(self) -> np.ndarray:
return self._faces
@faces.setter
def faces(self, faces: np.ndarray):
self.faces = faces
self.refresh()
self.events.data(value=self.data)
self._set_editable()
def _get_ndim(self):
return self.vertices.shape[1] + (self.vertex_values.ndim - 1)
@property
def _extent_data(self) -> np.ndarray:
if len(self.vertices) == 0:
extrema = np.full((2, self.ndim), np.nan)
else:
maxs = np.max(self.vertices, axis=0)
mins = np.min(self.vertices, axis=0)
if self.vertex_values.ndim > 1:
mins = [0] * (self.vertex_values.ndim - 1) + list(mins)
maxs = list(self.vertex_values.shape[:-1]) + list(maxs)
extrema = np.vstack([mins, maxs])
return extrema
@property
def shading(self):
return str(self._shading)
@shading.setter
def shading(self, shading):
if isinstance(shading, Shading):
self._shading = shading
else:
self._shading = Shading(shading)
self.events.shading(value=self._shading)
def _get_state(self):
state = self._get_base_state()
state.update(
{
'colormap': self.colormap.name,
'contrast_limits': self.contrast_limits,
'gamma': self.gamma,
'shading': self.shading,
'data': self.data,
'wireframe': self.wireframe.dict(),
'normals': self.normals.dict(),
}
)
return state
def _set_view_slice(self):
N, vertex_ndim = self.vertices.shape
values_ndim = self.vertex_values.ndim - 1
if values_ndim > 0:
values_indices = self._slice_indices[:-vertex_ndim]
values = self.vertex_values[values_indices]
if values.ndim > 1:
warnings.warn(
trans._(
"Assigning multiple values per vertex after slicing is not allowed. All dimensions corresponding to vertex_values must be non-displayed dimensions. Data will not be visible.",
deferred=True,
)
)
self._data_view = np.zeros((0, self._ndisplay))
self._view_faces = np.zeros((0, 3))
self._view_vertex_values = []
return
self._view_vertex_values = values
indices = np.array(self._slice_indices[-vertex_ndim:])
disp = [
d
for d in np.subtract(self._dims_displayed, values_ndim)
if d >= 0
]
not_disp = [
d
for d in np.subtract(self._dims_not_displayed, values_ndim)
if d >= 0
]
else:
self._view_vertex_values = self.vertex_values
indices = np.array(self._slice_indices)
not_disp = list(self._dims_not_displayed)
disp = list(self._dims_displayed)
self._data_view = self.vertices[:, disp]
if len(self.vertices) == 0:
self._view_faces = np.zeros((0, 3))
elif vertex_ndim > self._ndisplay:
vertices = self.vertices[:, not_disp].astype('int')
triangles = vertices[self.faces]
matches = np.all(triangles == indices[not_disp], axis=(1, 2))
matches = np.where(matches)[0]
if len(matches) == 0:
self._view_faces = np.zeros((0, 3))
else:
self._view_faces = self.faces[matches]
else:
self._view_faces = self.faces
if self._keep_auto_contrast:
self.reset_contrast_limits()
def _update_thumbnail(self):
pass
def _get_value(self, position):
return None
| true | true |
f72d638bd51100a4bc9f891fd1b36e89b5cb1ce3 | 596 | py | Python | env/lib/python3.8/site-packages/plotly/validators/funnel/textfont/_family.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/funnel/textfont/_family.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/funnel/textfont/_family.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the plotly ``funnel.textfont.family`` property."""

    def __init__(self, plotly_name="family", parent_name="funnel.textfont", **kwargs):
        # Each option is popped from kwargs so explicit caller overrides win
        # over the defaults below before being forwarded to StringValidator.
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            no_blank=kwargs.pop("no_blank", True),
            role=kwargs.pop("role", "style"),
            strict=kwargs.pop("strict", True),
            **kwargs
        )
| 37.25 | 86 | 0.630872 | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="funnel.textfont", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
| true | true |
f72d67de22841e398e607b96810cbb106f887d88 | 16,892 | py | Python | nni/compression/pytorch/utils/mask_conflict.py | ggzhang0071/nni | f4145e62d89c3ca383cf00f2de5dfd2d1025ad92 | [
"MIT"
] | 9,680 | 2019-05-07T01:42:30.000Z | 2022-03-31T16:48:33.000Z | nni/compression/pytorch/utils/mask_conflict.py | soma2000-lang/nni | eaad98528c7aa714c9848800d607d6aa3bdd531d | [
"MIT"
] | 1,957 | 2019-05-06T21:44:21.000Z | 2022-03-31T09:21:53.000Z | nni/compression/pytorch/utils/mask_conflict.py | soma2000-lang/nni | eaad98528c7aa714c9848800d607d6aa3bdd531d | [
"MIT"
] | 1,571 | 2019-05-07T06:42:55.000Z | 2022-03-31T03:19:24.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import logging
import torch
import numpy as np
from .shape_dependency import ChannelDependency, GroupDependency, InputChannelDependency
from .utils import get_module_by_name
# logging.basicConfig(level = logging.DEBUG)
_logger = logging.getLogger('FixMaskConflict')
def fix_mask_conflict(masks, model, dummy_input, traced=None):
    """
    MaskConflict fix the mask conflict for the channel dependencies
    and group dependency.

    Parameters
    ----------
    masks : dict/str
        A dict object that stores the masks or the path of the mask file
    model : torch.nn.Module
        model to fix the mask conflict
    dummy_input : torch.Tensor/list of tensors/dict of tensors
        input example to trace the model
    traced : torch._C.torch.jit.TopLevelTracedModule
        the traced model of the target model; if this parameter is not None,
        we do not use the model and dummy_input to get the trace graph.

    Returns
    -------
    dict
        The masks with group and channel conflicts resolved.
    """
    if isinstance(masks, str):
        # if the input is the path of the mask_file
        assert os.path.exists(masks)
        masks = torch.load(masks)
    assert len(masks) > 0, 'Mask tensor cannot be empty'
    # If the user passed a model and dummy_input, trace the model once here
    # so that GroupMaskConflict and ChannelMaskConflict can both reuse the
    # same traced graph instead of tracing twice.
    if traced is None:
        assert model is not None and dummy_input is not None
        training = model.training
        # We need to trace the model in eval mode
        model.eval()
        kw_args = {}
        if torch.__version__ >= '1.6.0':
            # only pytorch with version greater than 1.6.0 has the strict option
            kw_args['strict'] = False
        traced = torch.jit.trace(model, dummy_input, **kw_args)
        model.train(training)
    fix_group_mask = GroupMaskConflict(masks, model, dummy_input, traced)
    masks = fix_group_mask.fix_mask()
    fix_channel_mask = ChannelMaskConflict(masks, model, dummy_input, traced)
    masks = fix_channel_mask.fix_mask()
    return masks
class MaskFix:
    """Base class for mask-conflict fixers (GroupMaskConflict,
    ChannelMaskConflict). Holds the masks plus whatever is needed to obtain
    a traced graph of the model."""

    def __init__(self, masks, model=None, dummy_input=None, traced=None):
        # A pre-traced graph alone is sufficient; otherwise both a model and
        # a dummy input are required so the graph can be traced later.
        have_trace = traced is not None
        have_model_and_input = (model is not None) and (dummy_input is not None)
        if not (have_trace or have_model_and_input):
            raise Exception('The input parameters is invalid!')
        self.model = model
        self.dummy_input = dummy_input
        self.traced = traced
        self.masks = masks

    def fix_mask(self):
        """Subclasses implement the actual conflict resolution."""
        raise NotImplementedError

    def export(self, path):
        """
        Export the masks after fixing the conflict to file.
        """
        torch.save(self.masks, path)
class GroupMaskConflict(MaskFix):
    def __init__(self, masks, model, dummy_input, traced=None):
        """
        GroupMaskConflict fix the mask conflict between the layers that
        has group dependency with each other.

        Parameters
        ----------
        masks : dict
            a dict object that stores the masks
        model : torch.nn.Module
            model to fix the mask conflict
        dummy_input : torch.Tensor
            input example to trace the model
        traced : torch._C.torch.jit.TopLevelTracedModule
            the traced model of the target model; if this parameter is not
            None, we do not use the model and dummy_input to get the trace
            graph.
        """
        super(GroupMaskConflict, self).__init__(
            masks, model, dummy_input, traced)

    def fix_mask(self):
        """
        Fix the mask conflict before the mask inference for the layers that
        has group dependencies. This function should be called before the
        mask inference of the 'speedup' module.

        Returns
        -------
        dict
            The masks, adjusted so that the remaining filters of each layer
            can still be evenly divided into that layer's groups.
        """
        group_depen = GroupDependency(
            self.model, self.dummy_input, self.traced)
        depens = group_depen.dependency
        min_groups = group_depen.min_groups
        _logger.info(depens)
        for layername in depens:
            group_max = depens[layername]
            group_min = min_groups[layername]
            if layername not in self.masks:
                # this layer not pruned
                continue
            w_mask = self.masks[layername]['weight']
            shape = w_mask.size()
            count = np.prod(shape[1:])
            # Classify each filter as fully kept (all ones) or fully pruned
            # (all zeros); anything else means fine-grained pruning.
            all_ones = (w_mask.flatten(1).sum(-1) == count).nonzero().squeeze(1).tolist()
            all_zeros = (w_mask.flatten(1).sum(-1) == 0).nonzero().squeeze(1).tolist()
            if len(all_ones) + len(all_zeros) < w_mask.size(0):
                # In fine-grained pruning, skip this layer
                _logger.info('Layers %s using fine-grained pruning', layername)
                continue
            assert shape[0] % group_max == 0
            # Find the number of masked filters for each group (mini_masked).
            # To keep the pruned layer divisible into the same number of
            # groups, we can only prune mini_masked filters per group.
            step = shape[0] / group_max
            group_masked = []
            for i in range(group_max):
                _start = step * i
                _end = step * (i + 1)
                _tmp_list = list(
                    filter(lambda x: _start <= x and x < _end, all_zeros))
                group_masked.append(_tmp_list)
            mini_masked = min([len(x) for x in group_masked])
            need_unmask = set()
            for gm in group_masked:
                for i in range(mini_masked, len(gm)):
                    # To keep the output channel number still being divisible
                    # to groups, unmask the surplus filters of this group.
                    pos = gm[i]
                    need_unmask.add(pos)
            step = shape[0] / group_min
            for i in range(group_min):
                _start = step * i
                _end = step * (i+1)
                _tmp_list = list(
                    filter(lambda x: _start <= x and x < _end, all_zeros))
                if len(_tmp_list) == step:
                    # if the whole group is removed, then we don't have to
                    # unmask the filters in this group
                    for pos in _tmp_list:
                        if pos in need_unmask:
                            need_unmask.remove(pos)
            for pos in need_unmask:
                self.masks[layername]['weight'][pos] = torch.ones(shape[1:])
                # Keep the bias mask in sync with the restored filter.
                # BUGFIX: the original used hasattr(self.masks[layername],
                # 'bias'), but masks[layername] is a plain dict, so hasattr
                # was always False and bias masks were never updated. Use a
                # membership test like ChannelMaskConflict does.
                if 'bias' in self.masks[layername] and self.masks[layername]['bias'] is not None:
                    self.masks[layername]['bias'][pos] = 1
        return self.masks
class ChannelMaskConflict(MaskFix):
    def __init__(self, masks, model, dummy_input, traced=None):
        """
        ChannelMaskConflict fix the mask conflict between the layers that
        has channel dependency with each other.

        Parameters
        ----------
        masks : dict
            a dict object that stores the masks
        model : torch.nn.Module
            model to fix the mask conflict
        dummy_input : torch.Tensor
            input example to trace the model
        traced : torch._C.torch.jit.TopLevelTracedModule
            the traced graph of the target model; if this parameter is not
            None, we do not use the model and dummy_input to get the trace
            graph.
        """
        super(ChannelMaskConflict, self).__init__(
            masks, model, dummy_input, traced)
        self.conv_prune_dim = detect_mask_prune_dim(masks, model)
        self.channel_prune_type = detect_channel_prune_type(masks, model)
        # BUGFIX: log message typo corrected ('Dectected ... dim"' -> below).
        _logger.info('Detected conv prune dim: %d', self.conv_prune_dim)

    def fix_mask(self):
        """
        Fix the mask conflict before the mask inference for the layers that
        has shape dependencies. This function should be called before the
        mask inference of the 'speedup' module. Only structured pruning masks
        are supported.

        Returns
        -------
        dict
            The masks, with every layer in a dependency set sharing the same
            (union) channel mask.
        """
        if self.conv_prune_dim == 0:
            channel_depen = ChannelDependency(
                self.model, self.dummy_input, self.traced, self.channel_prune_type)
        else:
            channel_depen = InputChannelDependency(
                self.model, self.dummy_input, self.traced)
        depen_sets = channel_depen.dependency_sets
        # Axes summed away when reducing a conv weight mask to a per-channel
        # vector (output-channel pruning sums dims 1-3, input-channel 0,2,3).
        sum_idx = (1, 2, 3) if self.conv_prune_dim == 0 else (0, 2, 3)
        (_tmp_name, _tmp_tensor) = list(self.masks.items())[0]
        device = _tmp_tensor['weight'].device
        for dset in depen_sets:
            if len(dset) <= 1:
                continue
            # channel_masks is a list, each element is None or a vector, for
            # example: [[0, 1, 1, 0, 0], [0, 0, 1, 1, 0], None], where None
            # means no channel is pruned for that layer.
            channel_masks = []
            fine_grained = False
            for name in dset:
                if name in self.masks:
                    _, m = get_module_by_name(self.model, name)
                    assert m is not None
                    mask = self.masks[name]['weight']
                    if type(m).__name__ == 'Conv2d':
                        channel_mask = (mask.abs().sum(sum_idx) != 0).int()
                        channel_masks.append(channel_mask)
                        if (channel_mask.sum() * (mask.numel() / mask.shape[self.conv_prune_dim])).item() != (mask > 0).sum().item():
                            fine_grained = True
                    elif type(m).__name__ == 'Linear':
                        if self.conv_prune_dim == 1:
                            channel_masks.append(
                                (mask.abs().sum(0) != 0).int())
                        else:
                            channel_masks.append(
                                (mask.abs().sum(1) != 0).int())
                    elif type(m).__name__ == 'BatchNorm2d':
                        channel_masks.append(mask.int())
                    elif type(m).__name__ == 'ConvTranspose2d':
                        # convtranspose has a different memory layout, so we
                        # need a dedicated tmp_sum_idx for conv_transpose
                        tmp_sum_idx = (
                            0, 2, 3) if self.conv_prune_dim == 0 else (1, 2, 3)
                        channel_mask = (mask.abs().sum(tmp_sum_idx) != 0).int()
                        channel_masks.append(channel_mask)
                        if (channel_mask.sum() * (mask.numel() / mask.shape[1 - self.conv_prune_dim])).item() != (mask > 0).sum().item():
                            fine_grained = True
                    else:
                        raise RuntimeError(
                            f'unsupported module type: {type(m).__name__}')
                else:
                    # no mask means not pruned, equivalent to full masks
                    channel_masks.append(None)
            if fine_grained:
                # BUGFIX: log message typo corrected ('Fine-grianed').
                _logger.info("Fine-grained mask detected")
            if all(x is None for x in channel_masks):
                continue
            num_channels_list = [len(x)
                                 for x in channel_masks if x is not None]
            # number of channels in same set should be identical
            assert len(set(num_channels_list)) == 1
            num_channels = num_channels_list[0]
            for i, dim_mask in enumerate(channel_masks):
                if dim_mask is None:
                    channel_masks[i] = torch.ones(
                        num_channels).int().to(device)
            # merge masks with 'or'
            merged_channel_mask = channel_masks[0].clone()
            for i in range(1, len(channel_masks)):
                merged_channel_mask = (
                    (merged_channel_mask + channel_masks[i]) != 0).int()
            merged_index = torch.nonzero(merged_channel_mask, as_tuple=True)[0]
            for name in dset:
                if name not in self.masks:
                    assert all(merged_channel_mask)
                    continue
                orig_mask = self.masks[name]['weight']
                _, m = get_module_by_name(self.model, name)
                new_mask = torch.zeros_like(orig_mask)
                # NOTE(review): ConvTranspose2d reaches the RuntimeError
                # branch below even though it is accepted above — confirm
                # whether transposed convs should be rebuilt here too.
                if type(m).__name__ == 'Conv2d':
                    if self.conv_prune_dim == 0:
                        new_mask[merged_index, :, :, :] = 1.
                    else:
                        new_mask[:, merged_index, :, :] = 1.
                elif type(m).__name__ == 'Linear':
                    if self.conv_prune_dim == 0:
                        new_mask[merged_index, :] = 1
                    elif self.conv_prune_dim == 1:
                        new_mask[:, merged_index] = 1.
                elif type(m).__name__ == 'BatchNorm2d':
                    new_mask = merged_channel_mask.type_as(orig_mask)
                else:
                    raise RuntimeError(
                        f'unsupported module type: {type(m).__name__}')
                self.masks[name]['weight'] = new_mask
                if 'bias' in self.masks[name] and self.masks[name]['bias'] is not None:
                    if type(m).__name__ == 'Conv2d':
                        assert self.conv_prune_dim == 0
                    if self.conv_prune_dim == 0:
                        self.masks[name]['bias'] = merged_channel_mask.type_as(
                            self.masks[name]['bias'])
        return self.masks
def detect_channel_prune_type(masks, model):
    """
    User can prune a channel through two ways: 1) prune
    the corresponding filter of the conv layer (all the
    filter-related pruners), 2) prune the BN layers that
    follow a conv (Slim pruner). This function finds
    the pruning type of the masks.

    Parameters
    ----------
    masks: dict
        A dict object that stores the masks.
    model: nn.Module
        Model object which the mask can be applied on.

    Returns:
    -------
    prune_type: str
        Could be Filter or Batchnorm
    """
    # The type is 'Batchnorm' only when every masked layer is a BatchNorm2d;
    # a single non-BN (or unresolvable) layer means filter pruning. Note,
    # pruning both Conv and BatchNorm at the same time is not supported.
    for layer_name in masks:
        _, module = get_module_by_name(model, layer_name)
        if module is None or not isinstance(module, torch.nn.BatchNorm2d):
            return 'Filter'
    return 'Batchnorm'
def detect_mask_prune_dim(masks, model):
    """
    Detect how the masks of convolutional layers are pruned.

    Parameters
    ----------
    masks: dict
        A dict object that stores the masks.
    model: nn.Module
        Model object which the mask can be applied on.

    Returns:
    -------
    int
        Which dimension of conv masks is pruned; this depends on the pruning
        algorithm (1 for masks generated by AMCPruner, 0 for the rest of the
        NNI builtin pruners).
        0: filter pruning — filters of weights are pruned, so channels of
        output feature maps are pruned.
        1: channel pruning — kernels corresponding to input channels are
        pruned, so channels of input feature maps are pruned.
    """
    preserved = [0., 0.]  # preserved channel counts along dim0 / dim1
    totals = [0., 0.]     # total channel counts along dim0 / dim1
    for module_name in masks:
        _, m = get_module_by_name(model, module_name)
        if m is None or type(m).__name__ != 'Conv2d':
            continue
        mask = masks[module_name]['weight'].clone()
        assert (mask >= 0).sum() == mask.numel(), \
            "mask values should be greater than or equal to 0."
        mask = (mask > 0).int()
        mask = mask.view(mask.shape[0], mask.shape[1], -1)
        # A channel along a dimension is "preserved" if any entry survives.
        for dim, other_dims in ((0, (1, 2)), (1, (0, 2))):
            kept = (mask.sum(other_dims) > 0).int()
            preserved[dim] += kept.sum().item()
            totals[dim] += len(kept)
    if totals[0] == 0 or totals[1] == 0:
        _logger.warning('no multi-dimension masks found.')
        return 0
    dim0_sparsity = 1. - preserved[0] / totals[0]
    dim1_sparsity = 1. - preserved[1] / totals[1]
    _logger.info('dim0 sparsity: %f', dim0_sparsity)
    _logger.info('dim1 sparsity: %f', dim1_sparsity)
    if dim0_sparsity == dim1_sparsity == 0.:
        _logger.warning('nothing masked.')
    if dim0_sparsity > 0 and dim1_sparsity > 0:
        _logger.warning('both dim0 and dim1 masks found.')
    return 0 if dim0_sparsity >= dim1_sparsity else 1
| 42.33584 | 137 | 0.572401 |
import os
import logging
import torch
import numpy as np
from .shape_dependency import ChannelDependency, GroupDependency, InputChannelDependency
from .utils import get_module_by_name
_logger = logging.getLogger('FixMaskConflict')
def fix_mask_conflict(masks, model, dummy_input, traced=None):
if isinstance(masks, str):
assert os.path.exists(masks)
masks = torch.load(masks)
assert len(masks) > 0, 'Mask tensor cannot be empty'
if traced is None:
assert model is not None and dummy_input is not None
training = model.training
model.eval()
kw_args = {}
if torch.__version__ >= '1.6.0':
kw_args['strict'] = False
traced = torch.jit.trace(model, dummy_input, **kw_args)
model.train(training)
fix_group_mask = GroupMaskConflict(masks, model, dummy_input, traced)
masks = fix_group_mask.fix_mask()
fix_channel_mask = ChannelMaskConflict(masks, model, dummy_input, traced)
masks = fix_channel_mask.fix_mask()
return masks
class MaskFix:
def __init__(self, masks, model=None, dummy_input=None, traced=None):
parameter_valid = False
if traced is not None:
parameter_valid = True
elif (model is not None) and (dummy_input is not None):
parameter_valid = True
if not parameter_valid:
raise Exception('The input parameters is invalid!')
self.model = model
self.dummy_input = dummy_input
self.traced = traced
self.masks = masks
def fix_mask(self):
raise NotImplementedError
def export(self, path):
torch.save(self.masks, path)
class GroupMaskConflict(MaskFix):
def __init__(self, masks, model, dummy_input, traced=None):
super(GroupMaskConflict, self).__init__(
masks, model, dummy_input, traced)
def fix_mask(self):
group_depen = GroupDependency(
self.model, self.dummy_input, self.traced)
depens = group_depen.dependency
min_groups = group_depen.min_groups
_logger.info(depens)
for layername in depens:
group_max = depens[layername]
group_min = min_groups[layername]
if layername not in self.masks:
continue
w_mask = self.masks[layername]['weight']
shape = w_mask.size()
count = np.prod(shape[1:])
all_ones = (w_mask.flatten(1).sum(-1) == count).nonzero().squeeze(1).tolist()
all_zeros = (w_mask.flatten(1).sum(-1) == 0).nonzero().squeeze(1).tolist()
if len(all_ones) + len(all_zeros) < w_mask.size(0):
_logger.info('Layers %s using fine-grained pruning', layername)
continue
assert shape[0] % group_max == 0
step = shape[0] / group_max
group_masked = []
for i in range(group_max):
_start = step * i
_end = step * (i + 1)
_tmp_list = list(
filter(lambda x: _start <= x and x < _end, all_zeros))
group_masked.append(_tmp_list)
mini_masked = min([len(x) for x in group_masked])
need_unmask = set()
for gm in group_masked:
for i in range(mini_masked, len(gm)):
pos = gm[i]
need_unmask.add(pos)
step = shape[0] / group_min
for i in range(group_min):
_start = step * i
_end = step * (i+1)
_tmp_list = list(
filter(lambda x: _start <= x and x < _end, all_zeros))
if len(_tmp_list) == step:
# the filters in this group
for pos in _tmp_list:
if pos in need_unmask:
need_unmask.remove(pos)
for pos in need_unmask:
self.masks[layername]['weight'][pos] = torch.ones(shape[1:])
if hasattr(self.masks[layername], 'bias'):
self.masks[layername]['bias'][pos] = 1
return self.masks
class ChannelMaskConflict(MaskFix):
def __init__(self, masks, model, dummy_input, traced=None):
super(ChannelMaskConflict, self).__init__(
masks, model, dummy_input, traced)
self.conv_prune_dim = detect_mask_prune_dim(masks, model)
self.channel_prune_type = detect_channel_prune_type(masks, model)
_logger.info('Dectected conv prune dim" %d', self.conv_prune_dim)
def fix_mask(self):
if self.conv_prune_dim == 0:
channel_depen = ChannelDependency(
self.model, self.dummy_input, self.traced, self.channel_prune_type)
else:
channel_depen = InputChannelDependency(
self.model, self.dummy_input, self.traced)
depen_sets = channel_depen.dependency_sets
sum_idx = (1, 2, 3) if self.conv_prune_dim == 0 else (0, 2, 3)
(_tmp_name, _tmp_tensor) = list(self.masks.items())[0]
device = _tmp_tensor['weight'].device
for dset in depen_sets:
if len(dset) <= 1:
continue
# channel_masks is a list, each element is None or a vector, for example:
# [[0, 1, 1, 0, 0], [0, 0, 1, 1, 0], None], None means no channel
# is pruned.
channel_masks = []
fine_grained = False
for name in dset:
if name in self.masks:
_, m = get_module_by_name(self.model, name)
assert m is not None
mask = self.masks[name]['weight']
if type(m).__name__ == 'Conv2d':
channel_mask = (mask.abs().sum(sum_idx) != 0).int()
channel_masks.append(channel_mask)
if (channel_mask.sum() * (mask.numel() / mask.shape[self.conv_prune_dim])).item() != (mask > 0).sum().item():
fine_grained = True
elif type(m).__name__ == 'Linear':
if self.conv_prune_dim == 1:
channel_masks.append(
(mask.abs().sum(0) != 0).int())
else:
channel_masks.append(
(mask.abs().sum(1) != 0).int())
elif type(m).__name__ == 'BatchNorm2d':
channel_masks.append(mask.int())
elif type(m).__name__ == 'ConvTranspose2d':
# convtranspose have difference memory layout, so that we need create
# a tmp_sum_idx for conv_transpose
tmp_sum_idx = (
0, 2, 3) if self.conv_prune_dim == 0 else (1, 2, 3)
channel_mask = (mask.abs().sum(tmp_sum_idx) != 0).int()
channel_masks.append(channel_mask)
if (channel_mask.sum() * (mask.numel() / mask.shape[1 - self.conv_prune_dim])).item() != (mask > 0).sum().item():
fine_grained = True
else:
raise RuntimeError(
f'unsupported module type: {type(m).__name__}')
else:
# no mask means not pruned, equivlent to full masks
channel_masks.append(None)
if fine_grained:
_logger.info("Fine-grianed mask detected")
if all(x is None for x in channel_masks):
continue
num_channels_list = [len(x)
for x in channel_masks if x is not None]
# number of channels in same set should be identical
assert len(set(num_channels_list)) == 1
num_channels = num_channels_list[0]
for i, dim_mask in enumerate(channel_masks):
if dim_mask is None:
channel_masks[i] = torch.ones(
num_channels).int().to(device)
# merge masks with 'or'
merged_channel_mask = channel_masks[0].clone()
for i in range(1, len(channel_masks)):
merged_channel_mask = (
(merged_channel_mask + channel_masks[i]) != 0).int()
merged_index = torch.nonzero(merged_channel_mask, as_tuple=True)[0]
for name in dset:
if name not in self.masks:
assert all(merged_channel_mask)
continue
orig_mask = self.masks[name]['weight']
_, m = get_module_by_name(self.model, name)
new_mask = torch.zeros_like(orig_mask)
if type(m).__name__ == 'Conv2d':
if self.conv_prune_dim == 0:
new_mask[merged_index, :, :, :] = 1.
else:
new_mask[:, merged_index, :, :] = 1.
elif type(m).__name__ == 'Linear':
if self.conv_prune_dim == 0:
new_mask[merged_index, :] = 1
elif self.conv_prune_dim == 1:
new_mask[:, merged_index] = 1.
elif type(m).__name__ == 'BatchNorm2d':
new_mask = merged_channel_mask.type_as(orig_mask)
else:
raise RuntimeError(
f'unsupported module type: {type(m).__name__}')
self.masks[name]['weight'] = new_mask
if 'bias' in self.masks[name] and self.masks[name]['bias'] is not None:
if type(m).__name__ == 'Conv2d':
assert self.conv_prune_dim == 0
if self.conv_prune_dim == 0:
self.masks[name]['bias'] = merged_channel_mask.type_as(
self.masks[name]['bias'])
return self.masks
def detect_channel_prune_type(masks, model):
prune_type = 'Filter'
all_batch_norm = True
for layer_name in masks:
_, m = get_module_by_name(model, layer_name)
if m is None or (not isinstance(m, torch.nn.BatchNorm2d)):
all_batch_norm = False
break
if all_batch_norm:
# if all masks are for batchnorm layers, then the prune_type is BatchNorm
# Note, actually we currently do not support pruning both Conv and BatchNorm
# at the same time.
prune_type = 'Batchnorm'
return prune_type
def detect_mask_prune_dim(masks, model):
dim0_preserved, dim1_preserved = 0., 0.
dim0_num, dim1_num = 0., 0.
for module_name in masks:
_, m = get_module_by_name(model, module_name)
if m is None or type(m).__name__ != 'Conv2d':
continue
mask = masks[module_name]['weight'].clone()
assert (mask >= 0).sum() == mask.numel(), \
"mask values should be greater than or equal to 0."
mask = (mask > 0).int()
mask = mask.view(mask.shape[0], mask.shape[1], -1)
dim0_mask = (mask.sum((1, 2)) > 0).int()
dim1_mask = (mask.sum((0, 2)) > 0).int()
dim0_preserved += dim0_mask.sum().item()
dim1_preserved += dim1_mask.sum().item()
dim0_num += len(dim0_mask)
dim1_num += len(dim1_mask)
if dim0_num == 0 or dim1_num == 0:
_logger.warning('no multi-dimension masks found.')
return 0
dim0_sparsity, dim1_sparsity = 1. - dim0_preserved / \
dim0_num, 1. - dim1_preserved / dim1_num
_logger.info('dim0 sparsity: %f', dim0_sparsity)
_logger.info('dim1 sparsity: %f', dim1_sparsity)
if dim0_sparsity == dim1_sparsity == 0.:
_logger.warning('nothing masked.')
if dim0_sparsity > 0 and dim1_sparsity > 0:
_logger.warning('both dim0 and dim1 masks found.')
return 0 if dim0_sparsity >= dim1_sparsity else 1
| true | true |
f72d67e6523be41eb111c7190580bae833cc635f | 100 | py | Python | Week 1/grok/samples/1b/19.view dimensions of fits images.py | anandprabhakar0507/Assignments-Data-Driven-Astronomy-from-University-of-sydney-on-coursera- | 58fab1c413d7ad5693b1d63f14be05b0f5ec448c | [
"MIT"
] | 4 | 2021-07-02T02:57:31.000Z | 2022-02-01T17:31:14.000Z | Week 1/grok/samples/1b/19.view dimensions of fits images.py | anandprabhakar0507/Assignments-Data-Driven-Astronomy-from-University-of-sydney-on-coursera- | 58fab1c413d7ad5693b1d63f14be05b0f5ec448c | [
"MIT"
] | null | null | null | Week 1/grok/samples/1b/19.view dimensions of fits images.py | anandprabhakar0507/Assignments-Data-Driven-Astronomy-from-University-of-sydney-on-coursera- | 58fab1c413d7ad5693b1d63f14be05b0f5ec448c | [
"MIT"
] | 3 | 2021-07-12T21:54:43.000Z | 2022-02-01T17:31:42.000Z | from astropy.io import fits
a = fits.open('image0.fits')
image = a[0].data
print(image.shape) | 16.666667 | 29 | 0.68 | from astropy.io import fits
a = fits.open('image0.fits')
image = a[0].data
print(image.shape) | true | true |
f72d6827c967a7c37c9189b3d2a2e0b6721fb490 | 228 | py | Python | hknweb/exams/admin.py | yuji3w/hknweb | 0df5369da28f46dc9016da97652cb6b8e2b7f3e6 | [
"MIT"
] | 3 | 2019-04-22T21:51:07.000Z | 2019-12-16T21:54:00.000Z | hknweb/exams/admin.py | yuji3w/hknweb | 0df5369da28f46dc9016da97652cb6b8e2b7f3e6 | [
"MIT"
] | null | null | null | hknweb/exams/admin.py | yuji3w/hknweb | 0df5369da28f46dc9016da97652cb6b8e2b7f3e6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Course, CourseSemester, Department, Instructor
admin.site.register(Course)
admin.site.register(CourseSemester)
admin.site.register(Department)
admin.site.register(Instructor) | 32.571429 | 66 | 0.842105 | from django.contrib import admin
from .models import Course, CourseSemester, Department, Instructor
admin.site.register(Course)
admin.site.register(CourseSemester)
admin.site.register(Department)
admin.site.register(Instructor) | true | true |
f72d686e003d9fdaeb69d3bfc5e9e05d5aff6ab6 | 260 | py | Python | scripts/basins/create_ptshp.py | jrising/research-common | 2b930d29fa0b16b3b08b33b7d8fffa583ccde94f | [
"MIT"
] | null | null | null | scripts/basins/create_ptshp.py | jrising/research-common | 2b930d29fa0b16b3b08b33b7d8fffa583ccde94f | [
"MIT"
] | null | null | null | scripts/basins/create_ptshp.py | jrising/research-common | 2b930d29fa0b16b3b08b33b7d8fffa583ccde94f | [
"MIT"
] | 3 | 2016-02-09T00:59:01.000Z | 2018-03-31T10:17:49.000Z | import sys
import shapefile
longitude = float(sys.argv[1])
latitude = float(sys.argv[2])
outpath = sys.argv[3]
writer = shapefile.Writer(shapefile.POINT)
writer.field('label')
writer.point(longitude, latitude)
writer.record('singleton')
writer.save(outpath)
| 20 | 42 | 0.765385 | import sys
import shapefile
longitude = float(sys.argv[1])
latitude = float(sys.argv[2])
outpath = sys.argv[3]
writer = shapefile.Writer(shapefile.POINT)
writer.field('label')
writer.point(longitude, latitude)
writer.record('singleton')
writer.save(outpath)
| true | true |
f72d68fb62a2806074aca8a22832fda252b941ff | 12,657 | py | Python | seedminer/seedminer_launcher3.py | Marenthyu/seedminer | c46eb002e34cce4fe847d5b5ea59955ec9a69626 | [
"MIT"
] | null | null | null | seedminer/seedminer_launcher3.py | Marenthyu/seedminer | c46eb002e34cce4fe847d5b5ea59955ec9a69626 | [
"MIT"
] | null | null | null | seedminer/seedminer_launcher3.py | Marenthyu/seedminer | c46eb002e34cce4fe847d5b5ea59955ec9a69626 | [
"MIT"
] | null | null | null | import os,sys,struct,glob
import urllib.request
from binascii import hexlify, unhexlify
#don't change this mid brute force - can be different amount multiple computers - powers of two recommended for even distribution of workload 1 2 4 8 etc.
process_count=4
offset_override=0 #for gpu options, this allows starting brute-force at a user-defined offset
#-----------------------------------------------------------------------------------------------------------------
#Don't edit below this line unless you have multiple computers brute-forcing - most of you won't need this feature
#-----------------------------------------------------------------------------------------------------------------
number_of_computers=1 #each computer needs this set to same number if more than one
which_computer_is_this=0 #each computer has a different id # that's less than number_of_computers
#-----------------------------------------------------------------------------------------------------------------
#Don't edit below this line unless you know what you're doing (function defs begin)
#-----------------------------------------------------------------------------------------------------------------
lfcs=[]
ftune=[]
lfcs_new=[]
ftune_new=[]
err_correct=0
def int16bytes(n):
return n.to_bytes(16, 'big')
def expand():
for i in range(1,len(lfcs)):
lfcs[i]=lfcs[i]<<12 | 0x800
for i in range(1,len(lfcs_new)):
lfcs_new[i]=lfcs_new[i]<<12 | 0x800
def bytes2int(s):
n=0
for i in range(4):
n+=ord(s[i:i+1])<<(i*8)
return n
def int2bytes(n):
s=bytearray(4)
for i in range(4):
s[i]=n & 0xFF
n=n>>8
return s
def byteSwap4(n):
# using a slice to reverse is better, and easier for bytes
return n[::-1]
def endian4(n):
return (n&0xFF000000)>>24 | (n&0x00FF0000)>>8 | (n&0x0000FF00)<<8 | (n&0x000000FF)<<24
def getMsed3Estimate(n,isNew):
global err_correct
newbit=0x0
if isNew:
fc=lfcs_new
ft=ftune_new
newbit=0x80000000
else:
fc=lfcs
ft=ftune
fc_size=len(fc)
ft_size=len(ft)
if fc_size != ft_size:
return -1
for i in range(fc_size):
if n<fc[i]:
xs=(n-fc[i-1])
xl=(fc[i]-fc[i-1])
y=ft[i-1]
yl=(ft[i]-ft[i-1])
ys=((xs*yl)//xl)+y
err_correct=ys
return ((n//5)-ys) | newbit
return ((n//5)-ft[ft_size-1]) | newbit
def mii_gpu():
from Cryptodome.Cipher import AES
nk31=0x59FC817E6446EA6190347B20E9BDCE52
with open("input.bin", "rb") as f:
enc=f.read()
if(len(enc) != 0x70):
print("Error: input.bin is invalid size (likely QR -> input.bin conversion issue)")
sys.exit(0)
nonce=enc[:8]+b"\x00"*4
cipher = AES.new(int16bytes(nk31), AES.MODE_CCM, nonce )
dec=cipher.decrypt(enc[8:0x60])
nonce=nonce[:8]
final=dec[:12]+nonce+dec[12:]
with open("output.bin", "wb") as f:
f.write(final)
if(len(sys.argv) >= 3):
model=sys.argv[2].lower()
else:
print("Error: need to specify new|old movable.sed")
sys.exit(0)
model_str=b""
start_lfcs_old=0x0B000000//2
start_lfcs_new=0x05000000//2
start_lfcs=0
year=0
if(len(sys.argv)==4):
year=int(sys.argv[3])
if(model=="old"):
model_str=b"\x00\x00"
if (year==2011):
start_lfcs_old=0x01000000
elif(year==2012):
start_lfcs_old=0x04000000
elif(year==2013):
start_lfcs_old=0x07000000
elif(year==2014):
start_lfcs_old=0x09000000
elif(year==2015):
start_lfcs_old=0x09800000
elif(year==2016):
start_lfcs_old=0x0A000000
elif(year==2017):
start_lfcs_old=0x0A800000
else:
print("Year 2011-2017 not entered so beginning at lfcs midpoint "+hex(start_lfcs_old))
start_lfcs=start_lfcs_old
elif(model=="new"):
model_str=b"\x02\x00"
if (year==2014):
start_lfcs_new=0x00800000
elif (year==2015):
start_lfcs_new=0x01800000
elif (year==2016):
start_lfcs_new=0x03000000
elif (year==2017):
start_lfcs_new=0x04000000
else:
print("Year 2014-2017 not entered so beginning at lfcs midpoint "+hex(start_lfcs_new))
start_lfcs=start_lfcs_new
start_lfcs=endian4(start_lfcs)
command="bfcl lfcs %08X %s %s %08X" % (start_lfcs, hexlify(model_str).decode('ascii'), hexlify(final[4:4+8]).decode('ascii'), endian4(offset_override))
print(command)
os.system(command)
def generate_part2():
global err_correct
with open("saves/lfcs.dat", "rb") as f:
buf=f.read()
lfcs_len=len(buf)//8
err_correct=0
for i in range(lfcs_len):
lfcs.append(struct.unpack("<i",buf[i*8:i*8+4])[0])
for i in range(lfcs_len):
ftune.append(struct.unpack("<i",buf[i*8+4:i*8+8])[0])
with open("saves/lfcs_new.dat", "rb") as f:
buf=f.read()
lfcs_new_len=len(buf)//8
for i in range(lfcs_new_len):
lfcs_new.append(struct.unpack("<i",buf[i*8:i*8+4])[0])
for i in range(lfcs_new_len):
ftune_new.append(struct.unpack("<i",buf[i*8+4:i*8+8])[0])
isNew=False
msed3=0
noobtest=b"\x00"*0x20
with open("movable_part1.sed", "rb") as f:
seed=f.read()
if(noobtest in seed[0x10:0x30]):
print("Error: ID0 has been left blank, please add an ID0")
print("Ex: python %s id0 abcdef012345EXAMPLEdef0123456789" % (sys.argv[0]))
sys.exit(0)
if(noobtest[:4] in seed[:4]):
print("Error: LFCS has been left blank, did you do a complete two-way friend code exchange before dumping friendlist?")
sys.exit(0)
if len(seed) != 0x1000:
print("Error: movable_part1.sed is not 4KB")
sys.exit(0)
if seed[4:5]==b"\x02":
print("New3DS msed")
isNew=True
elif seed[4:5]==b"\x00":
print("Old3DS msed - this can happen on a New3DS")
isNew=False
else:
print("Error: can't read u8 msed[4]")
sys.exit(0)
expand()
print("LFCS : "+hex(bytes2int(seed[0:4])))
print("msed3 est : "+hex(getMsed3Estimate(bytes2int(seed[0:4]),isNew)))
print("Error est : "+str(err_correct))
msed3=getMsed3Estimate(bytes2int(seed[0:4]),isNew)
offset=0x10
hash_final=b""
for i in range(64):
try:
hash=unhexlify(seed[offset:offset+0x20])
except:
break
hash_single=byteSwap4(hash[0:4])+byteSwap4(hash[4:8])+byteSwap4(hash[8:12])+byteSwap4(hash[12:16])
print("ID0 hash "+str(i)+": "+hexlify(hash_single).decode('ascii'))
hash_final+=hash_single
offset+=0x20
print("Hash total: "+str(i))
part2=seed[0:12]+int2bytes(msed3)+hash_final
pad=0x1000-len(part2)
part2+=b"\x00"*pad
with open("movable_part2.sed", "wb") as f:
f.write(part2)
print("movable_part2.sed generation success")
def hash_clusterer():
buf=b""
hashcount=0
if(len(sys.argv)==3):
dirs=[]
dirs.append(sys.argv[2])
else:
dirs=glob.glob("*")
try:
with open("movable_part1.sed", "rb") as f:
file=f.read()
except:
print("movable_part1.sed not found, generating a new one")
print("don't forget to add an lfcs to it!\n")
with open("movable_part1.sed", "wb") as f:
file=b"\x00"*0x1000
f.write(file)
for i in dirs:
try:
temp=str(i).encode("ascii")
print(i,end='')
int(i,16)
if(len(i)==32 and temp not in file):
buf+=temp
hashcount+=1
else:
print(" -- improper ID0 length or already in file",end='')
print("")
except:
print(" -- not an ID0")
print("")
if(hashcount>1):
print("Too many ID0 dirs! (%d)\nMove the ones your 3ds isn't using!" % (hashcount))
sys.exit(0)
if(hashcount==1):
print("Hash added!")
else:
print("No hashes added!")
sys.exit(0)
with open("movable_part1.sed.backup", "wb") as f:
f.write(file)
file=file[:0x10]
pad_len=0x1000-len(file+buf)
pad=b"\x00"*pad_len
with open("movable_part1.sed", "wb") as f:
f.write(file+buf+pad)
print("There are now %d ID0 hashes in your movable_part1.sed!" % ((len(file+buf)//0x20)))
print("Done!")
def do_cpu():
global process_count
if(len(sys.argv)==3):
process_count=int(sys.argv[2])
if(which_computer_is_this >= number_of_computers):
print("You can't assign an id # to a computer that doesn't exist")
sys.exit(0)
MAX=0x100000000
address_begin=0
address_end=MAX
address_space=MAX//number_of_computers
for i in range(number_of_computers):
if(which_computer_is_this==i):
address_begin=(i*address_space)
address_end=(address_begin+address_space)
print("This computer id: "+str(i));
if(which_computer_is_this==number_of_computers-1):
address_end=MAX
print("Overall starting msed2 address: "+hex(address_begin))
print("Overall ending msed2 address: "+hex(address_end))
print("")
process_space=address_end-address_begin
process_size=process_space//process_count
for i in range(process_count):
process_begin=address_begin+(process_size*i)
process_end=process_begin+process_size
if(i==(process_count-1)):
process_end=address_end
start=process_begin
size=process_end-process_begin
os.system("start seedMiner.exe %08X %09X" % (start,size))
print("Process: "+str(i)+" Start: "+hex(process_begin)+" Size: "+hex(size))
def do_gpu():
with open("movable_part2.sed", "rb") as f:
buf=f.read()
keyy=hexlify(buf[:16]).decode('ascii')
ID0=hexlify(buf[16:32]).decode('ascii')
command="bfcl msky %s %s %08X" % (keyy,ID0, endian4(offset_override))
print(command)
os.system(command)
def download(url, dest):
try:
response = urllib.request.urlopen(url)
html = response.read()
data=""
with open(dest, "rb") as f:
data=f.read()
if(data != html):
with open(dest, "wb") as f:
f.write(html)
print("Updating "+dest+" success!")
else:
print(dest+" is already up-to-date!")
except:
print("Error updating "+dest)
def update_db():
download("https://github.com/zoogie/seedminer/blob/master/seedminer/saves/lfcs.dat?raw=true","saves/lfcs.dat")
download("https://github.com/zoogie/seedminer/blob/master/seedminer/saves/lfcs_new.dat?raw=true","saves/lfcs_new.dat")
def error_print():
print("\nCommand line error")
print("Usage:")
print("python %s cpu|gpu|id0|mii old|mii new|update-db [# cpu processes] [ID0 hash] [year 3ds built]" % (sys.argv[0]))
print("Examples:")
print("python %s cpu 4" % (sys.argv[0]))
print("python %s gpu" % (sys.argv[0]))
print("python %s id0 abcdef012345EXAMPLEdef0123456789" % (sys.argv[0]))
print("python %s mii new 2017" % (sys.argv[0]))
print("python %s mii old 2011" % (sys.argv[0]))
print("python %s mii old" % (sys.argv[0]))
print("python %s update-db" % (sys.argv[0]))
#---------------------------------------------------------------------------
#command handler
#---------------------------------------------------------------------------
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
if(len(sys.argv) < 2 or len(sys.argv) > 4):
error_print()
sys.exit(0)
if(sys.argv[1].lower() == "gpu"):
if(len(sys.argv)==3):
offset_override = int(sys.argv[2]) * 2
print("GPU selected")
generate_part2()
do_gpu()
sys.exit(0)
elif(sys.argv[1].lower()=="cpu"):
print("CPU selected")
generate_part2()
do_cpu()
sys.exit(0)
elif(sys.argv[1].lower()=="id0"):
print("ID0 selected")
hash_clusterer()
sys.exit(0)
elif(sys.argv[1].lower()=="mii"):
print("MII selected")
mii_gpu()
generate_part2()
offset_override=0
do_gpu()
sys.exit(0)
elif(sys.argv[1].lower()=="update-db"):
print("Update msed_data selected")
update_db()
sys.exit(0)
else:
error_print()
sys.exit(0)
| 31.174877 | 155 | 0.56372 | import os,sys,struct,glob
import urllib.request
from binascii import hexlify, unhexlify
process_count=4
offset_override=0 #for gpu options, this allows starting brute-force at a user-defined offset
#-----------------------------------------------------------------------------------------------------------------
#Don't edit below this line unless you have multiple computers brute-forcing - most of you won't need this feature
#-----------------------------------------------------------------------------------------------------------------
number_of_computers=1 #each computer needs this set to same number if more than one
which_computer_is_this=0 #each computer has a different id # that's less than number_of_computers
lfcs=[]
ftune=[]
lfcs_new=[]
ftune_new=[]
err_correct=0
def int16bytes(n):
return n.to_bytes(16, 'big')
def expand():
for i in range(1,len(lfcs)):
lfcs[i]=lfcs[i]<<12 | 0x800
for i in range(1,len(lfcs_new)):
lfcs_new[i]=lfcs_new[i]<<12 | 0x800
def bytes2int(s):
n=0
for i in range(4):
n+=ord(s[i:i+1])<<(i*8)
return n
def int2bytes(n):
s=bytearray(4)
for i in range(4):
s[i]=n & 0xFF
n=n>>8
return s
def byteSwap4(n):
return n[::-1]
def endian4(n):
return (n&0xFF000000)>>24 | (n&0x00FF0000)>>8 | (n&0x0000FF00)<<8 | (n&0x000000FF)<<24
def getMsed3Estimate(n,isNew):
global err_correct
newbit=0x0
if isNew:
fc=lfcs_new
ft=ftune_new
newbit=0x80000000
else:
fc=lfcs
ft=ftune
fc_size=len(fc)
ft_size=len(ft)
if fc_size != ft_size:
return -1
for i in range(fc_size):
if n<fc[i]:
xs=(n-fc[i-1])
xl=(fc[i]-fc[i-1])
y=ft[i-1]
yl=(ft[i]-ft[i-1])
ys=((xs*yl)//xl)+y
err_correct=ys
return ((n//5)-ys) | newbit
return ((n//5)-ft[ft_size-1]) | newbit
def mii_gpu():
from Cryptodome.Cipher import AES
nk31=0x59FC817E6446EA6190347B20E9BDCE52
with open("input.bin", "rb") as f:
enc=f.read()
if(len(enc) != 0x70):
print("Error: input.bin is invalid size (likely QR -> input.bin conversion issue)")
sys.exit(0)
nonce=enc[:8]+b"\x00"*4
cipher = AES.new(int16bytes(nk31), AES.MODE_CCM, nonce )
dec=cipher.decrypt(enc[8:0x60])
nonce=nonce[:8]
final=dec[:12]+nonce+dec[12:]
with open("output.bin", "wb") as f:
f.write(final)
if(len(sys.argv) >= 3):
model=sys.argv[2].lower()
else:
print("Error: need to specify new|old movable.sed")
sys.exit(0)
model_str=b""
start_lfcs_old=0x0B000000//2
start_lfcs_new=0x05000000//2
start_lfcs=0
year=0
if(len(sys.argv)==4):
year=int(sys.argv[3])
if(model=="old"):
model_str=b"\x00\x00"
if (year==2011):
start_lfcs_old=0x01000000
elif(year==2012):
start_lfcs_old=0x04000000
elif(year==2013):
start_lfcs_old=0x07000000
elif(year==2014):
start_lfcs_old=0x09000000
elif(year==2015):
start_lfcs_old=0x09800000
elif(year==2016):
start_lfcs_old=0x0A000000
elif(year==2017):
start_lfcs_old=0x0A800000
else:
print("Year 2011-2017 not entered so beginning at lfcs midpoint "+hex(start_lfcs_old))
start_lfcs=start_lfcs_old
elif(model=="new"):
model_str=b"\x02\x00"
if (year==2014):
start_lfcs_new=0x00800000
elif (year==2015):
start_lfcs_new=0x01800000
elif (year==2016):
start_lfcs_new=0x03000000
elif (year==2017):
start_lfcs_new=0x04000000
else:
print("Year 2014-2017 not entered so beginning at lfcs midpoint "+hex(start_lfcs_new))
start_lfcs=start_lfcs_new
start_lfcs=endian4(start_lfcs)
command="bfcl lfcs %08X %s %s %08X" % (start_lfcs, hexlify(model_str).decode('ascii'), hexlify(final[4:4+8]).decode('ascii'), endian4(offset_override))
print(command)
os.system(command)
def generate_part2():
global err_correct
with open("saves/lfcs.dat", "rb") as f:
buf=f.read()
lfcs_len=len(buf)//8
err_correct=0
for i in range(lfcs_len):
lfcs.append(struct.unpack("<i",buf[i*8:i*8+4])[0])
for i in range(lfcs_len):
ftune.append(struct.unpack("<i",buf[i*8+4:i*8+8])[0])
with open("saves/lfcs_new.dat", "rb") as f:
buf=f.read()
lfcs_new_len=len(buf)//8
for i in range(lfcs_new_len):
lfcs_new.append(struct.unpack("<i",buf[i*8:i*8+4])[0])
for i in range(lfcs_new_len):
ftune_new.append(struct.unpack("<i",buf[i*8+4:i*8+8])[0])
isNew=False
msed3=0
noobtest=b"\x00"*0x20
with open("movable_part1.sed", "rb") as f:
seed=f.read()
if(noobtest in seed[0x10:0x30]):
print("Error: ID0 has been left blank, please add an ID0")
print("Ex: python %s id0 abcdef012345EXAMPLEdef0123456789" % (sys.argv[0]))
sys.exit(0)
if(noobtest[:4] in seed[:4]):
print("Error: LFCS has been left blank, did you do a complete two-way friend code exchange before dumping friendlist?")
sys.exit(0)
if len(seed) != 0x1000:
print("Error: movable_part1.sed is not 4KB")
sys.exit(0)
if seed[4:5]==b"\x02":
print("New3DS msed")
isNew=True
elif seed[4:5]==b"\x00":
print("Old3DS msed - this can happen on a New3DS")
isNew=False
else:
print("Error: can't read u8 msed[4]")
sys.exit(0)
expand()
print("LFCS : "+hex(bytes2int(seed[0:4])))
print("msed3 est : "+hex(getMsed3Estimate(bytes2int(seed[0:4]),isNew)))
print("Error est : "+str(err_correct))
msed3=getMsed3Estimate(bytes2int(seed[0:4]),isNew)
offset=0x10
hash_final=b""
for i in range(64):
try:
hash=unhexlify(seed[offset:offset+0x20])
except:
break
hash_single=byteSwap4(hash[0:4])+byteSwap4(hash[4:8])+byteSwap4(hash[8:12])+byteSwap4(hash[12:16])
print("ID0 hash "+str(i)+": "+hexlify(hash_single).decode('ascii'))
hash_final+=hash_single
offset+=0x20
print("Hash total: "+str(i))
part2=seed[0:12]+int2bytes(msed3)+hash_final
pad=0x1000-len(part2)
part2+=b"\x00"*pad
with open("movable_part2.sed", "wb") as f:
f.write(part2)
print("movable_part2.sed generation success")
def hash_clusterer():
buf=b""
hashcount=0
if(len(sys.argv)==3):
dirs=[]
dirs.append(sys.argv[2])
else:
dirs=glob.glob("*")
try:
with open("movable_part1.sed", "rb") as f:
file=f.read()
except:
print("movable_part1.sed not found, generating a new one")
print("don't forget to add an lfcs to it!\n")
with open("movable_part1.sed", "wb") as f:
file=b"\x00"*0x1000
f.write(file)
for i in dirs:
try:
temp=str(i).encode("ascii")
print(i,end='')
int(i,16)
if(len(i)==32 and temp not in file):
buf+=temp
hashcount+=1
else:
print(" -- improper ID0 length or already in file",end='')
print("")
except:
print(" -- not an ID0")
print("")
if(hashcount>1):
print("Too many ID0 dirs! (%d)\nMove the ones your 3ds isn't using!" % (hashcount))
sys.exit(0)
if(hashcount==1):
print("Hash added!")
else:
print("No hashes added!")
sys.exit(0)
with open("movable_part1.sed.backup", "wb") as f:
f.write(file)
file=file[:0x10]
pad_len=0x1000-len(file+buf)
pad=b"\x00"*pad_len
with open("movable_part1.sed", "wb") as f:
f.write(file+buf+pad)
print("There are now %d ID0 hashes in your movable_part1.sed!" % ((len(file+buf)//0x20)))
print("Done!")
def do_cpu():
global process_count
if(len(sys.argv)==3):
process_count=int(sys.argv[2])
if(which_computer_is_this >= number_of_computers):
print("You can't assign an id # to a computer that doesn't exist")
sys.exit(0)
MAX=0x100000000
address_begin=0
address_end=MAX
address_space=MAX//number_of_computers
for i in range(number_of_computers):
if(which_computer_is_this==i):
address_begin=(i*address_space)
address_end=(address_begin+address_space)
print("This computer id: "+str(i));
if(which_computer_is_this==number_of_computers-1):
address_end=MAX
print("Overall starting msed2 address: "+hex(address_begin))
print("Overall ending msed2 address: "+hex(address_end))
print("")
process_space=address_end-address_begin
process_size=process_space//process_count
for i in range(process_count):
process_begin=address_begin+(process_size*i)
process_end=process_begin+process_size
if(i==(process_count-1)):
process_end=address_end
start=process_begin
size=process_end-process_begin
os.system("start seedMiner.exe %08X %09X" % (start,size))
print("Process: "+str(i)+" Start: "+hex(process_begin)+" Size: "+hex(size))
def do_gpu():
with open("movable_part2.sed", "rb") as f:
buf=f.read()
keyy=hexlify(buf[:16]).decode('ascii')
ID0=hexlify(buf[16:32]).decode('ascii')
command="bfcl msky %s %s %08X" % (keyy,ID0, endian4(offset_override))
print(command)
os.system(command)
def download(url, dest):
try:
response = urllib.request.urlopen(url)
html = response.read()
data=""
with open(dest, "rb") as f:
data=f.read()
if(data != html):
with open(dest, "wb") as f:
f.write(html)
print("Updating "+dest+" success!")
else:
print(dest+" is already up-to-date!")
except:
print("Error updating "+dest)
def update_db():
download("https://github.com/zoogie/seedminer/blob/master/seedminer/saves/lfcs.dat?raw=true","saves/lfcs.dat")
download("https://github.com/zoogie/seedminer/blob/master/seedminer/saves/lfcs_new.dat?raw=true","saves/lfcs_new.dat")
def error_print():
print("\nCommand line error")
print("Usage:")
print("python %s cpu|gpu|id0|mii old|mii new|update-db [# cpu processes] [ID0 hash] [year 3ds built]" % (sys.argv[0]))
print("Examples:")
print("python %s cpu 4" % (sys.argv[0]))
print("python %s gpu" % (sys.argv[0]))
print("python %s id0 abcdef012345EXAMPLEdef0123456789" % (sys.argv[0]))
print("python %s mii new 2017" % (sys.argv[0]))
print("python %s mii old 2011" % (sys.argv[0]))
print("python %s mii old" % (sys.argv[0]))
print("python %s update-db" % (sys.argv[0]))
#---------------------------------------------------------------------------
#command handler
#---------------------------------------------------------------------------
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
if(len(sys.argv) < 2 or len(sys.argv) > 4):
error_print()
sys.exit(0)
if(sys.argv[1].lower() == "gpu"):
if(len(sys.argv)==3):
offset_override = int(sys.argv[2]) * 2
print("GPU selected")
generate_part2()
do_gpu()
sys.exit(0)
elif(sys.argv[1].lower()=="cpu"):
print("CPU selected")
generate_part2()
do_cpu()
sys.exit(0)
elif(sys.argv[1].lower()=="id0"):
print("ID0 selected")
hash_clusterer()
sys.exit(0)
elif(sys.argv[1].lower()=="mii"):
print("MII selected")
mii_gpu()
generate_part2()
offset_override=0
do_gpu()
sys.exit(0)
elif(sys.argv[1].lower()=="update-db"):
print("Update msed_data selected")
update_db()
sys.exit(0)
else:
error_print()
sys.exit(0)
| true | true |
f72d6933dae80411f980cfd73104cb634eba5baa | 19,394 | py | Python | tests/test_FluxCSVParser.py | influxdata/influxdb-client-python | bb378af3a56470ba74daeeb51f77b0d2c4a61350 | [
"MIT"
] | 380 | 2019-09-19T20:20:10.000Z | 2022-03-31T12:59:33.000Z | tests/test_FluxCSVParser.py | influxdata/influxdb-client-python | bb378af3a56470ba74daeeb51f77b0d2c4a61350 | [
"MIT"
] | 362 | 2019-09-16T11:53:29.000Z | 2022-03-29T03:11:59.000Z | tests/test_FluxCSVParser.py | influxdata/influxdb-client-python | bb378af3a56470ba74daeeb51f77b0d2c4a61350 | [
"MIT"
] | 130 | 2019-09-20T08:02:35.000Z | 2022-03-30T16:44:45.000Z | import math
import unittest
from io import BytesIO
from urllib3 import HTTPResponse
from influxdb_client.client.flux_csv_parser import FluxCsvParser, FluxSerializationMode, FluxQueryException
from influxdb_client.client.flux_table import FluxStructureEncoder
class FluxCsvParserTest(unittest.TestCase):
def test_one_table(self):
data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long,long,string\n" \
"#group,false,false,true,true,true,true,true,true,false,false,false\n" \
"#default,_result,,,,,,,,,,\n" \
",result,table,_start,_stop,_field,_measurement,host,region,_value2,value1,value_str\n" \
",,0,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,121,11,test\n"
tables = self._parse_to_tables(data=data)
self.assertEqual(1, tables.__len__())
self.assertEqual(11, tables[0].columns.__len__())
self.assertEqual(1, tables[0].records.__len__())
def test_more_tables(self):
data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long,long,string\n" \
"#group,false,false,true,true,true,true,true,true,false,false,false\n" \
"#default,_result,,,,,,,,,,\n" \
",result,table,_start,_stop,_field,_measurement,host,region,_value2,value1,value_str\n" \
",,0,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,121,11,test\n" \
",,1,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,B,west,484,22,test\n" \
",,2,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,usage_system,cpu,A,west,1444,38,test\n" \
",,3,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,user_usage,cpu,A,west,2401,49,test"
tables = self._parse_to_tables(data=data)
self.assertEqual(4, tables.__len__())
self.assertEqual(11, tables[0].columns.__len__())
self.assertEqual(1, tables[0].records.__len__())
self.assertEqual(11, tables[1].columns.__len__())
self.assertEqual(1, tables[1].records.__len__())
self.assertEqual(11, tables[2].columns.__len__())
self.assertEqual(1, tables[2].records.__len__())
self.assertEqual(11, tables[3].columns.__len__())
self.assertEqual(1, tables[3].records.__len__())
def test_multiple_queries(self):
data = "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n" \
"#group,false,false,true,true,true,true,false,false,true\n" \
"#default,t1,,,,,,,,\n" \
",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2\n" \
"\n" \
"#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n" \
"#group,false,false,true,true,true,true,false,false,true\n" \
"#default,t2,,,,,,,,\n" \
",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2"
tables = self._parse_to_tables(data=data)
self.assertEqual(4, tables.__len__())
self.assertEqual(9, tables[0].columns.__len__())
self.assertEqual(7, tables[0].records.__len__())
self.assertEqual(9, tables[1].columns.__len__())
self.assertEqual(7, tables[1].records.__len__())
self.assertEqual(9, tables[2].columns.__len__())
self.assertEqual(7, tables[2].records.__len__())
self.assertEqual(9, tables[3].columns.__len__())
self.assertEqual(7, tables[3].records.__len__())
def test_table_index_not_start_at_zero(self):
    """CSV whose ``table`` column starts at 1 must still yield two parsed tables.

    The parser must key tables off the index values actually present
    (1 and 2 here) rather than assuming the first table is numbered 0.
    """
    data = (
        "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n"
        "#group,false,false,true,true,true,true,false,false,true\n"
        "#default,t1,,,,,,,,\n"
        ",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n"
        ",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n"
        ",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n"
        ",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n"
        ",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n"
        ",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n"
        ",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n"
        ",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2\n"
    )
    tables = self._parse_to_tables(data=data)
    # Two tables, each with the 9 declared columns and 7 data rows.
    # len() replaces the non-idiomatic direct __len__() calls.
    self.assertEqual(2, len(tables))
    self.assertEqual(9, len(tables[0].columns))
    self.assertEqual(7, len(tables[0].records))
    self.assertEqual(9, len(tables[1].columns))
    self.assertEqual(7, len(tables[1].records))
def test_response_with_error(self):
    """An error table appended after the data must raise FluxQueryException."""
    # First a short, valid result table; then, after a blank line, the
    # error table that the server appends when the query fails mid-stream.
    data = (
        "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n"
        "#group,false,false,true,true,true,true,false,false,true\n"
        "#default,t1,,,,,,,,\n"
        ",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n"
        "\n"
        "#datatype,string,string\n"
        "#group,true,true\n"
        "#default,,\n"
        ",error,reference\n"
        ',"engine: unknown field type for value: xyz",'
    )
    with self.assertRaises(FluxQueryException) as ctx:
        self._parse_to_tables(data=data)
    # The exception carries the error message; the reference column was empty.
    self.assertEqual('engine: unknown field type for value: xyz', ctx.exception.message)
    self.assertEqual('', ctx.exception.reference)
def test_ParseExportFromUserInterface(self):
    """Parse CSV in the column order exported by the InfluxDB UI.

    Here the ``#group`` annotation precedes ``#datatype``; the parser must
    still map group flags onto the right columns.
    """
    # Parenthesized implicit concatenation replaces the "+"-continued
    # literals; the stray trailing semicolon is gone.
    data = (
        "#group,false,false,true,true,true,true,true,true,false,false\n"
        "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,double,dateTime:RFC3339\n"
        "#default,mean,,,,,,,,,\n"
        ",result,table,_start,_stop,_field,_measurement,city,location,_value,_time\n"
        ",,0,1754-06-26T11:30:27.613654848Z,2040-10-27T12:13:46.485Z,temperatureC,weather,London,us-midwest,30,1975-09-01T16:59:54.5Z\n"
        ",,1,1754-06-26T11:30:27.613654848Z,2040-10-27T12:13:46.485Z,temperatureF,weather,London,us-midwest,86,1975-09-01T16:59:54.5Z\n"
    )
    tables = self._parse_to_tables(data=data)
    self.assertEqual(2, len(tables))
    self.assertEqual(1, len(tables[0].records))
    self.assertEqual(1, len(tables[1].records))
    # result/table columns are ungrouped; _start (index 2) is grouped.
    self.assertFalse(tables[1].columns[0].group)
    self.assertFalse(tables[1].columns[1].group)
    self.assertTrue(tables[1].columns[2].group)
def test_ParseInf(self):
    """``+Inf`` / ``-Inf`` in a double column must parse to ±math.inf."""
    data = """#group,false,false,true,true,true,true,true,true,true,true,false,false
#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,string,string,double,double
#default,_result,,,,,,,,,,,
,result,table,_start,_stop,_field,_measurement,language,license,name,owner,le,_value
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,0,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,10,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,20,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,30,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,40,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,50,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,60,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,70,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,80,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,90,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,+Inf,15
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,-Inf,15
"""
    tables = self._parse_to_tables(data=data)
    # One table with 12 rows; the last two carry the infinite "le" bounds.
    # len() replaces the non-idiomatic direct __len__() calls.
    self.assertEqual(1, len(tables))
    self.assertEqual(12, len(tables[0].records))
    self.assertEqual(math.inf, tables[0].records[10]["le"])
    self.assertEqual(-math.inf, tables[0].records[11]["le"])
def test_to_json(self):
    """FluxStructureEncoder output must match the checked-in JSON fixture."""
    import json

    # Two series of seven points each; the same shape as the fixture file.
    data = (
        "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n"
        "#group,false,false,true,true,true,true,false,false,true\n"
        "#default,_result,,,,,,,,\n"
        ",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n"
        ",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n"
        ",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2\n"
    )
    tables = self._parse_to_tables(data=data)
    with open('tests/query_output.json', 'r') as file:
        expected = file.read()
    self.assertEqual(expected, json.dumps(tables, cls=FluxStructureEncoder, indent=2))
def test_pandas_lot_of_columns(self):
    """A row with 200 extra long columns must still produce one DataFrame."""
    # Build the synthetic column fragments with join() instead of the
    # original quadratic "+=" loop; the f-prefixes on constant fragments
    # ("long", "false", ",") did nothing and are dropped.  The stray
    # trailing backslash after the data literal is also removed.
    indices = range(0, 200)
    data_types = "".join(",long" for _ in indices)
    groups = "".join(",false" for _ in indices)
    defaults = "," * 200
    columns = "".join(f",column_{i}" for i in indices)
    values = "".join(f",{i}" for i in indices)
    data = (
        f"#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string{data_types}\n"
        f"#group,false,false,true,true,true,true,false,false,true{groups}\n"
        f"#default,_result,,,,,,,,{defaults}\n"
        f",result,table,_field,_measurement,_start,_stop,_time,_value,tag{columns}\n"
        f",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1{values}\n"
    )
    parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame)
    _dataFrames = list(parser.generator())
    self.assertEqual(1, len(_dataFrames))
@staticmethod
def _parse_to_tables(data: str, serialization_mode=FluxSerializationMode.tables):
    """Parse annotated-CSV *data* and return the parser's table list."""
    parser = FluxCsvParserTest._parse(data, serialization_mode)
    # Drain the generator so the parser populates its ``tables`` attribute.
    list(parser.generator())
    return parser.tables
@staticmethod
def _parse(data, serialization_mode):
    """Wrap *data* in an in-memory HTTPResponse and build a FluxCsvParser."""
    stream = BytesIO(data.encode())
    response = HTTPResponse(stream, preload_content=False)
    return FluxCsvParser(response=response, serialization_mode=serialization_mode)
| 77.576 | 149 | 0.678973 | import math
import unittest
from io import BytesIO
from urllib3 import HTTPResponse
from influxdb_client.client.flux_csv_parser import FluxCsvParser, FluxSerializationMode, FluxQueryException
from influxdb_client.client.flux_table import FluxStructureEncoder
class FluxCsvParserTest(unittest.TestCase):
def test_one_table(self):
data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long,long,string\n" \
"#group,false,false,true,true,true,true,true,true,false,false,false\n" \
"#default,_result,,,,,,,,,,\n" \
",result,table,_start,_stop,_field,_measurement,host,region,_value2,value1,value_str\n" \
",,0,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,121,11,test\n"
tables = self._parse_to_tables(data=data)
self.assertEqual(1, tables.__len__())
self.assertEqual(11, tables[0].columns.__len__())
self.assertEqual(1, tables[0].records.__len__())
def test_more_tables(self):
data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long,long,string\n" \
"#group,false,false,true,true,true,true,true,true,false,false,false\n" \
"#default,_result,,,,,,,,,,\n" \
",result,table,_start,_stop,_field,_measurement,host,region,_value2,value1,value_str\n" \
",,0,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,121,11,test\n" \
",,1,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,B,west,484,22,test\n" \
",,2,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,usage_system,cpu,A,west,1444,38,test\n" \
",,3,1677-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,user_usage,cpu,A,west,2401,49,test"
tables = self._parse_to_tables(data=data)
self.assertEqual(4, tables.__len__())
self.assertEqual(11, tables[0].columns.__len__())
self.assertEqual(1, tables[0].records.__len__())
self.assertEqual(11, tables[1].columns.__len__())
self.assertEqual(1, tables[1].records.__len__())
self.assertEqual(11, tables[2].columns.__len__())
self.assertEqual(1, tables[2].records.__len__())
self.assertEqual(11, tables[3].columns.__len__())
self.assertEqual(1, tables[3].records.__len__())
def test_multiple_queries(self):
data = "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n" \
"#group,false,false,true,true,true,true,false,false,true\n" \
"#default,t1,,,,,,,,\n" \
",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2\n" \
"\n" \
"#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n" \
"#group,false,false,true,true,true,true,false,false,true\n" \
"#default,t2,,,,,,,,\n" \
",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2"
tables = self._parse_to_tables(data=data)
self.assertEqual(4, tables.__len__())
self.assertEqual(9, tables[0].columns.__len__())
self.assertEqual(7, tables[0].records.__len__())
self.assertEqual(9, tables[1].columns.__len__())
self.assertEqual(7, tables[1].records.__len__())
self.assertEqual(9, tables[2].columns.__len__())
self.assertEqual(7, tables[2].records.__len__())
self.assertEqual(9, tables[3].columns.__len__())
self.assertEqual(7, tables[3].records.__len__())
def test_table_index_not_start_at_zero(self):
data = "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n" \
"#group,false,false,true,true,true,true,false,false,true\n" \
"#default,t1,,,,,,,,\n" \
",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n" \
",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n" \
",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n" \
",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n" \
",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n" \
",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n" \
",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n" \
",,2,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2\n"
tables = self._parse_to_tables(data=data)
self.assertEqual(2, tables.__len__())
self.assertEqual(9, tables[0].columns.__len__())
self.assertEqual(7, tables[0].records.__len__())
self.assertEqual(9, tables[1].columns.__len__())
self.assertEqual(7, tables[1].records.__len__())
def test_response_with_error(self):
data = "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n" \
"#group,false,false,true,true,true,true,false,false,true\n" \
"#default,t1,,,,,,,,\n" \
",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n" \
"\n" \
"#datatype,string,string\n" \
"#group,true,true\n" \
"#default,,\n" \
",error,reference\n" \
",\"engine: unknown field type for value: xyz\","
with self.assertRaises(FluxQueryException) as cm:
self._parse_to_tables(data=data)
exception = cm.exception
self.assertEqual('engine: unknown field type for value: xyz', exception.message)
self.assertEqual('', exception.reference)
def test_ParseExportFromUserInterface(self):
data = "#group,false,false,true,true,true,true,true,true,false,false\n" \
+ "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,double,dateTime:RFC3339\n" \
+ "#default,mean,,,,,,,,,\n" \
+ ",result,table,_start,_stop,_field,_measurement,city,location,_value,_time\n" \
+ ",,0,1754-06-26T11:30:27.613654848Z,2040-10-27T12:13:46.485Z,temperatureC,weather,London,us-midwest,30,1975-09-01T16:59:54.5Z\n" \
+ ",,1,1754-06-26T11:30:27.613654848Z,2040-10-27T12:13:46.485Z,temperatureF,weather,London,us-midwest,86,1975-09-01T16:59:54.5Z\n";
tables = self._parse_to_tables(data=data)
self.assertEqual(2, tables.__len__())
self.assertEqual(1, tables[0].records.__len__())
self.assertEqual(1, tables[1].records.__len__())
self.assertFalse(tables[1].columns[0].group)
self.assertFalse(tables[1].columns[1].group)
self.assertTrue(tables[1].columns[2].group)
def test_ParseInf(self):
data = """#group,false,false,true,true,true,true,true,true,true,true,false,false
#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,string,string,double,double
#default,_result,,,,,,,,,,,
,result,table,_start,_stop,_field,_measurement,language,license,name,owner,le,_value
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,0,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,10,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,20,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,30,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,40,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,50,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,60,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,70,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,80,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,90,0
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,+Inf,15
,,0,2021-06-23T06:50:11.897825012Z,2021-06-25T06:50:11.897825012Z,stars,github_repository,C#,MIT License,influxdb-client-csharp,influxdata,-Inf,15
"""
tables = self._parse_to_tables(data=data)
self.assertEqual(1, tables.__len__())
self.assertEqual(12, tables[0].records.__len__())
self.assertEqual(math.inf, tables[0].records[10]["le"])
self.assertEqual(-math.inf, tables[0].records[11]["le"])
def test_to_json(self):
data = "#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string\n" \
"#group,false,false,true,true,true,true,false,false,true\n" \
"#default,_result,,,,,,,,\n" \
",result,table,_field,_measurement,_start,_stop,_time,_value,tag\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test1\n" \
",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test1\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:21:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:23:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:25:00Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:26:40Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:28:20Z,2,test2\n" \
",,1,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:30:00Z,2,test2\n"
tables = self._parse_to_tables(data=data)
with open('tests/query_output.json', 'r') as file:
query_output = file.read()
import json
self.assertEqual(query_output, json.dumps(tables, cls=FluxStructureEncoder, indent=2))
def test_pandas_lot_of_columns(self):
data_types = ""
groups = ""
defaults = ""
columns = ""
values = ""
for i in range(0, 200):
data_types += f",long"
groups += f",false"
defaults += f","
columns += f",column_{i}"
values += f",{i}"
data = f"#datatype,string,long,string,string,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,double,string{data_types}\n" \
f"#group,false,false,true,true,true,true,false,false,true{groups}\n" \
f"#default,_result,,,,,,,,{defaults}\n" \
f",result,table,_field,_measurement,_start,_stop,_time,_value,tag{columns}\n" \
f",,0,value,python_client_test,2010-02-27T04:48:32.752600083Z,2020-02-27T16:48:32.752600083Z,2020-02-27T16:20:00Z,2,test1{values}\n" \
parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame)
_dataFrames = list(parser.generator())
self.assertEqual(1, _dataFrames.__len__())
@staticmethod
def _parse_to_tables(data: str, serialization_mode=FluxSerializationMode.tables):
_parser = FluxCsvParserTest._parse(data, serialization_mode)
list(_parser.generator())
tables = _parser.tables
return tables
@staticmethod
def _parse(data, serialization_mode):
fp = BytesIO(str.encode(data))
return FluxCsvParser(response=HTTPResponse(fp, preload_content=False),
serialization_mode=serialization_mode)
| true | true |
f72d6a033dce893b176c2539c0e154a035cb9415 | 1,437 | py | Python | RecoTracker/TkSeedGenerator/python/GlobalSeedsFromPairsWithVertices_cff.py | samarendran23/cmssw | 849dd9897db9b894ca83e1b630a3c1eecafd6097 | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | RecoTracker/TkSeedGenerator/python/GlobalSeedsFromPairsWithVertices_cff.py | samarendran23/cmssw | 849dd9897db9b894ca83e1b630a3c1eecafd6097 | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | RecoTracker/TkSeedGenerator/python/GlobalSeedsFromPairsWithVertices_cff.py | samarendran23/cmssw | 849dd9897db9b894ca83e1b630a3c1eecafd6097 | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
from RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi import *
from RecoLocalTracker.SiPixelRecHits.PixelCPEParmError_cfi import *
from RecoTracker.TransientTrackingRecHit.TransientTrackingRecHitBuilder_cfi import *
from RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi import *
from TrackingTools.MaterialEffects.MaterialPropagator_cfi import *
from RecoTracker.TkSeedingLayers.TTRHBuilderWithoutAngle4MixedPairs_cfi import *
from RecoTracker.TkSeedingLayers.TTRHBuilderWithoutAngle4PixelPairs_cfi import *
from RecoTracker.TkSeedingLayers.PixelLayerPairs_cfi import *
from RecoTracker.TkSeedingLayers.MixedLayerPairs_cfi import *
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import *
import RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi
globalSeedsFromPairsWithVertices = RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi.seedGeneratorFromRegionHitsEDProducer.clone(
OrderedHitsFactoryPSet = dict(
ComponentName = 'StandardHitPairGenerator',
SeedingLayers = 'MixedLayerPairs',
maxElement = 1000000
),
RegionFactoryPSet = dict(
RegionPSet = globalTrackingRegionWithVertices.RegionPSet.clone(),
ComponentName = 'GlobalTrackingRegionWithVerticesProducer'
)
)
| 51.321429 | 149 | 0.857342 | import FWCore.ParameterSet.Config as cms
from RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi import *
from RecoLocalTracker.SiPixelRecHits.PixelCPEParmError_cfi import *
from RecoTracker.TransientTrackingRecHit.TransientTrackingRecHitBuilder_cfi import *
from RecoTracker.MeasurementDet.MeasurementTrackerESProducer_cfi import *
from TrackingTools.MaterialEffects.MaterialPropagator_cfi import *
from RecoTracker.TkSeedingLayers.TTRHBuilderWithoutAngle4MixedPairs_cfi import *
from RecoTracker.TkSeedingLayers.TTRHBuilderWithoutAngle4PixelPairs_cfi import *
from RecoTracker.TkSeedingLayers.PixelLayerPairs_cfi import *
from RecoTracker.TkSeedingLayers.MixedLayerPairs_cfi import *
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import *
import RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi
globalSeedsFromPairsWithVertices = RecoTracker.TkSeedGenerator.SeedGeneratorFromRegionHitsEDProducer_cfi.seedGeneratorFromRegionHitsEDProducer.clone(
OrderedHitsFactoryPSet = dict(
ComponentName = 'StandardHitPairGenerator',
SeedingLayers = 'MixedLayerPairs',
maxElement = 1000000
),
RegionFactoryPSet = dict(
RegionPSet = globalTrackingRegionWithVertices.RegionPSet.clone(),
ComponentName = 'GlobalTrackingRegionWithVerticesProducer'
)
)
| true | true |
f72d6a6678fa943f663ba4e1f59271b649442a99 | 1,180 | py | Python | example/ssd/tools/visualize_net.py | axbaretto/mxnet | 5f593885356ff6d14f5519fa18e79b944beb51cd | [
"Apache-2.0"
] | 9 | 2017-07-13T03:12:24.000Z | 2021-11-10T16:15:27.000Z | example/ssd/tools/visualize_net.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 3 | 2017-07-10T21:49:18.000Z | 2017-07-12T22:40:06.000Z | example/ssd/tools/visualize_net.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 11 | 2018-02-27T15:32:09.000Z | 2021-04-21T08:48:17.000Z | from __future__ import print_function
import find_mxnet
import mxnet as mx
import argparse
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'symbol'))
import symbol_factory
parser = argparse.ArgumentParser(description='network visualization')
parser.add_argument('--network', type=str, default='vgg16_reduced',
help = 'the cnn to use')
parser.add_argument('--num-classes', type=int, default=20,
help='the number of classes')
parser.add_argument('--data-shape', type=int, default=300,
help='set image\'s shape')
parser.add_argument('--train', action='store_true', default=False, help='show train net')
args = parser.parse_args()
if not args.train:
net = symbol_factory.get_symbol(args.network, args.data_shape, num_classes=args.num_classes)
a = mx.viz.plot_network(net, shape={"data":(1,3,args.data_shape,args.data_shape)}, \
node_attrs={"shape":'rect', "fixedsize":'false'})
a.render("ssd_" + args.network + '_' + str(args.data_shape))
else:
net = symbol_factory.get_symbol_train(args.network, args.data_shape, num_classes=args.num_classes)
print(net.tojson())
| 42.142857 | 102 | 0.701695 | from __future__ import print_function
import find_mxnet
import mxnet as mx
import argparse
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'symbol'))
import symbol_factory
parser = argparse.ArgumentParser(description='network visualization')
parser.add_argument('--network', type=str, default='vgg16_reduced',
help = 'the cnn to use')
parser.add_argument('--num-classes', type=int, default=20,
help='the number of classes')
parser.add_argument('--data-shape', type=int, default=300,
help='set image\'s shape')
parser.add_argument('--train', action='store_true', default=False, help='show train net')
args = parser.parse_args()
if not args.train:
net = symbol_factory.get_symbol(args.network, args.data_shape, num_classes=args.num_classes)
a = mx.viz.plot_network(net, shape={"data":(1,3,args.data_shape,args.data_shape)}, \
node_attrs={"shape":'rect', "fixedsize":'false'})
a.render("ssd_" + args.network + '_' + str(args.data_shape))
else:
net = symbol_factory.get_symbol_train(args.network, args.data_shape, num_classes=args.num_classes)
print(net.tojson())
| true | true |
f72d6c4ce56c7ff17559ee79b46baa9dea887b9d | 1,154 | py | Python | Chapter 3/prototypes.py | alisx/lightdjango_practice | 9a0662a04b306e6f0e6accc3d0be08ea1f274a2a | [
"MIT"
] | 1 | 2019-11-24T13:49:02.000Z | 2019-11-24T13:49:02.000Z | Chapter 3/prototypes.py | alisx/lightdjango_practice | 9a0662a04b306e6f0e6accc3d0be08ea1f274a2a | [
"MIT"
] | null | null | null | Chapter 3/prototypes.py | alisx/lightdjango_practice | 9a0662a04b306e6f0e6accc3d0be08ea1f274a2a | [
"MIT"
] | 1 | 2018-10-11T05:39:42.000Z | 2018-10-11T05:39:42.000Z | import sys
import sys  # restored: the entry point below uses sys.argv (import was lost upstream)
import os
from django.conf import settings

# Runtime knobs come from the environment so the same file works in dev and prod.
DEBUG = os.environ.get('DEBUG', 'on') == 'on'
# BUG FIX: the original assignment ended with a stray trailing comma, which made
# SECRET_KEY a 1-tuple instead of a string.
SECRET_KEY = os.environ.get('SECRET_KEY', 'a^hi#2sv)yy%v(6fhlv(j@-5e%+7h*d%#g%+ru(hv-7rj08r7n')
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
BASE_DIR = os.path.dirname(__file__)

# Configure Django entirely in code (no separate settings module). The
# SITE_* keys are custom settings, presumably consumed by the 'sitebuilder'
# app to read source pages and write the generated static site.
settings.configure(
    DEBUG=DEBUG,
    SECRET_KEY=SECRET_KEY,
    ALLOWED_HOSTS=ALLOWED_HOSTS,
    ROOT_URLCONF='sitebuilder.urls',
    MIDDLEWARE_CLASSES=(),
    INSTALLED_APPS=(
        'django.contrib.staticfiles',
        'sitebuilder'
    ),
    TEMPLATES=(
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [],
            'APP_DIRS': True
        },
    ),
    STATIC_URL='/static/',
    SITE_PAGES_DIRECTORY=os.path.join(BASE_DIR, 'pages'),
    SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR, '_build'),
    STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage'
)

if __name__ == '__main__':
    # Delegate to Django's management CLI (runserver, collectstatic, ...).
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| 31.189189 | 96 | 0.67851 | import sys
import os
from django.conf import settings
DEBUG = os.environ.get('DEBUG', 'on') == 'on'
SECRET_KEY = os.environ.get('SECRET_KEY', 'a^hi#2sv)yy%v(6fhlv(j@-5e%+7h*d%#g%+ru(hv-7rj08r7n'),
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
BASE_DIR = os.path.dirname(__file__)
settings.configure(
DEBUG=DEBUG,
SECRET_KEY=SECRET_KEY,
ALLOWED_HOSTS=ALLOWED_HOSTS,
ROOT_URLCONF='sitebuilder.urls',
MIDDLEWARE_CLASSES=(),
INSTALLED_APPS=(
'django.contrib.staticfiles',
'sitebuilder'
),
TEMPLATES=(
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True
},
),
STATIC_URL='/static/',
SITE_PAGES_DIRECTORY=os.path.join(BASE_DIR, 'pages'),
SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR, '_build'),
STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),
STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage'
)
if __name__ == '__main__':
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true | true |
f72d6ce9565fd009da96d87e7bbeeb8795b95739 | 6,165 | py | Python | examples/resample_cube_to_sphere_with_depth.py | zuru/MappedConvolutions | 77dda41ac9e552fb77dc4494bd7e552c7c5b4e5d | [
"BSD-3-Clause"
] | null | null | null | examples/resample_cube_to_sphere_with_depth.py | zuru/MappedConvolutions | 77dda41ac9e552fb77dc4494bd7e552c7c5b4e5d | [
"BSD-3-Clause"
] | null | null | null | examples/resample_cube_to_sphere_with_depth.py | zuru/MappedConvolutions | 77dda41ac9e552fb77dc4494bd7e552c7c5b4e5d | [
"BSD-3-Clause"
] | null | null | null | # EXAMPLE: Resampling an cubemap to the vertices of an depth scaled icosphere
#
# This example shows how to resample a cubemap to the vertices of an
# icosphere. We then scale the vertices according to provided depth
# information, which reshapes the mesh to the indoor scene it captures. We
# then show how to render back to an equirectangular image and to render the
# surface normals.
# =============================================================================
import torch.nn.functional as F
# Star import provides generate_icosphere, resample_*, write_ply and,
# presumably, np/torch used below -- TODO confirm against mapped_convolution.util.
from mapped_convolution.util import *
from skimage import io

# =================
# PARAMETERS
# =================
order = 7  # Resolution of the desired icosphere (subdivision order)
output_equirect_shape = (512, 1024)  # Output equirectangular image dims
cuda = True  # Whether to use GPU (recommended; see NOTE on .get_device() below)

# -----------------------------------------------------------------------------
# Generate an icosphere
# -----------------------------------------------------------------------------
print('Generating icosphere')
icosphere = generate_icosphere(order)

# -----------------------------------------------------------------------------
# Load and process the cubemap image
# -----------------------------------------------------------------------------
print('Loading the cube map data')
# Load the multi-page TIFF image
#   Channel 0: RGB
#   Channel 1: Depth (stored inverted, 16 bit)
#   Channel 2: Semantic labels
#   Channel 3: Instance labels
tiff = io.MultiImage('examples/inputs/cubemap.tiff')
# Convert the RGB image to a torch tensor with dimensions (1, 3, H, W)
cube_rgb = torch.from_numpy(tiff[0]).permute(2, 0, 1).float().unsqueeze(0)
if cuda:
    cube_rgb = cube_rgb.cuda()
# Convert the depth image to a torch tensor with dimensions (1, 1, H, W)
cube_inv_depth = torch.from_numpy(tiff[1].astype(
    np.int32)).float().unsqueeze(0).unsqueeze(0)
if cuda:
    cube_inv_depth = cube_inv_depth.cuda()
# Convert inverse depth to regular depth; 0 marks invalid pixels and stays 0
cube_inv_depth[cube_inv_depth == 0] = -1
cube_depth = 1 / cube_inv_depth
cube_depth[cube_depth < 0] = 0
# Convert to metric scale according to min-distance = 0.3m
# (this is a sample image from the SUMO dataset)
scale = 0.3 * (2**16 - 1)
cube_depth *= scale

# -----------------------------------------------------------------------------
# Resample the image to the sphere
# -----------------------------------------------------------------------------
print('Resampling the image data to the sphere')
# Resample the RGB cubemap using barycentric interpolation
rgb_vertices = resample_cube_to_vertex(cube_rgb, icosphere, order)
# Resample the depth cubemap using nearest-neighbor interpolation
depth_vertices = resample_cube_to_vertex(cube_depth, icosphere, order, True)
# Gather remaining info needed for the PLY
rgb_vertices = rgb_vertices.squeeze()  # (3, V)
vertices = icosphere.get_vertices()  # (V, 3)
face_idx = icosphere.get_all_face_vertex_indices()  # (F, 3)
# Write the textured sphere to file
write_ply('examples/outputs/rgb_sphere.ply',
          vertices.transpose(0, 1).numpy(),
          rgb=rgb_vertices.cpu().numpy(),
          faces=face_idx.cpu().numpy(),
          text=False)
print('Textured icosphere written to `outputs/rgb_sphere.ply`')

# -----------------------------------------------------------------------------
# Scale the vertices according to depth
# -----------------------------------------------------------------------------
print('Scaling the vertices according to the depth data')
# Get the vertices of the icosphere (V, 3)
pts = icosphere.get_vertices()
if cuda:
    pts = pts.cuda()
# Scale the vertices by the depth values: (V, 1) * (V, 3)
pts = depth_vertices.squeeze().unsqueeze(-1) * pts
# Write the resulting mesh to file. This mesh is the result of warping the
# sphere according to the depth value at each vertex.
write_ply('examples/outputs/deformed_sphere.ply',
          pts.cpu().transpose(0, 1).numpy(),
          rgb=rgb_vertices.cpu().numpy(),
          faces=face_idx.cpu().numpy(),
          text=False)
print('Deformed spherical mesh written to `outputs/deformed_sphere.ply`')

# --------------------------------------------------------------------
# Resample the mesh back to an equirectangular image
# --------------------------------------------------------------------
print('Render sphere back into equirectangular image')
rgb_rect = resample_vertex_to_rect(rgb_vertices.view(1, 3, 1, -1),
                                   output_equirect_shape, order)
# Save the re-rendered RGB image
io.imsave('examples/outputs/rerendered_rect.png',
          rgb_rect.squeeze().permute(1, 2, 0).byte().cpu().numpy())
print('Rendered equirectangular image written to `outputs/rerendered_rect.png`')

# --------------------------------------------------------------------
# With the mesh deformed to the proper geometry, compute a surface-normal
# map from the mesh faces.
# NOTE(review): .get_device() below assumes CUDA tensors (cuda=True);
# this path fails on CPU -- confirm before running with cuda=False.
# --------------------------------------------------------------------
print('Render surface normal map into equirectangular image')
# Face normals from the cross product of two triangle edges
face_coords = pts[face_idx.to(pts.get_device())]  # (F, 3, 3)
a = face_coords[:, 2, :] - face_coords[:, 1, :]
b = face_coords[:, 0, :] - face_coords[:, 1, :]
face_normals = F.normalize(torch.cross(a, b, dim=-1), p=2, dim=-1)  # (F, 3)
# Vertex normals by averaging the surrounding face normals (V, 3)
adj_idx = icosphere.get_adjacent_face_indices_to_vertices()
vertex_normals = F.normalize(face_normals[adj_idx.to(
    face_normals.get_device())].mean(1),
                             p=2,
                             dim=-1)
# Resample normals back to an equirectangular image to visualize them
normals_rect = resample_vertex_to_rect(
    vertex_normals.permute(1, 0).contiguous().view(1, 3, 1, -1),
    output_equirect_shape, order)
normals_rect = F.normalize(normals_rect.squeeze(), 2, 0)
# Visualize the normals as RGB in equirectangular format ([-1, 1] -> [0, 255])
np_rect = ((normals_rect * 127.5) + 127.5).byte().permute(1, 2, 0).cpu().numpy()
io.imsave('examples/outputs/normals_rect.png', np_rect)
print(
    'Rendered surface normals written to equirectangular image as `outputs/normals_rect.png`'
)  # repaired: dataset stats had been fused onto this closing paren by extraction
import torch.nn.functional as F
from mapped_convolution.util import *
from skimage import io
order = 7
output_equirect_shape = (512, 1024)
cuda = True
print('Generating icosphere')
icosphere = generate_icosphere(order)
print('Loading the cube map data')
tiff = io.MultiImage('examples/inputs/cubemap.tiff')
cube_rgb = torch.from_numpy(tiff[0]).permute(2, 0, 1).float().unsqueeze(0)
if cuda:
cube_rgb = cube_rgb.cuda()
cube_inv_depth = torch.from_numpy(tiff[1].astype(
np.int32)).float().unsqueeze(0).unsqueeze(0)
if cuda:
cube_inv_depth = cube_inv_depth.cuda()
cube_inv_depth[cube_inv_depth == 0] = -1
cube_depth = 1 / cube_inv_depth
cube_depth[cube_depth < 0] = 0
scale = 0.3 * (2**16 - 1)
cube_depth *= scale
print('Resampling the image data to the sphere')
rgb_vertices = resample_cube_to_vertex(cube_rgb, icosphere, order)
depth_vertices = resample_cube_to_vertex(cube_depth, icosphere, order, True)
rgb_vertices = rgb_vertices.squeeze()
vertices = icosphere.get_vertices()
face_idx = icosphere.get_all_face_vertex_indices()
write_ply('examples/outputs/rgb_sphere.ply',
vertices.transpose(0, 1).numpy(),
rgb=rgb_vertices.cpu().numpy(),
faces=face_idx.cpu().numpy(),
text=False)
print('Textured icosphere written to `outputs/rgb_sphere.ply`')
print('Scaling the vertices according to the depth data')
pts = icosphere.get_vertices()
if cuda:
pts = pts.cuda()
pts = depth_vertices.squeeze().unsqueeze(-1) * pts
write_ply('examples/outputs/deformed_sphere.ply',
pts.cpu().transpose(0, 1).numpy(),
rgb=rgb_vertices.cpu().numpy(),
faces=face_idx.cpu().numpy(),
text=False)
print('Deformed spherical mesh written to `outputs/deformed_sphere.ply`')
# --------------------------------------------------------------------
print('Render sphere back into equirectangular image')
# Resample back to an equirectangular image
rgb_rect = resample_vertex_to_rect(rgb_vertices.view(1, 3, 1, -1),
output_equirect_shape, order)
# Save the re-rendered RGB image
io.imsave('examples/outputs/rerendered_rect.png',
rgb_rect.squeeze().permute(1, 2, 0).byte().cpu().numpy())
print('Rendered equirectangular image written to `outputs/rerendered_rect.png`')
# --------------------------------------------------------------------
# Now that we have the mesh deformed to the proper geometry, let's also compute a surface normal map from the mesh faces
print('Render surface normal map into equirectangular image')
face_coords = pts[face_idx.to(pts.get_device())]
a = face_coords[:, 2, :] - face_coords[:, 1, :]
b = face_coords[:, 0, :] - face_coords[:, 1, :]
face_normals = F.normalize(torch.cross(a, b, dim=-1), p=2, dim=-1)
adj_idx = icosphere.get_adjacent_face_indices_to_vertices()
vertex_normals = F.normalize(face_normals[adj_idx.to(
face_normals.get_device())].mean(1),
p=2,
dim=-1)
normals_rect = resample_vertex_to_rect(
vertex_normals.permute(1, 0).contiguous().view(1, 3, 1, -1),
output_equirect_shape, order)
normals_rect = F.normalize(normals_rect.squeeze(), 2, 0)
np_rect = ((normals_rect * 127.5) + 127.5).byte().permute(1, 2, 0).cpu().numpy()
io.imsave('examples/outputs/normals_rect.png', np_rect)
print(
'Rendered surface normals written to equirectangular image as `outputs/normals_rect.png`'
) | true | true |
f72d6d6bd8b769be90fb9315479c998445aa2960 | 2,872 | py | Python | cogs/utils/context.py | quiprr/gir | c6910f3f61d15d52da7b12e57d1d4f159c61689b | [
"MIT"
] | null | null | null | cogs/utils/context.py | quiprr/gir | c6910f3f61d15d52da7b12e57d1d4f159c61689b | [
"MIT"
] | null | null | null | cogs/utils/context.py | quiprr/gir | c6910f3f61d15d52da7b12e57d1d4f159c61689b | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import discord
import asyncio
from discord.ext import commands
import pytimeparse
class Context(commands.Context):
    """Command context subclass adding bot-settings shortcuts plus small
    success/error/prompt reply helpers used by the cogs."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Shortcuts so handlers can write ctx.settings / ctx.permissions / ctx.tasks.
        self.settings = self.bot.settings
        self.permissions = self.bot.settings.permissions
        self.tasks = self.bot.settings.tasks

    async def send_success(self, description: str, delete_after: int = None):
        """Reply with a blurple embed; optionally auto-delete after `delete_after` seconds."""
        return await self.reply(embed=discord.Embed(description=description, color=discord.Color.blurple()), delete_after=delete_after)

    async def prompt(self, value, data):
        """Custom prompt system
        Data format is a dictionary:
        {
            'prompt': "The message to ask the user",
            'convertor': function to use as converter, for example str or commands.MemberConverter().convert,
            'event': optional, if you want to prompt for reaction for example
        }
        Returns the converted response, or None when the prompt times out
        (120 s), the user answers "cancel", or the response is empty.
        Raises commands.BadArgument when the response cannot be converted.
        """
        question = data['prompt']
        convertor = data['convertor']
        event = data.get('event') or 'message'

        def wait_check(m):
            # Only accept a reply from the original author in the same channel.
            return m.author == self.author and m.channel == self.channel
        ret = None
        prompt = await self.send(embed=discord.Embed(description=question, color=discord.Color.blurple()))
        try:
            response = await self.bot.wait_for(event, check=wait_check, timeout=120)
        except asyncio.TimeoutError:
            await prompt.delete()
            return
        else:
            # Clean up both the question and the answer before processing.
            await response.delete()
            await prompt.delete()
            if response.content.lower() == "cancel":
                return
            elif response.content is not None and response.content != "":
                if convertor in [str, int, pytimeparse.parse]:
                    # Synchronous converters: a failed parse surfaces as BadArgument.
                    try:
                        ret = convertor(response.content)
                    except Exception:
                        ret = None
                    if ret is None:
                        raise commands.BadArgument(f"Could not parse value for parameter \"{value}\".")
                    if convertor is pytimeparse.parse:
                        # pytimeparse yields an offset in seconds; reject past offsets.
                        now = datetime.now()
                        time = now + timedelta(seconds=ret)
                        if time < now:
                            raise commands.BadArgument("Time has to be in the future >:(")
                else:
                    # Anything else is assumed to be an async discord.py converter
                    # taking (ctx, argument), e.g. commands.MemberConverter().convert.
                    ret = await convertor(self, response.content)
        return ret

    async def send_error(self, error):
        """Report a command failure as a red embed that self-deletes after 8 seconds."""
        embed = discord.Embed(title=":(\nYour command ran into a problem")
        embed.color = discord.Color.red()
        embed.description = discord.utils.escape_markdown(f'{error}')
        await self.send(embed=embed, delete_after=8)
| 38.810811 | 135 | 0.562674 | from datetime import datetime, timedelta
import discord
import asyncio
from discord.ext import commands
import pytimeparse
class Context(commands.Context):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.settings = self.bot.settings
self.permissions = self.bot.settings.permissions
self.tasks = self.bot.settings.tasks
async def send_success(self, description: str, delete_after: int = None):
return await self.reply(embed=discord.Embed(description=description, color=discord.Color.blurple()), delete_after=delete_after)
async def prompt(self, value, data):
question = data['prompt']
convertor = data['convertor']
event = data.get('event') or 'message'
def wait_check(m):
return m.author == self.author and m.channel == self.channel
ret = None
prompt = await self.send(embed=discord.Embed(description=question, color=discord.Color.blurple()))
try:
response = await self.bot.wait_for(event, check=wait_check, timeout=120)
except asyncio.TimeoutError:
await prompt.delete()
return
else:
await response.delete()
await prompt.delete()
if response.content.lower() == "cancel":
return
elif response.content is not None and response.content != "":
if convertor in [str, int, pytimeparse.parse]:
try:
ret = convertor(response.content)
except Exception:
ret = None
if ret is None:
raise commands.BadArgument(f"Could not parse value for parameter \"{value}\".")
if convertor is pytimeparse.parse:
now = datetime.now()
time = now + timedelta(seconds=ret)
if time < now:
raise commands.BadArgument("Time has to be in the future >:(")
else:
ret = await convertor(self, response.content)
return ret
async def send_error(self, error):
embed = discord.Embed(title=":(\nYour command ran into a problem")
embed.color = discord.Color.red()
embed.description = discord.utils.escape_markdown(f'{error}')
await self.send(embed=embed, delete_after=8)
| true | true |
f72d6e2c4be3d01f2b6edef15a4ca63f9c34d66f | 6,328 | py | Python | clock-in.py | CaiChenshu/ZJU-Clock-In | 7501e73ce4ec052c44fdcef9d17df5d77d8f9e39 | [
"MIT"
] | 10 | 2021-09-30T05:17:20.000Z | 2022-02-18T06:33:42.000Z | clock-in.py | CaiChenshu/ZJU-Clock-In | 7501e73ce4ec052c44fdcef9d17df5d77d8f9e39 | [
"MIT"
] | null | null | null | clock-in.py | CaiChenshu/ZJU-Clock-In | 7501e73ce4ec052c44fdcef9d17df5d77d8f9e39 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# 打卡脚修改自ZJU-nCov-Hitcarder的开源代码,感谢这位同学开源的代码
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import json
import re
import datetime
import time
import sys
class DaKa(object):
    """Automates the ZJU daily health report ("hit card") submission.

    Attributes:
        username: (str) ZJU unified-authentication account (usually the student id)
        password: (str) ZJU unified-authentication password
        sess: (requests.Session) session that carries the SSO cookies
        info: (dict) payload built by get_info() and submitted by post()
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
    }
    LOGIN_URL = "https://zjuam.zju.edu.cn/cas/login?service=https%3A%2F%2Fhealthreport.zju.edu.cn%2Fa_zju%2Fapi%2Fsso%2Findex%3Fredirect%3Dhttps%253A%252F%252Fhealthreport.zju.edu.cn%252Fncov%252Fwap%252Fdefault%252Findex"
    BASE_URL = "https://healthreport.zju.edu.cn/ncov/wap/default/index"
    SAVE_URL = "https://healthreport.zju.edu.cn/ncov/wap/default/save"

    def __init__(self, username, password):
        self.username = username
        self.password = password
        session = requests.Session()
        session.keep_alive = False  # short-lived script: close connections eagerly
        self.sess = session

    def login(self):
        """Authenticate against the ZJU SSO; leaves the auth cookies on self.sess."""
        login_page = self.sess.get(self.LOGIN_URL)
        # The CAS form carries a one-time "execution" token that must be echoed back.
        execution = re.search(
            'name="execution" value="(.*?)"', login_page.text).group(1)
        pub_key = self.sess.get(
            url='https://zjuam.zju.edu.cn/cas/v2/getPubKey').json()
        modulus, exponent = pub_key['modulus'], pub_key['exponent']
        form = {
            'username': self.username,
            'password': self._rsa_encrypt(self.password, exponent, modulus),
            'execution': execution,
            '_eventId': 'submit'
        }
        reply = self.sess.post(url=self.LOGIN_URL, data=form)
        # Being served the SSO page again means the credentials were rejected.
        if '统一身份认证' in reply.content.decode():
            raise LoginError('登录失败,请核实账号密码重新登录')
        return self.sess

    def post(self):
        """Submit the payload prepared by get_info(); returns the server's JSON reply."""
        reply = self.sess.post(self.SAVE_URL, data=self.info, headers=self.headers)
        return json.loads(reply.text)

    def get_date(self):
        """Today's date in the YYYYMMDD form the report expects."""
        today = datetime.date.today()
        return '{:4d}{:02d}{:02d}'.format(today.year, today.month, today.day)

    def get_info(self, html=None):
        """Build today's payload: the cached previous answers with refreshed fields.

        The report page embeds the previous submission as inline Javascript
        (`oldInfo`); that object is reused with a new id/date/timestamp.
        Raises RegexMatchError or DecodeError when the page cannot be parsed.
        """
        if not html:
            page = self.sess.get(self.BASE_URL, headers=self.headers)
            html = page.content.decode()
        try:
            cached = re.findall(r'oldInfo: ({[^\n]+})', html)
            if len(cached) == 0:
                raise RegexMatchError("未发现缓存信息,请先至少手动成功打卡一次再运行脚本")
            old_info = json.loads(cached[0])
            defaults = json.loads(re.findall(r'def = ({[^\n]+})', html)[0])
            new_id = defaults['id']
            name = re.findall(r'realname: "([^\"]+)",', html)[0]
            number = re.findall(r"number: '([^\']+)',", html)[0]
        except IndexError:
            raise RegexMatchError('Relative info not found in html with regex')
        except json.decoder.JSONDecodeError:
            raise DecodeError('JSON decode error')
        new_info = old_info.copy()
        area = "浙江省 杭州市 西湖区"
        new_info.update({
            'id': new_id,
            'name': name,
            'number': number,
            'date': self.get_date(),
            'created': round(time.time()),
            'address': "浙江省杭州市西湖区",
            'area': area,
            'province': area.split(' ')[0],
            'city': area.split(' ')[1],
            # form fields mirroring what the web page itself would send
            'jrdqtlqk[]': 0,
            'jrdqjcqk[]': 0,
            'sfsqhzjkk': 1,  # 是否申领杭州健康码
            'sqhzjkkys': 1,  # 杭州健康吗颜色,1:绿色 2:红色 3:黄色
            'sfqrxxss': 1,  # 是否确认信息属实
            'sfzx': 1,  # 是否在校
            'jcqzrq': "",
            'gwszdd': "",
            'szgjcs': "",
        })
        self.info = new_info
        return new_info

    def _rsa_encrypt(self, password_str, e_str, M_str):
        """Textbook RSA of the ascii password with public key (e, M), all hex strings.

        Returns the ciphertext as lowercase hex, left-padded with '0' to the
        128 characters the login form expects.
        """
        plain = int.from_bytes(bytes(password_str, 'ascii'), 'big')
        cipher = pow(plain, int(e_str, 16), int(M_str, 16))
        return format(cipher, 'x').rjust(128, '0')
# Exceptions
class LoginError(Exception):
    """Raised when authentication against the ZJU SSO portal fails."""
    pass
class RegexMatchError(Exception):
    """Raised when the expected data cannot be located in the report page HTML."""
    pass
class DecodeError(Exception):
    """Raised when JSON embedded in the report page cannot be decoded."""
    pass
def main(username, password):
    """Run one full check-in: log in, fetch the cached info, submit the form.

    Arguments:
        username: (str) ZJU unified-authentication account (usually the student id)
        password: (str) ZJU unified-authentication password
    """
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(f"\n[Time] {stamp}")
    print("🚌 打卡任务启动")
    client = DaKa(username, password)
    print("登录到浙大统一身份认证平台...")
    try:
        client.login()
        print("已登录到浙大统一身份认证平台")
    except Exception as err:
        print(str(err))
        raise Exception
    print('正在获取个人信息...')
    try:
        client.get_info()
        print(f"{client.info['number']} {client.info['name']}同学, 你好~")
    except Exception as err:
        print('获取信息失败,请手动打卡,更多信息: ' + str(err))
        raise Exception
    print('正在为您打卡打卡打卡')
    try:
        result = client.post()
        if str(result['e']) == '0':
            print('已为您打卡成功!')
        else:
            print(result['m'])
    except Exception:
        print('数据提交失败')
        raise Exception
if __name__ == "__main__":
    # Usage: clock-in.py <username> <password>. Fail fast with a hint instead
    # of an IndexError traceback when arguments are missing.
    if len(sys.argv) < 3:
        print("usage: %s <username> <password>" % sys.argv[0])
        exit(1)
    # SECURITY FIX: do not echo sys.argv -- it contains the plaintext password
    # and would leak it into CI/cron logs.
    username = sys.argv[1]
    password = sys.argv[2]
    try:
        main(username, password)
    except Exception:
        # main() already printed the reason; a non-zero exit lets schedulers notice.
        exit(1)
| 32.451282 | 233 | 0.59134 |
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import json
import re
import datetime
import time
import sys
class DaKa(object):
headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
}
LOGIN_URL = "https://zjuam.zju.edu.cn/cas/login?service=https%3A%2F%2Fhealthreport.zju.edu.cn%2Fa_zju%2Fapi%2Fsso%2Findex%3Fredirect%3Dhttps%253A%252F%252Fhealthreport.zju.edu.cn%252Fncov%252Fwap%252Fdefault%252Findex"
BASE_URL = "https://healthreport.zju.edu.cn/ncov/wap/default/index"
SAVE_URL = "https://healthreport.zju.edu.cn/ncov/wap/default/save"
def __init__(self, username, password):
self.username = username
self.password = password
self.sess = requests.Session()
self.sess.keep_alive = False
def login(self):
res = self.sess.get(self.LOGIN_URL)
execution = re.search(
'name="execution" value="(.*?)"', res.text).group(1)
res = self.sess.get(
url='https://zjuam.zju.edu.cn/cas/v2/getPubKey').json()
n, e = res['modulus'], res['exponent']
encrypt_password = self._rsa_encrypt(self.password, e, n)
data = {
'username': self.username,
'password': encrypt_password,
'execution': execution,
'_eventId': 'submit'
}
res = self.sess.post(url=self.LOGIN_URL, data=data)
if '统一身份认证' in res.content.decode():
raise LoginError('登录失败,请核实账号密码重新登录')
return self.sess
def post(self):
res = self.sess.post(self.SAVE_URL, data=self.info, headers=self.headers)
return json.loads(res.text)
def get_date(self):
today = datetime.date.today()
return "%4d%02d%02d" % (today.year, today.month, today.day)
def get_info(self, html=None):
if not html:
res = self.sess.get(self.BASE_URL, headers=self.headers)
html = res.content.decode()
try:
old_infos = re.findall(r'oldInfo: ({[^\n]+})', html)
if len(old_infos) != 0:
old_info = json.loads(old_infos[0])
else:
raise RegexMatchError("未发现缓存信息,请先至少手动成功打卡一次再运行脚本")
new_info_tmp = json.loads(re.findall(r'def = ({[^\n]+})', html)[0])
new_id = new_info_tmp['id']
name = re.findall(r'realname: "([^\"]+)",', html)[0]
number = re.findall(r"number: '([^\']+)',", html)[0]
except IndexError:
raise RegexMatchError('Relative info not found in html with regex')
except json.decoder.JSONDecodeError:
raise DecodeError('JSON decode error')
new_info = old_info.copy()
new_info['id'] = new_id
new_info['name'] = name
new_info['number'] = number
new_info["date"] = self.get_date()
new_info["created"] = round(time.time())
new_info["address"] = "浙江省杭州市西湖区"
new_info["area"] = "浙江省 杭州市 西湖区"
new_info["province"] = new_info["area"].split(' ')[0]
new_info["city"] = new_info["area"].split(' ')[1]
# form change
new_info['jrdqtlqk[]'] = 0
new_info['jrdqjcqk[]'] = 0
new_info['sfsqhzjkk'] = 1 # 是否申领杭州健康码
new_info['sqhzjkkys'] = 1 # 杭州健康吗颜色,1:绿色 2:红色 3:黄色
new_info['sfqrxxss'] = 1 # 是否确认信息属实
new_info['sfzx'] = 1 # 是否在校
new_info['jcqzrq'] = ""
new_info['gwszdd'] = ""
new_info['szgjcs'] = ""
self.info = new_info
return new_info
def _rsa_encrypt(self, password_str, e_str, M_str):
password_bytes = bytes(password_str, 'ascii')
password_int = int.from_bytes(password_bytes, 'big')
e_int = int(e_str, 16)
M_int = int(M_str, 16)
result_int = pow(password_int, e_int, M_int)
return hex(result_int)[2:].rjust(128, '0')
# Exceptions
class LoginError(Exception):
pass
class RegexMatchError(Exception):
pass
class DecodeError(Exception):
pass
def main(username, password):
print("\n[Time] %s" %
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print("🚌 打卡任务启动")
dk = DaKa(username, password)
print("登录到浙大统一身份认证平台...")
try:
dk.login()
print("已登录到浙大统一身份认证平台")
except Exception as err:
print(str(err))
raise Exception
print('正在获取个人信息...')
try:
dk.get_info()
print('%s %s同学, 你好~' % (dk.info['number'], dk.info['name']))
except Exception as err:
print('获取信息失败,请手动打卡,更多信息: ' + str(err))
raise Exception
print('正在为您打卡打卡打卡')
try:
res = dk.post()
if str(res['e']) == '0':
print('已为您打卡成功!')
else:
print(res['m'])
except Exception:
print('数据提交失败')
raise Exception
if __name__ == "__main__":
print(sys.argv)
username = sys.argv[1]
password = sys.argv[2]
try:
main(username, password)
except Exception:
exit(1)
| true | true |
f72d6f9e38555c7e0d5030a3358acec901499195 | 4,057 | py | Python | test/test_soil/test_load.py | voidpp/configpp | 6d395eef6a2279c8902c40c3f005d530674a6cba | [
"MIT"
] | null | null | null | test/test_soil/test_load.py | voidpp/configpp | 6d395eef6a2279c8902c40c3f005d530674a6cba | [
"MIT"
] | 6 | 2018-09-15T09:14:12.000Z | 2019-07-10T11:40:36.000Z | test/test_soil/test_load.py | voidpp/configpp | 6d395eef6a2279c8902c40c3f005d530674a6cba | [
"MIT"
] | null | null | null |
from configpp.soil import Config, Group, GroupMember, Transport, ClimberLocation
from voidpp_tools.mocks.file_system import FileSystem, mockfs
# Shared fixture: a single JSON config file with known content, used by most
# of the mocked-filesystem tests below.
_data_filename = 'test1.json'
_data = {_data_filename: '{"a": 42}'}
def test_load_simple_not_found():
    """Without the file anywhere on disk, load() reports failure."""
    missing = Config(_data_filename)
    assert missing.load() is False
@mockfs({'etc': _data})
def test_load_simple_found_etc():
    """A config dropped in /etc is found, parsed, and its path recorded."""
    config = Config(_data_filename)
    assert config.load() is True
    assert config.data == {"a": 42}
    assert config.path == '/etc/' + _data_filename
@mockfs({'home': {'douglas': _data}})
def test_load_simple_found_home():
    """The user's home directory is searched as well."""
    config = Config(_data_filename)
    assert config.load() is True
    assert config.data == {"a": 42}
@mockfs({'teve': _data}, cwd = '/teve')
def test_load_simple_found_cwd():
    """The current working directory is searched as well."""
    config = Config(_data_filename)
    assert config.load() is True
    assert config.data == {"a": 42}
@mockfs({'etc': _data, 'home': {'douglas': {_data_filename: '{"a": 84}'}}})
def test_load_simple_location_order():
    """When the file exists in several locations, the home copy wins over /etc."""
    config = Config(_data_filename)
    assert config.load() is True
    assert config.data == {"a": 84}
@mockfs({'etc': {'test1': {'core.json': '{"a": 42}', 'logger.json': '{"b": 42}'}}})
def test_load_group():
    """All members of a group load from the group's directory."""
    core_member = GroupMember('core.json')
    logger_member = GroupMember('logger.json')
    group = Group('test1', [core_member, logger_member])
    assert group.load()
    assert core_member.data == {"a": 42}
    assert logger_member.data == {"b": 42}
@mockfs({'etc': {'test1': {'logger.json': '{"b": 42}'}}})
def test_cant_load_group_missing_one():
    """One absent mandatory member fails the group, optional members aside."""
    mandatory = [GroupMember('core.json'), GroupMember('logger.json')]
    optionals = [GroupMember('op%s' % i, mandatory=False) for i in range(10)]
    group = Group('test1', mandatory + optionals)
    assert group.load() is False
@mockfs({'etc': {'test1': {'logger.json': '{"b": 42}'}}})
def test_cant_load_group_missing_many():
    """A group with a mandatory member absent refuses to load."""
    members = [GroupMember('core.json'), GroupMember('logger.json')]
    group = Group('test1', members)
    assert group.load() is False
@mockfs({'etc': {'app.json': '{"a": 42}'}})
def test_load_group_single():
    """An empty group name means members live directly in the location root."""
    app_member = GroupMember('app.json')
    group = Group('', [app_member])
    assert group.load()
    assert app_member.data == {"a": 42}
@mockfs({'etc': {'test1': {'core.json': '{"a": 42}'}}})
def test_load_group_optional():
    """A missing non-mandatory member does not block the rest of the group."""
    core_member = GroupMember('core.json')
    logger_member = GroupMember('logger.json', mandatory=False)
    group = Group('test1', [core_member, logger_member])
    assert group.load() is True
    assert core_member.data == {"a": 42}
    assert core_member.path == '/etc/test1/core.json'
    assert logger_member.is_loaded is False
@mockfs({
    'home': {
        'douglas': {
            'test1': {
                'core.json': '{"a": 21}'
            }
        }
    },
    'etc': {
        'test1': {
            'core.json': '{"a": 42}',
            'logger.json': '{"b": 42}',
        }
    }
})
def test_load_group_optional_full_group_is_more_imporant_than_location_order():
    """A location holding the complete group beats an earlier partial location."""
    core_member = GroupMember('core.json')
    logger_member = GroupMember('logger.json', mandatory=False)
    group = Group('test1', [core_member, logger_member])
    assert group.load() is True
    assert core_member.data == {"a": 42}
    assert logger_member.is_loaded
    assert logger_member.data == {"b": 42}
@mockfs({'home': {'douglas': {'teve': {_data_filename: '{"a": 84}'}}}}, cwd = '/home/douglas/teve/muha/subn')
def test_load_simple_climber():
    """ClimberLocation walks up from cwd until it finds the file."""
    config = Config(_data_filename, transport = Transport([ClimberLocation()]))
    assert config.load() is True
    assert config.data == {"a": 84}
    assert config.path == '/home/douglas/teve/' + _data_filename
@mockfs({'home': {'douglas': {'teve': {'test1': {'core.json': '{"a": 42}', 'logger.json': '{"b": 42}'}}}}}, cwd = '/home/douglas/teve/muha/subn')
def test_load_group_climber_loc():
core = GroupMember('core.json')
logger = GroupMember('logger.json')
grp = Group('test1', [core, logger], transport = Transport([ClimberLocation()]))
assert grp.load()
assert core.data == {"a": 42}
assert logger.data == {"b": 42}
assert grp.path == '/home/douglas/teve/test1'
assert core.path == '/home/douglas/teve/test1/core.json'
| 25.515723 | 145 | 0.605866 |
from configpp.soil import Config, Group, GroupMember, Transport, ClimberLocation
from voidpp_tools.mocks.file_system import FileSystem, mockfs
_data_filename = 'test1.json'
_data = {_data_filename: '{"a": 42}'}
def test_load_simple_not_found():
    """Loading a config whose file exists nowhere on the search path must fail."""
    cfg = Config(_data_filename)
    assert cfg.load() is False
@mockfs({'etc': _data})
def test_load_simple_found_etc():
    """A config placed under /etc is found, parsed, and its path recorded."""
    cfg = Config(_data_filename)
    assert cfg.load() is True
    assert cfg.data == {"a": 42}
    assert cfg.path == '/etc/' + _data_filename
@mockfs({'home': {'douglas': _data}})
def test_load_simple_found_home():
    """A config placed in the user's home directory is found and parsed."""
    cfg = Config(_data_filename)
    assert cfg.load() is True
    assert cfg.data == {"a": 42}
@mockfs({'teve': _data}, cwd = '/teve')
def test_load_simple_found_cwd():
    """A config in the current working directory is found and parsed."""
    cfg = Config(_data_filename)
    assert cfg.load() is True
    assert cfg.data == {"a": 42}
@mockfs({'etc': _data, 'home': {'douglas': {_data_filename: '{"a": 84}'}}})
def test_load_simple_location_order():
    """When the file exists in both /etc and $HOME, the home copy wins (a == 84)."""
    cfg = Config(_data_filename)
    assert cfg.load() is True
    assert cfg.data == {"a": 84}
@mockfs({'etc': {'test1': {'core.json': '{"a": 42}', 'logger.json': '{"b": 42}'}}})
def test_load_group():
    """A group loads when every member file exists in the group directory."""
    core = GroupMember('core.json')
    logger = GroupMember('logger.json')
    grp = Group('test1', [core, logger])
    assert grp.load()
    assert core.data == {"a": 42}
    assert logger.data == {"b": 42}
@mockfs({'etc': {'test1': {'logger.json': '{"b": 42}'}}})
def test_cant_load_group_missing_one():
    """A single missing mandatory member fails the load even among many optional ones."""
    core = GroupMember('core.json')
    logger = GroupMember('logger.json')
    grp = Group('test1', [core, logger] + [GroupMember('op%s' % i, mandatory=False) for i in range(10)])
    assert grp.load() is False
@mockfs({'etc': {'test1': {'logger.json': '{"b": 42}'}}})
def test_cant_load_group_missing_many():
    """The group load fails when a mandatory member file is absent."""
    core = GroupMember('core.json')
    logger = GroupMember('logger.json')
    grp = Group('test1', [core, logger])
    assert grp.load() is False
@mockfs({'etc': {'app.json': '{"a": 42}'}})
def test_load_group_single():
    """An empty group name means members live directly in the location root."""
    core = GroupMember('app.json')
    grp = Group('', [core])
    assert grp.load()
    assert core.data == {"a": 42}
@mockfs({'etc': {'test1': {'core.json': '{"a": 42}'}}})
def test_load_group_optional():
    """A missing optional member does not block the group; it stays unloaded."""
    core = GroupMember('core.json')
    logger = GroupMember('logger.json', mandatory = False)
    grp = Group('test1', [core, logger])
    assert grp.load() is True
    assert core.data == {"a": 42}
    assert core.path == '/etc/test1/core.json'
    assert logger.is_loaded is False
@mockfs({
    'home': {
        'douglas': {
            'test1': {
                'core.json': '{"a": 21}'
            }
        }
    },
    'etc': {
        'test1': {
            'core.json': '{"a": 42}',
            'logger.json': '{"b": 42}',
        }
    }
})
def test_load_group_optional_full_group_is_more_imporant_than_location_order():
    """A location holding the full group (/etc) beats a higher-priority partial one ($HOME)."""
    core = GroupMember('core.json')
    logger = GroupMember('logger.json', mandatory = False)
    grp = Group('test1', [core, logger])
    assert grp.load() is True
    # /etc values win because /etc has both members, even though $HOME normally ranks higher
    assert core.data == {"a": 42}
    assert logger.is_loaded
    assert logger.data == {"b": 42}
@mockfs({'home': {'douglas': {'teve': {_data_filename: '{"a": 84}'}}}}, cwd = '/home/douglas/teve/muha/subn')
def test_load_simple_climber():
    """ClimberLocation walks up from cwd until it finds the config two levels above."""
    cfg = Config(_data_filename, transport = Transport([ClimberLocation()]))
    assert cfg.load() is True
    assert cfg.data == {"a": 84}
    assert cfg.path == '/home/douglas/teve/' + _data_filename
@mockfs({'home': {'douglas': {'teve': {'test1': {'core.json': '{"a": 42}', 'logger.json': '{"b": 42}'}}}}}, cwd = '/home/douglas/teve/muha/subn')
def test_load_group_climber_loc():
    """Group loading also works through ClimberLocation, climbing up from cwd."""
    core = GroupMember('core.json')
    logger = GroupMember('logger.json')
    grp = Group('test1', [core, logger], transport = Transport([ClimberLocation()]))
    assert grp.load()
    assert core.data == {"a": 42}
    assert logger.data == {"b": 42}
    assert grp.path == '/home/douglas/teve/test1'
    assert core.path == '/home/douglas/teve/test1/core.json'
| true | true |
f72d708187d0aba7b19a053b52100f35313303b3 | 4,061 | py | Python | src/models/esvs.py | tomstark99/epic-kitchens-100-fyrp | cbc9e59569fb6110b900a51def1947b8a3c93699 | [
"Apache-2.0"
] | null | null | null | src/models/esvs.py | tomstark99/epic-kitchens-100-fyrp | cbc9e59569fb6110b900a51def1947b8a3c93699 | [
"Apache-2.0"
] | null | null | null | src/models/esvs.py | tomstark99/epic-kitchens-100-fyrp | cbc9e59569fb6110b900a51def1947b8a3c93699 | [
"Apache-2.0"
] | null | null | null | import torch as t
import torch.nn as nn
import torch.nn.functional as F
class MTRN(nn.Module):
    """Joint-classification MLP head (397 classes) over concatenated frame features.

    Expects 256-dim features per frame; the input is flattened to
    (batch, 256 * frame_count) before the fully connected stack.
    """

    def __init__(self, frame_count: int):
        """
        Args:
            frame_count: number of frames whose 256-dim features are concatenated.
        """
        super().__init__()
        self.frame_count = frame_count
        self.fc1 = nn.Linear(256 * frame_count, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 397)

    def forward(self, x):
        """Return unnormalized class logits of shape (batch, 397)."""
        x = x.view(-1, 256 * self.frame_count)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Bug fix: forward referenced self.fc3_verb, but __init__ defines self.fc3,
        # so every forward pass raised AttributeError.
        x = self.fc3(x)
        return x
class V_MTRN(nn.Module):
    """Verb-classification MLP head (97 classes) over concatenated frame features.

    Expects 256-dim features per frame; the input is flattened to
    (batch, 256 * frame_count) before the fully connected stack.
    """

    def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: float = 0.5):
        """
        Args:
            frame_count: number of frames whose 256-dim features are concatenated.
            hidden_layer_size: width of the first hidden layer.
            dropout_count: how many dropout applications are active (0, 1 or 2).
            dropout_probability: drop probability shared by both dropout sites.

        Raises:
            ValueError: if dropout_probability is outside [0, 1].
        """
        super().__init__()
        # Fix: the annotation said `int`, but this is a float probability (default 0.5).
        if dropout_probability < 0 or dropout_probability > 1:
            raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
        self.frame_count = frame_count
        self.dropout_count = dropout_count
        self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)
        self.dropout = nn.Dropout(p=dropout_probability)
        self.fc2 = nn.Linear(hidden_layer_size, 512)
        self.fc3_verb = nn.Linear(512, 97)

    def forward(self, x):
        """Return unnormalized verb logits of shape (batch, 97)."""
        x = x.view(-1, 256 * self.frame_count)
        x = F.relu(self.fc1(x))
        if self.dropout_count >= 1:
            x = self.dropout(x)
        x = F.relu(self.fc2(x))
        if self.dropout_count == 2:
            x = self.dropout(x)
        x = self.fc3_verb(x)
        return x
class N_MTRN(nn.Module):
    """Noun-classification MLP head (300 classes) over concatenated frame features.

    Expects 256-dim features per frame; the input is flattened to
    (batch, 256 * frame_count) before the fully connected stack.
    """

    def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: float = 0.5):
        """
        Args:
            frame_count: number of frames whose 256-dim features are concatenated.
            hidden_layer_size: width of the first hidden layer.
            dropout_count: how many dropout applications are active (0, 1 or 2).
            dropout_probability: drop probability shared by both dropout sites.

        Raises:
            ValueError: if dropout_probability is outside [0, 1].
        """
        super().__init__()
        # Fix: the annotation said `int`, but this is a float probability (default 0.5).
        if dropout_probability < 0 or dropout_probability > 1:
            raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
        self.frame_count = frame_count
        self.dropout_count = dropout_count
        self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)
        self.dropout = nn.Dropout(p=dropout_probability)
        self.fc2 = nn.Linear(hidden_layer_size, 512)
        self.fc3_noun = nn.Linear(512, 300)

    def forward(self, x):
        """Return unnormalized noun logits of shape (batch, 300)."""
        x = x.view(-1, 256 * self.frame_count)
        x = F.relu(self.fc1(x))
        if self.dropout_count >= 1:
            x = self.dropout(x)
        x = F.relu(self.fc2(x))
        if self.dropout_count == 2:
            x = self.dropout(x)
        x = self.fc3_noun(x)
        return x
class V_MF(nn.Module):
    """Verb-classification head (97 classes) over concatenated 768-dim frame features."""

    def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: float = 0.5):
        """
        Args:
            frame_count: number of frames whose 768-dim features are concatenated.
            hidden_layer_size: width of the hidden layer.
            dropout_probability: drop probability applied after the hidden layer.

        Raises:
            ValueError: if dropout_probability is outside [0, 1].
        """
        super().__init__()
        # Fix: the annotation said `int`, but this is a float probability (default 0.5).
        if dropout_probability < 0 or dropout_probability > 1:
            raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
        self.frame_count = frame_count
        self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)
        self.dropout = nn.Dropout(p=dropout_probability)
        self.fc2_verb = nn.Linear(hidden_layer_size, 97)

    def forward(self, x):
        """Return unnormalized verb logits of shape (batch, 97)."""
        x = x.view(-1, 768 * self.frame_count)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2_verb(x)
        return x
class N_MF(nn.Module):
    """Noun-classification head (300 classes) over concatenated 768-dim frame features."""

    def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: float = 0.5):
        """
        Args:
            frame_count: number of frames whose 768-dim features are concatenated.
            hidden_layer_size: width of the hidden layer.
            dropout_probability: drop probability applied after the hidden layer.

        Raises:
            ValueError: if dropout_probability is outside [0, 1].
        """
        super().__init__()
        # Fix: the annotation said `int`, but this is a float probability (default 0.5).
        if dropout_probability < 0 or dropout_probability > 1:
            raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
        self.frame_count = frame_count
        self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)
        self.dropout = nn.Dropout(p=dropout_probability)
        self.fc2_noun = nn.Linear(hidden_layer_size, 300)

    def forward(self, x):
        """Return unnormalized noun logits of shape (batch, 300)."""
        x = x.view(-1, 768 * self.frame_count)
        x = F.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.fc2_noun(x)
        return x
| 37.256881 | 118 | 0.592219 | import torch as t
import torch.nn as nn
import torch.nn.functional as F
class MTRN(nn.Module):
    """Joint-classification MLP head (397 classes) over concatenated frame features."""

    def __init__(self, frame_count: int):
        super().__init__()
        self.frame_count = frame_count
        self.fc1 = nn.Linear(256 * frame_count, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 397)

    def forward(self, x):
        """Return unnormalized class logits of shape (batch, 397)."""
        x = x.view(-1, 256 * self.frame_count)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # Bug fix: forward referenced self.fc3_verb, but __init__ defines self.fc3.
        x = self.fc3(x)
        return x
class V_MTRN(nn.Module):
def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: int = 0.5):
super().__init__()
if dropout_probability < 0 or dropout_probability > 1:
raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
self.frame_count = frame_count
self.dropout_count = dropout_count
self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)
self.dropout = nn.Dropout(p=dropout_probability)
self.fc2 = nn.Linear(hidden_layer_size, 512)
self.fc3_verb = nn.Linear(512, 97)
def forward(self, x):
x = x.view(-1, 256 * self.frame_count)
x = F.relu(self.fc1(x))
if self.dropout_count >= 1:
x = self.dropout(x)
x = F.relu(self.fc2(x))
if self.dropout_count == 2:
x = self.dropout(x)
x = self.fc3_verb(x)
return x
class N_MTRN(nn.Module):
def __init__(self, frame_count: int, hidden_layer_size: int, dropout_count: int, dropout_probability: int = 0.5):
super().__init__()
if dropout_probability < 0 or dropout_probability > 1:
raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
self.frame_count = frame_count
self.dropout_count = dropout_count
self.fc1 = nn.Linear(256 * frame_count, hidden_layer_size)
self.dropout = nn.Dropout(p=dropout_probability)
self.fc2 = nn.Linear(hidden_layer_size, 512)
self.fc3_noun = nn.Linear(512, 300)
def forward(self, x):
x = x.view(-1, 256 * self.frame_count)
x = F.relu(self.fc1(x))
if self.dropout_count >= 1:
x = self.dropout(x)
x = F.relu(self.fc2(x))
if self.dropout_count == 2:
x = self.dropout(x)
x = self.fc3_noun(x)
return x
class V_MF(nn.Module):
def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: int = 0.5):
super().__init__()
if dropout_probability < 0 or dropout_probability > 1:
raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
self.frame_count = frame_count
self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)
self.dropout = nn.Dropout(p=dropout_probability)
self.fc2_verb = nn.Linear(hidden_layer_size, 97)
def forward(self, x):
x = x.view(-1, 768 * self.frame_count)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2_verb(x)
return x
class N_MF(nn.Module):
def __init__(self, frame_count: int, hidden_layer_size: int, dropout_probability: int = 0.5):
super().__init__()
if dropout_probability < 0 or dropout_probability > 1:
raise ValueError(f'Probability needs to be between 0 and 1, was: {dropout_probability}')
self.frame_count = frame_count
self.fc1 = nn.Linear(768 * frame_count, hidden_layer_size)
self.dropout = nn.Dropout(p=dropout_probability)
self.fc2_noun = nn.Linear(hidden_layer_size, 300)
def forward(self, x):
x = x.view(-1, 768 * self.frame_count)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2_noun(x)
return x
| true | true |
f72d70e9b4cf7e6f3bcfcd54939051b3f505e477 | 800 | py | Python | sources_non_forked/ultisnips/pythonx/UltiSnips/snippet/definition/snipmate.py | khatchad/vimrc | e4fb69d3b7a8635f0881461853c9144763fae4c7 | [
"MIT"
] | 1 | 2017-04-24T04:07:48.000Z | 2017-04-24T04:07:48.000Z | sources_non_forked/ultisnips/pythonx/UltiSnips/snippet/definition/snipmate.py | RobotMa/vimrc | 5beda397d3c6f88b8542d843107a64c42bf13c93 | [
"MIT"
] | null | null | null | sources_non_forked/ultisnips/pythonx/UltiSnips/snippet/definition/snipmate.py | RobotMa/vimrc | 5beda397d3c6f88b8542d843107a64c42bf13c93 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
"""A snipMate snippet after parsing."""
from UltiSnips.snippet.definition.base import SnippetDefinition
from UltiSnips.snippet.parsing.snipmate import parse_and_instantiate
class SnipMateSnippetDefinition(SnippetDefinition):
    """A snipMate snippet after parsing."""

    # Fixed priority assigned to every snipMate snippet.
    SNIPMATE_SNIPPET_PRIORITY = -1000

    def __init__(self, trigger, value, description, location):
        super().__init__(
            self.SNIPMATE_SNIPPET_PRIORITY,
            trigger,
            value,
            description,
            "w",
            {},
            location,
            None,
            {},
        )

    def instantiate(self, snippet_instance, initial_text, indent):
        """Expand the snippet text via the snipMate parser."""
        parse_and_instantiate(snippet_instance, initial_text, indent)
| 25 | 69 | 0.6375 |
from UltiSnips.snippet.definition.base import SnippetDefinition
from UltiSnips.snippet.parsing.snipmate import parse_and_instantiate
class SnipMateSnippetDefinition(SnippetDefinition):
SNIPMATE_SNIPPET_PRIORITY = -1000
def __init__(self, trigger, value, description, location):
SnippetDefinition.__init__(
self,
self.SNIPMATE_SNIPPET_PRIORITY,
trigger,
value,
description,
"w",
{},
location,
None,
{},
)
def instantiate(self, snippet_instance, initial_text, indent):
parse_and_instantiate(snippet_instance, initial_text, indent)
| true | true |
f72d711ff1d7d537e1b1c14304366b86f56d6150 | 2,176 | py | Python | src/data_generator/npz_generator.py | JaimeCernuda/dlio_benchmark | d9cfbf76b4c7fb0d48a0dd43b8d2f2ea6ba75949 | [
"MIT"
] | 10 | 2020-08-13T19:14:21.000Z | 2022-03-16T00:31:00.000Z | src/data_generator/npz_generator.py | JaimeCernuda/dlio_benchmark | d9cfbf76b4c7fb0d48a0dd43b8d2f2ea6ba75949 | [
"MIT"
] | null | null | null | src/data_generator/npz_generator.py | JaimeCernuda/dlio_benchmark | d9cfbf76b4c7fb0d48a0dd43b8d2f2ea6ba75949 | [
"MIT"
] | 3 | 2020-08-18T21:29:38.000Z | 2021-11-16T15:37:09.000Z | """
Copyright (C) 2020 Argonne, Hariharan Devarajan <hdevarajan@anl.gov>
This file is part of DLProfile
DLIO is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the published by the Free Software Foundation, either
version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU General Public License along with this program.
If not, see <http://www.gnu.org/licenses/>.
"""
from src.common.enumerations import Compression
from src.data_generator.data_generator import DataGenerator
import numpy as np
from numpy import random
from src.utils.utility import progress
from shutil import copyfile
"""
Generator for creating data in NPZ format.
"""
class NPZGenerator(DataGenerator):
    """Generator for creating data in NPZ format."""

    def __init__(self):
        super().__init__()

    def generate(self):
        """
        Generator for creating data in NPZ format of 3d dataset.

        Each rank writes the files assigned to it (round-robin by rank); for
        files owned by other ranks it copies its own first-written file to the
        expected path.
        """
        super().generate()
        records = random.random((self._dimension, self._dimension, self.num_samples))
        record_labels = [0] * self.num_samples
        prev_out_spec = ""
        count = 0
        for i in range(0, int(self.num_files)):
            # Bug fix: out_path_spec was computed only inside the writer branch,
            # so non-writer iterations raised NameError (or reused a stale path).
            out_path_spec = "{}_{}_of_{}.npz".format(self._file_prefix, i, self.num_files)
            if i % self.comm_size == self.my_rank:
                progress(i + 1, self.num_files, "Generating NPZ Data")
                if count == 0:
                    prev_out_spec = out_path_spec
                if self.compression != Compression.ZIP:
                    np.savez(out_path_spec, x=records, y=record_labels)
                else:
                    np.savez_compressed(out_path_spec, x=records, y=record_labels)
                count += 1
            else:
                # NOTE(review): prev_out_spec is "" until this rank has written
                # its first file — presumably ranks write before copying; verify
                # scheduling against comm_size/my_rank assignment.
                copyfile(prev_out_spec, out_path_spec)
from src.common.enumerations import Compression
from src.data_generator.data_generator import DataGenerator
import numpy as np
from numpy import random
from src.utils.utility import progress
from shutil import copyfile
class NPZGenerator(DataGenerator):
def __init__(self):
super().__init__()
def generate(self):
super().generate()
records = random.random((self._dimension, self._dimension, self.num_samples))
record_labels = [0] * self.num_samples
prev_out_spec =""
count = 0
for i in range(0, int(self.num_files)):
if i % self.comm_size == self.my_rank:
progress(i+1, self.num_files, "Generating NPZ Data")
out_path_spec = "{}_{}_of_{}.npz".format(self._file_prefix, i, self.num_files)
if count == 0:
prev_out_spec = out_path_spec
if self.compression != Compression.ZIP:
np.savez(out_path_spec, x=records, y=record_labels)
else:
np.savez_compressed(out_path_spec, x=records, y=record_labels)
count += 1
else:
copyfile(prev_out_spec, out_path_spec) | true | true |
f72d736ad51216bde57d535049cd30a0302cf279 | 495 | py | Python | great_expectations/data_context/store/__init__.py | louispotok/great_expectations | b91a3ce10f771742f49ccad9c403bda03f318515 | [
"Apache-2.0"
] | null | null | null | great_expectations/data_context/store/__init__.py | louispotok/great_expectations | b91a3ce10f771742f49ccad9c403bda03f318515 | [
"Apache-2.0"
] | null | null | null | great_expectations/data_context/store/__init__.py | louispotok/great_expectations | b91a3ce10f771742f49ccad9c403bda03f318515 | [
"Apache-2.0"
] | null | null | null | from .store_backend import (
StoreBackend,
InMemoryStoreBackend,
# FilesystemStoreBackend,
FixedLengthTupleFilesystemStoreBackend,
FixedLengthTupleS3StoreBackend,
)
from .store import (
WriteOnlyStore,
ReadWriteStore,
BasicInMemoryStore,
)
from .namespaced_read_write_store import (
NamespacedReadWriteStore,
ValidationsStore,
ExpectationsStore,
HtmlSiteStore,
)
from .evaluation_parameter_store import (
InMemoryEvaluationParameterStore,
) | 20.625 | 43 | 0.769697 | from .store_backend import (
StoreBackend,
InMemoryStoreBackend,
FixedLengthTupleFilesystemStoreBackend,
FixedLengthTupleS3StoreBackend,
)
from .store import (
WriteOnlyStore,
ReadWriteStore,
BasicInMemoryStore,
)
from .namespaced_read_write_store import (
NamespacedReadWriteStore,
ValidationsStore,
ExpectationsStore,
HtmlSiteStore,
)
from .evaluation_parameter_store import (
InMemoryEvaluationParameterStore,
) | true | true |
f72d738266321e8929e1cbbd0dc068b84fcc8a8d | 20,754 | py | Python | hydra/_internal/utils.py | evdcush/hydra | 5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47 | [
"MIT"
] | null | null | null | hydra/_internal/utils.py | evdcush/hydra | 5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47 | [
"MIT"
] | null | null | null | hydra/_internal/utils.py | evdcush/hydra | 5a34a01eaa0f0426d967e918a3ecd8ac6fcf9f47 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import copy
import inspect
import logging.config
import os
import sys
import warnings
from dataclasses import dataclass
from os.path import dirname, join, normpath, realpath
from traceback import print_exc, print_exception
from types import FrameType
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
from omegaconf import DictConfig, OmegaConf, read_write
from omegaconf.errors import OmegaConfBaseException
from hydra._internal.config_search_path_impl import ConfigSearchPathImpl
from hydra.core.config_search_path import ConfigSearchPath, SearchPathQuery
from hydra.core.utils import get_valid_filename, split_config_path
from hydra.errors import (
CompactHydraException,
InstantiationException,
SearchPathException,
)
from hydra.types import ObjectConf, TaskFunction
log = logging.getLogger(__name__)
def _get_module_name_override() -> Optional[str]:
module_envs = ["HYDRA_MAIN_MODULE", "FB_PAR_MAIN_MODULE", "FB_XAR_MAIN_MODULE"]
for module_env in module_envs:
if module_env in os.environ:
return os.environ[module_env]
return None
def detect_calling_file_or_module_from_task_function(
    task_function: Any,
) -> Tuple[Optional[str], Optional[str], str]:
    """Determine where the task function lives and derive the task name.

    Returns (calling_file, calling_module, task_name); exactly one of the
    first two is non-None. Scripts run as __main__ are identified by file.
    """
    module_name = _get_module_name_override()
    if module_name is None:
        module_name = task_function.__module__

    calling_file: Optional[str] = None
    calling_module: Optional[str] = None
    if module_name in (None, "__main__"):
        # no usable module name: fall back to the file that defined the function
        calling_file = task_function.__code__.co_filename
    else:
        calling_module = module_name

    task_name = detect_task_name(calling_file, module_name)
    return calling_file, calling_module, task_name
def detect_calling_file_or_module_from_stack_frame(
    stack_depth: int,
) -> Tuple[Optional[str], Optional[str]]:
    """Inspect the call stack to find the file and/or module of the caller.

    ``stack_depth`` selects which frame above this one to examine.
    Returns (calling_file, calling_module); either may be None.
    """
    stack = inspect.stack()
    frame = stack[stack_depth]
    if is_notebook() and "_dh" in frame[0].f_globals:
        # IPython/Jupyter exposes the notebook's directory history in "_dh";
        # synthesize a stand-in file path inside that directory.
        pynb_dir = frame[0].f_globals["_dh"][0]
        calling_file = join(pynb_dir, "notebook.ipynb")
        return calling_file, None

    calling_file = frame.filename
    calling_module = None
    try:
        calling_module = _get_module_name_override()
        if calling_module is None:
            # look up the function object named in the frame and take its module
            calling_module = frame[0].f_globals[frame[3]].__module__
    except KeyError:
        try:
            # fall back to a bound method's instance, if present in locals
            calling_module = frame[0].f_locals["self"].__module__
        except KeyError:
            pass

    return calling_file, calling_module
def is_notebook() -> bool:
    """Best-effort check for running inside a Jupyter notebook/qtconsole."""
    try:
        shell_name = get_ipython().__class__.__name__  # type: ignore
    except NameError:
        # plain Python interpreter: get_ipython is not defined
        return False
    # ZMQInteractiveShell -> notebook/qtconsole; anything else (e.g. the
    # terminal IPython shell) does not count.
    return shell_name == "ZMQInteractiveShell"
def detect_task_name(calling_file: Optional[str], calling_module: Optional[str]) -> str:
    """Derive the task name from the calling file's stem or the module's last component."""
    if calling_file is not None:
        stem = os.path.splitext(os.path.basename(calling_file))[0]
        return get_valid_filename(stem)
    if calling_module is not None:
        # keep only the last dotted component of the module path
        return calling_module.rsplit(".", 1)[-1]
    raise ValueError()
def compute_search_path_dir(
    calling_file: Optional[str],
    calling_module: Optional[str],
    config_path: Optional[str],
) -> str:
    """Compute the application's primary config search path entry.

    File-based apps resolve ``config_path`` relative to the calling file's
    directory; module-based apps produce a ``pkg://`` URL relative to the
    calling module's package, where each leading ``../`` in ``config_path``
    climbs one package level.
    """
    if calling_file is not None:
        base_dir = realpath(dirname(calling_file))
        candidate = base_dir if config_path is None else join(base_dir, config_path)
        return normpath(candidate)

    if calling_module is not None:
        # drop the trailing component (the module itself) to get its package
        package = calling_module.rsplit(".", 1)[0] if "." in calling_module else ""

        if config_path is not None:
            config_path = config_path.replace(os.path.sep, "/")
            while config_path.startswith("../"):
                # each "../" strips one more package level
                config_path = config_path[len("../") :]
                package = package.rsplit(".", 1)[0] if "." in package else ""

        result = "pkg://" + package
        if config_path is not None:
            separator = "/" if package != "" else ""
            result = result + separator + config_path
        return result

    raise ValueError()
def create_automatic_config_search_path(
    calling_file: Optional[str],
    calling_module: Optional[str],
    config_path: Optional[str],
) -> ConfigSearchPath:
    """Build the default config search path anchored at the app's config dir."""
    primary = compute_search_path_dir(calling_file, calling_module, config_path)
    return create_config_search_path(primary)
def create_config_search_path(search_path_dir: Optional[str]) -> ConfigSearchPath:
    """Assemble the full search path: hydra -> main -> plugin entries -> schema."""
    from hydra.core.plugins import Plugins
    from hydra.plugins.search_path_plugin import SearchPathPlugin

    path = ConfigSearchPathImpl()
    # Hydra's own packaged configs always come first.
    path.append("hydra", "pkg://hydra.conf")
    if search_path_dir is not None:
        path.append("main", search_path_dir)

    # Let every installed SearchPathPlugin manipulate the path in place.
    for plugin_cls in Plugins.instance().discover(SearchPathPlugin):
        plugin = plugin_cls()
        assert isinstance(plugin, SearchPathPlugin)
        plugin.manipulate_search_path(path)

    path.append("schema", "structured://")
    return path
def _is_env_set(name: str) -> bool:
return name in os.environ and os.environ[name] == "1"
def run_and_report(func: Any) -> Any:
    """Run ``func`` and, on failure, print a sanitized error then exit(1).

    With HYDRA_FULL_ERROR=1 the original exception is re-raised untouched.
    Otherwise CompactHydraException gets a short message, and any other
    exception gets a traceback with Hydra frames (top) and omegaconf frames
    (bottom) stripped for readability.
    """
    try:
        return func()
    except Exception as ex:
        if _is_env_set("HYDRA_FULL_ERROR"):
            raise ex
        else:
            if isinstance(ex, CompactHydraException):
                sys.stderr.write(str(ex) + os.linesep)
                if isinstance(ex.__cause__, OmegaConfBaseException):
                    sys.stderr.write(str(ex.__cause__) + os.linesep)
            else:
                # Custom printing that strips the Hydra related stack frames from the top
                # And any omegaconf frames from the bottom.
                # It is possible to add additional libraries to sanitize from the bottom later,
                # maybe even make it configurable.
                tb: Any = ex.__traceback__
                search_max = 10
                # strip Hydra frames from start of stack
                # will strip until it hits run_job()
                while search_max > 0:
                    if tb is None:
                        break
                    frame = tb.tb_frame
                    tb = tb.tb_next
                    search_max = search_max - 1
                    if inspect.getframeinfo(frame).function == "run_job":
                        break
                if search_max == 0 or tb is None:
                    # could not detect run_job, probably a runtime exception before we got there.
                    # do not sanitize the stack trace.
                    print_exc()
                    sys.exit(1)
                # strip OmegaConf frames from bottom of stack
                end = tb
                num_frames = 0
                while end is not None:
                    frame = end.tb_frame
                    mdl = inspect.getmodule(frame)
                    assert mdl is not None
                    name = mdl.__name__
                    if name.startswith("omegaconf."):
                        break
                    end = end.tb_next
                    num_frames = num_frames + 1

                # Minimal traceback stand-in: print_exception only reads these
                # four attributes, so a dataclass chain is enough to fake one.
                @dataclass
                class FakeTracebackType:
                    tb_next: Any = None  # Optional[FakeTracebackType]
                    tb_frame: Optional[FrameType] = None
                    tb_lasti: Optional[int] = None
                    tb_lineno: Optional[int] = None

                # Copy the kept frames (run_job .. first omegaconf frame) into
                # the fake chain.
                iter_tb = tb
                final_tb = FakeTracebackType()
                cur = final_tb
                added = 0
                while True:
                    cur.tb_lasti = iter_tb.tb_lasti
                    cur.tb_lineno = iter_tb.tb_lineno
                    cur.tb_frame = iter_tb.tb_frame
                    if added == num_frames - 1:
                        break
                    added = added + 1
                    cur.tb_next = FakeTracebackType()
                    cur = cur.tb_next
                    iter_tb = iter_tb.tb_next
                print_exception(etype=None, value=ex, tb=final_tb)  # type: ignore
            sys.stderr.write(
                "\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\n"
            )
        sys.exit(1)
def _run_hydra(
    args_parser: argparse.ArgumentParser,
    task_function: TaskFunction,
    config_path: Optional[str],
    config_name: Optional[str],
    strict: Optional[bool],
) -> None:
    """Main Hydra entry: parse CLI args, build the search path, dispatch a command.

    Exactly one of --run / --multirun / --cfg / --shell-completion / --info is
    executed (defaulting to --run); --help and --hydra-help short-circuit first.
    """
    from hydra.core.global_hydra import GlobalHydra

    from .hydra import Hydra

    args = args_parser.parse_args()
    # CLI overrides take precedence over the values passed to @hydra.main()
    if args.config_name is not None:
        config_name = args.config_name

    if args.config_path is not None:
        config_path = args.config_path

    (
        calling_file,
        calling_module,
        task_name,
    ) = detect_calling_file_or_module_from_task_function(task_function)

    config_dir, config_name = split_config_path(config_path, config_name)
    search_path = create_automatic_config_search_path(
        calling_file, calling_module, config_dir
    )

    def add_conf_dir() -> None:
        # --config-dir adds a directory just before the "schema" entry
        if args.config_dir is not None:
            abs_config_dir = os.path.abspath(args.config_dir)
            if not os.path.isdir(abs_config_dir):
                raise SearchPathException(
                    f"Additional config directory '{abs_config_dir}' not found"
                )
            search_path.prepend(
                provider="command-line",
                path=f"file://{abs_config_dir}",
                anchor=SearchPathQuery(provider="schema"),
            )

    # run_and_report converts failures into sanitized error output + exit(1)
    run_and_report(add_conf_dir)
    hydra = run_and_report(
        lambda: Hydra.create_main_hydra2(
            task_name=task_name, config_search_path=search_path, strict=strict
        )
    )

    try:
        if args.help:
            hydra.app_help(config_name=config_name, args_parser=args_parser, args=args)
            sys.exit(0)
        if args.hydra_help:
            hydra.hydra_help(
                config_name=config_name, args_parser=args_parser, args=args
            )
            sys.exit(0)

        # the mutually exclusive commands; booleans sum to a count
        has_show_cfg = args.cfg is not None
        num_commands = (
            args.run + has_show_cfg + args.multirun + args.shell_completion + args.info
        )
        if num_commands > 1:
            raise ValueError(
                "Only one of --run, --multirun, -cfg, --info and --shell_completion can be specified"
            )
        if num_commands == 0:
            args.run = True
        if args.run:
            run_and_report(
                lambda: hydra.run(
                    config_name=config_name,
                    task_function=task_function,
                    overrides=args.overrides,
                )
            )
        elif args.multirun:
            run_and_report(
                lambda: hydra.multirun(
                    config_name=config_name,
                    task_function=task_function,
                    overrides=args.overrides,
                )
            )
        elif args.cfg:
            run_and_report(
                lambda: hydra.show_cfg(
                    config_name=config_name,
                    overrides=args.overrides,
                    cfg_type=args.cfg,
                    package=args.package,
                )
            )
        elif args.shell_completion:
            run_and_report(
                lambda: hydra.shell_completion(
                    config_name=config_name, overrides=args.overrides
                )
            )
        elif args.info:
            hydra.show_info(config_name=config_name, overrides=args.overrides)
        else:
            sys.stderr.write("Command not specified\n")
            sys.exit(1)
    finally:
        # always tear down the global Hydra singleton, even on sys.exit
        GlobalHydra.instance().clear()
def _get_exec_command() -> str:
if sys.argv[0].endswith(".py"):
return f"python {sys.argv[0]}"
else:
# Running as an installed app (setuptools entry point)
executable = os.path.basename(sys.argv[0])
return executable
def _get_completion_help() -> str:
    """Build the install/uninstall help text from all completion plugins."""
    from hydra.core.plugins import Plugins
    from hydra.plugins.completion_plugin import CompletionPlugin

    lines: List[str] = []
    for plugin_cls in Plugins.instance().discover(CompletionPlugin):
        assert issubclass(plugin_cls, CompletionPlugin)
        for cmd in ["install", "uninstall"]:
            lines.append(f"{plugin_cls.provides().capitalize()} - {cmd.capitalize()}:")
            lines.append(plugin_cls.help(cmd).format(_get_exec_command()))
            lines.append("")
    # indent every non-empty line
    return "\n".join(f" {x}" if x else x for x in lines)
def get_args_parser() -> argparse.ArgumentParser:
    """Construct Hydra's command-line argument parser.

    add_help is disabled because --help is handled by Hydra itself
    (it renders the application's help, not argparse's).
    """
    from .. import __version__

    parser = argparse.ArgumentParser(add_help=False, description="Hydra")
    parser.add_argument("--help", "-h", action="store_true", help="Application's help")
    parser.add_argument("--hydra-help", action="store_true", help="Hydra's help")
    parser.add_argument(
        "--version",
        action="version",
        help="Show Hydra's version and exit",
        version=f"Hydra {__version__}",
    )
    # positional: config overrides in key=value form
    parser.add_argument(
        "overrides",
        nargs="*",
        help="Any key=value arguments to override config values (use dots for.nested=overrides)",
    )

    parser.add_argument(
        "--cfg",
        "-c",
        choices=["job", "hydra", "all"],
        help="Show config instead of running [job|hydra|all]",
    )
    parser.add_argument("--package", "-p", help="Config package to show")

    parser.add_argument("--run", "-r", action="store_true", help="Run a job")

    parser.add_argument(
        "--multirun",
        "-m",
        action="store_true",
        help="Run multiple jobs with the configured launcher and sweeper",
    )

    parser.add_argument(
        "--shell-completion",
        "-sc",
        action="store_true",
        help=f"Install or Uninstall shell completion:\n{_get_completion_help()}",
    )

    parser.add_argument(
        "--config-path",
        "-cp",
        help="""Overrides the config_path specified in hydra.main().
                The config_path is relative to the Python file declaring @hydra.main()""",
    )

    parser.add_argument(
        "--config-name",
        "-cn",
        help="Overrides the config_name specified in hydra.main()",
    )

    parser.add_argument(
        "--config-dir",
        "-cd",
        help="Adds an additional config dir to the config search path",
    )

    parser.add_argument(
        "--info", "-i", action="store_true", help="Print Hydra information"
    )
    return parser
def get_args(args: Optional[Sequence[str]] = None) -> Any:
    """Parse ``args`` (or sys.argv when None) with Hydra's argument parser."""
    parser = get_args_parser()
    return parser.parse_args(args=args)
def get_column_widths(matrix: List[List[str]]) -> List[int]:
    """Per-column maximum string length across all rows.

    Rows may have different lengths; the result has as many entries as the
    longest row, and missing cells contribute width 0.
    """
    column_count = max((len(row) for row in matrix), default=0)
    widths = [0] * column_count
    for row in matrix:
        for col_idx, cell in enumerate(row):
            if len(cell) > widths[col_idx]:
                widths[col_idx] = len(cell)
    return widths
def _instantiate_class(
    clazz: Type[Any], config: Union[ObjectConf, DictConfig], *args: Any, **kwargs: Any
) -> Any:
    """Instantiate ``clazz`` with config-derived kwargs merged with ``kwargs``."""
    # TODO: pull out to caller?
    return clazz(*args, **_get_kwargs(config, **kwargs))
def _call_callable(
    fn: Callable[..., Any],
    config: Union[ObjectConf, DictConfig],
    *args: Any,
    **kwargs: Any,
) -> Any:
    """Invoke ``fn`` with config-derived kwargs merged with ``kwargs``."""
    return fn(*args, **_get_kwargs(config, **kwargs))
def _locate(path: str) -> Union[type, Callable[..., Any]]:
    """
    Locate an object by name or dotted path, importing as necessary.
    This is similar to the pydoc function `locate`, except that it checks for
    the module from the given path from back to front.
    """
    if path == "":
        raise ImportError("Empty path")
    import builtins
    from importlib import import_module

    parts = [part for part in path.split(".") if part]
    module = None
    # Try the longest importable prefix first: for "a.b.c" attempt to import
    # "a.b", then "a", then fall back to builtins (n == 0 -> empty prefix).
    for n in reversed(range(len(parts))):
        try:
            mod = ".".join(parts[:n])
            module = import_module(mod)
        except Exception as e:
            if n == 0:
                raise ImportError(f"Error loading module '{path}'") from e
            continue
        if module:
            break
    if module:
        obj = module
    else:
        obj = builtins
    # Walk the remaining components as attributes, importing submodules on demand.
    for part in parts[n:]:
        mod = mod + "." + part
        if not hasattr(obj, part):
            try:
                # the attribute may be a submodule that has not been imported yet
                import_module(mod)
            except Exception as e:
                raise ImportError(
                    f"Encountered error: `{e}` when loading module '{path}'"
                ) from e
        obj = getattr(obj, part)
    if isinstance(obj, type):
        obj_type: type = obj
        return obj_type
    elif callable(obj):
        obj_callable: Callable[..., Any] = obj
        return obj_callable
    else:
        # dummy case
        raise ValueError(f"Invalid type ({type(obj)}) found for {path}")
def _get_kwargs(config: Union[ObjectConf, DictConfig], **kwargs: Any) -> Any:
    """Merge instantiation parameters from ``config`` with call-site ``kwargs``.

    Keys already present in the config act as overridable defaults (kwargs win);
    keys unknown to the config are passed straight through to the target.
    The special ``_target_`` key is never forwarded.
    """
    if isinstance(config, ObjectConf):
        # Legacy structured ObjectConf: its parameters live under `.params`.
        config = OmegaConf.structured(config)
        if config.params is not None:
            params = config.params
        else:
            params = OmegaConf.create()
    else:
        # Deep copy so the merge below never mutates the caller's config.
        config = copy.deepcopy(config)
        if "params" in config:
            msg = (
                "\nField 'params' is deprecated since Hydra 1.0 and will be removed in Hydra 1.1."
                "\nInline the content of params directly at the containing node."
                "\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
            )
            warnings.warn(category=UserWarning, message=msg)
            params = config.params
        else:
            params = config

    assert isinstance(
        params, DictConfig
    ), f"Input config params are expected to be a mapping, found {type(config.params).__name__}"

    # Split kwargs: ones that shadow config keys become config overrides,
    # the rest are passed through untouched.
    config_overrides = {}
    passthrough = {}
    for k, v in kwargs.items():
        if k in params:
            config_overrides[k] = v
        else:
            passthrough[k] = v

    final_kwargs = {}
    # The config may be read-only (struct mode); allow writes for the merge.
    with read_write(params):
        params.merge_with(config_overrides)

    for k in params.keys():
        if k == "_target_":
            continue
        # A MISSING value that is satisfied by a passthrough kwarg is
        # filled in by the passthrough loop below instead.
        if OmegaConf.is_missing(params, k) and k in passthrough:
            continue
        final_kwargs[k] = params[k]

    for k, v in passthrough.items():
        final_kwargs[k] = v

    return final_kwargs
def _get_cls_name(config: DictConfig, pop: bool = True) -> str:
    """Extract the target class name from ``config``, preferring ``_target_``.

    Legacy ``target``/``cls``/``class`` keys still work but trigger a
    deprecation warning. When ``pop`` is True the key is removed from config.
    """

    def _read(field: str) -> str:
        value = config.pop(field) if pop else config[field]
        if not isinstance(value, str):
            raise InstantiationException(f"_target_ field '{field}' must be a string")
        return value

    legacy_fields = ["target", "cls", "class"]
    for field in legacy_fields:
        if field in config:
            key = config._get_full_key(field)
            msg = (
                f"\nConfig key '{key}' is deprecated since Hydra 1.0 and will be removed in Hydra 1.1."
                f"\nUse '_target_' instead of '{field}'."
                f"\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
            )
            warnings.warn(message=msg, category=UserWarning)

    if "_target_" in config:
        return _read("_target_")

    for field in legacy_fields:
        if field in config:
            return _read(field)

    raise InstantiationException("Input config does not have a `_target_` field")
| 32.580848 | 103 | 0.597909 |
import argparse
import copy
import inspect
import logging.config
import os
import sys
import warnings
from dataclasses import dataclass
from os.path import dirname, join, normpath, realpath
from traceback import print_exc, print_exception
from types import FrameType
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
from omegaconf import DictConfig, OmegaConf, read_write
from omegaconf.errors import OmegaConfBaseException
from hydra._internal.config_search_path_impl import ConfigSearchPathImpl
from hydra.core.config_search_path import ConfigSearchPath, SearchPathQuery
from hydra.core.utils import get_valid_filename, split_config_path
from hydra.errors import (
CompactHydraException,
InstantiationException,
SearchPathException,
)
from hydra.types import ObjectConf, TaskFunction
log = logging.getLogger(__name__)
def _get_module_name_override() -> Optional[str]:
    """Return the first module-name override set in the environment, if any."""
    for env_var in ("HYDRA_MAIN_MODULE", "FB_PAR_MAIN_MODULE", "FB_XAR_MAIN_MODULE"):
        value = os.environ.get(env_var)
        if value is not None:
            return value
    return None
def detect_calling_file_or_module_from_task_function(
    task_function: Any,
) -> Tuple[Optional[str], Optional[str], str]:
    """Work out where ``task_function`` is defined.

    Returns a ``(calling_file, calling_module, task_name)`` triple. Exactly one
    of file/module is set: functions living in ``__main__`` (or with no module)
    are identified by file path, importable ones by module name. Environment
    overrides (see ``_get_module_name_override``) take precedence.
    """
    mdl = task_function.__module__
    env_override = _get_module_name_override()
    if env_override is not None:
        mdl = env_override

    calling_file: Optional[str] = None
    calling_module: Optional[str] = None
    if mdl in (None, "__main__"):
        calling_file = task_function.__code__.co_filename
    else:
        calling_module = mdl

    task_name = detect_task_name(calling_file, mdl)

    return calling_file, calling_module, task_name
def detect_calling_file_or_module_from_stack_frame(
    stack_depth: int,
) -> Tuple[Optional[str], Optional[str]]:
    """Inspect the call stack at ``stack_depth`` to find the caller's file/module.

    Returns:
        ``(calling_file, calling_module)``; in notebooks a synthetic
        ``notebook.ipynb`` path is returned instead of the real frame file.
    """
    stack = inspect.stack()
    frame = stack[stack_depth]
    if is_notebook() and "_dh" in frame[0].f_globals:
        # In Jupyter "_dh" holds the directory history; its first entry is the
        # notebook's directory. There is no real source file, so synthesize one.
        pynb_dir = frame[0].f_globals["_dh"][0]
        calling_file = join(pynb_dir, "notebook.ipynb")
        return calling_file, None
    calling_file = frame.filename
    calling_module = None
    try:
        calling_module = _get_module_name_override()
        if calling_module is None:
            # Resolve the module of the function object that owns the frame.
            calling_module = frame[0].f_globals[frame[3]].__module__
    except KeyError:
        try:
            # Fallback for methods: use the module of the bound instance.
            calling_module = frame[0].f_locals["self"].__module__
        except KeyError:
            pass
    return calling_file, calling_module
def is_notebook() -> bool:
    """Best-effort check for running inside a Jupyter notebook kernel."""
    try:
        # `get_ipython` only exists when running under IPython/Jupyter.
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # Plain Python interpreter.
        return False
    # ZMQInteractiveShell is the Jupyter kernel; a terminal IPython shell
    # (or anything else) does not count as a notebook.
    return shell_name == "ZMQInteractiveShell"
def detect_task_name(calling_file: Optional[str], calling_module: Optional[str]) -> str:
    """Derive a task name from the calling file's stem or the module's last component."""
    if calling_file is not None:
        stem = os.path.splitext(os.path.basename(calling_file))[0]
        return get_valid_filename(stem)
    if calling_module is not None:
        # Keep only the last dotted component of the module path.
        return calling_module.rsplit(".", 1)[-1]
    raise ValueError()
def compute_search_path_dir(
    calling_file: Optional[str],
    calling_module: Optional[str],
    config_path: Optional[str],
) -> str:
    """Resolve the primary config search-path entry for an app.

    File-based apps get an absolute filesystem path; module-based apps get a
    ``pkg://`` URL. ``config_path`` is interpreted relative to the caller, and
    leading ``../`` segments walk up the module hierarchy.
    """
    if calling_file is not None:
        base = realpath(dirname(calling_file))
        target = base if config_path is None else join(base, config_path)
        return normpath(target)

    if calling_module is None:
        raise ValueError()

    def _parent(module: str) -> str:
        # Drop the last dotted component ("a.b.c" -> "a.b", "a" -> "").
        dot = module.rfind(".")
        return module[:dot] if dot != -1 else ""

    # The module itself is a file; start from its package.
    calling_module = _parent(calling_module)

    if config_path is not None:
        config_path = config_path.replace(os.path.sep, "/")
        # Each leading "../" climbs one more level up the module tree.
        while config_path.startswith("../"):
            config_path = config_path[len("../"):]
            calling_module = _parent(calling_module)

    search_path_dir = "pkg://" + calling_module
    if config_path is not None:
        separator = "/" if calling_module != "" else ""
        search_path_dir = search_path_dir + separator + config_path
    return search_path_dir
def create_automatic_config_search_path(
    calling_file: Optional[str],
    calling_module: Optional[str],
    config_path: Optional[str],
) -> ConfigSearchPath:
    """Build the default config search path for the detected caller location."""
    main_dir = compute_search_path_dir(calling_file, calling_module, config_path)
    return create_config_search_path(main_dir)
def create_config_search_path(search_path_dir: Optional[str]) -> ConfigSearchPath:
    """Assemble the full search path: hydra defaults, the app dir, plugin
    contributions, and finally the structured-config schemas."""
    from hydra.core.plugins import Plugins
    from hydra.plugins.search_path_plugin import SearchPathPlugin

    search_path = ConfigSearchPathImpl()
    search_path.append("hydra", "pkg://hydra.conf")

    if search_path_dir is not None:
        search_path.append("main", search_path_dir)

    # Let search-path plugins insert their own entries.
    for plugin_cls in Plugins.instance().discover(SearchPathPlugin):
        plugin = plugin_cls()
        assert isinstance(plugin, SearchPathPlugin)
        plugin.manipulate_search_path(search_path)

    # Structured-config schemas always come last.
    search_path.append("schema", "structured://")

    return search_path
def _is_env_set(name: str) -> bool:
    """True iff environment variable ``name`` is set to exactly "1"."""
    return os.environ.get(name) == "1"
def run_and_report(func: Any) -> Any:
    """Run ``func`` and return its result, converting uncaught exceptions into
    a compact, user-friendly report unless ``HYDRA_FULL_ERROR=1`` is set.

    On failure the traceback is truncated so that only the frames below
    Hydra's ``run_job`` (i.e. the user's own code) are shown, then the process
    exits with status 1.
    """
    try:
        return func()
    except Exception as ex:
        if _is_env_set("HYDRA_FULL_ERROR"):
            raise ex
        else:
            if isinstance(ex, CompactHydraException):
                sys.stderr.write(str(ex) + os.linesep)
                if isinstance(ex.__cause__, OmegaConfBaseException):
                    sys.stderr.write(str(ex.__cause__) + os.linesep)
            else:
                # Scan a few frames down looking for Hydra's run_job; frames
                # above it are Hydra internals the user does not care about.
                tb: Any = ex.__traceback__
                search_max = 10
                while search_max > 0:
                    if tb is None:
                        break
                    frame = tb.tb_frame
                    tb = tb.tb_next
                    search_max = search_max - 1
                    if inspect.getframeinfo(frame).function == "run_job":
                        break

                if search_max == 0 or tb is None:
                    # run_job not found in the traceback; print it unmodified.
                    print_exc()
                    sys.exit(1)

                # Count user frames up to (but excluding) omegaconf internals.
                end = tb
                num_frames = 0
                while end is not None:
                    frame = end.tb_frame
                    mdl = inspect.getmodule(frame)
                    assert mdl is not None
                    name = mdl.__name__
                    if name.startswith("omegaconf."):
                        break
                    end = end.tb_next
                    num_frames = num_frames + 1

                # Real traceback objects cannot be constructed, so build a
                # duck-typed chain that print_exception can walk.
                @dataclass
                class FakeTracebackType:
                    tb_next: Any = None  # Optional[FakeTracebackType]
                    tb_frame: Optional[FrameType] = None
                    tb_lasti: Optional[int] = None
                    tb_lineno: Optional[int] = None

                iter_tb = tb
                final_tb = FakeTracebackType()
                cur = final_tb
                added = 0
                while True:
                    cur.tb_lasti = iter_tb.tb_lasti
                    cur.tb_lineno = iter_tb.tb_lineno
                    cur.tb_frame = iter_tb.tb_frame

                    if added == num_frames - 1:
                        break
                    added = added + 1
                    cur.tb_next = FakeTracebackType()
                    cur = cur.tb_next
                    iter_tb = iter_tb.tb_next

                # Bug fix: use the legacy positional form. The 'etype' keyword
                # was removed from traceback.print_exception in Python 3.10,
                # so `print_exception(etype=None, ...)` raises TypeError there.
                print_exception(None, ex, final_tb)
            sys.stderr.write(
                "\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\n"
            )
        sys.exit(1)
def _run_hydra(
    args_parser: argparse.ArgumentParser,
    task_function: TaskFunction,
    config_path: Optional[str],
    config_name: Optional[str],
    strict: Optional[bool],
) -> None:
    """Entry point used by ``@hydra.main()``: parse CLI args, build the config
    search path, create the global Hydra instance and dispatch the requested
    command (run / multirun / show-cfg / shell-completion / info).
    """
    from hydra.core.global_hydra import GlobalHydra
    from .hydra import Hydra
    args = args_parser.parse_args()
    # CLI flags override the values hard-coded in the @hydra.main() decorator.
    if args.config_name is not None:
        config_name = args.config_name
    if args.config_path is not None:
        config_path = args.config_path
    (
        calling_file,
        calling_module,
        task_name,
    ) = detect_calling_file_or_module_from_task_function(task_function)
    config_dir, config_name = split_config_path(config_path, config_name)
    search_path = create_automatic_config_search_path(
        calling_file, calling_module, config_dir
    )

    def add_conf_dir() -> None:
        # --config-dir prepends an extra directory ahead of the schema provider.
        if args.config_dir is not None:
            abs_config_dir = os.path.abspath(args.config_dir)
            if not os.path.isdir(abs_config_dir):
                raise SearchPathException(
                    f"Additional config directory '{abs_config_dir}' not found"
                )
            search_path.prepend(
                provider="command-line",
                path=f"file://{abs_config_dir}",
                anchor=SearchPathQuery(provider="schema"),
            )

    run_and_report(add_conf_dir)
    hydra = run_and_report(
        lambda: Hydra.create_main_hydra2(
            task_name=task_name, config_search_path=search_path, strict=strict
        )
    )
    try:
        if args.help:
            hydra.app_help(config_name=config_name, args_parser=args_parser, args=args)
            sys.exit(0)
        if args.hydra_help:
            hydra.hydra_help(
                config_name=config_name, args_parser=args_parser, args=args
            )
            sys.exit(0)
        has_show_cfg = args.cfg is not None
        # Booleans count as 0/1 here, totalling the mutually exclusive commands.
        num_commands = (
            args.run + has_show_cfg + args.multirun + args.shell_completion + args.info
        )
        if num_commands > 1:
            raise ValueError(
                "Only one of --run, --multirun, -cfg, --info and --shell_completion can be specified"
            )
        if num_commands == 0:
            # Default to a single run when no command flag was given.
            args.run = True
        if args.run:
            run_and_report(
                lambda: hydra.run(
                    config_name=config_name,
                    task_function=task_function,
                    overrides=args.overrides,
                )
            )
        elif args.multirun:
            run_and_report(
                lambda: hydra.multirun(
                    config_name=config_name,
                    task_function=task_function,
                    overrides=args.overrides,
                )
            )
        elif args.cfg:
            run_and_report(
                lambda: hydra.show_cfg(
                    config_name=config_name,
                    overrides=args.overrides,
                    cfg_type=args.cfg,
                    package=args.package,
                )
            )
        elif args.shell_completion:
            run_and_report(
                lambda: hydra.shell_completion(
                    config_name=config_name, overrides=args.overrides
                )
            )
        elif args.info:
            hydra.show_info(config_name=config_name, overrides=args.overrides)
        else:
            sys.stderr.write("Command not specified\n")
            sys.exit(1)
    finally:
        # Always tear down the global Hydra instance, even on sys.exit.
        GlobalHydra.instance().clear()
def _get_exec_command() -> str:
    """Return how the current application should be invoked, for help text."""
    argv0 = sys.argv[0]
    if argv0.endswith(".py"):
        return f"python {argv0}"
    # The app was installed as an executable script; use its base name.
    return os.path.basename(argv0)
def _get_completion_help() -> str:
    """Collect install/uninstall instructions from all shell-completion plugins."""
    from hydra.core.plugins import Plugins
    from hydra.plugins.completion_plugin import CompletionPlugin

    lines: List[str] = []
    for plugin_cls in Plugins.instance().discover(CompletionPlugin):
        assert issubclass(plugin_cls, CompletionPlugin)
        for cmd in ("install", "uninstall"):
            lines.append(f"{plugin_cls.provides().capitalize()} - {cmd.capitalize()}:")
            lines.append(plugin_cls.help(cmd).format(_get_exec_command()))
            lines.append("")
    # Indent non-empty lines by four spaces for readable --help output.
    return "\n".join(f"    {line}" if line else line for line in lines)
def get_args_parser() -> argparse.ArgumentParser:
    """Build the argparse parser exposing Hydra's command-line interface.

    Note: ``add_help=False`` because Hydra handles --help itself via app_help.
    """
    from .. import __version__
    parser = argparse.ArgumentParser(add_help=False, description="Hydra")
    parser.add_argument("--help", "-h", action="store_true", help="Application's help")
    parser.add_argument("--hydra-help", action="store_true", help="Hydra's help")
    parser.add_argument(
        "--version",
        action="version",
        help="Show Hydra's version and exit",
        version=f"Hydra {__version__}",
    )
    # Positional: all remaining key=value tokens become config overrides.
    parser.add_argument(
        "overrides",
        nargs="*",
        help="Any key=value arguments to override config values (use dots for.nested=overrides)",
    )
    parser.add_argument(
        "--cfg",
        "-c",
        choices=["job", "hydra", "all"],
        help="Show config instead of running [job|hydra|all]",
    )
    parser.add_argument("--package", "-p", help="Config package to show")
    parser.add_argument("--run", "-r", action="store_true", help="Run a job")
    parser.add_argument(
        "--multirun",
        "-m",
        action="store_true",
        help="Run multiple jobs with the configured launcher and sweeper",
    )
    parser.add_argument(
        "--shell-completion",
        "-sc",
        action="store_true",
        help=f"Install or Uninstall shell completion:\n{_get_completion_help()}",
    )
    parser.add_argument(
        "--config-path",
        "-cp",
        help="""Overrides the config_path specified in hydra.main().
The config_path is relative to the Python file declaring @hydra.main()""",
    )
    parser.add_argument(
        "--config-name",
        "-cn",
        help="Overrides the config_name specified in hydra.main()",
    )
    parser.add_argument(
        "--config-dir",
        "-cd",
        help="Adds an additional config dir to the config search path",
    )
    parser.add_argument(
        "--info", "-i", action="store_true", help="Print Hydra information"
    )
    return parser
def get_args(args: Optional[Sequence[str]] = None) -> Any:
    """Parse ``args`` (defaults to ``sys.argv``) with Hydra's argument parser."""
    parser = get_args_parser()
    return parser.parse_args(args=args)
def get_column_widths(matrix: List[List[str]]) -> List[int]:
    """Return, per column, the length of the longest string in ``matrix``.

    Rows may have different lengths; missing cells contribute width 0.
    """
    if not matrix:
        return []
    num_cols = max(len(row) for row in matrix)
    widths = [0] * num_cols
    for row in matrix:
        for idx, cell in enumerate(row):
            if len(cell) > widths[idx]:
                widths[idx] = len(cell)
    return widths
def _instantiate_class(
    clazz: Type[Any], config: Union[ObjectConf, DictConfig], *args: Any, **kwargs: Any
) -> Any:
    """Create an instance of ``clazz`` with kwargs resolved from ``config``."""
    # TODO: pull out to caller?
    resolved_kwargs = _get_kwargs(config, **kwargs)
    return clazz(*args, **resolved_kwargs)
def _call_callable(
    fn: Callable[..., Any],
    config: Union[ObjectConf, DictConfig],
    *args: Any,
    **kwargs: Any,
) -> Any:
    """Invoke ``fn`` with positional ``args`` plus kwargs resolved from ``config``."""
    resolved_kwargs = _get_kwargs(config, **kwargs)
    return fn(*args, **resolved_kwargs)
def _locate(path: str) -> Union[type, Callable[..., Any]]:
    """Locate an object by dotted path, importing as necessary.

    Similar to ``pydoc.locate``, except the importable module prefix is
    searched from back to front.

    Raises:
        ImportError: if the path is empty/invalid or cannot be imported.
        ValueError: if the resolved object is neither a type nor callable.
    """
    if path == "":
        raise ImportError("Empty path")
    import builtins
    from importlib import import_module
    parts = [part for part in path.split(".") if part]
    # Bug fix: a path made only of separators (e.g. ".") used to fall through
    # to the attribute loop with `n` and `mod` unbound, raising NameError
    # instead of a proper ImportError.
    if not parts:
        raise ImportError(f"Invalid path: '{path}'")
    module = None
    # Try the longest importable module prefix first, shrinking from the back.
    for n in reversed(range(len(parts))):
        try:
            mod = ".".join(parts[:n])
            module = import_module(mod)
        except Exception as e:
            if n == 0:
                raise ImportError(f"Error loading module '{path}'") from e
            continue
        if module:
            break
    if module:
        obj = module
    else:
        # No module prefix matched; fall back to builtins (e.g. "str").
        obj = builtins
    # Walk the remaining components as attributes, importing submodules lazily.
    for part in parts[n:]:
        mod = mod + "." + part
        if not hasattr(obj, part):
            try:
                import_module(mod)
            except Exception as e:
                raise ImportError(
                    f"Encountered error: `{e}` when loading module '{path}'"
                ) from e
        obj = getattr(obj, part)
    if isinstance(obj, type):
        obj_type: type = obj
        return obj_type
    elif callable(obj):
        obj_callable: Callable[..., Any] = obj
        return obj_callable
    else:
        # dummy case
        raise ValueError(f"Invalid type ({type(obj)}) found for {path}")
def _get_kwargs(config: Union[ObjectConf, DictConfig], **kwargs: Any) -> Any:
    """Merge instantiation parameters from ``config`` with call-site ``kwargs``.

    Keys already present in the config act as overridable defaults (kwargs win);
    keys unknown to the config are passed straight through to the target.
    The special ``_target_`` key is never forwarded.
    """
    if isinstance(config, ObjectConf):
        # Legacy structured ObjectConf: its parameters live under `.params`.
        config = OmegaConf.structured(config)
        if config.params is not None:
            params = config.params
        else:
            params = OmegaConf.create()
    else:
        # Deep copy so the merge below never mutates the caller's config.
        config = copy.deepcopy(config)
        if "params" in config:
            msg = (
                "\nField 'params' is deprecated since Hydra 1.0 and will be removed in Hydra 1.1."
                "\nInline the content of params directly at the containing node."
                "\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
            )
            warnings.warn(category=UserWarning, message=msg)
            params = config.params
        else:
            params = config
    assert isinstance(
        params, DictConfig
    ), f"Input config params are expected to be a mapping, found {type(config.params).__name__}"
    # Split kwargs: ones that shadow config keys become config overrides,
    # the rest are passed through untouched.
    config_overrides = {}
    passthrough = {}
    for k, v in kwargs.items():
        if k in params:
            config_overrides[k] = v
        else:
            passthrough[k] = v
    final_kwargs = {}
    # The config may be read-only (struct mode); allow writes for the merge.
    with read_write(params):
        params.merge_with(config_overrides)
    for k in params.keys():
        if k == "_target_":
            continue
        # A MISSING value that is satisfied by a passthrough kwarg is
        # filled in by the passthrough loop below instead.
        if OmegaConf.is_missing(params, k) and k in passthrough:
            continue
        final_kwargs[k] = params[k]
    for k, v in passthrough.items():
        final_kwargs[k] = v
    return final_kwargs
def _get_cls_name(config: DictConfig, pop: bool = True) -> str:
    """Extract the target class name from ``config``, preferring ``_target_``.

    Legacy ``target``/``cls``/``class`` keys still work but trigger a
    deprecation warning. When ``pop`` is True the key is removed from config.
    """

    def _read(field: str) -> str:
        value = config.pop(field) if pop else config[field]
        if not isinstance(value, str):
            raise InstantiationException(f"_target_ field '{field}' must be a string")
        return value

    legacy_fields = ["target", "cls", "class"]
    for field in legacy_fields:
        if field in config:
            key = config._get_full_key(field)
            msg = (
                f"\nConfig key '{key}' is deprecated since Hydra 1.0 and will be removed in Hydra 1.1."
                f"\nUse '_target_' instead of '{field}'."
                f"\nSee https://hydra.cc/docs/next/upgrades/0.11_to_1.0/object_instantiation_changes"
            )
            warnings.warn(message=msg, category=UserWarning)

    if "_target_" in config:
        return _read("_target_")

    for field in legacy_fields:
        if field in config:
            return _read(field)

    raise InstantiationException("Input config does not have a `_target_` field")
| true | true |
f72d75ce8a4a75b2b39f7b3d547dddf85ae41813 | 70,121 | py | Python | medcat/cat.py | CogStack/CAT | 5ac04d2676aede13f8e8d0ab408472c3c6d46a86 | [
"MIT"
] | 4 | 2019-03-18T11:54:58.000Z | 2019-06-26T02:53:38.000Z | medcat/cat.py | CogStack/CAT | 5ac04d2676aede13f8e8d0ab408472c3c6d46a86 | [
"MIT"
] | null | null | null | medcat/cat.py | CogStack/CAT | 5ac04d2676aede13f8e8d0ab408472c3c6d46a86 | [
"MIT"
] | null | null | null | import os
import shutil
import pickle
import traceback
import json
import logging
import math
import time
import psutil
from time import sleep
from copy import deepcopy
from multiprocess import Process, Manager, cpu_count
from multiprocess.queues import Queue
from multiprocess.synchronize import Lock
from typing import Union, List, Tuple, Optional, Dict, Iterable, Set
from itertools import islice, chain, repeat
from datetime import date
from tqdm.autonotebook import tqdm, trange
from spacy.tokens import Span, Doc, Token
from spacy.language import Language
from medcat import __version__
from medcat.preprocessing.tokenizers import spacy_split_all
from medcat.pipe import Pipe
from medcat.preprocessing.taggers import tag_skip_and_punct
from medcat.cdb import CDB
from medcat.utils.matutils import intersect_nonempty_set
from medcat.utils.data_utils import make_mc_train_test, get_false_positives
from medcat.utils.normalizers import BasicSpellChecker
from medcat.utils.checkpoint import Checkpoint, CheckpointConfig, CheckpointManager
from medcat.utils.helpers import tkns_from_doc, get_important_config_parameters
from medcat.utils.hasher import Hasher
from medcat.ner.vocab_based_ner import NER
from medcat.linking.context_based_linker import Linker
from medcat.utils.filters import get_project_filters, check_filters
from medcat.preprocessing.cleaners import prepare_name
from medcat.meta_cat import MetaCAT
from medcat.utils.meta_cat.data_utils import json_to_fake_spacy
from medcat.config import Config
from medcat.vocab import Vocab
from medcat.utils.decorators import deprecated
from medcat.ner.transformers_ner import TransformersNER
class CAT(object):
r"""
    The main MedCAT class used to annotate documents. It is built on top of spaCy
    and works as a spaCy pipeline. Creates an instance of a spaCy pipeline that can
    be used as a spaCy nlp model.
Args:
cdb (medcat.cdb.CDB):
The concept database that will be used for NER+L
config (medcat.config.Config):
Global configuration for medcat
vocab (medcat.vocab.Vocab, optional):
Vocabulary used for vector embeddings and spelling. Default: None
meta_cats (list of medcat.meta_cat.MetaCAT, optional):
A list of models that will be applied sequentially on each
detected annotation.
Attributes (limited):
cdb (medcat.cdb.CDB):
Concept database used with this CAT instance, please do not assign
this value directly.
config (medcat.config.Config):
The global configuration for medcat. Usually cdb.config will be used for this
field. WILL BE REMOVED - TEMPORARY PLACEHOLDER
vocab (medcat.utils.vocab.Vocab):
The vocabulary object used with this instance, please do not assign
this value directly.
Examples:
>>> cat = CAT(cdb, vocab)
>>> spacy_doc = cat("Put some text here")
        >>> print(spacy_doc.ents) # Detected entities
"""
# Add file and console handlers
log = logging.getLogger(__package__)
DEFAULT_MODEL_PACK_NAME = "medcat_model_pack"
def __init__(self,
cdb: CDB,
vocab: Union[Vocab, None] = None,
config: Optional[Config] = None,
meta_cats: List[MetaCAT] = [],
addl_ner: Union[TransformersNER, List[TransformersNER]] = []) -> None:
self.cdb = cdb
self.vocab = vocab
if config is None:
# Take config from the cdb
self.config = cdb.config
else:
# Take the new config and assign it to the CDB also
self.config = config
self.cdb.config = config
self._meta_cats = meta_cats
self._addl_ner = addl_ner if isinstance(addl_ner, list) else [addl_ner]
self._create_pipeline(self.config)
    def _create_pipeline(self, config):
        """Build the spaCy pipeline: tokenizer, tagger, optional spell checker,
        NER, linker, any additional transformer NERs and MetaCAT components."""
        # Set log level
        self.log.setLevel(config.general['log_level'])

        # Build the pipeline
        self.pipe = Pipe(tokenizer=spacy_split_all, config=config)
        self.pipe.add_tagger(tagger=tag_skip_and_punct,
                             name='skip_and_punct',
                             additional_fields=['is_punct'])

        # The spell checker needs an embedding vocab, so only add one if a vocab exists
        if self.vocab is not None:
            spell_checker = BasicSpellChecker(cdb_vocab=self.cdb.vocab, config=config, data_vocab=self.vocab)
            self.pipe.add_token_normalizer(spell_checker=spell_checker, config=config)

        # Add NER
        self.ner = NER(self.cdb, config)
        self.pipe.add_ner(self.ner)

        # Add LINKER
        self.linker = Linker(self.cdb, self.vocab, config)
        self.pipe.add_linker(self.linker)

        # Add addl_ner if they exist
        for ner in self._addl_ner:
            self.pipe.add_addl_ner(ner, ner.config.general['name'])

        # Add meta-annotation classes if they exist
        for meta_cat in self._meta_cats:
            self.pipe.add_meta_cat(meta_cat, meta_cat.config.general['category_name'])

        # Set max document length
        self.pipe.spacy_nlp.max_length = config.preprocessing.get('max_document_length', 1000000)
    @deprecated(message="Replaced with cat.pipe.spacy_nlp.")
    def get_spacy_nlp(self) -> Language:
        """ Returns the spacy pipeline with MedCAT

        Deprecated: access ``cat.pipe.spacy_nlp`` directly instead.
        """
        return self.pipe.spacy_nlp
def get_hash(self):
r""" Will not be a deep hash but will try to cactch all the changing parts during training.
"""
hasher = Hasher()
hasher.update(self.cdb.get_hash())
hasher.update(self.config.get_hash())
for mc in self._meta_cats:
hasher.update(mc.get_hash())
for trf in self._addl_ner:
hasher.update(trf.get_hash())
return hasher.hexdigest()
def get_model_card(self, as_dict=False):
"""
A minimal model card for MedCAT model packs.
Args:
as_dict: return the model card as a dictionary instead of a str.
Returns:
By default a str - indented JSON object.
"""
card = {
'Model ID': self.config.version['id'],
'Last Modified On': self.config.version['last_modified'],
'History (from least to most recent)': self.config.version['history'],
'Description': self.config.version['description'],
'Source Ontology': self.config.version['ontology'],
'Location': self.config.version['location'],
'MetaCAT models': self.config.version['meta_cats'],
'Basic CDB Stats': self.config.version['cdb_info'],
'Performance': self.config.version['performance'],
'Important Parameters (Partial view, all available in cat.config)': get_important_config_parameters(self.config),
'MedCAT Version': self.config.version['medcat_version']
}
if as_dict:
return card
else:
return json.dumps(card, indent=2, sort_keys=False)
    def _versioning(self):
        """Refresh ``config.version`` metadata (id/hash, dates, CDB stats)
        whenever the model hash has changed since the last save."""
        # Check version info and do not allow without it
        if self.config.version['description'] == 'No description':
            self.log.warning("Please consider populating the version information [description, performance, location, ontology] in cat.config.version")

        # Fill the stuff automatically that is needed for versioning
        m = self.get_hash()
        version = self.config.version
        if version['id'] is None or m != version['id']:
            if version['id'] is not None:
                # Keep a breadcrumb trail of previous model ids.
                version['history'].append(version['id'])
            version['id'] = m
            version['last_modified'] = date.today().strftime("%d %B %Y")
            version['cdb_info'] = self.cdb._make_stats()
            version['meta_cats'] = [meta_cat.get_model_card(as_dict=True) for meta_cat in self._meta_cats]
            version['medcat_version'] = __version__
            self.log.warning("Please consider updating [description, performance, location, ontology] in cat.config.version")
    def create_model_pack(self, save_dir_path: str, model_pack_name: str = DEFAULT_MODEL_PACK_NAME) -> str:
        r""" Will create a .zip file containing all the models in the current running instance
        of MedCAT. This is not the most efficient way, for sure, but good enough for now.

        Args:
            save_dir_path (str):
                Directory the zip (and an unzipped copy) will be written to.
            model_pack_name (str):
                Base name for the pack - the model id/hash will be appended to it.

        Returns:
            Model pack name (with the version id appended)
        """
        # Spacy model always should be just the name, but during loading it can be reset to path
        self.config.general['spacy_model'] = os.path.basename(self.config.general['spacy_model'])

        # Versioning
        self._versioning()
        model_pack_name += "_{}".format(self.config.version['id'])

        self.log.warning("This will save all models into a zip file, can take some time and require quite a bit of disk space.")
        _save_dir_path = save_dir_path
        save_dir_path = os.path.join(save_dir_path, model_pack_name)

        # expand user path to make this work with '~'
        os.makedirs(os.path.expanduser(save_dir_path), exist_ok=True)

        # Save the used spacy model
        spacy_path = os.path.join(save_dir_path, self.config.general['spacy_model'])
        if str(self.pipe.spacy_nlp._path) != spacy_path:
            # First remove if something is there
            shutil.rmtree(spacy_path, ignore_errors=True)
            shutil.copytree(str(self.pipe.spacy_nlp._path), spacy_path)

        # Save the CDB
        cdb_path = os.path.join(save_dir_path, "cdb.dat")
        self.cdb.save(cdb_path)

        # Save the Vocab
        vocab_path = os.path.join(save_dir_path, "vocab.dat")
        if self.vocab is not None:
            # We will allow creation of modelpacks without vocabs
            self.vocab.save(vocab_path)

        # Save addl_ner
        for comp in self.pipe.spacy_nlp.components:
            if isinstance(comp[1], TransformersNER):
                trf_path = os.path.join(save_dir_path, "trf_" + comp[1].config.general['name'])
                comp[1].save(trf_path)

        # Save all meta_cats
        for comp in self.pipe.spacy_nlp.components:
            if isinstance(comp[1], MetaCAT):
                name = comp[0]
                meta_path = os.path.join(save_dir_path, "meta_" + name)
                comp[1].save(meta_path)

        # Add a model card also, why not
        model_card_path = os.path.join(save_dir_path, "model_card.json")
        json.dump(self.get_model_card(as_dict=True), open(model_card_path, 'w'), indent=2)

        # Zip everything
        shutil.make_archive(os.path.join(_save_dir_path, model_pack_name), 'zip', root_dir=save_dir_path)

        # Log model card and return new name
        self.log.info(self.get_model_card()) # Print the model card
        return model_pack_name
    @classmethod
    def load_model_pack(cls, zip_path: str, meta_cat_config_dict: Optional[Dict] = None) -> "CAT":
        r"""Load everything within the 'model pack', i.e. the CDB, config, vocab and any MetaCAT models
        (if present)

        Args:
            zip_path:
                path to model pack zip.
            meta_cat_config_dict:
                A config dict that will overwrite existing configs in meta_cat.
                e.g. meta_cat_config_dict = {'general': {'device': 'cpu'}}

        Returns:
            A CAT instance assembled from the unpacked components.
        """
        from medcat.cdb import CDB
        from medcat.vocab import Vocab
        from medcat.meta_cat import MetaCAT

        # Reuse an already-unzipped pack next to the zip when present.
        base_dir = os.path.dirname(zip_path)
        filename = os.path.basename(zip_path)
        foldername = filename.replace(".zip", '')

        model_pack_path = os.path.join(base_dir, foldername)
        if os.path.exists(model_pack_path):
            cls.log.info("Found an existing unziped model pack at: {}, the provided zip will not be touched.".format(model_pack_path))
        else:
            cls.log.info("Unziping the model pack and loading models.")
            shutil.unpack_archive(zip_path, extract_dir=model_pack_path)

        # Load the CDB
        cdb_path = os.path.join(model_pack_path, "cdb.dat")
        cdb = CDB.load(cdb_path)

        # TODO load addl_ner

        # Modify the config to contain full path to spacy model
        cdb.config.general['spacy_model'] = os.path.join(model_pack_path, os.path.basename(cdb.config.general['spacy_model']))

        # Load Vocab
        vocab_path = os.path.join(model_pack_path, "vocab.dat")
        if os.path.exists(vocab_path):
            vocab = Vocab.load(vocab_path)
        else:
            # Model packs are allowed to ship without a vocab.
            vocab = None

        # Find transformer NER models in the model_pack
        trf_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('trf_')]
        addl_ner = []
        for trf_path in trf_paths:
            trf = TransformersNER.load(save_dir_path=trf_path)
            trf.cdb = cdb # Set the cat.cdb to be the CDB of the TRF model
            addl_ner.append(trf)

        # Find meta models in the model_pack
        meta_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('meta_')]
        meta_cats = []
        for meta_path in meta_paths:
            meta_cats.append(MetaCAT.load(save_dir_path=meta_path,
                                          config_dict=meta_cat_config_dict))

        cat = cls(cdb=cdb, config=cdb.config, vocab=vocab, meta_cats=meta_cats, addl_ner=addl_ner)
        cls.log.info(cat.get_model_card()) # Print the model card
        return cat
def __call__(self, text: Optional[str], do_train: bool = False) -> Optional[Doc]:
r"""
Push the text through the pipeline.
Args:
text (string):
The text to be annotated, if the text length is longer than
self.config.preprocessing['max_document_length'] it will be trimmed to that length.
do_train (bool, defaults to `False`):
This causes so many screwups when not there, so I'll force training
to False. To run training it is much better to use the self.train() function
but for some special cases I'm leaving it here also.
Returns:
A single spacy document or multiple spacy documents with the extracted entities
"""
# Should we train - do not use this for training, unless you know what you are doing. Use the
#self.train() function
self.config.linking['train'] = do_train
if text is None:
self.log.error("The input text should be either a string or a sequence of strings but got %s", type(text))
return None
else:
text = self._get_trimmed_text(str(text))
return self.pipe(text)
    def __repr__(self):
        """
        Return the model_card for this CAT instance.

        Returns:
            the 'Model Card' string for this CAT instance. This includes NER+L config and any MetaCATs
        """
        return self.get_model_card(as_dict=False)
    def _print_stats(self,
                     data: Dict,
                     epoch: int = 0,
                     use_project_filters: bool = False,
                     use_overlaps: bool = False,
                     use_cui_doc_limit: bool = False,
                     use_groups: bool = False,
                     extra_cui_filter: Optional[Set] = None) -> Tuple:
        r""" TODO: Refactor and make nice
        Print metrics on a dataset (F1, P, R), it will also print the concepts that have the most FP,FN,TP.

        Args:
            data (list of dict):
                The json object that we get from MedCATtrainer on export.
            epoch (int):
                Used during training, so we know what epoch is it.
            use_project_filters (boolean):
                Each project in medcattrainer can have filters, do we want to respect those filters
                when calculating metrics.
            use_overlaps (boolean):
                Allow overlapping entites, nearly always False as it is very difficult to annotate overlapping entites.
            use_cui_doc_limit (boolean):
                If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words
                if the document was annotated for that CUI. Useful in very specific situations when during the annotation
                process the set of CUIs changed.
            use_groups (boolean):
                If True concepts that have groups will be combined and stats will be reported on groups.
            extra_cui_filter(Optional[Set]):
                This filter will be intersected with all other filters, or if all others are not set then only this one will be used.

        Returns:
            fps (dict):
                False positives for each CUI
            fns (dict):
                False negatives for each CUI
            tps (dict):
                True positives for each CUI
            cui_prec (dict):
                Precision for each CUI
            cui_rec (dict):
                Recall for each CUI
            cui_f1 (dict):
                F1 for each CUI
            cui_counts (dict):
                Number of occurrence for each CUI
            examples (dict):
                Examples for each of the fp, fn, tp. Format will be examples['fp']['cui'][<list_of_examples>]
        """
        # Global counters plus per-CUI breakdowns of the same quantities.
        tp = 0
        fp = 0
        fn = 0
        fps: Dict = {}
        fns: Dict = {}
        tps: Dict = {}
        cui_prec: Dict = {}
        cui_rec: Dict = {}
        cui_f1: Dict = {}
        cui_counts: Dict = {}
        examples: Dict = {'fp': {}, 'fn': {}, 'tp': {}}
        fp_docs: Set = set()
        fn_docs: Set = set()
        # reset and back up filters; `filters` below is mutated per-project/per-doc
        # and the backup is restored before returning.
        _filters = deepcopy(self.config.linking['filters'])
        filters = self.config.linking['filters']
        for pind, project in tqdm(enumerate(data['projects']), desc="Stats project", total=len(data['projects']), leave=False):
            filters['cuis'] = set()
            # Add extrafilter if set
            if isinstance(extra_cui_filter, set):
                filters['cuis'] = extra_cui_filter
            if use_project_filters:
                project_filter = get_project_filters(cuis=project.get('cuis', None),
                                                     type_ids=project.get('tuis', None),
                                                     cdb=self.cdb,
                                                     project=project)
                # Intersect project filter with existing if it has something
                if project_filter:
                    filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
            for dind, doc in tqdm(
                    enumerate(project["documents"]),
                    desc="Stats document",
                    total=len(project["documents"]),
                    leave=False,
            ):
                anns = self._get_doc_annotations(doc)
                # Apply document level filtering, in this case project_filter is ignored while the extra_cui_filter is respected still
                if use_cui_doc_limit:
                    _cuis = set([ann['cui'] for ann in anns])
                    if _cuis:
                        filters['cuis'] = intersect_nonempty_set(_cuis, extra_cui_filter)
                    else:
                        # Sentinel that matches no CUI: nothing was annotated in this doc.
                        filters['cuis'] = {'empty'}
                # Run the full pipeline on the raw text to get predictions.
                spacy_doc: Doc = self(doc['text'])
                if use_overlaps:
                    p_anns = spacy_doc._.ents
                else:
                    p_anns = spacy_doc.ents
                # Normalise gold annotations to (start_char, cui) pairs for comparison.
                anns_norm = []
                anns_norm_neg = []
                anns_examples = []
                anns_norm_cui = []
                for ann in anns:
                    cui = ann['cui']
                    if check_filters(cui, filters):
                        if use_groups:
                            cui = self.cdb.addl_info['cui2group'].get(cui, cui)
                        if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)):
                            anns_norm.append((ann['start'], cui))
                            anns_examples.append({"text": doc['text'][max(0, ann['start']-60):ann['end']+60],
                                                  "cui": cui,
                                                  "source value": ann['value'],
                                                  "acc": 1,
                                                  "project index": pind,
                                                  "document inedex": dind})
                        elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)):
                            # Negative annotation: the annotator explicitly rejected this span/CUI.
                            anns_norm_neg.append((ann['start'], cui))
                        if ann.get("validated", True):
                            # This is used to test was someone annotating for this CUI in this document
                            anns_norm_cui.append(cui)
                            cui_counts[cui] = cui_counts.get(cui, 0) + 1
                # Normalise predicted annotations the same way.
                p_anns_norm = []
                p_anns_examples = []
                for ann in p_anns:
                    cui = ann._.cui
                    if use_groups:
                        cui = self.cdb.addl_info['cui2group'].get(cui, cui)
                    p_anns_norm.append((ann.start_char, cui))
                    p_anns_examples.append({"text": doc['text'][max(0, ann.start_char-60):ann.end_char+60],
                                            "cui": cui,
                                            "source value": ann.text,
                                            "acc": float(ann._.context_similarity),
                                            "project index": pind,
                                            "document inedex": dind})
                # A prediction is a TP only on exact (start_char, cui) match with gold.
                for iann, ann in enumerate(p_anns_norm):
                    cui = ann[1]
                    if ann in anns_norm:
                        tp += 1
                        tps[cui] = tps.get(cui, 0) + 1
                        example = p_anns_examples[iann]
                        examples['tp'][cui] = examples['tp'].get(cui, []) + [example]
                    else:
                        fp += 1
                        fps[cui] = fps.get(cui, 0) + 1
                        fp_docs.add(doc.get('name', 'unk'))
                        # Add example for this FP prediction
                        example = p_anns_examples[iann]
                        if ann in anns_norm_neg:
                            # Means that it really was annotated as negative
                            example['real_fp'] = True
                        examples['fp'][cui] = examples['fp'].get(cui, []) + [example]
                # Any gold annotation that was not predicted is a FN.
                for iann, ann in enumerate(anns_norm):
                    if ann not in p_anns_norm:
                        cui = ann[1]
                        fn += 1
                        fn_docs.add(doc.get('name', 'unk'))
                        fns[cui] = fns.get(cui, 0) + 1
                        examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]]
        # Reporting only below; a ZeroDivisionError (e.g. no predictions) is caught
        # and printed rather than aborting the caller.
        try:
            prec = tp / (tp + fp)
            rec = tp / (tp + fn)
            f1 = 2*(prec*rec) / (prec + rec)
            print("Epoch: {}, Prec: {}, Rec: {}, F1: {}\n".format(epoch, prec, rec, f1))
            print("Docs with false positives: {}\n".format("; ".join([str(x) for x in list(fp_docs)[0:10]])))
            print("Docs with false negatives: {}\n".format("; ".join([str(x) for x in list(fn_docs)[0:10]])))
            # Sort fns & prec
            fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}
            fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}
            tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}
            # F1 per concept
            for cui in tps.keys():
                prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))
                rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))
                f1 = 2*(prec*rec) / (prec + rec)
                cui_prec[cui] = prec
                cui_rec[cui] = rec
                cui_f1[cui] = f1
            # Get top 10
            pr_fps = [(self.cdb.cui2preferred_name.get(cui,
                list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]
            pr_fns = [(self.cdb.cui2preferred_name.get(cui,
                list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]
            pr_tps = [(self.cdb.cui2preferred_name.get(cui,
                list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]
            print("\n\nFalse Positives\n")
            for one in pr_fps:
                print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
            print("\n\nFalse Negatives\n")
            for one in pr_fns:
                print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
            print("\n\nTrue Positives\n")
            for one in pr_tps:
                print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
            print("*"*110 + "\n")
        except Exception:
            traceback.print_exc()
        # restore filters to original state
        self.config.linking['filters'] = _filters
        return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples
def _init_ckpts(self, is_resumed, checkpoint):
if self.config.general['checkpoint']['steps'] is not None or checkpoint is not None:
checkpoint_config = CheckpointConfig(**self.config.general.get('checkpoint', {}))
checkpoint_manager = CheckpointManager('cat_train', checkpoint_config)
if is_resumed:
# TODO: probably remove is_resumed mark and always resume if a checkpoint is provided,
#but I'll leave it for now
checkpoint = checkpoint or checkpoint_manager.get_latest_checkpoint()
self.log.info(f"Resume training on the most recent checkpoint at {checkpoint.dir_path}...")
self.cdb = checkpoint.restore_latest_cdb()
self.cdb.config.merge_config(self.config.__dict__)
self.config = self.cdb.config
self._create_pipeline(self.config)
else:
checkpoint = checkpoint or checkpoint_manager.create_checkpoint()
self.log.info(f"Start new training and checkpoints will be saved at {checkpoint.dir_path}...")
return checkpoint
    def train(self,
              data_iterator: Iterable,
              nepochs: int = 1,
              fine_tune: bool = True,
              progress_print: int = 1000,
              checkpoint: Optional[Checkpoint] = None,
              is_resumed: bool = False) -> None:
        """ Runs training on the data, note that the maximum length of a line
        or document is 1M characters. Anything longer will be trimmed.

        Args:
            data_iterator (Iterable):
                Simple iterator over sentences/documents, e.g. a open file
                or an array or anything that we can use in a for loop.
            nepochs (int):
                Number of epochs for which to run the training.
            fine_tune (bool):
                If False old training will be removed.
            progress_print (int):
                Print progress after N lines.
            checkpoint (Optional[medcat.utils.checkpoint.CheckpointUT]):
                The MedCAT checkpoint object
            is_resumed (bool):
                If True resume the previous training; If False, start a fresh new training.
        """
        if not fine_tune:
            self.log.info("Removing old training data!")
            self.cdb.reset_training()
        checkpoint = self._init_ckpts(is_resumed, checkpoint)
        latest_trained_step = checkpoint.count if checkpoint is not None else 0
        # NOTE(review): repeat() yields the SAME iterator object each epoch, so a
        # one-shot iterator (generator, open file) will be exhausted after epoch 1;
        # pass a re-iterable sequence when nepochs > 1 - TODO confirm intent.
        epochal_data_iterator = chain.from_iterable(repeat(data_iterator, nepochs))
        # Skip the steps already covered by a resumed checkpoint.
        for line in islice(epochal_data_iterator, latest_trained_step, None):
            if line is not None and line:
                # Convert to string
                line = str(line).strip()
                try:
                    _ = self(line, do_train=True)
                except Exception as e:
                    # A bad line should not abort the whole training run.
                    self.log.warning("LINE: '%s...' \t WAS SKIPPED", line[0:100])
                    self.log.warning("BECAUSE OF: %s", str(e))
            else:
                self.log.warning("EMPTY LINE WAS DETECTED AND SKIPPED")
            latest_trained_step += 1
            if latest_trained_step % progress_print == 0:
                self.log.info("DONE: %s", str(latest_trained_step))
            if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
                checkpoint.save(cdb=self.cdb, count=latest_trained_step)
        # Training is done - switch the linker back to inference mode.
        self.config.linking['train'] = False
def add_cui_to_group(self, cui: str, group_name: str) -> None:
r"""
Ads a CUI to a group, will appear in cdb.addl_info['cui2group']
Args:
cui (str):
The concept to be added
group_name (str):
The group to whcih the concept will be added
Examples:
>>> cat.add_cui_to_group("S-17", 'pain')
"""
# Add group_name
self.cdb.addl_info['cui2group'][cui] = group_name
def unlink_concept_name(self, cui: str, name: str, preprocessed_name: bool = False) -> None:
r"""
Unlink a concept name from the CUI (or all CUIs if full_unlink), removes the link from
the Concept Database (CDB). As a consequence medcat will never again link the `name`
to this CUI - meaning the name will not be detected as a concept in the future.
Args:
cui (str):
The CUI from which the `name` will be removed
name (str):
The span of text to be removed from the linking dictionary
Examples:
>>> # To never again link C0020538 to HTN
>>> cat.unlink_concept_name('C0020538', 'htn', False)
"""
cuis = [cui]
if preprocessed_name:
names = {name: 'nothing'}
else:
names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
# If full unlink find all CUIs
if self.config.general.get('full_unlink', False):
for n in names:
cuis.extend(self.cdb.name2cuis.get(n, []))
# Remove name from all CUIs
for c in cuis:
self.cdb.remove_names(cui=c, names=names)
def add_and_train_concept(self,
cui: str,
name: str,
spacy_doc: Optional[Doc] = None,
spacy_entity: Optional[Union[List[Token], Span]] = None,
ontologies: Set = set(),
name_status: str = 'A',
type_ids: Set = set(),
description: str = '',
full_build: bool = True,
negative: bool = False,
devalue_others: bool = False,
do_add_concept: bool = True) -> None:
r""" Add a name to an existing concept, or add a new concept, or do not do anything if the name or concept already exists. Perform
training if spacy_entity and spacy_doc are set.
Args:
cui (str):
CUI of the concept
name (str):
Name to be linked to the concept (in the case of MedCATtrainer this is simply the
selected value in text, no preprocessing or anything needed).
spacy_doc (spacy.tokens.Doc):
Spacy represenation of the document that was manually annotated.
spacy_entity (Optional[Union[List[Token], Span]]):
Given the spacy document, this is the annotated span of text - list of annotated tokens that are marked with this CUI.
negative (bool):
Is this a negative or positive example.
devalue_others:
If set, cuis to which this name is assigned and are not `cui` will receive negative training given
that negative=False.
\*\*other:
Refer to medcat.cat.cdb.CDB.add_concept
"""
names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
# Only if not negative, otherwise do not add the new name if in fact it should not be detected
if do_add_concept and not negative:
self.cdb.add_concept(cui=cui, names=names, ontologies=ontologies, name_status=name_status, type_ids=type_ids, description=description,
full_build=full_build)
if spacy_entity is not None and spacy_doc is not None:
# Train Linking
self.linker.context_model.train(cui=cui, entity=spacy_entity, doc=spacy_doc, negative=negative, names=names)
if not negative and devalue_others:
# Find all cuis
cuis = set()
for n in names:
cuis.update(self.cdb.name2cuis.get(n, []))
# Remove the cui for which we just added positive training
if cui in cuis:
cuis.remove(cui)
# Add negative training for all other CUIs that link to these names
for _cui in cuis:
self.linker.context_model.train(cui=_cui, entity=spacy_entity, doc=spacy_doc, negative=True)
def train_supervised(self,
data_path: str,
reset_cui_count: bool = False,
nepochs: int = 1,
print_stats: int = 0,
use_filters: bool = False,
terminate_last: bool = False,
use_overlaps: bool = False,
use_cui_doc_limit: bool = False,
test_size: int = 0,
devalue_others: bool = False,
use_groups: bool = False,
never_terminate: bool = False,
train_from_false_positives: bool = False,
extra_cui_filter: Optional[Set] = None,
checkpoint: Optional[Checkpoint] = None,
is_resumed: bool = False) -> Tuple:
r""" TODO: Refactor, left from old
Run supervised training on a dataset from MedCATtrainer. Please take care that this is more a simulated
online training then supervised.
Args:
data_path (str):
The path to the json file that we get from MedCATtrainer on export.
reset_cui_count (boolean):
Used for training with weight_decay (annealing). Each concept has a count that is there
from the beginning of the CDB, that count is used for annealing. Resetting the count will
significantly increase the training impact. This will reset the count only for concepts
that exist in the the training data.
nepochs (int):
Number of epochs for which to run the training.
print_stats (int):
If > 0 it will print stats every print_stats epochs.
use_filters (boolean):
Each project in medcattrainer can have filters, do we want to respect those filters
when calculating metrics.
terminate_last (boolean):
If true, concept termination will be done after all training.
use_overlaps (boolean):
Allow overlapping entities, nearly always False as it is very difficult to annotate overlapping entities.
use_cui_doc_limit (boolean):
If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words
if the document was annotated for that CUI. Useful in very specific situations when during the annotation
process the set of CUIs changed.
test_size (float):
If > 0 the data set will be split into train test based on this ration. Should be between 0 and 1.
Usually 0.1 is fine.
devalue_others(bool):
Check add_name for more details.
use_groups (boolean):
If True concepts that have groups will be combined and stats will be reported on groups.
never_terminate (boolean):
If True no termination will be applied
train_from_false_positives (boolean):
If True it will use false positive examples detected by medcat and train from them as negative examples.
extra_cui_filter(Optional[Set]):
This filter will be intersected with all other filters, or if all others are not set then only this one will be used.
checkpoint (Optional[Optional[medcat.utils.checkpoint.CheckpointST]):
The MedCAT CheckpointST object
is_resumed (bool):
If True resume the previous training; If False, start a fresh new training.
Returns:
fp (dict):
False positives for each CUI
fn (dict):
False negatives for each CUI
tp (dict):
True positives for each CUI
p (dict):
Precision for each CUI
r (dict):
Recall for each CUI
f1 (dict):
F1 for each CUI
cui_counts (dict):
Number of occurrence for each CUI
examples (dict):
FP/FN examples of sentences for each CUI
"""
checkpoint = self._init_ckpts(is_resumed, checkpoint)
# Backup filters
_filters = deepcopy(self.config.linking['filters'])
filters = self.config.linking['filters']
fp = fn = tp = p = r = f1 = examples = {}
with open(data_path) as f:
data = json.load(f)
cui_counts = {}
if test_size == 0:
self.log.info("Running without a test set, or train==test")
test_set = data
train_set = data
else:
train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size)
if print_stats > 0:
fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
use_project_filters=use_filters,
use_cui_doc_limit=use_cui_doc_limit,
use_overlaps=use_overlaps,
use_groups=use_groups,
extra_cui_filter=extra_cui_filter)
if reset_cui_count:
# Get all CUIs
cuis = []
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
cuis.append(ann['cui'])
for cui in set(cuis):
if cui in self.cdb.cui2count_train:
self.cdb.cui2count_train[cui] = 100
# Remove entities that were terminated
if not never_terminate:
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if ann.get('killed', False):
self.unlink_concept_name(ann['cui'], ann['value'])
latest_trained_step = checkpoint.count if checkpoint is not None else 0
current_epoch, current_project, current_document = self._get_training_start(train_set, latest_trained_step)
for epoch in trange(current_epoch, nepochs, initial=current_epoch, total=nepochs, desc='Epoch', leave=False):
# Print acc before training
for idx_project in trange(current_project, len(train_set['projects']), initial=current_project, total=len(train_set['projects']), desc='Project', leave=False):
project = train_set['projects'][idx_project]
# Set filters in case we are using the train_from_fp
filters['cuis'] = set()
if isinstance(extra_cui_filter, set):
filters['cuis'] = extra_cui_filter
if use_filters:
project_filter = get_project_filters(cuis=project.get('cuis', None),
type_ids=project.get('tuis', None),
cdb=self.cdb,
project=project)
if project_filter:
filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
for idx_doc in trange(current_document, len(project['documents']), initial=current_document, total=len(project['documents']), desc='Document', leave=False):
doc = project['documents'][idx_doc]
spacy_doc: Doc = self(doc['text'])
# Compatibility with old output where annotations are a list
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if not ann.get('killed', False):
cui = ann['cui']
start = ann['start']
end = ann['end']
spacy_entity = tkns_from_doc(spacy_doc=spacy_doc, start=start, end=end)
deleted = ann.get('deleted', False)
self.add_and_train_concept(cui=cui,
name=ann['value'],
spacy_doc=spacy_doc,
spacy_entity=spacy_entity,
negative=deleted,
devalue_others=devalue_others)
if train_from_false_positives:
fps: List[Span] = get_false_positives(doc, spacy_doc)
for fp in fps:
fp_: Span = fp
self.add_and_train_concept(cui=fp_._.cui,
name=fp_.text,
spacy_doc=spacy_doc,
spacy_entity=fp_,
negative=True,
do_add_concept=False)
latest_trained_step += 1
if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
checkpoint.save(self.cdb, latest_trained_step)
if terminate_last and not never_terminate:
# Remove entities that were terminated, but after all training is done
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if ann.get('killed', False):
self.unlink_concept_name(ann['cui'], ann['value'])
if print_stats > 0 and (epoch + 1) % print_stats == 0:
fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
epoch=epoch + 1,
use_project_filters=use_filters,
use_cui_doc_limit=use_cui_doc_limit,
use_overlaps=use_overlaps,
use_groups=use_groups,
extra_cui_filter=extra_cui_filter)
# Set the filters again
self.config.linking['filters'] = _filters
return fp, fn, tp, p, r, f1, cui_counts, examples
def get_entities(self,
text: str,
only_cui: bool = False,
addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed']) -> Dict:
doc = self(text)
out = self._doc_to_out(doc, only_cui, addl_info)
return out
    def get_entities_multi_texts(self,
                                 texts: Union[Iterable[str], Iterable[Tuple]],
                                 only_cui: bool = False,
                                 addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed'],
                                 n_process: Optional[int] = None,
                                 batch_size: Optional[int] = None) -> List[Dict]:
        r""" Get entities for multiple texts.

        Args:
            texts: Texts to be annotated (or (id, text) tuples).
            only_cui: If True only CUIs are returned per entity.
            addl_info: Additional cdb.addl_info mappings to include in the output.
            n_process: If set, texts are processed with spaCy multiprocessing.
            batch_size: Batch size for the multiprocess path.

        Returns:
            One output dict per input text.
        """
        out: List[Dict] = []
        if n_process is None:
            # Sequential path - annotate one trimmed text at a time.
            texts_ = self._generate_trimmed_texts(texts)
            for text in texts_:
                out.append(self._doc_to_out(self(text), only_cui, addl_info))
        else:
            # Multiprocess path - install an error handler so one bad batch does not abort the pipe.
            self.pipe.set_error_handler(self._pipe_error_handler)
            try:
                texts_ = self._get_trimmed_texts(texts)
                docs = self.pipe.batch_multi_process(texts_, n_process, batch_size)
                for doc in tqdm(docs, total=len(texts_)):
                    # An all-whitespace text yields an empty doc; treat it as failed.
                    doc = None if doc.text.strip() == '' else doc
                    out.append(self._doc_to_out(doc, only_cui, addl_info, out_with_text=True))
                # Currently spaCy cannot mark which pieces of texts failed within the pipe so be this workaround,
                # which also assumes texts are different from each others.
                if len(out) < len(texts_):
                    self.log.warning("Found at least one failed batch and set output for enclosed texts to empty")
                    for i, text in enumerate(texts_):
                        if i == len(out):
                            out.append(self._doc_to_out(None, only_cui, addl_info))
                        elif out[i].get('text', '') != text:
                            out.insert(i, self._doc_to_out(None, only_cui, addl_info))
                # Strip the helper 'text' field again unless the config asks to keep it.
                cnf_annotation_output = getattr(self.config, 'annotation_output', {})
                if not(cnf_annotation_output.get('include_text_in_output', False)):
                    for o in out:
                        if o is not None:
                            o.pop('text', None)
            finally:
                self.pipe.reset_error_handler()
        return out
def get_json(self, text: str, only_cui: bool = False, addl_info=['cui2icd10', 'cui2ontologies']) -> str:
""" Get output in json format
text: text to be annotated
return: json with fields {'entities': <>, 'text': text}
"""
ents = self.get_entities(text, only_cui, addl_info=addl_info)['entities']
out = {'annotations': ents, 'text': text}
return json.dumps(out)
@staticmethod
def _get_training_start(train_set, latest_trained_step):
total_steps_per_epoch = sum([1 for project in train_set['projects'] for _ in project['documents']])
if total_steps_per_epoch == 0:
raise ValueError("MedCATtrainer export contains no documents")
current_epoch, last_step_in_epoch = divmod(latest_trained_step, total_steps_per_epoch)
document_count = 0
current_project = 0
current_document = 0
for idx_project, project in enumerate(train_set['projects']):
for idx_doc, _ in enumerate(project['documents']):
document_count += 1
if document_count == last_step_in_epoch:
current_project = idx_project
current_document = idx_doc
break
if current_project > 0:
break
current_document = 0
return current_epoch, current_project, current_document
def _separate_nn_components(self):
# Loop though the models and check are there GPU devices
nn_components = []
for component in self.pipe.spacy_nlp.components:
if isinstance(component[1], MetaCAT) or isinstance(component[1], TransformersNER):
self.pipe.spacy_nlp.disable_pipe(component[0])
nn_components.append(component)
return nn_components
def _run_nn_components(self, docs: Dict, nn_components: List, id2text: Dict) -> None:
r""" This will add meta_anns in-place to the docs dict.
"""
self.log.debug("Running GPU components separately")
# First convert the docs into the fake spacy doc format
spacy_docs = json_to_fake_spacy(docs, id2text=id2text)
# Disable component locks also
for name, component in nn_components:
component.config.general['disable_component_lock'] = True
# For meta_cat compoments
for name, component in [c for c in nn_components if isinstance(c[1], MetaCAT)]:
spacy_docs = component.pipe(spacy_docs)
for spacy_doc in spacy_docs:
for ent in spacy_doc.ents:
docs[spacy_doc.id]['entities'][ent._.id]['meta_anns'].update(ent._.meta_anns)
def _batch_generator(self, data: Iterable, batch_size_chars: int, skip_ids: Set = set()):
docs = []
char_count = 0
for doc in data:
if doc[0] not in skip_ids:
char_count += len(str(doc[1]))
docs.append(doc)
if char_count < batch_size_chars:
continue
yield docs
docs = []
char_count = 0
if len(docs) > 0:
yield docs
def _save_docs_to_file(self, docs: Iterable, annotated_ids: List[str], save_dir_path: str, annotated_ids_path: Optional[str], part_counter: int = 0) -> int:
path = os.path.join(save_dir_path, 'part_{}.pickle'.format(part_counter))
pickle.dump(docs, open(path, "wb"))
self.log.info("Saved part: %s, to: %s", part_counter, path)
part_counter = part_counter + 1 # Increase for save, as it should be what is the next part
if annotated_ids_path is not None:
pickle.dump((annotated_ids, part_counter), open(annotated_ids_path, 'wb'))
return part_counter
    def multiprocessing(self,
                        data: Union[List[Tuple], Iterable[Tuple]],
                        nproc: int = 2,
                        batch_size_chars: int = 5000 * 1000,
                        only_cui: bool = False,
                        addl_info: List[str] = [],
                        separate_nn_components: bool = True,
                        out_split_size_chars: Optional[int] = None,
                        save_dir_path: str = os.path.abspath(os.getcwd()),
                        min_free_memory=0.1) -> Dict:
        r""" Run multiprocessing for inference, if out_save_path and out_split_size_chars is used this will also continue annotating
        documents if something is saved in that directory.

        Args:
            data:
                Iterator or array with format: [(id, text), (id, text), ...]
            nproc (`int`, defaults to 8):
                Number of processors
            batch_size_chars (`int`, defaults to 1000000):
                Size of a batch in number of characters, this should be around: NPROC * average_document_length * 200
            separate_nn_components (`bool`, defaults to True):
                If set the medcat pipe will be broken up into NN and not-NN components and
                they will be run sequentially. This is useful as the NN components
                have batching and like to process many docs at once, while the rest of the pipeline
                runs the documents one by one.
            out_split_size_chars (`int`, None):
                If set once more than out_split_size_chars are annotated
                they will be saved to a file (save_dir_path) and the memory cleared. Recommended
                value is 20*batch_size_chars.
            save_dir_path(`str`, defaults to the current working directory):
                Where to save the annotated documents if splitting.
            min_free_memory(`float`, defaults to 0.1):
                If set a process will not start unless there is at least this much RAM memory left,
                should be a range between [0, 1] meaning how much of the memory has to be free. Helps when annotating
                very large datasets because spacy is not the best with memory management and multiprocessing.

        Returns:
            A dictionary: {id: doc_json, id2: doc_json2, ...}, in case out_split_size_chars is used
            the last batch will be returned while that and all previous batches will be
            written to disk (out_save_dir).
        """
        # Transformer NER components are not fork-safe here - refuse early.
        for comp in self.pipe.spacy_nlp.components:
            if isinstance(comp[1], TransformersNER):
                raise Exception("Please do not use multiprocessing when running a transformer model for NER, run sequentially.")
        # Set max document length
        self.pipe.spacy_nlp.max_length = self.config.preprocessing.get('max_document_length', 1000000)
        if self._meta_cats and not separate_nn_components:
            # Hack for torch using multithreading, which is not good if not
            #separate_nn_components, need for CPU runs only
            import torch
            torch.set_num_threads(1)
        nn_components = []
        if separate_nn_components:
            nn_components = self._separate_nn_components()
        if save_dir_path is not None:
            os.makedirs(save_dir_path, exist_ok=True)
        # Divisor 5 is a heuristic for sub-batches per worker - TODO confirm rationale.
        internal_batch_size_chars = batch_size_chars // (5 * nproc)
        # Resume support: previously annotated ids (and the next part number) are
        # reloaded so those documents are skipped.
        annotated_ids_path = os.path.join(save_dir_path, 'annotated_ids.pickle') if save_dir_path is not None else None
        if annotated_ids_path is not None and os.path.exists(annotated_ids_path):
            annotated_ids, part_counter = pickle.load(open(annotated_ids_path, 'rb'))
        else:
            annotated_ids = []
            part_counter = 0
        docs = {}
        _start_time = time.time()
        _batch_counter = 0 # Used for splitting the output, counts batches inbetween saves
        for batch in self._batch_generator(data, batch_size_chars, skip_ids=set(annotated_ids)):
            self.log.info("Annotated until now: %s docs; Current BS: %s docs; Elapsed time: %.2f minutes",
                          len(annotated_ids),
                          len(batch),
                          (time.time() - _start_time)/60)
            try:
                _docs = self._multiprocessing_batch(data=batch,
                                                    nproc=nproc,
                                                    only_cui=only_cui,
                                                    batch_size_chars=internal_batch_size_chars,
                                                    addl_info=addl_info,
                                                    nn_components=nn_components,
                                                    min_free_memory=min_free_memory)
                docs.update(_docs)
                annotated_ids.extend(_docs.keys())
                _batch_counter += 1
                del _docs
                if out_split_size_chars is not None and (_batch_counter * batch_size_chars) > out_split_size_chars:
                    # Save to file and reset the docs
                    part_counter = self._save_docs_to_file(docs=docs,
                                                           annotated_ids=annotated_ids,
                                                           save_dir_path=save_dir_path,
                                                           annotated_ids_path=annotated_ids_path,
                                                           part_counter=part_counter)
                    del docs
                    docs = {}
                    _batch_counter = 0
            except Exception as e:
                # Best-effort: log and continue with the next batch.
                self.log.warning("Failed an outer batch in the multiprocessing script")
                self.log.warning(e, exc_info=True, stack_info=True)
        # Save the last batch
        if out_split_size_chars is not None and len(docs) > 0:
            # Save to file and reset the docs
            self._save_docs_to_file(docs=docs,
                                    annotated_ids=annotated_ids,
                                    save_dir_path=save_dir_path,
                                    annotated_ids_path=annotated_ids_path,
                                    part_counter=part_counter)
        # Enable the GPU Components again
        if separate_nn_components:
            for name, _ in nn_components:
                # No need to do anything else as it was already in the pipe
                self.pipe.spacy_nlp.enable_pipe(name)
        return docs
    def _multiprocessing_batch(self,
                               data: Union[List[Tuple], Iterable[Tuple]],
                               nproc: int = 8,
                               batch_size_chars: int = 1000000,
                               only_cui: bool = False,
                               addl_info: List[str] = [],
                               nn_components: List = [],
                               min_free_memory: int = 0) -> Dict:
        r""" Run multiprocessing on one batch.

        Args:
            data:
                Iterator or array with format: [(id, text), (id, text), ...]
            nproc (`int`, defaults to 8):
                Number of processors
            batch_size_chars (`int`, defaults to 1000000):
                Size of a batch in number of characters

        Returns:
            A dictionary: {id: doc_json, id2: doc_json2, ...}
        """
        # Create the input output for MP
        with Manager() as manager:
            out_list = manager.list()
            lock = manager.Lock()
            in_q = manager.Queue(maxsize=10*nproc)
            id2text = {}
            for batch in self._batch_generator(data, batch_size_chars):
                if nn_components:
                    # We need this for the json_to_fake_spacy
                    id2text.update({k:v for k,v in batch})
                in_q.put(batch)
            # Final data point for workers - one poison pill (None) per worker.
            for _ in range(nproc):
                in_q.put(None)
            sleep(2)
            # Create processes
            procs = []
            for i in range(nproc):
                p = Process(target=self._mp_cons,
                            kwargs={'in_q': in_q,
                                    'out_list': out_list,
                                    'pid': i,
                                    'only_cui': only_cui,
                                    'addl_info': addl_info,
                                    'min_free_memory': min_free_memory,
                                    'lock': lock})
                p.start()
                procs.append(p)
            # Join processes
            for p in procs:
                p.join()
            docs = {}
            # Convert the collected (id, doc) tuples into a dict.
            docs.update({k:v for k,v in out_list})
            # If we have separate GPU components now we pipe that
            if nn_components:
                try:
                    self._run_nn_components(docs, nn_components, id2text=id2text)
                except Exception as e:
                    self.log.warning(e, exc_info=True, stack_info=True)
        return docs
    def multiprocessing_pipe(self,
                             in_data: Union[List[Tuple], Iterable[Tuple]],
                             nproc: Optional[int] = None,
                             batch_size: Optional[int] = None,
                             only_cui: bool = False,
                             addl_info: List[str] = [],
                             return_dict: bool = True,
                             batch_factor: int = 2) -> Union[List[Tuple], Dict]:
        r""" Run multiprocessing NOT FOR TRAINING

        in_data: a list with format: [(id, text), (id, text), ...]
        nproc: the number of processors
        batch_size: the number of texts to buffer
        return_dict: a flag for returning either a dict or a list of tuples
        return: a dict: {id: doc_json, id: doc_json, ...} or if return_dict is False, a list of tuples: [(id, doc_json), (id, doc_json), ...]
        """
        out: Union[Dict, List[Tuple]]
        if nproc == 0:
            raise ValueError("nproc cannot be set to zero")
        # Materialise the input so len() works and output order stays aligned.
        in_data = list(in_data) if isinstance(in_data, Iterable) else in_data
        # Derive worker count / batch size from the input size when not given.
        n_process = nproc if nproc is not None else min(max(cpu_count() - 1, 1), math.ceil(len(in_data) / batch_factor))
        batch_size = batch_size if batch_size is not None else math.ceil(len(in_data) / (batch_factor * abs(n_process)))
        start_method = None
        try:
            if self._meta_cats:
                import torch
                if torch.multiprocessing.get_start_method() != "spawn":
                    # Remember the current start method so it can be restored below.
                    start_method = torch.multiprocessing.get_start_method()
                    torch.multiprocessing.set_start_method("spawn", force=True)
            entities = self.get_entities_multi_texts(texts=in_data, only_cui=only_cui, addl_info=addl_info,
                                                     n_process=n_process, batch_size=batch_size)
        finally:
            if start_method is not None:
                import torch
                torch.multiprocessing.set_start_method(start_method, force=True)
        # Re-attach the caller's ids to the results.
        if return_dict:
            out = {}
            for idx, data in enumerate(in_data):
                out[data[0]] = entities[idx]
        else:
            out = []
            for idx, data in enumerate(in_data):
                out.append((data[0], entities[idx]))
        return out
    def _mp_cons(self, in_q: Queue, out_list: List, min_free_memory: int, lock: Lock, pid: int = 0, only_cui: bool = False, addl_info: List = []) -> None:
        """Worker: take batches from `in_q`, annotate them, and flush results to `out_list`.

        Exits when it receives the poison pill (None) or when free RAM drops below
        `min_free_memory` (as a fraction of total). Results are appended locally
        and copied to the shared list under `lock` only on exit.
        """
        out: List = []
        while True:
            if not in_q.empty():
                if psutil.virtual_memory().available / psutil.virtual_memory().total < min_free_memory:
                    with lock:
                        out_list.extend(out)
                    # Stop a process if there is not enough memory left
                    break
                data = in_q.get()
                if data is None:
                    # Poison pill - flush accumulated results and exit.
                    with lock:
                        out_list.extend(out)
                    break
                for i_text, text in data:
                    try:
                        # Annotate document
                        doc = self.get_entities(text=text, only_cui=only_cui, addl_info=addl_info)
                        out.append((i_text, doc))
                    except Exception as e:
                        # One bad document should not kill the worker.
                        self.log.warning("PID: %s failed one document in _mp_cons, running will continue normally. \n" +
                                         "Document length in chars: %s, and ID: %s", pid, len(str(text)), i_text)
                        self.log.warning(str(e))
            sleep(2)
def _doc_to_out(self,
                doc: Doc,
                only_cui: bool,
                addl_info: List[str],
                out_with_text: bool = False) -> Dict:
    """Convert an annotated spaCy `Doc` into the JSON-style output dict.

    Returns {'entities': {id: entity_info_or_cui}, 'tokens': [...]}, plus
    'text' when configured (annotation_output['include_text_in_output'])
    or when `out_with_text` is set. With `only_cui` each entity maps to
    its CUI string only. A `None` doc yields the empty skeleton.
    """
    out: Dict = {'entities': {}, 'tokens': []}
    cnf_annotation_output = getattr(self.config, 'annotation_output', {})
    if doc is not None:
        out_ent: Dict = {}
        if self.config.general.get('show_nested_entities', False):
            # Rebuild Span objects from the raw nested-entity dicts on doc._.ents.
            _ents = []
            for _ent in doc._.ents:
                entity = Span(doc, _ent['start'], _ent['end'], label=_ent['label'])
                entity._.cui = _ent['cui']
                entity._.detected_name = _ent['detected_name']
                entity._.context_similarity = _ent['context_similarity']
                entity._.id = _ent['id']
                if 'meta_anns' in _ent:
                    entity._.meta_anns = _ent['meta_anns']
                _ents.append(entity)
        else:
            _ents = doc.ents
        if cnf_annotation_output.get("lowercase_context", True):
            doc_tokens = [tkn.text_with_ws.lower() for tkn in list(doc)]
        else:
            doc_tokens = [tkn.text_with_ws for tkn in list(doc)]
        if cnf_annotation_output.get('doc_extended_info', False):
            # Add tokens if extended info
            out['tokens'] = doc_tokens
        context_left = cnf_annotation_output.get('context_left', -1)
        context_right = cnf_annotation_output.get('context_right', -1)
        doc_extended_info = cnf_annotation_output.get('doc_extended_info', False)
        for _, ent in enumerate(_ents):
            cui = str(ent._.cui)
            if not only_cui:
                out_ent['pretty_name'] = self.cdb.get_name(cui)
                out_ent['cui'] = cui
                out_ent['type_ids'] = list(self.cdb.cui2type_ids.get(cui, ''))
                out_ent['types'] = [self.cdb.addl_info['type_id2name'].get(tui, '') for tui in out_ent['type_ids']]
                out_ent['source_value'] = ent.text
                out_ent['detected_name'] = str(ent._.detected_name)
                out_ent['acc'] = float(ent._.context_similarity)
                out_ent['context_similarity'] = float(ent._.context_similarity)
                out_ent['start'] = ent.start_char
                out_ent['end'] = ent.end_char
                # Attach any requested additional-info maps (e.g. 'cui2icd10' -> key 'icd10').
                for addl in addl_info:
                    tmp = self.cdb.addl_info.get(addl, {}).get(cui, [])
                    out_ent[addl.split("2")[-1]] = list(tmp) if type(tmp) == set else tmp
                out_ent['id'] = ent._.id
                out_ent['meta_anns'] = {}
                if doc_extended_info:
                    out_ent['start_tkn'] = ent.start
                    out_ent['end_tkn'] = ent.end
                if context_left > 0 and context_right > 0:
                    out_ent['context_left'] = doc_tokens[max(ent.start - context_left, 0):ent.start]
                    out_ent['context_right'] = doc_tokens[ent.end:min(ent.end + context_right, len(doc_tokens))]
                    out_ent['context_center'] = doc_tokens[ent.start:ent.end]
                if hasattr(ent._, 'meta_anns') and ent._.meta_anns:
                    out_ent['meta_anns'] = ent._.meta_anns
                # out_ent is reused across iterations, so copy it on insert.
                out['entities'][out_ent['id']] = dict(out_ent)
            else:
                out['entities'][ent._.id] = cui
        if cnf_annotation_output.get('include_text_in_output', False) or out_with_text:
            out['text'] = doc.text
    return out
def _get_trimmed_text(self, text: Optional[str]) -> str:
    """Return `text` truncated to the configured maximum document length.

    Missing or empty input yields an empty string. When the
    'max_document_length' key is absent the text is returned unchanged
    (slicing with a None bound keeps everything).
    """
    if text is None or len(text) == 0:
        return ""
    limit = self.config.preprocessing.get('max_document_length')
    return text[:limit]
def _generate_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> Iterable[str]:
    """Lazily yield each input text trimmed to the configured max length.

    Accepts plain strings or (id, text) tuples; for tuples only the text
    part is used.
    """
    for item in texts:
        raw = item[1] if isinstance(item, tuple) else item
        yield self._get_trimmed_text(raw)
def _get_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> List[str]:
    """Return a list of the input texts trimmed to the configured max length.

    Accepts plain strings or (id, text) tuples; for tuples only the text
    part is used.
    """
    return [
        self._get_trimmed_text(item[1] if isinstance(item, tuple) else item)
        for item in texts
    ]
@staticmethod
def _pipe_error_handler(proc_name: str, proc: "Pipe", docs: List[Doc], e: Exception) -> None:
    """spaCy pipe error callback: log the failing component and a short
    preview of each document in the offending batch, then return."""
    CAT.log.warning("Exception raised when applying component %s to a batch of docs.", proc_name)
    CAT.log.warning(e, exc_info=True, stack_info=True)
    if docs is None:
        return
    CAT.log.warning("Docs contained in the batch:")
    for doc in docs:
        if hasattr(doc, "text"):
            CAT.log.warning("%s...", doc.text[:50])
@staticmethod
def _get_doc_annotations(doc: Doc):
if type(doc['annotations']) == list:
return doc['annotations']
if type(doc['annotations']) == dict:
return doc['annotations'].values()
return None
def destroy_pipe(self):
    """Destroy the underlying spaCy pipeline and release its resources."""
    self.pipe.destroy()
| 46.778519 | 172 | 0.54878 | import os
import shutil
import pickle
import traceback
import json
import logging
import math
import time
import psutil
from time import sleep
from copy import deepcopy
from multiprocess import Process, Manager, cpu_count
from multiprocess.queues import Queue
from multiprocess.synchronize import Lock
from typing import Union, List, Tuple, Optional, Dict, Iterable, Set
from itertools import islice, chain, repeat
from datetime import date
from tqdm.autonotebook import tqdm, trange
from spacy.tokens import Span, Doc, Token
from spacy.language import Language
from medcat import __version__
from medcat.preprocessing.tokenizers import spacy_split_all
from medcat.pipe import Pipe
from medcat.preprocessing.taggers import tag_skip_and_punct
from medcat.cdb import CDB
from medcat.utils.matutils import intersect_nonempty_set
from medcat.utils.data_utils import make_mc_train_test, get_false_positives
from medcat.utils.normalizers import BasicSpellChecker
from medcat.utils.checkpoint import Checkpoint, CheckpointConfig, CheckpointManager
from medcat.utils.helpers import tkns_from_doc, get_important_config_parameters
from medcat.utils.hasher import Hasher
from medcat.ner.vocab_based_ner import NER
from medcat.linking.context_based_linker import Linker
from medcat.utils.filters import get_project_filters, check_filters
from medcat.preprocessing.cleaners import prepare_name
from medcat.meta_cat import MetaCAT
from medcat.utils.meta_cat.data_utils import json_to_fake_spacy
from medcat.config import Config
from medcat.vocab import Vocab
from medcat.utils.decorators import deprecated
from medcat.ner.transformers_ner import TransformersNER
class CAT(object):
log = logging.getLogger(__package__)
DEFAULT_MODEL_PACK_NAME = "medcat_model_pack"
def __init__(self,
             cdb: CDB,
             vocab: Union[Vocab, None] = None,
             config: Optional[Config] = None,
             meta_cats: List[MetaCAT] = [],
             addl_ner: Union[TransformersNER, List[TransformersNER]] = []) -> None:
    """Create a CAT annotator from its component models.

    Args:
        cdb: Concept database used for NER+L.
        vocab: Vocabulary for the spell checker / context model (optional).
        config: If None the config attached to `cdb` is used; otherwise the
            given config is also written back onto `cdb.config`.
        meta_cats: Meta-annotation models appended to the pipeline.
        addl_ner: Additional transformer NER model(s); a single model is
            normalized to a one-element list.
    """
    self.cdb = cdb
    self.vocab = vocab
    if config is None:
        # Fall back to the config carried by the CDB.
        self.config = cdb.config
    else:
        self.config = config
        self.cdb.config = config
    self._meta_cats = meta_cats
    self._addl_ner = addl_ner if isinstance(addl_ner, list) else [addl_ner]
    self._create_pipeline(self.config)
def _create_pipeline(self, config):
    """(Re)build the spaCy pipeline: tokenizer, skip/punct tagger, optional
    spell checker, dictionary NER, linker, additional transformer NERs and
    MetaCAT components, in that order."""
    self.log.setLevel(config.general['log_level'])
    self.pipe = Pipe(tokenizer=spacy_split_all, config=config)
    self.pipe.add_tagger(tagger=tag_skip_and_punct,
                         name='skip_and_punct',
                         additional_fields=['is_punct'])
    if self.vocab is not None:
        # Spell checking is only possible when a vocabulary is available.
        spell_checker = BasicSpellChecker(cdb_vocab=self.cdb.vocab, config=config, data_vocab=self.vocab)
        self.pipe.add_token_normalizer(spell_checker=spell_checker, config=config)
    self.ner = NER(self.cdb, config)
    self.pipe.add_ner(self.ner)
    self.linker = Linker(self.cdb, self.vocab, config)
    self.pipe.add_linker(self.linker)
    for ner in self._addl_ner:
        self.pipe.add_addl_ner(ner, ner.config.general['name'])
    for meta_cat in self._meta_cats:
        self.pipe.add_meta_cat(meta_cat, meta_cat.config.general['category_name'])
    # Raise spaCy's document-length limit to the configured maximum.
    self.pipe.spacy_nlp.max_length = config.preprocessing.get('max_document_length', 1000000)
@deprecated(message="Replaced with cat.pipe.spacy_nlp.")
def get_spacy_nlp(self) -> Language:
    """Deprecated: return the underlying spaCy Language object."""
    return self.pipe.spacy_nlp
def get_hash(self):
    """Combine the hashes of the CDB, the config and every attached
    MetaCAT / transformer-NER model into one hex digest (used as the
    model-pack version id)."""
    hasher = Hasher()
    part_hashes = [self.cdb.get_hash(), self.config.get_hash()]
    part_hashes.extend(mc.get_hash() for mc in self._meta_cats)
    part_hashes.extend(trf.get_hash() for trf in self._addl_ner)
    for part in part_hashes:
        hasher.update(part)
    return hasher.hexdigest()
def get_model_card(self, as_dict=False):
    """Assemble the model card describing this model pack.

    Returns a dict when `as_dict` is True, otherwise a pretty-printed
    JSON string with the original key order preserved.
    """
    version = self.config.version
    card = {
        'Model ID': version['id'],
        'Last Modified On': version['last_modified'],
        'History (from least to most recent)': version['history'],
        'Description': version['description'],
        'Source Ontology': version['ontology'],
        'Location': version['location'],
        'MetaCAT models': version['meta_cats'],
        'Basic CDB Stats': version['cdb_info'],
        'Performance': version['performance'],
        'Important Parameters (Partial view, all available in cat.config)': get_important_config_parameters(self.config),
        'MedCAT Version': version['medcat_version']
    }
    return card if as_dict else json.dumps(card, indent=2, sort_keys=False)
def _versioning(self):
    """Refresh the version block of the config before packing a model.

    Recomputes the model hash; if it differs from the stored id the old id
    is appended to the history and the id, timestamp, CDB stats, MetaCAT
    cards and MedCAT version are updated.
    """
    if self.config.version['description'] == 'No description':
        self.log.warning("Please consider populating the version information [description, performance, location, ontology] in cat.config.version")
    m = self.get_hash()
    version = self.config.version
    if version['id'] is None or m != version['id']:
        if version['id'] is not None:
            # Keep the previous id as history before overwriting it.
            version['history'].append(version['id'])
        version['id'] = m
        version['last_modified'] = date.today().strftime("%d %B %Y")
        version['cdb_info'] = self.cdb._make_stats()
        version['meta_cats'] = [meta_cat.get_model_card(as_dict=True) for meta_cat in self._meta_cats]
        version['medcat_version'] = __version__
        self.log.warning("Please consider updating [description, performance, location, ontology] in cat.config.version")
def create_model_pack(self, save_dir_path: str, model_pack_name: str = DEFAULT_MODEL_PACK_NAME) -> str:
    """Save all component models plus the spaCy model into one zipped model pack.

    Args:
        save_dir_path: Directory in which the pack folder and the zip are created.
        model_pack_name: Base name; the model version id is appended.

    Returns:
        The full model pack name (with version id, without the .zip suffix).
    """
    # The spaCy model is stored by name only; loading may have turned it into a path.
    self.config.general['spacy_model'] = os.path.basename(self.config.general['spacy_model'])
    # Update version info (id/history/stats) before packing.
    self._versioning()
    model_pack_name += "_{}".format(self.config.version['id'])
    self.log.warning("This will save all models into a zip file, can take some time and require quite a bit of disk space.")
    _save_dir_path = save_dir_path
    save_dir_path = os.path.join(save_dir_path, model_pack_name)
    os.makedirs(os.path.expanduser(save_dir_path), exist_ok=True)
    # Copy the spaCy model into the pack (replace any stale copy first).
    spacy_path = os.path.join(save_dir_path, self.config.general['spacy_model'])
    if str(self.pipe.spacy_nlp._path) != spacy_path:
        shutil.rmtree(spacy_path, ignore_errors=True)
        shutil.copytree(str(self.pipe.spacy_nlp._path), spacy_path)
    # Save the CDB.
    cdb_path = os.path.join(save_dir_path, "cdb.dat")
    self.cdb.save(cdb_path)
    # Save the vocab (model packs without a vocab are allowed).
    vocab_path = os.path.join(save_dir_path, "vocab.dat")
    if self.vocab is not None:
        self.vocab.save(vocab_path)
    # Save any additional transformer NER models.
    for comp in self.pipe.spacy_nlp.components:
        if isinstance(comp[1], TransformersNER):
            trf_path = os.path.join(save_dir_path, "trf_" + comp[1].config.general['name'])
            comp[1].save(trf_path)
    # Save all MetaCAT models.
    for comp in self.pipe.spacy_nlp.components:
        if isinstance(comp[1], MetaCAT):
            name = comp[0]
            meta_path = os.path.join(save_dir_path, "meta_" + name)
            comp[1].save(meta_path)
    # Save the model card.
    model_card_path = os.path.join(save_dir_path, "model_card.json")
    # BUGFIX: use a context manager so the handle is flushed and closed before
    # the folder is zipped (the original passed open(...) directly to json.dump
    # and never closed it, risking a truncated model_card.json in the archive).
    with open(model_card_path, 'w') as model_card_file:
        json.dump(self.get_model_card(as_dict=True), model_card_file, indent=2)
    # Zip everything that is part of the model pack.
    shutil.make_archive(os.path.join(_save_dir_path, model_pack_name), 'zip', root_dir=save_dir_path)
    self.log.info(self.get_model_card())
    return model_pack_name
@classmethod
def load_model_pack(cls, zip_path: str, meta_cat_config_dict: Optional[Dict] = None) -> "CAT":
    """Load a model pack (CDB, config, vocab, transformer NERs, MetaCATs).

    Args:
        zip_path: Path to the zipped model pack; if a folder with the same
            name already exists next to it, that folder is reused and the
            zip is not touched.
        meta_cat_config_dict: Config overrides passed to each MetaCAT on
            load, e.g. {'general': {'device': 'cpu'}}.

    Returns:
        A fully assembled CAT instance.
    """
    from medcat.cdb import CDB
    from medcat.vocab import Vocab
    from medcat.meta_cat import MetaCAT
    base_dir = os.path.dirname(zip_path)
    filename = os.path.basename(zip_path)
    foldername = filename.replace(".zip", '')
    model_pack_path = os.path.join(base_dir, foldername)
    if os.path.exists(model_pack_path):
        cls.log.info("Found an existing unziped model pack at: {}, the provided zip will not be touched.".format(model_pack_path))
    else:
        cls.log.info("Unziping the model pack and loading models.")
        shutil.unpack_archive(zip_path, extract_dir=model_pack_path)
    # Load the CDB.
    cdb_path = os.path.join(model_pack_path, "cdb.dat")
    cdb = CDB.load(cdb_path)
    # Rewrite the spaCy model name to its full path inside the pack.
    cdb.config.general['spacy_model'] = os.path.join(model_pack_path, os.path.basename(cdb.config.general['spacy_model']))
    # Load the vocab if one was packed.
    vocab_path = os.path.join(model_pack_path, "vocab.dat")
    if os.path.exists(vocab_path):
        vocab = Vocab.load(vocab_path)
    else:
        vocab = None
    # Load any transformer NER models (folders prefixed 'trf_').
    trf_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('trf_')]
    addl_ner = []
    for trf_path in trf_paths:
        trf = TransformersNER.load(save_dir_path=trf_path)
        # Share the freshly loaded CDB with the transformer model.
        trf.cdb = cdb
        addl_ner.append(trf)
    # Load any MetaCAT models (folders prefixed 'meta_').
    meta_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('meta_')]
    meta_cats = []
    for meta_path in meta_paths:
        meta_cats.append(MetaCAT.load(save_dir_path=meta_path,
                                      config_dict=meta_cat_config_dict))
    cat = cls(cdb=cdb, config=cdb.config, vocab=vocab, meta_cats=meta_cats, addl_ner=addl_ner)
    cls.log.info(cat.get_model_card())
    return cat
def __call__(self, text: Optional[str], do_train: bool = False) -> Optional[Doc]:
    """Annotate one text and return the resulting spaCy Doc.

    `do_train` switches the linker into online-training mode for this call;
    a None input is logged as an error and yields None.
    """
    self.config.linking['train'] = do_train
    if text is None:
        self.log.error("The input text should be either a string or a sequence of strings but got %s", type(text))
        return None
    trimmed = self._get_trimmed_text(str(text))
    return self.pipe(trimmed)
def __repr__(self):
    """Return the JSON model card as the representation of this instance."""
    return self.get_model_card(as_dict=False)
def _print_stats(self,
                 data: Dict,
                 epoch: int = 0,
                 use_project_filters: bool = False,
                 use_overlaps: bool = False,
                 use_cui_doc_limit: bool = False,
                 use_groups: bool = False,
                 extra_cui_filter: Optional[Set] = None) -> Tuple:
    """Score model predictions against a MedCATtrainer export and print metrics.

    Args:
        data: MedCATtrainer export dict ({'projects': [...]}).
        epoch: Only used for display in the printed summary.
        use_project_filters: Apply the cui/type-id filters defined per project.
        use_overlaps: Score against overlapping entities (doc._.ents) instead
            of the final non-overlapping doc.ents.
        use_cui_doc_limit: Restrict scoring to CUIs annotated in each document.
        use_groups: Map CUIs to their group (addl_info['cui2group']) first.
        extra_cui_filter: Extra CUI set intersected into the filters.

    Returns:
        (fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples):
        per-CUI counts/metrics plus example snippets per category.
    """
    tp = 0
    fp = 0
    fn = 0
    fps: Dict = {}
    fns: Dict = {}
    tps: Dict = {}
    cui_prec: Dict = {}
    cui_rec: Dict = {}
    cui_f1: Dict = {}
    cui_counts: Dict = {}
    examples: Dict = {'fp': {}, 'fn': {}, 'tp': {}}
    fp_docs: Set = set()
    fn_docs: Set = set()
    # Back up the linking filters; they are mutated below and restored at the end.
    _filters = deepcopy(self.config.linking['filters'])
    filters = self.config.linking['filters']
    for pind, project in tqdm(enumerate(data['projects']), desc="Stats project", total=len(data['projects']), leave=False):
        filters['cuis'] = set()
        # Seed with the extra filter if one was given.
        if isinstance(extra_cui_filter, set):
            filters['cuis'] = extra_cui_filter
        if use_project_filters:
            project_filter = get_project_filters(cuis=project.get('cuis', None),
                                                 type_ids=project.get('tuis', None),
                                                 cdb=self.cdb,
                                                 project=project)
            # Intersect the project filter with what is already set.
            if project_filter:
                filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
        for dind, doc in tqdm(
                enumerate(project["documents"]),
                desc="Stats document",
                total=len(project["documents"]),
                leave=False,
        ):
            anns = self._get_doc_annotations(doc)
            # Per-document limit: only score CUIs annotated in this document
            # (intersected with the extra filter; '{empty}' blocks everything).
            if use_cui_doc_limit:
                _cuis = set([ann['cui'] for ann in anns])
                if _cuis:
                    filters['cuis'] = intersect_nonempty_set(_cuis, extra_cui_filter)
                else:
                    filters['cuis'] = {'empty'}
            spacy_doc: Doc = self(doc['text'])
            if use_overlaps:
                p_anns = spacy_doc._.ents
            else:
                p_anns = spacy_doc.ents
            # Normalize gold annotations to (start, cui) pairs + example snippets.
            anns_norm = []
            anns_norm_neg = []
            anns_examples = []
            anns_norm_cui = []
            for ann in anns:
                cui = ann['cui']
                if check_filters(cui, filters):
                    if use_groups:
                        cui = self.cdb.addl_info['cui2group'].get(cui, cui)
                    if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)):
                        anns_norm.append((ann['start'], cui))
                        anns_examples.append({"text": doc['text'][max(0, ann['start']-60):ann['end']+60],
                                              "cui": cui,
                                              "source value": ann['value'],
                                              "acc": 1,
                                              "project index": pind,
                                              "document inedex": dind})
                    elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)):
                        # Killed/deleted gold annotations: predicting them is a "real" FP.
                        anns_norm_neg.append((ann['start'], cui))
                    if ann.get("validated", True):
                        anns_norm_cui.append(cui)
                        cui_counts[cui] = cui_counts.get(cui, 0) + 1
            # Normalize predictions the same way.
            p_anns_norm = []
            p_anns_examples = []
            for ann in p_anns:
                cui = ann._.cui
                if use_groups:
                    cui = self.cdb.addl_info['cui2group'].get(cui, cui)
                p_anns_norm.append((ann.start_char, cui))
                p_anns_examples.append({"text": doc['text'][max(0, ann.start_char-60):ann.end_char+60],
                                        "cui": cui,
                                        "source value": ann.text,
                                        "acc": float(ann._.context_similarity),
                                        "project index": pind,
                                        "document inedex": dind})
            # Predictions matching a gold (start, cui) are TPs, otherwise FPs.
            for iann, ann in enumerate(p_anns_norm):
                cui = ann[1]
                if ann in anns_norm:
                    tp += 1
                    tps[cui] = tps.get(cui, 0) + 1
                    example = p_anns_examples[iann]
                    examples['tp'][cui] = examples['tp'].get(cui, []) + [example]
                else:
                    fp += 1
                    fps[cui] = fps.get(cui, 0) + 1
                    fp_docs.add(doc.get('name', 'unk'))
                    example = p_anns_examples[iann]
                    if ann in anns_norm_neg:
                        example['real_fp'] = True
                    examples['fp'][cui] = examples['fp'].get(cui, []) + [example]
            # Gold annotations with no matching prediction are FNs.
            for iann, ann in enumerate(anns_norm):
                if ann not in p_anns_norm:
                    cui = ann[1]
                    fn += 1
                    fn_docs.add(doc.get('name', 'unk'))
                    fns[cui] = fns.get(cui, 0) + 1
                    examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]]
    try:
        # Overall micro precision/recall/F1 (can raise ZeroDivisionError — caught below).
        prec = tp / (tp + fp)
        rec = tp / (tp + fn)
        f1 = 2*(prec*rec) / (prec + rec)
        print("Epoch: {}, Prec: {}, Rec: {}, F1: {}\n".format(epoch, prec, rec, f1))
        print("Docs with false positives: {}\n".format("; ".join([str(x) for x in list(fp_docs)[0:10]])))
        print("Docs with false negatives: {}\n".format("; ".join([str(x) for x in list(fn_docs)[0:10]])))
        # Sort per-CUI counts descending for reporting.
        fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}
        fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}
        tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}
        # Per-concept precision/recall/F1.
        for cui in tps.keys():
            prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))
            rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))
            f1 = 2*(prec*rec) / (prec + rec)
            cui_prec[cui] = prec
            cui_rec[cui] = rec
            cui_f1[cui] = f1
        # Top-10 tables of (preferred name, cui, count) per category.
        pr_fps = [(self.cdb.cui2preferred_name.get(cui,
                   list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]
        pr_fns = [(self.cdb.cui2preferred_name.get(cui,
                   list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]
        pr_tps = [(self.cdb.cui2preferred_name.get(cui,
                   list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]
        print("\n\nFalse Positives\n")
        for one in pr_fps:
            print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
        print("\n\nFalse Negatives\n")
        for one in pr_fns:
            print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
        print("\n\nTrue Positives\n")
        for one in pr_tps:
            print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
        print("*"*110 + "\n")
    except Exception:
        # Reporting must never crash stats collection (e.g. division by zero).
        traceback.print_exc()
    # Restore the filters that were backed up above.
    self.config.linking['filters'] = _filters
    return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples
def _init_ckpts(self, is_resumed, checkpoint):
    """Prepare checkpointing for training.

    When resuming, restores the latest checkpointed CDB (merging the current
    config into it) and rebuilds the pipeline; otherwise creates a fresh
    checkpoint when checkpointing is configured. Returns the checkpoint,
    or the passed-in value (possibly None) when checkpointing is disabled.
    """
    if self.config.general['checkpoint']['steps'] is not None or checkpoint is not None:
        checkpoint_config = CheckpointConfig(**self.config.general.get('checkpoint', {}))
        checkpoint_manager = CheckpointManager('cat_train', checkpoint_config)
        if is_resumed:
            checkpoint = checkpoint or checkpoint_manager.get_latest_checkpoint()
            self.log.info(f"Resume training on the most recent checkpoint at {checkpoint.dir_path}...")
            # Restore the CDB from the checkpoint and rebuild the pipeline on it.
            self.cdb = checkpoint.restore_latest_cdb()
            self.cdb.config.merge_config(self.config.__dict__)
            self.config = self.cdb.config
            self._create_pipeline(self.config)
        else:
            checkpoint = checkpoint or checkpoint_manager.create_checkpoint()
            self.log.info(f"Start new training and checkpoints will be saved at {checkpoint.dir_path}...")
    return checkpoint
def train(self,
          data_iterator: Iterable,
          nepochs: int = 1,
          fine_tune: bool = True,
          progress_print: int = 1000,
          checkpoint: Optional[Checkpoint] = None,
          is_resumed: bool = False) -> None:
    """Run unsupervised training over an iterable of raw text documents.

    Args:
        data_iterator: Iterable yielding one document (text) per step.
        nepochs: Number of passes over the data.
        fine_tune: If False, all existing training in the CDB is reset first.
        progress_print: Log progress every N documents.
        checkpoint: Optional checkpoint for periodic saving of the CDB.
        is_resumed: Resume from the latest checkpoint if True.
    """
    if not fine_tune:
        self.log.info("Removing old training data!")
        self.cdb.reset_training()
    checkpoint = self._init_ckpts(is_resumed, checkpoint)
    latest_trained_step = checkpoint.count if checkpoint is not None else 0
    # Repeat the iterator nepochs times, then skip the already-trained steps.
    epochal_data_iterator = chain.from_iterable(repeat(data_iterator, nepochs))
    for line in islice(epochal_data_iterator, latest_trained_step, None):
        if line is not None and line:
            # Convert to string
            line = str(line).strip()
            try:
                # Annotate in training mode; failures skip the line only.
                _ = self(line, do_train=True)
            except Exception as e:
                self.log.warning("LINE: '%s...' \t WAS SKIPPED", line[0:100])
                self.log.warning("BECAUSE OF: %s", str(e))
        else:
            self.log.warning("EMPTY LINE WAS DETECTED AND SKIPPED")
        latest_trained_step += 1
        if latest_trained_step % progress_print == 0:
            self.log.info("DONE: %s", str(latest_trained_step))
        if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
            checkpoint.save(cdb=self.cdb, count=latest_trained_step)
    # Always leave the linker in inference mode.
    self.config.linking['train'] = False
def add_cui_to_group(self, cui: str, group_name: str) -> None:
    """Assign a CUI to a group (stored under cdb.addl_info['cui2group']).

    Args:
        cui: The concept to be assigned.
        group_name: The group name the concept will map to.
    """
    # Add group_name
    self.cdb.addl_info['cui2group'][cui] = group_name
def unlink_concept_name(self, cui: str, name: str, preprocessed_name: bool = False) -> None:
    """Unlink `name` from `cui` so it is no longer detected for that concept.

    When config.general['full_unlink'] is set, the name is removed from
    every CUI it is currently linked to, not just the given one.

    Args:
        cui: Concept from which the name is removed.
        name: The name to unlink (raw, unless `preprocessed_name`).
        preprocessed_name: Treat `name` as already prepared.
    """
    if preprocessed_name:
        names = {name: 'nothing'}
    else:
        names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
    target_cuis = [cui]
    if self.config.general.get('full_unlink', False):
        # Full unlink: also collect every CUI linked to any prepared name.
        for prepared in names:
            target_cuis.extend(self.cdb.name2cuis.get(prepared, []))
    for target in target_cuis:
        self.cdb.remove_names(cui=target, names=names)
def add_and_train_concept(self,
                          cui: str,
                          name: str,
                          spacy_doc: Optional[Doc] = None,
                          spacy_entity: Optional[Union[List[Token], Span]] = None,
                          ontologies: Set = set(),
                          name_status: str = 'A',
                          type_ids: Set = set(),
                          description: str = '',
                          full_build: bool = True,
                          negative: bool = False,
                          devalue_others: bool = False,
                          do_add_concept: bool = True) -> None:
    """Add a name to a concept (optionally creating it) and run one online
    training step of the linker's context model.

    Args:
        cui: Concept to add the name to / to train.
        name: Raw name; prepared via `prepare_name` before use.
        spacy_doc: Document providing the training context (with `spacy_entity`).
        spacy_entity: The entity span/tokens to train on.
        ontologies / name_status / type_ids / description / full_build:
            Passed through to `cdb.add_concept`.
        negative: Treat the example as negative; the name is then NOT added.
        devalue_others: Also add negative training for all other CUIs that
            share any of the prepared names.
        do_add_concept: Skip adding to the CDB entirely when False.
    """
    names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
    # Only if not negative, otherwise do not add the new name if in fact it should not be detected
    if do_add_concept and not negative:
        self.cdb.add_concept(cui=cui, names=names, ontologies=ontologies, name_status=name_status, type_ids=type_ids, description=description,
                             full_build=full_build)
    if spacy_entity is not None and spacy_doc is not None:
        # Train Linking
        self.linker.context_model.train(cui=cui, entity=spacy_entity, doc=spacy_doc, negative=negative, names=names)
        if not negative and devalue_others:
            # Find all cuis
            cuis = set()
            for n in names:
                cuis.update(self.cdb.name2cuis.get(n, []))
            # Remove the cui for which we just added positive training
            if cui in cuis:
                cuis.remove(cui)
            # Add negative training for all other CUIs that link to these names
            for _cui in cuis:
                self.linker.context_model.train(cui=_cui, entity=spacy_entity, doc=spacy_doc, negative=True)
def train_supervised(self,
                     data_path: str,
                     reset_cui_count: bool = False,
                     nepochs: int = 1,
                     print_stats: int = 0,
                     use_filters: bool = False,
                     terminate_last: bool = False,
                     use_overlaps: bool = False,
                     use_cui_doc_limit: bool = False,
                     test_size: int = 0,
                     devalue_others: bool = False,
                     use_groups: bool = False,
                     never_terminate: bool = False,
                     train_from_false_positives: bool = False,
                     extra_cui_filter: Optional[Set] = None,
                     checkpoint: Optional[Checkpoint] = None,
                     is_resumed: bool = False) -> Tuple:
    """Run supervised training on a MedCATtrainer JSON export.

    Args:
        data_path: Path to the MedCATtrainer export (JSON).
        reset_cui_count: Reset cui2count_train to 100 for annotated CUIs
            so new training is not overwhelmed by old counts.
        nepochs: Number of passes over the train set.
        print_stats: Print stats every N epochs (0 disables; a pre-training
            stats run happens whenever it is > 0).
        use_filters: Apply per-project cui/type-id filters.
        terminate_last: Unlink killed annotations AFTER training instead of before.
        use_overlaps / use_cui_doc_limit / use_groups: Passed to `_print_stats`.
        test_size: Fraction for a train/test split; 0 means train == test.
        devalue_others: Passed to `add_and_train_concept`.
        never_terminate: Never unlink killed annotations.
        train_from_false_positives: Additionally add negative training for
            predicted entities that are false positives.
        extra_cui_filter: Extra CUI set intersected into the filters.
        checkpoint / is_resumed: Checkpointing controls.

    Returns:
        (fp, fn, tp, p, r, f1, cui_counts, examples) from the last stats run.
    """
    checkpoint = self._init_ckpts(is_resumed, checkpoint)
    # Back up the linking filters; they are mutated below and restored at the end.
    _filters = deepcopy(self.config.linking['filters'])
    filters = self.config.linking['filters']
    fp = fn = tp = p = r = f1 = examples = {}
    with open(data_path) as f:
        data = json.load(f)
    cui_counts = {}
    if test_size == 0:
        self.log.info("Running without a test set, or train==test")
        test_set = data
        train_set = data
    else:
        train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size)
    # Stats before training, when requested.
    if print_stats > 0:
        fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
                                                                       use_project_filters=use_filters,
                                                                       use_cui_doc_limit=use_cui_doc_limit,
                                                                       use_overlaps=use_overlaps,
                                                                       use_groups=use_groups,
                                                                       extra_cui_filter=extra_cui_filter)
    if reset_cui_count:
        # Get all CUIs annotated anywhere in the train set.
        cuis = []
        for project in train_set['projects']:
            for doc in project['documents']:
                doc_annotations = self._get_doc_annotations(doc)
                for ann in doc_annotations:
                    cuis.append(ann['cui'])
        for cui in set(cuis):
            if cui in self.cdb.cui2count_train:
                self.cdb.cui2count_train[cui] = 100
    # Unlink names of annotations that were terminated (killed) — up front.
    if not never_terminate:
        for project in train_set['projects']:
            for doc in project['documents']:
                doc_annotations = self._get_doc_annotations(doc)
                for ann in doc_annotations:
                    if ann.get('killed', False):
                        self.unlink_concept_name(ann['cui'], ann['value'])
    latest_trained_step = checkpoint.count if checkpoint is not None else 0
    # Translate the checkpoint counter into a resume position.
    current_epoch, current_project, current_document = self._get_training_start(train_set, latest_trained_step)
    for epoch in trange(current_epoch, nepochs, initial=current_epoch, total=nepochs, desc='Epoch', leave=False):
        for idx_project in trange(current_project, len(train_set['projects']), initial=current_project, total=len(train_set['projects']), desc='Project', leave=False):
            project = train_set['projects'][idx_project]
            # Reset / rebuild the CUI filters for this project.
            filters['cuis'] = set()
            if isinstance(extra_cui_filter, set):
                filters['cuis'] = extra_cui_filter
            if use_filters:
                project_filter = get_project_filters(cuis=project.get('cuis', None),
                                                     type_ids=project.get('tuis', None),
                                                     cdb=self.cdb,
                                                     project=project)
                if project_filter:
                    filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
            for idx_doc in trange(current_document, len(project['documents']), initial=current_document, total=len(project['documents']), desc='Document', leave=False):
                doc = project['documents'][idx_doc]
                spacy_doc: Doc = self(doc['text'])
                # Compatibility with old output where annotations are a list
                doc_annotations = self._get_doc_annotations(doc)
                for ann in doc_annotations:
                    if not ann.get('killed', False):
                        cui = ann['cui']
                        start = ann['start']
                        end = ann['end']
                        spacy_entity = tkns_from_doc(spacy_doc=spacy_doc, start=start, end=end)
                        deleted = ann.get('deleted', False)
                        # Deleted annotations become negative examples.
                        self.add_and_train_concept(cui=cui,
                                                   name=ann['value'],
                                                   spacy_doc=spacy_doc,
                                                   spacy_entity=spacy_entity,
                                                   negative=deleted,
                                                   devalue_others=devalue_others)
                if train_from_false_positives:
                    fps: List[Span] = get_false_positives(doc, spacy_doc)
                    for fp in fps:
                        fp_: Span = fp
                        self.add_and_train_concept(cui=fp_._.cui,
                                                   name=fp_.text,
                                                   spacy_doc=spacy_doc,
                                                   spacy_entity=fp_,
                                                   negative=True,
                                                   do_add_concept=False)
                latest_trained_step += 1
                if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
                    checkpoint.save(self.cdb, latest_trained_step)
        if terminate_last and not never_terminate:
            # Remove entities that were terminated, but after all training is done
            for project in train_set['projects']:
                for doc in project['documents']:
                    doc_annotations = self._get_doc_annotations(doc)
                    for ann in doc_annotations:
                        if ann.get('killed', False):
                            self.unlink_concept_name(ann['cui'], ann['value'])
        if print_stats > 0 and (epoch + 1) % print_stats == 0:
            fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
                                                                           epoch=epoch + 1,
                                                                           use_project_filters=use_filters,
                                                                           use_cui_doc_limit=use_cui_doc_limit,
                                                                           use_overlaps=use_overlaps,
                                                                           use_groups=use_groups,
                                                                           extra_cui_filter=extra_cui_filter)
    # Restore the filters that were backed up above.
    self.config.linking['filters'] = _filters
    return fp, fn, tp, p, r, f1, cui_counts, examples
def get_entities(self,
                 text: str,
                 only_cui: bool = False,
                 addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed']) -> Dict:
    """Annotate `text` and return the detected entities as a JSON-style dict.

    `only_cui` reduces each entity to its CUI; `addl_info` selects extra
    cdb.addl_info maps to attach per entity.
    """
    return self._doc_to_out(self(text), only_cui, addl_info)
def get_entities_multi_texts(self,
                             texts: Union[Iterable[str], Iterable[Tuple]],
                             only_cui: bool = False,
                             addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed'],
                             n_process: Optional[int] = None,
                             batch_size: Optional[int] = None) -> List[Dict]:
    """Annotate many texts, optionally via spaCy multiprocessing.

    Args:
        texts: Plain strings or (id, text) tuples.
        only_cui / addl_info: Passed through to the output conversion.
        n_process: If None annotate sequentially, otherwise use spaCy's
            multi-process batching with this many processes.
        batch_size: Batch size for the multi-process path.

    Returns:
        One output dict per input text, in input order; failed texts get
        an empty output dict.
    """
    out: List[Dict] = []
    if n_process is None:
        # Sequential path: trim lazily and annotate one by one.
        texts_ = self._generate_trimmed_texts(texts)
        for text in texts_:
            out.append(self._doc_to_out(self(text), only_cui, addl_info))
    else:
        self.pipe.set_error_handler(self._pipe_error_handler)
        try:
            texts_ = self._get_trimmed_texts(texts)
            docs = self.pipe.batch_multi_process(texts_, n_process, batch_size)
            for doc in tqdm(docs, total=len(texts_)):
                # Whitespace-only docs are treated as failed/empty.
                doc = None if doc.text.strip() == '' else doc
                out.append(self._doc_to_out(doc, only_cui, addl_info, out_with_text=True))
            # Currently spaCy cannot mark which pieces of texts failed within the pipe so be this workaround,
            # which also assumes texts are different from each others.
            if len(out) < len(texts_):
                self.log.warning("Found at least one failed batch and set output for enclosed texts to empty")
                for i, text in enumerate(texts_):
                    if i == len(out):
                        # Ran off the end of the produced outputs: pad with empties.
                        out.append(self._doc_to_out(None, only_cui, addl_info))
                    elif out[i].get('text', '') != text:
                        # Mismatched text => this input failed: insert an empty output.
                        out.insert(i, self._doc_to_out(None, only_cui, addl_info))
            # Strip the helper 'text' field again unless configured to keep it.
            cnf_annotation_output = getattr(self.config, 'annotation_output', {})
            if not(cnf_annotation_output.get('include_text_in_output', False)):
                for o in out:
                    if o is not None:
                        o.pop('text', None)
        finally:
            self.pipe.reset_error_handler()
    return out
def get_json(self, text: str, only_cui: bool = False, addl_info=['cui2icd10', 'cui2ontologies']) -> str:
    """Annotate `text` and return a JSON string of the form
    {'annotations': <entities dict>, 'text': <input text>}."""
    entities = self.get_entities(text, only_cui, addl_info=addl_info)['entities']
    return json.dumps({'annotations': entities, 'text': text})
@staticmethod
def _get_training_start(train_set, latest_trained_step):
total_steps_per_epoch = sum([1 for project in train_set['projects'] for _ in project['documents']])
if total_steps_per_epoch == 0:
raise ValueError("MedCATtrainer export contains no documents")
current_epoch, last_step_in_epoch = divmod(latest_trained_step, total_steps_per_epoch)
document_count = 0
current_project = 0
current_document = 0
for idx_project, project in enumerate(train_set['projects']):
for idx_doc, _ in enumerate(project['documents']):
document_count += 1
if document_count == last_step_in_epoch:
current_project = idx_project
current_document = idx_doc
break
if current_project > 0:
break
current_document = 0
return current_epoch, current_project, current_document
def _separate_nn_components(self):
    """Disable the neural pipe components (MetaCAT / transformer NER, which
    may run on GPU) and return them so they can be executed separately."""
    nn_components = []
    for name_component in self.pipe.spacy_nlp.components:
        if isinstance(name_component[1], (MetaCAT, TransformersNER)):
            self.pipe.spacy_nlp.disable_pipe(name_component[0])
            nn_components.append(name_component)
    return nn_components
def _run_nn_components(self, docs: Dict, nn_components: List, id2text: Dict) -> None:
    """Run separated NN components (currently MetaCAT only) over already
    produced output dicts, updating each entity's 'meta_anns' in place.

    Args:
        docs: {doc_id: output_dict} as produced by `_doc_to_out`.
        nn_components: (name, component) pairs from `_separate_nn_components`.
        id2text: Mapping of doc id to its raw text.
    """
    self.log.debug("Running GPU components separately")
    # First convert the docs into the fake spacy doc format
    spacy_docs = json_to_fake_spacy(docs, id2text=id2text)
    # Disable component locks also
    for name, component in nn_components:
        component.config.general['disable_component_lock'] = True
    # For meta_cat compoments
    for name, component in [c for c in nn_components if isinstance(c[1], MetaCAT)]:
        spacy_docs = component.pipe(spacy_docs)
    # Copy the produced meta annotations back into the output dicts.
    for spacy_doc in spacy_docs:
        for ent in spacy_doc.ents:
            docs[spacy_doc.id]['entities'][ent._.id]['meta_anns'].update(ent._.meta_anns)
def _batch_generator(self, data: Iterable, batch_size_chars: int, skip_ids: Set = set()):
docs = []
char_count = 0
for doc in data:
if doc[0] not in skip_ids:
char_count += len(str(doc[1]))
docs.append(doc)
if char_count < batch_size_chars:
continue
yield docs
docs = []
char_count = 0
if len(docs) > 0:
yield docs
def _save_docs_to_file(self, docs: Iterable, annotated_ids: List[str], save_dir_path: str, annotated_ids_path: Optional[str], part_counter: int = 0) -> int:
    """Pickle one part of the annotated documents to disk.

    Writes `docs` to part_<part_counter>.pickle inside `save_dir_path` and,
    when `annotated_ids_path` is given, also persists the list of already
    annotated ids together with the next part number.

    Returns:
        The incremented part counter (the number of the next part to write).
    """
    path = os.path.join(save_dir_path, 'part_{}.pickle'.format(part_counter))
    # BUGFIX: use context managers — the original passed open(...) directly to
    # pickle.dump and never closed either file handle.
    with open(path, "wb") as part_file:
        pickle.dump(docs, part_file)
    self.log.info("Saved part: %s, to: %s", part_counter, path)
    part_counter = part_counter + 1  # Increase for save, as it should be what is the next part
    if annotated_ids_path is not None:
        with open(annotated_ids_path, 'wb') as ids_file:
            pickle.dump((annotated_ids, part_counter), ids_file)
    return part_counter
    def multiprocessing(self,
                        data: Union[List[Tuple], Iterable[Tuple]],
                        nproc: int = 2,
                        batch_size_chars: int = 5000 * 1000,
                        only_cui: bool = False,
                        addl_info: List[str] = [],
                        separate_nn_components: bool = True,
                        out_split_size_chars: Optional[int] = None,
                        save_dir_path: str = os.path.abspath(os.getcwd()),
                        min_free_memory=0.1) -> Dict:
        """Annotate an iterable of (doc_id, text) pairs with multiple processes.

        Batches the input by character count, runs each batch through
        `_multiprocessing_batch`, and periodically saves results to disk.
        Progress (annotated ids + part counter) is persisted to
        ``annotated_ids.pickle`` in *save_dir_path*, so an interrupted run
        resumes by skipping already-annotated documents.

        Raises:
            Exception: If the pipe contains a TransformersNER component
                (transformer NER must be run sequentially).

        Returns:
            Dict mapping doc_id -> annotation output for the documents that
            were NOT flushed to disk (i.e. the last, unsaved portion).
        """
        # Transformer NER is incompatible with this multiprocessing path.
        for comp in self.pipe.spacy_nlp.components:
            if isinstance(comp[1], TransformersNER):
                raise Exception("Please do not use multiprocessing when running a transformer model for NER, run sequentially.")
        # Set max document length
        self.pipe.spacy_nlp.max_length = self.config.preprocessing.get('max_document_length', 1000000)
        if self._meta_cats and not separate_nn_components:
            # Hack for torch using multithreading, which is not good if not
            # separate_nn_components, need for CPU runs only
            import torch
            torch.set_num_threads(1)
        nn_components = []
        if separate_nn_components:
            # Detach the neural components; they are re-enabled at the end.
            nn_components = self._separate_nn_components()
        if save_dir_path is not None:
            os.makedirs(save_dir_path, exist_ok=True)
        # "5" looks like a magic number here so better with comment about why the choice was made.
        internal_batch_size_chars = batch_size_chars // (5 * nproc)
        annotated_ids_path = os.path.join(save_dir_path, 'annotated_ids.pickle') if save_dir_path is not None else None
        if annotated_ids_path is not None and os.path.exists(annotated_ids_path):
            # Resume from a previous (interrupted) run.
            annotated_ids, part_counter = pickle.load(open(annotated_ids_path, 'rb'))
        else:
            annotated_ids = []
            part_counter = 0
        docs = {}
        _start_time = time.time()
        _batch_counter = 0 # Used for splitting the output, counts batches inbetween saves
        for batch in self._batch_generator(data, batch_size_chars, skip_ids=set(annotated_ids)):
            self.log.info("Annotated until now: %s docs; Current BS: %s docs; Elapsed time: %.2f minutes",
                          len(annotated_ids),
                          len(batch),
                          (time.time() - _start_time)/60)
            try:
                _docs = self._multiprocessing_batch(data=batch,
                                                    nproc=nproc,
                                                    only_cui=only_cui,
                                                    batch_size_chars=internal_batch_size_chars,
                                                    addl_info=addl_info,
                                                    nn_components=nn_components,
                                                    min_free_memory=min_free_memory)
                docs.update(_docs)
                annotated_ids.extend(_docs.keys())
                _batch_counter += 1
                del _docs
                if out_split_size_chars is not None and (_batch_counter * batch_size_chars) > out_split_size_chars:
                    # Save to file and reset the docs
                    part_counter = self._save_docs_to_file(docs=docs,
                                                           annotated_ids=annotated_ids,
                                                           save_dir_path=save_dir_path,
                                                           annotated_ids_path=annotated_ids_path,
                                                           part_counter=part_counter)
                    del docs
                    docs = {}
                    _batch_counter = 0
            except Exception as e:
                # Best-effort: a failed batch is logged and skipped; the run continues.
                self.log.warning("Failed an outer batch in the multiprocessing script")
                self.log.warning(e, exc_info=True, stack_info=True)
        # Save the last batch
        if out_split_size_chars is not None and len(docs) > 0:
            # Save to file and reset the docs
            self._save_docs_to_file(docs=docs,
                                    annotated_ids=annotated_ids,
                                    save_dir_path=save_dir_path,
                                    annotated_ids_path=annotated_ids_path,
                                    part_counter=part_counter)
        # Enable the GPU Components again
        if separate_nn_components:
            for name, _ in nn_components:
                # No need to do anything else as it was already in the pipe
                self.pipe.spacy_nlp.enable_pipe(name)
        return docs
    def _multiprocessing_batch(self,
                               data: Union[List[Tuple], Iterable[Tuple]],
                               nproc: int = 8,
                               batch_size_chars: int = 1000000,
                               only_cui: bool = False,
                               addl_info: List[str] = [],
                               nn_components: List = [],
                               min_free_memory: int = 0) -> Dict:
        """Annotate one batch of (doc_id, text) pairs using *nproc* worker processes.

        Fills a shared queue with character-sized sub-batches, spawns workers
        running `_mp_cons`, joins them, and collects (doc_id, output) pairs
        from the shared list. If *nn_components* is non-empty, the separated
        neural components are applied afterwards in this (parent) process.

        Returns:
            Dict mapping doc_id -> annotation output for this batch.
        """
        # Create the input output for MP
        with Manager() as manager:
            out_list = manager.list()
            lock = manager.Lock()
            # Bounded queue keeps memory in check while sub-batches are produced.
            in_q = manager.Queue(maxsize=10*nproc)
            id2text = {}
            for batch in self._batch_generator(data, batch_size_chars):
                if nn_components:
                    # We need this for the json_to_fake_spacy
                    id2text.update({k:v for k,v in batch})
                in_q.put(batch)
            # Final data point for workers: one ``None`` sentinel per process.
            for _ in range(nproc):
                in_q.put(None)
            sleep(2)
            # Create processes
            procs = []
            for i in range(nproc):
                p = Process(target=self._mp_cons,
                            kwargs={'in_q': in_q,
                                    'out_list': out_list,
                                    'pid': i,
                                    'only_cui': only_cui,
                                    'addl_info': addl_info,
                                    'min_free_memory': min_free_memory,
                                    'lock': lock})
                p.start()
                procs.append(p)
            # Join processes
            for p in procs:
                p.join()
            docs = {}
            # Converts the (doc_id, output) tuples gathered by workers into a dict.
            docs.update({k:v for k,v in out_list})
            # If we have separate GPU components now we pipe that
            if nn_components:
                try:
                    self._run_nn_components(docs, nn_components, id2text=id2text)
                except Exception as e:
                    self.log.warning(e, exc_info=True, stack_info=True)
            return docs
    def multiprocessing_pipe(self,
                             in_data: Union[List[Tuple], Iterable[Tuple]],
                             nproc: Optional[int] = None,
                             batch_size: Optional[int] = None,
                             only_cui: bool = False,
                             addl_info: List[str] = [],
                             return_dict: bool = True,
                             batch_factor: int = 2) -> Union[List[Tuple], Dict]:
        """Annotate (doc_id, text) pairs via spaCy's built-in pipe multiprocessing.

        Unlike `multiprocessing`, this delegates parallelism to
        `get_entities_multi_texts` (spaCy ``n_process``/``batch_size``).
        When meta_cat components are present the torch multiprocessing start
        method is temporarily forced to "spawn" and restored afterwards.

        Raises:
            ValueError: If *nproc* is 0.

        Returns:
            Dict of doc_id -> entities if *return_dict*, else a list of
            (doc_id, entities) tuples in input order.
        """
        out: Union[Dict, List[Tuple]]
        if nproc == 0:
            raise ValueError("nproc cannot be set to zero")
        # Materialize so len() works and the data can be iterated twice.
        in_data = list(in_data) if isinstance(in_data, Iterable) else in_data
        # Defaults: leave one CPU free, and size batches so each process gets
        # roughly *batch_factor* batches.
        n_process = nproc if nproc is not None else min(max(cpu_count() - 1, 1), math.ceil(len(in_data) / batch_factor))
        batch_size = batch_size if batch_size is not None else math.ceil(len(in_data) / (batch_factor * abs(n_process)))
        start_method = None
        try:
            if self._meta_cats:
                import torch
                if torch.multiprocessing.get_start_method() != "spawn":
                    # Remember the old start method so it can be restored below.
                    start_method = torch.multiprocessing.get_start_method()
                    torch.multiprocessing.set_start_method("spawn", force=True)
            entities = self.get_entities_multi_texts(texts=in_data, only_cui=only_cui, addl_info=addl_info,
                                                     n_process=n_process, batch_size=batch_size)
        finally:
            if start_method is not None:
                import torch
                torch.multiprocessing.set_start_method(start_method, force=True)
        if return_dict:
            out = {}
            for idx, data in enumerate(in_data):
                out[data[0]] = entities[idx]
        else:
            out = []
            for idx, data in enumerate(in_data):
                out.append((data[0], entities[idx]))
        return out
    def _mp_cons(self, in_q: Queue, out_list: List, min_free_memory: int, lock: Lock, pid: int = 0, only_cui: bool = False, addl_info: List = []) -> None:
        """Worker-process consumer: annotate batches pulled from *in_q*.

        Polls the queue every 2 seconds; results are accumulated locally in
        ``out`` and flushed to the shared *out_list* (under *lock*) when the
        worker stops. The worker stops when it receives the ``None`` sentinel
        or when the fraction of free system memory drops below
        *min_free_memory*.

        NOTE(review): ``addl_info: List = []`` is a mutable default argument;
        it is only read here, so it is harmless, but worth cleaning up.
        """
        out: List = []
        while True:
            if not in_q.empty():
                if psutil.virtual_memory().available / psutil.virtual_memory().total < min_free_memory:
                    with lock:
                        out_list.extend(out)
                    # Stop a process if there is not enough memory left
                    break
                data = in_q.get()
                if data is None:
                    # Sentinel: flush accumulated results and exit.
                    with lock:
                        out_list.extend(out)
                    break
                for i_text, text in data:
                    try:
                        # Annotate document
                        doc = self.get_entities(text=text, only_cui=only_cui, addl_info=addl_info)
                        out.append((i_text, doc))
                    except Exception as e:
                        # Best-effort: log and continue with the next document.
                        self.log.warning("PID: %s failed one document in _mp_cons, running will continue normally. \n" +
                                         "Document length in chars: %s, and ID: %s", pid, len(str(text)), i_text)
                        self.log.warning(str(e))
            # Poll interval: avoid spinning when the queue is empty.
            sleep(2)
    def _doc_to_out(self,
                    doc: Doc,
                    only_cui: bool,
                    addl_info: List[str],
                    out_with_text: bool = False) -> Dict:
        """Serialize an annotated spaCy Doc into the output dict format.

        Output shape: ``{'entities': {ent_id: {...} | cui}, 'tokens': [...]}``
        plus ``'text'`` when configured or *out_with_text* is True. When
        *only_cui* is set each entity maps to its CUI string instead of the
        full per-entity dict. Behaviour is tuned by ``config.annotation_output``
        (lowercase_context, doc_extended_info, context_left/right,
        include_text_in_output).
        """
        out: Dict = {'entities': {}, 'tokens': []}
        cnf_annotation_output = getattr(self.config, 'annotation_output', {})
        if doc is not None:
            out_ent: Dict = {}
            if self.config.general.get('show_nested_entities', False):
                # Rebuild Span objects from the raw nested-entity dicts in doc._.ents.
                _ents = []
                for _ent in doc._.ents:
                    entity = Span(doc, _ent['start'], _ent['end'], label=_ent['label'])
                    entity._.cui = _ent['cui']
                    entity._.detected_name = _ent['detected_name']
                    entity._.context_similarity = _ent['context_similarity']
                    entity._.id = _ent['id']
                    if 'meta_anns' in _ent:
                        entity._.meta_anns = _ent['meta_anns']
                    _ents.append(entity)
            else:
                _ents = doc.ents
            if cnf_annotation_output.get("lowercase_context", True):
                doc_tokens = [tkn.text_with_ws.lower() for tkn in list(doc)]
            else:
                doc_tokens = [tkn.text_with_ws for tkn in list(doc)]
            if cnf_annotation_output.get('doc_extended_info', False):
                # Add tokens if extended info
                out['tokens'] = doc_tokens
            context_left = cnf_annotation_output.get('context_left', -1)
            context_right = cnf_annotation_output.get('context_right', -1)
            doc_extended_info = cnf_annotation_output.get('doc_extended_info', False)
            for _, ent in enumerate(_ents):
                cui = str(ent._.cui)
                if not only_cui:
                    out_ent['pretty_name'] = self.cdb.get_name(cui)
                    out_ent['cui'] = cui
                    out_ent['type_ids'] = list(self.cdb.cui2type_ids.get(cui, ''))
                    out_ent['types'] = [self.cdb.addl_info['type_id2name'].get(tui, '') for tui in out_ent['type_ids']]
                    out_ent['source_value'] = ent.text
                    out_ent['detected_name'] = str(ent._.detected_name)
                    out_ent['acc'] = float(ent._.context_similarity)
                    out_ent['context_similarity'] = float(ent._.context_similarity)
                    out_ent['start'] = ent.start_char
                    out_ent['end'] = ent.end_char
                    for addl in addl_info:
                        tmp = self.cdb.addl_info.get(addl, {}).get(cui, [])
                        # e.g. addl_info key "cui2icd10" becomes output key "icd10".
                        out_ent[addl.split("2")[-1]] = list(tmp) if type(tmp) == set else tmp
                    out_ent['id'] = ent._.id
                    out_ent['meta_anns'] = {}
                    if doc_extended_info:
                        out_ent['start_tkn'] = ent.start
                        out_ent['end_tkn'] = ent.end
                    if context_left > 0 and context_right > 0:
                        out_ent['context_left'] = doc_tokens[max(ent.start - context_left, 0):ent.start]
                        out_ent['context_right'] = doc_tokens[ent.end:min(ent.end + context_right, len(doc_tokens))]
                        out_ent['context_center'] = doc_tokens[ent.start:ent.end]
                    if hasattr(ent._, 'meta_anns') and ent._.meta_anns:
                        out_ent['meta_anns'] = ent._.meta_anns
                    # out_ent is reused across iterations, so store a copy.
                    out['entities'][out_ent['id']] = dict(out_ent)
                else:
                    out['entities'][ent._.id] = cui
            if cnf_annotation_output.get('include_text_in_output', False) or out_with_text:
                out['text'] = doc.text
        return out
def _get_trimmed_text(self, text: Optional[str]) -> str:
return text[0:self.config.preprocessing.get('max_document_length')] if text is not None and len(text) > 0 else ""
def _generate_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> Iterable[str]:
text_: str
for text in texts:
text_ = text[1] if isinstance(text, tuple) else text
yield self._get_trimmed_text(text_)
def _get_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> List[str]:
trimmed: List = []
text_: str
for text in texts:
text_ = text[1] if isinstance(text, tuple) else text
trimmed.append(self._get_trimmed_text(text_))
return trimmed
@staticmethod
def _pipe_error_handler(proc_name: str, proc: "Pipe", docs: List[Doc], e: Exception) -> None:
CAT.log.warning("Exception raised when applying component %s to a batch of docs.", proc_name)
CAT.log.warning(e, exc_info=True, stack_info=True)
if docs is not None:
CAT.log.warning("Docs contained in the batch:")
for doc in docs:
if hasattr(doc, "text"):
CAT.log.warning("%s...", doc.text[:50])
@staticmethod
def _get_doc_annotations(doc: Doc):
if type(doc['annotations']) == list:
return doc['annotations']
if type(doc['annotations']) == dict:
return doc['annotations'].values()
return None
    def destroy_pipe(self):
        """Destroy the underlying spaCy pipe and release its resources."""
        self.pipe.destroy()
| true | true |
f72d76138982bf4b2da476be3f46ae6979a7c6a5 | 5,536 | py | Python | src/aks-preview/azext_aks_preview/_loadbalancer.py | ConnectionMaster/azure-cli-extensions | 08d184f4efeac397c1ffcd21a83d651f4fad2782 | [
"MIT"
] | 1 | 2021-02-03T23:06:06.000Z | 2021-02-03T23:06:06.000Z | src/aks-preview/azext_aks_preview/_loadbalancer.py | ConnectionMaster/azure-cli-extensions | 08d184f4efeac397c1ffcd21a83d651f4fad2782 | [
"MIT"
] | null | null | null | src/aks-preview/azext_aks_preview/_loadbalancer.py | ConnectionMaster/azure-cli-extensions | 08d184f4efeac397c1ffcd21a83d651f4fad2782 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from distutils.version import StrictVersion # pylint: disable=no-name-in-module,import-error
from knack.log import get_logger
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ResourceReference
logger = get_logger(__name__)
def set_load_balancer_sku(sku, kubernetes_version):
    """Return the load balancer SKU to use for a managed cluster.

    An explicitly supplied *sku* always wins. Otherwise "basic" is chosen for
    Kubernetes versions below 1.13.0 (the only SKU they support) and
    "standard" for everything else.
    """
    if sku:
        return sku
    if kubernetes_version and StrictVersion(kubernetes_version) < StrictVersion("1.13.0"):
        # Note the trailing space: the original concatenated strings produced
        # the run-together word "kubernetesversion(...)" in the log output.
        logger.warning('Setting load_balancer_sku to basic as it is not specified and kubernetes '
                       'version(%s) less than 1.13.0 only supports basic load balancer SKU\n',
                       kubernetes_version)
        return "basic"
    return "standard"
def update_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
                                 outbound_ports, idle_timeout, profile):
    """Apply user supplied values to an existing load balancer profile.

    Returns *profile* untouched when no load-balancer option was provided.
    """
    any_option_set = is_load_balancer_profile_provided(
        managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
        outbound_ports, idle_timeout)
    if not any_option_set:
        return profile
    return configure_load_balancer_profile(
        managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
        outbound_ports, idle_timeout, profile)
def create_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
                                 outbound_ports, idle_timeout):
    """Build a new ManagedClusterLoadBalancerProfile from user supplied values.

    Returns None when no load-balancer option was provided.
    """
    if not is_load_balancer_profile_provided(managed_outbound_ip_count, outbound_ips,
                                             outbound_ip_prefixes, outbound_ports, idle_timeout):
        return None
    new_profile = ManagedClusterLoadBalancerProfile()
    return configure_load_balancer_profile(managed_outbound_ip_count, outbound_ips,
                                           outbound_ip_prefixes, outbound_ports,
                                           idle_timeout, new_profile)
def configure_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes, outbound_ports,
                                    idle_timeout, profile):
    """Configure a load balancer profile in place with customer supplied values.

    Only options that are provided are written; when any outbound-IP option
    is given, all three outbound-IP fields are first reset because they are
    mutually exclusive in the API. Returns the (possibly falsy) *profile*.
    """
    if not profile:
        return profile
    outbound_ip_resources = _get_load_balancer_outbound_ips(outbound_ips)
    outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(outbound_ip_prefixes)
    if managed_outbound_ip_count or outbound_ip_resources or outbound_ip_prefix_resources:
        # Reset all three outbound-IP fields before setting the chosen one(s).
        profile.managed_outbound_ips = None
        profile.outbound_ips = None
        profile.outbound_ip_prefixes = None
        if managed_outbound_ip_count:
            profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
                count=managed_outbound_ip_count
            )
        if outbound_ip_resources:
            profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
                public_ips=outbound_ip_resources
            )
        if outbound_ip_prefix_resources:
            profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
                public_ip_prefixes=outbound_ip_prefix_resources
            )
    if outbound_ports:
        profile.allocated_outbound_ports = outbound_ports
    if idle_timeout:
        profile.idle_timeout_in_minutes = idle_timeout
    return profile
def is_load_balancer_profile_provided(managed_outbound_ip_count, outbound_ips, ip_prefixes,
                                      outbound_ports, idle_timeout):
    """Return True when at least one load-balancer profile option is truthy."""
    options = (managed_outbound_ip_count, outbound_ips, ip_prefixes,
               outbound_ports, idle_timeout)
    return any(options)
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
"""parse load balancer profile outbound IP ids and return an array of references to the outbound IP resources"""
load_balancer_outbound_ip_resources = None
if load_balancer_outbound_ips:
load_balancer_outbound_ip_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ips.split(',')]
return load_balancer_outbound_ip_resources
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
"""parse load balancer profile outbound IP prefix ids and return an array \
of references to the outbound IP prefix resources"""
load_balancer_outbound_ip_prefix_resources = None
if load_balancer_outbound_ip_prefixes:
load_balancer_outbound_ip_prefix_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ip_prefixes.split(',')]
return load_balancer_outbound_ip_prefix_resources
| 50.788991 | 120 | 0.712247 |
from distutils.version import StrictVersion
from knack.log import get_logger
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfile
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from .vendored_sdks.azure_mgmt_preview_aks.v2020_12_01.models import ResourceReference
logger = get_logger(__name__)
def set_load_balancer_sku(sku, kubernetes_version):
if sku:
return sku
if kubernetes_version and StrictVersion(kubernetes_version) < StrictVersion("1.13.0"):
logger.warning('Setting load_balancer_sku to basic as it is not specified and kubernetes'
'version(%s) less than 1.13.0 only supports basic load balancer SKU\n',
kubernetes_version)
return "basic"
return "standard"
def update_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
outbound_ports, idle_timeout, profile):
if not is_load_balancer_profile_provided(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
outbound_ports, idle_timeout):
return profile
return configure_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
outbound_ports, idle_timeout, profile)
def create_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
outbound_ports, idle_timeout):
if not is_load_balancer_profile_provided(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
outbound_ports, idle_timeout):
return None
profile = ManagedClusterLoadBalancerProfile()
return configure_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes,
outbound_ports, idle_timeout, profile)
def configure_load_balancer_profile(managed_outbound_ip_count, outbound_ips, outbound_ip_prefixes, outbound_ports,
idle_timeout, profile):
if not profile:
return profile
outbound_ip_resources = _get_load_balancer_outbound_ips(outbound_ips)
outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(outbound_ip_prefixes)
if managed_outbound_ip_count or outbound_ip_resources or outbound_ip_prefix_resources:
profile.managed_outbound_ips = None
profile.outbound_ips = None
profile.outbound_ip_prefixes = None
if managed_outbound_ip_count:
profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=managed_outbound_ip_count
)
if outbound_ip_resources:
profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
public_ips=outbound_ip_resources
)
if outbound_ip_prefix_resources:
profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes=outbound_ip_prefix_resources
)
if outbound_ports:
profile.allocated_outbound_ports = outbound_ports
if idle_timeout:
profile.idle_timeout_in_minutes = idle_timeout
return profile
def is_load_balancer_profile_provided(managed_outbound_ip_count, outbound_ips, ip_prefixes,
outbound_ports, idle_timeout):
return any([managed_outbound_ip_count,
outbound_ips,
ip_prefixes,
outbound_ports,
idle_timeout])
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
load_balancer_outbound_ip_resources = None
if load_balancer_outbound_ips:
load_balancer_outbound_ip_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ips.split(',')]
return load_balancer_outbound_ip_resources
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
load_balancer_outbound_ip_prefix_resources = None
if load_balancer_outbound_ip_prefixes:
load_balancer_outbound_ip_prefix_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ip_prefixes.split(',')]
return load_balancer_outbound_ip_prefix_resources
| true | true |
f72d76a79d946ab57c0ea4783590716defa93ac3 | 571 | py | Python | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/auto_rest_bool_test_service/operations/__init__.py | ljhljh235/AutoRest | b9ab4000e9b93d16925db84d08bafc225b098f8e | [
"MIT"
] | 3 | 2018-03-20T22:36:32.000Z | 2021-07-15T02:36:51.000Z | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/auto_rest_bool_test_service/operations/__init__.py | ljhljh235/AutoRest | b9ab4000e9b93d16925db84d08bafc225b098f8e | [
"MIT"
] | null | null | null | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/auto_rest_bool_test_service/operations/__init__.py | ljhljh235/AutoRest | b9ab4000e9b93d16925db84d08bafc225b098f8e | [
"MIT"
] | 1 | 2019-07-20T12:20:03.000Z | 2019-07-20T12:20:03.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .bool_model_operations import BoolModelOperations
__all__ = [
'BoolModelOperations',
]
| 33.588235 | 76 | 0.563923 |
from .bool_model_operations import BoolModelOperations
__all__ = [
'BoolModelOperations',
]
| true | true |
f72d774084f94d2b1be0bd5b02c8349dacbc2579 | 2,049 | py | Python | filters/parse_interface.py | adblockplus/web.adblockplus.org | c2c570ce4f4296afc3577afe233c6b23b128f206 | [
"MIT"
] | 9 | 2016-01-29T18:05:29.000Z | 2021-10-06T04:21:55.000Z | filters/parse_interface.py | adblockplus/web.adblockplus.org | c2c570ce4f4296afc3577afe233c6b23b128f206 | [
"MIT"
] | 9 | 2015-04-06T19:03:32.000Z | 2019-05-28T13:34:55.000Z | filters/parse_interface.py | adblockplus/web.adblockplus.org | c2c570ce4f4296afc3577afe233c6b23b128f206 | [
"MIT"
] | 18 | 2015-04-06T17:42:31.000Z | 2021-10-06T04:26:29.000Z |
import re
import warnings
TYPE_REGEXP = r"(?:arrayof\s+)?\w+"


def parse_interface(interface_items):
    """Parse a mapping of IDL-style declarations into a sorted list of dicts.

    Keys of *interface_items* are declarations such as ``"int count"``,
    ``"readonly string name"`` or ``"string getName(int id)"``; values are
    dicts of extra metadata, updated in place with the parsed fields
    (type/name/modifier for properties; type="method", name, return_type and
    arguments for methods). Malformed declarations are skipped with a
    warning. The result is sorted by member name.
    """
    parsed = []
    # dict.items() works on both Python 2 and 3; the original used
    # iteritems(), which only exists on Python 2.
    for key, value in interface_items.items():
        if "(" in key:
            # Item is a method: "<return type> <name>(<arguments>)"
            match = re.match(r"^\s*(%s)\s+(\S+)\s*\(\s*([^\)]*)\s*\)\s*$" % TYPE_REGEXP, key)
            if not match:
                warnings.warn("Skipped malformed method: '%s'" % key)
                continue
            return_type, property_name, argument_string = match.groups()
            arguments = []
            if argument_string:
                for argument in argument_string.split(","):
                    if argument.strip():
                        match = re.match(r"^\s*(%s)\s+(\S+)\s*$" % TYPE_REGEXP, argument)
                        if not match:
                            warnings.warn("Skipped malformed argument: '%s'" % argument)
                            continue
                        argument_type, argument_name = match.groups()
                        arguments.append({
                            "name": argument_name,
                            "type": argument_type
                        })
            value.update({
                "type": "method",
                "name": property_name,
                "return_type": return_type,
                "arguments": arguments
            })
            parsed.append(value)
        else:
            # Item is a property: "[readonly] <type> <name>"
            match = re.match(r"^\s*(readonly\s+)?(%s)\s+(\S+)\s*$" % TYPE_REGEXP, key)
            if not match:
                warnings.warn("Skipped malformed property: '%s'" % key)
                continue
            property_modifier, property_type, property_name = match.groups()
            value.update({
                "type": property_type,
                "name": property_name,
                "modifier": property_modifier or ""
            })
            parsed.append(value)
    parsed.sort(key=lambda x: x["name"])
    return parsed
| 38.660377 | 94 | 0.455344 |
import re
import warnings
TYPE_REGEXP = r"(?:arrayof\s+)?\w+"
def parse_interface(interface_items):
parsed = []
for key, value in interface_items.iteritems():
if "(" in key:
match = re.match(r"^\s*(%s)\s+(\S+)\s*\(\s*([^\)]*)\s*\)\s*$" % TYPE_REGEXP, key)
if not match:
warnings.warn("Skipped malformed method: '%s'" % key)
continue
return_type, property_name, argument_string = match.groups()
arguments = []
if argument_string:
for argument in argument_string.split(","):
if argument.strip():
match = re.match(r"^\s*(%s)\s+(\S+)\s*$" % TYPE_REGEXP, argument)
if not match:
warnings.warn("Skipped malformed argument: '%s'" % argument)
continue
argument_type, argument_name = match.groups()
arguments.append({
"name": argument_name,
"type": argument_type
})
value.update({
"type": "method",
"name": property_name,
"return_type": return_type,
"arguments": arguments
})
parsed.append(value)
else:
match = re.match(r"^\s*(readonly\s+)?(%s)\s+(\S+)\s*$" % TYPE_REGEXP, key)
if not match:
warnings.warn("Skipped malformed property: '%s'" % key)
continue
property_modifier, property_type, property_name = match.groups()
value.update({
"type": property_type,
"name": property_name,
"modifier": property_modifier or ""
})
parsed.append(value)
parsed.sort(key=lambda x: x["name"])
return parsed
| true | true |
f72d78d5dc3108cc117be1ea0357004699e0b64f | 2,137 | py | Python | rlberry/utils/torch.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | rlberry/utils/torch.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | rlberry/utils/torch.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | import os
import re
import shutil
from subprocess import check_output, run, PIPE
import numpy as np
import torch
import logging
logger = logging.getLogger(__name__)
def get_gpu_memory_map():
    """Return the memory currently used on each GPU (MiB), one entry per device, via nvidia-smi."""
    raw = check_output(
        ["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"]
    )
    return list(map(int, raw.split()))
def least_used_device():
    """Return the CUDA device with the most available memory (least memory used).

    Raises:
        RuntimeError: If CUDA is unavailable or ``nvidia-smi`` is not on PATH.
    """
    if not torch.cuda.is_available():
        raise RuntimeError("cuda unavailable")
    if shutil.which("nvidia-smi") is None:
        # Implicit string concatenation instead of the original backslash
        # continuation, which embedded the continuation indentation inside
        # the message and read "most least memory used".
        raise RuntimeError(
            "nvidia-smi unavailable: "
            "cannot select device with least memory used."
        )
    memory_map = get_gpu_memory_map()
    device_id = np.argmin(memory_map)
    # Lazy %-formatting: the message is only built if INFO is enabled.
    logger.info(
        "Choosing GPU device: %s, memory used: %s", device_id, memory_map[device_id]
    )
    return torch.device("cuda:{}".format(device_id))
def choose_device(preferred_device, default_device="cpu"):
    """Return *preferred_device* if usable, otherwise fall back.

    "cuda:best" selects the CUDA device with the most free memory; if that
    selection fails (e.g. nvidia-smi missing), cuda:0 or cpu is tried
    instead. Any device that cannot allocate a tensor falls back to
    *default_device*.
    """
    if preferred_device == "cuda:best":
        try:
            preferred_device = least_used_device()
        except RuntimeError:
            # Plain string: the original used an f-string with no placeholders.
            logger.info(
                "Could not find least used device (nvidia-smi might be missing), use cuda:0 instead"
            )
            if torch.cuda.is_available():
                return choose_device("cuda:0")
            else:
                return choose_device("cpu")
    try:
        torch.zeros((1,), device=preferred_device)  # Test availability
    except (RuntimeError, AssertionError) as e:
        # Lazy %-formatting, plus the separating space that the original's
        # concatenated f-strings dropped ("...(err).Switching...").
        logger.info(
            "Preferred device %s unavailable (%s). Switching to default %s",
            preferred_device, e, default_device
        )
        return default_device
    return preferred_device
def get_memory(pid=None):
    """Return the GPU memory amounts (MiB) used by *pid* according to nvidia-smi.

    Args:
        pid: Process id to look up; defaults to the current process.

    Returns:
        A list of integers (one per matching nvidia-smi table row); empty if
        the process holds no GPU memory or nvidia-smi produced no output.
    """
    if not pid:
        pid = os.getpid()
    command = "nvidia-smi"
    result = run(
        command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True
    ).stdout
    # Raw strings: the original used "\|" in plain strings, which are
    # invalid escape sequences (DeprecationWarning on modern Python).
    m = re.findall(
        r"\| *[0-9] *" + str(pid) + r" *C *.*python.*? +([0-9]+).*\|",
        result,
        re.MULTILINE,
    )
    return [int(mem) for mem in m]
| 28.878378 | 101 | 0.617688 | import os
import re
import shutil
from subprocess import check_output, run, PIPE
import numpy as np
import torch
import logging
logger = logging.getLogger(__name__)
def get_gpu_memory_map():
result = check_output(
["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"]
)
return [int(x) for x in result.split()]
def least_used_device():
if not torch.cuda.is_available():
raise RuntimeError("cuda unavailable")
if shutil.which("nvidia-smi") is None:
raise RuntimeError(
"nvidia-smi unavailable: \
cannot select device with most least memory used."
)
memory_map = get_gpu_memory_map()
device_id = np.argmin(memory_map)
logger.info(
f"Choosing GPU device: {device_id}, " f"memory used: {memory_map[device_id]}"
)
return torch.device("cuda:{}".format(device_id))
def choose_device(preferred_device, default_device="cpu"):
if preferred_device == "cuda:best":
try:
preferred_device = least_used_device()
except RuntimeError:
logger.info(
f"Could not find least used device (nvidia-smi might be missing), use cuda:0 instead"
)
if torch.cuda.is_available():
return choose_device("cuda:0")
else:
return choose_device("cpu")
try:
torch.zeros((1,), device=preferred_device)
except (RuntimeError, AssertionError) as e:
logger.info(
f"Preferred device {preferred_device} unavailable ({e})."
f"Switching to default {default_device}"
)
return default_device
return preferred_device
def get_memory(pid=None):
if not pid:
pid = os.getpid()
command = "nvidia-smi"
result = run(
command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True
).stdout
m = re.findall(
"\| *[0-9] *" + str(pid) + " *C *.*python.*? +([0-9]+).*\|",
result,
re.MULTILINE,
)
return [int(mem) for mem in m]
| true | true |
f72d79ada97cca1cd2f1a76f278ef92d3f365260 | 18,711 | py | Python | train.py | jtiscione/doodlecritic | 3af8245330523109b7452d3afc7d8d25d43d182c | [
"MIT"
] | 4 | 2019-07-22T09:56:31.000Z | 2019-09-20T16:12:19.000Z | train.py | jtiscione/doodlecritic | 3af8245330523109b7452d3afc7d8d25d43d182c | [
"MIT"
] | 1 | 2021-09-30T05:31:32.000Z | 2021-11-04T00:01:49.000Z | train.py | jtiscione/doodlecritic | 3af8245330523109b7452d3afc7d8d25d43d182c | [
"MIT"
] | null | null | null | import sys
import os
from os.path import expanduser
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.onnx
import re
import json
from PIL import Image, ImageDraw
import torch
import numpy as np
# Training script- trains a Pytorch model against the Google Quickdraw dataset:
# https://github.com/googlecreativelab/quickdraw-dataset
#
# Specifically, it uses the "simplified Drawing files":
#
# https://console.cloud.google.com/storage/browser/quickdraw_dataset/full/simplified
#
# Also see https://www.kaggle.com/google/tinyquickdraw for a single downloadable tar file
# with about 50 million samples separated into 343 classes, which is where I got mine.
#
# It expects those files to be in ~/data/quickdraw. Specify any alternate path on the command line.
#
# As output it generates two files: doodles.pth (internal format) and doodles.onnx (ONNX export format).
#
# The model used here is a convolutional neural network accepting 1x64x64 inputs
# (i.e. black-and-white 64x64 images). Output is 344 neurons (i.e. one per label) with an extra neuron
# corresponding to label "nothing".
#
# NOTES:
#
# If doodles.pth is found (typically saved from a previous run), it will be loaded into the
# current model; otherwise it will start with a set of random weights. File size is approx. 300 MB.
#
# If it finds at any point during training that the output files doodles.pth or doodles.onnx
# are not on the drive, it will write new copies immediately with its current state (even though
# this means the first versions will only contain random weights). Deleting the files
# generates fresh copies, and so does finishing a training epoch (overwriting the prior versions).
# Because the data set is so immense, each epoch takes several hours to complete.
# In practice, with this model, performance levels off after about 3-4 epochs, with the network
# agreeing with Google's classification about 73% of the time.
#
# This way, if you need to edit a hyperparameter or go to work, you can pause execution by
# deleting the current doodles.pth and doodles.onnx files, letting it write new ones,
# and then hitting Ctrl-C. Typically you will want to adjust the learning rate downward
# or experiment with a different optimizer after the script has run for a few hours and
# its performance has reached a plateau. After you make your edits the script will pick up
# where it left off.
#
# If SAVE_BACKUP_FILES is set to True, the script will save backups as training progresses.
# Each time performance reaches a new record, a file will be saved with a filename indicating the
# new record number of correct responses. This is to avoid losing progress if the script crashes.
# (Raising the batch size too high can cause spurious out-of-memory errors at random times.)
# Specify data folder as command line argument; default is ~/data/quickdraw
# Root of the Quickdraw .ndjson files; "~" is expanded below.
DATA_DIRECTORY = '~/data/quickdraw'
if len(sys.argv) > 1:
    DATA_DIRECTORY = sys.argv[1]
if DATA_DIRECTORY[0] == '~':
    DATA_DIRECTORY = expanduser(DATA_DIRECTORY)
# Standard industry practice: Jack this number up as high as you can, then carefully lower it
# until the script stops crashing. Final value is dependent on GPU memory.
# This is a safe batch size to use on an RTX 2060 with 6 GB.
BATCH_SIZE = 1000
# Hyperparameters; both SGD and Adam work well, at least in the beginning; use SGD by default
OPTIMIZER_NAME = 'SGD'
SGD_LEARNING_RATE = 0.01
SGD_MOMENTUM = 0
ADAM_LEARNING_RATE = 0.001
ADAM_BETAS = (0.9, 0.99)
ADAM_EPSILON = 0.0001
# Output/cache file locations (relative to the working directory).
INDEX_CACHE_FILE = './index_cache.pkl'
LABELS_FILE = './labels.txt'
STATE_DICT_FILE = './doodles.pth'
ONNX_FILE = './doodles.onnx'
SAVE_BACKUP_FILES = True
NUMBERED_STATE_DICT_FILE_TEMPLATE = './doodles_{}_of_{}.pth'
NUMBERED_ONNX_FILE_TEMPLATE = './doodles_{}_of_{}.onnx'
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# If it's installed, turn this on to enable NVidia's Apex AMP Pytorch extension.
# This will let us do calculations in FP16 on the GPU which will save memory on the card
# and let us raise the batch size. It will also leverage RTX tensor cores on RTX cards.
# Default is set to False, because compiling and installing AMP is an involved process-
# NVidia's CUDA Toolkit to be installed on your system before you can compile it using pip.
MIXED_PRECISION = False
if MIXED_PRECISION and torch.cuda.is_available():
    # See if the AMP Pytorch extension has been installed; otherwise stick to standard FP32.
    # If we are using mixed precision we can raise the batch size but keep it a multiple of 8.
    # All tensor dimensions must be multiples of 8 to trigger NVidia's tensor core optimizations.
    try:
        from apex import amp, optimizers
        MIXED_PRECISION = True
        BATCH_SIZE = int(BATCH_SIZE * 1.6) # Raising it by 60% (1000 -> 1600, still a multiple of 8)
        print('Using mixed precision.')
    except ImportError:
        MIXED_PRECISION = False
# This is a torch DataSet implementation that makes the following assumptions:
#
# 1. Data consists of a set of text files with ".ndjson" extensions in the specified directory.
# 2. Each line in the .ndjson file is a JSON string with all data for a single sample.
# 3. Each line of JSON has the following format (omitting extraneous fields):
# {"word":"elephant","drawing":[[[0, 1, 10],[25, 103, 163]],[[4,15,134,234,250],[27,22,6,4,0]]]}
# Array "drawing" has the brush strokes, each stroke a pair of arrays with x and y coordinates on a 256x256 grid.
# 4. We can build our label list by only looking at the first line of each file. (All lines have same value for "word".)
class QuickDrawDataset(torch.utils.data.Dataset):
# Take the batch size, so we know how much to pad with all-zero samples mapping to the "blank" channel.
# This way we ensure we deliver full-sized batches interspersed with a few blank samples mapping to label "nothing".
def __init__(self, dataDir, batch_size):
super(QuickDrawDataset, self).__init__()
print('Data folder: ' + dataDir)
self.dataDir = dataDir
self.filenames = list(filter(lambda x: x.endswith(".ndjson"), sorted(os.listdir(dataDir)))) #[1:20]
self.filenameByIndex = []
self.fileByteOffsetByIndex = []
self.labelListIndices = {}
self.labelList = []
for filename in self.filenames:
print('Indexing ' + filename)
file = open(dataDir + "/" + filename, "r")
byte_offset = 0
word = None
for line in file:
if (word == None):
words = re.findall('\"word\":\"([\w\s-]+)\"', line)
word = words[0]
self.labelListIndices[word] = len(self.labelList)
self.labelList.append(word)
# Only use the ones Google recognizes
if (len(re.findall('\"recognized\":true', line)) > 0):
self.filenameByIndex.append(filename)
self.fileByteOffsetByIndex.append(byte_offset)
byte_offset += len(line)
file.close()
self.labelListIndices['nothing'] = len(self.labelList)
self.labelList.append('nothing')
if MIXED_PRECISION:
# NVidia really wants tensor dimensions to be multiples of 8, make sure here
extra_nothings = 0
while len(self.labelList) % 8 > 0:
extra_nothings += 1
self.labelListIndices['nothing_{}'.format(extra_nothings)] = len(self.labelList)
self.labelList.append('nothing_{}'.format(extra_nothings))
self.paddingLength = batch_size - (len(self.filenameByIndex) % batch_size)
print('padding length {}'.format(self.paddingLength))
def __len__(self):
return len(self.filenameByIndex) + self.paddingLength
def __getitem__(self, idx):
if idx >= len(self.filenameByIndex):
# NULL sample
return torch.zeros(1, 64, 64, dtype=torch.float), self.labelListIndices['nothing']
filename = self.filenameByIndex[idx]
byte_offset = self.fileByteOffsetByIndex[idx]
file = open(self.dataDir + '/' + filename, 'r')
file.seek(byte_offset)
line = file.readline()
file.close()
# Convert line containing brush stroke coordinate list to a 256x256 image tensor using PIL
entry = json.loads(line)
drawing = entry.get('drawing')
im = Image.new("L", (256, 256))
draw = ImageDraw.Draw(im)
for stroke in drawing:
x_coords = stroke[0]
y_coords = stroke[1]
for i in range(len(x_coords) - 1):
draw.line((x_coords[i], y_coords[i], x_coords[i + 1], y_coords[i + 1]), fill=255, width=5)
im = im.resize((64, 64), Image.ANTIALIAS)
word = entry.get('word')
imageTensor = torch.tensor(np.array(im) / 256, dtype=torch.float)
# Alter image slightly to look like the inputs we're eventually going to get from the client.
# This is a limitation imposed by JavaScript which implements "antialiasing" on downsized canvases by
# nearest-neighbor downsampling, smoothed onscreen by a WebGL filter that looks nice but doesn't alter the image data,
# so we only get two-color jagged images.
#
# Tedious workarounds are possible: https://stackoverflow.com/questions/2303690/resizing-an-image-in-an-html5-canvas
THRESHOLD = 0.1
imageTensor[imageTensor >= THRESHOLD] = 1.0
imageTensor[imageTensor < THRESHOLD] = 0.0
imageTensor = imageTensor.unsqueeze(0)
return imageTensor, self.labelListIndices.get(word)
# Takes input of size Nx1x64x64, a batch of N black and white 64x64 images.
# Applies two convolutional layers and three fully connected layers.
class CNNModel(nn.Module):
# input_size is 64 (input samples are 64x64 images); num_classes is 344
def __init__(self, input_size, num_classes):
super(CNNModel, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2))
dimension = int(64 * pow(input_size / 4, 2))
self.fc1 = nn.Sequential(nn.Linear(dimension, int(dimension / 4)), nn.Dropout(0.25))
self.fc2 = nn.Sequential(nn.Linear(int(dimension / 4), int(dimension / 8)), nn.Dropout(0.25))
self.fc3 = nn.Sequential(nn.Linear(int(dimension / 8), num_classes))
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
return out
# Main part
if __name__ == '__main__':
if os.path.isfile(INDEX_CACHE_FILE):
print("Loading {}".format(INDEX_CACHE_FILE))
infile = open(INDEX_CACHE_FILE, 'rb')
dataSet = pickle.load(infile)
infile.close()
else:
dataSet = QuickDrawDataset(DATA_DIRECTORY, BATCH_SIZE)
outfile = open(INDEX_CACHE_FILE, 'wb')
pickle.dump(dataSet, outfile)
outfile.close()
print("Saved {}".format(INDEX_CACHE_FILE))
if (os.path.isfile(LABELS_FILE) == False):
with open(LABELS_FILE, 'w') as f:
for label in dataSet.labelList:
f.write("%s\n" % label)
f.close()
print("Saved {}".format(LABELS_FILE))
print('Total number of labels: {}'.format(len(dataSet.labelList)))
print('Total number of samples: {}'.format(len(dataSet)))
randomSampler = torch.utils.data.RandomSampler(dataSet)
dataLoader = torch.utils.data.DataLoader(dataSet, batch_size = BATCH_SIZE, sampler = randomSampler, num_workers=4, pin_memory=True)
model = CNNModel(input_size=64, num_classes=len(dataSet.labelList)).to(DEVICE)
if (os.path.isfile(STATE_DICT_FILE)):
# We found an existing doodles.pth file! Instead of starting from scratch we'll load this one.
# and continue training it.
print("Loading {}".format(STATE_DICT_FILE))
state_dict = torch.load(STATE_DICT_FILE)
model.load_state_dict(state_dict)
optimizer = None
if (OPTIMIZER_NAME == 'SGD'):
optimizer = optim.SGD(model.parameters(), lr = SGD_LEARNING_RATE, momentum=SGD_MOMENTUM)
print('Using SGD with learning rate {} and momentum {}'.format(SGD_LEARNING_RATE, SGD_MOMENTUM))
elif (OPTIMIZER_NAME == 'Adam'):
if MIXED_PRECISION:
optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE, betas = ADAM_BETAS, eps = ADAM_EPSILON)
else:
optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE)
print('Using Adam with learning rate {}'.format(ADAM_LEARNING_RATE))
else:
print('No optimizer specified!')
if MIXED_PRECISION:
# Using NVidia's AMP Pytorch extension
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
criterion = nn.CrossEntropyLoss()
ROLLING_AVERAGE_RUN_LENGTH = 100
rolling = np.zeros(0)
record_rolling_average = 0
count = 0
# On my computer each epoch takes about 4 hours; the script consumes ~250 watts or about 1 kWh per epoch.
# Performance reaches a plateau after 3-4 epochs.
for epoch in range(4):
print('Epoch: {}'.format(epoch))
batch_number = 0
for i, (images, labels) in enumerate(dataLoader):
count = count + 1
images = images.to(DEVICE)
labels = labels.to(DEVICE)
optimizer.zero_grad()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == labels).sum().item()
if (count < ROLLING_AVERAGE_RUN_LENGTH):
rolling = np.insert(rolling, 0, correct)
else:
rolling = np.roll(rolling, 1)
rolling[0] = correct
rolling_average = int(np.mean(rolling))
loss = criterion(outputs, labels)
if MIXED_PRECISION:
# Use of FP16 requires loss scaling, due to underflow error.
# See https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
print('EPOCH: {} BATCH: {} SIZE: {} CORRECT: {} (ROLLING AVG: {})'.format(epoch, batch_number, BATCH_SIZE, correct, rolling_average))
batch_number += 1
# print(loss.item())
# To be safe, save model whenever performance reaches a new high
if (count < 2 * ROLLING_AVERAGE_RUN_LENGTH): # (once rolling average has had time to stabilize)
record_rolling_average = max(rolling_average, record_rolling_average)
else:
if (rolling_average > record_rolling_average):
# Save model with a munged filename; e.g. doodles_706.pth
if (SAVE_BACKUP_FILES):
backupPth = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)
torch.save(model.state_dict(), backupPth)
print('Saved model file {}'.format(backupPth))
# Delete the last backup .pth file we wrote to avoid filling up the drive
if (record_rolling_average > 0):
old_file = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)
if os.path.exists(old_file):
os.remove(old_file)
# Same for ONNX
backupOnnx = NUMBERED_ONNX_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)
if MIXED_PRECISION:
with amp.disable_casts():
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)
else:
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)
print('Saved ONNX file {}'.format(backupOnnx))
# Delete the last backup ONNX file we wrote to avoid filling up the drive
if (record_rolling_average > 0):
old_file = NUMBERED_ONNX_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)
if os.path.exists(old_file):
os.remove(old_file)
record_rolling_average = rolling_average
# Deleting the model file during training triggers a fresh rewrite:
if (os.path.isfile(STATE_DICT_FILE) == False):
torch.save(model.state_dict(), STATE_DICT_FILE)
print('Saved model file {}'.format(STATE_DICT_FILE))
# ONNX: same policy
if (os.path.isfile(ONNX_FILE) == False):
if MIXED_PRECISION:
with amp.disable_casts():
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)
else:
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)
print('Exported ONNX file {}'.format(ONNX_FILE))
# Epoch finished
# Save the current model at the end of an epoch
torch.save(model.state_dict(), STATE_DICT_FILE)
# Export ONNX with loudmouth flag set
if (MIXED_PRECISION):
with amp.disable_casts():
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)
else:
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)
print('EPOCH {} FINISHED, SAVED {} AND {}'.format(epoch, STATE_DICT_FILE, ONNX_FILE))
| 47.85422 | 149 | 0.644754 | import sys
import os
from os.path import expanduser
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.onnx
import re
import json
from PIL import Image, ImageDraw
import torch
import numpy as np
#
# This way, if you need to edit a hyperparameter or go to work, you can pause execution by
# deleting the current doodles.pth and doodles.onnx files, letting it write new ones,
# and then hitting Ctrl-C. Typically you will want to adjust the learning rate downward
# or experiment with a different optimizer after the script has run for a few hours and
# its performance has reached a plateau. After you make your edits the script will pick up
# where it left off.
#
# If SAVE_BACKUP_FILES is set to True, the script will save backups as training progresses.
# Each time performance reaches a new record, a file will be saved with a filename indicating the
# new record number of correct responses. This is to avoid losing progress if the script crashes.
# (Raising the batch size too high can cause spurious out-of-memory errors at random times.)
# Specify data folder as command line argument; default is ~/data/quickdraw
DATA_DIRECTORY = '~/data/quickdraw'
if len(sys.argv) > 1:
DATA_DIRECTORY = sys.argv[1]
if DATA_DIRECTORY[0] == '~':
DATA_DIRECTORY = expanduser(DATA_DIRECTORY)
# Standard industry practice: Jack this number up as high as you can, then carefully lower it
# until the script stops crashing. Final value is dependent on GPU memory.
# This is a safe batch size to use on an RTX 2060 with 6 GB.
BATCH_SIZE = 1000
# Hyperparameters; both SGD and Adam work well, at least in the beginning; use SGD by default
OPTIMIZER_NAME = 'SGD'
SGD_LEARNING_RATE = 0.01
SGD_MOMENTUM = 0
ADAM_LEARNING_RATE = 0.001
ADAM_BETAS = (0.9, 0.99)
ADAM_EPSILON = 0.0001
INDEX_CACHE_FILE = './index_cache.pkl'
LABELS_FILE = './labels.txt'
STATE_DICT_FILE = './doodles.pth'
ONNX_FILE = './doodles.onnx'
SAVE_BACKUP_FILES = True
NUMBERED_STATE_DICT_FILE_TEMPLATE = './doodles_{}_of_{}.pth'
NUMBERED_ONNX_FILE_TEMPLATE = './doodles_{}_of_{}.onnx'
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# If it's installed, turn this on to enable NVidia's Apex AMP Pytorch extension.
# This will let us do calculations in FP16 on the GPU which will save memory on the card
# and let us raise the batch size. It will also leverage RTX tensor cores on RTX cards.
# Default is set to False, because compiling and installing AMP is an involved process-
# NVidia's CUDA Toolkit to be installed on your system before you can compile it using pip.
MIXED_PRECISION = False
if MIXED_PRECISION and torch.cuda.is_available():
try:
from apex import amp, optimizers
MIXED_PRECISION = True
BATCH_SIZE = int(BATCH_SIZE * 1.6) # Raising it by 60%
print('Using mixed precision.')
except ImportError:
MIXED_PRECISION = False
# This is a torch DataSet implementation that makes the following assumptions:
#
# 1. Data consists of a set of text files with ".ndjson" extensions in the specified directory.
# 2. Each line in the .ndjson file is a JSON string with all data for a single sample.
# 3. Each line of JSON has the following format (omitting extraneous fields):
# {"word":"elephant","drawing":[[[0, 1, 10],[25, 103, 163]],[[4,15,134,234,250],[27,22,6,4,0]]]}
# Array "drawing" has the brush strokes, each stroke a pair of arrays with x and y coordinates on a 256x256 grid.
# 4. We can build our label list by only looking at the first line of each file. (All lines have same value for "word".)
class QuickDrawDataset(torch.utils.data.Dataset):
# Take the batch size, so we know how much to pad with all-zero samples mapping to the "blank" channel.
# This way we ensure we deliver full-sized batches interspersed with a few blank samples mapping to label "nothing".
def __init__(self, dataDir, batch_size):
super(QuickDrawDataset, self).__init__()
print('Data folder: ' + dataDir)
self.dataDir = dataDir
self.filenames = list(filter(lambda x: x.endswith(".ndjson"), sorted(os.listdir(dataDir)))) #[1:20]
self.filenameByIndex = []
self.fileByteOffsetByIndex = []
self.labelListIndices = {}
self.labelList = []
for filename in self.filenames:
print('Indexing ' + filename)
file = open(dataDir + "/" + filename, "r")
byte_offset = 0
word = None
for line in file:
if (word == None):
words = re.findall('\"word\":\"([\w\s-]+)\"', line)
word = words[0]
self.labelListIndices[word] = len(self.labelList)
self.labelList.append(word)
# Only use the ones Google recognizes
if (len(re.findall('\"recognized\":true', line)) > 0):
self.filenameByIndex.append(filename)
self.fileByteOffsetByIndex.append(byte_offset)
byte_offset += len(line)
file.close()
self.labelListIndices['nothing'] = len(self.labelList)
self.labelList.append('nothing')
if MIXED_PRECISION:
# NVidia really wants tensor dimensions to be multiples of 8, make sure here
extra_nothings = 0
while len(self.labelList) % 8 > 0:
extra_nothings += 1
self.labelListIndices['nothing_{}'.format(extra_nothings)] = len(self.labelList)
self.labelList.append('nothing_{}'.format(extra_nothings))
self.paddingLength = batch_size - (len(self.filenameByIndex) % batch_size)
print('padding length {}'.format(self.paddingLength))
def __len__(self):
return len(self.filenameByIndex) + self.paddingLength
def __getitem__(self, idx):
if idx >= len(self.filenameByIndex):
# NULL sample
return torch.zeros(1, 64, 64, dtype=torch.float), self.labelListIndices['nothing']
filename = self.filenameByIndex[idx]
byte_offset = self.fileByteOffsetByIndex[idx]
file = open(self.dataDir + '/' + filename, 'r')
file.seek(byte_offset)
line = file.readline()
file.close()
# Convert line containing brush stroke coordinate list to a 256x256 image tensor using PIL
entry = json.loads(line)
drawing = entry.get('drawing')
im = Image.new("L", (256, 256))
draw = ImageDraw.Draw(im)
for stroke in drawing:
x_coords = stroke[0]
y_coords = stroke[1]
for i in range(len(x_coords) - 1):
draw.line((x_coords[i], y_coords[i], x_coords[i + 1], y_coords[i + 1]), fill=255, width=5)
im = im.resize((64, 64), Image.ANTIALIAS)
word = entry.get('word')
imageTensor = torch.tensor(np.array(im) / 256, dtype=torch.float)
# Alter image slightly to look like the inputs we're eventually going to get from the client.
# so we only get two-color jagged images.
#
# Tedious workarounds are possible: https://stackoverflow.com/questions/2303690/resizing-an-image-in-an-html5-canvas
THRESHOLD = 0.1
imageTensor[imageTensor >= THRESHOLD] = 1.0
imageTensor[imageTensor < THRESHOLD] = 0.0
imageTensor = imageTensor.unsqueeze(0)
return imageTensor, self.labelListIndices.get(word)
# Takes input of size Nx1x64x64, a batch of N black and white 64x64 images.
# Applies two convolutional layers and three fully connected layers.
class CNNModel(nn.Module):
# input_size is 64 (input samples are 64x64 images); num_classes is 344
def __init__(self, input_size, num_classes):
super(CNNModel, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2))
dimension = int(64 * pow(input_size / 4, 2))
self.fc1 = nn.Sequential(nn.Linear(dimension, int(dimension / 4)), nn.Dropout(0.25))
self.fc2 = nn.Sequential(nn.Linear(int(dimension / 4), int(dimension / 8)), nn.Dropout(0.25))
self.fc3 = nn.Sequential(nn.Linear(int(dimension / 8), num_classes))
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
return out
# Main part
if __name__ == '__main__':
if os.path.isfile(INDEX_CACHE_FILE):
print("Loading {}".format(INDEX_CACHE_FILE))
infile = open(INDEX_CACHE_FILE, 'rb')
dataSet = pickle.load(infile)
infile.close()
else:
dataSet = QuickDrawDataset(DATA_DIRECTORY, BATCH_SIZE)
outfile = open(INDEX_CACHE_FILE, 'wb')
pickle.dump(dataSet, outfile)
outfile.close()
print("Saved {}".format(INDEX_CACHE_FILE))
if (os.path.isfile(LABELS_FILE) == False):
with open(LABELS_FILE, 'w') as f:
for label in dataSet.labelList:
f.write("%s\n" % label)
f.close()
print("Saved {}".format(LABELS_FILE))
print('Total number of labels: {}'.format(len(dataSet.labelList)))
print('Total number of samples: {}'.format(len(dataSet)))
randomSampler = torch.utils.data.RandomSampler(dataSet)
dataLoader = torch.utils.data.DataLoader(dataSet, batch_size = BATCH_SIZE, sampler = randomSampler, num_workers=4, pin_memory=True)
model = CNNModel(input_size=64, num_classes=len(dataSet.labelList)).to(DEVICE)
if (os.path.isfile(STATE_DICT_FILE)):
# We found an existing doodles.pth file! Instead of starting from scratch we'll load this one.
print("Loading {}".format(STATE_DICT_FILE))
state_dict = torch.load(STATE_DICT_FILE)
model.load_state_dict(state_dict)
optimizer = None
if (OPTIMIZER_NAME == 'SGD'):
optimizer = optim.SGD(model.parameters(), lr = SGD_LEARNING_RATE, momentum=SGD_MOMENTUM)
print('Using SGD with learning rate {} and momentum {}'.format(SGD_LEARNING_RATE, SGD_MOMENTUM))
elif (OPTIMIZER_NAME == 'Adam'):
if MIXED_PRECISION:
optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE, betas = ADAM_BETAS, eps = ADAM_EPSILON)
else:
optimizer = optim.Adam(model.parameters(), lr = ADAM_LEARNING_RATE)
print('Using Adam with learning rate {}'.format(ADAM_LEARNING_RATE))
else:
print('No optimizer specified!')
if MIXED_PRECISION:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
criterion = nn.CrossEntropyLoss()
ROLLING_AVERAGE_RUN_LENGTH = 100
rolling = np.zeros(0)
record_rolling_average = 0
count = 0
# On my computer each epoch takes about 4 hours; the script consumes ~250 watts or about 1 kWh per epoch.
# Performance reaches a plateau after 3-4 epochs.
for epoch in range(4):
print('Epoch: {}'.format(epoch))
batch_number = 0
for i, (images, labels) in enumerate(dataLoader):
count = count + 1
images = images.to(DEVICE)
labels = labels.to(DEVICE)
optimizer.zero_grad()
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == labels).sum().item()
if (count < ROLLING_AVERAGE_RUN_LENGTH):
rolling = np.insert(rolling, 0, correct)
else:
rolling = np.roll(rolling, 1)
rolling[0] = correct
rolling_average = int(np.mean(rolling))
loss = criterion(outputs, labels)
if MIXED_PRECISION:
# Use of FP16 requires loss scaling, due to underflow error.
# See https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
print('EPOCH: {} BATCH: {} SIZE: {} CORRECT: {} (ROLLING AVG: {})'.format(epoch, batch_number, BATCH_SIZE, correct, rolling_average))
batch_number += 1
# print(loss.item())
# To be safe, save model whenever performance reaches a new high
if (count < 2 * ROLLING_AVERAGE_RUN_LENGTH): # (once rolling average has had time to stabilize)
record_rolling_average = max(rolling_average, record_rolling_average)
else:
if (rolling_average > record_rolling_average):
# Save model with a munged filename; e.g. doodles_706.pth
if (SAVE_BACKUP_FILES):
backupPth = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)
torch.save(model.state_dict(), backupPth)
print('Saved model file {}'.format(backupPth))
# Delete the last backup .pth file we wrote to avoid filling up the drive
if (record_rolling_average > 0):
old_file = NUMBERED_STATE_DICT_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)
if os.path.exists(old_file):
os.remove(old_file)
# Same for ONNX
backupOnnx = NUMBERED_ONNX_FILE_TEMPLATE.format(rolling_average, BATCH_SIZE)
if MIXED_PRECISION:
with amp.disable_casts():
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)
else:
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, backupOnnx, verbose=False)
print('Saved ONNX file {}'.format(backupOnnx))
# Delete the last backup ONNX file we wrote to avoid filling up the drive
if (record_rolling_average > 0):
old_file = NUMBERED_ONNX_FILE_TEMPLATE.format(record_rolling_average, BATCH_SIZE)
if os.path.exists(old_file):
os.remove(old_file)
record_rolling_average = rolling_average
# Deleting the model file during training triggers a fresh rewrite:
if (os.path.isfile(STATE_DICT_FILE) == False):
torch.save(model.state_dict(), STATE_DICT_FILE)
print('Saved model file {}'.format(STATE_DICT_FILE))
# ONNX: same policy
if (os.path.isfile(ONNX_FILE) == False):
if MIXED_PRECISION:
with amp.disable_casts():
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)
else:
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=False)
print('Exported ONNX file {}'.format(ONNX_FILE))
# Epoch finished
# Save the current model at the end of an epoch
torch.save(model.state_dict(), STATE_DICT_FILE)
# Export ONNX with loudmouth flag set
if (MIXED_PRECISION):
with amp.disable_casts():
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)
else:
dummy_input = torch.randn(1, 1, 64, 64).to(DEVICE)
torch.onnx.export(model, dummy_input, ONNX_FILE, verbose=True)
print('EPOCH {} FINISHED, SAVED {} AND {}'.format(epoch, STATE_DICT_FILE, ONNX_FILE))
| true | true |
f72d7b59f21fe73a417f27ac686824236356ac7e | 884 | py | Python | image_styles/urls.py | fotorius/django-image-styles | 29680851faf413c14b8b3e78c651725ce1e9c071 | [
"BSD-2-Clause"
] | null | null | null | image_styles/urls.py | fotorius/django-image-styles | 29680851faf413c14b8b3e78c651725ce1e9c071 | [
"BSD-2-Clause"
] | null | null | null | image_styles/urls.py | fotorius/django-image-styles | 29680851faf413c14b8b3e78c651725ce1e9c071 | [
"BSD-2-Clause"
] | null | null | null | from django.urls import include, re_path,path
from .views import EffectUpdateView,EffectCreateView,EffectCreateInitView
from .views import StyleView,RenderImageView
from .views import ManageImageStylesView
app_name = 'image_styles'
urlpatterns = [
path('',ManageImageStylesView.as_view(),name='manage_image_styles'),
path('effect/init/<int:style_id>/',EffectCreateInitView.as_view(),name='effect_create_init'),
path('effect/<int:style_id>/<slug:effect_name>/',EffectCreateView.as_view(),name='effect_create'),
path('effect/<int:effect_id>/<slug:effect_name>/update/',EffectUpdateView.as_view(),name='effect_update'),
path('style/',StyleView.as_view(),name='style_create'),
path('style/<int:style_id>/',StyleView.as_view(),name='style_update'),
#re_path(r'^(?P<style_name>[\w_-]+)/(?P<path>[^\s/$.?#].*)',RenderImageView.as_view(),name='render_image'),
]
| 52 | 111 | 0.743213 | from django.urls import include, re_path,path
from .views import EffectUpdateView,EffectCreateView,EffectCreateInitView
from .views import StyleView,RenderImageView
from .views import ManageImageStylesView
app_name = 'image_styles'
urlpatterns = [
path('',ManageImageStylesView.as_view(),name='manage_image_styles'),
path('effect/init/<int:style_id>/',EffectCreateInitView.as_view(),name='effect_create_init'),
path('effect/<int:style_id>/<slug:effect_name>/',EffectCreateView.as_view(),name='effect_create'),
path('effect/<int:effect_id>/<slug:effect_name>/update/',EffectUpdateView.as_view(),name='effect_update'),
path('style/',StyleView.as_view(),name='style_create'),
path('style/<int:style_id>/',StyleView.as_view(),name='style_update'),
]
| true | true |
f72d7cd9ced0441cf171005a09ff27dc4702f2d8 | 650 | py | Python | videos/migrations/0002_auto_20180112_0409.py | mleger45/turnex | 2b805c3681fe6ce3ddad403270c09ac9900fbe7d | [
"MIT"
] | null | null | null | videos/migrations/0002_auto_20180112_0409.py | mleger45/turnex | 2b805c3681fe6ce3ddad403270c09ac9900fbe7d | [
"MIT"
] | 1 | 2021-04-12T05:14:28.000Z | 2021-04-12T05:14:28.000Z | videos/migrations/0002_auto_20180112_0409.py | mleger45/turnex | 2b805c3681fe6ce3ddad403270c09ac9900fbe7d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-01-12 04:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('videos', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='title',
field=models.CharField(default='n/a', max_length=10),
preserve_default=False,
),
migrations.AlterField(
model_name='video',
name='resource',
field=models.FileField(upload_to='board_media'),
),
]
| 24.074074 | 65 | 0.583077 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('videos', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='title',
field=models.CharField(default='n/a', max_length=10),
preserve_default=False,
),
migrations.AlterField(
model_name='video',
name='resource',
field=models.FileField(upload_to='board_media'),
),
]
| true | true |
f72d7e72a009707d55813c1c29a5c3ce6628c6cf | 41 | py | Python | sciwing/engine/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 50 | 2019-09-13T10:32:29.000Z | 2022-02-14T16:52:53.000Z | sciwing/engine/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 31 | 2019-09-03T11:06:03.000Z | 2021-08-20T14:57:09.000Z | sciwing/engine/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 9 | 2019-09-16T03:25:15.000Z | 2021-05-11T10:28:25.000Z | from sciwing.engine.engine import Engine
| 20.5 | 40 | 0.853659 | from sciwing.engine.engine import Engine
| true | true |
f72d7e82dbcf325ad6a11fd6cfb982abaa5967b6 | 522 | py | Python | data_structures/array/array_file.py | Nobodylesszb/python_module | 37d2cdcf89a3ff02a9e560696a059cec9272bd1f | [
"MIT"
] | null | null | null | data_structures/array/array_file.py | Nobodylesszb/python_module | 37d2cdcf89a3ff02a9e560696a059cec9272bd1f | [
"MIT"
] | null | null | null | data_structures/array/array_file.py | Nobodylesszb/python_module | 37d2cdcf89a3ff02a9e560696a059cec9272bd1f | [
"MIT"
] | null | null | null | import array
import binascii
import tempfile
a = array.array('i', range(5))
print('A1:', a)
# Write the array of numbers to a temporary file
output = tempfile.NamedTemporaryFile()
a.tofile(output.file) # must pass an *actual* file
output.flush()
# Read the raw data
with open(output.name, 'rb') as input:
raw_data = input.read()
print('Raw Contents:', binascii.hexlify(raw_data))
# Read the data into an array
input.seek(0)
a2 = array.array('i')
a2.fromfile(input, len(a))
print('A2:', a2) | 23.727273 | 54 | 0.676245 | import array
import binascii
import tempfile
a = array.array('i', range(5))
print('A1:', a)
output = tempfile.NamedTemporaryFile()
a.tofile(output.file)
output.flush()
with open(output.name, 'rb') as input:
raw_data = input.read()
print('Raw Contents:', binascii.hexlify(raw_data))
input.seek(0)
a2 = array.array('i')
a2.fromfile(input, len(a))
print('A2:', a2) | true | true |
f72d80f16582b9bfbe602acdb4fb855e80acddf4 | 29,858 | py | Python | pylibs/schema.py | Leedehai/score | 1683368755cf7e1c11d1e924624a0d1f02c9cf52 | [
"MIT"
] | null | null | null | pylibs/schema.py | Leedehai/score | 1683368755cf7e1c11d1e924624a0d1f02c9cf52 | [
"MIT"
] | null | null | null | pylibs/schema.py | Leedehai/score | 1683368755cf7e1c11d1e924624a0d1f02c9cf52 | [
"MIT"
] | null | null | null | # https://github.com/keleshev/schema
# Copyright (c) 2012 Vladimir Keleshev, <vladimir@keleshev.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# yapf: disable
# Source code start: updated Aug 18, 2020
import re
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
__version__ = "0.7.3"
# Public API: the names exported by `from schema import *`.
__all__ = [
    "Schema",
    "And",
    "Or",
    "Regex",
    "Optional",
    "Use",
    "Forbidden",
    "Const",
    "Literal",
    "SchemaError",
    "SchemaWrongKeyError",
    "SchemaMissingKeyError",
    "SchemaForbiddenKeyError",
    "SchemaUnexpectedTypeError",
    "SchemaOnlyOneAllowedError",
]
class SchemaError(Exception):
    """Error during Schema validation."""
    def __init__(self, autos, errors=None):
        # Normalize both message channels to lists so `code` can iterate
        # them uniformly regardless of how the error was raised.
        self.autos = autos if type(autos) is list else [autos]
        self.errors = errors if type(errors) is list else [errors]
        Exception.__init__(self, self.code)
    @property
    def code(self):
        """Build the error message, dropping Nones and duplicate entries.

        User-supplied error strings take precedence over auto-generated ones.
        """
        def uniq(seq):
            # Remove duplicates while keeping the first occurrence of each.
            seen = set()
            deduped = []
            for item in seq:
                if item not in seen:
                    seen.add(item)
                    deduped.append(item)
            return deduped
        error_list = uniq(e for e in self.errors if e is not None)
        if error_list:
            return "\n".join(error_list)
        return "\n".join(uniq(a for a in self.autos if a is not None))
class SchemaWrongKeyError(SchemaError):
    """Error that should be raised when an unexpected key is detected
    within the data set being validated."""
    pass
class SchemaMissingKeyError(SchemaError):
    """Error that should be raised when a mandatory key is not found within
    the data set being validated."""
    pass
class SchemaOnlyOneAllowedError(SchemaError):
    """Error that should be raised when an only_one Or key has multiple
    matching candidates."""
    pass
class SchemaForbiddenKeyError(SchemaError):
    """Error that should be raised when a forbidden key is found within the
    data set being validated, and its value matches the value that was
    specified."""
    pass
class SchemaUnexpectedTypeError(SchemaError):
    """Error that should be raised when a type mismatch is detected within
    the data set being validated."""
    pass
class And(object):
    """
    Utility class to combine validation directives in AND Boolean fashion:
    every sub-schema in *args must validate for the data to pass.
    """
    def __init__(self, *args, **kw):
        self._args = args
        allowed_kw = {"error", "schema", "ignore_extra_keys"}
        if not set(kw).issubset(allowed_kw):
            # Report the caller's *unknown* keyword arguments. (Previously
            # this computed allowed_kw - kw, which listed the accepted names
            # the caller had NOT passed instead of the bogus ones.)
            diff = set(kw).difference(allowed_kw)
            raise TypeError("Unknown keyword arguments %r" % list(diff))
        self._error = kw.get("error")
        self._ignore_extra_keys = kw.get("ignore_extra_keys", False)
        # You can pass your inherited Schema class.
        self._schema = kw.get("schema", Schema)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, ", ".join(repr(a) for a in self._args))
    @property
    def args(self):
        """The provided parameters"""
        return self._args
    def validate(self, data):
        """
        Validate data using defined sub schema/expressions ensuring all
        values are valid.
        :param data: to be validated with sub defined schemas.
        :return: returns validated data
        """
        # Each sub-schema validates (and may transform) the output of the
        # previous one, so the directives compose left to right.
        for s in [self._schema(s, error=self._error, ignore_extra_keys=self._ignore_extra_keys) for s in self._args]:
            data = s.validate(data)
        return data
class Or(And):
    """Utility class to combine validation directives in OR Boolean fashion:
    at least one sub-schema must validate."""
    def __init__(self, *args, **kwargs):
        # only_one=True means at most one alternative may match across the
        # whole validation run (enforced by reset()).
        self.only_one = kwargs.pop("only_one", False)
        self.match_count = 0
        super(Or, self).__init__(*args, **kwargs)
    def reset(self):
        """Clear the match counter; raise if only_one was violated."""
        too_many = self.only_one and self.match_count > 1
        self.match_count = 0
        if too_many:
            raise SchemaOnlyOneAllowedError(["There are multiple keys present from the %r condition" % self])
    def validate(self, data):
        """Return data validated by the first matching sub-schema.

        Raises SchemaError aggregating every failure when none match.
        """
        autos = []
        errors = []
        sub_schemas = [
            self._schema(a, error=self._error, ignore_extra_keys=self._ignore_extra_keys)
            for a in self._args
        ]
        for sub in sub_schemas:
            try:
                result = sub.validate(data)
            except SchemaError as failure:
                autos += failure.autos
                errors += failure.errors
            else:
                self.match_count += 1
                # A second match under only_one is an error: fall through
                # to the aggregate SchemaError below.
                if self.only_one and self.match_count > 1:
                    break
                return result
        raise SchemaError(
            ["%r did not validate %r" % (self, data)] + autos,
            [self._error.format(data) if self._error else None] + errors,
        )
class Regex(object):
"""
Enables schema.py to validate string using regular expressions.
"""
# Map all flags bits to a more readable description
NAMES = [
"re.ASCII",
"re.DEBUG",
"re.VERBOSE",
"re.UNICODE",
"re.DOTALL",
"re.MULTILINE",
"re.LOCALE",
"re.IGNORECASE",
"re.TEMPLATE",
]
def __init__(self, pattern_str, flags=0, error=None):
self._pattern_str = pattern_str
flags_list = [Regex.NAMES[i] for i, f in enumerate("{0:09b}".format(flags)) if f != "0"] # Name for each bit
if flags_list:
self._flags_names = ", flags=" + "|".join(flags_list)
else:
self._flags_names = ""
self._pattern = re.compile(pattern_str, flags=flags)
self._error = error
def __repr__(self):
return "%s(%r%s)" % (self.__class__.__name__, self._pattern_str, self._flags_names)
@property
def pattern_str(self):
"""The pattern for the represented regular expression"""
return self._pattern_str
def validate(self, data):
"""
Validated data using defined regex.
:param data: data to be validated
:return: return validated data.
"""
e = self._error
try:
if self._pattern.search(data):
return data
else:
raise SchemaError("%r does not match %r" % (self, data), e)
except TypeError:
raise SchemaError("%r is not string nor buffer" % data, e)
class Use(object):
    """Transforms data during validation by applying a callable to it."""
    def __init__(self, callable_, error=None):
        if not callable(callable_):
            raise TypeError("Expected a callable, not %r" % callable_)
        self._callable = callable_
        self._error = error
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._callable)
    def validate(self, data):
        """Return callable_(data); any failure is wrapped in a SchemaError."""
        try:
            return self._callable(data)
        except SchemaError as x:
            custom = self._error.format(data) if self._error else None
            raise SchemaError([None] + x.autos, [custom] + x.errors)
        except BaseException as x:
            custom = self._error.format(data) if self._error else None
            name = _callable_str(self._callable)
            raise SchemaError("%s(%r) raised %r" % (name, data, x), custom)
# Priority levels used to order schema-key evaluation (higher = earlier).
COMPARABLE, CALLABLE, VALIDATOR, TYPE, DICT, ITERABLE = range(6)
def _priority(s):
    """Return the evaluation priority for a given schema object."""
    kind = type(s)
    if kind in (list, tuple, set, frozenset):
        return ITERABLE
    if kind is dict:
        return DICT
    if issubclass(kind, type):
        return TYPE
    if isinstance(s, Literal):
        return COMPARABLE
    if hasattr(s, "validate"):
        return VALIDATOR
    return CALLABLE if callable(s) else COMPARABLE
class Schema(object):
    """
    Entry point of the library, use this class to instantiate validation
    schema for the data that will be validated.
    """
    def __init__(self, schema, error=None, ignore_extra_keys=False, name=None, description=None, as_reference=False):
        self._schema = schema
        self._error = error
        self._ignore_extra_keys = ignore_extra_keys
        self._name = name
        self._description = description
        # Ask json_schema to create a definition for this schema and use it as part of another
        self.as_reference = as_reference
        if as_reference and name is None:
            raise ValueError("Schema used as reference should have a name")
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._schema)
    @property
    def schema(self):
        return self._schema
    @property
    def description(self):
        return self._description
    @property
    def name(self):
        return self._name
    @property
    def ignore_extra_keys(self):
        return self._ignore_extra_keys
    @staticmethod
    def _dict_key_priority(s):
        """Return priority for a given key object."""
        # Hooks are evaluated slightly before, Optionals slightly after,
        # plain keys of the same underlying priority.
        if isinstance(s, Hook):
            return _priority(s._schema) - 0.5
        if isinstance(s, Optional):
            return _priority(s._schema) + 0.5
        return _priority(s)
    @staticmethod
    def _is_optional_type(s):
        """Return True if the given key is optional (does not have to be found)"""
        return any(isinstance(s, optional_type) for optional_type in [Optional, Hook])
    def is_valid(self, data):
        """Return whether the given data has passed all the validations
        that were specified in the given schema.
        """
        try:
            self.validate(data)
        except SchemaError:
            return False
        else:
            return True
    def _prepend_schema_name(self, message):
        """
        If a custom schema name has been defined, prepends it to the error
        message that gets raised when a schema error occurs.
        """
        if self._name:
            message = "{0!r} {1!s}".format(self._name, message)
        return message
    def validate(self, data):
        """Validate *data* against this schema.

        Returns the validated (and possibly transformed) data, or raises a
        SchemaError subclass describing the failure. Dispatches on the
        _priority "flavor" of the wrapped schema object.
        """
        Schema = self.__class__
        s = self._schema
        e = self._error.format(data) if self._error else None
        i = self._ignore_extra_keys
        if isinstance(s, Literal):
            s = s.schema
        flavor = _priority(s)
        if flavor == ITERABLE:
            data = Schema(type(s), error=e).validate(data)
            o = Or(*s, error=e, schema=Schema, ignore_extra_keys=i)
            return type(data)(o.validate(d) for d in data)
        if flavor == DICT:
            exitstack = ExitStack()
            data = Schema(dict, error=e).validate(data)
            new = type(data)() # new - is a dict of the validated values
            coverage = set() # matched schema keys
            # for each key and value find a schema entry matching them, if any
            sorted_skeys = sorted(s, key=self._dict_key_priority)
            for skey in sorted_skeys:
                if hasattr(skey, "reset"):
                    exitstack.callback(skey.reset)
            with exitstack:
                # Evaluate dictionaries last
                data_items = sorted(data.items(), key=lambda value: isinstance(value[1], dict))
                for key, value in data_items:
                    for skey in sorted_skeys:
                        svalue = s[skey]
                        try:
                            nkey = Schema(skey, error=e).validate(key)
                        except SchemaError:
                            pass
                        else:
                            if isinstance(skey, Hook):
                                # As the content of the value makes little sense for
                                # keys with a hook, we reverse its meaning:
                                # we will only call the handler if the value does match
                                # In the case of the forbidden key hook,
                                # we will raise the SchemaErrorForbiddenKey exception
                                # on match, allowing for excluding a key only if its
                                # value has a certain type, and allowing Forbidden to
                                # work well in combination with Optional.
                                try:
                                    nvalue = Schema(svalue, error=e).validate(value)
                                except SchemaError:
                                    continue
                                skey.handler(nkey, data, e)
                            else:
                                try:
                                    nvalue = Schema(svalue, error=e, ignore_extra_keys=i).validate(value)
                                except SchemaError as x:
                                    k = "Key '%s' error:" % nkey
                                    message = self._prepend_schema_name(k)
                                    raise SchemaError([message] + x.autos, [e] + x.errors)
                                else:
                                    new[nkey] = nvalue
                                    coverage.add(skey)
                                    break
            required = set(k for k in s if not self._is_optional_type(k))
            if not required.issubset(coverage):
                missing_keys = required - coverage
                s_missing_keys = ", ".join(repr(k) for k in sorted(missing_keys, key=repr))
                message = "Missing key%s: %s" % (_plural_s(missing_keys), s_missing_keys)
                message = self._prepend_schema_name(message)
                raise SchemaMissingKeyError(message, e)
            if not self._ignore_extra_keys and (len(new) != len(data)):
                wrong_keys = set(data.keys()) - set(new.keys())
                s_wrong_keys = ", ".join(repr(k) for k in sorted(wrong_keys, key=repr))
                message = "Wrong key%s %s in %r" % (_plural_s(wrong_keys), s_wrong_keys, data)
                message = self._prepend_schema_name(message)
                raise SchemaWrongKeyError(message, e)
            # Apply default-having optionals that haven't been used:
            defaults = set(k for k in s if type(k) is Optional and hasattr(k, "default")) - coverage
            for default in defaults:
                new[default.key] = default.default() if callable(default.default) else default.default
            return new
        if flavor == TYPE:
            # bool is excluded from matching int since bool subclasses int.
            if isinstance(data, s) and not (isinstance(data, bool) and s == int):
                return data
            else:
                message = "%r should be instance of %r" % (data, s.__name__)
                message = self._prepend_schema_name(message)
                raise SchemaUnexpectedTypeError(message, e)
        if flavor == VALIDATOR:
            try:
                return s.validate(data)
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                message = "%r.validate(%r) raised %r" % (s, data, x)
                message = self._prepend_schema_name(message)
                raise SchemaError(message, e)
        if flavor == CALLABLE:
            f = _callable_str(s)
            try:
                if s(data):
                    return data
            except SchemaError as x:
                raise SchemaError([None] + x.autos, [e] + x.errors)
            except BaseException as x:
                message = "%s(%r) raised %r" % (f, data, x)
                message = self._prepend_schema_name(message)
                raise SchemaError(message, e)
            message = "%s(%r) should evaluate to True" % (f, data)
            message = self._prepend_schema_name(message)
            raise SchemaError(message, e)
        if s == data:
            return data
        else:
            message = "%r does not match %r" % (s, data)
            message = self._prepend_schema_name(message)
            raise SchemaError(message, e)
    def json_schema(self, schema_id, use_refs=False):
        """Generate a draft-07 JSON schema dict representing the Schema.
        This method can only be called when the Schema's value is a dict.
        This method must be called with a schema_id.
        :param schema_id: The value of the $id on the main schema
        :param use_refs: Enable reusing object references in the resulting JSON schema.
        Schemas with references are harder to read by humans, but are a lot smaller when there
        is a lot of reuse
        """
        seen = dict() # For use_refs
        definitions_by_name = {}
        def _json_schema(schema, is_main_schema=True, description=None, allow_reference=True):
            Schema = self.__class__
            def _create_or_use_ref(return_dict):
                """If not already seen, return the provided part of the schema unchanged.
                If already seen, give an id to the already seen dict and return a reference to the previous part
                of the schema instead.
                """
                # NOTE(review): returns the closed-over return_schema here; at
                # the only call site return_dict IS return_schema, so the
                # behavior is the same either way.
                if not use_refs or is_main_schema:
                    return return_schema
                hashed = hash(repr(sorted(return_dict.items())))
                if hashed not in seen:
                    seen[hashed] = return_dict
                    return return_dict
                else:
                    id_str = "#" + str(hashed)
                    seen[hashed]["$id"] = id_str
                    return {"$ref": id_str}
            def _get_type_name(python_type):
                """Return the JSON schema name for a Python type"""
                if python_type == str:
                    return "string"
                elif python_type == int:
                    return "integer"
                elif python_type == float:
                    return "number"
                elif python_type == bool:
                    return "boolean"
                elif python_type == list:
                    return "array"
                elif python_type == dict:
                    return "object"
                return "string"
            def _to_json_type(value):
                """Attempt to convert a constant value (for "const" and "default") to a JSON serializable value"""
                if value is None or type(value) in (str, int, float, bool, list, dict):
                    return value
                if type(value) in (tuple, set, frozenset):
                    return list(value)
                if isinstance(value, Literal):
                    return value.schema
                return str(value)
            def _to_schema(s, ignore_extra_keys):
                if not isinstance(s, Schema):
                    return Schema(s, ignore_extra_keys=ignore_extra_keys)
                return s
            s = schema.schema
            i = schema.ignore_extra_keys
            flavor = _priority(s)
            return_schema = {}
            is_a_ref = allow_reference and schema.as_reference
            return_description = description or schema.description
            if return_description:
                return_schema["description"] = return_description
            if flavor == TYPE:
                # Handle type
                return_schema["type"] = _get_type_name(s)
            elif flavor == ITERABLE:
                # Handle arrays or dict schema
                return_schema["type"] = "array"
                if len(s) == 1:
                    return_schema["items"] = _json_schema(_to_schema(s[0], i), is_main_schema=False)
                elif len(s) > 1:
                    return_schema["items"] = _json_schema(Schema(Or(*s)), is_main_schema=False)
            elif isinstance(s, Or):
                # Handle Or values
                # Check if we can use an enum
                if all(priority == COMPARABLE for priority in [_priority(value) for value in s.args]):
                    or_values = [str(s) if isinstance(s, Literal) else s for s in s.args]
                    # All values are simple, can use enum or const
                    if len(or_values) == 1:
                        return_schema["const"] = _to_json_type(or_values[0])
                        return return_schema
                    return_schema["enum"] = or_values
                else:
                    # No enum, let's go with recursive calls
                    any_of_values = []
                    for or_key in s.args:
                        new_value = _json_schema(_to_schema(or_key, i), is_main_schema=False)
                        if new_value != {} and new_value not in any_of_values:
                            any_of_values.append(new_value)
                    if len(any_of_values) == 1:
                        # Only one representable condition remains, do not put under oneOf
                        return_schema.update(any_of_values[0])
                    else:
                        return_schema["anyOf"] = any_of_values
            elif isinstance(s, And):
                # Handle And values
                all_of_values = []
                for and_key in s.args:
                    new_value = _json_schema(_to_schema(and_key, i), is_main_schema=False)
                    if new_value != {} and new_value not in all_of_values:
                        all_of_values.append(new_value)
                if len(all_of_values) == 1:
                    # Only one representable condition remains, do not put under allOf
                    return_schema.update(all_of_values[0])
                else:
                    return_schema["allOf"] = all_of_values
            elif flavor == COMPARABLE:
                return_schema["const"] = _to_json_type(s)
            elif flavor == VALIDATOR and type(s) == Regex:
                return_schema["type"] = "string"
                return_schema["pattern"] = s.pattern_str
            else:
                if flavor != DICT:
                    # If not handled, do not check
                    return return_schema
                # Schema is a dict
                # Check if we have to create a common definition and use as reference
                if is_a_ref:
                    # Generate sub schema if not already done
                    if schema.name not in definitions_by_name:
                        definitions_by_name[schema.name] = {}  # Avoid infinite loop
                        definitions_by_name[schema.name] = _json_schema(
                            schema, is_main_schema=False, allow_reference=False
                        )
                    return_schema["$ref"] = "#/definitions/" + schema.name
                else:
                    required_keys = []
                    expanded_schema = {}
                    for key in s:
                        if isinstance(key, Hook):
                            continue
                        def _get_key_description(key):
                            """Get the description associated to a key (as specified in a Literal object). Return None if not a Literal"""
                            if isinstance(key, Optional):
                                return _get_key_description(key.schema)
                            if isinstance(key, Literal):
                                return key.description
                            return None
                        def _get_key_name(key):
                            """Get the name of a key (as specified in a Literal object). Return the key unchanged if not a Literal"""
                            if isinstance(key, Optional):
                                return _get_key_name(key.schema)
                            if isinstance(key, Literal):
                                return key.schema
                            return key
                        sub_schema = _to_schema(s[key], ignore_extra_keys=i)
                        key_name = _get_key_name(key)
                        if isinstance(key_name, str):
                            if not isinstance(key, Optional):
                                required_keys.append(key_name)
                            expanded_schema[key_name] = _json_schema(
                                sub_schema, is_main_schema=False, description=_get_key_description(key)
                            )
                            if isinstance(key, Optional) and hasattr(key, "default"):
                                expanded_schema[key_name]["default"] = _to_json_type(key.default)
                        elif isinstance(key_name, Or):
                            # JSON schema does not support having a key named one name or another, so we just add both options
                            # This is less strict because we cannot enforce that one or the other is required
                            for or_key in key_name.args:
                                expanded_schema[_get_key_name(or_key)] = _json_schema(
                                    sub_schema, is_main_schema=False, description=_get_key_description(or_key)
                                )
                    return_schema.update(
                        {
                            "type": "object",
                            "properties": expanded_schema,
                            "required": required_keys,
                            "additionalProperties": i,
                        }
                    )
            if is_main_schema:
                return_schema.update({"$id": schema_id, "$schema": "http://json-schema.org/draft-07/schema#"})
                if self._name:
                    return_schema["title"] = self._name
                if definitions_by_name:
                    return_schema["definitions"] = {}
                    for definition_name, definition in definitions_by_name.items():
                        return_schema["definitions"][definition_name] = definition
            return _create_or_use_ref(return_schema)
        return _json_schema(self, True)
class Optional(Schema):
    """Marker for an optional part of the validation Schema."""
    # Sentinel distinguishing "no default supplied" from a default of None.
    _MARKER = object()
    def __init__(self, *args, **kwargs):
        default = kwargs.pop("default", self._MARKER)
        super(Optional, self).__init__(*args, **kwargs)
        if default is not self._MARKER:
            # See if I can come up with a static key to use for myself:
            if _priority(self._schema) != COMPARABLE:
                raise TypeError(
                    "Optional keys with defaults must have simple, "
                    "predictable values, like literal strings or ints. "
                    '"%r" is too complex.' % (self._schema,)
                )
            self.default = default
            self.key = str(self._schema)
    def __hash__(self):
        return hash(self._schema)
    def __eq__(self, other):
        # Equal when both the wrapped schema and the (possibly absent)
        # default match.
        return (
            self.__class__ is other.__class__
            and getattr(self, "default", self._MARKER) == getattr(other, "default", self._MARKER)
            and self._schema == other._schema
        )
    def reset(self):
        # Propagate reset to a nested resettable schema (e.g. an
        # Or(..., only_one=True) used as a key).
        if hasattr(self._schema, "reset"):
            self._schema.reset()
class Hook(Schema):
    """Base class for keys that trigger a handler instead of storing a value.

    Schema.validate's dict branch calls handler(matched_key, whole_data,
    error) when both the key and its value schema match.
    """
    def __init__(self, *args, **kwargs):
        self.handler = kwargs.pop("handler", lambda *args: None)
        super(Hook, self).__init__(*args, **kwargs)
        self.key = self._schema
class Forbidden(Hook):
    """Hook that raises SchemaForbiddenKeyError when its key (and matching
    value schema) is found in the data being validated."""
    def __init__(self, *args, **kwargs):
        kwargs["handler"] = self._default_function
        super(Forbidden, self).__init__(*args, **kwargs)
    @staticmethod
    def _default_function(nkey, data, error):
        # Invoked by Schema.validate when the forbidden key matches.
        raise SchemaForbiddenKeyError("Forbidden key encountered: %r in %r" % (nkey, data), error)
class Literal(object):
    """A plain value paired with an optional human-readable description.

    Used for dictionary keys so that json_schema() can emit per-key
    descriptions, while validation treats it as the bare wrapped value.
    """
    def __init__(self, value, description=None):
        self._schema = value
        self._description = description
    def __str__(self):
        return self._schema
    def __repr__(self):
        return 'Literal("{0}", description="{1}")'.format(self.schema, self.description or "")
    @property
    def description(self):
        """The optional description attached to this literal."""
        return self._description
    @property
    def schema(self):
        """The wrapped plain value."""
        return self._schema
class Const(Schema):
    """Validates against the schema but returns the original data unchanged
    (any transformation performed during validation is discarded)."""
    def validate(self, data):
        super(Const, self).validate(data)
        return data
def _callable_str(callable_):
    """Best-effort printable name for a callable, falling back to str()."""
    return getattr(callable_, "__name__", str(callable_))
def _plural_s(sized):
    """Return "s" when *sized* holds more than one element, else ""."""
    return "s" if len(sized) > 1 else ""
| 38.132822 | 138 | 0.554357 |
import re
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
__version__ = "0.7.3"
# Public API: the names exported by `from schema import *`.
__all__ = [
    "Schema",
    "And",
    "Or",
    "Regex",
    "Optional",
    "Use",
    "Forbidden",
    "Const",
    "Literal",
    "SchemaError",
    "SchemaWrongKeyError",
    "SchemaMissingKeyError",
    "SchemaForbiddenKeyError",
    "SchemaUnexpectedTypeError",
    "SchemaOnlyOneAllowedError",
]
class SchemaError(Exception):
    """Error during Schema validation."""
    def __init__(self, autos, errors=None):
        # Normalize both message channels to lists for uniform handling.
        self.autos = autos if type(autos) is list else [autos]
        self.errors = errors if type(errors) is list else [errors]
        Exception.__init__(self, self.code)
    @property
    def code(self):
        """Build the message from de-duplicated errors (preferred) or autos."""
        def uniq(seq):
            # Removes duplicates while preserving first-seen order.
            seen = set()
            seen_add = seen.add
            return [x for x in seq if x not in seen and not seen_add(x)]
        data_set = uniq(i for i in self.autos if i is not None)
        error_list = uniq(i for i in self.errors if i is not None)
        if error_list:
            return "\n".join(error_list)
        return "\n".join(data_set)
class SchemaWrongKeyError(SchemaError):
    """Raised when an unexpected key is found in the data being validated."""
    pass
class SchemaMissingKeyError(SchemaError):
    """Raised when a mandatory key is missing from the data being validated."""
    pass
class SchemaOnlyOneAllowedError(SchemaError):
    """Raised when an only_one Or key has multiple matching candidates."""
    pass
class SchemaForbiddenKeyError(SchemaError):
    """Raised when a Forbidden key matches in the data being validated."""
    pass
class SchemaUnexpectedTypeError(SchemaError):
    """Raised on a type mismatch in the data being validated."""
    pass
class And(object):
    """Combines validation directives in AND Boolean fashion: every
    sub-schema in *args must validate for the data to pass."""
    def __init__(self, *args, **kw):
        self._args = args
        allowed_kw = {"error", "schema", "ignore_extra_keys"}
        if not set(kw).issubset(allowed_kw):
            # Report the caller's *unknown* keyword arguments. (Previously
            # this computed allowed_kw - kw, which listed the accepted names
            # the caller had NOT passed instead of the bogus ones.)
            diff = set(kw).difference(allowed_kw)
            raise TypeError("Unknown keyword arguments %r" % list(diff))
        self._error = kw.get("error")
        self._ignore_extra_keys = kw.get("ignore_extra_keys", False)
        # A Schema subclass may be supplied for sub-validation.
        self._schema = kw.get("schema", Schema)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, ", ".join(repr(a) for a in self._args))
    @property
    def args(self):
        """The provided sub-schemas."""
        return self._args
    def validate(self, data):
        """Validate data with every sub-schema in order; each one receives
        the (possibly transformed) output of the previous one."""
        for s in [self._schema(s, error=self._error, ignore_extra_keys=self._ignore_extra_keys) for s in self._args]:
            data = s.validate(data)
        return data
class Or(And):
    """Combines validation directives in OR Boolean fashion: at least one
    sub-schema must validate."""
    def __init__(self, *args, **kwargs):
        # only_one=True means at most one alternative may match across the
        # whole validation run (enforced by reset()).
        self.only_one = kwargs.pop("only_one", False)
        self.match_count = 0
        super(Or, self).__init__(*args, **kwargs)
    def reset(self):
        """Clear the match counter; raise if only_one was violated."""
        failed = self.match_count > 1 and self.only_one
        self.match_count = 0
        if failed:
            raise SchemaOnlyOneAllowedError(["There are multiple keys present " + "from the %r condition" % self])
    def validate(self, data):
        """Return data validated by the first matching sub-schema; raise
        SchemaError aggregating every failure when none match."""
        autos, errors = [], []
        for s in [self._schema(s, error=self._error, ignore_extra_keys=self._ignore_extra_keys) for s in self._args]:
            try:
                validation = s.validate(data)
                self.match_count += 1
                if self.match_count > 1 and self.only_one:
                    break
                return validation
            except SchemaError as _x:
                autos += _x.autos
                errors += _x.errors
        raise SchemaError(
            ["%r did not validate %r" % (self, data)] + autos,
            [self._error.format(data) if self._error else None] + errors,
        )
class Regex(object):
    """Validates strings against a regular expression."""
    # Human-readable name for each flag bit, most-significant bit first,
    # used only to build a readable __repr__.
    NAMES = [
        "re.ASCII",
        "re.DEBUG",
        "re.VERBOSE",
        "re.UNICODE",
        "re.DOTALL",
        "re.MULTILINE",
        "re.LOCALE",
        "re.IGNORECASE",
        "re.TEMPLATE",
    ]
    def __init__(self, pattern_str, flags=0, error=None):
        self._pattern_str = pattern_str
        flags_list = [Regex.NAMES[i] for i, f in enumerate("{0:09b}".format(flags)) if f != "0"]
        if flags_list:
            self._flags_names = ", flags=" + "|".join(flags_list)
        else:
            self._flags_names = ""
        self._pattern = re.compile(pattern_str, flags=flags)
        self._error = error
    def __repr__(self):
        return "%s(%r%s)" % (self.__class__.__name__, self._pattern_str, self._flags_names)
    @property
    def pattern_str(self):
        """The source pattern of the represented regular expression."""
        return self._pattern_str
    def validate(self, data):
        """Return data unchanged if the pattern matches anywhere in it;
        raise SchemaError otherwise (or if data is not a string)."""
        e = self._error
        try:
            if self._pattern.search(data):
                return data
            else:
                raise SchemaError("%r does not match %r" % (self, data), e)
        except TypeError:
            raise SchemaError("%r is not string nor buffer" % data, e)
class Use(object):
    """Transforms data during validation by applying a callable to it."""
    def __init__(self, callable_, error=None):
        if not callable(callable_):
            raise TypeError("Expected a callable, not %r" % callable_)
        self._callable = callable_
        self._error = error
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._callable)
    def validate(self, data):
        """Return callable_(data); any failure is wrapped in a SchemaError."""
        try:
            return self._callable(data)
        except SchemaError as x:
            raise SchemaError([None] + x.autos, [self._error.format(data) if self._error else None] + x.errors)
        except BaseException as x:
            f = _callable_str(self._callable)
            raise SchemaError("%s(%r) raised %r" % (f, data, x), self._error.format(data) if self._error else None)
# Priority levels used to order schema-key evaluation (higher = earlier).
COMPARABLE, CALLABLE, VALIDATOR, TYPE, DICT, ITERABLE = range(6)
def _priority(s):
    """Return the evaluation priority for a given schema object."""
    if type(s) in (list, tuple, set, frozenset):
        return ITERABLE
    if type(s) is dict:
        return DICT
    if issubclass(type(s), type):
        return TYPE
    if isinstance(s, Literal):
        return COMPARABLE
    if hasattr(s, "validate"):
        return VALIDATOR
    if callable(s):
        return CALLABLE
    else:
        return COMPARABLE
class Schema(object):
def __init__(self, schema, error=None, ignore_extra_keys=False, name=None, description=None, as_reference=False):
self._schema = schema
self._error = error
self._ignore_extra_keys = ignore_extra_keys
self._name = name
self._description = description
self.as_reference = as_reference
if as_reference and name is None:
raise ValueError("Schema used as reference should have a name")
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._schema)
@property
def schema(self):
return self._schema
@property
def description(self):
return self._description
@property
def name(self):
return self._name
@property
def ignore_extra_keys(self):
return self._ignore_extra_keys
@staticmethod
def _dict_key_priority(s):
if isinstance(s, Hook):
return _priority(s._schema) - 0.5
if isinstance(s, Optional):
return _priority(s._schema) + 0.5
return _priority(s)
@staticmethod
def _is_optional_type(s):
return any(isinstance(s, optional_type) for optional_type in [Optional, Hook])
def is_valid(self, data):
try:
self.validate(data)
except SchemaError:
return False
else:
return True
def _prepend_schema_name(self, message):
if self._name:
message = "{0!r} {1!s}".format(self._name, message)
return message
def validate(self, data):
Schema = self.__class__
s = self._schema
e = self._error.format(data) if self._error else None
i = self._ignore_extra_keys
if isinstance(s, Literal):
s = s.schema
flavor = _priority(s)
if flavor == ITERABLE:
data = Schema(type(s), error=e).validate(data)
o = Or(*s, error=e, schema=Schema, ignore_extra_keys=i)
return type(data)(o.validate(d) for d in data)
if flavor == DICT:
exitstack = ExitStack()
data = Schema(dict, error=e).validate(data)
new = type(data)()
coverage = set()
sorted_skeys = sorted(s, key=self._dict_key_priority)
for skey in sorted_skeys:
if hasattr(skey, "reset"):
exitstack.callback(skey.reset)
with exitstack:
data_items = sorted(data.items(), key=lambda value: isinstance(value[1], dict))
for key, value in data_items:
for skey in sorted_skeys:
svalue = s[skey]
try:
nkey = Schema(skey, error=e).validate(key)
except SchemaError:
pass
else:
if isinstance(skey, Hook):
try:
nvalue = Schema(svalue, error=e).validate(value)
except SchemaError:
continue
skey.handler(nkey, data, e)
else:
try:
nvalue = Schema(svalue, error=e, ignore_extra_keys=i).validate(value)
except SchemaError as x:
k = "Key '%s' error:" % nkey
message = self._prepend_schema_name(k)
raise SchemaError([message] + x.autos, [e] + x.errors)
else:
new[nkey] = nvalue
coverage.add(skey)
break
required = set(k for k in s if not self._is_optional_type(k))
if not required.issubset(coverage):
missing_keys = required - coverage
s_missing_keys = ", ".join(repr(k) for k in sorted(missing_keys, key=repr))
message = "Missing key%s: %s" % (_plural_s(missing_keys), s_missing_keys)
message = self._prepend_schema_name(message)
raise SchemaMissingKeyError(message, e)
if not self._ignore_extra_keys and (len(new) != len(data)):
wrong_keys = set(data.keys()) - set(new.keys())
s_wrong_keys = ", ".join(repr(k) for k in sorted(wrong_keys, key=repr))
message = "Wrong key%s %s in %r" % (_plural_s(wrong_keys), s_wrong_keys, data)
message = self._prepend_schema_name(message)
raise SchemaWrongKeyError(message, e)
defaults = set(k for k in s if type(k) is Optional and hasattr(k, "default")) - coverage
for default in defaults:
new[default.key] = default.default() if callable(default.default) else default.default
return new
if flavor == TYPE:
if isinstance(data, s) and not (isinstance(data, bool) and s == int):
return data
else:
message = "%r should be instance of %r" % (data, s.__name__)
message = self._prepend_schema_name(message)
raise SchemaUnexpectedTypeError(message, e)
if flavor == VALIDATOR:
try:
return s.validate(data)
except SchemaError as x:
raise SchemaError([None] + x.autos, [e] + x.errors)
except BaseException as x:
message = "%r.validate(%r) raised %r" % (s, data, x)
message = self._prepend_schema_name(message)
raise SchemaError(message, e)
if flavor == CALLABLE:
f = _callable_str(s)
try:
if s(data):
return data
except SchemaError as x:
raise SchemaError([None] + x.autos, [e] + x.errors)
except BaseException as x:
message = "%s(%r) raised %r" % (f, data, x)
message = self._prepend_schema_name(message)
raise SchemaError(message, e)
message = "%s(%r) should evaluate to True" % (f, data)
message = self._prepend_schema_name(message)
raise SchemaError(message, e)
if s == data:
return data
else:
message = "%r does not match %r" % (s, data)
message = self._prepend_schema_name(message)
raise SchemaError(message, e)
    def json_schema(self, schema_id, use_refs=False):
        """Render this schema as a JSON Schema (draft-07) dictionary.

        :param schema_id: value emitted as the top-level ``"$id"``.
        :param use_refs: when True, repeated sub-schemas are deduplicated
            with ``"$id"``/``"$ref"`` pairs instead of being inlined.
        """
        seen = dict()  # For use_refs
        definitions_by_name = {}
        def _json_schema(schema, is_main_schema=True, description=None, allow_reference=True):
            # Recursive worker: converts one Schema node into a JSON-schema dict.
            Schema = self.__class__
            def _create_or_use_ref(return_dict):
                # First sighting of a dict returns it inline; later sightings
                # return a {"$ref": ...} stub pointing at the first one.
                # NOTE(review): the early exit returns the closure variable
                # ``return_schema`` instead of the ``return_dict`` parameter;
                # harmless today because the only call site passes
                # ``return_schema``, but worth confirming upstream.
                if not use_refs or is_main_schema:
                    return return_schema
                hashed = hash(repr(sorted(return_dict.items())))
                if hashed not in seen:
                    seen[hashed] = return_dict
                    return return_dict
                else:
                    id_str = "#" + str(hashed)
                    seen[hashed]["$id"] = id_str
                    return {"$ref": id_str}
            def _get_type_name(python_type):
                # Map a Python builtin type to the JSON-schema "type" keyword;
                # anything unrecognized falls back to "string".
                if python_type == str:
                    return "string"
                elif python_type == int:
                    return "integer"
                elif python_type == float:
                    return "number"
                elif python_type == bool:
                    return "boolean"
                elif python_type == list:
                    return "array"
                elif python_type == dict:
                    return "object"
                return "string"
            def _to_json_type(value):
                # Coerce a Python value into something JSON-serializable.
                if value is None or type(value) in (str, int, float, bool, list, dict):
                    return value
                if type(value) in (tuple, set, frozenset):
                    return list(value)
                if isinstance(value, Literal):
                    return value.schema
                return str(value)
            def _to_schema(s, ignore_extra_keys):
                # Wrap raw values in a Schema so recursion is uniform.
                if not isinstance(s, Schema):
                    return Schema(s, ignore_extra_keys=ignore_extra_keys)
                return s
            s = schema.schema
            i = schema.ignore_extra_keys
            flavor = _priority(s)
            return_schema = {}
            is_a_ref = allow_reference and schema.as_reference
            return_description = description or schema.description
            if return_description:
                return_schema["description"] = return_description
            if flavor == TYPE:
                # Handle type
                return_schema["type"] = _get_type_name(s)
            elif flavor == ITERABLE:
                # Handle arrays or dict schema
                return_schema["type"] = "array"
                if len(s) == 1:
                    return_schema["items"] = _json_schema(_to_schema(s[0], i), is_main_schema=False)
                elif len(s) > 1:
                    # Several item alternatives: any of them may match.
                    return_schema["items"] = _json_schema(Schema(Or(*s)), is_main_schema=False)
            elif isinstance(s, Or):
                # Handle Or values
                # Check if we can use an enum
                if all(priority == COMPARABLE for priority in [_priority(value) for value in s.args]):
                    or_values = [str(s) if isinstance(s, Literal) else s for s in s.args]
                    # All values are simple, can use enum or const
                    if len(or_values) == 1:
                        return_schema["const"] = _to_json_type(or_values[0])
                        return return_schema
                    return_schema["enum"] = or_values
                else:
                    # No enum, let's go with recursive calls
                    any_of_values = []
                    for or_key in s.args:
                        new_value = _json_schema(_to_schema(or_key, i), is_main_schema=False)
                        if new_value != {} and new_value not in any_of_values:
                            any_of_values.append(new_value)
                    if len(any_of_values) == 1:
                        # "anyOf" with a single entry collapses to that entry.
                        return_schema.update(any_of_values[0])
                    else:
                        return_schema["anyOf"] = any_of_values
            elif isinstance(s, And):
                # And -> JSON-schema "allOf", again collapsing singletons.
                all_of_values = []
                for and_key in s.args:
                    new_value = _json_schema(_to_schema(and_key, i), is_main_schema=False)
                    if new_value != {} and new_value not in all_of_values:
                        all_of_values.append(new_value)
                if len(all_of_values) == 1:
                    return_schema.update(all_of_values[0])
                else:
                    return_schema["allOf"] = all_of_values
            elif flavor == COMPARABLE:
                return_schema["const"] = _to_json_type(s)
            elif flavor == VALIDATOR and type(s) == Regex:
                return_schema["type"] = "string"
                return_schema["pattern"] = s.pattern_str
            else:
                if flavor != DICT:
                    # No JSON-schema translation for this flavor; emit what
                    # we have so far (possibly just the description).
                    return return_schema
                if is_a_ref:
                    if schema.name not in definitions_by_name:
                        # Insert a placeholder first so recursive visits to the
                        # same name short-circuit instead of looping forever.
                        definitions_by_name[schema.name] = {}
                        definitions_by_name[schema.name] = _json_schema(
                            schema, is_main_schema=False, allow_reference=False
                        )
                    return_schema["$ref"] = "#/definitions/" + schema.name
                else:
                    required_keys = []
                    expanded_schema = {}
                    for key in s:
                        if isinstance(key, Hook):
                            # Hooks are side-effect handlers, not data keys.
                            continue
                        def _get_key_description(key):
                            """Get the description associated to a key (as specified in a Literal object). Return None if not a Literal"""
                            if isinstance(key, Optional):
                                return _get_key_description(key.schema)
                            if isinstance(key, Literal):
                                return key.description
                            return None
                        def _get_key_name(key):
                            """Get the name of a key (as specified in a Literal object). Return the key unchanged if not a Literal"""
                            if isinstance(key, Optional):
                                return _get_key_name(key.schema)
                            if isinstance(key, Literal):
                                return key.schema
                            return key
                        sub_schema = _to_schema(s[key], ignore_extra_keys=i)
                        key_name = _get_key_name(key)
                        if isinstance(key_name, str):
                            if not isinstance(key, Optional):
                                required_keys.append(key_name)
                            expanded_schema[key_name] = _json_schema(
                                sub_schema, is_main_schema=False, description=_get_key_description(key)
                            )
                            if isinstance(key, Optional) and hasattr(key, "default"):
                                expanded_schema[key_name]["default"] = _to_json_type(key.default)
                        elif isinstance(key_name, Or):
                            # A key given as Or(...) expands into one property
                            # per alternative, all sharing the same sub-schema.
                            for or_key in key_name.args:
                                expanded_schema[_get_key_name(or_key)] = _json_schema(
                                    sub_schema, is_main_schema=False, description=_get_key_description(or_key)
                                )
                    return_schema.update(
                        {
                            "type": "object",
                            "properties": expanded_schema,
                            "required": required_keys,
                            "additionalProperties": i,
                        }
                    )
            if is_main_schema:
                # Top-level document gets the "$id"/"$schema" envelope plus
                # any accumulated named definitions.
                return_schema.update({"$id": schema_id, "$schema": "http://json-schema.org/draft-07/schema#"})
                if self._name:
                    return_schema["title"] = self._name
                if definitions_by_name:
                    return_schema["definitions"] = {}
                    for definition_name, definition in definitions_by_name.items():
                        return_schema["definitions"][definition_name] = definition
            return _create_or_use_ref(return_schema)
        return _json_schema(self, True)
class Optional(Schema):
    """Marker for an optional key (or value) of the validation schema."""
    # Sentinel distinguishing "no default supplied" from an explicit default.
    _MARKER = object()
    def __init__(self, *args, **kwargs):
        # Pull our own keyword out before delegating the rest to Schema.
        supplied_default = kwargs.pop("default", self._MARKER)
        super(Optional, self).__init__(*args, **kwargs)
        if supplied_default is self._MARKER:
            return
        # A default is only permitted when the key is a plain comparable
        # value, since it must be usable as an actual dictionary key.
        if _priority(self._schema) != COMPARABLE:
            raise TypeError(
                "Optional keys with defaults must have simple, "
                "predictable values, like literal strings or ints. "
                '"%r" is too complex.' % (self._schema,)
            )
        self.default = supplied_default
        self.key = str(self._schema)
    def __hash__(self):
        return hash(self._schema)
    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        if getattr(self, "default", self._MARKER) != getattr(other, "default", self._MARKER):
            return False
        return self._schema == other._schema
    def reset(self):
        # Propagate reset to the wrapped schema when it supports it.
        if hasattr(self._schema, "reset"):
            self._schema.reset()
class Hook(Schema):
    """Base class for keys that trigger a side-effect handler when matched."""
    def __init__(self, *args, **kwargs):
        def _noop(*_handler_args):
            return None
        # Callable invoked when this key is encountered; defaults to a no-op.
        self.handler = kwargs.pop("handler", _noop)
        super(Hook, self).__init__(*args, **kwargs)
        self.key = self._schema
class Forbidden(Hook):
    """A Hook whose handler always raises, marking a key as disallowed."""
    @staticmethod
    def _default_function(nkey, data, error):
        # Unconditionally reject the key that was matched.
        raise SchemaForbiddenKeyError("Forbidden key encountered: %r in %r" % (nkey, data), error)
    def __init__(self, *args, **kwargs):
        # Force the handler to the rejecting function regardless of input.
        kwargs["handler"] = self._default_function
        super(Forbidden, self).__init__(*args, **kwargs)
class Literal(object):
    """Wrap a plain value together with an optional human-readable description."""
    def __init__(self, value, description=None):
        self._schema = value
        self._description = description
    @property
    def description(self):
        """The description supplied at construction time, or None."""
        return self._description
    @property
    def schema(self):
        """The wrapped value itself."""
        return self._schema
    def __str__(self):
        return self._schema
    def __repr__(self):
        return 'Literal("' + self.schema + '", description="' + (self.description or "") + '")'
class Const(Schema):
    """Schema variant that validates like its base but always returns the
    input value unchanged instead of the (possibly transformed) result."""
    def validate(self, data):
        # Run normal validation purely for its error-raising side effects.
        super(Const, self).validate(data)
        return data
def _callable_str(callable_):
if hasattr(callable_, "__name__"):
return callable_.__name__
return str(callable_)
def _plural_s(sized):
return "s" if len(sized) > 1 else ""
| true | true |
f72d80fec048e93c8380bd0fed8c96da03ae69b2 | 2,886 | py | Python | 2017/01.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | 2017/01.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null | 2017/01.py | GillesArcas/Advent_of_Code | 1f57eb1686875df2684b0d56916b1d20724e9fb9 | [
"MIT"
] | null | null | null |
EXAMPLES1 = (
('1122', 3),
('1111', 4),
('1234', 0),
('91212129', 9)
)
EXAMPLES2 = (
('1212', 6),
('1221', 0),
('123425', 4),
('123123', 12),
('12131415', 4)
)
INPUT = '31813174349235972159811869755166343882958376474278437681632495222499211488649543755655138842553867246131245462881756862736922925752647341673342756514856663979496747158241792857625471323535183222497949751644488277317173496124473893452425118133645984488759128897146498831373795721661696492622276282881218371273973538163779782435211491196616375135472517935481964439956844536136823757764494967297251545389464472794474447941564778733926532741752757865243946976266426548341889873514383464142659425122786667399143335772174973128383869893325977319651839516694295534146668728822393452626321892357192574444856264721585365164945647254645264693957898373214897848424966266582991272496771159583715456714645585576641458358326521858518319315233857473695712238323787254556597566461188452279853766184333696344395818615215846348586541164194624371353556812548945447432787795489443312941687221314432694115847863129826532628228386894683392352799514942665396273726821936346663485499159141368443782475714679953213388375939519711591262489869326145476958378464652451441434846382474578535468433514121336844727988128998543975147649823215332929623574231738442281161294838499441799996857746549441142859199799125595761724782225452394593514388571187279266291364278184761833324476838939898258225748562345853633364314923186685534864178665214135631494876474186833392929124337161222959459117554238429216916532175247326391321525832362274683763488347654497889261543959591212539851835354335598844669618391876623638137926893582131945361264841733341247646125278489995838369127582438419889922365596554237153412394494932582424222479798382932335239274297663365164912953364777876187522324991837775492621675953397843833247525599771974555545348388871578347332456586949283657613841414576976542343934911424716613479249893113961925713317644349946444271959375981158445151659431844142242547191181944395897963146947935463718145169266129118413523541222444997678726644615185324461293228124456118853885552279849917342474792984425629248492847827653
133583215539325866881662159421987315186914769478947389188382383546881622246793781846254253759714573354544997853153798862436887889318646643359555663135476261863'
def code1(string):
    """Sum every digit that equals the next digit in the circular sequence."""
    total = 0
    length = len(string)
    for index, digit in enumerate(string):
        # The list is circular: the last digit's neighbour is the first.
        if digit == string[(index + 1) % length]:
            total += int(digit)
    return total
def code2(string):
    """Sum every digit that matches the digit halfway around the circle."""
    length = len(string)
    half = length // 2
    total = 0
    for index, digit in enumerate(string):
        if digit == string[(index + half) % length]:
            total += int(digit)
    return total
def test(code, examples, myinput):
    """Check *code* against known (input, expected) pairs, then print the
    answer it produces for *myinput*."""
    for sample, expected in examples:
        actual = code(sample)
        assert actual == expected, (sample, expected, actual)
    print('>', code(myinput))
test(code1, EXAMPLES1, INPUT)
test(code2, EXAMPLES2, INPUT)
| 78 | 2,162 | 0.878378 |
EXAMPLES1 = (
('1122', 3),
('1111', 4),
('1234', 0),
('91212129', 9)
)
EXAMPLES2 = (
('1212', 6),
('1221', 0),
('123425', 4),
('123123', 12),
('12131415', 4)
)
INPUT = '31813174349235972159811869755166343882958376474278437681632495222499211488649543755655138842553867246131245462881756862736922925752647341673342756514856663979496747158241792857625471323535183222497949751644488277317173496124473893452425118133645984488759128897146498831373795721661696492622276282881218371273973538163779782435211491196616375135472517935481964439956844536136823757764494967297251545389464472794474447941564778733926532741752757865243946976266426548341889873514383464142659425122786667399143335772174973128383869893325977319651839516694295534146668728822393452626321892357192574444856264721585365164945647254645264693957898373214897848424966266582991272496771159583715456714645585576641458358326521858518319315233857473695712238323787254556597566461188452279853766184333696344395818615215846348586541164194624371353556812548945447432787795489443312941687221314432694115847863129826532628228386894683392352799514942665396273726821936346663485499159141368443782475714679953213388375939519711591262489869326145476958378464652451441434846382474578535468433514121336844727988128998543975147649823215332929623574231738442281161294838499441799996857746549441142859199799125595761724782225452394593514388571187279266291364278184761833324476838939898258225748562345853633364314923186685534864178665214135631494876474186833392929124337161222959459117554238429216916532175247326391321525832362274683763488347654497889261543959591212539851835354335598844669618391876623638137926893582131945361264841733341247646125278489995838369127582438419889922365596554237153412394494932582424222479798382932335239274297663365164912953364777876187522324991837775492621675953397843833247525599771974555545348388871578347332456586949283657613841414576976542343934911424716613479249893113961925713317644349946444271959375981158445151659431844142242547191181944395897963146947935463718145169266129118413523541222444997678726644615185324461293228124456118853885552279849917342474792984425629248492847827653
133583215539325866881662159421987315186914769478947389188382383546881622246793781846254253759714573354544997853153798862436887889318646643359555663135476261863'
def code1(string):
return sum(ord(x) - ord('0') for i, x in enumerate(string) if x == string[(i + 1) % len(string)])
def code2(string):
return sum(ord(x) - ord('0') for i, x in enumerate(string) if x == string[(i + len(string) // 2) % len(string)])
def test(code, examples, myinput):
for data, result in examples:
assert code(data) == result, (data, result, code(data))
print('>', code(myinput))
test(code1, EXAMPLES1, INPUT)
test(code2, EXAMPLES2, INPUT)
| true | true |
f72d8160d3dc15e51fdf79766d92f5048e9ff644 | 4,718 | py | Python | src/ui/help.py | slinden2/uno-card-game | d648e2b305167678a1428694f64dd50cb83f657f | [
"MIT"
] | null | null | null | src/ui/help.py | slinden2/uno-card-game | d648e2b305167678a1428694f64dd50cb83f657f | [
"MIT"
] | null | null | null | src/ui/help.py | slinden2/uno-card-game | d648e2b305167678a1428694f64dd50cb83f657f | [
"MIT"
] | null | null | null | import tkinter as tk
import tkinter.font as tkFont
from tkinter import ttk
import webbrowser
from config import Config
import ui.main_menu
class HelpPage(tk.Frame):
    """Frame presenting the rules of the game with a back button."""
    def __init__(self, parent, controller):
        """Build the help page and lay out title, content and back button.

        :param parent: container widget this frame is placed into.
        :param controller: app controller used to switch between frames.
        """
        super().__init__(parent)
        self.controller = controller
        # Rows 0-11 share space equally; row 12 (the button row) gets a
        # much larger weight so the button stays near the bottom.
        for i in range(0, 12):
            self.rowconfigure(i, weight=1)
        self.rowconfigure(12, weight=20)
        self.columnconfigure(0, weight=1)
        self.create_title()
        self.create_content()
        self.create_button()
    def create_title(self):
        """Render the page title label in row 0."""
        font = tkFont.Font(**Config.TITLE_FONT)
        label1 = ttk.Label(self, text="Help", font=font)
        label1.grid(row=0, column=0)
    def create_content(self):
        """Create and grid all content widgets: heading, paragraphs,
        bullet points, a table and an external link."""
        # set up title widget
        title_font = tkFont.Font(**Config.HELP_TITLE)
        title_label = tk.Label(self, text="Rules", font=title_font, padx=10)
        title_label.grid(row=1, column=0, sticky="w")
        # set up first paragraphs (rows 2-4)
        for i, paragraph in enumerate((Config.PARAGRAPH_1,
                                       Config.PARAGRAPH_2,
                                       Config.PARAGRAPH_3), start=2):
            content = tk.Message(self, text=paragraph, aspect=1500, padx=10)
            content.grid(row=i, column=0, sticky="w")
        # set up bullet points (rows 5-7)
        for i, bullet in enumerate((Config.BULLET_1,
                                    Config.BULLET_2,
                                    Config.BULLET_3), start=5):
            bullet = tk.Label(self, text=bullet)
            bullet.grid(row=i, column=0, sticky="w", padx=20)
        # set up last paragraphs (rows 8-9)
        for i, paragraph in enumerate((Config.PARAGRAPH_4,
                                       Config.PARAGRAPH_5), start=8):
            content = tk.Label(self, text=paragraph, padx=10)
            content.grid(row=i, column=0, sticky="w")
        # create a table
        table = HelpTable(self, Config.HELP_TABLE)
        table.grid(row=10, column=0, sticky="w", padx=20)
        # create a link to wikipedia source page
        link = tk.Label(self, text=Config.LINK_TEXT, cursor="hand2", padx=20)
        link.grid(row=11, column=0, sticky="w")
        link.bind("<Button-1>", self.open_webbrowser)
    def create_button(self):
        """Create the "Back to MainScreen" button in the bottom row."""
        button = ttk.Button(self, text="Back",
                            command=self.back_to_mainscreen, width=30)
        button.grid(row=12, column=0)
    def back_to_mainscreen(self):
        # Ask the controller to show the main menu frame again.
        self.controller.show_frame(ui.main_menu.MainScreen)
    @staticmethod
    def open_webbrowser(event):
        """Open the Wikipedia source page in the default web browser."""
        webbrowser.open(Config.HELP_LINK)
class HelpTable(tk.Frame):
    """Simple grid-of-frames table used to display tabular help data."""
    def __init__(self, parent, data):
        """:param data: sequence of rows, each a sequence of cell strings.
        The first row is rendered as a bold header."""
        self.borderwidth = 1
        super().__init__(parent)
        self.data = data
        self.create_table()
    def create_table(self):
        """Build one bordered Frame plus Message widget per data cell."""
        title_font = tkFont.Font(**Config.HELP_TABLE_TITLE)
        for row_n, row_data in enumerate(self.data):
            for column_n, cell_data in enumerate(row_data):
                if row_n % 2 == 0:
                    # every other row with different bgcolor
                    frame = tk.Frame(self,
                                     borderwidth=1,
                                     background="#DADADA",
                                     relief="groove",
                                     padx=5)
                else:
                    frame = tk.Frame(self,
                                     borderwidth=1,
                                     relief="groove",
                                     padx=5)
                frame.grid(row=row_n, column=column_n, sticky="nsew")
                if row_n == 0:
                    # first (header) row with bold font
                    message = tk.Message(frame,
                                         text=cell_data,
                                         aspect=500,
                                         font=title_font,
                                         background="#DADADA")
                elif row_n % 2 == 0:
                    # every other row with different bgcolor
                    message = tk.Message(frame,
                                         text=cell_data,
                                         aspect=500,
                                         background="#DADADA")
                else:
                    message = tk.Message(frame, text=cell_data, aspect=500)
                message.pack(side="left")
| 34.437956 | 77 | 0.50106 | import tkinter as tk
import tkinter.font as tkFont
from tkinter import ttk
import webbrowser
from config import Config
import ui.main_menu
class HelpPage(tk.Frame):
def __init__(self, parent, controller):
super().__init__(parent)
self.controller = controller
for i in range(0, 12):
self.rowconfigure(i, weight=1)
self.rowconfigure(12, weight=20)
self.columnconfigure(0, weight=1)
self.create_title()
self.create_content()
self.create_button()
def create_title(self):
font = tkFont.Font(**Config.TITLE_FONT)
label1 = ttk.Label(self, text="Help", font=font)
label1.grid(row=0, column=0)
def create_content(self):
title_font = tkFont.Font(**Config.HELP_TITLE)
title_label = tk.Label(self, text="Rules", font=title_font, padx=10)
title_label.grid(row=1, column=0, sticky="w")
for i, paragraph in enumerate((Config.PARAGRAPH_1,
Config.PARAGRAPH_2,
Config.PARAGRAPH_3), start=2):
content = tk.Message(self, text=paragraph, aspect=1500, padx=10)
content.grid(row=i, column=0, sticky="w")
for i, bullet in enumerate((Config.BULLET_1,
Config.BULLET_2,
Config.BULLET_3), start=5):
bullet = tk.Label(self, text=bullet)
bullet.grid(row=i, column=0, sticky="w", padx=20)
for i, paragraph in enumerate((Config.PARAGRAPH_4,
Config.PARAGRAPH_5), start=8):
content = tk.Label(self, text=paragraph, padx=10)
content.grid(row=i, column=0, sticky="w")
table = HelpTable(self, Config.HELP_TABLE)
table.grid(row=10, column=0, sticky="w", padx=20)
link = tk.Label(self, text=Config.LINK_TEXT, cursor="hand2", padx=20)
link.grid(row=11, column=0, sticky="w")
link.bind("<Button-1>", self.open_webbrowser)
def create_button(self):
button = ttk.Button(self, text="Back",
command=self.back_to_mainscreen, width=30)
button.grid(row=12, column=0)
def back_to_mainscreen(self):
self.controller.show_frame(ui.main_menu.MainScreen)
@staticmethod
def open_webbrowser(event):
webbrowser.open(Config.HELP_LINK)
class HelpTable(tk.Frame):
def __init__(self, parent, data):
super().__init__(parent)
self.borderwidth = 1
self.data = data
self.create_table()
def create_table(self):
title_font = tkFont.Font(**Config.HELP_TABLE_TITLE)
for row_n, row_data in enumerate(self.data):
for column_n, cell_data in enumerate(row_data):
if row_n % 2 == 0:
frame = tk.Frame(self,
borderwidth=1,
background="#DADADA",
relief="groove",
padx=5)
else:
frame = tk.Frame(self,
borderwidth=1,
relief="groove",
padx=5)
frame.grid(row=row_n, column=column_n, sticky="nsew")
if row_n == 0:
message = tk.Message(frame,
text=cell_data,
aspect=500,
font=title_font,
background="#DADADA")
elif row_n % 2 == 0:
message = tk.Message(frame,
text=cell_data,
aspect=500,
background="#DADADA")
else:
message = tk.Message(frame, text=cell_data, aspect=500)
message.pack(side="left")
| true | true |
f72d81a59c8b8dc7875de7cc95fb4147afde52f0 | 1,907 | py | Python | src/util/utils.py | rileymblaylock/bow_mnb | ecb693739ab23aafb4257f9448c06cc880bc52b2 | [
"MIT"
] | null | null | null | src/util/utils.py | rileymblaylock/bow_mnb | ecb693739ab23aafb4257f9448c06cc880bc52b2 | [
"MIT"
] | null | null | null | src/util/utils.py | rileymblaylock/bow_mnb | ecb693739ab23aafb4257f9448c06cc880bc52b2 | [
"MIT"
] | null | null | null | import math
def tfidf_calc(wordPerCat, numDocsWithTerm, totalDocs):
    """Re-weight raw term counts into TF-IDF scores, in place.

    :param wordPerCat: {category: {term: raw count}}; mutated and returned.
    :param numDocsWithTerm: {term: number of documents containing the term}.
    :param totalDocs: total number of documents in the corpus.
    :return: the same ``wordPerCat`` mapping with each count scaled by
        ``log(totalDocs / doc_frequency)``.
    """
    for category, term_counts in wordPerCat.items():
        for term, count in term_counts.items():
            doc_frequency = int(numDocsWithTerm[term])
            # Standard IDF weighting: rarer terms get a larger multiplier.
            # (Uses the loop's ``count`` instead of re-looking the value up,
            # and drops the redundant double float() conversion.)
            term_counts[term] = float(count) * math.log(totalDocs / doc_frequency)
    return wordPerCat
def prior_probs_calc(docPerCat, totalDocs):
    """Return {category: log prior probability} from per-category doc counts."""
    return {
        category: math.log(float(count / totalDocs))
        for category, count in docPerCat.items()
    }
def pwc_calc(allPWC, word, docPerCat, wordPerCat, laplace, vocabSize, vocabSizePerCat):
for key in docPerCat.keys():
if word in wordPerCat[key]:
c_in_c = float(wordPerCat[key][word]) #count in class
else:
c_in_c = 0.0
try: #get probablity of class given word
allPWC[key].append(math.log(float((c_in_c + laplace)/((vocabSize) + vocabSizePerCat[key]))))
except:
allPWC[key] = [math.log(float((c_in_c + laplace)/((vocabSize) + vocabSizePerCat[key])))]
return allPWC
def class_prob_calc(allPWC, priorsDict):
    """Combine each class's log prior with the sum of its per-word log
    probabilities, yielding {class: total log probability}."""
    return {
        category: priorsDict[category] + sum(word_log_probs)
        for category, word_log_probs in allPWC.items()
    }
def predict_class(dictOfClassProb, y_true, classLetter, y_pred, numRight, arrayforvalidation, count):
    """Record a single prediction and append its running-accuracy log line.

    :param dictOfClassProb: {class: log probability} for the current document.
    :param y_true: list of true labels; ``classLetter`` is appended.
    :param classLetter: the document's true class label.
    :param y_pred: list of predicted labels; the argmax class is appended.
    :param numRight: number of correct predictions so far.
    :param arrayforvalidation: human-readable per-document result log.
    :param count: number of documents processed so far (assumed > 0).
    :return: tuple (y_true, y_pred, arrayforvalidation, numRight); the list
        arguments are mutated in place.
    """
    classPredicted = max(dictOfClassProb, key=dictOfClassProb.get)
    y_true.append(classLetter)
    y_pred.append(classPredicted)
    correct = classPredicted == classLetter
    if correct:
        numRight += 1
    # Single formatting path replaces the two near-identical string builds;
    # the emitted text is byte-identical to the original.
    verdict = "CORRECT" if correct else "WRONG"
    arrayforvalidation.append(
        verdict + " /// Class: " + classLetter + "; Predicted: " + classPredicted
        + "; Total accuracy: " + str((numRight / count) * 100)
    )
    return y_true, y_pred, arrayforvalidation, numRight
def tfidf_calc(wordPerCat, numDocsWithTerm, totalDocs):
for i in wordPerCat:
for key, value in wordPerCat[i].items():
deted = int(numDocsWithTerm[key])
wordPerCat[i][key] = float(float(wordPerCat[i][key]) * (math.log(totalDocs/deted)))
return wordPerCat
def prior_probs_calc(docPerCat, totalDocs):
priorsDict = {}
for key in docPerCat.keys():
priorsDict[key] = math.log(float(docPerCat[key]/totalDocs))
return priorsDict
def pwc_calc(allPWC, word, docPerCat, wordPerCat, laplace, vocabSize, vocabSizePerCat):
for key in docPerCat.keys():
if word in wordPerCat[key]:
c_in_c = float(wordPerCat[key][word])
else:
c_in_c = 0.0
try:
allPWC[key].append(math.log(float((c_in_c + laplace)/((vocabSize) + vocabSizePerCat[key]))))
except:
allPWC[key] = [math.log(float((c_in_c + laplace)/((vocabSize) + vocabSizePerCat[key])))]
return allPWC
def class_prob_calc(allPWC, priorsDict):
dictOfClassProb = {}
for key in allPWC.keys():
dictOfClassProb[key] = priorsDict[key] + sum(allPWC[key])
return dictOfClassProb
def predict_class(dictOfClassProb, y_true, classLetter, y_pred, numRight, arrayforvalidation, count):
classPredicted = max(dictOfClassProb, key=dictOfClassProb.get)
y_true.append(classLetter)
y_pred.append(classPredicted)
if classPredicted == classLetter:
numRight+=1
arrayforvalidation.append("CORRECT /// Class: " + classLetter + "; Predicted: " + classPredicted + "; Total accuracy: " + str((numRight/count)*100))
else:
arrayforvalidation.append("WRONG /// Class: " + classLetter + "; Predicted: " + classPredicted + "; Total accuracy: " + str((numRight/count)*100))
return y_true, y_pred, arrayforvalidation, numRight | true | true |
f72d81d5f8e09b9387a63dd039e55e4dcd97c85d | 183 | py | Python | tests/kallisticore/test_urls.py | jpmorganchase/kallisti-core | d9dfcaa2ec3c9cd26dd37b5f2c39c3788a3d05aa | [
"Apache-2.0"
] | 1 | 2022-03-03T14:27:25.000Z | 2022-03-03T14:27:25.000Z | tests/kallisticore/test_urls.py | jpmorganchase/kallisti-core | d9dfcaa2ec3c9cd26dd37b5f2c39c3788a3d05aa | [
"Apache-2.0"
] | null | null | null | tests/kallisticore/test_urls.py | jpmorganchase/kallisti-core | d9dfcaa2ec3c9cd26dd37b5f2c39c3788a3d05aa | [
"Apache-2.0"
] | 1 | 2022-03-09T05:57:55.000Z | 2022-03-09T05:57:55.000Z | from django.test import TestCase
from django.urls import reverse
class TestUrls(TestCase):
    """Checks that named URL patterns reverse to the expected paths."""
    def test_report(self):
        # The "report" route must resolve under the v1 API prefix.
        self.assertEqual("/api/v1/report", reverse("report"))
| 20.333333 | 61 | 0.726776 | from django.test import TestCase
from django.urls import reverse
class TestUrls(TestCase):
def test_report(self):
self.assertEqual("/api/v1/report", reverse("report"))
| true | true |
f72d83a4c745ecf9567206de1d9571707318fded | 27,329 | py | Python | virtual/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 4 | 2022-02-06T00:54:58.000Z | 2022-02-25T12:44:43.000Z | virtual/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 1 | 2022-03-17T13:12:17.000Z | 2022-03-17T13:12:17.000Z | virtual/lib/python3.8/site-packages/sqlalchemy/sql/visitors.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 1 | 2022-02-08T13:43:20.000Z | 2022-02-08T13:43:20.000Z | # sql/visitors.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for the purposes of applying
transformations to expressions.
Examples of how the visit system is used can be seen in the source code
of for example the ``sqlalchemy.sql.util`` and the ``sqlalchemy.sql.compiler``
modules. Some background on clause adaption is also at
https://techspot.zzzeek.org/2008/01/23/expression-transformations/ .
"""
from collections import deque
import itertools
import operator
from .. import exc
from .. import util
from ..util import langhelpers
from ..util import symbol
__all__ = [
"iterate",
"traverse_using",
"traverse",
"cloned_traverse",
"replacement_traverse",
"Traversible",
"TraversibleType",
"ExternalTraversal",
"InternalTraversal",
]
def _generate_compiler_dispatch(cls):
    """Generate a _compiler_dispatch() external traversal on classes with a
    __visit_name__ attribute.

    The generated method forwards to ``visit_<visit_name>`` on the visitor
    object passed in at call time.
    """
    visit_name = cls.__visit_name__
    if "_compiler_dispatch" in cls.__dict__:
        # class has a fixed _compiler_dispatch() method.
        # copy it to "original" so that we can get it back if
        # sqlalchemy.ext.compiles overrides it.
        cls._original_compiler_dispatch = cls._compiler_dispatch
        return
    if not isinstance(visit_name, util.compat.string_types):
        raise exc.InvalidRequestError(
            "__visit_name__ on class %s must be a string at the class level"
            % cls.__name__
        )
    name = "visit_%s" % visit_name
    # attrgetter is bound once per class; each dispatch call only pays for
    # the attribute lookup on the visitor instance.
    getter = operator.attrgetter(name)
    def _compiler_dispatch(self, visitor, **kw):
        """Look for an attribute named "visit_<visit_name>" on the
        visitor, and call it with the same kw params.
        """
        try:
            meth = getter(visitor)
        except AttributeError as err:
            # Let the visitor decide how to handle a missing visit_<name>
            # method (e.g. raise an "unsupported compilation" error).
            return visitor.visit_unsupported_compilation(self, err, **kw)
        else:
            return meth(self, **kw)
    cls._compiler_dispatch = (
        cls._original_compiler_dispatch
    ) = _compiler_dispatch
class TraversibleType(type):
    """Metaclass which assigns dispatch attributes to various kinds of
    "visitable" classes.

    Attributes include:

    * The ``_compiler_dispatch`` method, corresponding to ``__visit_name__``.
      This is called "external traversal" because the caller of each visit()
      method is responsible for sub-traversing the inner elements of each
      object. This is appropriate for string compilers and other traversals
      that need to call upon the inner elements in a specific pattern.

    * internal traversal collections ``_children_traversal``,
      ``_cache_key_traversal``, ``_copy_internals_traversal``, generated from
      an optional ``_traverse_internals`` collection of symbols which comes
      from the :class:`.InternalTraversal` list of symbols.  This is called
      "internal traversal".

    """
    def __init__(cls, clsname, bases, clsdict):
        # The abstract Traversible base itself gets no dispatcher; only
        # subclasses that declare a __visit_name__ of their own do.
        if clsname != "Traversible":
            if "__visit_name__" in clsdict:
                _generate_compiler_dispatch(cls)
        super(TraversibleType, cls).__init__(clsname, bases, clsdict)
class Traversible(util.with_metaclass(TraversibleType)):
    """Base class for visitable objects, applies the
    :class:`.visitors.TraversibleType` metaclass.

    """
    def __class_getitem__(cls, key):
        # allow generic classes in py3.9+
        return cls
    @util.preload_module("sqlalchemy.sql.traversals")
    def get_children(self, omit_attrs=(), **kw):
        r"""Return immediate child :class:`.visitors.Traversible`
        elements of this :class:`.visitors.Traversible`.

        This is used for visit traversal.

        \**kw may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).

        """
        traversals = util.preloaded.sql_traversals
        try:
            traverse_internals = self._traverse_internals
        except AttributeError:
            # user-defined classes may not have a _traverse_internals
            return []
        dispatch = traversals._get_children.run_generated_dispatch
        # Flatten the per-attribute child collections into one iterator,
        # skipping omitted attribute names and None-valued attributes.
        return itertools.chain.from_iterable(
            meth(obj, **kw)
            for attrname, obj, meth in dispatch(
                self, traverse_internals, "_generated_get_children_traversal"
            )
            if attrname not in omit_attrs and obj is not None
        )
class _InternalTraversalType(type):
    """Metaclass that builds the symbol -> "visit_xyz" dispatch table for
    the InternalTraversal class hierarchy."""
    def __init__(cls, clsname, bases, clsdict):
        if cls.__name__ in ("InternalTraversal", "ExtendedInternalTraversal"):
            lookup = {}
            for key, sym in clsdict.items():
                if key.startswith("dp_"):
                    # dp_foo attributes correspond to visit_foo visitor
                    # methods; index by both the symbol and its name.
                    visit_key = key.replace("dp_", "visit_")
                    sym_name = sym.name
                    assert sym_name not in lookup, sym_name
                    lookup[sym] = lookup[sym_name] = visit_key
            if hasattr(cls, "_dispatch_lookup"):
                # fold in entries inherited from a superclass table
                lookup.update(cls._dispatch_lookup)
            cls._dispatch_lookup = lookup
        super(_InternalTraversalType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatcher(visitor, internal_dispatch, method_name):
    """Generate and return a function ``method_name(self, visitor)`` that
    produces (attrname, attribute value, visitor method) triples for each
    internal-traversal entry the given visitor actually implements."""
    names = []
    for attrname, visit_sym in internal_dispatch:
        meth = visitor.dispatch(visit_sym)
        if meth:
            visit_name = ExtendedInternalTraversal._dispatch_lookup[visit_sym]
            names.append((attrname, visit_name))
    # Build the function source text; each entry becomes one tuple in the
    # returned list literal.
    code = (
        (" return [\n")
        + (
            ", \n".join(
                " (%r, self.%s, visitor.%s)"
                % (attrname, attrname, visit_name)
                for attrname, visit_name in names
            )
        )
        + ("\n ]\n")
    )
    meth_text = ("def %s(self, visitor):\n" % method_name) + code + "\n"
    # print(meth_text)
    return langhelpers._exec_code_in_env(meth_text, {}, method_name)
class InternalTraversal(util.with_metaclass(_InternalTraversalType, object)):
    r"""Defines visitor symbols used for internal traversal.
    The :class:`.InternalTraversal` class is used in two ways. One is that
    it can serve as the superclass for an object that implements the
    various visit methods of the class. The other is that the symbols
    themselves of :class:`.InternalTraversal` are used within
    the ``_traverse_internals`` collection. Such as, the :class:`.Case`
    object defines ``_traverse_internals`` as ::
    _traverse_internals = [
    ("value", InternalTraversal.dp_clauseelement),
    ("whens", InternalTraversal.dp_clauseelement_tuples),
    ("else_", InternalTraversal.dp_clauseelement),
    ]
    Above, the :class:`.Case` class indicates its internal state as the
    attributes named ``value``, ``whens``, and ``else_``. They each
    link to an :class:`.InternalTraversal` method which indicates the type
    of datastructure referred towards.
    Using the ``_traverse_internals`` structure, objects of type
    :class:`.InternalTraversible` will have the following methods automatically
    implemented:
    * :meth:`.Traversible.get_children`
    * :meth:`.Traversible._copy_internals`
    * :meth:`.Traversible._gen_cache_key`
    Subclasses can also implement these methods directly, particularly for the
    :meth:`.Traversible._copy_internals` method, when special steps
    are needed.
    .. versionadded:: 1.4
    """
    def dispatch(self, visit_symbol):
        """Given a method from :class:`.InternalTraversal`, return the
        corresponding method on a subclass.
        """
        # _dispatch_lookup is built by the metaclass and maps both the
        # dp_* symbol object and its string name to a "visit_*" method name
        name = self._dispatch_lookup[visit_symbol]
        return getattr(self, name, None)
    def run_generated_dispatch(
        self, target, internal_dispatch, generate_dispatcher_name
    ):
        """Invoke (generating it first if necessary) the dispatcher
        cached on ``target``'s class under ``generate_dispatcher_name``.
        """
        try:
            dispatcher = target.__class__.__dict__[generate_dispatcher_name]
        except KeyError:
            # most of the dispatchers are generated up front
            # in sqlalchemy/sql/__init__.py ->
            # traversals.py-> _preconfigure_traversals().
            # this block will generate any remaining dispatchers.
            dispatcher = self.generate_dispatch(
                target.__class__, internal_dispatch, generate_dispatcher_name
            )
        return dispatcher(target, self)
    def generate_dispatch(
        self, target_cls, internal_dispatch, generate_dispatcher_name
    ):
        """Generate a dispatcher function for ``internal_dispatch`` and
        cache it on ``target_cls`` under ``generate_dispatcher_name``.
        """
        dispatcher = _generate_dispatcher(
            self, internal_dispatch, generate_dispatcher_name
        )
        # assert isinstance(target_cls, type)
        setattr(target_cls, generate_dispatcher_name, dispatcher)
        return dispatcher
    dp_has_cache_key = symbol("HC")
    """Visit a :class:`.HasCacheKey` object."""
    dp_has_cache_key_list = symbol("HL")
    """Visit a list of :class:`.HasCacheKey` objects."""
    dp_clauseelement = symbol("CE")
    """Visit a :class:`_expression.ClauseElement` object."""
    dp_fromclause_canonical_column_collection = symbol("FC")
    """Visit a :class:`_expression.FromClause` object in the context of the
    ``columns`` attribute.
    The column collection is "canonical", meaning it is the originally
    defined location of the :class:`.ColumnClause` objects. Right now
    this means that the object being visited is a
    :class:`_expression.TableClause`
    or :class:`_schema.Table` object only.
    """
    dp_clauseelement_tuples = symbol("CTS")
    """Visit a list of tuples which contain :class:`_expression.ClauseElement`
    objects.
    """
    dp_clauseelement_list = symbol("CL")
    """Visit a list of :class:`_expression.ClauseElement` objects.
    """
    dp_clauseelement_tuple = symbol("CT")
    """Visit a tuple of :class:`_expression.ClauseElement` objects.
    """
    # visit a collection of executable options
    dp_executable_options = symbol("EO")
    # visit the with-context options collection
    dp_with_context_options = symbol("WC")
    dp_fromclause_ordered_set = symbol("CO")
    """Visit an ordered set of :class:`_expression.FromClause` objects. """
    dp_string = symbol("S")
    """Visit a plain string value.
    Examples include table and column names, bound parameter keys, special
    keywords such as "UNION", "UNION ALL".
    The string value is considered to be significant for cache key
    generation.
    """
    dp_string_list = symbol("SL")
    """Visit a list of strings."""
    dp_anon_name = symbol("AN")
    """Visit a potentially "anonymized" string value.
    The string value is considered to be significant for cache key
    generation.
    """
    dp_boolean = symbol("B")
    """Visit a boolean value.
    The boolean value is considered to be significant for cache key
    generation.
    """
    dp_operator = symbol("O")
    """Visit an operator.
    The operator is a function from the :mod:`sqlalchemy.sql.operators`
    module.
    The operator value is considered to be significant for cache key
    generation.
    """
    dp_type = symbol("T")
    """Visit a :class:`.TypeEngine` object
    The type object is considered to be significant for cache key
    generation.
    """
    dp_plain_dict = symbol("PD")
    """Visit a dictionary with string keys.
    The keys of the dictionary should be strings, the values should
    be immutable and hashable. The dictionary is considered to be
    significant for cache key generation.
    """
    dp_dialect_options = symbol("DO")
    """Visit a dialect options structure."""
    dp_string_clauseelement_dict = symbol("CD")
    """Visit a dictionary of string keys to :class:`_expression.ClauseElement`
    objects.
    """
    dp_string_multi_dict = symbol("MD")
    """Visit a dictionary of string keys to values which may either be
    plain immutable/hashable or :class:`.HasCacheKey` objects.
    """
    dp_annotations_key = symbol("AK")
    """Visit the _annotations_cache_key element.
    This is a dictionary of additional information about a ClauseElement
    that modifies its role. It should be included when comparing or caching
    objects, however generating this key is relatively expensive. Visitors
    should check the "_annotations" dict for non-None first before creating
    this key.
    """
    dp_plain_obj = symbol("PO")
    """Visit a plain python object.
    The value should be immutable and hashable, such as an integer.
    The value is considered to be significant for cache key generation.
    """
    dp_named_ddl_element = symbol("DD")
    """Visit a simple named DDL element.
    The current object used by this method is the :class:`.Sequence`.
    The object is only considered to be important for cache key generation
    as far as its name, but not any other aspects of it.
    """
    dp_prefix_sequence = symbol("PS")
    """Visit the sequence represented by :class:`_expression.HasPrefixes`
    or :class:`_expression.HasSuffixes`.
    """
    dp_table_hint_list = symbol("TH")
    """Visit the ``_hints`` collection of a :class:`_expression.Select`
    object.
    """
    # visit a setup-join tuple (undocumented upstream)
    dp_setup_join_tuple = symbol("SJ")
    # visit memoized select entities (undocumented upstream)
    dp_memoized_select_entities = symbol("ME")
    dp_statement_hint_list = symbol("SH")
    """Visit the ``_statement_hints`` collection of a
    :class:`_expression.Select`
    object.
    """
    dp_unknown_structure = symbol("UK")
    """Visit an unknown structure.
    """
    dp_dml_ordered_values = symbol("DML_OV")
    """Visit the values() ordered tuple list of an
    :class:`_expression.Update` object."""
    dp_dml_values = symbol("DML_V")
    """Visit the values() dictionary of a :class:`.ValuesBase`
    (e.g. Insert or Update) object.
    """
    dp_dml_multi_values = symbol("DML_MV")
    """Visit the values() multi-valued list of dictionaries of an
    :class:`_expression.Insert` object.
    """
    dp_propagate_attrs = symbol("PA")
    """Visit the propagate attrs dict. This hardcodes to the particular
    elements we care about right now."""
class ExtendedInternalTraversal(InternalTraversal):
    """Defines additional symbols that are useful in caching applications.
    Traversals for :class:`_expression.ClauseElement` objects only need to use
    those symbols present in :class:`.InternalTraversal`. However, for
    additional caching use cases within the ORM, symbols dealing with the
    :class:`.HasCacheKey` class are added here.
    """
    # these dp_* symbols are merged into _dispatch_lookup by the
    # _InternalTraversalType metaclass, same as on the superclass
    dp_ignore = symbol("IG")
    """Specify an object that should be ignored entirely.
    This currently applies function call argument caching where some
    arguments should not be considered to be part of a cache key.
    """
    dp_inspectable = symbol("IS")
    """Visit an inspectable object where the return value is a
    :class:`.HasCacheKey` object."""
    dp_multi = symbol("M")
    """Visit an object that may be a :class:`.HasCacheKey` or may be a
    plain hashable object."""
    dp_multi_list = symbol("MT")
    """Visit a tuple containing elements that may be :class:`.HasCacheKey` or
    may be a plain hashable object."""
    dp_has_cache_key_tuples = symbol("HT")
    """Visit a list of tuples which contain :class:`.HasCacheKey`
    objects.
    """
    dp_inspectable_list = symbol("IL")
    """Visit a list of inspectable objects which upon inspection are
    HasCacheKey objects."""
class ExternalTraversal(object):
    """Base class for visitor objects which can traverse externally using
    the :func:`.visitors.traverse` function.

    Direct usage of the :func:`.visitors.traverse` function is usually
    preferred.
    """

    # default, shared (empty) traversal options
    __traverse_options__ = {}

    def traverse_single(self, obj, **kw):
        # The first visitor in the chain that defines a visit_<name>
        # method for this object's __visit_name__ handles it.
        visit_attr = "visit_%s" % obj.__visit_name__
        for visitor in self.visitor_iterator:
            handler = getattr(visitor, visit_attr, None)
            if handler:
                return handler(obj, **kw)

    def iterate(self, obj):
        """Traverse the given expression structure, returning an iterator
        of all elements.
        """
        return iterate(obj, self.__traverse_options__)

    def traverse(self, obj):
        """Traverse and visit the given expression structure."""
        return traverse(obj, self.__traverse_options__, self._visitor_dict)

    @util.memoized_property
    def _visitor_dict(self):
        # map each "visit_xyz" method found on this object to the key
        # "xyz"; computed once per instance via memoized_property
        return {
            name[6:]: getattr(self, name)
            for name in dir(self)
            if name.startswith("visit_")
        }

    @property
    def visitor_iterator(self):
        """Iterate through this visitor and each 'chained' visitor."""
        visitor = self
        while visitor:
            yield visitor
            visitor = getattr(visitor, "_next", None)

    def chain(self, visitor):
        """'Chain' an additional ClauseVisitor onto this ClauseVisitor.

        The chained visitor will receive all visit events after this one.
        """
        last = list(self.visitor_iterator)[-1]
        last._next = visitor
        return self
class CloningExternalTraversal(ExternalTraversal):
    """Base class for visitor objects which can traverse using
    the :func:`.visitors.cloned_traverse` function.

    Direct usage of the :func:`.visitors.cloned_traverse` function is usually
    preferred.
    """

    def copy_and_process(self, list_):
        """Apply cloned traversal to the given list of elements, and return
        the new list.
        """
        return list(map(self.traverse, list_))

    def traverse(self, obj):
        """Traverse and visit the given expression structure."""
        options = self.__traverse_options__
        return cloned_traverse(obj, options, self._visitor_dict)
class ReplacingExternalTraversal(CloningExternalTraversal):
    """Base class for visitor objects which can traverse using
    the :func:`.visitors.replacement_traverse` function.

    Direct usage of the :func:`.visitors.replacement_traverse` function is
    usually preferred.
    """

    def replace(self, elem):
        """Receive pre-copied elements during a cloning traversal.

        If the method returns a new element, the element is used
        instead of creating a simple copy of the element.  Traversal
        will halt on the newly returned element if it is re-encountered.
        """
        return None

    def traverse(self, obj):
        """Traverse and visit the given expression structure."""

        def _chained_replace(elem):
            # first visitor in the chain producing a non-None
            # replacement wins
            for visitor in self.visitor_iterator:
                replacement = visitor.replace(elem)
                if replacement is not None:
                    return replacement

        return replacement_traverse(
            obj, self.__traverse_options__, _chained_replace
        )
# backwards compatibility: pre-1.4 public names for the traversal classes
Visitable = Traversible  # legacy alias
VisitableType = TraversibleType  # legacy alias
ClauseVisitor = ExternalTraversal  # legacy alias
CloningVisitor = CloningExternalTraversal  # legacy alias
ReplacingCloningVisitor = ReplacingExternalTraversal  # legacy alias
def iterate(obj, opts=util.immutabledict()):
    r"""Traverse the given expression structure, returning an iterator.

    Traversal is configured to be breadth-first.

    The central API feature used here is the
    :meth:`_expression.ClauseElement.get_children` method of
    :class:`_expression.ClauseElement` objects, which returns all the
    :class:`_expression.ClauseElement` objects associated with a given
    element (e.g. a :class:`.Case` refers to the column elements in its
    "whens" and "else\_" members).

    :param obj: :class:`_expression.ClauseElement` structure to be traversed
    :param opts: dictionary of iteration options; usually empty in
     modern usage.
    """
    yield obj
    children = obj.get_children(**opts)
    if not children:
        return
    # breadth-first: a queue of child iterators, consumed left to right
    queue = deque([children])
    while queue:
        frontier = queue.popleft()
        for element in frontier:
            yield element
            queue.append(element.get_children(**opts))
def traverse_using(iterator, obj, visitors):
    """Visit the given expression structure using the given iterator of
    objects.

    Usually called internally as the result of the :func:`.visitors.traverse`
    function.

    :param iterator: an iterable or sequence yielding
     :class:`_expression.ClauseElement` structures; assumed to be the
     product of the :func:`.visitors.iterate` function.
    :param obj: the :class:`_expression.ClauseElement` that was the
     target of the :func:`.iterate` call; returned unchanged.
    :param visitors: dictionary mapping ``__visit_name__`` strings to
     visit callables.  See :func:`.traverse` for details.

    .. seealso::
        :func:`.traverse`
    """
    for element in iterator:
        handler = visitors.get(element.__visit_name__, None)
        if handler:
            handler(element)
    return obj
def traverse(obj, opts, visitors):
    """Traverse and visit the given expression structure using the default
    iterator.

    e.g.::

        from sqlalchemy.sql import visitors

        stmt = select(some_table).where(some_table.c.foo == 'bar')

        def visit_bindparam(bind_param):
            print("found bound value: %s" % bind_param.value)

        visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})

    Iteration uses :func:`.visitors.iterate`, which performs a
    breadth-first traversal using a stack.

    :param obj: :class:`_expression.ClauseElement` structure to be traversed
    :param opts: dictionary of iteration options; usually empty in
     modern usage.
    :param visitors: dictionary keyed on ``__visit_name__`` strings, with
     visitor callables as values.
    """
    elements = iterate(obj, opts)
    return traverse_using(elements, obj, visitors)
def cloned_traverse(obj, opts, visitors):
    """Clone the given expression structure, allowing modifications by
    visitors.
    Traversal usage is the same as that of :func:`.visitors.traverse`.
    The visitor functions present in the ``visitors`` dictionary may also
    modify the internals of the given structure as the traversal proceeds.
    The central API feature used by the :func:`.visitors.cloned_traverse`
    and :func:`.visitors.replacement_traverse` functions, in addition to the
    :meth:`_expression.ClauseElement.get_children`
    function that is used to achieve
    the iteration, is the :meth:`_expression.ClauseElement._copy_internals`
    method.
    For a :class:`_expression.ClauseElement`
    structure to support cloning and replacement
    traversals correctly, it needs to be able to pass a cloning function into
    its internal members in order to make copies of them.
    .. seealso::
        :func:`.visitors.traverse`
        :func:`.visitors.replacement_traverse`
    """
    # memo of already-cloned elements, keyed on id() of the original
    cloned = {}
    stop_on = set(opts.get("stop_on", []))
    def deferred_copy_internals(obj):
        # hook handed to _clone()/_copy_internals() so elements can
        # restart a full cloned traversal on a sub-structure
        return cloned_traverse(obj, opts, visitors)
    def clone(elem, **kw):
        # elements in stop_on are passed through unchanged
        if elem in stop_on:
            return elem
        else:
            if id(elem) not in cloned:
                if "replace" in kw:
                    # a caller-supplied replace hook takes precedence
                    # over the default _clone() copy
                    newelem = kw["replace"](elem)
                    if newelem is not None:
                        cloned[id(elem)] = newelem
                        return newelem
                # record the clone BEFORE recursing into internals so
                # that cycles resolve to the same clone
                cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw)
                newelem._copy_internals(clone=clone, **kw)
                # visitor callback runs on the freshly-made clone
                meth = visitors.get(newelem.__visit_name__, None)
                if meth:
                    meth(newelem)
            return cloned[id(elem)]
    if obj is not None:
        obj = clone(
            obj, deferred_copy_internals=deferred_copy_internals, **opts
        )
    clone = None # remove gc cycles
    return obj
def replacement_traverse(obj, opts, replace):
    """Clone the given expression structure, allowing element
    replacement by a given replacement function.
    This function is very similar to the :func:`.visitors.cloned_traverse`
    function, except instead of being passed a dictionary of visitors, all
    elements are unconditionally passed into the given replace function.
    The replace function then has the option to return an entirely new object
    which will replace the one given. If it returns ``None``, then the object
    is kept in place.
    The difference in usage between :func:`.visitors.cloned_traverse` and
    :func:`.visitors.replacement_traverse` is that in the former case, an
    already-cloned object is passed to the visitor function, and the visitor
    function can then manipulate the internal state of the object.
    In the case of the latter, the visitor function should only return an
    entirely different object, or do nothing.
    The use case for :func:`.visitors.replacement_traverse` is that of
    replacing a FROM clause inside of a SQL structure with a different one,
    as is a common use case within the ORM.
    """
    # memo of already-cloned elements, keyed on id() of the original
    cloned = {}
    stop_on = {id(x) for x in opts.get("stop_on", [])}
    def deferred_copy_internals(obj):
        # hook allowing elements to restart a replacement traversal on
        # a sub-structure
        return replacement_traverse(obj, opts, replace)
    def clone(elem, **kw):
        if (
            id(elem) in stop_on
            or "no_replacement_traverse" in elem._annotations
        ):
            # opted out of replacement; pass through unchanged
            return elem
        else:
            newelem = replace(elem)
            if newelem is not None:
                # replacement produced; don't descend into it again
                stop_on.add(id(newelem))
                return newelem
            else:
                # base "already seen" on id(), not hash, so that we don't
                # replace an Annotated element with its non-annotated one, and
                # vice versa
                id_elem = id(elem)
                if id_elem not in cloned:
                    if "replace" in kw:
                        # caller-supplied replace hook takes precedence
                        newelem = kw["replace"](elem)
                        if newelem is not None:
                            cloned[id_elem] = newelem
                            return newelem
                    # record the clone BEFORE recursing so cycles
                    # resolve to the same clone
                    cloned[id_elem] = newelem = elem._clone(**kw)
                    newelem._copy_internals(clone=clone, **kw)
                return cloned[id_elem]
    if obj is not None:
        obj = clone(
            obj, deferred_copy_internals=deferred_copy_internals, **opts
        )
    clone = None  # remove gc cycles
    return obj
| 32.038687 | 79 | 0.663142 |
from collections import deque
import itertools
import operator
from .. import exc
from .. import util
from ..util import langhelpers
from ..util import symbol
# explicit public API of this module
__all__ = [
    "iterate",
    "traverse_using",
    "traverse",
    "cloned_traverse",
    "replacement_traverse",
    "Traversible",
    "TraversibleType",
    "ExternalTraversal",
    "InternalTraversal",
]
def _generate_compiler_dispatch(cls):
    """Assign a ``_compiler_dispatch`` method to ``cls`` based on its
    ``__visit_name__`` attribute.

    The generated method looks up ``visit_<visit_name>`` on a visitor
    object and invokes it.
    """
    visit_name = cls.__visit_name__
    if "_compiler_dispatch" in cls.__dict__:
        # class defines its own dispatch method directly; keep it, but
        # also remember it under _original_compiler_dispatch
        cls._original_compiler_dispatch = cls._compiler_dispatch
        return
    if not isinstance(visit_name, util.compat.string_types):
        raise exc.InvalidRequestError(
            "__visit_name__ on class %s must be a string at the class level"
            % cls.__name__
        )
    name = "visit_%s" % visit_name
    # attrgetter is bound once here so the generated method avoids
    # re-building the attribute name per call
    getter = operator.attrgetter(name)
    def _compiler_dispatch(self, visitor, **kw):
        # look up visitor.visit_<name>; fall back to the visitor's
        # unsupported-compilation hook when the method is absent
        try:
            meth = getter(visitor)
        except AttributeError as err:
            return visitor.visit_unsupported_compilation(self, err, **kw)
        else:
            return meth(self, **kw)
    cls._compiler_dispatch = (
        cls._original_compiler_dispatch
    ) = _compiler_dispatch
class TraversibleType(type):
    """Metaclass which generates a ``_compiler_dispatch`` method for any
    subclass (other than ``Traversible`` itself) that declares
    ``__visit_name__`` in its class body.
    """

    def __init__(cls, clsname, bases, clsdict):
        if clsname != "Traversible":
            if "__visit_name__" in clsdict:
                _generate_compiler_dispatch(cls)
        super(TraversibleType, cls).__init__(clsname, bases, clsdict)
class Traversible(util.with_metaclass(TraversibleType)):
    """Base class for objects that can be visited/traversed."""

    def __class_getitem__(cls, key):
        # allow subscript (generic-style) notation on the class itself
        return cls

    @util.preload_module("sqlalchemy.sql.traversals")
    def get_children(self, omit_attrs=(), **kw):
        """Return immediate child elements of this object, derived from
        the ``_traverse_internals`` collection.

        :param omit_attrs: attribute names to exclude from the result.
        """
        traversals = util.preloaded.sql_traversals
        try:
            traverse_internals = self._traverse_internals
        except AttributeError:
            # objects with no _traverse_internals have no children
            return []
        # dispatch yields (attrname, attribute value, visitor method)
        # tuples; flatten each method's result into one iterable
        dispatch = traversals._get_children.run_generated_dispatch
        return itertools.chain.from_iterable(
            meth(obj, **kw)
            for attrname, obj, meth in dispatch(
                self, traverse_internals, "_generated_get_children_traversal"
            )
            if attrname not in omit_attrs and obj is not None
        )
class _InternalTraversalType(type):
    """Metaclass for :class:`.InternalTraversal` which builds the
    ``_dispatch_lookup`` mapping of ``dp_*`` symbols (and their string
    names) to ``visit_*`` method names.
    """

    def __init__(cls, clsname, bases, clsdict):
        if cls.__name__ in ("InternalTraversal", "ExtendedInternalTraversal"):
            lookup = {}
            for key, sym in clsdict.items():
                if key.startswith("dp_"):
                    visit_key = key.replace("dp_", "visit_")
                    sym_name = sym.name
                    # each symbol's short name must be unique in the class
                    assert sym_name not in lookup, sym_name
                    # index by both the symbol object and its name
                    lookup[sym] = lookup[sym_name] = visit_key
            if hasattr(cls, "_dispatch_lookup"):
                # merge in entries inherited from the superclass
                lookup.update(cls._dispatch_lookup)
            cls._dispatch_lookup = lookup
        super(_InternalTraversalType, cls).__init__(clsname, bases, clsdict)
def _generate_dispatcher(visitor, internal_dispatch, method_name):
    """Build and return a code-generated dispatch function named
    ``method_name``.

    The generated function returns a list of
    ``(attrname, attribute value, visitor method)`` tuples, one for each
    entry of ``internal_dispatch`` that the given ``visitor`` handles.
    """
    names = []
    for attrname, visit_sym in internal_dispatch:
        # include only attributes the visitor actually has a method for
        meth = visitor.dispatch(visit_sym)
        if meth:
            visit_name = ExtendedInternalTraversal._dispatch_lookup[visit_sym]
            names.append((attrname, visit_name))
    # assemble the source text of the generated method body
    code = (
        (" return [\n")
        + (
            ", \n".join(
                " (%r, self.%s, visitor.%s)"
                % (attrname, attrname, visit_name)
                for attrname, visit_name in names
            )
        )
        + ("\n ]\n")
    )
    meth_text = ("def %s(self, visitor):\n" % method_name) + code + "\n"
    # compile the generated source and return the resulting function
    return langhelpers._exec_code_in_env(meth_text, {}, method_name)
class InternalTraversal(util.with_metaclass(_InternalTraversalType, object)):
    """Defines visitor symbols used for internal traversal.

    The ``dp_*`` symbols below are referenced within
    ``_traverse_internals`` collections; the metaclass maps each symbol
    to a corresponding ``visit_*`` method name via ``_dispatch_lookup``.
    """

    def dispatch(self, visit_symbol):
        """Given a ``dp_*`` symbol, return the corresponding ``visit_*``
        method on this object, or ``None`` if not implemented.
        """
        name = self._dispatch_lookup[visit_symbol]
        return getattr(self, name, None)
    def run_generated_dispatch(
        self, target, internal_dispatch, generate_dispatcher_name
    ):
        """Invoke the dispatcher cached on ``target``'s class, generating
        and caching it first if it is not yet present.
        """
        try:
            dispatcher = target.__class__.__dict__[generate_dispatcher_name]
        except KeyError:
            # not pre-generated; build and cache the dispatcher now
            dispatcher = self.generate_dispatch(
                target.__class__, internal_dispatch, generate_dispatcher_name
            )
        return dispatcher(target, self)
    def generate_dispatch(
        self, target_cls, internal_dispatch, generate_dispatcher_name
    ):
        """Generate a dispatcher for ``internal_dispatch`` and cache it on
        ``target_cls`` under ``generate_dispatcher_name``.
        """
        dispatcher = _generate_dispatcher(
            self, internal_dispatch, generate_dispatcher_name
        )
        setattr(target_cls, generate_dispatcher_name, dispatcher)
        return dispatcher
    # --- traversal symbols; each names the kind of datastructure the
    # traversed attribute holds ---
    dp_has_cache_key = symbol("HC")  # a HasCacheKey object
    dp_has_cache_key_list = symbol("HL")  # list of HasCacheKey objects
    dp_clauseelement = symbol("CE")  # a ClauseElement
    dp_fromclause_canonical_column_collection = symbol("FC")
    dp_clauseelement_tuples = symbol("CTS")  # list of ClauseElement tuples
    dp_clauseelement_list = symbol("CL")  # list of ClauseElement
    dp_clauseelement_tuple = symbol("CT")  # tuple of ClauseElement
    dp_executable_options = symbol("EO")
    dp_with_context_options = symbol("WC")
    dp_fromclause_ordered_set = symbol("CO")  # ordered set of FromClause
    dp_string = symbol("S")  # plain string value
    dp_string_list = symbol("SL")  # list of strings
    dp_anon_name = symbol("AN")  # potentially anonymized string
    dp_boolean = symbol("B")  # boolean value
    dp_operator = symbol("O")  # operator function
    dp_type = symbol("T")  # TypeEngine object
    dp_plain_dict = symbol("PD")  # dict with string keys
    dp_dialect_options = symbol("DO")  # dialect options structure
    dp_string_clauseelement_dict = symbol("CD")  # str -> ClauseElement dict
    dp_string_multi_dict = symbol("MD")  # str -> hashable/HasCacheKey dict
    dp_annotations_key = symbol("AK")  # _annotations cache key element
    dp_plain_obj = symbol("PO")  # plain immutable/hashable object
    dp_named_ddl_element = symbol("DD")  # named DDL element
    dp_prefix_sequence = symbol("PS")  # HasPrefixes/HasSuffixes sequence
    dp_table_hint_list = symbol("TH")  # Select._hints collection
    dp_setup_join_tuple = symbol("SJ")
    dp_memoized_select_entities = symbol("ME")
    dp_statement_hint_list = symbol("SH")  # Select._statement_hints
    dp_unknown_structure = symbol("UK")  # unknown structure
    dp_dml_ordered_values = symbol("DML_OV")  # Update ordered values()
    dp_dml_values = symbol("DML_V")  # Insert/Update values() dict
    dp_dml_multi_values = symbol("DML_MV")  # Insert multi-values list
    dp_propagate_attrs = symbol("PA")  # propagate attrs dict
class ExtendedInternalTraversal(InternalTraversal):
    """Defines additional symbols useful in caching applications beyond
    those in :class:`.InternalTraversal`.
    """

    dp_ignore = symbol("IG")  # object excluded from the cache key entirely
    dp_inspectable = symbol("IS")  # inspectable resolving to HasCacheKey
    dp_multi = symbol("M")  # HasCacheKey or plain hashable
    dp_multi_list = symbol("MT")  # tuple of HasCacheKey/plain hashables
    dp_has_cache_key_tuples = symbol("HT")  # list of HasCacheKey tuples
    dp_inspectable_list = symbol("IL")  # list of inspectables
class ExternalTraversal(object):
    """Base class for visitor objects which can traverse externally using
    the :func:`.visitors.traverse` function.
    """

    # default, shared (empty) traversal options
    __traverse_options__ = {}

    def traverse_single(self, obj, **kw):
        # dispatch obj to the first visitor in the chain that defines a
        # visit_<visit_name> method for it
        for v in self.visitor_iterator:
            meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
            if meth:
                return meth(obj, **kw)
    def iterate(self, obj):
        """Return an iterator of all elements in the given structure."""
        return iterate(obj, self.__traverse_options__)
    def traverse(self, obj):
        """Traverse and visit the given expression structure."""
        return traverse(obj, self.__traverse_options__, self._visitor_dict)
    @util.memoized_property
    def _visitor_dict(self):
        # map each "visit_xyz" method on this object to key "xyz";
        # memoized per instance
        visitors = {}
        for name in dir(self):
            if name.startswith("visit_"):
                visitors[name[6:]] = getattr(self, name)
        return visitors
    @property
    def visitor_iterator(self):
        """Iterate through this visitor and each 'chained' visitor."""
        v = self
        while v:
            yield v
            v = getattr(v, "_next", None)
    def chain(self, visitor):
        """'Chain' an additional visitor onto this one; the chained
        visitor receives all visit events after this one.
        """
        tail = list(self.visitor_iterator)[-1]
        tail._next = visitor
        return self
class CloningExternalTraversal(ExternalTraversal):
    """Base class for visitor objects which can traverse using
    the :func:`.visitors.cloned_traverse` function.
    """

    def copy_and_process(self, list_):
        """Apply cloned traversal to each element of the given list and
        return the new list.
        """
        return [self.traverse(x) for x in list_]
    def traverse(self, obj):
        """Traverse and visit the given expression structure, returning
        a cloned copy.
        """
        return cloned_traverse(
            obj, self.__traverse_options__, self._visitor_dict
        )
class ReplacingExternalTraversal(CloningExternalTraversal):
    """Base class for visitor objects which can traverse using
    the :func:`.visitors.replacement_traverse` function.
    """

    def replace(self, elem):
        """Receive pre-copied elements; returning a non-None value
        replaces the element instead of cloning it.
        """
        return None
    def traverse(self, obj):
        """Traverse and visit the given expression structure."""
        def replace(elem):
            # first visitor in the chain producing a replacement wins
            for v in self.visitor_iterator:
                e = v.replace(elem)
                if e is not None:
                    return e
        return replacement_traverse(obj, self.__traverse_options__, replace)
# backwards-compatibility aliases for the pre-1.4 public names
Visitable = Traversible  # legacy alias
VisitableType = TraversibleType  # legacy alias
ClauseVisitor = ExternalTraversal  # legacy alias
CloningVisitor = CloningExternalTraversal  # legacy alias
ReplacingCloningVisitor = ReplacingExternalTraversal  # legacy alias
def iterate(obj, opts=util.immutabledict()):
    """Traverse the given expression structure breadth-first, yielding
    ``obj`` and then every element reachable via ``get_children``.

    :param obj: structure to traverse; must supply ``get_children``.
    :param opts: iteration options passed through to ``get_children``;
     usually empty.
    """
    yield obj
    children = obj.get_children(**opts)
    if not children:
        return
    # queue of child iterators, consumed left to right (breadth-first)
    stack = deque([children])
    while stack:
        t_iterator = stack.popleft()
        for t in t_iterator:
            yield t
            stack.append(t.get_children(**opts))
def traverse_using(iterator, obj, visitors):
    """Visit each element produced by ``iterator`` using the ``visitors``
    dictionary (keyed on ``__visit_name__``), then return ``obj``.
    """
    for element in iterator:
        handler = visitors.get(element.__visit_name__, None)
        if handler:
            handler(element)
    return obj
def traverse(obj, opts, visitors):
    """Traverse ``obj`` with the default breadth-first iterator and
    dispatch each element through the ``visitors`` dictionary.
    """
    elements = iterate(obj, opts)
    return traverse_using(elements, obj, visitors)
def cloned_traverse(obj, opts, visitors):
    """Clone the given expression structure, allowing modifications by
    the given ``visitors`` dictionary as the traversal proceeds.
    """
    # memo of already-cloned elements, keyed on id() of the original
    cloned = {}
    stop_on = set(opts.get("stop_on", []))
    def deferred_copy_internals(obj):
        # hook allowing elements to restart a full cloned traversal
        # on a sub-structure
        return cloned_traverse(obj, opts, visitors)
    def clone(elem, **kw):
        # elements in stop_on pass through unchanged
        if elem in stop_on:
            return elem
        else:
            if id(elem) not in cloned:
                if "replace" in kw:
                    # caller-supplied replace hook takes precedence
                    newelem = kw["replace"](elem)
                    if newelem is not None:
                        cloned[id(elem)] = newelem
                        return newelem
                # record the clone BEFORE recursing into internals so
                # that cycles resolve to the same clone
                cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw)
                newelem._copy_internals(clone=clone, **kw)
                # visitor callback runs on the freshly-made clone
                meth = visitors.get(newelem.__visit_name__, None)
                if meth:
                    meth(newelem)
            return cloned[id(elem)]
    if obj is not None:
        obj = clone(
            obj, deferred_copy_internals=deferred_copy_internals, **opts
        )
    clone = None  # remove gc cycles
    return obj
def replacement_traverse(obj, opts, replace):
    """Clone the given expression structure, allowing element
    replacement by the given ``replace`` function; a ``None`` return from
    ``replace`` keeps the element (as a clone) in place.
    """
    # memo of already-cloned elements, keyed on id() of the original
    cloned = {}
    stop_on = {id(x) for x in opts.get("stop_on", [])}
    def deferred_copy_internals(obj):
        # hook allowing elements to restart a replacement traversal
        return replacement_traverse(obj, opts, replace)
    def clone(elem, **kw):
        if (
            id(elem) in stop_on
            or "no_replacement_traverse" in elem._annotations
        ):
            # opted out of replacement; pass through unchanged
            return elem
        else:
            newelem = replace(elem)
            if newelem is not None:
                # replacement produced; don't descend into it again
                stop_on.add(id(newelem))
                return newelem
            else:
                # base "already seen" on id(), not hash, so we don't
                # replace an Annotated element with its non-annotated one, and
                # vice versa
                id_elem = id(elem)
                if id_elem not in cloned:
                    if "replace" in kw:
                        # caller-supplied replace hook takes precedence
                        newelem = kw["replace"](elem)
                        if newelem is not None:
                            cloned[id_elem] = newelem
                            return newelem
                    # record the clone BEFORE recursing so cycles
                    # resolve to the same clone
                    cloned[id_elem] = newelem = elem._clone(**kw)
                    newelem._copy_internals(clone=clone, **kw)
                return cloned[id_elem]
    if obj is not None:
        obj = clone(
            obj, deferred_copy_internals=deferred_copy_internals, **opts
        )
    clone = None  # remove gc cycles
    return obj
| true | true |
f72d842ae758c78892bcd330bd0ebd4e7323263e | 276 | py | Python | latex2minizinc/GenBelongsTo.py | rafaellc28/Latex2MiniZinc | 5c255a712156b915469329a07d13f1e984cbd247 | [
"MIT"
] | null | null | null | latex2minizinc/GenBelongsTo.py | rafaellc28/Latex2MiniZinc | 5c255a712156b915469329a07d13f1e984cbd247 | [
"MIT"
] | null | null | null | latex2minizinc/GenBelongsTo.py | rafaellc28/Latex2MiniZinc | 5c255a712156b915469329a07d13f1e984cbd247 | [
"MIT"
] | null | null | null | from GenObj import *
class GenBelongsTo(GenObj):
    """Named generated object that also carries the index of a statement.

    NOTE(review): appears to record that the named entity "belongs to"
    the statement at ``stmtIndex`` — confirm against callers.
    """

    def __init__(self, name, stmtIndex):
        super(GenBelongsTo, self).__init__(name)
        # index of the associated statement
        self.stmtIndex = stmtIndex
    def getStmtIndex(self):
        # accessor for the statement index
        return self.stmtIndex
    def setStmtIndex(self, stmtIndex):
        # mutator for the statement index
        self.stmtIndex = stmtIndex
| 21.230769 | 42 | 0.76087 | from GenObj import *
class GenBelongsTo(GenObj):
    """Named generated object that also carries the index of a statement.

    NOTE(review): appears to record that the named entity "belongs to"
    the statement at ``stmtIndex`` — confirm against callers.
    """

    def __init__(self, name, stmtIndex):
        super(GenBelongsTo, self).__init__(name)
        # index of the associated statement
        self.stmtIndex = stmtIndex
    def getStmtIndex(self):
        # accessor for the statement index
        return self.stmtIndex
    def setStmtIndex(self, stmtIndex):
        # mutator for the statement index
        self.stmtIndex = stmtIndex
| true | true |
f72d85337df8ef4d239e1842d4ab2254edd696b6 | 8,602 | py | Python | venv/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop_duplicates.py | r-graves/demo_lab | 729cdf61774bf32d2c07ca68bf70e65470700cc2 | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | venv/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop_duplicates.py | r-graves/demo_lab | 729cdf61774bf32d2c07ca68bf70e65470700cc2 | [
"MIT"
] | 8 | 2021-09-22T12:47:32.000Z | 2022-01-14T21:30:38.000Z | venv/lib/python3.10/site-packages/pandas/tests/series/methods/test_drop_duplicates.py | r-graves/demo_lab | 729cdf61774bf32d2c07ca68bf70e65470700cc2 | [
"MIT"
] | 1 | 2021-11-18T10:45:16.000Z | 2021-11-18T10:45:16.000Z | import numpy as np
import pytest
from pandas import (
NA,
Categorical,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize(
    "keep, expected",
    [
        ("first", Series([False, False, False, False, True, True, False])),
        ("last", Series([False, True, True, False, False, False, False])),
        (False, Series([False, True, True, False, True, True, False])),
    ],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
    # duplicated()/drop_duplicates() across all numpy dtypes for each
    # ``keep`` option; expected is the duplicated() mask
    tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
    if tc.dtype == "bool":
        pytest.skip("tested separately in test_drop_duplicates_bool")
    tm.assert_series_equal(tc.duplicated(keep=keep), expected)
    tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
    sc = tc.copy()
    # inplace=True must mutate in place and return None
    return_value = sc.drop_duplicates(keep=keep, inplace=True)
    assert return_value is None
    tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
    "keep, expected",
    [
        ("first", Series([False, False, True, True])),
        ("last", Series([True, True, False, False])),
        (False, Series([True, True, True, True])),
    ],
)
def test_drop_duplicates_bool(keep, expected):
    # bool dtype handled separately from the generic dtype test;
    # expected is the duplicated() mask for each ``keep`` option
    tc = Series([True, False, True, False])
    tm.assert_series_equal(tc.duplicated(keep=keep), expected)
    tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
    sc = tc.copy()
    # inplace=True must mutate in place and return None
    return_value = sc.drop_duplicates(keep=keep, inplace=True)
    tm.assert_series_equal(sc, tc[~expected])
    assert return_value is None
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
    # empty series and all-unique series: nothing is duplicated and
    # drop_duplicates returns an equal (but shallow-copied) series
    tc = Series(values, dtype=np.dtype(any_numpy_dtype))
    expected = Series([False] * len(tc), dtype="bool")
    if tc.dtype == "bool":
        # 0 -> False and 1-> True
        # any other value would be duplicated
        tc = tc[:2]
        expected = expected[:2]
    tm.assert_series_equal(tc.duplicated(keep=keep), expected)
    result_dropped = tc.drop_duplicates(keep=keep)
    tm.assert_series_equal(result_dropped, tc)
    # validate shallow copy
    assert result_dropped is not tc
class TestSeriesDropDuplicates:
    """Tests for duplicated()/drop_duplicates() on Categorical-dtype
    Series, across several category dtypes and both ordered settings."""

    @pytest.fixture(
        params=["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"]
    )
    def dtype(self, request):
        # numpy dtype string used for the underlying category values
        return request.param
    @pytest.fixture
    def cat_series1(self, dtype, ordered):
        # Test case 1: values [1, 2, 3, 3] over categories [1..5]
        cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
        input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
        cat = Categorical(input1, categories=cat_array, ordered=ordered)
        tc1 = Series(cat)
        return tc1
    def test_drop_duplicates_categorical_non_bool(self, cat_series1):
        # keep="first" (default): only the second 3 is a duplicate
        tc1 = cat_series1
        expected = Series([False, False, False, True])
        result = tc1.duplicated()
        tm.assert_series_equal(result, expected)
        result = tc1.drop_duplicates()
        tm.assert_series_equal(result, tc1[~expected])
        sc = tc1.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])
    def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):
        # keep="last": the first 3 is the duplicate
        tc1 = cat_series1
        expected = Series([False, False, True, False])
        result = tc1.duplicated(keep="last")
        tm.assert_series_equal(result, expected)
        result = tc1.drop_duplicates(keep="last")
        tm.assert_series_equal(result, tc1[~expected])
        sc = tc1.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])
    def test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):
        # keep=False: both 3s are dropped
        tc1 = cat_series1
        expected = Series([False, False, True, True])
        result = tc1.duplicated(keep=False)
        tm.assert_series_equal(result, expected)
        result = tc1.drop_duplicates(keep=False)
        tm.assert_series_equal(result, tc1[~expected])
        sc = tc1.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])
    @pytest.fixture
    def cat_series2(self, dtype, ordered):
        # Test case 2; TODO: better name
        # values [1, 2, 3, 5, 3, 2, 4] over categories [1..5]
        cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
        input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
        cat = Categorical(input2, categories=cat_array, ordered=ordered)
        tc2 = Series(cat)
        return tc2
    def test_drop_duplicates_categorical_non_bool2(self, cat_series2):
        # Test case 2; TODO: better name
        # keep="first" (default) over values with two repeated entries
        tc2 = cat_series2
        expected = Series([False, False, False, False, True, True, False])
        result = tc2.duplicated()
        tm.assert_series_equal(result, expected)
        result = tc2.drop_duplicates()
        tm.assert_series_equal(result, tc2[~expected])
        sc = tc2.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])
    def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):
        # keep="last": earlier occurrences of repeated values are dropped
        tc2 = cat_series2
        expected = Series([False, True, True, False, False, False, False])
        result = tc2.duplicated(keep="last")
        tm.assert_series_equal(result, expected)
        result = tc2.drop_duplicates(keep="last")
        tm.assert_series_equal(result, tc2[~expected])
        sc = tc2.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])
    def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series2):
        # keep=False: every occurrence of a repeated value is dropped
        tc2 = cat_series2
        expected = Series([False, True, True, False, True, True, False])
        result = tc2.duplicated(keep=False)
        tm.assert_series_equal(result, expected)
        result = tc2.drop_duplicates(keep=False)
        tm.assert_series_equal(result, tc2[~expected])
        sc = tc2.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])
    def test_drop_duplicates_categorical_bool(self, ordered):
        # boolean categories, all three keep options in one test
        tc = Series(
            Categorical(
                [True, False, True, False], categories=[True, False], ordered=ordered
            )
        )
        expected = Series([False, False, True, True])
        tm.assert_series_equal(tc.duplicated(), expected)
        tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])
        expected = Series([True, True, False, False])
        tm.assert_series_equal(tc.duplicated(keep="last"), expected)
        tm.assert_series_equal(tc.drop_duplicates(keep="last"), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])
        expected = Series([True, True, True, True])
        tm.assert_series_equal(tc.duplicated(keep=False), expected)
        tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])
    def test_drop_duplicates_categorical_bool_na(self):
        # GH#44351
        # NA in a boolean categorical is kept as its own (first) value
        ser = Series(
            Categorical(
                [True, False, True, False, NA], categories=[True, False], ordered=True
            )
        )
        result = ser.drop_duplicates()
        expected = Series(
            Categorical([True, False, np.nan], categories=[True, False], ordered=True),
            index=[0, 1, 4],
        )
        tm.assert_series_equal(result, expected)
def test_drop_duplicates_pos_args_deprecation():
    """Positional arguments to Series.drop_duplicates warn (GH#41485)."""
    ser = Series(["a", "b", "c", "b"])
    msg = (
        "In a future version of pandas all arguments of "
        "Series.drop_duplicates will be keyword-only"
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = ser.drop_duplicates("last")
    tm.assert_series_equal(Series(["a", "c", "b"], index=[0, 2, 3]), result)
| 33.866142 | 88 | 0.64578 | import numpy as np
import pytest
from pandas import (
NA,
Categorical,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
    "keep, expected",
    [
        ("first", Series([False, False, True, True])),
        ("last", Series([True, True, False, False])),
        (False, Series([True, True, True, True])),
    ],
)
def test_drop_duplicates_bool(keep, expected):
    """duplicated/drop_duplicates on a plain boolean Series."""
    ser = Series([True, False, True, False])

    tm.assert_series_equal(ser.duplicated(keep=keep), expected)
    tm.assert_series_equal(ser.drop_duplicates(keep=keep), ser[~expected])

    inplace_copy = ser.copy()
    ret = inplace_copy.drop_duplicates(keep=keep, inplace=True)
    tm.assert_series_equal(inplace_copy, ser[~expected])
    assert ret is None
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
expected = Series([False] * len(tc), dtype="bool")
if tc.dtype == "bool":
tc = tc[:2]
expected = expected[:2]
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
result_dropped = tc.drop_duplicates(keep=keep)
tm.assert_series_equal(result_dropped, tc)
assert result_dropped is not tc
class TestSeriesDropDuplicates:
    """Series.duplicated/drop_duplicates on Categorical data, across dtypes,
    ordered flags and all keep options."""
    @pytest.fixture(
        params=["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"]
    )
    def dtype(self, request):
        """Underlying numpy dtype for the categorical categories."""
        return request.param
    @pytest.fixture
    def cat_series1(self, dtype, ordered):
        """Categorical Series [1, 2, 3, 3] over categories [1..5]."""
        cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
        input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
        cat = Categorical(input1, categories=cat_array, ordered=ordered)
        tc1 = Series(cat)
        return tc1
    def test_drop_duplicates_categorical_non_bool(self, cat_series1):
        """Default keep='first' semantics."""
        tc1 = cat_series1
        expected = Series([False, False, False, True])
        result = tc1.duplicated()
        tm.assert_series_equal(result, expected)
        result = tc1.drop_duplicates()
        tm.assert_series_equal(result, tc1[~expected])
        sc = tc1.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])
    def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):
        """keep='last' keeps the final occurrence of each value."""
        tc1 = cat_series1
        expected = Series([False, False, True, False])
        result = tc1.duplicated(keep="last")
        tm.assert_series_equal(result, expected)
        result = tc1.drop_duplicates(keep="last")
        tm.assert_series_equal(result, tc1[~expected])
        sc = tc1.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])
    def test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):
        """keep=False drops every occurrence of repeated values."""
        tc1 = cat_series1
        expected = Series([False, False, True, True])
        result = tc1.duplicated(keep=False)
        tm.assert_series_equal(result, expected)
        result = tc1.drop_duplicates(keep=False)
        tm.assert_series_equal(result, tc1[~expected])
        sc = tc1.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc1[~expected])
    @pytest.fixture
    def cat_series2(self, dtype, ordered):
        """Categorical Series [1, 2, 3, 5, 3, 2, 4] over categories [1..5]."""
        cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
        input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
        cat = Categorical(input2, categories=cat_array, ordered=ordered)
        tc2 = Series(cat)
        return tc2
    def test_drop_duplicates_categorical_non_bool2(self, cat_series2):
        """Default keep='first' semantics on the longer fixture."""
        tc2 = cat_series2
        expected = Series([False, False, False, False, True, True, False])
        result = tc2.duplicated()
        tm.assert_series_equal(result, expected)
        result = tc2.drop_duplicates()
        tm.assert_series_equal(result, tc2[~expected])
        sc = tc2.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])
    def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):
        """keep='last' on the longer fixture."""
        tc2 = cat_series2
        expected = Series([False, True, True, False, False, False, False])
        result = tc2.duplicated(keep="last")
        tm.assert_series_equal(result, expected)
        result = tc2.drop_duplicates(keep="last")
        tm.assert_series_equal(result, tc2[~expected])
        sc = tc2.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])
    def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series2):
        """keep=False on the longer fixture."""
        tc2 = cat_series2
        expected = Series([False, True, True, False, True, True, False])
        result = tc2.duplicated(keep=False)
        tm.assert_series_equal(result, expected)
        result = tc2.drop_duplicates(keep=False)
        tm.assert_series_equal(result, tc2[~expected])
        sc = tc2.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc2[~expected])
    def test_drop_duplicates_categorical_bool(self, ordered):
        """All keep options on boolean categoricals."""
        tc = Series(
            Categorical(
                [True, False, True, False], categories=[True, False], ordered=ordered
            )
        )
        expected = Series([False, False, True, True])
        tm.assert_series_equal(tc.duplicated(), expected)
        tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])
        expected = Series([True, True, False, False])
        tm.assert_series_equal(tc.duplicated(keep="last"), expected)
        tm.assert_series_equal(tc.drop_duplicates(keep="last"), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(keep="last", inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])
        expected = Series([True, True, True, True])
        tm.assert_series_equal(tc.duplicated(keep=False), expected)
        tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
        sc = tc.copy()
        return_value = sc.drop_duplicates(keep=False, inplace=True)
        assert return_value is None
        tm.assert_series_equal(sc, tc[~expected])
    def test_drop_duplicates_categorical_bool_na(self):
        """NA entries collapse to a single NaN (GH#44351)."""
        ser = Series(
            Categorical(
                [True, False, True, False, NA], categories=[True, False], ordered=True
            )
        )
        result = ser.drop_duplicates()
        expected = Series(
            Categorical([True, False, np.nan], categories=[True, False], ordered=True),
            index=[0, 1, 4],
        )
        tm.assert_series_equal(result, expected)
def test_drop_duplicates_pos_args_deprecation():
= Series(["a", "b", "c", "b"])
msg = (
"In a future version of pandas all arguments of "
"Series.drop_duplicates will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = s.drop_duplicates("last")
expected = Series(["a", "c", "b"], index=[0, 2, 3])
tm.assert_series_equal(expected, result)
| true | true |
f72d861b9bac9b5ba86db78ac65832190f0c7ae3 | 3,736 | py | Python | src/ClusterBootstrap/scripts/move_keys_into_db.py | Anbang-Hu/DLWorkspace | 09d82aa5efd4dc9523fd956f913f73e53a85c3c2 | [
"MIT"
] | 38 | 2020-07-13T08:46:39.000Z | 2021-02-08T01:38:44.000Z | src/ClusterBootstrap/scripts/move_keys_into_db.py | Anbang-Hu/DLWorkspace | 09d82aa5efd4dc9523fd956f913f73e53a85c3c2 | [
"MIT"
] | null | null | null | src/ClusterBootstrap/scripts/move_keys_into_db.py | Anbang-Hu/DLWorkspace | 09d82aa5efd4dc9523fd956f913f73e53a85c3c2 | [
"MIT"
] | 20 | 2020-07-14T03:38:50.000Z | 2021-01-08T06:24:17.000Z | #!/usr/bin/env python3
import os
import sys
import yaml
import argparse
import logging
import mysql.connector
logger = logging.getLogger(__name__)
def build_mysql_connection(rest_config_path):
    """Open a MySQL connection described by the RestfulAPI cluster config.

    Args:
        rest_config_path: path to the RestfulAPI YAML config file.

    Returns:
        An open ``mysql.connector`` connection to the per-cluster database
        ``DLWSCluster-<clusterId>``.
    """
    with open(rest_config_path) as f:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # (and a TypeError under PyYAML >= 6); the config is plain data.
        cluster_config = yaml.safe_load(f)

    host = cluster_config["mysql"]["hostname"]
    port = cluster_config["mysql"]["port"]
    username = cluster_config["mysql"]["username"]
    password = cluster_config["mysql"]["password"]
    db_name = "DLWSCluster-%s" % cluster_config["clusterId"]

    return mysql.connector.connect(user=username,
                                   password=password,
                                   host=host,
                                   port=port,
                                   database=db_name)
def alter_table(rest_config_path):
    """Add the public_key/private_key TEXT columns to the identity table."""
    conn = build_mysql_connection(rest_config_path)
    cur = conn.cursor()
    cur.execute(
        "ALTER TABLE identity ADD COLUMN public_key TEXT not null"
    )
    cur.execute(
        "ALTER TABLE identity ADD COLUMN private_key TEXT not null"
    )
    conn.commit()
    cur.close()
    conn.close()
def dump_data(rest_config_path, work_path):
    """Copy each user's SSH key pair from the NFS work dir into the DB.

    For every ``identityName`` the alias is derived (e-mail domain and
    DOMAIN\\ or realm/ prefixes stripped), the key pair is read from
    ``<work_path>/<alias>/.ssh`` and written into the ``private_key`` /
    ``public_key`` columns.  Users whose key files are missing are
    skipped with a warning.
    """
    conn = build_mysql_connection(rest_config_path)
    cursor = conn.cursor()
    cursor.execute("SELECT `identityName` FROM identity")

    for user_name, in cursor.fetchall():
        alias = user_name
        if "@" in alias:
            alias = alias.split("@")[0]
        if "/" in alias:
            alias = alias.split("/")[1]
        if "\\" in alias:
            alias = alias.split("\\")[1]
        logger.info("dumping %s", alias)

        ssh_dir = os.path.join(work_path, alias, ".ssh")
        private_path = os.path.join(ssh_dir, "id_rsa")
        public_path = os.path.join(ssh_dir, "id_rsa.pub")
        if not os.path.isfile(private_path) or not os.path.isfile(public_path):
            logger.warning("%s or %s not exist, ignore", private_path,
                           public_path)
            continue

        with open(private_path) as f:
            private_key = f.read()
        with open(public_path) as f:
            public_key = f.read()
        cursor.execute(
            """UPDATE identity
            SET private_key = %s, public_key = %s
            WHERE identityName = %s""",
            (private_key, public_key, user_name))

    conn.commit()
    cursor.close()
    conn.close()
def roll_back(rest_config_path):
    """Drop the key columns added by alter_table()."""
    connection = build_mysql_connection(rest_config_path)
    cursor = connection.cursor()
    cursor.execute("ALTER TABLE identity DROP COLUMN private_key, DROP COLUMN public_key")
    connection.commit()
    cursor.close()
    connection.close()
def main(action, rest_config_path, work_path):
    """Run the sub-command selected by *action* (alter/dump/rollback)."""
    handlers = {
        "alter": lambda: alter_table(rest_config_path),
        "dump": lambda: dump_data(rest_config_path, work_path),
        "rollback": lambda: roll_back(rest_config_path),
    }
    handler = handlers.get(action)
    if handler is None:
        logger.error("unknown action %s", action)
        sys.exit(2)
    handler()
if __name__ == '__main__':
    # Configure root logging, parse the CLI, then dispatch to main().
    logging.basicConfig(
        format=
        "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s",
        level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument("action", choices=["alter", "dump", "rollback"])
    parser.add_argument("--work_path",
                        help="path to NFS work directory",
                        default="/dlwsdata/work")
    parser.add_argument("--rest_path",
                        help="path to restfulapi config file",
                        default="/etc/RestfulAPI/config.yaml")
    args = parser.parse_args()
    main(args.action, args.rest_path, args.work_path)
| 32.206897 | 90 | 0.591274 |
import os
import sys
import yaml
import argparse
import logging
import mysql.connector
logger = logging.getLogger(__name__)
def build_mysql_connection(rest_config_path):
    """Open a MySQL connection described by the RestfulAPI cluster config.

    Returns an open ``mysql.connector`` connection to the per-cluster
    database ``DLWSCluster-<clusterId>``.
    """
    with open(rest_config_path) as f:
        # safe_load: yaml.load() without an explicit Loader is deprecated
        # (and a TypeError under PyYAML >= 6); the config is plain data.
        cluster_config = yaml.safe_load(f)

    host = cluster_config["mysql"]["hostname"]
    port = cluster_config["mysql"]["port"]
    username = cluster_config["mysql"]["username"]
    password = cluster_config["mysql"]["password"]
    db_name = "DLWSCluster-%s" % cluster_config["clusterId"]

    return mysql.connector.connect(user=username,
                                   password=password,
                                   host=host,
                                   port=port,
                                   database=db_name)
def alter_table(rest_config_path):
    """Add the public_key/private_key TEXT columns to the identity table."""
    conn = build_mysql_connection(rest_config_path)
    cursor = conn.cursor()
    cursor.execute(
        "ALTER TABLE identity ADD COLUMN public_key TEXT not null"
    )
    cursor.execute(
        "ALTER TABLE identity ADD COLUMN private_key TEXT not null"
    )
    conn.commit()
    cursor.close()
    conn.close()
def dump_data(rest_config_path, work_path):
    """Copy each user's SSH key pair from the NFS work dir into the DB.

    Aliases are derived from identityName by stripping e-mail domains and
    realm/ or DOMAIN\\ prefixes; users without key files are skipped.
    """
    conn = build_mysql_connection(rest_config_path)
    cursor = conn.cursor()
    cursor.execute("SELECT `identityName` FROM identity")
    users = cursor.fetchall()
    for user_name, in users:
        alias = user_name
        if "@" in alias:
            alias = alias.split("@")[0]
        if "/" in alias:
            alias = alias.split("/")[1]
        if "\\" in alias:
            alias = alias.split("\\")[1]
        logger.info("dumping %s", alias)
        private_path = os.path.join(work_path, alias, ".ssh", "id_rsa")
        public_path = os.path.join(work_path, alias, ".ssh",
                                   "id_rsa.pub")
        if not os.path.isfile(private_path) or not os.path.isfile(
                public_path):
            logger.warning("%s or %s not exist, ignore", private_path,
                           public_path)
            continue
        with open(private_path) as f:
            private_key = f.read()
        with open(public_path) as f:
            public_key = f.read()
        cursor.execute(
            """UPDATE identity
            SET private_key = %s, public_key = %s
            WHERE identityName = %s""", (private_key, public_key,
                                         user_name))
    conn.commit()
    cursor.close()
    conn.close()
def roll_back(rest_config_path):
    """Drop the key columns added by alter_table()."""
    conn = build_mysql_connection(rest_config_path)
    cursor = conn.cursor()
    cursor.execute("ALTER TABLE identity DROP COLUMN private_key, DROP COLUMN public_key")
    conn.commit()
    cursor.close()
    conn.close()
def main(action, rest_config_path, work_path):
    """Run the sub-command selected by *action* (alter/dump/rollback)."""
    if action == "alter":
        alter_table(rest_config_path)
    elif action == "dump":
        dump_data(rest_config_path, work_path)
    elif action == "rollback":
        roll_back(rest_config_path)
    else:
        logger.error("unknown action %s", action)
        sys.exit(2)
if __name__ == '__main__':
    # Configure root logging, parse the CLI, then dispatch to main().
    logging.basicConfig(
        format=
        "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s",
        level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument("action", choices=["alter", "dump", "rollback"])
    parser.add_argument("--work_path",
                        help="path to NFS work directory",
                        default="/dlwsdata/work")
    parser.add_argument("--rest_path",
                        help="path to restfulapi config file",
                        default="/etc/RestfulAPI/config.yaml")
    args = parser.parse_args()
    main(args.action, args.rest_path, args.work_path)
| true | true |
f72d8654bb38b434c13bdf313b5b3c9e76373332 | 3,325 | py | Python | include/dot_bdd.py | tyler-utah/PBDD | 3d53b09872efaf6825eb14de25b55584ed4f40bd | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2015-01-12T10:04:25.000Z | 2022-03-14T13:33:44.000Z | include/dot_bdd.py | tyler-utah/PBDD | 3d53b09872efaf6825eb14de25b55584ed4f40bd | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | include/dot_bdd.py | tyler-utah/PBDD | 3d53b09872efaf6825eb14de25b55584ed4f40bd | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2015-10-09T05:41:38.000Z | 2019-01-26T23:34:17.000Z | #Tyler Sorensen
#University of Utah
#March 1, 2012
#dot_bdd.py
#This simply prints a .dot file for visualizing the bdd
#Only public function
def print_bdd(bdd, fileName):
    """
    Generate a dot file with the bdd in it. Run the dot file through
    dot and generate a ps file.

    Fix: the output file was opened but never closed, relying on
    interpreter shutdown to flush it; a context manager guarantees the
    file is flushed and closed even if a helper raises.
    """
    with open(fileName, 'w') as f1:
        _prDotHeader(f1)                  # preamble / default graph settings
        _prNodes(f1, bdd)                 # node declarations
        _prRanks(f1, bdd)                 # same-rank grouping per variable
        _prEdges(f1, bdd, bdd["u"], [])   # low/high edges from the root
        _prClosing(f1)                    # closing comment and brace
def _prClosing(f1):
"""
A nice readable closing
"""
f1.write("/* Unix command: dot -Tps bdd.dot > bdd.ps */\n")
f1.write(r"/* For further details, see the `dot' manual */")
f1.write("\n}")
def _prDotHeader(f1):
"""
Header that sets up initial variables and settings
"""
f1.write("digraph G {\n" )
f1.write("/* Defaults */\n" )
f1.write(" fontsize = 12;\n" )
f1.write(" graph [dpi = 600];\n" )
f1.write(" ratio = compress; \n")
f1.write("/* Bounding box */\n" )
f1.write(" size = \"4,4\";\n" )
def _prNodes(f1, bdd):
    """
    Emit one node statement per BDD vertex.

    Terminals 0 and 1 are double-bordered boxes (red and blue); a terminal
    is omitted when the diagram reduces entirely to the other one.
    Interior nodes are circles labelled with their variable name.
    """
    root = bdd["u"]
    if root != 1:
        f1.write("Node0 [label=0, color=Red, shape=box, peripheries=2]\n")
    if root != 0:
        f1.write("Node1 [label=1, color=Blue, shape=box, peripheries=2]\n")
    for node in bdd["t_table"]:
        if node in (0, 1):
            continue
        label = _get_var_name(bdd, node)
        f1.write("Node%i [label=%s, shape=circle, peripheries=1]\n" % (node, label))
#Helper for _prNodes
def _get_var_name(bdd, u):
"""
Given a variable index u in the BDD, return the variable
Name
"""
var_index = bdd["t_table"][u][0]-1
return bdd["var_order"][var_index]
def _prEdges(f1, bdd, u, drawn_list):
"""
Recursive function to draw all the edges.
Red for low, Blue for High
"""
if u == 1:
return
if u == 0:
return
if u not in drawn_list:
s = "Node%i->Node%i [color=red, label = \"0\"]\n" % (u, bdd["t_table"][u][1])
f1.write(s)
s = "Node%i->Node%i [color=blue, label = \"1\"]\n" % (u, bdd["t_table"][u][2])
f1.write(s)
_prEdges(f1, bdd, bdd["t_table"][u][1], drawn_list)
_prEdges(f1, bdd, bdd["t_table"][u][2], drawn_list)
drawn_list.append(u)
def _prRanks(f1, bdd):
    """
    Force all nodes testing the same variable onto one Graphviz rank.
    """
    # Count how many interior nodes test each variable (index = level - 1).
    counts = [0] * len(bdd["var_order"])
    for node in bdd["t_table"]:
        if node != 0 and node != 1:
            counts[bdd["t_table"][node][0] - 1] += 1
    # A rank constraint is only useful when a variable appears more than once.
    for level in range(len(bdd["var_order"])):
        if counts[level] > 1:
            members = "".join(" Node%s" % str(node) for node in find(bdd, level))
            f1.write("{rank=same;%s}\n" % members)
#Helper function for prRanks
def find(bdd, i):
    """
    Return the node numbers of every BDD node testing variable index *i*.
    """
    # t_table levels are 1-based while i is 0-based.
    return [node for node, entry in bdd["t_table"].items() if entry[0] - 1 == i]
| 24.094203 | 86 | 0.520602 |
def print_bdd(bdd, fileName):
    """Write a Graphviz dot rendering of *bdd* to *fileName*.

    Fix: the file previously stayed open until interpreter exit; a
    context manager now guarantees it is flushed and closed on all paths.
    """
    with open(fileName, 'w') as f1:
        _prDotHeader(f1)
        _prNodes(f1, bdd)
        _prRanks(f1, bdd)
        _prEdges(f1, bdd, bdd["u"], [])
        _prClosing(f1)
def _prClosing(f1):
    """Write the trailing usage comment and the closing brace."""
    f1.write("/* Unix command: dot -Tps bdd.dot > bdd.ps */\n")
    f1.write(r"/* For further details, see the `dot' manual */")
    f1.write("\n}")
def _prDotHeader(f1):
    """Write the digraph opening and default rendering settings."""
    f1.write("digraph G {\n" )
    f1.write("/* Defaults */\n" )
    f1.write(" fontsize = 12;\n" )
    f1.write(" graph [dpi = 600];\n" )
    f1.write(" ratio = compress; \n")
    f1.write("/* Bounding box */\n" )
    f1.write(" size = \"4,4\";\n" )
def _prNodes(f1, bdd):
    """Emit a dot node statement for every vertex of the BDD."""
    u = bdd["u"]
    # Terminal boxes; one is skipped when the whole BDD reduces to the other.
    if u != 1:
        s = "Node0 [label=0, color=Red, shape=box, peripheries=2]\n"
        f1.write(s)
    if u != 0:
        s = "Node1 [label=1, color=Blue, shape=box, peripheries=2]\n"
        f1.write(s)
    for q in bdd["t_table"]:
        if q != 0 and q!= 1:
            s = "Node%i " % q
            s = "%s[label=%s" % (s, _get_var_name(bdd,q))
            s = "%s, shape=circle, peripheries=1]\n" % s
            f1.write(s)
#Helper for _prNodes
def _get_var_name(bdd, u):
    """Map node u to its variable name (t_table levels are 1-based)."""
    var_index = bdd["t_table"][u][0]-1
    return bdd["var_order"][var_index]
def _prEdges(f1, bdd, u, drawn_list):
    """Recursively write red low-edges and blue high-edges below node u.

    drawn_list accumulates already-emitted nodes so shared sub-BDDs are
    written only once.
    """
    if u == 1:
        return
    if u == 0:
        return
    if u not in drawn_list:
        s = "Node%i->Node%i [color=red, label = \"0\"]\n" % (u, bdd["t_table"][u][1])
        f1.write(s)
        s = "Node%i->Node%i [color=blue, label = \"1\"]\n" % (u, bdd["t_table"][u][2])
        f1.write(s)
        _prEdges(f1, bdd, bdd["t_table"][u][1], drawn_list)
        _prEdges(f1, bdd, bdd["t_table"][u][2], drawn_list)
        drawn_list.append(u)
def _prRanks(f1, bdd):
    """Group nodes testing the same variable onto one Graphviz rank."""
    ar = [0]*len(bdd["var_order"])
    #Count how many times each variable appears
    for q in bdd["t_table"]:
        if q != 0 and q != 1:
            ar[bdd["t_table"][q][0]-1] += 1
    i = 0
    while i < len(bdd["var_order"]):
        if ar[i] > 1:
            l = find(bdd, i)
            s = "{rank=same;"
            for q in l:
                s = "%s Node%s" % (s, str(q))
            s = "%s}\n" % s
            f1.write(s)
        i += 1
#Helper function for prRanks
def find(bdd, i):
    """Return the u-numbers of all nodes testing variable index i."""
    l = []
    for q in bdd["t_table"]:
        if bdd["t_table"][q][0]-1 == i:
            l.append(q)
    return l
| true | true |
f72d8677c20fa3e3a54169d4eb48cb7ca7458055 | 11,575 | py | Python | OneSpanAnalysis_Mdl.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | null | null | null | OneSpanAnalysis_Mdl.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | 44 | 2021-02-10T23:58:28.000Z | 2021-12-14T02:38:21.000Z | OneSpanAnalysis_Mdl.py | Ivanfdezr/CentralSoftware | 8681fedd4814dc60deb527a370411350b40c994c | [
"MIT"
] | null | null | null | import numpy as np
import numpy.linalg as la
from MdlUtilities import Field, FieldList
import MdlUtilities as mdl
def get_osaCasing_fields():
	"""Build the FieldList of casing inputs for the one-span analysis:
	OD (2030), ID (2031), weight (2032), density (2039) and E (2040)."""
	osaCasing_fields = FieldList()
	for field_id in (2030, 2031, 2032, 2039, 2040):
		osaCasing_fields.append( Field(field_id) )
	return osaCasing_fields
def get_osaCent_fields():
	"""Build the FieldList describing one centralizer: type, pipe OD,
	centralizer OD and its restoring-force characteristics."""
	osaCent_fields = FieldList()
	osaCent_fields.append( Field(2049) )  # Type
	osaCent_fields.append( Field(2009) )  # IPOD
	osaCent_fields.append( Field(2011) )  # CentOD
	# Restoring-force fields carry custom display names.
	for field_id, representation in (
			(2018, 'Res. Force @ SO=67%'),
			(2017, 'minimum Res. Force'),
			(2019, 'StandOff @ min. Res. F.')):
		force_field = Field(field_id)
		force_field.set_representation(representation)
		osaCent_fields.append( force_field )
	return osaCent_fields
def get_osaWellbore_fields():
	"""Build the FieldList for the wellbore inputs: hole ID, maximum span
	and the mud densities inside the pipe and in the annulus."""
	specs = [
		(2010, 'Hole ID', 'HoleID'),
		(2061, 'Max span', 'MaxSpan'),
		(2077, 'Mud inside pipe', 'MudIPDensity'),
		(2077, 'Mud in annulus', 'MudOPDensity'),
	]
	osaWellbore_fields = FieldList()
	for field_id, representation, abbreviation in specs:
		wellbore_field = Field(field_id)
		wellbore_field.set_representation(representation)
		wellbore_field.set_abbreviation(abbreviation)
		osaWellbore_fields.append( wellbore_field )
	return osaWellbore_fields
def get_osaOutputdata1_fields():
	"""Build the read-only FieldList for the per-location outputs:
	annular clearance, side force and standoff at centralizers A and B
	and at mid span (alternate colors mark them as outputs)."""
	specs = [
		(2073, 'Annular clearance @ cent. A', 'ClearanceA'),
		(2073, 'Annular clearance @ cent. B', 'ClearanceB'),
		(2073, 'Annular clearance @ mid span', 'ClearanceM'),
		(2074, 'Side force @ cent. A', 'SideForceA'),
		(2074, 'Side force @ cent. B', 'SideForceB'),
		(2074, 'Side force @ mid span', 'SideForceM'),
		(2078, 'Standoff @ cent. A', 'StandoffA'),
		(2078, 'Standoff @ cent. B', 'StandoffB'),
		(2078, 'Standoff @ mid span', 'StandoffM'),
	]
	osaOutputdata1_fields = FieldList()
	for field_id, representation, abbreviation in specs:
		output_field = Field(field_id, altBg=True, altFg=True)
		output_field.set_representation(representation)
		output_field.set_abbreviation(abbreviation)
		osaOutputdata1_fields.append( output_field )
	return osaOutputdata1_fields
def get_osaOutputdata2_fields():
	"""Build the read-only FieldList for the global outputs: extra axial
	force at top, maximum deflection and the mean wellbore
	clearance/standoff."""
	specs = [
		(2075, 'Axial extra force @ top', 'AxialForce'),
		(2076, 'Max. pipe deflection', 'MaxDeflection'),
		(2073, 'Mean wellbore clearance', 'WellboreClearance'),
		(2078, 'Mean wellbore standoff', 'WellboreStandoff'),
	]
	osaOutputdata2_fields = FieldList()
	for field_id, representation, abbreviation in specs:
		output_field = Field(field_id, altBg=True, altFg=True)
		output_field.set_representation(representation)
		output_field.set_abbreviation(abbreviation)
		osaOutputdata2_fields.append( output_field )
	return osaOutputdata2_fields
def get_casingDeflectionCurve(self):
	"""Compute the deflected shape of one casing span between centralizers.

	Reads the GUI field lists (casing, centralizers A/B, wellbore) plus the
	spacing and inclination sliders from *self*, stores the computed
	clearances/side-forces/standoffs into the output field lists, and
	returns the curves for plotting:
	(OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM) — open-hole center/plus/
	minus traces, casing center/plus/minus traces, half-span plot limit and
	the radii at centralizer A, B and mid span.
	"""
	# Equation(s) Reference 1:
	#	Hans C. Juvkam-Wold, Jiang Wu. Casing Deflection and Centralizer Spacing Calculations.
	#	SPE Drilling Engineering (December 1992).
	# Equation(s) Reference 2:
	#	Hans C. Juvkam-Wold, Richard L. Baxter. Discussion of Optimal Spacing for Casing Centralizers.
	#	SPE Drilling Engineering (December 1988).
	# Equation(s) Reference 3:
	#	Carlos F. H. Fonseca, Jacques Braile. Optimizing of Centralizer Distribution.
	#	SPE Latin American Petroleum Engineering Conference (October 1990).
	# Work in reference units throughout; converted back before returning.
	self.osaCasing_fields.referenceUnitConvert_fields()
	self.osaCentA_fields.referenceUnitConvert_fields()
	self.osaCentB_fields.referenceUnitConvert_fields()
	self.osaWellbore_fields.referenceUnitConvert_fields()
	# 2D rotation matrix for angle φ.
	Rot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )
	dH = self.osaWellbore_fields.HoleID[0]
	# Span length scaled by the spacing slider (percentage of max span).
	L = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100
	ρe = self.osaWellbore_fields.MudOPDensity[0]
	ρi = self.osaWellbore_fields.MudIPDensity[0]
	ρs = self.osaCasing_fields.Density[0]
	E = self.osaCasing_fields.E[0]
	w = self.osaCasing_fields.PW[0]
	D = self.osaCasing_fields.OD[0]
	d = self.osaCasing_fields.ID[0]
	Type_A = self.osaCentA_fields.Type[0]
	F_So67_A = self.osaCentA_fields.ResF_SO67[0]
	minF_A = self.osaCentA_fields.minResF[0]
	So_minF_A = self.osaCentA_fields.SO_minResF[0]
	DA = self.osaCentA_fields.COD[0]
	dA = self.osaCentA_fields.IPOD[0]
	Type_B = self.osaCentB_fields.Type[0]
	F_So67_B = self.osaCentB_fields.ResF_SO67[0]
	minF_B = self.osaCentB_fields.minResF[0]
	So_minF_B = self.osaCentB_fields.SO_minResF[0]
	DB = self.osaCentB_fields.COD[0]
	dB = self.osaCentB_fields.IPOD[0]
	#kA = ResFA/(DA/2-0.335*(DA-D))  # spring coefficients would be computed like this ( 0.335=0.67/2 )
	#kB = ResFB/(DB/2-0.335*(DB-D))
	# Validate inputs: every magnitude must be positive (Type fields skipped
	# on the centralizer lists via [1:]).
	for field in self.osaWellbore_fields:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	for field in self.osaCasing_fields:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	for field in self.osaCentA_fields[1:]:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	for field in self.osaCentB_fields[1:]:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	if dA!=D or dB!=D or dH<=D:
		raise mdl.LogicalError('The selected devices are not size-consistent.')
	# Inclination slider is in degrees; θ in radians.
	θ = np.pi*self.osaInclination_slider.sliderPosition()/180
	I = np.pi/64*(D**4-d**4) # [Ref.3] second moment of area (not the polar moment).
	F = 30000 # [Ref.1] assumed axial force — TODO confirm units/value against Ref.1
	Radio = L*1e6  # very large radius: effectively a straight (zero-curvature) hole
	aspr = L*0.02  # aspect-ratio factor used to exaggerate radial scale for plotting
	buoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi ) # [Ref.2]
	w *= buoyancyFactor
	# Lateral load carried by each centralizer (half the span's normal weight).
	fC = w*L*np.sin(θ)/2
	# Centralizer compression: rigid resin gets no deflection; bow springs
	# deflect by force/stiffness (halved when the bow contacts the hole wall).
	if Type_A=='Resin': #mdl.isNoneEntry(ResFA):
		yA = 0
		dA = d
	else:
		kA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)
		yA = fC/kA if (DA<dH) else fC/kA/2
	if Type_B=='Resin': #mdl.isNoneEntry(ResFB):
		yB = 0
		dB = d
	else:
		kB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)
		yB = fC/kB if (DB<dH) else fC/kB/2
	R = D/2
	rH = dH/2
	# Never let a centralizer compress below 10% of its undeformed stand-off.
	rA_min = R+(DA/2-R)*0.1
	rB_min = R+(DB/2-R)*0.1
	rA = (DA/2-yA) if (DA<dH) else (rH-yA)
	rB = (DB/2-yB) if (DB<dH) else (rH-yB)
	rA = rA_min if (rA<=rA_min) else rA
	rB = rB_min if (rB<=rB_min) else rB
	# Tilt of the chord joining the two centralizer contact radii.
	α = np.arctan( (rB-rA)/L )
	Lα = L/np.cos(α)
	x = np.linspace( 0, Lα, 101 )
	K = np.sqrt(F/E/I)
	# Beam-column deflection curve between centralizers. [Ref.1]
	y = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2 # [Ref.1]
	# Rotate the curve into the chord frame and shift to the hole wall.
	Rα = Rot(α)
	xy = np.array([x,y])
	x,y = np.dot(Rα,xy)
	Δy = rH-rB
	y += Δy
	cH = rH-R
	cA = rA-R
	cB = rB-R
	# Clip the pipe to the hole wall (it cannot leave the annulus).
	indexes = y>cH
	y[indexes] = cH
	indexes = y<-cH
	y[indexes] =-cH
	cy = cH-y
	rM = rH-y[50]
	# If the pipe touches the wall at mid span, the wall takes the side load.
	if y[50]==cH:
		fM = fC
		fC = 0
	else:
		fM = 0
	cM = rM-R
	x -= L/2
	# Build center/±radius traces for hole (OH*) and casing (XY*), radially
	# exaggerated by aspr and rotated into the inclined frame.
	yoh = y*0
	ohc = np.array([x, yoh])
	ohp = np.array([x, (yoh+rH)*aspr])
	ohm = np.array([x, (yoh-rH)*aspr])
	xyc = np.array([x, y*aspr])
	xyp = np.array([x, (y+R)*aspr])
	xym = np.array([x, (y-R)*aspr])
	φ = θ + np.pi/2
	Rφ = Rot(φ)
	OHc = np.dot(Rφ,ohc)
	OHp = np.dot(Rφ,ohp)
	OHm = np.dot(Rφ,ohm)
	XYc = np.dot(Rφ,xyc)
	XYp = np.dot(Rφ,xyp)
	XYm = np.dot(Rφ,xym)
	# Standoff ratios (clearance relative to the concentric clearance).
	SA = cA/cH
	SB = cB/cH
	SM = cM/cH
	Sy = cy/cH
	δ = (cA+cB)/2-cM
	# Publish the scalar results into the output field lists.
	self.osaOutputdata1_fields.clear_content()
	self.osaOutputdata2_fields.clear_content()
	self.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )
	self.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )
	self.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )
	self.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )
	self.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )
	self.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )
	self.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )
	self.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )
	self.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )
	self.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )
	self.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )
	self.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )
	self.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )
	# Restore the GUI units on every field list.
	self.osaCasing_fields.inverseReferenceUnitConvert_fields()
	self.osaCentA_fields.inverseReferenceUnitConvert_fields()
	self.osaCentB_fields.inverseReferenceUnitConvert_fields()
	self.osaWellbore_fields.inverseReferenceUnitConvert_fields()
	self.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()
	self.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()
	lim = L/2*1.05
	return OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM
| 35.506135 | 149 | 0.723629 | import numpy as np
import numpy.linalg as la
from MdlUtilities import Field, FieldList
import MdlUtilities as mdl
def get_osaCasing_fields():
	"""Build the casing input FieldList: OD, ID, weight, density and E."""
	OD = Field(2030)
	ID = Field(2031)
	Weight = Field(2032)
	Density = Field(2039)
	E = Field(2040)
	osaCasing_fields = FieldList()
	osaCasing_fields.append( OD )
	osaCasing_fields.append( ID )
	osaCasing_fields.append( Weight )
	osaCasing_fields.append( Density )
	osaCasing_fields.append( E )
	return osaCasing_fields
def get_osaCent_fields():
	"""Build the centralizer FieldList: type, ODs and restoring-force data."""
	Type = Field(2049)
	IPOD = Field(2009)
	CentOD = Field(2011)
	ResF_SO67 = Field(2018)
	minResF = Field(2017)
	SO_minResF = Field(2019)
	ResF_SO67.set_representation('Res. Force @ SO=67%')
	minResF.set_representation('minimum Res. Force')
	SO_minResF.set_representation('StandOff @ min. Res. F.')
	osaCent_fields = FieldList()
	osaCent_fields.append( Type )
	osaCent_fields.append( IPOD )
	osaCent_fields.append( CentOD )
	osaCent_fields.append( ResF_SO67 )
	osaCent_fields.append( minResF )
	osaCent_fields.append( SO_minResF )
	return osaCent_fields
def get_osaWellbore_fields():
HoleID = Field(2010)
MaxSpan = Field(2061)
MudIPDensity = Field(2077)
MudOPDensity = Field(2077)
HoleID.set_representation('Hole ID')
HoleID.set_abbreviation('HoleID')
MaxSpan.set_representation('Max span')
MaxSpan.set_abbreviation('MaxSpan')
MudIPDensity.set_representation('Mud inside pipe')
MudIPDensity.set_abbreviation('MudIPDensity')
MudOPDensity.set_representation('Mud in annulus')
MudOPDensity.set_abbreviation('MudOPDensity')
osaWellbore_fields = FieldList()
osaWellbore_fields.append( HoleID )
osaWellbore_fields.append( MaxSpan )
osaWellbore_fields.append( MudIPDensity )
osaWellbore_fields.append( MudOPDensity )
return osaWellbore_fields
def get_osaOutputdata1_fields():
clearanceA = Field(2073, altBg=True, altFg=True)
clearanceB = Field(2073, altBg=True, altFg=True)
clearanceM = Field(2073, altBg=True, altFg=True)
sideForceA = Field(2074, altBg=True, altFg=True)
sideForceB = Field(2074, altBg=True, altFg=True)
sideForceM = Field(2074, altBg=True, altFg=True)
standoffA = Field(2078, altBg=True, altFg=True)
standoffB = Field(2078, altBg=True, altFg=True)
standoffM = Field(2078, altBg=True, altFg=True)
clearanceA.set_representation('Annular clearance @ cent. A')
clearanceA.set_abbreviation('ClearanceA')
clearanceB.set_representation('Annular clearance @ cent. B')
clearanceB.set_abbreviation('ClearanceB')
clearanceM.set_representation('Annular clearance @ mid span')
clearanceM.set_abbreviation('ClearanceM')
sideForceA.set_representation('Side force @ cent. A')
sideForceA.set_abbreviation('SideForceA')
sideForceB.set_representation('Side force @ cent. B')
sideForceB.set_abbreviation('SideForceB')
sideForceM.set_representation('Side force @ mid span')
sideForceM.set_abbreviation('SideForceM')
standoffA.set_representation('Standoff @ cent. A')
standoffA.set_abbreviation('StandoffA')
standoffB.set_representation('Standoff @ cent. B')
standoffB.set_abbreviation('StandoffB')
standoffM.set_representation('Standoff @ mid span')
standoffM.set_abbreviation('StandoffM')
osaOutputdata1_fields = FieldList()
osaOutputdata1_fields.append( clearanceA )
osaOutputdata1_fields.append( clearanceB )
osaOutputdata1_fields.append( clearanceM )
osaOutputdata1_fields.append( sideForceA )
osaOutputdata1_fields.append( sideForceB )
osaOutputdata1_fields.append( sideForceM )
osaOutputdata1_fields.append( standoffA )
osaOutputdata1_fields.append( standoffB )
osaOutputdata1_fields.append( standoffM )
return osaOutputdata1_fields
def get_osaOutputdata2_fields():
axialForce = Field(2075, altBg=True, altFg=True)
deflection = Field(2076, altBg=True, altFg=True)
wClearance = Field(2073, altBg=True, altFg=True)
wStandoff = Field(2078, altBg=True, altFg=True)
axialForce.set_representation('Axial extra force @ top')
axialForce.set_abbreviation('AxialForce')
deflection.set_representation('Max. pipe deflection')
deflection.set_abbreviation('MaxDeflection')
wClearance.set_representation('Mean wellbore clearance')
wClearance.set_abbreviation('WellboreClearance')
wStandoff.set_representation('Mean wellbore standoff')
wStandoff.set_abbreviation('WellboreStandoff')
osaOutputdata2_fields = FieldList()
osaOutputdata2_fields.append( axialForce )
osaOutputdata2_fields.append( deflection )
osaOutputdata2_fields.append( wClearance )
osaOutputdata2_fields.append( wStandoff )
return osaOutputdata2_fields
def get_casingDeflectionCurve(self):
self.osaCasing_fields.referenceUnitConvert_fields()
self.osaCentA_fields.referenceUnitConvert_fields()
self.osaCentB_fields.referenceUnitConvert_fields()
self.osaWellbore_fields.referenceUnitConvert_fields()
Rot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )
dH = self.osaWellbore_fields.HoleID[0]
L = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100
ρe = self.osaWellbore_fields.MudOPDensity[0]
ρi = self.osaWellbore_fields.MudIPDensity[0]
ρs = self.osaCasing_fields.Density[0]
E = self.osaCasing_fields.E[0]
w = self.osaCasing_fields.PW[0]
D = self.osaCasing_fields.OD[0]
d = self.osaCasing_fields.ID[0]
Type_A = self.osaCentA_fields.Type[0]
F_So67_A = self.osaCentA_fields.ResF_SO67[0]
minF_A = self.osaCentA_fields.minResF[0]
So_minF_A = self.osaCentA_fields.SO_minResF[0]
DA = self.osaCentA_fields.COD[0]
dA = self.osaCentA_fields.IPOD[0]
Type_B = self.osaCentB_fields.Type[0]
F_So67_B = self.osaCentB_fields.ResF_SO67[0]
minF_B = self.osaCentB_fields.minResF[0]
So_minF_B = self.osaCentB_fields.SO_minResF[0]
DB = self.osaCentB_fields.COD[0]
dB = self.osaCentB_fields.IPOD[0]
dl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCasing_fields:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentA_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentB_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
if dA!=D or dB!=D or dH<=D:
raise mdl.LogicalError('The selected devices are not size-consistent.')
θ = np.pi*self.osaInclination_slider.sliderPosition()/180
I = np.pi/64*(D**4-d**4)
F = 30000
Radio = L*1e6
aspr = L*0.02
buoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi )
w *= buoyancyFactor
fC = w*L*np.sin(θ)/2
if Type_A=='Resin':
yA = 0
dA = d
else:
kA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)
yA = fC/kA if (DA<dH) else fC/kA/2
if Type_B=='Resin':
yB = 0
dB = d
else:
kB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)
yB = fC/kB if (DB<dH) else fC/kB/2
R = D/2
rH = dH/2
rA_min = R+(DA/2-R)*0.1
rB_min = R+(DB/2-R)*0.1
rA = (DA/2-yA) if (DA<dH) else (rH-yA)
rB = (DB/2-yB) if (DB<dH) else (rH-yB)
rA = rA_min if (rA<=rA_min) else rA
rB = rB_min if (rB<=rB_min) else rB
α = np.arctan( (rB-rA)/L )
Lα = L/np.cos(α)
x = np.linspace( 0, Lα, 101 )
K = np.sqrt(F/E/I)
y = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2
Rα = Rot(α)
xy = np.array([x,y])
x,y = np.dot(Rα,xy)
Δy = rH-rB
y += Δy
cH = rH-R
cA = rA-R
cB = rB-R
indexes = y>cH
y[indexes] = cH
indexes = y<-cH
y[indexes] =-cH
cy = cH-y
rM = rH-y[50]
if y[50]==cH:
fM = fC
fC = 0
else:
fM = 0
cM = rM-R
x -= L/2
yoh = y*0
ohc = np.array([x, yoh])
ohp = np.array([x, (yoh+rH)*aspr])
ohm = np.array([x, (yoh-rH)*aspr])
xyc = np.array([x, y*aspr])
xyp = np.array([x, (y+R)*aspr])
xym = np.array([x, (y-R)*aspr])
φ = θ + np.pi/2
Rφ = Rot(φ)
OHc = np.dot(Rφ,ohc)
OHp = np.dot(Rφ,ohp)
OHm = np.dot(Rφ,ohm)
XYc = np.dot(Rφ,xyc)
XYp = np.dot(Rφ,xyp)
XYm = np.dot(Rφ,xym)
SA = cA/cH
SB = cB/cH
SM = cM/cH
Sy = cy/cH
δ = (cA+cB)/2-cM
self.osaOutputdata1_fields.clear_content()
self.osaOutputdata2_fields.clear_content()
self.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )
self.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )
self.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )
self.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )
self.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )
self.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )
self.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )
self.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )
self.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )
self.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )
self.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )
self.osaCasing_fields.inverseReferenceUnitConvert_fields()
self.osaCentA_fields.inverseReferenceUnitConvert_fields()
self.osaCentB_fields.inverseReferenceUnitConvert_fields()
self.osaWellbore_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()
lim = L/2*1.05
return OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM
| true | true |
f72d8828ff9e26570c02649cb01fa01ec0782c9c | 842 | py | Python | decoradores.py | daniela2001-png/PYTHON-REVIEW-TOPICS | 203c88492267c9a6a6c05cb75bcbb5e4d78cb295 | [
"MIT"
] | null | null | null | decoradores.py | daniela2001-png/PYTHON-REVIEW-TOPICS | 203c88492267c9a6a6c05cb75bcbb5e4d78cb295 | [
"MIT"
] | null | null | null | decoradores.py | daniela2001-png/PYTHON-REVIEW-TOPICS | 203c88492267c9a6a6c05cb75bcbb5e4d78cb295 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
Creando mi propio decorador
y entendiendolos
¿ Qué es un decorador ?
- Un decorador básicamente toma una función,
le añade alguna funcionalidad y la retorna.
"""
# Ejemplo:
def funcion_decorador(funcion):
def wrapper():
print("llamando a mi funcion")
funcion()
print("finalizo llamado")
return wrapper
"""
(@funcion_decorador) es lo mismo que =>
ejemplo = funcion_decoradora(ejemplo) => print(ejemplo())
solo que usando la sintaxis que nos permite usar python con el "@"
"""
@funcion_decorador
def ejemplo():
print("soy una funcion ejemplo que tendra una nueva funcionalidad cuando sea llmada!")
print(ejemplo())
"""
SALIDA DEL PROGRAMA:
llamando a mi funcion
soy una funcion ejemplo que tendra una nueva funcionalidad cuando sea llmada!
finalizo llamado
"""
| 19.581395 | 90 | 0.706651 |
def funcion_decorador(funcion):
def wrapper():
print("llamando a mi funcion")
funcion()
print("finalizo llamado")
return wrapper
@funcion_decorador
def ejemplo():
print("soy una funcion ejemplo que tendra una nueva funcionalidad cuando sea llmada!")
print(ejemplo())
| true | true |
f72d8837ef94b47804da55b302a6ffd384200c01 | 6,156 | py | Python | loss.py | HitkoDev/triplet-reid | d80edf7bdcee2ebcab160f1a06224837ac624329 | [
"MIT"
] | null | null | null | loss.py | HitkoDev/triplet-reid | d80edf7bdcee2ebcab160f1a06224837ac624329 | [
"MIT"
] | null | null | null | loss.py | HitkoDev/triplet-reid | d80edf7bdcee2ebcab160f1a06224837ac624329 | [
"MIT"
] | null | null | null | import numbers
import tensorflow as tf
def all_diffs(a, b):
""" Returns a tensor of all combinations of a - b.
Args:
a (2D tensor): A batch of vectors shaped (B1, F).
b (2D tensor): A batch of vectors shaped (B2, F).
Returns:
The matrix of all pairwise differences between all vectors in `a` and in
`b`, will be of shape (B1, B2).
Note:
For convenience, if either `a` or `b` is a `Distribution` object, its
mean is used.
"""
return tf.expand_dims(a, axis=1) - tf.expand_dims(b, axis=0)
def cdist(a, b, metric='euclidean'):
"""Similar to scipy.spatial's cdist, but symbolic.
The currently supported metrics can be listed as `cdist.supported_metrics` and are:
- 'euclidean', although with a fudge-factor epsilon.
- 'sqeuclidean', the squared euclidean.
- 'cityblock', the manhattan or L1 distance.
Args:
a (2D tensor): The left-hand side, shaped (B1, F).
b (2D tensor): The right-hand side, shaped (B2, F).
metric (string): Which distance metric to use, see notes.
Returns:
The matrix of all pairwise distances between all vectors in `a` and in
`b`, will be of shape (B1, B2).
Note:
When a square root is taken (such as in the Euclidean case), a small
epsilon is added because the gradient of the square-root at zero is
undefined. Thus, it will never return exact zero in these cases.
"""
with tf.compat.v1.name_scope("cdist"):
diffs = all_diffs(a, b)
if metric == 'sqeuclidean':
return tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1)
elif metric == 'euclidean':
return tf.sqrt(tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1) + 1e-12)
elif metric == 'cityblock':
return tf.reduce_sum(input_tensor=tf.abs(diffs), axis=-1)
else:
raise NotImplementedError(
'The following metric is not implemented by `cdist` yet: {}'.format(metric))
cdist.supported_metrics = [
'euclidean',
'sqeuclidean',
'cityblock',
]
def get_at_indices(tensor, indices):
""" Like `tensor[np.arange(len(tensor)), indices]` in numpy. """
counter = tf.range(tf.shape(input=indices, out_type=indices.dtype)[0])
return tf.gather_nd(tensor, tf.stack((counter, indices), -1))
def batch_hard(dists, pids, margin, batch_precision_at_k=None):
"""Computes the batch-hard loss from arxiv.org/abs/1703.07737.
Args:
dists (2D tensor): A square all-to-all distance matrix as given by cdist.
pids (1D tensor): The identities of the entries in `batch`, shape (B,).
This can be of any type that can be compared, thus also a string.
margin: The value of the margin if a number, alternatively the string
'soft' for using the soft-margin formulation, or `None` for not
using a margin at all.
Returns:
A 1D tensor of shape (B,) containing the loss value for each sample.
"""
with tf.compat.v1.name_scope("batch_hard"):
same_identity_mask = tf.equal(tf.expand_dims(pids, axis=1),
tf.expand_dims(pids, axis=0))
negative_mask = tf.logical_not(same_identity_mask)
positive_mask = tf.math.logical_xor(same_identity_mask,
tf.eye(tf.shape(input=pids)[0], dtype=tf.bool))
furthest_positive = tf.reduce_max(input_tensor=dists*tf.cast(positive_mask, tf.float32), axis=1)
closest_negative = tf.map_fn(lambda x: tf.reduce_min(input_tensor=tf.boolean_mask(tensor=x[0], mask=x[1])),
(dists, negative_mask), tf.float32)
# Another way of achieving the same, though more hacky:
# closest_negative = tf.reduce_min(dists + 1e5*tf.cast(same_identity_mask, tf.float32), axis=1)
diff = furthest_positive - closest_negative
if isinstance(margin, numbers.Real):
diff = tf.maximum(diff + margin, 0.0)
elif margin == 'soft':
diff = tf.nn.softplus(diff)
elif margin.lower() == 'none':
pass
else:
raise NotImplementedError(
'The margin {} is not implemented in batch_hard'.format(margin))
if batch_precision_at_k is None:
return diff
# For monitoring, compute the within-batch top-1 accuracy and the
# within-batch precision-at-k, which is somewhat more expressive.
with tf.compat.v1.name_scope("monitoring"):
# This is like argsort along the last axis. Add one to K as we'll
# drop the diagonal.
_, indices = tf.nn.top_k(-dists, k=batch_precision_at_k+1)
# Drop the diagonal (distance to self is always least).
indices = indices[:,1:]
# Generate the index indexing into the batch dimension.
# This is simething like [[0,0,0],[1,1,1],...,[B,B,B]]
batch_index = tf.tile(
tf.expand_dims(tf.range(tf.shape(input=indices)[0]), 1),
(1, tf.shape(input=indices)[1]))
# Stitch the above together with the argsort indices to get the
# indices of the top-k of each row.
topk_indices = tf.stack((batch_index, indices), -1)
# See if the topk belong to the same person as they should, or not.
topk_is_same = tf.gather_nd(same_identity_mask, topk_indices)
# All of the above could be reduced to the simpler following if k==1
#top1_is_same = get_at_indices(same_identity_mask, top_idxs[:,1])
topk_is_same_f32 = tf.cast(topk_is_same, tf.float32)
top1 = tf.reduce_mean(input_tensor=topk_is_same_f32[:,0])
prec_at_k = tf.reduce_mean(input_tensor=topk_is_same_f32)
# Finally, let's get some more info that can help in debugging while
# we're at it!
negative_dists = tf.boolean_mask(tensor=dists, mask=negative_mask)
positive_dists = tf.boolean_mask(tensor=dists, mask=positive_mask)
return diff, top1, prec_at_k, topk_is_same, negative_dists, positive_dists
LOSS_CHOICES = {
'batch_hard': batch_hard,
}
| 40.768212 | 115 | 0.635802 | import numbers
import tensorflow as tf
def all_diffs(a, b):
return tf.expand_dims(a, axis=1) - tf.expand_dims(b, axis=0)
def cdist(a, b, metric='euclidean'):
with tf.compat.v1.name_scope("cdist"):
diffs = all_diffs(a, b)
if metric == 'sqeuclidean':
return tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1)
elif metric == 'euclidean':
return tf.sqrt(tf.reduce_sum(input_tensor=tf.square(diffs), axis=-1) + 1e-12)
elif metric == 'cityblock':
return tf.reduce_sum(input_tensor=tf.abs(diffs), axis=-1)
else:
raise NotImplementedError(
'The following metric is not implemented by `cdist` yet: {}'.format(metric))
cdist.supported_metrics = [
'euclidean',
'sqeuclidean',
'cityblock',
]
def get_at_indices(tensor, indices):
counter = tf.range(tf.shape(input=indices, out_type=indices.dtype)[0])
return tf.gather_nd(tensor, tf.stack((counter, indices), -1))
def batch_hard(dists, pids, margin, batch_precision_at_k=None):
with tf.compat.v1.name_scope("batch_hard"):
same_identity_mask = tf.equal(tf.expand_dims(pids, axis=1),
tf.expand_dims(pids, axis=0))
negative_mask = tf.logical_not(same_identity_mask)
positive_mask = tf.math.logical_xor(same_identity_mask,
tf.eye(tf.shape(input=pids)[0], dtype=tf.bool))
furthest_positive = tf.reduce_max(input_tensor=dists*tf.cast(positive_mask, tf.float32), axis=1)
closest_negative = tf.map_fn(lambda x: tf.reduce_min(input_tensor=tf.boolean_mask(tensor=x[0], mask=x[1])),
(dists, negative_mask), tf.float32)
diff = furthest_positive - closest_negative
if isinstance(margin, numbers.Real):
diff = tf.maximum(diff + margin, 0.0)
elif margin == 'soft':
diff = tf.nn.softplus(diff)
elif margin.lower() == 'none':
pass
else:
raise NotImplementedError(
'The margin {} is not implemented in batch_hard'.format(margin))
if batch_precision_at_k is None:
return diff
with tf.compat.v1.name_scope("monitoring"):
# drop the diagonal.
_, indices = tf.nn.top_k(-dists, k=batch_precision_at_k+1)
# Drop the diagonal (distance to self is always least).
indices = indices[:,1:]
# Generate the index indexing into the batch dimension.
# This is simething like [[0,0,0],[1,1,1],...,[B,B,B]]
batch_index = tf.tile(
tf.expand_dims(tf.range(tf.shape(input=indices)[0]), 1),
(1, tf.shape(input=indices)[1]))
# Stitch the above together with the argsort indices to get the
# indices of the top-k of each row.
topk_indices = tf.stack((batch_index, indices), -1)
# See if the topk belong to the same person as they should, or not.
topk_is_same = tf.gather_nd(same_identity_mask, topk_indices)
# All of the above could be reduced to the simpler following if k==1
#top1_is_same = get_at_indices(same_identity_mask, top_idxs[:,1])
topk_is_same_f32 = tf.cast(topk_is_same, tf.float32)
top1 = tf.reduce_mean(input_tensor=topk_is_same_f32[:,0])
prec_at_k = tf.reduce_mean(input_tensor=topk_is_same_f32)
# Finally, let's get some more info that can help in debugging while
negative_dists = tf.boolean_mask(tensor=dists, mask=negative_mask)
positive_dists = tf.boolean_mask(tensor=dists, mask=positive_mask)
return diff, top1, prec_at_k, topk_is_same, negative_dists, positive_dists
LOSS_CHOICES = {
'batch_hard': batch_hard,
}
| true | true |
f72d885c2821d0dd8389f957464f9c9db74649a9 | 2,395 | py | Python | rcsb/utils/tests-targets/testCARDTargetFeatureProvider.py | rcsb/py-rcsb_utils_targets | 1796ae15186df22a4167c4554aec1dca4b16539b | [
"Apache-2.0"
] | null | null | null | rcsb/utils/tests-targets/testCARDTargetFeatureProvider.py | rcsb/py-rcsb_utils_targets | 1796ae15186df22a4167c4554aec1dca4b16539b | [
"Apache-2.0"
] | null | null | null | rcsb/utils/tests-targets/testCARDTargetFeatureProvider.py | rcsb/py-rcsb_utils_targets | 1796ae15186df22a4167c4554aec1dca4b16539b | [
"Apache-2.0"
] | null | null | null | ##
# File: CARDTargetFeatureProviderTests.py
# Author: J. Westbrook
# Date: 11-Jun-2021
# Version: 0.001
#
# Update:
#
#
##
"""
Tests for utilities managing CARD target data.
"""
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import logging
import os
import platform
import resource
import time
import unittest
from rcsb.utils.targets.CARDTargetFeatureProvider import CARDTargetFeatureProvider
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(HERE))
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
class CARDTargetFeatureProviderTests(unittest.TestCase):
def setUp(self):
self.__cachePath = os.path.join(HERE, "test-output", "CACHE")
#
self.__seqMatchResultsPath = os.path.join(HERE, "test-data", "card-vs-pdbprent-filtered-results.json.gz")
self.__startTime = time.time()
logger.info("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def tearDown(self):
unitS = "MB" if platform.system() == "Darwin" else "GB"
rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
endTime = time.time()
logger.info("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
def testBuildCARDTargetsFeatures(self):
stfP = CARDTargetFeatureProvider(cachePath=self.__cachePath, useCache=False)
ok = stfP.testCache()
self.assertFalse(ok)
ok = stfP.buildFeatureList(self.__seqMatchResultsPath, useTaxonomy=True)
self.assertTrue(ok)
stfP = CARDTargetFeatureProvider(cachePath=self.__cachePath, useCache=True)
ok = stfP.testCache()
self.assertTrue(ok)
ok = stfP.hasFeatures("5f64_1")
self.assertTrue(ok)
fL = stfP.getFeatures("5f64_1")
self.assertGreaterEqual(len(fL), 1)
def buildCARDFeaturesTargets():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(CARDTargetFeatureProviderTests("testBuildCARDTargetsFeatures"))
return suiteSelect
if __name__ == "__main__":
mySuite = buildCARDFeaturesTargets()
unittest.TextTestRunner(verbosity=2).run(mySuite)
| 31.933333 | 149 | 0.694363 |
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import logging
import os
import platform
import resource
import time
import unittest
from rcsb.utils.targets.CARDTargetFeatureProvider import CARDTargetFeatureProvider
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(HERE))
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
class CARDTargetFeatureProviderTests(unittest.TestCase):
def setUp(self):
self.__cachePath = os.path.join(HERE, "test-output", "CACHE")
self.__seqMatchResultsPath = os.path.join(HERE, "test-data", "card-vs-pdbprent-filtered-results.json.gz")
self.__startTime = time.time()
logger.info("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def tearDown(self):
unitS = "MB" if platform.system() == "Darwin" else "GB"
rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
endTime = time.time()
logger.info("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
def testBuildCARDTargetsFeatures(self):
stfP = CARDTargetFeatureProvider(cachePath=self.__cachePath, useCache=False)
ok = stfP.testCache()
self.assertFalse(ok)
ok = stfP.buildFeatureList(self.__seqMatchResultsPath, useTaxonomy=True)
self.assertTrue(ok)
stfP = CARDTargetFeatureProvider(cachePath=self.__cachePath, useCache=True)
ok = stfP.testCache()
self.assertTrue(ok)
ok = stfP.hasFeatures("5f64_1")
self.assertTrue(ok)
fL = stfP.getFeatures("5f64_1")
self.assertGreaterEqual(len(fL), 1)
def buildCARDFeaturesTargets():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(CARDTargetFeatureProviderTests("testBuildCARDTargetsFeatures"))
return suiteSelect
if __name__ == "__main__":
mySuite = buildCARDFeaturesTargets()
unittest.TextTestRunner(verbosity=2).run(mySuite)
| true | true |
f72d886ac003b22033ad97f0998cbc701ccac80c | 2,411 | py | Python | kaggle/mnist/bayes/naivebayes.py | fg6/MachineLearning | 7c3f6e8f2f90b729dbcc345c5a8a5da712cfbb27 | [
"MIT"
] | null | null | null | kaggle/mnist/bayes/naivebayes.py | fg6/MachineLearning | 7c3f6e8f2f90b729dbcc345c5a8a5da712cfbb27 | [
"MIT"
] | null | null | null | kaggle/mnist/bayes/naivebayes.py | fg6/MachineLearning | 7c3f6e8f2f90b729dbcc345c5a8a5da712cfbb27 | [
"MIT"
] | 1 | 2019-05-15T02:17:22.000Z | 2019-05-15T02:17:22.000Z |
import numpy as np
from sortedcontainers import SortedList
from scipy.stats import multivariate_normal
class NaiveBayes:
#def __init__(self):
# pass
def fit(self, X, Y):
self.X = X
self.Y = set(Y)
self.Classes = set(Y)
self.Prior = {}
self.G = {}
# smoothing
epsilon=0.001*np.identity(28)
for c in self.Classes:
Xc = X[Y==c]
Mean = np.mean(Xc, axis=0,dtype=np.float64)
Sigma = np.var(Xc,axis=0,dtype=np.float64)+0.001
self.G[c] = (Mean, Sigma)
self.Prior[c] = float(len(Xc))/len(Y)
def predict(self, X):
results=[]
max_posterior = -1
max_class = None
c_posterior = np.zeros((X.shape[0], len(self.G)))
for c in self.Classes:
mean, sigma = self.G[c]
c_posterior[:,c] = multivariate_normal.logpdf(X, mean, sigma) + np.log(self.Prior[c]) # add cov !
#print(len(c_posterior), np.argmax(c_posterior, axis=1))
return np.argmax(c_posterior, axis=1)
def score(self, X, Y):
results = self.predict(X)
#for i,v in enumerate(Y):
# print(i,v,results[i])
score = np.mean(results == Y)
return score
class Bayes:
def fit(self, X, Y, e=0.001):
self.X = X
self.Y = set(Y)
N,D = X.shape
self.Classes = set(Y)
self.Prior = {}
self.G = {}
# smoothing
epsilon=e*np.identity(28)
for c in self.Classes:
Xc = X [ Y==c ]
Mean = np.mean(Xc, axis=0, dtype=np.float64)
#Sigma = np.var(Xc, axis=0, dtype=np.float64) + e
Cov = np.cov(Xc.T)+ np.eye(D)*e
self.G[c] = (Mean, Cov)
self.Prior[c] = float(len(Xc))/len(Y)
def predict(self, X):
results=[]
max_posterior = -1
max_class = None
c_posterior = np.zeros((X.shape[0], len(self.G)))
for c in self.Classes:
mean, cov = self.G[c]
c_posterior[:,c] = multivariate_normal.logpdf(X, mean, cov) + np.log(self.Prior[c])
return np.argmax(c_posterior, axis=1)
def score(self, X, Y):
results = self.predict(X)
score = np.mean(results == Y)
return score
| 25.924731 | 109 | 0.498548 |
import numpy as np
from sortedcontainers import SortedList
from scipy.stats import multivariate_normal
class NaiveBayes:
def fit(self, X, Y):
self.X = X
self.Y = set(Y)
self.Classes = set(Y)
self.Prior = {}
self.G = {}
epsilon=0.001*np.identity(28)
for c in self.Classes:
Xc = X[Y==c]
Mean = np.mean(Xc, axis=0,dtype=np.float64)
Sigma = np.var(Xc,axis=0,dtype=np.float64)+0.001
self.G[c] = (Mean, Sigma)
self.Prior[c] = float(len(Xc))/len(Y)
def predict(self, X):
results=[]
max_posterior = -1
max_class = None
c_posterior = np.zeros((X.shape[0], len(self.G)))
for c in self.Classes:
mean, sigma = self.G[c]
c_posterior[:,c] = multivariate_normal.logpdf(X, mean, sigma) + np.log(self.Prior[c])
return np.argmax(c_posterior, axis=1)
def score(self, X, Y):
results = self.predict(X)
score = np.mean(results == Y)
return score
class Bayes:
def fit(self, X, Y, e=0.001):
self.X = X
self.Y = set(Y)
N,D = X.shape
self.Classes = set(Y)
self.Prior = {}
self.G = {}
epsilon=e*np.identity(28)
for c in self.Classes:
Xc = X [ Y==c ]
Mean = np.mean(Xc, axis=0, dtype=np.float64)
Cov = np.cov(Xc.T)+ np.eye(D)*e
self.G[c] = (Mean, Cov)
self.Prior[c] = float(len(Xc))/len(Y)
def predict(self, X):
results=[]
max_posterior = -1
max_class = None
c_posterior = np.zeros((X.shape[0], len(self.G)))
for c in self.Classes:
mean, cov = self.G[c]
c_posterior[:,c] = multivariate_normal.logpdf(X, mean, cov) + np.log(self.Prior[c])
return np.argmax(c_posterior, axis=1)
def score(self, X, Y):
results = self.predict(X)
score = np.mean(results == Y)
return score
| true | true |
f72d88fab45b476987edef7d50bb524e562ad941 | 89 | py | Python | src/eversource_scraper/__init__.py | Haeilifax/eversource_scraper | 4652ff82a57124e0b83644c8776d6e54a39103be | [
"MIT"
] | null | null | null | src/eversource_scraper/__init__.py | Haeilifax/eversource_scraper | 4652ff82a57124e0b83644c8776d6e54a39103be | [
"MIT"
] | null | null | null | src/eversource_scraper/__init__.py | Haeilifax/eversource_scraper | 4652ff82a57124e0b83644c8776d6e54a39103be | [
"MIT"
] | null | null | null | from eversource_scraper import (selenium_scraper, mysql_inserter)
__version__ = "0.1.0"
| 22.25 | 65 | 0.808989 | from eversource_scraper import (selenium_scraper, mysql_inserter)
__version__ = "0.1.0"
| true | true |
f72d894c5dd643cc66f3cf18f2330569c6a1b5c9 | 6,672 | py | Python | bindings/python/ensmallen_graph/datasets/string/pantoearwandensis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/pantoearwandensis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/pantoearwandensis.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Pantoea rwandensis.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:19:45.558653
The undirected graph Pantoea rwandensis has 3765 nodes and 306976 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.04332 and has 12 connected components, where the component with most
nodes has 3741 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 128, the mean node degree is 163.07, and
the node degree mode is 1. The top 5 most central nodes are 1076550.LH22_12305
(degree 1310), 1076550.LH22_16485 (degree 1299), 1076550.LH22_02530 (degree
1292), 1076550.LH22_19995 (degree 1166) and 1076550.LH22_07950 (degree
1066).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import PantoeaRwandensis
# Then load the graph
graph = PantoeaRwandensis()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def PantoeaRwandensis(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Pantoea rwandensis graph.

    The graph is automatically downloaded (and cached) from the STRING
    repository and built into an EnsmallenGraph.

    Parameters
    -------------------
    directed: bool = False,
        Wether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Wether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instace of Pantoea rwandensis graph.

    Report
    ---------------------
    At rendering time (2021-02-02) the undirected graph had 3765 nodes and
    306976 weighted edges, no self-loops, density 0.04332 and 12 connected
    components (largest: 3741 nodes, smallest: 2 nodes). Median node degree
    128, mean 163.07, mode 1. Top central nodes: 1076550.LH22_12305 (1310),
    1076550.LH22_16485 (1299), 1076550.LH22_02530 (1292),
    1076550.LH22_19995 (1166), 1076550.LH22_07950 (1066).

    References
    ---------------------
    Please cite the following if you use the data:

    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }

    Usage example
    ----------------------
    .. code:: python

        from ensmallen_graph.datasets.string import PantoeaRwandensis

        graph = PantoeaRwandensis()
        print(graph)

        # Optional: split for link prediction with a connected holdout
        train_graph, validation_graph = graph.connected_holdout(
            train_size=0.8,
            random_state=42,
            verbose=True
        )

        # Optional memory-time trade-offs
        train_graph.enable(
            vector_sources=True,
            vector_destinations=True,
            vector_outbounds=True
        )
    """
    # Build the retriever first, then invoke it to download/construct the graph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="PantoeaRwandensis",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 34.931937 | 223 | 0.702938 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def PantoeaRwandensis(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="PantoeaRwandensis",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f72d896778f8cdb96ceeedf64eb8c606fcdc07b1 | 24 | py | Python | example_snippets/multimenus_snippets/NewSnippets/SymPy/Manipulating expressions/Exponentials and Logarithms/Combine exponentials.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/NewSnippets/SymPy/Manipulating expressions/Exponentials and Logarithms/Combine exponentials.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/NewSnippets/SymPy/Manipulating expressions/Exponentials and Logarithms/Combine exponentials.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | powsimp(exp(y) * exp(z)) | 24 | 24 | 0.625 | powsimp(exp(y) * exp(z)) | true | true |
f72d89796eb4cedeea9887765fdd33a3f277fd71 | 178,860 | py | Python | src/azure-cli/azure/cli/command_modules/vm/custom.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/vm/custom.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | 1 | 2021-02-24T09:10:12.000Z | 2021-02-24T09:10:12.000Z | src/azure-cli/azure/cli/command_modules/vm/custom.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | 1 | 2020-09-07T18:44:14.000Z | 2020-09-07T18:44:14.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-self-use,too-many-lines
from __future__ import print_function
import json
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
# the urlopen is imported for automation purpose
from six.moves.urllib.request import urlopen # noqa, pylint: disable=import-error,unused-import,ungrouped-imports
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.command_modules.vm._validators import _get_resource_group_from_vault_name
from azure.cli.core.commands.validators import validate_file_or_dict
from azure.cli.core.commands import LongRunningOperation, DeploymentOutputLongRunningOperation
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import sdk_no_wait
from ._vm_utils import read_content_if_is_file
from ._vm_diagnostics_templates import get_default_diag_config
from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services,
load_images_thru_services, _get_latest_image_version)
from ._client_factory import (_compute_client_factory, cf_public_ip_addresses, cf_vm_image_term,
_dev_test_labs_client_factory)
logger = get_logger(__name__)
# Use the same name by portal, so people can update from both cli and portal
# (VM doesn't allow multiple handlers for the same extension)
_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess'
# Extension type names for the VM access, guest diagnostics and OMS
# monitoring agents, one constant per OS family.
_LINUX_ACCESS_EXT = 'VMAccessForLinux'
_WINDOWS_ACCESS_EXT = 'VMAccessAgent'
_LINUX_DIAG_EXT = 'LinuxDiagnostic'
_WINDOWS_DIAG_EXT = 'IaaSDiagnostics'
_LINUX_OMS_AGENT_EXT = 'OmsAgentForLinux'
_WINDOWS_OMS_AGENT_EXT = 'MicrosoftMonitoringAgent'
# Default publisher and handler version for each extension above; the access
# extension's version may be bumped at install time (see
# _get_access_extension_upgrade_info).
extension_mappings = {
    _LINUX_ACCESS_EXT: {
        'version': '1.5',
        'publisher': 'Microsoft.OSTCExtensions'
    },
    _WINDOWS_ACCESS_EXT: {
        'version': '2.4',
        'publisher': 'Microsoft.Compute'
    },
    _LINUX_DIAG_EXT: {
        'version': '3.0',
        'publisher': 'Microsoft.Azure.Diagnostics'
    },
    _WINDOWS_DIAG_EXT: {
        'version': '1.5',
        'publisher': 'Microsoft.Azure.Diagnostics'
    },
    _LINUX_OMS_AGENT_EXT: {
        'version': '1.0',
        'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
    },
    _WINDOWS_OMS_AGENT_EXT: {
        'version': '1.0',
        'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
    }
}
def _construct_identity_info(identity_scope, identity_role, implicit_identity, external_identities):
info = {}
if identity_scope:
info['scope'] = identity_scope
info['role'] = str(identity_role) # could be DefaultStr, so convert to string
info['userAssignedIdentities'] = external_identities or {}
info['systemAssignedIdentity'] = implicit_identity or ''
return info
# for injecting test seams to produce predicatable role assignment id for playback
def _gen_guid():
import uuid
return uuid.uuid4()
def _get_access_extension_upgrade_info(extensions, name):
    """Return (publisher, version, auto_upgrade) for the access extension 'name'.

    Starts from the defaults in extension_mappings. If the VM already has the
    extension installed: an older installed version requests auto-upgrade,
    while a newer installed version is kept instead of the default.
    """
    mapping = extension_mappings[name]
    publisher = mapping['publisher']
    version = mapping['version']
    auto_upgrade = None
    installed = next((e for e in extensions if e.name == name), None) if extensions else None
    if installed:
        from distutils.version import LooseVersion  # pylint: disable=no-name-in-module,import-error
        installed_version = LooseVersion(installed.type_handler_version)
        if installed_version < LooseVersion(version):
            auto_upgrade = True
        elif installed_version > LooseVersion(version):
            version = installed.type_handler_version
    return publisher, version, auto_upgrade
def _get_extension_instance_name(instance_view, publisher, extension_type_name,
suggested_name=None):
extension_instance_name = suggested_name or extension_type_name
full_type_name = '.'.join([publisher, extension_type_name])
if instance_view.extensions:
ext = next((x for x in instance_view.extensions
if x.type and (x.type.lower() == full_type_name.lower())), None)
if ext:
extension_instance_name = ext.name
return extension_instance_name
def _get_storage_management_client(cli_ctx):
    """Return an SDK client for the Microsoft.Storage management plane."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
    return client
def _get_disk_lun(data_disks):
# start from 0, search for unused int for lun
if not data_disks:
return 0
existing_luns = sorted([d.lun for d in data_disks])
for i, current in enumerate(existing_luns):
if current != i:
return i
return len(existing_luns)
def _get_private_config(cli_ctx, resource_group_name, storage_account):
    """Return the diagnostics private configuration: account name plus its primary key."""
    storage_client = _get_storage_management_client(cli_ctx)
    # pylint: disable=no-member
    account_keys = storage_client.storage_accounts.list_keys(resource_group_name, storage_account).keys
    return {
        'storageAccountName': storage_account,
        'storageAccountKey': account_keys[0].value
    }
def _get_resource_group_location(cli_ctx, resource_group_name):
    """Look up and return the location of the given resource group."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    group = client.resource_groups.get(resource_group_name)  # pylint: disable=no-member
    return group.location
def _get_sku_object(cmd, sku):
if cmd.supported_api_version(min_api='2017-03-30'):
DiskSku = cmd.get_models('DiskSku')
return DiskSku(name=sku)
return sku
def _grant_access(cmd, resource_group_name, name, duration_in_seconds, is_disk, access_level):
    """Grant SAS access on a disk or snapshot for the given duration (defaults to read)."""
    AccessLevel = cmd.get_models('AccessLevel')
    client = _compute_client_factory(cmd.cli_ctx)
    operations = client.disks if is_disk else client.snapshots
    return operations.grant_access(resource_group_name, name,
                                   access_level or AccessLevel.read, duration_in_seconds)
def _is_linux_os(vm):
os_type = vm.storage_profile.os_disk.os_type.value if vm.storage_profile.os_disk.os_type else None
if os_type:
return os_type.lower() == 'linux'
# the os_type could be None for VM scaleset, let us check out os configurations
if vm.os_profile.linux_configuration:
return bool(vm.os_profile.linux_configuration)
return False
def _merge_secrets(secrets):
"""
Merge a list of secrets. Each secret should be a dict fitting the following JSON structure:
[{ "sourceVault": { "id": "value" },
"vaultCertificates": [{ "certificateUrl": "value",
"certificateStore": "cert store name (only on windows)"}] }]
The array of secrets is merged on sourceVault.id.
:param secrets:
:return:
"""
merged = {}
vc_name = 'vaultCertificates'
for outer in secrets:
for secret in outer:
if secret['sourceVault']['id'] not in merged:
merged[secret['sourceVault']['id']] = []
merged[secret['sourceVault']['id']] = \
secret[vc_name] + merged[secret['sourceVault']['id']]
# transform the reduced map to vm format
formatted = [{'sourceVault': {'id': source_id},
'vaultCertificates': value}
for source_id, value in list(merged.items())]
return formatted
def _normalize_extension_version(cli_ctx, publisher, vm_extension_name, version, location):
def _trim_away_build_number(version):
# workaround a known issue: the version must only contain "major.minor", even though
# "extension image list" gives more detail
return '.'.join(version.split('.')[0:2])
if not version:
result = load_extension_images_thru_services(cli_ctx, publisher, vm_extension_name, None, location,
show_latest=True, partial_match=False)
if not result:
raise CLIError('Failed to find the latest version for the extension "{}"'.format(vm_extension_name))
# with 'show_latest' enabled, we will only get one result.
version = result[0]['version']
version = _trim_away_build_number(version)
return version
def _parse_rg_name(strid):
    """From an ID, extract the contained (resource group, name) tuple."""
    from msrestazure.tools import parse_resource_id
    parsed = parse_resource_id(strid)
    return parsed['resource_group'], parsed['name']
def _set_sku(cmd, instance, sku):
if cmd.supported_api_version(min_api='2017-03-30'):
instance.sku = cmd.get_models('DiskSku')(name=sku)
else:
instance.account_type = sku
def _show_missing_access_warning(resource_group, name, command):
    """Warn that no role assignment was created because '--scope' was omitted."""
    template = ("No access was given yet to the '{1}', because '--scope' was not provided. "
                "You should setup by creating a role assignment, e.g. "
                "'az role assignment create --assignee <principal-id> --role contributor -g {0}' "
                "would let it access the current resource group. To get the pricipal id, run "
                "'az {2} show -g {0} -n {1} --query \"identity.principalId\" -otsv'")
    logger.warning(template.format(resource_group, name, command))
def _parse_aux_subscriptions(resource_id):
    """Return a one-element list with the subscription of a valid resource ID, else None."""
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if not is_valid_resource_id(resource_id):
        return None
    return [parse_resource_id(resource_id)['subscription']]
# Hide extension information from output as the info is not correct and unhelpful; also
# commands using it mean to hide the extension concept from users.
class ExtensionUpdateLongRunningOperation(LongRunningOperation):  # pylint: disable=too-few-public-methods
    """Marker poller type for extension-related commands; adds no behavior of its own."""
    pass
# region Disks (Managed)
def create_managed_disk(cmd, resource_group_name, disk_name, location=None,  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
                        size_gb=None, sku='Premium_LRS', os_type=None,
                        source=None, for_upload=None, upload_size_bytes=None,  # pylint: disable=unused-argument
                        # below are generated internally from 'source'
                        source_blob_uri=None, source_disk=None, source_snapshot=None,
                        source_storage_account_id=None, no_wait=False, tags=None, zone=None,
                        disk_iops_read_write=None, disk_mbps_read_write=None, hyper_v_generation=None,
                        encryption_type=None, disk_encryption_set=None, max_shares=None,
                        disk_iops_read_only=None, disk_mbps_read_only=None,
                        image_reference=None, image_reference_lun=None,
                        gallery_image_reference=None, gallery_image_reference_lun=None,
                        network_access_policy=None, disk_access=None):
    """Create a managed disk (empty, imported, copied, uploaded or image-based).

    The create option is derived from whichever source argument was supplied;
    'source' itself is only a user-facing argument that validators expand into
    source_blob_uri/source_disk/source_snapshot. Returns a poller (or the raw
    request when no_wait is set) for the disks.create_or_update operation.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    Disk, CreationData, DiskCreateOption, Encryption = cmd.get_models(
        'Disk', 'CreationData', 'DiskCreateOption', 'Encryption')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    # Pick the create option from whichever source was provided (blob import,
    # disk/snapshot copy, upload, marketplace/gallery image, or empty).
    if source_blob_uri:
        option = DiskCreateOption.import_enum
    elif source_disk or source_snapshot:
        option = DiskCreateOption.copy
    elif for_upload:
        option = DiskCreateOption.upload
    elif image_reference or gallery_image_reference:
        option = DiskCreateOption.from_image
    else:
        option = DiskCreateOption.empty
    if source_storage_account_id is None and source_blob_uri is not None:
        # derive the storage account ID from the blob URI host name
        subscription_id = get_subscription_id(cmd.cli_ctx)
        storage_account_name = source_blob_uri.split('.')[0].split('/')[-1]
        source_storage_account_id = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Storage', type='storageAccounts', name=storage_account_name)
    if upload_size_bytes is not None and for_upload is not True:
        raise CLIError('usage error: --upload-size-bytes should be used together with --for-upload')
    if image_reference is not None:
        if not is_valid_resource_id(image_reference):
            # URN or name
            terms = image_reference.split(':')
            if len(terms) == 4:  # URN
                disk_publisher, disk_offer, disk_sku, disk_version = terms[0], terms[1], terms[2], terms[3]
                if disk_version.lower() == 'latest':
                    disk_version = _get_latest_image_version(cmd.cli_ctx, location, disk_publisher, disk_offer,
                                                             disk_sku)
                client = _compute_client_factory(cmd.cli_ctx)
                response = client.virtual_machine_images.get(location, disk_publisher, disk_offer, disk_sku,
                                                             disk_version)
                image_reference = response.id
            else:  # error
                raise CLIError('usage error: --image-reference should be ID or URN (publisher:offer:sku:version).')
        # image_reference is an ID now
        image_reference = {'id': image_reference}
        if image_reference_lun is not None:
            image_reference['lun'] = image_reference_lun
    if gallery_image_reference is not None:
        gallery_image_reference = {'id': gallery_image_reference}
        if gallery_image_reference_lun is not None:
            gallery_image_reference['lun'] = gallery_image_reference_lun
    creation_data = CreationData(create_option=option, source_uri=source_blob_uri,
                                 image_reference=image_reference, gallery_image_reference=gallery_image_reference,
                                 source_resource_id=source_disk or source_snapshot,
                                 storage_account_id=source_storage_account_id,
                                 upload_size_bytes=upload_size_bytes)
    if size_gb is None and upload_size_bytes is None and (option == DiskCreateOption.empty or for_upload):
        raise CLIError('usage error: --size-gb or --upload-size-bytes required to create an empty disk')
    # Disk encryption set and disk access may be given as plain names; expand
    # them to full resource IDs in the current group/subscription.
    if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
        disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
    if disk_access is not None and not is_valid_resource_id(disk_access):
        disk_access = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
    encryption = None
    if disk_encryption_set:
        encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set)
    disk = Disk(location=location, creation_data=creation_data, tags=(tags or {}),
                sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, os_type=os_type, encryption=encryption)
    # optional properties are only set when supplied, so older API versions stay usable
    if hyper_v_generation:
        disk.hyper_vgeneration = hyper_v_generation
    if zone:
        disk.zones = zone
    if disk_iops_read_write is not None:
        disk.disk_iops_read_write = disk_iops_read_write
    if disk_mbps_read_write is not None:
        disk.disk_mbps_read_write = disk_mbps_read_write
    if max_shares is not None:
        disk.max_shares = max_shares
    if disk_iops_read_only is not None:
        disk.disk_iops_read_only = disk_iops_read_only
    if disk_mbps_read_only is not None:
        disk.disk_mbps_read_only = disk_mbps_read_only
    if network_access_policy is not None:
        disk.network_access_policy = network_access_policy
    if disk_access is not None:
        disk.disk_access_id = disk_access
    client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, client.disks.create_or_update, resource_group_name, disk_name, disk)
def grant_disk_access(cmd, resource_group_name, disk_name, duration_in_seconds, access_level=None):
    """Grant time-limited SAS access on a managed disk (for export)."""
    return _grant_access(cmd, resource_group_name, disk_name, duration_in_seconds,
                         is_disk=True, access_level=access_level)
def list_managed_disks(cmd, resource_group_name=None):
    """List managed disks, scoped to a resource group when one is given."""
    ops = _compute_client_factory(cmd.cli_ctx).disks
    return ops.list_by_resource_group(resource_group_name) if resource_group_name else ops.list()
def update_managed_disk(cmd, resource_group_name, instance, size_gb=None, sku=None, disk_iops_read_write=None,
                        disk_mbps_read_write=None, encryption_type=None, disk_encryption_set=None,
                        network_access_policy=None, disk_access=None):
    """Setter for 'az disk update': mutate the fetched disk model in place.

    Only the supplied properties are changed; 'instance' is returned for the
    generic-update machinery to PUT back. Raises CLIError when a disk
    encryption set is given without customer-managed-key encryption.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    if size_gb is not None:
        instance.disk_size_gb = size_gb
    if sku is not None:
        _set_sku(cmd, instance, sku)
    if disk_iops_read_write is not None:
        instance.disk_iops_read_write = disk_iops_read_write
    if disk_mbps_read_write is not None:
        instance.disk_mbps_read_write = disk_mbps_read_write
    if disk_encryption_set is not None:
        # a disk encryption set only makes sense with customer-managed-key encryption
        if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \
                encryption_type != 'EncryptionAtRestWithCustomerKey':
            raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey')
        if not is_valid_resource_id(disk_encryption_set):
            disk_encryption_set = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
        instance.encryption.disk_encryption_set_id = disk_encryption_set
    if encryption_type is not None:
        instance.encryption.type = encryption_type
    if network_access_policy is not None:
        instance.network_access_policy = network_access_policy
    if disk_access is not None:
        # Fix: apply the disk access whether it is given as a plain name or as a
        # full resource ID (previously a full ID never reached the disk model,
        # unlike the create_managed_disk path which applies both forms).
        if not is_valid_resource_id(disk_access):
            disk_access = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
        instance.disk_access_id = disk_access
    return instance
# endregion
# region Images (Managed)
def create_image(cmd, resource_group_name, name, source, os_type=None, data_disk_sources=None, location=None,  # pylint: disable=too-many-locals,unused-argument
                 # below are generated internally from 'source' and 'data_disk_sources'
                 source_virtual_machine=None, storage_sku=None, hyper_v_generation=None,
                 os_blob_uri=None, data_blob_uris=None,
                 os_snapshot=None, data_snapshots=None,
                 os_disk=None, os_disk_caching=None, data_disks=None, data_disk_caching=None,
                 tags=None, zone_resilient=None):
    """Create a managed VM image from a source VM, or from OS/data disk sources.

    'source' and 'data_disk_sources' are user-facing arguments that validators
    expand into the source_virtual_machine / *_blob_uri / *_snapshot / *_disk
    parameters below. Returns the poller for images.create_or_update.
    """
    ImageOSDisk, ImageDataDisk, ImageStorageProfile, Image, SubResource, OperatingSystemStateTypes = cmd.get_models(
        'ImageOSDisk', 'ImageDataDisk', 'ImageStorageProfile', 'Image', 'SubResource', 'OperatingSystemStateTypes')
    if source_virtual_machine:
        # capture everything from an existing (generalized) VM
        location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
        image_storage_profile = None if zone_resilient is None else ImageStorageProfile(zone_resilient=zone_resilient)
        image = Image(location=location, source_virtual_machine=SubResource(id=source_virtual_machine),
                      storage_profile=image_storage_profile, tags=(tags or {}))
    else:
        # assemble the storage profile from explicit disk/snapshot/blob sources
        os_disk = ImageOSDisk(os_type=os_type,
                              os_state=OperatingSystemStateTypes.generalized,
                              caching=os_disk_caching,
                              snapshot=SubResource(id=os_snapshot) if os_snapshot else None,
                              managed_disk=SubResource(id=os_disk) if os_disk else None,
                              blob_uri=os_blob_uri,
                              storage_account_type=storage_sku)
        all_data_disks = []
        # LUNs are assigned sequentially across blob, snapshot and disk sources
        lun = 0
        if data_blob_uris:
            for d in data_blob_uris:
                all_data_disks.append(ImageDataDisk(lun=lun, blob_uri=d, caching=data_disk_caching))
                lun += 1
        if data_snapshots:
            for d in data_snapshots:
                all_data_disks.append(ImageDataDisk(lun=lun, snapshot=SubResource(id=d), caching=data_disk_caching))
                lun += 1
        if data_disks:
            for d in data_disks:
                all_data_disks.append(ImageDataDisk(lun=lun, managed_disk=SubResource(id=d), caching=data_disk_caching))
                lun += 1
        image_storage_profile = ImageStorageProfile(os_disk=os_disk, data_disks=all_data_disks)
        if zone_resilient is not None:
            image_storage_profile.zone_resilient = zone_resilient
        location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
        # pylint: disable=no-member
        image = Image(location=location, storage_profile=image_storage_profile, tags=(tags or {}))
    if hyper_v_generation:
        image.hyper_vgeneration = hyper_v_generation
    client = _compute_client_factory(cmd.cli_ctx)
    return client.images.create_or_update(resource_group_name, name, image)
def update_image(instance, tags=None):
    """Setter for 'az image update'; only tags can currently be changed."""
    if tags is None:
        return instance
    instance.tags = tags
    return instance
def list_images(cmd, resource_group_name=None):
    """List managed images, scoped to a resource group when one is given."""
    ops = _compute_client_factory(cmd.cli_ctx).images
    return ops.list_by_resource_group(resource_group_name) if resource_group_name else ops.list()
# endregion
# region Snapshots
# pylint: disable=unused-argument,too-many-locals
def create_snapshot(cmd, resource_group_name, snapshot_name, location=None, size_gb=None, sku='Standard_LRS',
                    source=None, for_upload=None, incremental=None,
                    # below are generated internally from 'source'
                    source_blob_uri=None, source_disk=None, source_snapshot=None, source_storage_account_id=None,
                    hyper_v_generation=None, tags=None, no_wait=False, disk_encryption_set=None,
                    encryption_type=None, network_access_policy=None, disk_access=None):
    """Create a snapshot (empty, imported from a blob, copied, or for upload).

    'source' is expanded by validators into source_blob_uri/source_disk/
    source_snapshot. Returns a poller (or the raw request with no_wait) for
    snapshots.create_or_update.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    Snapshot, CreationData, DiskCreateOption, Encryption = cmd.get_models(
        'Snapshot', 'CreationData', 'DiskCreateOption', 'Encryption')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    # derive the create option from whichever source argument was supplied
    if source_blob_uri:
        option = DiskCreateOption.import_enum
    elif source_disk or source_snapshot:
        option = DiskCreateOption.copy
    elif for_upload:
        option = DiskCreateOption.upload
    else:
        option = DiskCreateOption.empty
    creation_data = CreationData(create_option=option, source_uri=source_blob_uri,
                                 image_reference=None,
                                 source_resource_id=source_disk or source_snapshot,
                                 storage_account_id=source_storage_account_id)
    if size_gb is None and option == DiskCreateOption.empty:
        raise CLIError('Please supply size for the snapshots')
    # expand disk encryption set / disk access names to full resource IDs
    if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
        disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
    if disk_access is not None and not is_valid_resource_id(disk_access):
        disk_access = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
    if disk_encryption_set is not None and encryption_type is None:
        raise CLIError('usage error: Please specify --encryption-type.')
    if encryption_type is not None:
        encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set)
    else:
        encryption = None
    snapshot = Snapshot(location=location, creation_data=creation_data, tags=(tags or {}),
                        sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, incremental=incremental,
                        encryption=encryption)
    # optional properties are only set when supplied
    if hyper_v_generation:
        snapshot.hyper_vgeneration = hyper_v_generation
    if network_access_policy is not None:
        snapshot.network_access_policy = network_access_policy
    if disk_access is not None:
        snapshot.disk_access_id = disk_access
    client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, client.snapshots.create_or_update, resource_group_name, snapshot_name, snapshot)
def grant_snapshot_access(cmd, resource_group_name, snapshot_name, duration_in_seconds, access_level=None):
    """Grant time-limited SAS access on a snapshot (for export)."""
    return _grant_access(cmd, resource_group_name, snapshot_name, duration_in_seconds,
                         is_disk=False, access_level=access_level)
def list_snapshots(cmd, resource_group_name=None):
    """List snapshots, scoped to a resource group when one is given."""
    ops = _compute_client_factory(cmd.cli_ctx).snapshots
    return ops.list_by_resource_group(resource_group_name) if resource_group_name else ops.list()
def update_snapshot(cmd, resource_group_name, instance, sku=None, disk_encryption_set=None,
                    encryption_type=None, network_access_policy=None, disk_access=None):
    """Setter for 'az snapshot update': mutate the fetched snapshot model in place.

    Only the supplied properties are changed; 'instance' is returned for the
    generic-update machinery to PUT back. Raises CLIError when a disk
    encryption set is given without customer-managed-key encryption.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    if sku is not None:
        _set_sku(cmd, instance, sku)
    if disk_encryption_set is not None:
        # a disk encryption set only makes sense with customer-managed-key encryption
        if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \
                encryption_type != 'EncryptionAtRestWithCustomerKey':
            raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey')
        if not is_valid_resource_id(disk_encryption_set):
            disk_encryption_set = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
        instance.encryption.disk_encryption_set_id = disk_encryption_set
    if encryption_type is not None:
        instance.encryption.type = encryption_type
    if network_access_policy is not None:
        instance.network_access_policy = network_access_policy
    if disk_access is not None:
        # Fix: apply the disk access whether it is given as a plain name or as a
        # full resource ID (previously a full ID never reached the snapshot model,
        # unlike the create_snapshot path which applies both forms).
        if not is_valid_resource_id(disk_access):
            disk_access = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
        instance.disk_access_id = disk_access
    return instance
# endregion
# region VirtualMachines Identity
def show_vm_identity(cmd, resource_group_name, vm_name):
    """Show the managed identity information of a VM."""
    vm = _compute_client_factory(cmd.cli_ctx).virtual_machines.get(resource_group_name, vm_name)
    return vm.identity
def show_vmss_identity(cmd, resource_group_name, vm_name):
    """Show the managed identity information of a VM scale set."""
    vmss = _compute_client_factory(cmd.cli_ctx).virtual_machine_scale_sets.get(resource_group_name, vm_name)
    return vmss.identity
def assign_vm_identity(cmd, resource_group_name, vm_name, assign_identity=None, identity_role='Contributor',
                       identity_role_id=None, identity_scope=None):
    """Enable system- and/or user-assigned managed identities on a VM.

    The setter merges the requested identities with whatever the VM already
    has (so enabling one kind never drops the other), patches the VM, and an
    optional role assignment is created when identity_scope is provided.
    Returns the identity summary dict built by _construct_identity_info.
    """
    VirtualMachineIdentity, ResourceIdentityType, VirtualMachineUpdate = cmd.get_models('VirtualMachineIdentity',
                                                                                       'ResourceIdentityType',
                                                                                       'VirtualMachineUpdate')
    VirtualMachineIdentityUserAssignedIdentitiesValue = cmd.get_models(
        'VirtualMachineIdentityUserAssignedIdentitiesValue')
    from azure.cli.core.commands.arm import assign_identity as assign_identity_helper
    client = _compute_client_factory(cmd.cli_ctx)
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
    def getter():
        return client.virtual_machines.get(resource_group_name, vm_name)
    def setter(vm, external_identities=external_identities):
        # Combine the VM's existing identity type with what is being requested:
        # any mix of system-assigned and user-assigned collapses to the
        # combined type; otherwise keep the single requested kind.
        if vm.identity and vm.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vm.identity and vm.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vm.identity and vm.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        vm.identity = VirtualMachineIdentity(type=identity_types)
        if external_identities:
            vm.identity.user_assigned_identities = {}
            for identity in external_identities:
                vm.identity.user_assigned_identities[identity] = VirtualMachineIdentityUserAssignedIdentitiesValue()
        vm_patch = VirtualMachineUpdate()
        vm_patch.identity = vm.identity
        return patch_vm(cmd, resource_group_name, vm_name, vm_patch)
    assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)
    # re-read the VM to report the identities actually applied by the service
    vm = client.virtual_machines.get(resource_group_name, vm_name)
    return _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id,
                                    vm.identity.user_assigned_identities)
# endregion
# region VirtualMachines
def capture_vm(cmd, resource_group_name, vm_name, vhd_name_prefix,
               storage_container='vhds', overwrite=True):
    """Capture a (generalized) VM's disks as VHD blobs and print the resulting template.

    The capture operation produces an ARM template describing the captured image;
    it is printed as JSON so the user can redeploy from it.
    """
    VirtualMachineCaptureParameters = cmd.get_models('VirtualMachineCaptureParameters')
    compute_client = _compute_client_factory(cmd.cli_ctx)
    capture_params = VirtualMachineCaptureParameters(
        vhd_prefix=vhd_name_prefix,
        destination_container_name=storage_container,
        overwrite_vhds=overwrite)
    capture_poller = compute_client.virtual_machines.capture(
        resource_group_name, vm_name, capture_params)
    capture_result = LongRunningOperation(cmd.cli_ctx)(capture_poller)
    # Newer API versions return the template under 'output'; older ones under 'resources'.
    template_output = getattr(capture_result, 'output', None) or capture_result.resources[0]
    print(json.dumps(template_output, indent=2))  # pylint: disable=no-member
# pylint: disable=too-many-locals, unused-argument, too-many-statements, too-many-branches
def create_vm(cmd, vm_name, resource_group_name, image=None, size='Standard_DS1_v2', location=None, tags=None,
              no_wait=False, authentication_type=None, admin_password=None, computer_name=None,
              admin_username=None, ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
              availability_set=None, nics=None, nsg=None, nsg_rule=None, accelerated_networking=None,
              private_ip_address=None, public_ip_address=None, public_ip_address_allocation='dynamic',
              public_ip_address_dns_name=None, public_ip_sku=None, os_disk_name=None, os_type=None,
              storage_account=None, os_caching=None, data_caching=None, storage_container_name=None, storage_sku=None,
              use_unmanaged_disk=False, attach_os_disk=None, os_disk_size_gb=None, attach_data_disks=None,
              data_disk_sizes_gb=None, disk_info=None,
              vnet_name=None, vnet_address_prefix='10.0.0.0/16', subnet=None, subnet_address_prefix='10.0.0.0/24',
              storage_profile=None, os_publisher=None, os_offer=None, os_sku=None, os_version=None,
              storage_account_type=None, vnet_type=None, nsg_type=None, public_ip_address_type=None, nic_type=None,
              validate=False, custom_data=None, secrets=None, plan_name=None, plan_product=None, plan_publisher=None,
              plan_promotion_code=None, license_type=None, assign_identity=None, identity_scope=None,
              identity_role='Contributor', identity_role_id=None, application_security_groups=None, zone=None,
              boot_diagnostics_storage=None, ultra_ssd_enabled=None, ephemeral_os_disk=None,
              proximity_placement_group=None, dedicated_host=None, dedicated_host_group=None, aux_subscriptions=None,
              priority=None, max_price=None, eviction_policy=None, enable_agent=None, workspace=None, vmss=None,
              os_disk_encryption_set=None, data_disk_encryption_sets=None, specialized=None,
              encryption_at_host=None, enable_auto_update=None, patch_mode=None):
    """Create a VM by assembling and deploying a single ARM template.

    The validators have already resolved the '*_type' parameters to 'new'/'existing'/None,
    which drives whether supporting resources (storage account, NIC, VNet, NSG, public IP)
    are added to the template or referenced. The template is then deployed (or only
    validated when `validate` is set), and the created VM's details are returned.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.cli.core.util import random_string, hash_string
    from azure.cli.core.commands.arm import ArmTemplateBuilder
    from azure.cli.command_modules.vm._template_builder import (build_vm_resource,
                                                                build_storage_account_resource, build_nic_resource,
                                                                build_vnet_resource, build_nsg_resource,
                                                                build_public_ip_resource, StorageProfile,
                                                                build_msi_role_assignment,
                                                                build_vm_linux_log_analytics_workspace_agent,
                                                                build_vm_windows_log_analytics_workspace_agent)
    from msrestazure.tools import resource_id, is_valid_resource_id, parse_resource_id

    subscription_id = get_subscription_id(cmd.cli_ctx)

    # Expand bare disk-encryption-set names into full resource ids.
    if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
        os_disk_encryption_set = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)

    if data_disk_encryption_sets is None:
        data_disk_encryption_sets = []
    for i, des in enumerate(data_disk_encryption_sets):
        if des is not None and not is_valid_resource_id(des):
            data_disk_encryption_sets[i] = resource_id(
                subscription=subscription_id, resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)

    storage_sku = disk_info['os'].get('storageAccountType')

    network_id_template = resource_id(
        subscription=subscription_id, resource_group=resource_group_name,
        namespace='Microsoft.Network')

    vm_id = resource_id(
        subscription=subscription_id, resource_group=resource_group_name,
        namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)

    # determine final defaults and calculated values
    tags = tags or {}
    os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vm_id, length=10)) if use_unmanaged_disk else None)
    storage_container_name = storage_container_name or 'vhds'

    # Build up the ARM template
    master_template = ArmTemplateBuilder()

    vm_dependencies = []
    if storage_account_type == 'new':
        storage_account = storage_account or 'vhdstorage{}'.format(
            hash_string(vm_id, length=14, force_lower=True))
        vm_dependencies.append('Microsoft.Storage/storageAccounts/{}'.format(storage_account))
        master_template.add_resource(build_storage_account_resource(cmd, storage_account, location,
                                                                    tags, storage_sku))

    nic_name = None
    if nic_type == 'new':
        nic_name = '{}VMNic'.format(vm_name)
        vm_dependencies.append('Microsoft.Network/networkInterfaces/{}'.format(nic_name))

        nic_dependencies = []
        if vnet_type == 'new':
            subnet = subnet or '{}Subnet'.format(vm_name)
            vnet_exists = False
            if vnet_name:
                from azure.cli.command_modules.vm._vm_utils import check_existence
                vnet_exists = \
                    check_existence(cmd.cli_ctx, vnet_name, resource_group_name, 'Microsoft.Network', 'virtualNetworks')

            if vnet_exists:
                # VNet already exists: add/replace only the subnet on the live resource
                # instead of putting the whole VNet in the template.
                from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection
                from azure.cli.command_modules.vm._validators import get_network_client
                client = get_network_client(cmd.cli_ctx).virtual_networks
                vnet = cached_get(cmd, client.get, resource_group_name, vnet_name)

                Subnet = cmd.get_models('Subnet', resource_type=ResourceType.MGMT_NETWORK)
                subnet_obj = Subnet(
                    name=subnet,
                    address_prefixes=[subnet_address_prefix],
                    address_prefix=subnet_address_prefix
                )
                upsert_to_collection(vnet, 'subnets', subnet_obj, 'name')
                try:
                    cached_put(cmd, client.create_or_update, vnet, resource_group_name, vnet_name).result()
                except Exception:
                    raise CLIError('Subnet({}) does not exist, but failed to create a new subnet with address '
                                   'prefix {}. It may be caused by name or address prefix conflict. Please specify '
                                   'an appropriate subnet name with --subnet or a valid address prefix value with '
                                   '--subnet-address-prefix.'.format(subnet, subnet_address_prefix))
            if not vnet_exists:
                vnet_name = vnet_name or '{}VNET'.format(vm_name)
                nic_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
                master_template.add_resource(build_vnet_resource(
                    cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix))

        if nsg_type == 'new':
            if nsg_rule is None:
                nsg_rule = 'RDP' if os_type.lower() == 'windows' else 'SSH'
            nsg = nsg or '{}NSG'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg))
            master_template.add_resource(build_nsg_resource(cmd, nsg, location, tags, nsg_rule))

        if public_ip_address_type == 'new':
            public_ip_address = public_ip_address or '{}PublicIP'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(
                public_ip_address))
            master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location, tags,
                                                                  public_ip_address_allocation,
                                                                  public_ip_address_dns_name,
                                                                  public_ip_sku, zone))

        subnet_id = subnet if is_valid_resource_id(subnet) else \
            '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)

        nsg_id = None
        if nsg:
            nsg_id = nsg if is_valid_resource_id(nsg) else \
                '{}/networkSecurityGroups/{}'.format(network_id_template, nsg)

        public_ip_address_id = None
        if public_ip_address:
            public_ip_address_id = public_ip_address if is_valid_resource_id(public_ip_address) \
                else '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address)

        nics = [
            {'id': '{}/networkInterfaces/{}'.format(network_id_template, nic_name)}
        ]
        nic_resource = build_nic_resource(
            cmd, nic_name, location, tags, vm_name, subnet_id, private_ip_address, nsg_id,
            public_ip_address_id, application_security_groups, accelerated_networking=accelerated_networking)
        nic_resource['dependsOn'] = nic_dependencies
        master_template.add_resource(nic_resource)
    else:
        # Using an existing NIC
        invalid_parameters = [nsg, public_ip_address, subnet, vnet_name, application_security_groups]
        if any(invalid_parameters):
            raise CLIError('When specifying an existing NIC, do not specify NSG, '
                           'public IP, ASGs, VNet or subnet.')
        if accelerated_networking is not None:
            logger.warning('When specifying an existing NIC, do not specify accelerated networking. '
                           'Ignore --accelerated-networking now. '
                           'This will trigger an error instead of a warning in future releases.')

    os_vhd_uri = None
    if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]:
        # Unmanaged (storage-account based) OS disk: compute the blob URI.
        storage_account_name = storage_account.rsplit('/', 1)
        storage_account_name = storage_account_name[1] if \
            len(storage_account_name) > 1 else storage_account_name[0]
        os_vhd_uri = 'https://{}.blob.{}/{}/{}.vhd'.format(
            storage_account_name, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name, os_disk_name)
    elif storage_profile == StorageProfile.SASpecializedOSDisk:
        os_vhd_uri = attach_os_disk
        # Derive the disk name from the blob URI, dropping the '.vhd' suffix.
        os_disk_name = attach_os_disk.rsplit('/', 1)[1][:-4]

    if custom_data:
        custom_data = read_content_if_is_file(custom_data)

    if secrets:
        secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])

    vm_resource = build_vm_resource(
        cmd=cmd, name=vm_name, location=location, tags=tags, size=size, storage_profile=storage_profile, nics=nics,
        admin_username=admin_username, availability_set_id=availability_set, admin_password=admin_password,
        ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, image_reference=image,
        os_disk_name=os_disk_name, custom_image_os_type=os_type, authentication_type=authentication_type,
        os_publisher=os_publisher, os_offer=os_offer, os_sku=os_sku, os_version=os_version, os_vhd_uri=os_vhd_uri,
        attach_os_disk=attach_os_disk, os_disk_size_gb=os_disk_size_gb, custom_data=custom_data, secrets=secrets,
        license_type=license_type, zone=zone, disk_info=disk_info,
        boot_diagnostics_storage_uri=boot_diagnostics_storage, ultra_ssd_enabled=ultra_ssd_enabled,
        proximity_placement_group=proximity_placement_group, computer_name=computer_name,
        dedicated_host=dedicated_host, priority=priority, max_price=max_price, eviction_policy=eviction_policy,
        enable_agent=enable_agent, vmss=vmss, os_disk_encryption_set=os_disk_encryption_set,
        data_disk_encryption_sets=data_disk_encryption_sets, specialized=specialized,
        encryption_at_host=encryption_at_host, dedicated_host_group=dedicated_host_group,
        enable_auto_update=enable_auto_update, patch_mode=patch_mode)
    vm_resource['dependsOn'] = vm_dependencies

    if plan_name:
        vm_resource['plan'] = {
            'name': plan_name,
            'publisher': plan_publisher,
            'product': plan_product,
            'promotionCode': plan_promotion_code
        }

    enable_local_identity = None
    if assign_identity is not None:
        vm_resource['identity'], _, _, enable_local_identity = _build_identities_info(assign_identity)
        role_assignment_guid = None
        if identity_scope:
            role_assignment_guid = str(_gen_guid())
            master_template.add_resource(build_msi_role_assignment(vm_name, vm_id, identity_role_id,
                                                                   role_assignment_guid, identity_scope))

    if workspace is not None:
        # Wire up the Log Analytics (MMA) agent extension for the detected OS.
        workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
        master_template.add_secure_parameter('workspaceId', workspace_id)
        if os_type.lower() == 'linux':
            vm_mmaExtension_resource = build_vm_linux_log_analytics_workspace_agent(cmd, vm_name, location)
            master_template.add_resource(vm_mmaExtension_resource)
        elif os_type.lower() == 'windows':
            vm_mmaExtension_resource = build_vm_windows_log_analytics_workspace_agent(cmd, vm_name, location)
            master_template.add_resource(vm_mmaExtension_resource)
        else:
            logger.warning("Unsupported OS type. Skip the connection step for log analytics workspace.")

    master_template.add_resource(vm_resource)

    if admin_password:
        master_template.add_secure_parameter('adminPassword', admin_password)

    template = master_template.build()
    parameters = master_template.build_parameters()

    # deploy ARM template
    deployment_name = 'vm_deploy_' + random_string(32)
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                     aux_subscriptions=aux_subscriptions).deployments
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    if validate:
        from azure.cli.command_modules.vm._vm_utils import log_pprint_template
        log_pprint_template(template)
        log_pprint_template(parameters)

    # Newer resource API versions wrap the properties in a Deployment model.
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)

        if validate:
            validation_poller = client.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)

        # creates the VM deployment
        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, deployment)
        LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, deployment_name, deployment))
    else:
        if validate:
            return client.validate(resource_group_name, deployment_name, properties)

        # creates the VM deployment
        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, properties)
        LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, deployment_name, properties))

    vm = get_vm_details(cmd, resource_group_name, vm_name)
    if assign_identity is not None:
        if enable_local_identity and not identity_scope:
            _show_missing_access_warning(resource_group_name, vm_name, 'vm')
        setattr(vm, 'identity', _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id,
                                                         vm.identity.user_assigned_identities))

    if workspace is not None:
        workspace_name = parse_resource_id(workspace_id)['name']
        _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)

    return vm
def auto_shutdown_vm(cmd, resource_group_name, vm_name, off=None, email=None, webhook=None, time=None,
                     location=None):
    """Configure (or remove, with `off`) a daily auto-shutdown schedule for a VM.

    Creates a DevTest Labs global schedule named 'shutdown-computevm-<vm_name>'.
    When `off` is truthy the schedule is deleted and other parameters are ignored
    (with a warning). Otherwise `time` (UTC, HHMM) is required; `email` and/or
    `webhook` configure a notification 30 minutes before shutdown.
    """
    from msrestazure.tools import resource_id
    from azure.mgmt.devtestlabs.models import Schedule
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    client = _dev_test_labs_client_factory(cmd.cli_ctx, subscription_id)
    name = 'shutdown-computevm-' + vm_name
    vm_id = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
                        namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)
    if off:
        if email is not None or webhook is not None or time is not None:
            # I don't want to disrupt users. So I warn instead of raising an error.
            logger.warning('If --off, other parameters will be ignored.')
        return client.global_schedules.delete(resource_group_name, name)

    if time is None:
        raise CLIError('usage error: --time is a required parameter')
    daily_recurrence = {'time': time}
    notification_settings = None
    # BUGFIX: previously notifications were only configured when a webhook was
    # supplied, silently dropping --email when used on its own.
    if email or webhook:
        notification_settings = {
            'timeInMinutes': 30,
            'status': 'Enabled'
        }
        if email:
            notification_settings['emailRecipient'] = email
        if webhook:
            notification_settings['webhookUrl'] = webhook
    schedule = Schedule(status='Enabled',
                        target_resource_id=vm_id,
                        daily_recurrence=daily_recurrence,
                        notification_settings=notification_settings,
                        time_zone_id='UTC',
                        task_type='ComputeVmShutdownTask',
                        location=location)
    return client.global_schedules.create_or_update(resource_group_name, name, schedule)
def get_instance_view(cmd, resource_group_name, vm_name):
    """Get a VM model expanded with its instance view (runtime status)."""
    return get_vm(cmd, resource_group_name, vm_name, expand='instanceView')
def get_vm(cmd, resource_group_name, vm_name, expand=None):
    """Fetch a VM model, optionally expanded (e.g. expand='instanceView')."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    return compute_client.virtual_machines.get(resource_group_name, vm_name, expand=expand)
def get_vm_details(cmd, resource_group_name, vm_name):
    """Return the VM's instance view augmented with flattened network details.

    Adds comma-joined `power_state`, `public_ips`, `fqdns`, `private_ips` and
    `mac_addresses` attributes on the returned model, collected by resolving
    every attached NIC and any public IPs on its IP configurations. The raw
    `instance_view` attribute is removed from the result.
    """
    from msrestazure.tools import parse_resource_id
    from azure.cli.command_modules.vm._vm_utils import get_target_network_api
    result = get_instance_view(cmd, resource_group_name, vm_name)
    network_client = get_mgmt_service_client(
        cmd.cli_ctx, ResourceType.MGMT_NETWORK, api_version=get_target_network_api(cmd.cli_ctx))
    public_ips = []
    fqdns = []
    private_ips = []
    mac_addresses = []
    # pylint: disable=line-too-long,no-member
    for nic_ref in result.network_profile.network_interfaces:
        # NICs may live in a different resource group than the VM, so parse each id.
        nic_parts = parse_resource_id(nic_ref.id)
        nic = network_client.network_interfaces.get(nic_parts['resource_group'], nic_parts['name'])
        if nic.mac_address:
            mac_addresses.append(nic.mac_address)
        for ip_configuration in nic.ip_configurations:
            if ip_configuration.private_ip_address:
                private_ips.append(ip_configuration.private_ip_address)
            if ip_configuration.public_ip_address:
                res = parse_resource_id(ip_configuration.public_ip_address.id)
                public_ip_info = network_client.public_ip_addresses.get(res['resource_group'],
                                                                        res['name'])
                if public_ip_info.ip_address:
                    public_ips.append(public_ip_info.ip_address)
                if public_ip_info.dns_settings:
                    fqdns.append(public_ip_info.dns_settings.fqdn)
    setattr(result, 'power_state',
            ','.join([s.display_status for s in result.instance_view.statuses if s.code.startswith('PowerState/')]))
    setattr(result, 'public_ips', ','.join(public_ips))
    setattr(result, 'fqdns', ','.join(fqdns))
    setattr(result, 'private_ips', ','.join(private_ips))
    setattr(result, 'mac_addresses', ','.join(mac_addresses))
    del result.instance_view  # we don't need other instance_view info as people won't care
    return result
def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None):
    """List compute SKUs, optionally filtered by subscription availability,
    resource type, VM size substring and zone support."""
    from ._vm_utils import list_sku_info
    skus = list_sku_info(cmd.cli_ctx, location)
    if not show_all:
        # Hide SKUs the current subscription is not allowed to use.
        skus = [sku for sku in skus
                if not any(r.reason_code == 'NotAvailableForSubscription'
                           for r in (sku.restrictions or []))]
    if resource_type:
        skus = [sku for sku in skus if sku.resource_type.lower() == resource_type.lower()]
    if size:
        skus = [sku for sku in skus
                if sku.resource_type == 'virtualMachines' and size.lower() in sku.name.lower()]
    if zone:
        skus = [sku for sku in skus if sku.location_info and sku.location_info[0].zones]
    return skus
def list_vm(cmd, resource_group_name=None, show_details=False):
    """List VMs in one resource group, or subscription-wide; with show_details,
    resolve each VM's network/power-state details."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if resource_group_name:
        vm_list = compute_client.virtual_machines.list(resource_group_name=resource_group_name)
    else:
        vm_list = compute_client.virtual_machines.list_all()
    if not show_details:
        return list(vm_list)
    return [get_vm_details(cmd, _parse_rg_name(v.id)[0], v.name) for v in vm_list]
def list_vm_ip_addresses(cmd, resource_group_name=None, vm_name=None):
    """List private/public IP address info for VMs, derived entirely from NICs.

    Returns a list of dicts keyed by 'virtualMachine' containing the VM's
    resource group, name, and a 'network' dict with privateIpAddresses and
    publicIpAddresses entries.
    """
    # We start by getting NICs as they are the smack in the middle of all data that we
    # want to collect for a VM (as long as we don't need any info on the VM than what
    # is available in the Id, we don't need to make any calls to the compute RP)
    #
    # Since there is no guarantee that a NIC is in the same resource group as a given
    # Virtual Machine, we can't constrain the lookup to only a single group...
    network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
    nics = network_client.network_interfaces.list_all()
    public_ip_addresses = network_client.public_ip_addresses.list_all()
    # Index public IPs by resource id for O(1) lookup while walking NICs.
    ip_address_lookup = {pip.id: pip for pip in list(public_ip_addresses)}
    result = []
    # Only NICs attached to a VM are of interest here.
    for nic in [n for n in list(nics) if n.virtual_machine]:
        nic_resource_group, nic_vm_name = _parse_rg_name(nic.virtual_machine.id)
        # If provided, make sure that resource group name and vm name match the NIC we are
        # looking at before adding it to the result...
        same_resource_group_name = (resource_group_name is None or
                                    resource_group_name.lower() == nic_resource_group.lower())
        same_vm_name = (vm_name is None or
                        vm_name.lower() == nic_vm_name.lower())
        if same_resource_group_name and same_vm_name:
            network_info = {
                'privateIpAddresses': [],
                'publicIpAddresses': []
            }
            for ip_configuration in nic.ip_configurations:
                network_info['privateIpAddresses'].append(ip_configuration.private_ip_address)
                if ip_configuration.public_ip_address and ip_configuration.public_ip_address.id in ip_address_lookup:
                    public_ip_address = ip_address_lookup[ip_configuration.public_ip_address.id]
                    public_ip_addr_info = {
                        'id': public_ip_address.id,
                        'name': public_ip_address.name,
                        'ipAddress': public_ip_address.ip_address,
                        'ipAllocationMethod': public_ip_address.public_ip_allocation_method
                    }
                    # Zones may be absent, None, or empty depending on API version.
                    try:
                        public_ip_addr_info['zone'] = public_ip_address.zones[0]
                    except (AttributeError, IndexError, TypeError):
                        pass
                    network_info['publicIpAddresses'].append(public_ip_addr_info)
            result.append({
                'virtualMachine': {
                    'resourceGroup': nic_resource_group,
                    'name': nic_vm_name,
                    'network': network_info
                }
            })
    return result
def open_vm_port(cmd, resource_group_name, vm_name, port, priority=900, network_security_group_name=None,
                 apply_to_subnet=False):
    """Open an inbound port (or '*' for all ports) on a single-NIC VM.

    Reuses the NSG attached to the NIC (or, with apply_to_subnet, the subnet);
    creates a new NSG when none exists, then adds an allow-all-sources inbound
    rule for the requested port and re-attaches the NSG if it was newly created.
    Returns the resulting network security group.
    """
    from msrestazure.tools import parse_resource_id
    network = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
    vm = get_vm(cmd, resource_group_name, vm_name)
    location = vm.location
    if not vm.network_profile:
        raise CLIError("Network profile not found for VM '{}'".format(vm_name))
    nic_ids = list(vm.network_profile.network_interfaces)
    if len(nic_ids) > 1:
        raise CLIError('Multiple NICs is not supported for this command. Create rules on the NSG '
                       'directly.')
    if not nic_ids:
        raise CLIError("No NIC associated with VM '{}'".format(vm_name))

    # get existing NSG or create a new one
    created_nsg = False
    nic = network.network_interfaces.get(resource_group_name, os.path.split(nic_ids[0].id)[1])
    if not apply_to_subnet:
        nsg = nic.network_security_group
    else:
        subnet_id = parse_resource_id(nic.ip_configurations[0].subnet.id)
        subnet = network.subnets.get(resource_group_name, subnet_id['name'], subnet_id['child_name_1'])
        nsg = subnet.network_security_group
    if not nsg:
        NetworkSecurityGroup = \
            cmd.get_models('NetworkSecurityGroup', resource_type=ResourceType.MGMT_NETWORK)
        nsg = LongRunningOperation(cmd.cli_ctx, 'Creating network security group')(
            network.network_security_groups.create_or_update(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=NetworkSecurityGroup(location=location)
            )
        )
        created_nsg = True

    # update the NSG with the new rule to allow inbound traffic
    SecurityRule = cmd.get_models('SecurityRule', resource_type=ResourceType.MGMT_NETWORK)
    rule_name = 'open-port-all' if port == '*' else 'open-port-{}'.format(port)
    rule = SecurityRule(protocol='*', access='allow', direction='inbound', name=rule_name,
                        source_port_range='*', destination_port_range=port, priority=priority,
                        source_address_prefix='*', destination_address_prefix='*')
    # An NSG fetched via the NIC/subnet may only carry an id, not a name.
    nsg_name = nsg.name or os.path.split(nsg.id)[1]
    LongRunningOperation(cmd.cli_ctx, 'Adding security rule')(
        network.security_rules.create_or_update(
            resource_group_name, nsg_name, rule_name, rule)
    )

    # update the NIC or subnet if a new NSG was created
    if created_nsg and not apply_to_subnet:
        nic.network_security_group = nsg
        LongRunningOperation(cmd.cli_ctx, 'Updating NIC')(network.network_interfaces.create_or_update(
            resource_group_name, nic.name, nic))
    elif created_nsg and apply_to_subnet:
        subnet.network_security_group = nsg
        LongRunningOperation(cmd.cli_ctx, 'Updating subnet')(network.subnets.create_or_update(
            resource_group_name=resource_group_name,
            virtual_network_name=subnet_id['name'],
            subnet_name=subnet_id['child_name_1'],
            subnet_parameters=subnet
        ))
    return network.network_security_groups.get(resource_group_name, nsg_name)
def resize_vm(cmd, resource_group_name, vm_name, size, no_wait=False):
    """Change a VM's size; no-op (with a warning) if it already has the requested size."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    current_size = vm.hardware_profile.vm_size  # pylint: disable=no-member
    if current_size == size:
        logger.warning("VM is already %s", size)
        return None
    vm.hardware_profile.vm_size = size
    return set_vm(cmd, vm, no_wait=no_wait)
def restart_vm(cmd, resource_group_name, vm_name, no_wait=False, force=False):
    """Restart a VM; with force=True, redeploy it instead of a plain restart."""
    client = _compute_client_factory(cmd.cli_ctx)
    operation = client.virtual_machines.redeploy if force else client.virtual_machines.restart
    return sdk_no_wait(no_wait, operation, resource_group_name, vm_name)
def set_vm(cmd, instance, lro_operation=None, no_wait=False):
    """Persist a modified VM model via create_or_update and wait (or not) for completion."""
    instance.resources = None  # Issue: https://github.com/Azure/autorest/issues/934
    client = _compute_client_factory(cmd.cli_ctx)
    resource_group, name = _parse_rg_name(instance.id)
    poller = sdk_no_wait(no_wait, client.virtual_machines.create_or_update,
                         resource_group_name=resource_group,
                         vm_name=name,
                         parameters=instance)
    if lro_operation:
        return lro_operation(poller)
    return LongRunningOperation(cmd.cli_ctx)(poller)
def patch_vm(cmd, resource_group_name, vm_name, vm):
    """Apply a partial (PATCH) update to a VM and wait for it to finish."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    update_poller = compute_client.virtual_machines.update(resource_group_name, vm_name, vm)
    return LongRunningOperation(cmd.cli_ctx)(update_poller)
def show_vm(cmd, resource_group_name, vm_name, show_details=False):
    """Show a VM; with show_details, include resolved network info and power state."""
    if show_details:
        return get_vm_details(cmd, resource_group_name, vm_name)
    return get_vm(cmd, resource_group_name, vm_name)
def update_vm(cmd, resource_group_name, vm_name, os_disk=None, disk_caching=None,
              write_accelerator=None, license_type=None, no_wait=False, ultra_ssd_enabled=None,
              priority=None, max_price=None, proximity_placement_group=None, workspace=None, **kwargs):
    """Apply requested property changes to the VM model in kwargs['parameters']
    and PUT it back via create_or_update.

    Each optional argument is applied only when not None; the mutated model is
    then sent as a full update. kwargs['parameters'] is the VM object produced
    by the generic-update machinery.
    """
    from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
    from ._vm_utils import update_write_accelerator_settings, update_disk_caching
    vm = kwargs['parameters']
    if os_disk is not None:
        # Accept either a full disk resource id or a bare disk name in the VM's group.
        if is_valid_resource_id(os_disk):
            disk_id, disk_name = os_disk, parse_resource_id(os_disk)['name']
        else:
            res = parse_resource_id(vm.id)
            disk_id = resource_id(subscription=res['subscription'], resource_group=res['resource_group'],
                                  namespace='Microsoft.Compute', type='disks', name=os_disk)
            disk_name = os_disk
        vm.storage_profile.os_disk.managed_disk.id = disk_id
        vm.storage_profile.os_disk.name = disk_name
    if write_accelerator is not None:
        update_write_accelerator_settings(vm.storage_profile, write_accelerator)
    if disk_caching is not None:
        update_disk_caching(vm.storage_profile, disk_caching)
    if license_type is not None:
        vm.license_type = license_type
    if ultra_ssd_enabled is not None:
        if vm.additional_capabilities is None:
            AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
            vm.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled)
        else:
            vm.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
    if priority is not None:
        vm.priority = priority
    if max_price is not None:
        if vm.billing_profile is None:
            BillingProfile = cmd.get_models('BillingProfile')
            vm.billing_profile = BillingProfile(max_price=max_price)
        else:
            vm.billing_profile.max_price = max_price
    if proximity_placement_group is not None:
        vm.proximity_placement_group = {'id': proximity_placement_group}
    if workspace is not None:
        # Attach the Log Analytics agent extension and configure data sources.
        workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
        workspace_name = parse_resource_id(workspace_id)['name']
        _set_log_analytics_workspace_extension(cmd=cmd,
                                               resource_group_name=resource_group_name,
                                               vm=vm,
                                               vm_name=vm_name,
                                               workspace_name=workspace_name)
        os_type = vm.storage_profile.os_disk.os_type.value if vm.storage_profile.os_disk.os_type else None
        _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)
    # Cross-subscription image references require an aux-subscription-aware client.
    aux_subscriptions = None
    if vm and vm.storage_profile and vm.storage_profile.image_reference and vm.storage_profile.image_reference.id:
        aux_subscriptions = _parse_aux_subscriptions(vm.storage_profile.image_reference.id)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    return sdk_no_wait(no_wait, client.virtual_machines.create_or_update, resource_group_name, vm_name, **kwargs)
# endregion
# region VirtualMachines AvailabilitySets
def _get_availset(cmd, resource_group_name, name):
    """Fetch an availability set by name."""
    client = _compute_client_factory(cmd.cli_ctx)
    return client.availability_sets.get(resource_group_name, name)
def _set_availset(cmd, resource_group_name, name, **kwargs):
    """Create or update an availability set, forwarding extra SDK kwargs."""
    client = _compute_client_factory(cmd.cli_ctx)
    return client.availability_sets.create_or_update(resource_group_name, name, **kwargs)
# pylint: disable=inconsistent-return-statements
def convert_av_set_to_managed_disk(cmd, resource_group_name, availability_set_name):
    """Convert an availability set to the 'Aligned' SKU required for managed disks.

    If already 'Aligned', logs a warning and returns nothing. May lower the
    fault-domain count when the region's 'Aligned' SKU supports fewer fault
    domains than the set currently uses.
    """
    av_set = _get_availset(cmd, resource_group_name, availability_set_name)
    if av_set.sku.name != 'Aligned':
        av_set.sku.name = 'Aligned'

        # let us double check whether the existing FD number is supported
        skus = list_skus(cmd, av_set.location)
        av_sku = next((s for s in skus if s.resource_type == 'availabilitySets' and s.name == 'Aligned'), None)
        if av_sku and av_sku.capabilities:
            max_fd = int(next((c.value for c in av_sku.capabilities if c.name == 'MaximumPlatformFaultDomainCount'),
                              '0'))
            if max_fd and max_fd < av_set.platform_fault_domain_count:
                logger.warning("The fault domain count will be adjusted from %s to %s so to stay within region's "
                               "limitation", av_set.platform_fault_domain_count, max_fd)
                av_set.platform_fault_domain_count = max_fd

        return _set_availset(cmd, resource_group_name=resource_group_name, name=availability_set_name,
                             parameters=av_set)
    logger.warning('Availability set %s is already configured for managed disks.', availability_set_name)
def create_av_set(cmd, availability_set_name, resource_group_name, platform_fault_domain_count=2,
                  platform_update_domain_count=None, location=None, proximity_placement_group=None, unmanaged=False,
                  no_wait=False, tags=None, validate=False):
    """Create an availability set via a one-resource ARM template deployment.

    With `validate`, the template is only validated; with `no_wait`, the
    deployment poller is returned immediately. Otherwise returns the created
    availability set model.
    """
    from azure.cli.core.util import random_string
    from azure.cli.core.commands.arm import ArmTemplateBuilder
    from azure.cli.command_modules.vm._template_builder import build_av_set_resource

    tags = tags or {}

    # Build up the ARM template
    master_template = ArmTemplateBuilder()

    av_set_resource = build_av_set_resource(cmd, availability_set_name, location, tags,
                                            platform_update_domain_count,
                                            platform_fault_domain_count, unmanaged,
                                            proximity_placement_group=proximity_placement_group)
    master_template.add_resource(av_set_resource)

    template = master_template.build()

    # deploy ARM template
    deployment_name = 'av_set_deploy_' + random_string(32)
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters={}, mode='incremental')

    # Newer resource API versions wrap the properties in a Deployment model.
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)

        if validate:
            validation_poller = client.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)

        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, deployment)
        LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.create_or_update,
                                                      resource_group_name, deployment_name, deployment))
    else:
        if validate:
            return client.validate(resource_group_name, deployment_name, properties)

        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, properties)
        LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.create_or_update,
                                                      resource_group_name, deployment_name, properties))

    compute_client = _compute_client_factory(cmd.cli_ctx)
    return compute_client.availability_sets.get(resource_group_name, availability_set_name)
def update_av_set(instance, resource_group_name, proximity_placement_group=None):
    """Apply supported updates to an availability set model and return it.

    Currently only the proximity placement group can be changed; when omitted,
    the instance is returned unmodified.
    """
    ppg_id = proximity_placement_group
    if ppg_id is not None:
        instance.proximity_placement_group = {'id': ppg_id}
    return instance
def list_av_sets(cmd, resource_group_name=None):
    """List availability sets in a resource group, or subscription-wide
    (with the contained VM references expanded)."""
    availability_sets = _compute_client_factory(cmd.cli_ctx).availability_sets
    if not resource_group_name:
        return availability_sets.list_by_subscription(expand='virtualMachines/$ref')
    return availability_sets.list(resource_group_name)
# endregion
# region VirtualMachines BootDiagnostics
def disable_boot_diagnostics(cmd, resource_group_name, vm_name):
    """Turn off boot diagnostics on a VM; no-op when it is not currently enabled."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    profile = vm.diagnostics_profile
    currently_enabled = bool(profile and profile.boot_diagnostics and profile.boot_diagnostics.enabled)
    if not currently_enabled:
        return
    boot_diag = profile.boot_diagnostics
    boot_diag.enabled = False
    boot_diag.storage_uri = None
    set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'disabling boot diagnostics', 'done'))
def enable_boot_diagnostics(cmd, resource_group_name, vm_name, storage):
    """Enable boot diagnostics on a VM against the given storage account/URI.

    Skips the update when diagnostics are already enabled against the same
    storage endpoint (case-insensitive comparison).
    """
    from azure.cli.command_modules.vm._vm_utils import get_storage_blob_uri
    vm = get_vm(cmd, resource_group_name, vm_name)
    storage_uri = get_storage_blob_uri(cmd.cli_ctx, storage)
    current = vm.diagnostics_profile.boot_diagnostics if vm.diagnostics_profile else None
    if (current and current.enabled and current.storage_uri and
            current.storage_uri.lower() == storage_uri.lower()):
        return  # already configured exactly as requested
    DiagnosticsProfile, BootDiagnostics = cmd.get_models('DiagnosticsProfile', 'BootDiagnostics')
    new_boot_diag = BootDiagnostics(enabled=True, storage_uri=storage_uri)
    if vm.diagnostics_profile is None:
        vm.diagnostics_profile = DiagnosticsProfile(boot_diagnostics=new_boot_diag)
    else:
        vm.diagnostics_profile.boot_diagnostics = new_boot_diag
    set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done'))
class BootLogStreamWriter:  # pylint: disable=too-few-public-methods
    """Minimal file-like sink that forwards boot-log content to a stream.

    Falls back to an ASCII rendering when the target stream's encoding
    cannot represent the content (e.g. Windows 'charmap' consoles).
    """

    def __init__(self, out):
        # `out` is the underlying writable stream, e.g. sys.stdout
        self.out = out

    def write(self, str_or_bytes):
        text = str_or_bytes.decode('utf8') if isinstance(str_or_bytes, bytes) else str_or_bytes
        try:
            self.out.write(text)
        except UnicodeEncodeError:
            # e.g. 'charmap' codec can't encode characters in position 258829-258830: character maps to <undefined>
            import unicodedata
            ascii_fallback = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
            self.out.write(ascii_fallback.decode())
            logger.warning("A few unicode characters have been ignored because the shell is not able to display. "
                           "To see the full log, use a shell with unicode capacity")
def get_boot_log(cmd, resource_group_name, vm_name):
    """Stream a VM's boot diagnostics (serial console) log to stdout.

    Locates the storage account backing the diagnostics blob, fetches its
    key, and streams the blob through BootLogStreamWriter.

    :raises CLIError: when boot diagnostics are disabled, no log blob is
        available, or the backing storage account cannot be found.
    """
    import re
    import sys
    from azure.cli.core.profiles import get_sdk
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob.blockblobservice#BlockBlobService')
    client = _compute_client_factory(cmd.cli_ctx)
    virtual_machine = client.virtual_machines.get(resource_group_name, vm_name, expand='instanceView')
    # pylint: disable=no-member
    if (not virtual_machine.instance_view.boot_diagnostics or
            not virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri):
        raise CLIError('Please enable boot diagnostics.')
    blob_uri = virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri
    # Find storage account for diagnostics
    storage_mgmt_client = _get_storage_management_client(cmd.cli_ctx)
    if not blob_uri:
        raise CLIError('No console log available')
    try:
        storage_accounts = storage_mgmt_client.storage_accounts.list()
        matching_storage_account = (a for a in list(storage_accounts)
                                    if blob_uri.startswith(a.primary_endpoints.blob))
        storage_account = next(matching_storage_account)
    except StopIteration:
        # fix: corrected typo 'accont' -> 'account' in the user-facing error
        raise CLIError('Failed to find storage account for console log file')
    regex = r'/subscriptions/[^/]+/resourceGroups/(?P<rg>[^/]+)/.+'
    match = re.search(regex, storage_account.id, re.I)
    rg = match.group('rg')
    # Get account key
    keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name)
    # Extract container and blob name from url...
    container, blob = urlparse(blob_uri).path.split('/')[-2:]
    storage_client = get_data_service_client(
        cmd.cli_ctx,
        BlockBlobService,
        storage_account.name,
        keys.keys[0].value,
        endpoint_suffix=cmd.cli_ctx.cloud.suffixes.storage_endpoint)  # pylint: disable=no-member
    # our streamwriter not seekable, so no parallel.
    storage_client.get_blob_to_stream(container, blob, BootLogStreamWriter(sys.stdout), max_connections=1)
# endregion
# region VirtualMachines Diagnostics
def set_diagnostics_extension(
        cmd, resource_group_name, vm_name, settings, protected_settings=None, version=None,
        no_auto_upgrade=False):
    """Install or update the Azure diagnostics extension (LAD on Linux, WAD on Windows).

    On Linux, an installed diagnostics extension of a different major version
    is deleted first before the requested version is installed via
    set_extension (cross-major in-place updates are not attempted here).
    """
    client = _compute_client_factory(cmd.cli_ctx)
    vm = client.virtual_machines.get(resource_group_name, vm_name, 'instanceView')
    # pylint: disable=no-member
    is_linux_os = _is_linux_os(vm)
    vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT
    if is_linux_os:  # check incompatible version
        exts = vm.instance_view.extensions or []
        major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0]
        # any installed LAD whose handler version is outside the expected major line?
        if next((e for e in exts if e.name == vm_extension_name and
                 not e.type_handler_version.startswith(major_ver + '.')), None):
            logger.warning('There is an incompatible version of diagnostics extension installed. '
                           'We will update it with a new version')
            poller = client.virtual_machine_extensions.delete(resource_group_name, vm_name,
                                                              vm_extension_name)
            LongRunningOperation(cmd.cli_ctx)(poller)
    return set_extension(cmd, resource_group_name, vm_name, vm_extension_name,
                         extension_mappings[vm_extension_name]['publisher'],
                         version or extension_mappings[vm_extension_name]['version'],
                         settings,
                         protected_settings,
                         no_auto_upgrade)
def show_default_diagnostics_configuration(is_windows_os=False):
    """Return the default diagnostics public settings for the target OS.

    Also warns with a protected-settings template, since storage account
    info (name + SAS token) is required for the defaults to actually work.
    """
    public_settings = get_default_diag_config(is_windows_os)
    # LAD and WAD are not consistent on sas token format. Call it out here
    sas_placeholder = "__SAS_TOKEN_{}__".format(
        "WITH_LEADING_QUESTION_MARK" if is_windows_os else "WITHOUT_LEADING_QUESTION_MARK")
    protected_settings_info = json.dumps({
        'storageAccountName': "__STORAGE_ACCOUNT_NAME__",
        "storageAccountSasToken": sas_placeholder
    }, indent=2)
    # pylint: disable=line-too-long
    logger.warning('Protected settings with storage account info is required to work with the default configurations, e.g. \n%s', protected_settings_info)
    return public_settings
# endregion
# region VirtualMachines Disks (Managed)
def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk, new=False, sku=None,
                             size_gb=1023, lun=None, caching=None, enable_write_accelerator=False):
    '''Attach a managed data disk to a VM.

    When ``new`` is true an empty disk is created alongside the VM; otherwise
    ``disk`` is treated as the resource id of an existing disk to attach.
    A free LUN is chosen automatically when none is supplied.
    '''
    from msrestazure.tools import parse_resource_id
    vm = get_vm(cmd, resource_group_name, vm_name)
    DataDisk, ManagedDiskParameters, DiskCreateOption = cmd.get_models(
        'DataDisk', 'ManagedDiskParameters', 'DiskCreateOptionTypes')
    # pylint: disable=no-member
    if lun is None:
        # pick the first unused LUN on the VM
        lun = _get_disk_lun(vm.storage_profile.data_disks)
    if new:
        data_disk = DataDisk(lun=lun, create_option=DiskCreateOption.empty,
                             name=parse_resource_id(disk)['name'],
                             disk_size_gb=size_gb, caching=caching,
                             managed_disk=ManagedDiskParameters(storage_account_type=sku))
    else:
        params = ManagedDiskParameters(id=disk, storage_account_type=sku)
        data_disk = DataDisk(lun=lun, create_option=DiskCreateOption.attach, managed_disk=params, caching=caching)
    if enable_write_accelerator:
        data_disk.write_accelerator_enabled = enable_write_accelerator
    vm.storage_profile.data_disks.append(data_disk)
    set_vm(cmd, vm)
def detach_data_disk(cmd, resource_group_name, vm_name, disk_name):
    """Detach a data disk (managed or unmanaged) from a VM by name."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    # pylint: disable=no-member
    target = disk_name.lower()
    remaining = [d for d in vm.storage_profile.data_disks if d.name.lower() != target]
    if len(remaining) == len(vm.storage_profile.data_disks):
        raise CLIError("No disk with the name '{}' was found".format(disk_name))
    vm.storage_profile.data_disks = remaining
    set_vm(cmd, vm)
# endregion
# region VirtualMachines Extensions
def list_extensions(cmd, resource_group_name, vm_name):
    """Return the VM-extension child resources attached to a VM."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    extension_type = 'Microsoft.Compute/virtualMachines/extensions'
    return [r for r in (vm.resources or []) if r.type == extension_type]
def set_extension(cmd, resource_group_name, vm_name, vm_extension_name, publisher, version=None, settings=None,
                  protected_settings=None, no_auto_upgrade=False, force_update=False, no_wait=False,
                  extension_instance_name=None):
    """Install or update a VM extension.

    If an extension of the same type already exists under a different
    instance name, that existing instance is updated (with a warning)
    rather than a second one being created.
    """
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    client = _compute_client_factory(cmd.cli_ctx)
    if not extension_instance_name:
        extension_instance_name = vm_extension_name
    VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
    # reuse the existing instance name when this extension type is already installed
    instance_name = _get_extension_instance_name(vm.instance_view, publisher, vm_extension_name,
                                                 suggested_name=extension_instance_name)
    if instance_name != extension_instance_name:
        msg = "A %s extension with name %s already exists. Updating it with your settings..."
        logger.warning(msg, vm_extension_name, instance_name)
    # normalize the requested handler version for the VM's location
    version = _normalize_extension_version(cmd.cli_ctx, publisher, vm_extension_name, version, vm.location)
    ext = VirtualMachineExtension(location=vm.location,
                                  publisher=publisher,
                                  virtual_machine_extension_type=vm_extension_name,
                                  protected_settings=protected_settings,
                                  type_handler_version=version,
                                  settings=settings,
                                  auto_upgrade_minor_version=(not no_auto_upgrade))
    if force_update:
        # a fresh tag forces the extension handler to re-run even with unchanged settings
        ext.force_update_tag = str(_gen_guid())
    return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
                       resource_group_name, vm_name, instance_name, ext)
# endregion
# region VirtualMachines Extension Images
def list_vm_extension_images(
        cmd, image_location=None, publisher_name=None, name=None, version=None, latest=False):
    """List available VM extension images, optionally filtered by publisher, name and version."""
    return load_extension_images_thru_services(cmd.cli_ctx, publisher_name, name, version,
                                               image_location, latest)
# endregion
# region VirtualMachines Identity
def _remove_identities(cmd, resource_group_name, name, identities, getter, setter):
    """Remove system- and/or user-assigned identities from a compute resource.

    ``identities`` may contain user-assigned identity ids and/or the special
    MSI_LOCAL_ID marker denoting the system-assigned identity.  The
    resource's identity.type is downgraded to match what remains, and None
    is returned when the resource has no identity at all.
    """
    from ._vm_utils import MSI_LOCAL_ID
    ResourceIdentityType = cmd.get_models('ResourceIdentityType', operation_group='virtual_machines')
    remove_system_assigned_identity = False
    if MSI_LOCAL_ID in identities:
        remove_system_assigned_identity = True
        identities.remove(MSI_LOCAL_ID)
    resource = getter(cmd, resource_group_name, name)
    if resource.identity is None:
        return None
    emsis_to_remove = []
    if identities:
        existing_emsis = {x.lower() for x in list((resource.identity.user_assigned_identities or {}).keys())}
        emsis_to_remove = {x.lower() for x in identities}
        non_existing = emsis_to_remove.difference(existing_emsis)
        if non_existing:
            raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
        if not list(existing_emsis - emsis_to_remove):  # if all emsis are gone, we need to update the type
            if resource.identity.type == ResourceIdentityType.user_assigned:
                resource.identity.type = ResourceIdentityType.none
            elif resource.identity.type == ResourceIdentityType.system_assigned_user_assigned:
                resource.identity.type = ResourceIdentityType.system_assigned
            resource.identity.user_assigned_identities = None
    if remove_system_assigned_identity:
        # drop the system-assigned part, keeping user-assigned when present
        resource.identity.type = (ResourceIdentityType.none
                                  if resource.identity.type == ResourceIdentityType.system_assigned
                                  else ResourceIdentityType.user_assigned)
    if emsis_to_remove:
        if resource.identity.type not in [ResourceIdentityType.none, ResourceIdentityType.system_assigned]:
            # a None value in the map is how the API expresses deletion of that identity
            resource.identity.user_assigned_identities = {}
            for identity in emsis_to_remove:
                resource.identity.user_assigned_identities[identity] = None
    result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, name, resource))
    return result.identity
def remove_vm_identity(cmd, resource_group_name, vm_name, identities=None):
    """Remove user-assigned (and/or the system-assigned) identities from a VM.

    When no identities are given, the system-assigned identity is removed.
    """
    if identities is None:
        from ._vm_utils import MSI_LOCAL_ID
        identities = [MSI_LOCAL_ID]

    def setter(resource_group_name, vm_name, vm):
        # patch only the identity rather than resending the whole VM model
        VirtualMachineUpdate = cmd.get_models('VirtualMachineUpdate', operation_group='virtual_machines')
        compute_client = _compute_client_factory(cmd.cli_ctx)
        return compute_client.virtual_machines.update(
            resource_group_name, vm_name, VirtualMachineUpdate(identity=vm.identity))

    return _remove_identities(cmd, resource_group_name, vm_name, identities, get_vm, setter)
# endregion
# region VirtualMachines Images
def list_vm_images(cmd, image_location=None, publisher_name=None, offer=None, sku=None,
                   all=False):  # pylint: disable=redefined-builtin
    """List VM images, either from the offline alias document or live from the services.

    Each returned entry gets a composed 'urn' field of publisher:offer:sku:version.
    """
    if all:
        if not any([publisher_name, offer, sku]):
            logger.warning("You are retrieving all the images from server which could take more than a minute. "
                           "To shorten the wait, provide '--publisher', '--offer' or '--sku'. Partial name search "
                           "is supported.")
        images = load_images_thru_services(cmd.cli_ctx, publisher_name, offer, sku, image_location)
    else:
        images = load_images_from_aliases_doc(cmd.cli_ctx, publisher_name, offer, sku)
        logger.warning(
            'You are viewing an offline list of images, use --all to retrieve an up-to-date list')
    for entry in images:
        entry['urn'] = ':'.join([entry['publisher'], entry['offer'], entry['sku'], entry['version']])
    return images
def show_vm_image(cmd, urn=None, publisher=None, offer=None, sku=None, version=None, location=None):
    """Show details of a platform VM image.

    Accepts either a URN ('publisher:offer:sku:version', where version may be
    'latest') or the four individual components; supplying both is an error.
    """
    from azure.cli.core.commands.parameters import get_one_of_subscription_locations
    # fix: the usage message previously referred to --plan/--publish, which are not
    # parameters of this command; it takes --publisher/--offer/--sku/--version
    usage_err = 'usage error: --publisher STRING --offer STRING --sku STRING --version STRING | --urn STRING'
    location = location or get_one_of_subscription_locations(cmd.cli_ctx)
    if urn:
        if any([publisher, offer, sku, version]):
            raise CLIError(usage_err)
        publisher, offer, sku, version = urn.split(":")
        if version.lower() == 'latest':
            version = _get_latest_image_version(cmd.cli_ctx, location, publisher, offer, sku)
    elif not publisher or not offer or not sku or not version:
        raise CLIError(usage_err)
    client = _compute_client_factory(cmd.cli_ctx)
    return client.virtual_machine_images.get(location, publisher, offer, sku, version)
def accept_market_ordering_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Accept the Azure Marketplace ordering terms for an image so it can be deployed.

    Accepts either a URN ('publisher:offer:sku:version') or the explicit
    publisher/offer/plan triple; supplying both is an error.  Returns the
    updated agreement, or None when the image has no terms to accept.
    """
    from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
    # fix: '--publish' -> '--publisher' (the actual parameter name) and
    # normalized spacing before the '--urn' alternative
    usage_err = 'usage error: --plan STRING --offer STRING --publisher STRING | --urn STRING'
    if urn:
        if any([publisher, offer, plan]):
            raise CLIError(usage_err)
        publisher, offer, _, _ = urn.split(':')
        image = show_vm_image(cmd, urn)
        if not image.plan:
            logger.warning("Image '%s' has no terms to accept.", urn)
            return
        plan = image.plan.name
    else:
        if not publisher or not offer or not plan:
            raise CLIError(usage_err)
    market_place_client = get_mgmt_service_client(cmd.cli_ctx, MarketplaceOrderingAgreements)
    term = market_place_client.marketplace_agreements.get(publisher, offer, plan)
    term.accepted = True
    return market_place_client.marketplace_agreements.create(publisher, offer, plan, term)
# endregion
def _terms_prepare(cmd, urn, publisher, offer, plan):
if urn:
if any([publisher, offer, plan]):
raise CLIError('usage error: If using --urn, do not use any of --plan, --offer, --publisher.')
terms = urn.split(':')
if len(terms) != 4:
raise CLIError('usage error: urn should be in the format of publisher:offer:sku:version.')
publisher, offer = terms[0], terms[1]
image = show_vm_image(cmd, urn)
if not image.plan:
raise CLIError("Image '%s' has no terms to accept." % urn)
plan = image.plan.name
else:
if not all([publisher, offer, plan]):
raise CLIError(
'usage error: If not using --urn, all of --plan, --offer and --publisher should be provided.')
return publisher, offer, plan
def _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept):
    """Set the 'accepted' flag on marketplace image terms and persist it."""
    publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
    terms_client = cf_vm_image_term(cmd.cli_ctx, '')
    current_terms = terms_client.get(publisher, offer, plan)
    current_terms.accepted = accept
    return terms_client.create(publisher, offer, plan, current_terms)
def accept_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Accept Azure Marketplace image terms so that the image can be used to create VMs.

    :param cmd: cmd
    :param urn: URN, in the format of 'publisher:offer:sku:version'. If specified,
        other argument values can be omitted
    :param publisher: Image publisher
    :param offer: Image offer
    :param plan: Image billing plan
    """
    return _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept=True)
def cancel_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Cancel Azure Marketplace image terms.

    :param cmd: cmd
    :param urn: URN, in the format of 'publisher:offer:sku:version'. If specified,
        other argument values can be omitted
    :param publisher: Image publisher
    :param offer: Image offer
    :param plan: Image billing plan
    """
    return _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept=False)
def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Get the details of Azure Marketplace image terms.

    :param cmd: cmd
    :param urn: URN, in the format of 'publisher:offer:sku:version'. If specified,
        other argument values can be omitted
    :param publisher: Image publisher
    :param offer: Image offer
    :param plan: Image billing plan
    """
    publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
    terms_client = cf_vm_image_term(cmd.cli_ctx, '')
    return terms_client.get(publisher, offer, plan)
# region VirtualMachines NetworkInterfaces (NICs)
def show_vm_nic(cmd, resource_group_name, vm_name, nic):
    """Show details of a NIC attached to a VM; the NIC is identified by its resource id."""
    from msrestazure.tools import parse_resource_id
    vm = get_vm(cmd, resource_group_name, vm_name)
    wanted_id = nic.lower()
    # pylint: disable=no-member
    for nic_ref in vm.network_profile.network_interfaces:
        if nic_ref.id.lower() == wanted_id:
            network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
            return network_client.network_interfaces.get(
                resource_group_name, parse_resource_id(nic_ref.id)['name'])
    raise CLIError("NIC '{}' not found on VM '{}'".format(nic, vm_name))
def list_vm_nics(cmd, resource_group_name, vm_name):
    """List the network-interface references on a VM's network profile."""
    # pylint: disable=no-member
    return get_vm(cmd, resource_group_name, vm_name).network_profile.network_interfaces
def add_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
    """Append the given NICs to a VM, optionally designating the primary one."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    combined = _get_existing_nics(vm) + _build_nic_list(cmd, nics)
    return _update_vm_nics(cmd, vm, combined, primary_nic)
def remove_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
    """Detach the given NICs from a VM, optionally re-designating the primary one."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    doomed_ids = {n.id.lower() for n in _build_nic_list(cmd, nics)}
    survived = [n for n in _get_existing_nics(vm) if n.id.lower() not in doomed_ids]
    return _update_vm_nics(cmd, vm, survived, primary_nic)
def set_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
    """Replace a VM's NIC list wholesale with the given NICs."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    return _update_vm_nics(cmd, vm, _build_nic_list(cmd, nics), primary_nic)
def _build_nic_list(cmd, nic_ids):
NetworkInterfaceReference = cmd.get_models('NetworkInterfaceReference')
nic_list = []
if nic_ids:
# pylint: disable=no-member
network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
for nic_id in nic_ids:
rg, name = _parse_rg_name(nic_id)
nic = network_client.network_interfaces.get(rg, name)
nic_list.append(NetworkInterfaceReference(id=nic.id, primary=False))
return nic_list
def _get_existing_nics(vm):
network_profile = getattr(vm, 'network_profile', None)
nics = []
if network_profile is not None:
nics = network_profile.network_interfaces or []
return nics
def _update_vm_nics(cmd, vm, nics, primary_nic):
    """Write the given NIC reference list onto the VM and persist it.

    When ``primary_nic`` is given (name or resource id) that NIC is marked
    primary and all others are cleared; otherwise the first NIC becomes
    primary only if none is already marked.  Returns the updated NIC list.
    """
    NetworkProfile = cmd.get_models('NetworkProfile')
    if primary_nic:
        try:
            # accept a full resource id; fall back to a bare name
            _, primary_nic_name = _parse_rg_name(primary_nic)
        except IndexError:
            primary_nic_name = primary_nic
        matched = [n for n in nics if _parse_rg_name(n.id)[1].lower() == primary_nic_name.lower()]
        if not matched:
            raise CLIError('Primary Nic {} is not found'.format(primary_nic))
        if len(matched) > 1:
            raise CLIError('Duplicate Nic entries with name {}'.format(primary_nic))
        for n in nics:
            n.primary = False
        matched[0].primary = True
    elif nics:
        # ensure exactly one primary exists without overriding an existing choice
        if not [n for n in nics if n.primary]:
            nics[0].primary = True
    network_profile = getattr(vm, 'network_profile', None)
    if network_profile is None:
        vm.network_profile = NetworkProfile(network_interfaces=nics)
    else:
        network_profile.network_interfaces = nics
    return set_vm(cmd, vm).network_profile.network_interfaces
# endregion
# region VirtualMachines RunCommand
def run_command_invoke(cmd, resource_group_name, vm_vmss_name, command_id, scripts=None, parameters=None, instance_id=None):  # pylint: disable=line-too-long
    """Invoke a run-command on a VM, or on a single VMSS instance when instance_id is given."""
    RunCommandInput, RunCommandInputParameter = cmd.get_models('RunCommandInput', 'RunCommandInputParameter')
    run_command_input_parameters = []
    # RunCommand API requires named arguments, which doesn't make lots of sense for bash scripts
    # using positional arguments, so here we provide names just to get API happy
    # note, we don't handle mixing styles, but will consolidate by GA when API is settled
    auto_name_counter = 0
    for raw in (parameters or []):
        if '=' in raw:
            arg_name, arg_value = raw.split('=', 1)
        else:
            auto_name_counter += 1
            arg_name, arg_value = 'arg{}'.format(auto_name_counter), raw
        run_command_input_parameters.append(RunCommandInputParameter(name=arg_name, value=arg_value))
    client = _compute_client_factory(cmd.cli_ctx)
    payload = RunCommandInput(command_id=command_id, script=scripts,
                              parameters=run_command_input_parameters)
    # a non-empty instance_id means the target is one instance inside a scale set
    if instance_id:
        return client.virtual_machine_scale_set_vms.run_command(
            resource_group_name, vm_vmss_name, instance_id, payload)
    # otherwise this is a regular vm instance
    return client.virtual_machines.run_command(resource_group_name, vm_vmss_name, payload)
def vm_run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts=None, parameters=None):
    """Thin wrapper over run_command_invoke for plain (non-VMSS) VMs."""
    return run_command_invoke(cmd, resource_group_name, vm_name, command_id,
                              scripts=scripts, parameters=parameters)
# endregion
# region VirtualMachines Secrets
def _get_vault_id_from_name(cli_ctx, client, vault_name):
    """Look up a key vault's resource id from its name within the current subscription."""
    group_name = _get_resource_group_from_vault_name(cli_ctx, vault_name)
    if group_name:
        return client.get(group_name, vault_name).id
    raise CLIError("unable to find vault '{}' in current subscription.".format(vault_name))
def get_vm_format_secret(cmd, secrets, certificate_store=None, keyvault=None, resource_group_name=None):
    """Convert key-vault secret URLs into the VM osProfile secrets format.

    Secrets (possibly multi-line strings of URLs) are grouped by the source
    vault they belong to; each group carries the vault's resource id and the
    list of certificate entries.
    """
    from azure.keyvault import KeyVaultId
    import re
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
    grouped_secrets = {}
    merged_secrets = []
    for s in secrets:
        merged_secrets += s.splitlines()
    # group secrets by source vault
    for secret in merged_secrets:
        parsed = KeyVaultId.parse_secret_id(secret)
        # vault name is the host-name prefix of the vault URL
        match = re.search('://(.+?)\\.', parsed.vault)
        vault_name = match.group(1)
        if vault_name not in grouped_secrets:
            grouped_secrets[vault_name] = {
                'vaultCertificates': [],
                'id': keyvault or _get_vault_id_from_name(cmd.cli_ctx, client, vault_name)
            }
        vault_cert = {'certificateUrl': secret}
        if certificate_store:
            vault_cert['certificateStore'] = certificate_store
        grouped_secrets[vault_name]['vaultCertificates'].append(vault_cert)
    # transform the reduced map to vm format
    formatted = [{'sourceVault': {'id': value['id']},
                  'vaultCertificates': value['vaultCertificates']}
                 for _, value in list(grouped_secrets.items())]
    return formatted
def add_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate, certificate_store=None):
    """Add a key-vault certificate to a VM's OS profile secrets.

    ``certificate`` may be a bare certificate name (resolved against the
    vault) or a full secret URL.  On Windows the certificate store defaults
    to 'My'; on Linux --certificate-store is rejected.
    """
    from msrestazure.tools import parse_resource_id
    from ._vm_utils import create_keyvault_data_plane_client, get_key_vault_base_url
    VaultSecretGroup, SubResource, VaultCertificate = cmd.get_models(
        'VaultSecretGroup', 'SubResource', 'VaultCertificate')
    vm = get_vm(cmd, resource_group_name, vm_name)
    if '://' not in certificate:  # has a cert name rather a full url?
        keyvault_client = create_keyvault_data_plane_client(cmd.cli_ctx)
        cert_info = keyvault_client.get_certificate(
            get_key_vault_base_url(cmd.cli_ctx, parse_resource_id(keyvault)['name']), certificate, '')
        certificate = cert_info.sid
    if not _is_linux_os(vm):
        certificate_store = certificate_store or 'My'
    elif certificate_store:
        raise CLIError('Usage error: --certificate-store is only applicable on Windows VM')
    vault_cert = VaultCertificate(certificate_url=certificate, certificate_store=certificate_store)
    # append to an existing group for this vault, or start a new one
    vault_secret_group = next((x for x in vm.os_profile.secrets
                               if x.source_vault and x.source_vault.id.lower() == keyvault.lower()), None)
    if vault_secret_group:
        vault_secret_group.vault_certificates.append(vault_cert)
    else:
        vault_secret_group = VaultSecretGroup(source_vault=SubResource(id=keyvault), vault_certificates=[vault_cert])
        vm.os_profile.secrets.append(vault_secret_group)
    vm = set_vm(cmd, vm)
    return vm.os_profile.secrets
def list_vm_secrets(cmd, resource_group_name, vm_name):
    """List the key-vault secret groups configured in a VM's OS profile."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    return vm.os_profile.secrets if vm.os_profile else []
def remove_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate=None):
    """Remove key-vault secrets from a VM's OS profile.

    With only ``keyvault``, the whole vault group is deleted; with
    ``certificate`` as well, only the matching certificate entries are
    removed (and any group left empty is purged).
    """
    vm = get_vm(cmd, resource_group_name, vm_name)
    # support 2 kinds of filter:
    # a. if only keyvault is supplied, we delete its whole vault group.
    # b. if both keyvault and certificate are supplied, we only delete the specific cert entry.
    to_keep = vm.os_profile.secrets
    keyvault_matched = []
    if keyvault:
        keyvault = keyvault.lower()
        keyvault_matched = [x for x in to_keep if x.source_vault and x.source_vault.id.lower() == keyvault]
    if keyvault and not certificate:
        to_keep = [x for x in to_keep if x not in keyvault_matched]
    elif certificate:
        temp = keyvault_matched if keyvault else to_keep
        cert_url_pattern = certificate.lower()
        if '://' not in cert_url_pattern:  # just a cert name?
            # match the name as a path segment inside the secret URL
            cert_url_pattern = '/' + cert_url_pattern + '/'
        for x in temp:
            x.vault_certificates = ([v for v in x.vault_certificates
                                     if not(v.certificate_url and cert_url_pattern in v.certificate_url.lower())])
        to_keep = [x for x in to_keep if x.vault_certificates]  # purge all groups w/o any cert entries
    vm.os_profile.secrets = to_keep
    vm = set_vm(cmd, vm)
    return vm.os_profile.secrets
# endregion
# region VirtualMachines UnmanagedDisks
def attach_unmanaged_data_disk(cmd, resource_group_name, vm_name, new=False, vhd_uri=None, lun=None,
                               disk_name=None, size_gb=1023, caching=None):
    """Attach an unmanaged (VHD-backed) data disk to a VM.

    With ``new`` a fresh empty VHD is created next to the OS disk blob;
    otherwise an existing disk (identified by name/VHD URI) is attached.
    A free LUN and a timestamped disk name are chosen when not supplied.
    """
    DataDisk, DiskCreateOptionTypes, VirtualHardDisk = cmd.get_models(
        'DataDisk', 'DiskCreateOptionTypes', 'VirtualHardDisk')
    if not new and not disk_name:
        raise CLIError('Please provide the name of the existing disk to attach')
    create_option = DiskCreateOptionTypes.empty if new else DiskCreateOptionTypes.attach
    vm = get_vm(cmd, resource_group_name, vm_name)
    if disk_name is None:
        import datetime
        disk_name = vm_name + '-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    # pylint: disable=no-member
    if vhd_uri is None:
        # derive the container from the OS disk's VHD; only works for unmanaged-disk VMs
        if not hasattr(vm.storage_profile.os_disk, 'vhd') or not vm.storage_profile.os_disk.vhd:
            raise CLIError('Adding unmanaged disks to a VM with managed disks is not supported')
        blob_uri = vm.storage_profile.os_disk.vhd.uri
        vhd_uri = blob_uri[0:blob_uri.rindex('/') + 1] + disk_name + '.vhd'
    if lun is None:
        lun = _get_disk_lun(vm.storage_profile.data_disks)
    disk = DataDisk(lun=lun, vhd=VirtualHardDisk(uri=vhd_uri), name=disk_name,
                    create_option=create_option,
                    caching=caching, disk_size_gb=size_gb if new else None)
    if vm.storage_profile.data_disks is None:
        vm.storage_profile.data_disks = []
    vm.storage_profile.data_disks.append(disk)
    return set_vm(cmd, vm)
def list_unmanaged_disks(cmd, resource_group_name, vm_name):
    """List the data disks in a VM's storage profile."""
    # pylint: disable=no-member
    return get_vm(cmd, resource_group_name, vm_name).storage_profile.data_disks
# endregion
# region VirtualMachines Users
def _update_linux_access_extension(cmd, vm_instance, resource_group_name, protected_settings,
                                   no_wait=False):
    """Create or update the Linux VMAccess extension with the given protected settings.

    Used for user management and SSH reset; the payload (add/remove user,
    reset_ssh, ...) is carried entirely in ``protected_settings``.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
    # pylint: disable=no-member
    instance_name = _get_extension_instance_name(vm_instance.instance_view,
                                                 extension_mappings[_LINUX_ACCESS_EXT]['publisher'],
                                                 _LINUX_ACCESS_EXT,
                                                 _ACCESS_EXT_HANDLER_NAME)
    publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
        vm_instance.resources, _LINUX_ACCESS_EXT)
    ext = VirtualMachineExtension(location=vm_instance.location,  # pylint: disable=no-member
                                  publisher=publisher,
                                  virtual_machine_extension_type=_LINUX_ACCESS_EXT,
                                  protected_settings=protected_settings,
                                  type_handler_version=version,
                                  settings={},
                                  auto_upgrade_minor_version=auto_upgrade)
    return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
                       resource_group_name, vm_instance.name, instance_name, ext)
def _set_linux_user(cmd, vm_instance, resource_group_name, username,
                    password=None, ssh_key_value=None, no_wait=False):
    """Create/update a Linux user via the VMAccess extension.

    Defaults to the local ~/.ssh/id_rsa.pub key when neither a password nor
    an SSH key was supplied.
    """
    protected_settings = {'username': username}
    if password:
        protected_settings['password'] = password
    elif not ssh_key_value:
        # neither credential given: fall back to the default public key path
        ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
    if ssh_key_value:
        protected_settings['ssh_key'] = read_content_if_is_file(ssh_key_value)

    if no_wait:
        return _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                              protected_settings, no_wait)
    poller = _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                            protected_settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'setting user', 'done')(poller)
def _reset_windows_admin(cmd, vm_instance, resource_group_name, username, password, no_wait=False):
    '''Update the password. You can only change the password. Adding a new user is not supported.

    Deploys/updates the Windows VMAccess extension with the username in the
    public settings and the password in the protected settings.
    '''
    client = _compute_client_factory(cmd.cli_ctx)
    VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
    publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
        vm_instance.resources, _WINDOWS_ACCESS_EXT)
    # pylint: disable=no-member
    instance_name = _get_extension_instance_name(vm_instance.instance_view,
                                                 publisher,
                                                 _WINDOWS_ACCESS_EXT,
                                                 _ACCESS_EXT_HANDLER_NAME)
    ext = VirtualMachineExtension(location=vm_instance.location,  # pylint: disable=no-member
                                  publisher=publisher,
                                  virtual_machine_extension_type=_WINDOWS_ACCESS_EXT,
                                  protected_settings={'Password': password},
                                  type_handler_version=version,
                                  settings={'UserName': username},
                                  auto_upgrade_minor_version=auto_upgrade)
    if no_wait:
        return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
                           resource_group_name, vm_instance.name, instance_name, ext)
    poller = client.virtual_machine_extensions.create_or_update(resource_group_name,
                                                                vm_instance.name,
                                                                instance_name, ext)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting admin', 'done')(poller)
def set_user(cmd, resource_group_name, vm_name, username, password=None, ssh_key_value=None,
             no_wait=False):
    """Create/update a user account on a VM via the VM access extension.

    On Linux either a password or an SSH key may be set; on Windows only
    the admin password can be reset and SSH keys are rejected.
    """
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    if _is_linux_os(vm):
        return _set_linux_user(cmd, vm, resource_group_name, username, password, ssh_key_value, no_wait)
    if ssh_key_value:
        # fix: corrected typo 'appliable' -> 'applicable' in the user-facing error
        raise CLIError('SSH key is not applicable on a Windows VM')
    return _reset_windows_admin(cmd, vm, resource_group_name, username, password, no_wait)
def delete_user(cmd, resource_group_name, vm_name, username, no_wait=False):
    """Remove a user account from a Linux VM via the VM access extension."""
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    if not _is_linux_os(vm):
        raise CLIError('Deleting a user is not supported on Windows VM')
    settings = {'remove_user': username}
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name, settings, no_wait)
    poller = _update_linux_access_extension(cmd, vm, resource_group_name, settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'deleting user', 'done')(poller)
def reset_linux_ssh(cmd, resource_group_name, vm_name, no_wait=False):
    """Reset the SSH configuration on a Linux VM via the VM access extension."""
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    if not _is_linux_os(vm):
        raise CLIError('Resetting SSH is not supported in Windows VM')
    settings = {'reset_ssh': True}
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name, settings, no_wait)
    poller = _update_linux_access_extension(cmd, vm, resource_group_name, settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting SSH', 'done')(poller)
# endregion
# region VirtualMachineScaleSets
def assign_vmss_identity(cmd, resource_group_name, vmss_name, assign_identity=None, identity_role='Contributor',
                         identity_role_id=None, identity_scope=None):
    """Enable managed identity (system- and/or user-assigned) on a VM scale set.

    Optionally grants the identity a role over ``identity_scope`` via an ARM
    role assignment, then returns a summary dict of the resulting identity.
    """
    VirtualMachineScaleSetIdentity, UpgradeMode, ResourceIdentityType, VirtualMachineScaleSetUpdate = cmd.get_models(
        'VirtualMachineScaleSetIdentity', 'UpgradeMode', 'ResourceIdentityType', 'VirtualMachineScaleSetUpdate')
    IdentityUserAssignedIdentitiesValue = cmd.get_models('VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue')
    from azure.cli.core.commands.arm import assign_identity as assign_identity_helper
    client = _compute_client_factory(cmd.cli_ctx)
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
    def getter():
        # fresh copy of the resource, re-read by the helper before patching
        return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    def setter(vmss, external_identities=external_identities):
        # Merge the requested identities with what is already on the resource:
        # once both kinds exist (or are being added), the type must become
        # system_assigned_user_assigned.
        if vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vmss.identity and vmss.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        vmss.identity = VirtualMachineScaleSetIdentity(type=identity_types)
        if external_identities:
            vmss.identity.user_assigned_identities = {}
            for identity in external_identities:
                vmss.identity.user_assigned_identities[identity] = IdentityUserAssignedIdentitiesValue()
        # PATCH only the identity so other properties are left untouched
        vmss_patch = VirtualMachineScaleSetUpdate()
        vmss_patch.identity = vmss.identity
        poller = client.virtual_machine_scale_sets.update(resource_group_name, vmss_name, vmss_patch)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    if vmss.upgrade_policy.mode == UpgradeMode.manual:
        # manual upgrade mode: existing instances keep the old model until upgraded
        logger.warning("With manual upgrade mode, you will need to run 'az vmss update-instances -g %s -n %s "
                       "--instance-ids *' to propagate the change", resource_group_name, vmss_name)
    return _construct_identity_info(identity_scope, identity_role, vmss.identity.principal_id,
                                    vmss.identity.user_assigned_identities)
# pylint: disable=too-many-locals, too-many-statements
def create_vmss(cmd, vmss_name, resource_group_name, image=None,
                disable_overprovision=False, instance_count=2,
                location=None, tags=None, upgrade_policy_mode='manual', validate=False,
                admin_username=None, admin_password=None, authentication_type=None,
                vm_sku=None, no_wait=False,
                ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
                load_balancer=None, load_balancer_sku=None, application_gateway=None,
                app_gateway_subnet_address_prefix=None,
                app_gateway_sku='Standard_Large', app_gateway_capacity=10,
                backend_pool_name=None, nat_pool_name=None, backend_port=None, health_probe=None,
                public_ip_address=None, public_ip_address_allocation=None,
                public_ip_address_dns_name=None, accelerated_networking=None,
                public_ip_per_vm=False, vm_domain_name=None, dns_servers=None, nsg=None,
                os_caching=None, data_caching=None,
                storage_container_name='vhds', storage_sku=None,
                os_type=None, os_disk_name=None,
                use_unmanaged_disk=False, data_disk_sizes_gb=None, disk_info=None,
                vnet_name=None, vnet_address_prefix='10.0.0.0/16',
                subnet=None, subnet_address_prefix=None,
                os_offer=None, os_publisher=None, os_sku=None, os_version=None,
                load_balancer_type=None, app_gateway_type=None, vnet_type=None,
                public_ip_address_type=None, storage_profile=None,
                single_placement_group=None, custom_data=None, secrets=None, platform_fault_domain_count=None,
                plan_name=None, plan_product=None, plan_publisher=None, plan_promotion_code=None, license_type=None,
                assign_identity=None, identity_scope=None, identity_role='Contributor',
                identity_role_id=None, zones=None, priority=None, eviction_policy=None,
                application_security_groups=None, ultra_ssd_enabled=None, ephemeral_os_disk=None,
                proximity_placement_group=None, aux_subscriptions=None, terminate_notification_time=None,
                max_price=None, computer_name_prefix=None, orchestration_mode='ScaleSetVM', scale_in_policy=None,
                os_disk_encryption_set=None, data_disk_encryption_sets=None, data_disk_iops=None, data_disk_mbps=None,
                automatic_repairs_grace_period=None, specialized=None, os_disk_size_gb=None, encryption_at_host=None,
                host_group=None):
    """Create a virtual machine scale set.

    Builds a single ARM template containing the scale set plus any new VNet,
    load balancer or application gateway, public IP, NSG and MSI role
    assignment it needs, then deploys that template. Returns the deployment
    output (with identity info merged in when --assign-identity was used).
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.cli.core.util import random_string, hash_string
    from azure.cli.core.commands.arm import ArmTemplateBuilder
    from azure.cli.command_modules.vm._template_builder import (StorageProfile, build_vmss_resource,
                                                                build_vnet_resource, build_public_ip_resource,
                                                                build_load_balancer_resource,
                                                                build_vmss_storage_account_pool_resource,
                                                                build_application_gateway_resource,
                                                                build_msi_role_assignment, build_nsg_resource)
    # Build up the ARM template
    master_template = ArmTemplateBuilder()
    scale_set_vm_str = 'ScaleSetVM'
    vm_str = 'VM'
    if orchestration_mode.lower() == scale_set_vm_str.lower():
        # 'ScaleSetVM' mode: a full scale set with a VM profile and networking
        from msrestazure.tools import resource_id, is_valid_resource_id
        storage_sku = disk_info['os'].get('storageAccountType')
        subscription_id = get_subscription_id(cmd.cli_ctx)
        # expand bare disk-encryption-set names into full resource ids
        if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
            os_disk_encryption_set = resource_id(
                subscription=subscription_id, resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)
        if data_disk_encryption_sets is None:
            data_disk_encryption_sets = []
        for i, des in enumerate(data_disk_encryption_sets):
            if des is not None and not is_valid_resource_id(des):
                data_disk_encryption_sets[i] = resource_id(
                    subscription=subscription_id, resource_group=resource_group_name,
                    namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)
        network_id_template = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Network')
        vmss_id = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss_name)
        scrubbed_name = vmss_name.replace('-', '').lower()[:5]
        # 9-char unique prefix derived from the VMSS id; used for NIC/IP-config/storage names
        naming_prefix = '{}{}'.format(scrubbed_name,
                                      hash_string(vmss_id,
                                                  length=(9 - len(scrubbed_name)),
                                                  force_lower=True))
        # determine final defaults and calculated values
        tags = tags or {}
        os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vmss_id, length=10))
                                        if use_unmanaged_disk else None)
        load_balancer = load_balancer or '{}LB'.format(vmss_name)
        app_gateway = application_gateway or '{}AG'.format(vmss_name)
        backend_pool_name = backend_pool_name or '{}BEPool'.format(load_balancer or application_gateway)
        vmss_dependencies = []
        # VNET will always be a dependency
        if vnet_type == 'new':
            vnet_name = vnet_name or '{}VNET'.format(vmss_name)
            subnet = subnet or '{}Subnet'.format(vmss_name)
            vmss_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
            vnet = build_vnet_resource(
                cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix)
            if app_gateway_type:
                # an app gateway needs its own dedicated subnet in the new VNet
                vnet['properties']['subnets'].append({
                    'name': 'appGwSubnet',
                    'properties': {
                        'addressPrefix': app_gateway_subnet_address_prefix
                    }
                })
            master_template.add_resource(vnet)
        subnet_id = subnet if is_valid_resource_id(subnet) else \
            '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)
        gateway_subnet_id = ('{}/virtualNetworks/{}/subnets/appGwSubnet'.format(network_id_template, vnet_name)
                             if app_gateway_type == 'new' else None)
        # public IP is used by either load balancer/application gateway
        public_ip_address_id = None
        if public_ip_address:
            public_ip_address_id = (public_ip_address if is_valid_resource_id(public_ip_address)
                                    else '{}/publicIPAddresses/{}'.format(network_id_template,
                                                                          public_ip_address))
        def _get_public_ip_address_allocation(value, sku):
            # default allocation: static for Standard SKU, otherwise dynamic
            IPAllocationMethod = cmd.get_models('IPAllocationMethod', resource_type=ResourceType.MGMT_NETWORK)
            if not value:
                value = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
                    else IPAllocationMethod.dynamic.value
            return value
        # Handle load balancer creation
        if load_balancer_type == 'new':
            vmss_dependencies.append('Microsoft.Network/loadBalancers/{}'.format(load_balancer))
            lb_dependencies = []
            if vnet_type == 'new':
                lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
            if public_ip_address_type == 'new':
                public_ip_address = public_ip_address or '{}PublicIP'.format(load_balancer)
                lb_dependencies.append(
                    'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
                master_template.add_resource(build_public_ip_resource(
                    cmd, public_ip_address, location, tags,
                    _get_public_ip_address_allocation(public_ip_address_allocation, load_balancer_sku),
                    public_ip_address_dns_name, load_balancer_sku, zones))
                public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template,
                                                                        public_ip_address)
            # calculate default names if not provided
            nat_pool_name = nat_pool_name or '{}NatPool'.format(load_balancer)
            if not backend_port:
                backend_port = 3389 if os_type == 'windows' else 22
            lb_resource = build_load_balancer_resource(
                cmd, load_balancer, location, tags, backend_pool_name, nat_pool_name, backend_port,
                'loadBalancerFrontEnd', public_ip_address_id, subnet_id, private_ip_address='',
                private_ip_allocation='Dynamic', sku=load_balancer_sku, instance_count=instance_count,
                disable_overprovision=disable_overprovision)
            lb_resource['dependsOn'] = lb_dependencies
            master_template.add_resource(lb_resource)
            # Per https://docs.microsoft.com/azure/load-balancer/load-balancer-standard-overview#nsg
            if load_balancer_sku and load_balancer_sku.lower() == 'standard' and nsg is None:
                nsg_name = '{}NSG'.format(vmss_name)
                master_template.add_resource(build_nsg_resource(
                    None, nsg_name, location, tags, 'rdp' if os_type.lower() == 'windows' else 'ssh'))
                nsg = "[resourceId('Microsoft.Network/networkSecurityGroups', '{}')]".format(nsg_name)
                vmss_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg_name))
        # Or handle application gateway creation
        if app_gateway_type == 'new':
            vmss_dependencies.append('Microsoft.Network/applicationGateways/{}'.format(app_gateway))
            ag_dependencies = []
            if vnet_type == 'new':
                ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
            if public_ip_address_type == 'new':
                public_ip_address = public_ip_address or '{}PublicIP'.format(app_gateway)
                ag_dependencies.append(
                    'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
                master_template.add_resource(build_public_ip_resource(
                    cmd, public_ip_address, location, tags,
                    _get_public_ip_address_allocation(public_ip_address_allocation, None), public_ip_address_dns_name,
                    None, zones))
                public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template,
                                                                        public_ip_address)
            # calculate default names if not provided
            backend_port = backend_port or 80
            ag_resource = build_application_gateway_resource(
                cmd, app_gateway, location, tags, backend_pool_name, backend_port, 'appGwFrontendIP',
                public_ip_address_id, subnet_id, gateway_subnet_id, private_ip_address='',
                private_ip_allocation='Dynamic', sku=app_gateway_sku, capacity=app_gateway_capacity)
            ag_resource['dependsOn'] = ag_dependencies
            master_template.add_variable(
                'appGwID',
                "[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(app_gateway))
            master_template.add_resource(ag_resource)
        # create storage accounts if needed for unmanaged disk storage
        if storage_profile == StorageProfile.SAPirImage:
            master_template.add_resource(build_vmss_storage_account_pool_resource(
                cmd, 'storageLoop', location, tags, storage_sku))
            master_template.add_variable('storageAccountNames', [
                '{}{}'.format(naming_prefix, x) for x in range(5)
            ])
            master_template.add_variable('vhdContainers', [
                "[concat('https://', variables('storageAccountNames')[{}], '.blob.{}/{}')]".format(
                    x, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name) for x in range(5)
            ])
            vmss_dependencies.append('storageLoop')
        backend_address_pool_id = None
        inbound_nat_pool_id = None
        if load_balancer_type or app_gateway_type:
            network_balancer = load_balancer if load_balancer_type else app_gateway
            balancer_type = 'loadBalancers' if load_balancer_type else 'applicationGateways'
            if is_valid_resource_id(network_balancer):
                # backend address pool needed by load balancer or app gateway
                backend_address_pool_id = '{}/backendAddressPools/{}'.format(network_balancer, backend_pool_name)
                if nat_pool_name:
                    inbound_nat_pool_id = '{}/inboundNatPools/{}'.format(network_balancer, nat_pool_name)
            else:
                # backend address pool needed by load balancer or app gateway
                backend_address_pool_id = '{}/{}/{}/backendAddressPools/{}'.format(
                    network_id_template, balancer_type, network_balancer, backend_pool_name)
                if nat_pool_name:
                    inbound_nat_pool_id = '{}/{}/{}/inboundNatPools/{}'.format(
                        network_id_template, balancer_type, network_balancer, nat_pool_name)
        if health_probe and not is_valid_resource_id(health_probe):
            health_probe = '{}/loadBalancers/{}/probes/{}'.format(network_id_template, load_balancer, health_probe)
        ip_config_name = '{}IPConfig'.format(naming_prefix)
        nic_name = '{}Nic'.format(naming_prefix)
        if custom_data:
            custom_data = read_content_if_is_file(custom_data)
        if secrets:
            secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])
        if computer_name_prefix is not None and isinstance(computer_name_prefix, str):
            naming_prefix = computer_name_prefix
        if os_version and os_version != 'latest':
            logger.warning('You are deploying VMSS pinned to a specific image version from Azure Marketplace. '
                           'Consider using "latest" as the image version.')
        vmss_resource = build_vmss_resource(
            cmd=cmd, name=vmss_name, naming_prefix=naming_prefix, location=location, tags=tags,
            overprovision=not disable_overprovision, upgrade_policy_mode=upgrade_policy_mode, vm_sku=vm_sku,
            instance_count=instance_count, ip_config_name=ip_config_name, nic_name=nic_name, subnet_id=subnet_id,
            public_ip_per_vm=public_ip_per_vm, vm_domain_name=vm_domain_name, dns_servers=dns_servers, nsg=nsg,
            accelerated_networking=accelerated_networking, admin_username=admin_username,
            authentication_type=authentication_type, storage_profile=storage_profile, os_disk_name=os_disk_name,
            disk_info=disk_info, os_type=os_type, image=image, admin_password=admin_password,
            ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, os_publisher=os_publisher, os_offer=os_offer,
            os_sku=os_sku, os_version=os_version, backend_address_pool_id=backend_address_pool_id,
            inbound_nat_pool_id=inbound_nat_pool_id, health_probe=health_probe,
            single_placement_group=single_placement_group, platform_fault_domain_count=platform_fault_domain_count,
            custom_data=custom_data, secrets=secrets, license_type=license_type, zones=zones, priority=priority,
            eviction_policy=eviction_policy, application_security_groups=application_security_groups,
            ultra_ssd_enabled=ultra_ssd_enabled, proximity_placement_group=proximity_placement_group,
            terminate_notification_time=terminate_notification_time, max_price=max_price,
            scale_in_policy=scale_in_policy, os_disk_encryption_set=os_disk_encryption_set,
            data_disk_encryption_sets=data_disk_encryption_sets, data_disk_iops=data_disk_iops,
            data_disk_mbps=data_disk_mbps, automatic_repairs_grace_period=automatic_repairs_grace_period,
            specialized=specialized, os_disk_size_gb=os_disk_size_gb, encryption_at_host=encryption_at_host,
            host_group=host_group)
        vmss_resource['dependsOn'] = vmss_dependencies
        if plan_name:
            vmss_resource['plan'] = {
                'name': plan_name,
                'publisher': plan_publisher,
                'product': plan_product,
                'promotionCode': plan_promotion_code
            }
        enable_local_identity = None
        if assign_identity is not None:
            vmss_resource['identity'], _, _, enable_local_identity = _build_identities_info(
                assign_identity)
            if identity_scope:
                role_assignment_guid = str(_gen_guid())
                master_template.add_resource(build_msi_role_assignment(vmss_name, vmss_id, identity_role_id,
                                                                       role_assignment_guid, identity_scope, False))
    elif orchestration_mode.lower() == vm_str.lower():
        # 'VM' orchestration mode: a bare scale-set shell without a VM profile
        if platform_fault_domain_count is None:
            raise CLIError("usage error: --platform-fault-domain-count is required in VM mode")
        vmss_resource = {
            'type': 'Microsoft.Compute/virtualMachineScaleSets',
            'name': vmss_name,
            'location': location,
            'tags': tags,
            'apiVersion': cmd.get_api_version(ResourceType.MGMT_COMPUTE, operation_group='virtual_machine_scale_sets'),
            'properties': {
                'singlePlacementGroup': single_placement_group,
                'provisioningState': 0,
                'platformFaultDomainCount': platform_fault_domain_count
            }
        }
        if zones is not None:
            vmss_resource['zones'] = zones
        if proximity_placement_group is not None:
            vmss_resource['properties']['proximityPlacementGroup'] = {
                'id': proximity_placement_group
            }
    else:
        raise CLIError('usage error: --orchestration-mode (ScaleSet | VM)')
    master_template.add_resource(vmss_resource)
    master_template.add_output('VMSS', vmss_name, 'Microsoft.Compute', 'virtualMachineScaleSets',
                               output_type='object')
    if orchestration_mode.lower() == scale_set_vm_str.lower() and admin_password:
        # pass the password as a secure template parameter, never inline
        master_template.add_secure_parameter('adminPassword', admin_password)
    template = master_template.build()
    parameters = master_template.build_parameters()
    # deploy ARM template
    deployment_name = 'vmss_deploy_' + random_string(32)
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                     aux_subscriptions=aux_subscriptions).deployments
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    if validate:
        from azure.cli.command_modules.vm._vm_utils import log_pprint_template
        log_pprint_template(template)
        log_pprint_template(parameters)
    # newer resource API versions wrap the properties in a Deployment model
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = client.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        # creates the VMSS deployment
        deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)(
            sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, deployment))
    else:
        if validate:
            return client.validate(resource_group_name, deployment_name, properties)
        # creates the VMSS deployment
        deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)(
            sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, properties))
    if orchestration_mode.lower() == scale_set_vm_str.lower() and assign_identity is not None:
        vmss_info = get_vmss(cmd, resource_group_name, vmss_name)
        if enable_local_identity and not identity_scope:
            _show_missing_access_warning(resource_group_name, vmss_name, 'vmss')
        deployment_result['vmss']['identity'] = _construct_identity_info(identity_scope, identity_role,
                                                                         vmss_info.identity.principal_id,
                                                                         vmss_info.identity.user_assigned_identities)
    return deployment_result
def _build_identities_info(identities):
    """Split a requested identity list into system- and user-assigned parts.

    Returns a tuple of (ARM identity payload, comma-joined type string,
    external identity ids, whether a system-assigned identity is enabled).
    """
    from ._vm_utils import MSI_LOCAL_ID
    requested = identities or []
    external_identities = [ident for ident in requested if ident != MSI_LOCAL_ID]
    types = []
    # no explicit list, or the local-MSI sentinel present, means system-assigned
    if not requested or MSI_LOCAL_ID in requested:
        types.append('SystemAssigned')
    if external_identities:
        types.append('UserAssigned')
    identity_types = ','.join(types)
    info = {'type': identity_types}
    if external_identities:
        info['userAssignedIdentities'] = {ident: {} for ident in external_identities}
    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def deallocate_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
    """Deallocate a scale set, or a single instance when exactly one id is given."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if instance_ids and len(instance_ids) == 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.deallocate,
                           resource_group_name, vm_scale_set_name, instance_ids[0])
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.deallocate,
                       resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def delete_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
    """Delete specific scale-set instances (per-VM API when only one id is given)."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if len(instance_ids) != 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.delete_instances,
                           resource_group_name, vm_scale_set_name, instance_ids)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.delete,
                       resource_group_name, vm_scale_set_name, instance_ids[0])
def get_vmss(cmd, resource_group_name, name, instance_id=None):
    """Show a scale set, or one of its instances when instance_id is given."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if instance_id is None:
        return compute_client.virtual_machine_scale_sets.get(resource_group_name, name)
    return compute_client.virtual_machine_scale_set_vms.get(resource_group_name, name, instance_id)
def get_vmss_instance_view(cmd, resource_group_name, vm_scale_set_name, instance_id=None):
    """Get the instance view of a scale set, one instance, or all instances ('*')."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if not instance_id:
        return compute_client.virtual_machine_scale_sets.get_instance_view(resource_group_name, vm_scale_set_name)
    if instance_id == '*':
        vms = compute_client.virtual_machine_scale_set_vms.list(
            resource_group_name, vm_scale_set_name, select='instanceView', expand='instanceView')
        return [vm.instance_view for vm in vms]
    return compute_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name, vm_scale_set_name,
                                                                          instance_id)
def list_vmss(cmd, resource_group_name=None):
    """List scale sets in a resource group, or across the whole subscription."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if not resource_group_name:
        return compute_client.virtual_machine_scale_sets.list_all()
    return compute_client.virtual_machine_scale_sets.list(resource_group_name)
def list_vmss_instance_connection_info(cmd, resource_group_name, vm_scale_set_name):
    """List '<public-ip>:<port>' connection entries for each scale-set instance.

    Requires the scale set to sit behind a load balancer whose first frontend
    carries a public IP and which defines inbound NAT rules; raises otherwise.
    """
    from msrestazure.tools import parse_resource_id
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
    # locate the load balancer through the primary NIC's inbound NAT pool reference
    nic_configs = vmss.virtual_machine_profile.network_profile.network_interface_configurations
    primary_nic = next((nic for nic in nic_configs if nic.primary), None)
    if primary_nic is None:
        raise CLIError('could not find a primary NIC which is needed to search to load balancer')
    nat_ip_config = next((cfg for cfg in primary_nic.ip_configurations
                          if cfg.load_balancer_inbound_nat_pools), None)
    if not nat_ip_config:
        raise CLIError('No load balancer exists to retrieve public IP address')
    lb_info = parse_resource_id(nat_ip_config.load_balancer_inbound_nat_pools[0].id)
    # resolve the load balancer's public frontend IP
    network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
    lb = network_client.load_balancers.get(lb_info['resource_group'], lb_info['name'])
    if not getattr(lb.frontend_ip_configurations[0], 'public_ip_address', None):
        raise CLIError('The VM scale-set uses an internal load balancer, hence no connection information')
    public_ip_info = parse_resource_id(lb.frontend_ip_configurations[0].public_ip_address.id)
    public_ip = network_client.public_ip_addresses.get(public_ip_info['resource_group'],
                                                       public_ip_info['name'])
    public_ip_address = public_ip.ip_address
    # one entry per inbound NAT rule: instance id -> ip:port
    instance_addresses = {}
    for nat_rule in lb.inbound_nat_rules:
        instance_id = parse_resource_id(nat_rule.backend_ip_configuration.id)['child_name_1']
        instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address,
                                                                       nat_rule.frontend_port)
    return instance_addresses
def list_vmss_instance_public_ips(cmd, resource_group_name, vm_scale_set_name):
    """List instance-level public IP addresses of a scale set."""
    result = cf_public_ip_addresses(cmd.cli_ctx).list_virtual_machine_scale_set_public_ip_addresses(
        resource_group_name, vm_scale_set_name)
    # over-provisioned instances are deleted after 'create/update' returns and carry no address
    return [ip for ip in result if ip.ip_address]
def reimage_vmss(cmd, resource_group_name, vm_scale_set_name, instance_id=None, no_wait=False):
    """Reimage one instance, or the whole scale set when no instance id is given."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if not instance_id:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.reimage,
                           resource_group_name, vm_scale_set_name)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.reimage,
                       resource_group_name, vm_scale_set_name, instance_id)
def restart_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
    """Restart scale-set instances; a lone instance id uses the per-VM API."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    targets_single_vm = bool(instance_ids) and len(instance_ids) == 1
    if targets_single_vm:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.restart,
                           resource_group_name, vm_scale_set_name, instance_ids[0])
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.restart,
                       resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
# pylint: disable=inconsistent-return-statements
def scale_vmss(cmd, resource_group_name, vm_scale_set_name, new_capacity, no_wait=False):
    """Change a scale set's instance count; no-op when already at new_capacity."""
    VirtualMachineScaleSet = cmd.get_models('VirtualMachineScaleSet')
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
    # pylint: disable=no-member
    if vmss.sku.capacity == new_capacity:
        return
    vmss.sku.capacity = new_capacity
    # submit only location + sku so nothing else on the model is touched
    capacity_patch = VirtualMachineScaleSet(location=vmss.location, sku=vmss.sku)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.create_or_update,
                       resource_group_name, vm_scale_set_name, capacity_patch)
def start_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
    """Start scale-set instances; a lone instance id uses the per-VM API."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    targets_single_vm = bool(instance_ids) and len(instance_ids) == 1
    if targets_single_vm:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.start,
                           resource_group_name, vm_scale_set_name, instance_ids[0])
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.start,
                       resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def stop_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, skip_shutdown=False):
    """Power off scale-set instances; skip_shutdown is forwarded to the API."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if instance_ids and len(instance_ids) == 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.power_off, resource_group_name,
                           vm_scale_set_name, instance_id=instance_ids[0], skip_shutdown=skip_shutdown)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.power_off, resource_group_name,
                       vm_scale_set_name, instance_ids=instance_ids, skip_shutdown=skip_shutdown)
def update_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
    """Upgrade the given instances to the scale set's latest model."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.update_instances,
                       resource_group_name, vm_scale_set_name, instance_ids)
def update_vmss(cmd, resource_group_name, name, license_type=None, no_wait=False, instance_id=None,
                protect_from_scale_in=None, protect_from_scale_set_actions=None,
                enable_terminate_notification=None, terminate_notification_time=None, ultra_ssd_enabled=None,
                scale_in_policy=None, priority=None, max_price=None, proximity_placement_group=None,
                enable_automatic_repairs=None, automatic_repairs_grace_period=None, **kwargs):
    """Update a scale set, or a single instance when instance_id is given.

    The resource object (already populated by the generic-update machinery)
    arrives in kwargs['parameters']; each explicit option, when not None, is
    layered on top before the update call is submitted.
    """
    vmss = kwargs['parameters']
    aux_subscriptions = None
    # pylint: disable=too-many-boolean-expressions
    # a cross-subscription image reference requires auxiliary-subscription auth
    if vmss and hasattr(vmss, 'virtual_machine_profile') and vmss.virtual_machine_profile and \
            vmss.virtual_machine_profile.storage_profile and \
            vmss.virtual_machine_profile.storage_profile.image_reference and \
            vmss.virtual_machine_profile.storage_profile.image_reference.id:
        aux_subscriptions = _parse_aux_subscriptions(vmss.virtual_machine_profile.storage_profile.image_reference.id)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    VMProtectionPolicy = cmd.get_models('VirtualMachineScaleSetVMProtectionPolicy')
    # handle vmss instance update
    if instance_id is not None:
        if license_type is not None:
            vmss.license_type = license_type
        if not vmss.protection_policy:
            vmss.protection_policy = VMProtectionPolicy()
        if protect_from_scale_in is not None:
            vmss.protection_policy.protect_from_scale_in = protect_from_scale_in
        if protect_from_scale_set_actions is not None:
            vmss.protection_policy.protect_from_scale_set_actions = protect_from_scale_set_actions
        return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.update,
                           resource_group_name, name, instance_id, **kwargs)
    # else handle vmss update
    if license_type is not None:
        vmss.virtual_machine_profile.license_type = license_type
    if enable_terminate_notification is not None or terminate_notification_time is not None:
        if vmss.virtual_machine_profile.scheduled_events_profile is None:
            ScheduledEventsProfile = cmd.get_models('ScheduledEventsProfile')
            vmss.virtual_machine_profile.scheduled_events_profile = ScheduledEventsProfile()
        TerminateNotificationProfile = cmd.get_models('TerminateNotificationProfile')
        vmss.virtual_machine_profile.scheduled_events_profile.terminate_notification_profile =\
            TerminateNotificationProfile(not_before_timeout=terminate_notification_time,
                                         enable=enable_terminate_notification)
    if enable_automatic_repairs is not None or automatic_repairs_grace_period is not None:
        AutomaticRepairsPolicy = cmd.get_models('AutomaticRepairsPolicy')
        vmss.automatic_repairs_policy = \
            AutomaticRepairsPolicy(enabled="true", grace_period=automatic_repairs_grace_period)
    if ultra_ssd_enabled is not None:
        # newer API versions carry the flag on the VMSS itself, older ones on the VM profile
        if cmd.supported_api_version(min_api='2019-03-01', operation_group='virtual_machine_scale_sets'):
            if vmss.additional_capabilities is None:
                AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
                vmss.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled)
            else:
                vmss.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
        else:
            if vmss.virtual_machine_profile.additional_capabilities is None:
                AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
                vmss.virtual_machine_profile.additional_capabilities = AdditionalCapabilities(
                    ultra_ssd_enabled=ultra_ssd_enabled)
            else:
                vmss.virtual_machine_profile.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
    if scale_in_policy is not None:
        ScaleInPolicy = cmd.get_models('ScaleInPolicy')
        vmss.scale_in_policy = ScaleInPolicy(rules=scale_in_policy)
    if priority is not None:
        vmss.virtual_machine_profile.priority = priority
    if max_price is not None:
        if vmss.virtual_machine_profile.billing_profile is None:
            BillingProfile = cmd.get_models('BillingProfile')
            vmss.virtual_machine_profile.billing_profile = BillingProfile(max_price=max_price)
        else:
            vmss.virtual_machine_profile.billing_profile.max_price = max_price
    if proximity_placement_group is not None:
        vmss.proximity_placement_group = {'id': proximity_placement_group}
    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.create_or_update,
                       resource_group_name, name, **kwargs)
# endregion
# region VirtualMachineScaleSets Diagnostics
def set_vmss_diagnostics_extension(
        cmd, resource_group_name, vmss_name, settings, protected_settings=None, version=None,
        no_auto_upgrade=False):
    """Install or update the OS-appropriate diagnostics extension on a scale set.

    Refuses to proceed if an incompatible (different major version) Linux
    diagnostics extension is already installed, and — for manual upgrade
    mode — rolls the change out to all instances afterwards.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    is_linux_os = _is_linux_os(vmss.virtual_machine_profile)
    vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT
    if is_linux_os and vmss.virtual_machine_profile.extension_profile:  # check incompatibles
        exts = vmss.virtual_machine_profile.extension_profile.extensions or []
        major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0]
        # For VMSS, we don't do auto-removal like VM because there is no reliable API to wait for
        # the removal done before we can install the newer one
        if next((e for e in exts if e.name == _LINUX_DIAG_EXT and
                 not e.type_handler_version.startswith(major_ver + '.')), None):
            delete_cmd = 'az vmss extension delete -g {} --vmss-name {} -n {}'.format(
                resource_group_name, vmss_name, vm_extension_name)
            raise CLIError("There is an incompatible version of diagnostics extension installed. "
                           "Please remove it by running '{}', and retry. 'az vmss update-instances'"
                           " might be needed if with manual upgrade policy".format(delete_cmd))
    poller = set_vmss_extension(cmd, resource_group_name, vmss_name, vm_extension_name,
                                extension_mappings[vm_extension_name]['publisher'],
                                version or extension_mappings[vm_extension_name]['version'],
                                settings,
                                protected_settings,
                                no_auto_upgrade)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    UpgradeMode = cmd.get_models('UpgradeMode')
    if vmss.upgrade_policy.mode == UpgradeMode.manual:
        # manual upgrade policy: push the new model to every instance explicitly
        poller2 = update_vmss_instances(cmd, resource_group_name, vmss_name, ['*'])
        LongRunningOperation(cmd.cli_ctx)(poller2)
    return result
# endregion
# region VirtualMachineScaleSets Disks (Managed)
def attach_managed_data_disk_to_vmss(cmd, resource_group_name, vmss_name, size_gb=None, instance_id=None, lun=None,
                                     caching=None, disk=None, sku=None):
    """Attach a managed data disk either to the scale set model (instance_id is
    None) or to one specific VM instance.

    Without ``disk`` a new empty disk of ``size_gb`` is created; with ``disk``
    an existing managed disk (id or name) is attached.  When ``lun`` is omitted
    the first free LUN is used.
    """
    def _init_data_disk(storage_profile, lun, existing_disk=None):
        # Append a data-disk entry to the given storage profile in place.
        # Relies on DataDisk / DiskCreateOptionTypes / ManagedDiskParameters
        # bound below in the enclosing scope.
        data_disks = storage_profile.data_disks or []
        if lun is None:
            lun = _get_disk_lun(data_disks)
        if existing_disk is None:
            data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.empty, disk_size_gb=size_gb,
                                 caching=caching, managed_disk=ManagedDiskParameters(storage_account_type=sku))
        else:
            data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.attach, caching=caching,
                                 managed_disk=ManagedDiskParameters(id=existing_disk, storage_account_type=sku))
        data_disks.append(data_disk)
        storage_profile.data_disks = data_disks
    DiskCreateOptionTypes, ManagedDiskParameters = cmd.get_models(
        'DiskCreateOptionTypes', 'ManagedDiskParameters')
    # The scale-set model and a single instance use different disk models
    if disk is None:
        DataDisk = cmd.get_models('VirtualMachineScaleSetDataDisk')
    else:
        DataDisk = cmd.get_models('DataDisk')
    client = _compute_client_factory(cmd.cli_ctx)
    if instance_id is None:
        # Update the scale-set model so new/upgraded instances get the disk
        vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
        # pylint: disable=no-member
        _init_data_disk(vmss.virtual_machine_profile.storage_profile, lun)
        return client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id)
    _init_data_disk(vmss_vm.storage_profile, lun, disk)
    return client.virtual_machine_scale_set_vms.update(resource_group_name, vmss_name, instance_id, vmss_vm)
def detach_disk_from_vmss(cmd, resource_group_name, vmss_name, lun, instance_id=None):
    """Detach the data disk at `lun` from the scale-set model, or from one
    specific instance when `instance_id` is given."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if instance_id is None:
        target = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
        # pylint: disable=no-member
        current_disks = target.virtual_machine_profile.storage_profile.data_disks
    else:
        target = compute_client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id)
        current_disks = target.storage_profile.data_disks
    if not current_disks:
        raise CLIError("Data disk doesn't exist")
    remaining = [entry for entry in current_disks if entry.lun != lun]
    if len(remaining) == len(current_disks):
        raise CLIError("Could not find the data disk with lun '{}'".format(lun))
    if instance_id is None:
        target.virtual_machine_profile.storage_profile.data_disks = remaining
        return compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, target)
    target.storage_profile.data_disks = remaining
    return compute_client.virtual_machine_scale_set_vms.update(resource_group_name, vmss_name, instance_id, target)
# endregion
# region VirtualMachineScaleSets Extensions
def delete_vmss_extension(cmd, resource_group_name, vmss_name, extension_name):
    """Remove a named extension from a scale set's extension profile and push
    the updated model back to the service."""
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    profile = vmss.virtual_machine_profile.extension_profile
    if not profile:
        raise CLIError('Scale set has no extensions to delete')
    survivors = [ext for ext in profile.extensions if ext.name != extension_name]
    if len(survivors) == len(profile.extensions):
        raise CLIError('Extension {} not found'.format(extension_name))
    profile.extensions = survivors
    return client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
def get_vmss_extension(cmd, resource_group_name, vmss_name, extension_name):
    """Return the named extension from the scale set's profile, or None when
    the profile or the extension is absent."""
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    profile = vmss.virtual_machine_profile.extension_profile
    if not profile:
        return None
    for ext in profile.extensions:
        if ext.name == extension_name:
            return ext
    return None
def list_vmss_extensions(cmd, resource_group_name, vmss_name):
    """List extensions configured on a scale set; None when no profile exists."""
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    vm_profile = vmss.virtual_machine_profile
    if not vm_profile or not vm_profile.extension_profile:
        return None
    return vm_profile.extension_profile.extensions
def set_vmss_extension(cmd, resource_group_name, vmss_name, extension_name, publisher, version=None,
                       settings=None, protected_settings=None, no_auto_upgrade=False, force_update=False,
                       no_wait=False, extension_instance_name=None, provision_after_extensions=None):
    """Add or replace an extension on a scale-set model.

    Any existing extension with the same publisher/type is removed from the
    profile first, then a fresh VirtualMachineScaleSetExtension is appended and
    the whole scale set is pushed via create_or_update.
    """
    if not extension_instance_name:
        extension_instance_name = extension_name
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
        'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
    # pylint: disable=no-member
    version = _normalize_extension_version(cmd.cli_ctx, publisher, extension_name, version, vmss.location)
    extension_profile = vmss.virtual_machine_profile.extension_profile
    if extension_profile:
        extensions = extension_profile.extensions
        if extensions:
            # Drop any previous copy of the same extension; the SDK model exposes
            # the extension type via the 'type1' attribute.
            extension_profile.extensions = [x for x in extensions if
                                            x.type1.lower() != extension_name.lower() or x.publisher.lower() != publisher.lower()]  # pylint: disable=line-too-long
    ext = VirtualMachineScaleSetExtension(name=extension_instance_name,
                                          publisher=publisher,
                                          type1=extension_name,
                                          protected_settings=protected_settings,
                                          type_handler_version=version,
                                          settings=settings,
                                          auto_upgrade_minor_version=(not no_auto_upgrade),
                                          provision_after_extensions=provision_after_extensions)
    if force_update:
        # A changed tag forces the handler to re-run even with unchanged settings.
        ext.force_update_tag = str(_gen_guid())
    if not vmss.virtual_machine_profile.extension_profile:
        vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[])
    vmss.virtual_machine_profile.extension_profile.extensions.append(ext)
    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.create_or_update,
                       resource_group_name, vmss_name, vmss)
def set_orchestration_service_state(cmd, resource_group_name, vm_scale_set_name, service_name, action, no_wait=False):
    """Change the state of an orchestration service on a scale set.

    `service_name` currently has only one valid value ("AutomaticRepairs") and
    the SDK hard-codes it internally, so the argument is accepted (to keep the
    CLI forward-compatible) but not forwarded.
    """
    compute_client = _compute_client_factory(cmd.cli_ctx)
    operation = compute_client.virtual_machine_scale_sets.set_orchestration_service_state
    return sdk_no_wait(no_wait, operation, resource_group_name, vm_scale_set_name, action)
# endregion
# region VirtualMachineScaleSets RunCommand
def vmss_run_command_invoke(cmd, resource_group_name, vmss_name, command_id, instance_id, scripts=None, parameters=None):  # pylint: disable=line-too-long
    """Run a command on one VMSS instance by delegating to the shared run-command helper."""
    return run_command_invoke(cmd, resource_group_name, vmss_name, command_id,
                              scripts, parameters, instance_id)
# endregion
# region VirtualMachineScaleSets Identity
def remove_vmss_identity(cmd, resource_group_name, vmss_name, identities=None):
    """Remove managed identities from a scale set.

    Defaults to removing the system-assigned identity (MSI_LOCAL_ID) when no
    identities are specified.  Delegates the actual diffing to the shared
    ``_remove_identities`` helper via getter/setter closures.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    def _get_vmss(_, resource_group_name, vmss_name):
        # getter passed to _remove_identities (first arg unused here)
        return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    def _set_vmss(resource_group_name, name, vmss_instance):
        # setter: push only the identity through a sparse update model
        VirtualMachineScaleSetUpdate = cmd.get_models('VirtualMachineScaleSetUpdate',
                                                      operation_group='virtual_machine_scale_sets')
        vmss_update = VirtualMachineScaleSetUpdate(identity=vmss_instance.identity)
        return client.virtual_machine_scale_sets.update(resource_group_name, vmss_name, vmss_update)
    if identities is None:
        from ._vm_utils import MSI_LOCAL_ID
        identities = [MSI_LOCAL_ID]
    return _remove_identities(cmd, resource_group_name, vmss_name, identities,
                              _get_vmss,
                              _set_vmss)
# endregion
# region image galleries
def list_image_galleries(cmd, resource_group_name=None):
    """List image galleries in one resource group, or across the subscription."""
    client = _compute_client_factory(cmd.cli_ctx)
    if not resource_group_name:
        return client.galleries.list()
    return client.galleries.list_by_resource_group(resource_group_name)
def create_image_gallery(cmd, resource_group_name, gallery_name, description=None,
                         location=None, no_wait=False, tags=None):
    """Create a Shared Image Gallery.

    :param description: optional free-text description stored on the gallery.
    :param location: defaults to the resource group's location.
    :param no_wait: when True, return the LRO poller without waiting.
    """
    # Fix: the original created an identical compute client twice; the second,
    # redundant `_compute_client_factory` call has been removed.
    client = _compute_client_factory(cmd.cli_ctx)
    Gallery = cmd.get_models('Gallery')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    gallery = Gallery(description=description, location=location, tags=(tags or {}))
    return sdk_no_wait(no_wait, client.galleries.create_or_update, resource_group_name, gallery_name, gallery)
def create_gallery_image(cmd, resource_group_name, gallery_name, gallery_image_name, os_type, publisher, offer, sku,
                         os_state='Generalized', end_of_life_date=None, privacy_statement_uri=None,
                         release_note_uri=None, eula=None, description=None, location=None,
                         minimum_cpu_core=None, maximum_cpu_core=None, minimum_memory=None, maximum_memory=None,
                         disallowed_disk_types=None, plan_name=None, plan_publisher=None, plan_product=None, tags=None,
                         hyper_v_generation='V1'):
    """Create a gallery image definition.

    Builds the optional recommended-machine-configuration and purchase-plan
    sub-models only when at least one related argument is supplied.
    NOTE(review): privacy_statement_uri, release_note_uri and description are
    accepted but not forwarded to the GalleryImage model here — confirm
    whether that is intentional.
    """
    # pylint: disable=line-too-long
    GalleryImage, GalleryImageIdentifier, RecommendedMachineConfiguration, ResourceRange, Disallowed, ImagePurchasePlan = cmd.get_models(
        'GalleryImage', 'GalleryImageIdentifier', 'RecommendedMachineConfiguration', 'ResourceRange', 'Disallowed', 'ImagePurchasePlan')
    client = _compute_client_factory(cmd.cli_ctx)
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    end_of_life_date = fix_gallery_image_date_info(end_of_life_date)
    recommendation = None
    if any([minimum_cpu_core, maximum_cpu_core, minimum_memory, maximum_memory]):
        cpu_recommendation, memory_recommendation = None, None
        if any([minimum_cpu_core, maximum_cpu_core]):
            cpu_recommendation = ResourceRange(min=minimum_cpu_core, max=maximum_cpu_core)
        if any([minimum_memory, maximum_memory]):
            memory_recommendation = ResourceRange(min=minimum_memory, max=maximum_memory)
        recommendation = RecommendedMachineConfiguration(v_cp_us=cpu_recommendation, memory=memory_recommendation)
    purchase_plan = None
    if any([plan_name, plan_publisher, plan_product]):
        purchase_plan = ImagePurchasePlan(name=plan_name, publisher=plan_publisher, product=plan_product)
    # NOTE: 'hyper_vgeneration' (no underscore before 'generation') matches the
    # SDK model's keyword as used here — do not "fix" the spelling blindly.
    image = GalleryImage(identifier=GalleryImageIdentifier(publisher=publisher, offer=offer, sku=sku),
                         os_type=os_type, os_state=os_state, end_of_life_date=end_of_life_date,
                         recommended=recommendation, disallowed=Disallowed(disk_types=disallowed_disk_types),
                         purchase_plan=purchase_plan, location=location, eula=eula, tags=(tags or {}),
                         hyper_vgeneration=hyper_v_generation)
    return client.gallery_images.create_or_update(resource_group_name, gallery_name, gallery_image_name, image)
def create_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version,
                         location=None, target_regions=None, storage_account_type=None,
                         end_of_life_date=None, exclude_from_latest=None, replica_count=None, tags=None,
                         os_snapshot=None, data_snapshots=None, managed_image=None, data_snapshot_luns=None,
                         target_region_encryption=None):
    """Create a gallery image version from a managed image and/or snapshots.

    Bare names for --managed-image / --os-snapshot / --data-snapshots are
    expanded into full ARM resource ids within the current subscription and
    resource group.  On API >= 2019-07-01 a storage profile (managed-image
    source plus per-disk snapshot images) is built; older APIs only accept a
    managed-image publishing source.
    NOTE(review): target_region_encryption is accepted but never used in this
    body — confirm whether support was meant to be wired up here.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    ImageVersionPublishingProfile, GalleryArtifactSource, ManagedArtifact, ImageVersion, TargetRegion = cmd.get_models(
        'GalleryImageVersionPublishingProfile', 'GalleryArtifactSource', 'ManagedArtifact', 'GalleryImageVersion',
        'TargetRegion')
    # A managed image in another subscription requires an auxiliary-subscription
    # token on the client.
    aux_subscriptions = None
    if managed_image:
        aux_subscriptions = _parse_aux_subscriptions(managed_image)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    end_of_life_date = fix_gallery_image_date_info(end_of_life_date)
    if managed_image and not is_valid_resource_id(managed_image):
        managed_image = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
                                    namespace='Microsoft.Compute', type='images', name=managed_image)
    if os_snapshot and not is_valid_resource_id(os_snapshot):
        os_snapshot = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
                                  namespace='Microsoft.Compute', type='snapshots', name=os_snapshot)
    if data_snapshots:
        for i, s in enumerate(data_snapshots):
            if not is_valid_resource_id(data_snapshots[i]):
                data_snapshots[i] = resource_id(
                    subscription=client.config.subscription_id, resource_group=resource_group_name,
                    namespace='Microsoft.Compute', type='snapshots', name=s)
    source = GalleryArtifactSource(managed_image=ManagedArtifact(id=managed_image))
    profile = ImageVersionPublishingProfile(exclude_from_latest=exclude_from_latest, end_of_life_date=end_of_life_date,
                                            target_regions=target_regions or [TargetRegion(name=location)],
                                            source=source, replica_count=replica_count,
                                            storage_account_type=storage_account_type)
    if cmd.supported_api_version(min_api='2019-07-01', operation_group='gallery_image_versions'):
        if managed_image is None and os_snapshot is None:
            raise CLIError('usage error: Please provide --managed-image or --os-snapshot')
        GalleryImageVersionStorageProfile = cmd.get_models('GalleryImageVersionStorageProfile')
        GalleryArtifactVersionSource = cmd.get_models('GalleryArtifactVersionSource')
        GalleryOSDiskImage = cmd.get_models('GalleryOSDiskImage')
        GalleryDataDiskImage = cmd.get_models('GalleryDataDiskImage')
        source = os_disk_image = data_disk_images = None
        if managed_image is not None:
            source = GalleryArtifactVersionSource(id=managed_image)
        if os_snapshot is not None:
            os_disk_image = GalleryOSDiskImage(source=GalleryArtifactVersionSource(id=os_snapshot))
        if data_snapshot_luns and not data_snapshots:
            raise CLIError('usage error: --data-snapshot-luns must be used together with --data-snapshots')
        if data_snapshots:
            if data_snapshot_luns and len(data_snapshots) != len(data_snapshot_luns):
                raise CLIError('usage error: Length of --data-snapshots and --data-snapshot-luns should be equal.')
            # Default LUNs to 0..n-1 in the order the snapshots were given.
            if not data_snapshot_luns:
                data_snapshot_luns = [i for i in range(len(data_snapshots))]
            data_disk_images = []
            for i, s in enumerate(data_snapshots):
                data_disk_images.append(GalleryDataDiskImage(source=GalleryArtifactVersionSource(id=s),
                                                             lun=data_snapshot_luns[i]))
        storage_profile = GalleryImageVersionStorageProfile(source=source, os_disk_image=os_disk_image,
                                                            data_disk_images=data_disk_images)
        image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {}),
                                     storage_profile=storage_profile)
    else:
        if managed_image is None:
            raise CLIError('usage error: Please provide --managed-image')
        image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {}))
    return client.gallery_image_versions.create_or_update(resource_group_name=resource_group_name,
                                                          gallery_name=gallery_name,
                                                          gallery_image_name=gallery_image_name,
                                                          gallery_image_version_name=gallery_image_version,
                                                          gallery_image_version=image_version)
def fix_gallery_image_date_info(date_info):
    """Append a canonical time component when only a date was supplied, so the
    service accepts the value; None / values that already contain a time part
    (any 't'/'T') are passed through unchanged."""
    if not date_info:
        return date_info
    if 't' in date_info.lower():
        return date_info
    return date_info + 'T12:59:59Z'
def update_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name,
                         target_regions=None, replica_count=None, no_wait=False, **kwargs):
    """Update a gallery image version (generic-update pattern: the current
    instance arrives in kwargs['gallery_image_version']).

    When the storage profile has a managed-image source, the per-disk snapshot
    images are cleared so the service does not receive both source kinds.
    """
    image_version = kwargs['gallery_image_version']
    if target_regions:
        image_version.publishing_profile.target_regions = target_regions
    if replica_count:
        image_version.publishing_profile.replica_count = replica_count
    if image_version.storage_profile.source is not None:
        image_version.storage_profile.os_disk_image = image_version.storage_profile.data_disk_images = None
    # Cross-subscription managed-image source needs an auxiliary token.
    aux_subscriptions = None
    if image_version.storage_profile and image_version.storage_profile.source and \
            image_version.storage_profile.source.id:
        aux_subscriptions = _parse_aux_subscriptions(image_version.storage_profile.source.id)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    return sdk_no_wait(no_wait, client.gallery_image_versions.create_or_update, resource_group_name, gallery_name,
                       gallery_image_name, gallery_image_version_name, **kwargs)
# endregion
# region proximity placement groups
def create_proximity_placement_group(cmd, client, proximity_placement_group_name, resource_group_name,
                                     ppg_type=None, location=None, tags=None):
    """Create a proximity placement group, validating --type/-t against the SDK enum."""
    from knack.arguments import CaseInsensitiveList
    ProximityPlacementGroup, PPGType = cmd.get_models('ProximityPlacementGroup', 'ProximityPlacementGroupType')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    valid_types = CaseInsensitiveList([member.value for member in PPGType])
    if ppg_type and ppg_type not in valid_types:
        logger.info("Valid choices: %s", str(valid_types))
        raise CLIError("Usage error: invalid value for --type/-t")
    payload = ProximityPlacementGroup(name=proximity_placement_group_name,
                                      proximity_placement_group_type=ppg_type,
                                      location=location, tags=(tags or {}))
    return client.create_or_update(resource_group_name=resource_group_name,
                                   proximity_placement_group_name=proximity_placement_group_name,
                                   parameters=payload)
def list_proximity_placement_groups(client, resource_group_name=None):
    """List proximity placement groups, scoped to a resource group when given."""
    if not resource_group_name:
        return client.list_by_subscription()
    return client.list_by_resource_group(resource_group_name=resource_group_name)
# endregion
# region dedicated host
def create_dedicated_host_group(cmd, client, host_group_name, resource_group_name, platform_fault_domain_count=None,
                                automatic_placement=None, location=None, zones=None, tags=None):
    """Create a dedicated host group; location defaults to the resource group's."""
    DedicatedHostGroup = cmd.get_models('DedicatedHostGroup')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    payload = DedicatedHostGroup(location=location,
                                 platform_fault_domain_count=platform_fault_domain_count,
                                 support_automatic_placement=automatic_placement,
                                 zones=zones, tags=tags)
    return client.create_or_update(resource_group_name, host_group_name, parameters=payload)
def list_dedicated_host_groups(cmd, client, resource_group_name=None):
    """List dedicated host groups, optionally scoped to one resource group."""
    if not resource_group_name:
        return client.list_by_subscription()
    return client.list_by_resource_group(resource_group_name)
def get_dedicated_host_group_instance_view(client, host_group_name, resource_group_name):
    """Fetch a dedicated host group with its instance view expanded."""
    expand_option = "instanceView"
    return client.get(resource_group_name, host_group_name, expand=expand_option)
def create_dedicated_host(cmd, client, host_group_name, host_name, resource_group_name, sku, platform_fault_domain=None,
                          auto_replace_on_failure=None, license_type=None, location=None, tags=None):
    """Create a dedicated host inside an existing host group."""
    DedicatedHost = cmd.get_models('DedicatedHost')
    Sku = cmd.get_models('Sku')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    payload = DedicatedHost(location=location,
                            platform_fault_domain=platform_fault_domain,
                            auto_replace_on_failure=auto_replace_on_failure,
                            license_type=license_type,
                            sku=Sku(name=sku), tags=tags)
    return client.create_or_update(resource_group_name, host_group_name, host_name, parameters=payload)
def get_dedicated_host_instance_view(client, host_group_name, host_name, resource_group_name):
    """Fetch a dedicated host with its instance view expanded."""
    expand_option = "instanceView"
    return client.get(resource_group_name, host_group_name, host_name, expand=expand_option)
# endregion
# region VMMonitor
def _get_log_analytics_client(cmd):
    """Build a Log Analytics management client for the current subscription."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    from ._client_factory import cf_log_analytics
    return cf_log_analytics(cmd.cli_ctx, get_subscription_id(cmd.cli_ctx))
def _prepare_workspace(cmd, resource_group_name, workspace):
    """Resolve `workspace` (name or ARM id) to a workspace resource id,
    creating the workspace with default settings when the name does not exist.

    :param workspace: either a full ARM resource id (returned as-is) or a
        workspace name looked up — and created on a miss — in the given group.
    :return: the workspace's ARM resource id.
    """
    from msrestazure.tools import is_valid_resource_id
    from msrestazure.azure_exceptions import CloudError
    workspace_id = None
    if not is_valid_resource_id(workspace):
        workspace_name = workspace
        log_client = _get_log_analytics_client(cmd)
        workspace_result = None
        try:
            workspace_result = log_client.workspaces.get(resource_group_name, workspace_name)
        except CloudError:
            # Workspace not found: create one with PerGB2018 pricing and the
            # default 30-day retention in the resource group's location.
            from azure.mgmt.loganalytics.models import Workspace, WorkspaceSku, WorkspaceSkuNameEnum
            sku = WorkspaceSku(name=WorkspaceSkuNameEnum.per_gb2018.value)
            retention_time = 30  # default value
            location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
            workspace_instance = Workspace(location=location,
                                           sku=sku,
                                           retention_in_days=retention_time)
            workspace_result = LongRunningOperation(cmd.cli_ctx)(log_client.workspaces.create_or_update(
                resource_group_name,
                workspace_name,
                workspace_instance))
        workspace_id = workspace_result.id
    else:
        workspace_id = workspace
    return workspace_id
def _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name):
    """Attach the default OS-specific data sources to a Log Analytics workspace.

    Each default data-source setting (loaded from _workspace_data_source_settings
    for the given OS) is created with a GUID-suffixed name; individual failures
    are logged as warnings and skipped rather than aborting the command.
    """
    from ._client_factory import cf_log_analytics_data_sources
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.mgmt.loganalytics.models import DataSource
    from msrestazure.azure_exceptions import CloudError
    subscription_id = get_subscription_id(cmd.cli_ctx)
    data_sources_client = cf_log_analytics_data_sources(cmd.cli_ctx, subscription_id)
    data_source_name_template = "DataSource_{}_{}"
    default_data_sources = None
    if os_type.lower() == 'linux':
        from ._workspace_data_source_settings import default_linux_data_sources
        default_data_sources = default_linux_data_sources
    elif os_type.lower() == 'windows':
        from ._workspace_data_source_settings import default_windows_data_sources
        default_data_sources = default_windows_data_sources
    if default_data_sources is not None:
        for data_source_kind, data_source_settings in default_data_sources.items():
            for data_source_setting in data_source_settings:
                data_source = DataSource(kind=data_source_kind,
                                         properties=data_source_setting)
                # GUID suffix keeps names unique across repeated invocations
                data_source_name = data_source_name_template.format(data_source_kind, _gen_guid())
                try:
                    data_sources_client.create_or_update(resource_group_name,
                                                         workspace_name,
                                                         data_source_name,
                                                         data_source)
                except CloudError as ex:
                    # best-effort: keep going so one bad data source doesn't fail the rest
                    logger.warning("Failed to set data source due to %s. "
                                   "Skip this step and need manual work later.", ex.message)
    else:
        logger.warning("Unsupported OS type. Skip the default settings for log analytics workspace.")
def execute_query_for_vm(cmd, client, resource_group_name, vm_name, analytics_query, timespan=None):
    """Executes a query against the Log Analytics workspace linked with a vm.

    The workspace id is discovered from the monitoring-agent extension resource
    attached to the VM (Windows MMA or Linux OMS agent).

    :raises CLIError: when no monitoring agent / workspace id can be found.
    """
    from azure.loganalytics.models import QueryBody
    vm = get_vm(cmd, resource_group_name, vm_name)
    workspace = None
    extension_resources = vm.resources or []
    for resource in extension_resources:
        if resource.name == "MicrosoftMonitoringAgent" or resource.name == "OmsAgentForLinux":
            workspace = resource.settings.get('workspaceId', None)
    if workspace is None:
        # Fix: the original error message misspelled 'workspace' as 'workpsace'.
        raise CLIError('Cannot find the corresponding log analytics workspace. '
                       'Please check the status of log analytics workspace.')
    return client.query(workspace, QueryBody(query=analytics_query, timespan=timespan))
def _set_log_analytics_workspace_extension(cmd, resource_group_name, vm, vm_name, workspace_name):
    """Install the OS-appropriate OMS/monitoring agent on a VM, wiring it to the
    given Log Analytics workspace.

    The workspace customer id goes into public settings; the primary shared key
    goes into protected settings so it is not exposed in the VM model.
    """
    is_linux_os = _is_linux_os(vm)
    vm_extension_name = _LINUX_OMS_AGENT_EXT if is_linux_os else _WINDOWS_OMS_AGENT_EXT
    log_client = _get_log_analytics_client(cmd)
    customer_id = log_client.workspaces.get(resource_group_name, workspace_name).customer_id
    settings = {
        'workspaceId': customer_id,
        # prevent the agent from multi-homing to several workspaces
        'stopOnMultipleConnections': 'true'
    }
    primary_shared_key = log_client.shared_keys.get_shared_keys(resource_group_name, workspace_name).primary_shared_key
    protected_settings = {
        'workspaceKey': primary_shared_key,
    }
    return set_extension(cmd, resource_group_name, vm_name, vm_extension_name,
                         extension_mappings[vm_extension_name]['publisher'],
                         extension_mappings[vm_extension_name]['version'],
                         settings,
                         protected_settings)
# endregion
# disk encryption set
def create_disk_encryption_set(cmd, client, resource_group_name, disk_encryption_set_name,
                               key_url, source_vault, encryption_type=None, location=None, tags=None, no_wait=False):
    """Create a disk encryption set backed by a Key Vault key.

    A bare vault name in `source_vault` is expanded to a full ARM id in the
    current subscription/resource group.  The set always gets a system-assigned
    identity (required to access the vault key).
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    DiskEncryptionSet, EncryptionSetIdentity, KeyVaultAndKeyReference, SourceVault = cmd.get_models(
        'DiskEncryptionSet', 'EncryptionSetIdentity', 'KeyVaultAndKeyReference', 'SourceVault')
    encryption_set_identity = EncryptionSetIdentity(type='SystemAssigned')
    if not is_valid_resource_id(source_vault):
        source_vault = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
                                   namespace='Microsoft.KeyVault', type='vaults', name=source_vault)
    source_vault = SourceVault(id=source_vault)
    keyVault_and_key_reference = KeyVaultAndKeyReference(source_vault=source_vault, key_url=key_url)
    disk_encryption_set = DiskEncryptionSet(location=location, tags=tags, identity=encryption_set_identity,
                                            active_key=keyVault_and_key_reference, encryption_type=encryption_type)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, disk_encryption_set_name,
                       disk_encryption_set)
def list_disk_encryption_sets(cmd, client, resource_group_name=None):
    """List disk encryption sets, optionally scoped to one resource group."""
    if not resource_group_name:
        return client.list()
    return client.list_by_resource_group(resource_group_name)
def update_disk_encryption_set(instance, client, resource_group_name, key_url=None, source_vault=None):
    """Update the active key and/or source vault of a disk encryption set
    (generic-update pattern: mutate and return `instance`).

    Fix: the original called is_valid_resource_id(None) when --source-vault was
    omitted, built a bogus '.../vaults/None' id and assigned it to the
    instance; the expansion is now guarded on source_vault being provided.
    """
    if source_vault:
        from msrestazure.tools import resource_id, is_valid_resource_id
        # expand a bare vault name to a full ARM id
        if not is_valid_resource_id(source_vault):
            source_vault = resource_id(subscription=client.config.subscription_id,
                                       resource_group=resource_group_name,
                                       namespace='Microsoft.KeyVault', type='vaults', name=source_vault)
        instance.active_key.source_vault.id = source_vault
    if key_url:
        instance.active_key.key_url = key_url
    return instance
# endregion
# region Disk Access
def create_disk_access(cmd, client, resource_group_name, disk_access_name, location=None, tags=None, no_wait=False):
    """Create a disk access resource (asynchronously when no_wait is set)."""
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, disk_access_name, location=location, tags=tags)
def list_disk_accesses(cmd, client, resource_group_name=None):
    """List disk access resources, optionally scoped to one resource group."""
    if not resource_group_name:
        return client.list()
    return client.list_by_resource_group(resource_group_name)
def set_disk_access(cmd, client, parameters, resource_group_name, disk_access_name, tags=None, no_wait=False):
    """Update a disk access resource.

    `parameters` is supplied by the generic-update machinery and is
    intentionally unused; location is re-resolved from the resource group.
    """
    region = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, disk_access_name, location=region, tags=tags)
# endregion
| 51.381787 | 163 | 0.695868 |
from __future__ import print_function
import json
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from six.moves.urllib.request import urlopen
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.command_modules.vm._validators import _get_resource_group_from_vault_name
from azure.cli.core.commands.validators import validate_file_or_dict
from azure.cli.core.commands import LongRunningOperation, DeploymentOutputLongRunningOperation
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import sdk_no_wait
from ._vm_utils import read_content_if_is_file
from ._vm_diagnostics_templates import get_default_diag_config
from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services,
load_images_thru_services, _get_latest_image_version)
from ._client_factory import (_compute_client_factory, cf_public_ip_addresses, cf_vm_image_term,
_dev_test_labs_client_factory)
logger = get_logger(__name__)
# Well-known VM extension type names used throughout this module:
# the VMAccess pair resets credentials/SSH, the Diagnostic pair configures
# guest diagnostics, and the OMS pair connects machines to Log Analytics.
_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess'
_LINUX_ACCESS_EXT = 'VMAccessForLinux'
_WINDOWS_ACCESS_EXT = 'VMAccessAgent'
_LINUX_DIAG_EXT = 'LinuxDiagnostic'
_WINDOWS_DIAG_EXT = 'IaaSDiagnostics'
_LINUX_OMS_AGENT_EXT = 'OmsAgentForLinux'
_WINDOWS_OMS_AGENT_EXT = 'MicrosoftMonitoringAgent'
# Default publisher and "major.minor" handler version for each well-known
# extension; consumed by the set_*_extension / diagnostics helpers above.
extension_mappings = {
    _LINUX_ACCESS_EXT: {
        'version': '1.5',
        'publisher': 'Microsoft.OSTCExtensions'
    },
    _WINDOWS_ACCESS_EXT: {
        'version': '2.4',
        'publisher': 'Microsoft.Compute'
    },
    _LINUX_DIAG_EXT: {
        'version': '3.0',
        'publisher': 'Microsoft.Azure.Diagnostics'
    },
    _WINDOWS_DIAG_EXT: {
        'version': '1.5',
        'publisher': 'Microsoft.Azure.Diagnostics'
    },
    _LINUX_OMS_AGENT_EXT: {
        'version': '1.0',
        'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
    },
    _WINDOWS_OMS_AGENT_EXT: {
        'version': '1.0',
        'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
    }
}
def _construct_identity_info(identity_scope, identity_role, implicit_identity, external_identities):
info = {}
if identity_scope:
info['scope'] = identity_scope
info['role'] = str(identity_role) # could be DefaultStr, so convert to string
info['userAssignedIdentities'] = external_identities or {}
info['systemAssignedIdentity'] = implicit_identity or ''
return info
# for injecting test seams to produce predicatable role assignment id for playback
def _gen_guid():
import uuid
return uuid.uuid4()
def _get_access_extension_upgrade_info(extensions, name):
    """Work out publisher/version/auto-upgrade for the VMAccess extension `name`.

    Compares the currently installed handler version against the default in
    extension_mappings: older installed -> request auto-upgrade; newer
    installed -> keep the installed version.
    :return: (publisher, version, auto_upgrade) where auto_upgrade may be None.
    """
    version = extension_mappings[name]['version']
    publisher = extension_mappings[name]['publisher']
    auto_upgrade = None
    if extensions:
        extension = next((e for e in extensions if e.name == name), None)
        from distutils.version import LooseVersion  # pylint: disable=no-name-in-module,import-error
        if extension and LooseVersion(extension.type_handler_version) < LooseVersion(version):
            auto_upgrade = True
        elif extension and LooseVersion(extension.type_handler_version) > LooseVersion(version):
            version = extension.type_handler_version
    return publisher, version, auto_upgrade
def _get_extension_instance_name(instance_view, publisher, extension_type_name,
suggested_name=None):
extension_instance_name = suggested_name or extension_type_name
full_type_name = '.'.join([publisher, extension_type_name])
if instance_view.extensions:
ext = next((x for x in instance_view.extensions
if x.type and (x.type.lower() == full_type_name.lower())), None)
if ext:
extension_instance_name = ext.name
return extension_instance_name
def _get_storage_management_client(cli_ctx):
    """Return a storage management client bound to the given CLI context."""
    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
def _get_disk_lun(data_disks):
# start from 0, search for unused int for lun
if not data_disks:
return 0
existing_luns = sorted([d.lun for d in data_disks])
for i, current in enumerate(existing_luns):
if current != i:
return i
return len(existing_luns)
def _get_private_config(cli_ctx, resource_group_name, storage_account):
    """Build the protected-settings payload (account name + primary key) used
    by the diagnostics extensions."""
    mgmt = _get_storage_management_client(cli_ctx)
    # pylint: disable=no-member
    account_keys = mgmt.storage_accounts.list_keys(resource_group_name, storage_account).keys
    return {
        'storageAccountName': storage_account,
        'storageAccountKey': account_keys[0].value
    }
def _get_resource_group_location(cli_ctx, resource_group_name):
    """Look up the Azure location of a resource group."""
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    # pylint: disable=no-member
    group = resource_client.resource_groups.get(resource_group_name)
    return group.location
def _get_sku_object(cmd, sku):
    """Wrap `sku` in a DiskSku model on API >= 2017-03-30; older APIs take the raw string."""
    if not cmd.supported_api_version(min_api='2017-03-30'):
        return sku
    DiskSku = cmd.get_models('DiskSku')
    return DiskSku(name=sku)
def _grant_access(cmd, resource_group_name, name, duration_in_seconds, is_disk, access_level):
    """Grant temporary SAS access on a disk or snapshot (read by default)."""
    AccessLevel = cmd.get_models('AccessLevel')
    client = _compute_client_factory(cmd.cli_ctx)
    operations = client.disks if is_disk else client.snapshots
    level = access_level or AccessLevel.read
    return operations.grant_access(resource_group_name, name, level, duration_in_seconds)
def _is_linux_os(vm):
os_type = vm.storage_profile.os_disk.os_type.value if vm.storage_profile.os_disk.os_type else None
if os_type:
return os_type.lower() == 'linux'
# the os_type could be None for VM scaleset, let us check out os configurations
if vm.os_profile.linux_configuration:
return bool(vm.os_profile.linux_configuration)
return False
def _merge_secrets(secrets):
merged = {}
vc_name = 'vaultCertificates'
for outer in secrets:
for secret in outer:
if secret['sourceVault']['id'] not in merged:
merged[secret['sourceVault']['id']] = []
merged[secret['sourceVault']['id']] = \
secret[vc_name] + merged[secret['sourceVault']['id']]
# transform the reduced map to vm format
formatted = [{'sourceVault': {'id': source_id},
'vaultCertificates': value}
for source_id, value in list(merged.items())]
return formatted
def _normalize_extension_version(cli_ctx, publisher, vm_extension_name, version, location):
def _trim_away_build_number(version):
# workaround a known issue: the version must only contain "major.minor", even though
# "extension image list" gives more detail
return '.'.join(version.split('.')[0:2])
if not version:
result = load_extension_images_thru_services(cli_ctx, publisher, vm_extension_name, None, location,
show_latest=True, partial_match=False)
if not result:
raise CLIError('Failed to find the latest version for the extension "{}"'.format(vm_extension_name))
# with 'show_latest' enabled, we will only get one result.
version = result[0]['version']
version = _trim_away_build_number(version)
return version
def _parse_rg_name(strid):
    """Split an ARM resource id into a ``(resource_group, name)`` pair."""
    from msrestazure.tools import parse_resource_id
    id_parts = parse_resource_id(strid)
    return id_parts['resource_group'], id_parts['name']
def _set_sku(cmd, instance, sku):
if cmd.supported_api_version(min_api='2017-03-30'):
instance.sku = cmd.get_models('DiskSku')(name=sku)
else:
instance.account_type = sku
def _show_missing_access_warning(resource_group, name, command):
    """Warn that a managed identity was created without '--scope', so no role
    assignment exists yet, and show the command to create one.

    Fix: the message previously misspelled "principal" as "pricipal".
    """
    warn = ("No access was given yet to the '{1}', because '--scope' was not provided. "
            "You should setup by creating a role assignment, e.g. "
            "'az role assignment create --assignee <principal-id> --role contributor -g {0}' "
            "would let it access the current resource group. To get the principal id, run "
            "'az {2} show -g {0} -n {1} --query \"identity.principalId\" -otsv'".format(resource_group, name, command))
    logger.warning(warn)
def _parse_aux_subscriptions(resource_id):
    """Return the subscription of *resource_id* as a single-item list for use
    as auxiliary subscriptions, or None when the id is not a valid ARM id."""
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if not is_valid_resource_id(resource_id):
        return None
    return [parse_resource_id(resource_id)['subscription']]
# Hide extension information from output as the info is not correct and unhelpful; also
# commands using it mean to hide the extension concept from users.
class ExtensionUpdateLongRunningOperation(LongRunningOperation):  # pylint: disable=too-few-public-methods
    # Marker subclass: behaves exactly like LongRunningOperation; its distinct
    # type lets command registration suppress extension details from output.
    pass
# region Disks (Managed)
def create_managed_disk(cmd, resource_group_name, disk_name, location=None,  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
                        size_gb=None, sku='Premium_LRS', os_type=None,
                        source=None, for_upload=None, upload_size_bytes=None,  # pylint: disable=unused-argument
                        # below are generated internally from 'source'
                        source_blob_uri=None, source_disk=None, source_snapshot=None,
                        source_storage_account_id=None, no_wait=False, tags=None, zone=None,
                        disk_iops_read_write=None, disk_mbps_read_write=None, hyper_v_generation=None,
                        encryption_type=None, disk_encryption_set=None, max_shares=None,
                        disk_iops_read_only=None, disk_mbps_read_only=None,
                        image_reference=None, image_reference_lun=None,
                        gallery_image_reference=None, gallery_image_reference_lun=None,
                        network_access_policy=None, disk_access=None):
    """Create a managed disk.

    The disk's create option is inferred from the supplied source arguments:
    a blob URI imports, an existing disk/snapshot copies, --for-upload prepares
    an upload target, an (gallery) image reference builds from an image, and
    with none of these an empty disk of --size-gb is created.

    Returns the result of Disks.create_or_update (a poller when no_wait).
    Raises CLIError for invalid argument combinations.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    Disk, CreationData, DiskCreateOption, Encryption = cmd.get_models(
        'Disk', 'CreationData', 'DiskCreateOption', 'Encryption')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    # Map the mutually-exclusive source arguments to the service's create option.
    if source_blob_uri:
        option = DiskCreateOption.import_enum
    elif source_disk or source_snapshot:
        option = DiskCreateOption.copy
    elif for_upload:
        option = DiskCreateOption.upload
    elif image_reference or gallery_image_reference:
        option = DiskCreateOption.from_image
    else:
        option = DiskCreateOption.empty
    if source_storage_account_id is None and source_blob_uri is not None:
        # Derive the storage account id from the blob URI's host name.
        subscription_id = get_subscription_id(cmd.cli_ctx)
        storage_account_name = source_blob_uri.split('.')[0].split('/')[-1]
        source_storage_account_id = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Storage', type='storageAccounts', name=storage_account_name)
    if upload_size_bytes is not None and for_upload is not True:
        raise CLIError('usage error: --upload-size-bytes should be used together with --for-upload')
    if image_reference is not None:
        if not is_valid_resource_id(image_reference):
            # URN or name
            terms = image_reference.split(':')
            if len(terms) == 4:  # URN: resolve 'latest' and turn it into a resource id
                disk_publisher, disk_offer, disk_sku, disk_version = terms[0], terms[1], terms[2], terms[3]
                if disk_version.lower() == 'latest':
                    disk_version = _get_latest_image_version(cmd.cli_ctx, location, disk_publisher, disk_offer,
                                                             disk_sku)
                client = _compute_client_factory(cmd.cli_ctx)
                response = client.virtual_machine_images.get(location, disk_publisher, disk_offer, disk_sku,
                                                             disk_version)
                image_reference = response.id
            else:  # error
                raise CLIError('usage error: --image-reference should be ID or URN (publisher:offer:sku:version).')
        # image_reference is an ID now
        image_reference = {'id': image_reference}
        if image_reference_lun is not None:
            image_reference['lun'] = image_reference_lun
    if gallery_image_reference is not None:
        gallery_image_reference = {'id': gallery_image_reference}
        if gallery_image_reference_lun is not None:
            gallery_image_reference['lun'] = gallery_image_reference_lun
    creation_data = CreationData(create_option=option, source_uri=source_blob_uri,
                                 image_reference=image_reference, gallery_image_reference=gallery_image_reference,
                                 source_resource_id=source_disk or source_snapshot,
                                 storage_account_id=source_storage_account_id,
                                 upload_size_bytes=upload_size_bytes)
    if size_gb is None and upload_size_bytes is None and (option == DiskCreateOption.empty or for_upload):
        raise CLIError('usage error: --size-gb or --upload-size-bytes required to create an empty disk')
    # Expand bare names into full resource ids within the current subscription/group.
    if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
        disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
    if disk_access is not None and not is_valid_resource_id(disk_access):
        disk_access = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
    encryption = None
    if disk_encryption_set:
        encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set)
    disk = Disk(location=location, creation_data=creation_data, tags=(tags or {}),
                sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, os_type=os_type, encryption=encryption)
    # Optional properties are only written when the caller requested them.
    if hyper_v_generation:
        disk.hyper_vgeneration = hyper_v_generation
    if zone:
        disk.zones = zone
    if disk_iops_read_write is not None:
        disk.disk_iops_read_write = disk_iops_read_write
    if disk_mbps_read_write is not None:
        disk.disk_mbps_read_write = disk_mbps_read_write
    if max_shares is not None:
        disk.max_shares = max_shares
    if disk_iops_read_only is not None:
        disk.disk_iops_read_only = disk_iops_read_only
    if disk_mbps_read_only is not None:
        disk.disk_mbps_read_only = disk_mbps_read_only
    if network_access_policy is not None:
        disk.network_access_policy = network_access_policy
    if disk_access is not None:
        disk.disk_access_id = disk_access
    client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, client.disks.create_or_update, resource_group_name, disk_name, disk)
def grant_disk_access(cmd, resource_group_name, disk_name, duration_in_seconds, access_level=None):
    """Grant a time-limited SAS on a managed disk (read access by default)."""
    return _grant_access(cmd, resource_group_name, disk_name, duration_in_seconds,
                         is_disk=True, access_level=access_level)
def list_managed_disks(cmd, resource_group_name=None):
    """List managed disks, scoped to a resource group when one is given."""
    disks_op = _compute_client_factory(cmd.cli_ctx).disks
    if not resource_group_name:
        return disks_op.list()
    return disks_op.list_by_resource_group(resource_group_name)
def update_managed_disk(cmd, resource_group_name, instance, size_gb=None, sku=None, disk_iops_read_write=None,
                        disk_mbps_read_write=None, encryption_type=None, disk_encryption_set=None,
                        network_access_policy=None, disk_access=None):
    """Apply the requested changes to an existing managed disk.

    *instance* is the Disk object fetched by the generic-update machinery; only
    the parameters the caller supplied (non-None) are written back, and the
    mutated instance is returned for the subsequent create_or_update call.

    Raises CLIError when a disk encryption set is requested without
    customer-managed-key encryption.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    if size_gb is not None:
        instance.disk_size_gb = size_gb
    if sku is not None:
        _set_sku(cmd, instance, sku)
    if disk_iops_read_write is not None:
        instance.disk_iops_read_write = disk_iops_read_write
    if disk_mbps_read_write is not None:
        instance.disk_mbps_read_write = disk_mbps_read_write
    if disk_encryption_set is not None:
        # Attaching a disk encryption set requires customer-managed-key encryption.
        if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \
                encryption_type != 'EncryptionAtRestWithCustomerKey':
            raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey')
        if not is_valid_resource_id(disk_encryption_set):
            disk_encryption_set = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
        instance.encryption.disk_encryption_set_id = disk_encryption_set
    if encryption_type is not None:
        instance.encryption.type = encryption_type
    if network_access_policy is not None:
        instance.network_access_policy = network_access_policy
    if disk_access is not None:
        # Fix: previously disk_access_id was only assigned in the bare-name
        # branch, so passing a full resource id via --disk-access was silently
        # ignored. Now it is always applied (consistent with create_managed_disk).
        if not is_valid_resource_id(disk_access):
            disk_access = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
        instance.disk_access_id = disk_access
    return instance
# endregion
# region Images (Managed)
def create_image(cmd, resource_group_name, name, source, os_type=None, data_disk_sources=None, location=None,  # pylint: disable=too-many-locals,unused-argument
                 # below are generated internally from 'source' and 'data_disk_sources'
                 source_virtual_machine=None, storage_sku=None, hyper_v_generation=None,
                 os_blob_uri=None, data_blob_uris=None,
                 os_snapshot=None, data_snapshots=None,
                 os_disk=None, os_disk_caching=None, data_disks=None, data_disk_caching=None,
                 tags=None, zone_resilient=None):
    """Create a managed VM image, either from a generalized source VM or from
    an explicit OS disk plus optional data disks (blobs, snapshots or disks).

    Data disks receive sequential LUNs in the order: blob URIs, snapshots,
    managed disks. Returns the result of Images.create_or_update.
    """
    ImageOSDisk, ImageDataDisk, ImageStorageProfile, Image, SubResource, OperatingSystemStateTypes = cmd.get_models(
        'ImageOSDisk', 'ImageDataDisk', 'ImageStorageProfile', 'Image', 'SubResource', 'OperatingSystemStateTypes')
    if source_virtual_machine:
        # Whole-VM capture: the storage profile comes from the source VM.
        location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
        image_storage_profile = None if zone_resilient is None else ImageStorageProfile(zone_resilient=zone_resilient)
        image = Image(location=location, source_virtual_machine=SubResource(id=source_virtual_machine),
                      storage_profile=image_storage_profile, tags=(tags or {}))
    else:
        # Assemble the storage profile from the individual OS/data disk sources.
        os_disk = ImageOSDisk(os_type=os_type,
                              os_state=OperatingSystemStateTypes.generalized,
                              caching=os_disk_caching,
                              snapshot=SubResource(id=os_snapshot) if os_snapshot else None,
                              managed_disk=SubResource(id=os_disk) if os_disk else None,
                              blob_uri=os_blob_uri,
                              storage_account_type=storage_sku)
        all_data_disks = []
        lun = 0
        if data_blob_uris:
            for d in data_blob_uris:
                all_data_disks.append(ImageDataDisk(lun=lun, blob_uri=d, caching=data_disk_caching))
                lun += 1
        if data_snapshots:
            for d in data_snapshots:
                all_data_disks.append(ImageDataDisk(lun=lun, snapshot=SubResource(id=d), caching=data_disk_caching))
                lun += 1
        if data_disks:
            for d in data_disks:
                all_data_disks.append(ImageDataDisk(lun=lun, managed_disk=SubResource(id=d), caching=data_disk_caching))
                lun += 1
        image_storage_profile = ImageStorageProfile(os_disk=os_disk, data_disks=all_data_disks)
        if zone_resilient is not None:
            image_storage_profile.zone_resilient = zone_resilient
        location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
        # pylint: disable=no-member
        image = Image(location=location, storage_profile=image_storage_profile, tags=(tags or {}))
    if hyper_v_generation:
        image.hyper_vgeneration = hyper_v_generation
    client = _compute_client_factory(cmd.cli_ctx)
    return client.images.create_or_update(resource_group_name, name, image)
def update_image(instance, tags=None):
    """Update an image resource; only its tags are mutable here."""
    if tags is None:
        return instance
    instance.tags = tags
    return instance
def list_images(cmd, resource_group_name=None):
    """List managed images, scoped to a resource group when one is given."""
    images_op = _compute_client_factory(cmd.cli_ctx).images
    if not resource_group_name:
        return images_op.list()
    return images_op.list_by_resource_group(resource_group_name)
# endregion
# region Snapshots
# pylint: disable=unused-argument,too-many-locals
def create_snapshot(cmd, resource_group_name, snapshot_name, location=None, size_gb=None, sku='Standard_LRS',
                    source=None, for_upload=None, incremental=None,
                    # below are generated internally from 'source'
                    source_blob_uri=None, source_disk=None, source_snapshot=None, source_storage_account_id=None,
                    hyper_v_generation=None, tags=None, no_wait=False, disk_encryption_set=None,
                    encryption_type=None, network_access_policy=None, disk_access=None):
    """Create a snapshot.

    The create option is inferred from the source arguments: a blob URI
    imports, an existing disk/snapshot copies, --for-upload prepares an
    upload target, and otherwise an empty snapshot of --size-gb is created.
    Returns the result of Snapshots.create_or_update (a poller when no_wait).
    Raises CLIError for invalid argument combinations.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    Snapshot, CreationData, DiskCreateOption, Encryption = cmd.get_models(
        'Snapshot', 'CreationData', 'DiskCreateOption', 'Encryption')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    # Map the mutually-exclusive source arguments to the service's create option.
    if source_blob_uri:
        option = DiskCreateOption.import_enum
    elif source_disk or source_snapshot:
        option = DiskCreateOption.copy
    elif for_upload:
        option = DiskCreateOption.upload
    else:
        option = DiskCreateOption.empty
    creation_data = CreationData(create_option=option, source_uri=source_blob_uri,
                                 image_reference=None,
                                 source_resource_id=source_disk or source_snapshot,
                                 storage_account_id=source_storage_account_id)
    if size_gb is None and option == DiskCreateOption.empty:
        raise CLIError('Please supply size for the snapshots')
    # Expand bare names into full resource ids within the current subscription/group.
    if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
        disk_encryption_set = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
    if disk_access is not None and not is_valid_resource_id(disk_access):
        disk_access = resource_id(
            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
    if disk_encryption_set is not None and encryption_type is None:
        raise CLIError('usage error: Please specify --encryption-type.')
    if encryption_type is not None:
        encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set)
    else:
        encryption = None
    snapshot = Snapshot(location=location, creation_data=creation_data, tags=(tags or {}),
                        sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, incremental=incremental,
                        encryption=encryption)
    if hyper_v_generation:
        snapshot.hyper_vgeneration = hyper_v_generation
    if network_access_policy is not None:
        snapshot.network_access_policy = network_access_policy
    if disk_access is not None:
        snapshot.disk_access_id = disk_access
    client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, client.snapshots.create_or_update, resource_group_name, snapshot_name, snapshot)
def grant_snapshot_access(cmd, resource_group_name, snapshot_name, duration_in_seconds, access_level=None):
    """Grant a time-limited SAS on a snapshot (read access by default)."""
    return _grant_access(cmd, resource_group_name, snapshot_name, duration_in_seconds,
                         is_disk=False, access_level=access_level)
def list_snapshots(cmd, resource_group_name=None):
    """List snapshots, scoped to a resource group when one is given."""
    snapshots_op = _compute_client_factory(cmd.cli_ctx).snapshots
    if not resource_group_name:
        return snapshots_op.list()
    return snapshots_op.list_by_resource_group(resource_group_name)
def update_snapshot(cmd, resource_group_name, instance, sku=None, disk_encryption_set=None,
                    encryption_type=None, network_access_policy=None, disk_access=None):
    """Apply the requested changes to an existing snapshot.

    *instance* is the Snapshot object fetched by the generic-update machinery;
    only the parameters the caller supplied (non-None) are written back, and
    the mutated instance is returned for the subsequent create_or_update call.

    Raises CLIError when a disk encryption set is requested without
    customer-managed-key encryption.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    if sku is not None:
        _set_sku(cmd, instance, sku)
    if disk_encryption_set is not None:
        # Attaching a disk encryption set requires customer-managed-key encryption.
        if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \
                encryption_type != 'EncryptionAtRestWithCustomerKey':
            raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey')
        if not is_valid_resource_id(disk_encryption_set):
            disk_encryption_set = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
        instance.encryption.disk_encryption_set_id = disk_encryption_set
    if encryption_type is not None:
        instance.encryption.type = encryption_type
    if network_access_policy is not None:
        instance.network_access_policy = network_access_policy
    if disk_access is not None:
        # Fix: previously disk_access_id was only assigned in the bare-name
        # branch, so passing a full resource id via --disk-access was silently
        # ignored. Now it is always applied (consistent with create_snapshot).
        if not is_valid_resource_id(disk_access):
            disk_access = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
        instance.disk_access_id = disk_access
    return instance
# endregion
# region VirtualMachines Identity
def show_vm_identity(cmd, resource_group_name, vm_name):
    """Show the managed identity attached to a VM."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vm = compute_client.virtual_machines.get(resource_group_name, vm_name)
    return vm.identity
def show_vmss_identity(cmd, resource_group_name, vm_name):
    """Show the managed identity attached to a VM scale set."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vm_name)
    return vmss.identity
def assign_vm_identity(cmd, resource_group_name, vm_name, assign_identity=None, identity_role='Contributor',
                       identity_role_id=None, identity_scope=None):
    """Enable system-assigned and/or user-assigned managed identities on a VM.

    *assign_identity* lists the requested identities ('[system]' and/or
    user-assigned identity ids). When *identity_scope* is given, a role
    assignment is also created via the shared assign_identity helper.
    Returns a summary dict built by _construct_identity_info.
    """
    VirtualMachineIdentity, ResourceIdentityType, VirtualMachineUpdate = cmd.get_models('VirtualMachineIdentity',
                                                                                        'ResourceIdentityType',
                                                                                        'VirtualMachineUpdate')
    VirtualMachineIdentityUserAssignedIdentitiesValue = cmd.get_models(
        'VirtualMachineIdentityUserAssignedIdentitiesValue')
    from azure.cli.core.commands.arm import assign_identity as assign_identity_helper
    client = _compute_client_factory(cmd.cli_ctx)
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)

    def getter():
        # Fetch the current VM so existing identities can be merged with the new ones.
        return client.virtual_machines.get(resource_group_name, vm_name)

    def setter(vm, external_identities=external_identities):
        # Combine the VM's current identity type with what is being requested:
        # any mix of an existing/requested system identity with user identities
        # collapses to system_assigned_user_assigned.
        if vm.identity and vm.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vm.identity and vm.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vm.identity and vm.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        vm.identity = VirtualMachineIdentity(type=identity_types)
        if external_identities:
            vm.identity.user_assigned_identities = {}
            for identity in external_identities:
                vm.identity.user_assigned_identities[identity] = VirtualMachineIdentityUserAssignedIdentitiesValue()
        # PATCH only the identity so no other VM properties are touched.
        vm_patch = VirtualMachineUpdate()
        vm_patch.identity = vm.identity
        return patch_vm(cmd, resource_group_name, vm_name, vm_patch)

    assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)
    vm = client.virtual_machines.get(resource_group_name, vm_name)
    return _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id,
                                    vm.identity.user_assigned_identities)
# endregion
# region VirtualMachines
def capture_vm(cmd, resource_group_name, vm_name, vhd_name_prefix,
               storage_container='vhds', overwrite=True):
    """Capture a (generalized, deallocated) VM's disks as VHD blobs.

    Waits for the capture operation and prints the resulting template
    fragment as JSON to stdout; returns nothing.
    """
    VirtualMachineCaptureParameters = cmd.get_models('VirtualMachineCaptureParameters')
    client = _compute_client_factory(cmd.cli_ctx)
    parameter = VirtualMachineCaptureParameters(vhd_prefix=vhd_name_prefix,
                                                destination_container_name=storage_container,
                                                overwrite_vhds=overwrite)
    poller = client.virtual_machines.capture(resource_group_name, vm_name, parameter)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    # Newer API versions expose the template under 'output'; older ones under 'resources'.
    output = getattr(result, 'output', None) or result.resources[0]
    print(json.dumps(output, indent=2))  # pylint: disable=no-member
# pylint: disable=too-many-locals, unused-argument, too-many-statements, too-many-branches
def create_vm(cmd, vm_name, resource_group_name, image=None, size='Standard_DS1_v2', location=None, tags=None,
              no_wait=False, authentication_type=None, admin_password=None, computer_name=None,
              admin_username=None, ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
              availability_set=None, nics=None, nsg=None, nsg_rule=None, accelerated_networking=None,
              private_ip_address=None, public_ip_address=None, public_ip_address_allocation='dynamic',
              public_ip_address_dns_name=None, public_ip_sku=None, os_disk_name=None, os_type=None,
              storage_account=None, os_caching=None, data_caching=None, storage_container_name=None, storage_sku=None,
              use_unmanaged_disk=False, attach_os_disk=None, os_disk_size_gb=None, attach_data_disks=None,
              data_disk_sizes_gb=None, disk_info=None,
              vnet_name=None, vnet_address_prefix='10.0.0.0/16', subnet=None, subnet_address_prefix='10.0.0.0/24',
              storage_profile=None, os_publisher=None, os_offer=None, os_sku=None, os_version=None,
              storage_account_type=None, vnet_type=None, nsg_type=None, public_ip_address_type=None, nic_type=None,
              validate=False, custom_data=None, secrets=None, plan_name=None, plan_product=None, plan_publisher=None,
              plan_promotion_code=None, license_type=None, assign_identity=None, identity_scope=None,
              identity_role='Contributor', identity_role_id=None, application_security_groups=None, zone=None,
              boot_diagnostics_storage=None, ultra_ssd_enabled=None, ephemeral_os_disk=None,
              proximity_placement_group=None, dedicated_host=None, dedicated_host_group=None, aux_subscriptions=None,
              priority=None, max_price=None, eviction_policy=None, enable_agent=None, workspace=None, vmss=None,
              os_disk_encryption_set=None, data_disk_encryption_sets=None, specialized=None,
              encryption_at_host=None, enable_auto_update=None, patch_mode=None):
    """Create a virtual machine by assembling and deploying an ARM template.

    The validators have already pre-processed most arguments (the *_type
    parameters say whether each dependent resource is 'new' or existing).
    This function builds ARM resources for the storage account, VNet/subnet,
    NSG, public IP, NIC and the VM itself, then submits a single deployment.
    Returns the VM details (with identity info attached when requested), or
    the validation/deployment result for --validate / --no-wait.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.cli.core.util import random_string, hash_string
    from azure.cli.core.commands.arm import ArmTemplateBuilder
    from azure.cli.command_modules.vm._template_builder import (build_vm_resource,
                                                                build_storage_account_resource, build_nic_resource,
                                                                build_vnet_resource, build_nsg_resource,
                                                                build_public_ip_resource, StorageProfile,
                                                                build_msi_role_assignment,
                                                                build_vm_linux_log_analytics_workspace_agent,
                                                                build_vm_windows_log_analytics_workspace_agent)
    from msrestazure.tools import resource_id, is_valid_resource_id, parse_resource_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # Expand bare disk-encryption-set names into full resource ids.
    if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
        os_disk_encryption_set = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)
    if data_disk_encryption_sets is None:
        data_disk_encryption_sets = []
    for i, des in enumerate(data_disk_encryption_sets):
        if des is not None and not is_valid_resource_id(des):
            data_disk_encryption_sets[i] = resource_id(
                subscription=subscription_id, resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)
    # disk_info was produced by the validators; its OS entry carries the SKU.
    storage_sku = disk_info['os'].get('storageAccountType')
    network_id_template = resource_id(
        subscription=subscription_id, resource_group=resource_group_name,
        namespace='Microsoft.Network')
    vm_id = resource_id(
        subscription=subscription_id, resource_group=resource_group_name,
        namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)
    # determine final defaults and calculated values
    tags = tags or {}
    os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vm_id, length=10)) if use_unmanaged_disk else None)
    storage_container_name = storage_container_name or 'vhds'
    # Build up the ARM template
    master_template = ArmTemplateBuilder()
    vm_dependencies = []
    if storage_account_type == 'new':
        storage_account = storage_account or 'vhdstorage{}'.format(
            hash_string(vm_id, length=14, force_lower=True))
        vm_dependencies.append('Microsoft.Storage/storageAccounts/{}'.format(storage_account))
        master_template.add_resource(build_storage_account_resource(cmd, storage_account, location,
                                                                    tags, storage_sku))
    nic_name = None
    if nic_type == 'new':
        nic_name = '{}VMNic'.format(vm_name)
        vm_dependencies.append('Microsoft.Network/networkInterfaces/{}'.format(nic_name))
        nic_dependencies = []
        if vnet_type == 'new':
            subnet = subnet or '{}Subnet'.format(vm_name)
            vnet_exists = False
            if vnet_name:
                from azure.cli.command_modules.vm._vm_utils import check_existence
                vnet_exists = \
                    check_existence(cmd.cli_ctx, vnet_name, resource_group_name, 'Microsoft.Network', 'virtualNetworks')
            if vnet_exists:
                # VNet already exists but the subnet does not: add the subnet in place.
                from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection
                from azure.cli.command_modules.vm._validators import get_network_client
                client = get_network_client(cmd.cli_ctx).virtual_networks
                vnet = cached_get(cmd, client.get, resource_group_name, vnet_name)
                Subnet = cmd.get_models('Subnet', resource_type=ResourceType.MGMT_NETWORK)
                subnet_obj = Subnet(
                    name=subnet,
                    address_prefixes=[subnet_address_prefix],
                    address_prefix=subnet_address_prefix
                )
                upsert_to_collection(vnet, 'subnets', subnet_obj, 'name')
                try:
                    cached_put(cmd, client.create_or_update, vnet, resource_group_name, vnet_name).result()
                except Exception:
                    raise CLIError('Subnet({}) does not exist, but failed to create a new subnet with address '
                                   'prefix {}. It may be caused by name or address prefix conflict. Please specify '
                                   'an appropriate subnet name with --subnet or a valid address prefix value with '
                                   '--subnet-address-prefix.'.format(subnet, subnet_address_prefix))
            if not vnet_exists:
                vnet_name = vnet_name or '{}VNET'.format(vm_name)
                nic_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
                master_template.add_resource(build_vnet_resource(
                    cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix))
        if nsg_type == 'new':
            if nsg_rule is None:
                # Default inbound rule matches the OS's usual remote-access port.
                nsg_rule = 'RDP' if os_type.lower() == 'windows' else 'SSH'
            nsg = nsg or '{}NSG'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg))
            master_template.add_resource(build_nsg_resource(cmd, nsg, location, tags, nsg_rule))
        if public_ip_address_type == 'new':
            public_ip_address = public_ip_address or '{}PublicIP'.format(vm_name)
            nic_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(
                public_ip_address))
            master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location, tags,
                                                                  public_ip_address_allocation,
                                                                  public_ip_address_dns_name,
                                                                  public_ip_sku, zone))
        subnet_id = subnet if is_valid_resource_id(subnet) else \
            '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)
        nsg_id = None
        if nsg:
            nsg_id = nsg if is_valid_resource_id(nsg) else \
                '{}/networkSecurityGroups/{}'.format(network_id_template, nsg)
        public_ip_address_id = None
        if public_ip_address:
            public_ip_address_id = public_ip_address if is_valid_resource_id(public_ip_address) \
                else '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address)
        nics = [
            {'id': '{}/networkInterfaces/{}'.format(network_id_template, nic_name)}
        ]
        nic_resource = build_nic_resource(
            cmd, nic_name, location, tags, vm_name, subnet_id, private_ip_address, nsg_id,
            public_ip_address_id, application_security_groups, accelerated_networking=accelerated_networking)
        nic_resource['dependsOn'] = nic_dependencies
        master_template.add_resource(nic_resource)
    else:
        # Using an existing NIC
        invalid_parameters = [nsg, public_ip_address, subnet, vnet_name, application_security_groups]
        if any(invalid_parameters):
            raise CLIError('When specifying an existing NIC, do not specify NSG, '
                           'public IP, ASGs, VNet or subnet.')
        if accelerated_networking is not None:
            logger.warning('When specifying an existing NIC, do not specify accelerated networking. '
                           'Ignore --accelerated-networking now. '
                           'This will trigger an error instead of a warning in future releases.')
    os_vhd_uri = None
    if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]:
        # Unmanaged disk: construct the OS VHD URI inside the storage account.
        storage_account_name = storage_account.rsplit('/', 1)
        storage_account_name = storage_account_name[1] if \
            len(storage_account_name) > 1 else storage_account_name[0]
        os_vhd_uri = 'https://{}.blob.{}/{}/{}.vhd'.format(
            storage_account_name, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name, os_disk_name)
    elif storage_profile == StorageProfile.SASpecializedOSDisk:
        os_vhd_uri = attach_os_disk
        # Disk name is the blob name without the trailing '.vhd'.
        os_disk_name = attach_os_disk.rsplit('/', 1)[1][:-4]
    if custom_data:
        custom_data = read_content_if_is_file(custom_data)
    if secrets:
        secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])
    vm_resource = build_vm_resource(
        cmd=cmd, name=vm_name, location=location, tags=tags, size=size, storage_profile=storage_profile, nics=nics,
        admin_username=admin_username, availability_set_id=availability_set, admin_password=admin_password,
        ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, image_reference=image,
        os_disk_name=os_disk_name, custom_image_os_type=os_type, authentication_type=authentication_type,
        os_publisher=os_publisher, os_offer=os_offer, os_sku=os_sku, os_version=os_version, os_vhd_uri=os_vhd_uri,
        attach_os_disk=attach_os_disk, os_disk_size_gb=os_disk_size_gb, custom_data=custom_data, secrets=secrets,
        license_type=license_type, zone=zone, disk_info=disk_info,
        boot_diagnostics_storage_uri=boot_diagnostics_storage, ultra_ssd_enabled=ultra_ssd_enabled,
        proximity_placement_group=proximity_placement_group, computer_name=computer_name,
        dedicated_host=dedicated_host, priority=priority, max_price=max_price, eviction_policy=eviction_policy,
        enable_agent=enable_agent, vmss=vmss, os_disk_encryption_set=os_disk_encryption_set,
        data_disk_encryption_sets=data_disk_encryption_sets, specialized=specialized,
        encryption_at_host=encryption_at_host, dedicated_host_group=dedicated_host_group,
        enable_auto_update=enable_auto_update, patch_mode=patch_mode)
    vm_resource['dependsOn'] = vm_dependencies
    if plan_name:
        vm_resource['plan'] = {
            'name': plan_name,
            'publisher': plan_publisher,
            'product': plan_product,
            'promotionCode': plan_promotion_code
        }
    enable_local_identity = None
    if assign_identity is not None:
        vm_resource['identity'], _, _, enable_local_identity = _build_identities_info(assign_identity)
        if identity_scope:
            role_assignment_guid = str(_gen_guid())
            master_template.add_resource(build_msi_role_assignment(vm_name, vm_id, identity_role_id,
                                                                   role_assignment_guid, identity_scope))
    if workspace is not None:
        # Install the Log Analytics (MMA) agent extension for the chosen workspace.
        workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
        master_template.add_secure_parameter('workspaceId', workspace_id)
        if os_type.lower() == 'linux':
            vm_mmaExtension_resource = build_vm_linux_log_analytics_workspace_agent(cmd, vm_name, location)
            master_template.add_resource(vm_mmaExtension_resource)
        elif os_type.lower() == 'windows':
            vm_mmaExtension_resource = build_vm_windows_log_analytics_workspace_agent(cmd, vm_name, location)
            master_template.add_resource(vm_mmaExtension_resource)
        else:
            logger.warning("Unsupported OS type. Skip the connection step for log analytics workspace.")
    master_template.add_resource(vm_resource)
    if admin_password:
        master_template.add_secure_parameter('adminPassword', admin_password)
    template = master_template.build()
    parameters = master_template.build_parameters()
    # deploy ARM template
    deployment_name = 'vm_deploy_' + random_string(32)
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                     aux_subscriptions=aux_subscriptions).deployments
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    if validate:
        from azure.cli.command_modules.vm._vm_utils import log_pprint_template
        log_pprint_template(template)
        log_pprint_template(parameters)
    # Newer resource API versions wrap the properties in a Deployment object.
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = client.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        # creates the VM deployment
        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, deployment)
        LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, deployment_name, deployment))
    else:
        if validate:
            return client.validate(resource_group_name, deployment_name, properties)
        # creates the VM deployment
        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, properties)
        LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, deployment_name, properties))
    vm = get_vm_details(cmd, resource_group_name, vm_name)
    if assign_identity is not None:
        if enable_local_identity and not identity_scope:
            _show_missing_access_warning(resource_group_name, vm_name, 'vm')
        setattr(vm, 'identity', _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id,
                                                         vm.identity.user_assigned_identities))
    if workspace is not None:
        workspace_name = parse_resource_id(workspace_id)['name']
        _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)
    return vm
def auto_shutdown_vm(cmd, resource_group_name, vm_name, off=None, email=None, webhook=None, time=None,
                     location=None):
    """Configure (or, with --off, remove) a DevTestLabs auto-shutdown schedule for a VM.

    :param off: when truthy, delete the existing schedule instead of creating one.
    :param time: daily shutdown time (required unless --off).
    :raises CLIError: when --time is missing and --off was not requested.
    """
    from msrestazure.tools import resource_id
    from azure.mgmt.devtestlabs.models import Schedule
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    client = _dev_test_labs_client_factory(cmd.cli_ctx, subscription_id)
    # DevTestLabs requires this well-known schedule name for compute VM shutdown.
    name = 'shutdown-computevm-' + vm_name
    vm_id = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
                        namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)
    if off:
        if not (email is None and webhook is None and time is None):
            # I don't want to disrupt users. So I warn instead of raising an error.
            logger.warning('If --off, other parameters will be ignored.')
        return client.global_schedules.delete(resource_group_name, name)
    if time is None:
        raise CLIError('usage error: --time is a required parameter')
    notification_settings = None
    if webhook:
        notification_settings = {
            'emailRecipient': email,
            'webhookUrl': webhook,
            'timeInMinutes': 30,
            'status': 'Enabled'
        }
    schedule = Schedule(status='Enabled',
                        target_resource_id=vm_id,
                        daily_recurrence={'time': time},
                        notification_settings=notification_settings,
                        time_zone_id='UTC',
                        task_type='ComputeVmShutdownTask',
                        location=location)
    return client.global_schedules.create_or_update(resource_group_name, name, schedule)
def get_instance_view(cmd, resource_group_name, vm_name):
    """Return the VM model with its instance view expanded."""
    return get_vm(cmd, resource_group_name, vm_name, expand='instanceView')
def get_vm(cmd, resource_group_name, vm_name, expand=None):
    """Fetch a virtual machine model; `expand` may request e.g. 'instanceView'."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    return compute_client.virtual_machines.get(resource_group_name, vm_name, expand=expand)
def get_vm_details(cmd, resource_group_name, vm_name):
    """Return a VM instance-view model augmented with network details.

    Adds `power_state`, `public_ips`, `fqdns`, `private_ips` and
    `mac_addresses` attributes (each a comma-joined string collected from the
    VM's NICs and their public-IP resources), then removes the raw
    `instance_view` before returning.
    """
    from msrestazure.tools import parse_resource_id
    from azure.cli.command_modules.vm._vm_utils import get_target_network_api
    result = get_instance_view(cmd, resource_group_name, vm_name)
    network_client = get_mgmt_service_client(
        cmd.cli_ctx, ResourceType.MGMT_NETWORK, api_version=get_target_network_api(cmd.cli_ctx))
    public_ips = []
    fqdns = []
    private_ips = []
    mac_addresses = []
    # Walk every NIC attached to the VM and collect addresses per ip-configuration.
    for nic_ref in result.network_profile.network_interfaces:
        nic_parts = parse_resource_id(nic_ref.id)
        nic = network_client.network_interfaces.get(nic_parts['resource_group'], nic_parts['name'])
        if nic.mac_address:
            mac_addresses.append(nic.mac_address)
        for ip_configuration in nic.ip_configurations:
            if ip_configuration.private_ip_address:
                private_ips.append(ip_configuration.private_ip_address)
            if ip_configuration.public_ip_address:
                # The public IP is only a reference; resolve it to read address/FQDN.
                res = parse_resource_id(ip_configuration.public_ip_address.id)
                public_ip_info = network_client.public_ip_addresses.get(res['resource_group'],
                                                                        res['name'])
                if public_ip_info.ip_address:
                    public_ips.append(public_ip_info.ip_address)
                if public_ip_info.dns_settings:
                    fqdns.append(public_ip_info.dns_settings.fqdn)
    # Join the display status of the 'PowerState/...' code(s), e.g. 'VM running'.
    setattr(result, 'power_state',
            ','.join([s.display_status for s in result.instance_view.statuses if s.code.startswith('PowerState/')]))
    setattr(result, 'public_ips', ','.join(public_ips))
    setattr(result, 'fqdns', ','.join(fqdns))
    setattr(result, 'private_ips', ','.join(private_ips))
    setattr(result, 'mac_addresses', ','.join(mac_addresses))
    del result.instance_view  # leave only the derived summary attributes
    return result
def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None):
    """List compute SKUs, optionally filtered by size, zone, type and availability.

    Without --all, SKUs the subscription cannot use are hidden.
    """
    from ._vm_utils import list_sku_info
    skus = list_sku_info(cmd.cli_ctx, location)
    if not show_all:
        # Drop SKUs that carry a 'NotAvailableForSubscription' restriction.
        def _usable(sku):
            return not any(r.reason_code == 'NotAvailableForSubscription'
                           for r in (sku.restrictions or []))
        skus = [s for s in skus if _usable(s)]
    if resource_type:
        skus = [s for s in skus if s.resource_type.lower() == resource_type.lower()]
    if size:
        # Size filtering only makes sense for VM SKUs; match on partial name.
        skus = [s for s in skus if s.resource_type == 'virtualMachines' and size.lower() in s.name.lower()]
    if zone:
        skus = [s for s in skus if s.location_info and s.location_info[0].zones]
    return skus
def list_vm(cmd, resource_group_name=None, show_details=False):
    """List VMs in a resource group, or across the subscription when none is given."""
    client = _compute_client_factory(cmd.cli_ctx)
    if resource_group_name:
        vms = client.virtual_machines.list(resource_group_name=resource_group_name)
    else:
        vms = client.virtual_machines.list_all()
    if show_details:
        # Expensive path: one extra round of calls per VM for network/power info.
        return [get_vm_details(cmd, _parse_rg_name(vm.id)[0], vm.name) for vm in vms]
    return list(vms)
def list_vm_ip_addresses(cmd, resource_group_name=None, vm_name=None):
    """List private/public IP address info for VMs, driven entirely by NIC data.

    Starts from NICs rather than VMs: the owning VM's resource group and name
    are parsed out of the NIC's ``virtual_machine.id`` reference, so no calls
    to the compute resource provider are needed.
    """
    # is available in the Id, we don't need to make any calls to the compute RP)
    network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
    nics = network_client.network_interfaces.list_all()
    public_ip_addresses = network_client.public_ip_addresses.list_all()
    # Pre-index public IPs by resource id for O(1) lookup per ip-configuration.
    ip_address_lookup = {pip.id: pip for pip in list(public_ip_addresses)}
    result = []
    # Only NICs attached to a VM are of interest here.
    for nic in [n for n in list(nics) if n.virtual_machine]:
        nic_resource_group, nic_vm_name = _parse_rg_name(nic.virtual_machine.id)
        # If provided, make sure that resource group name and vm name match the NIC we are
        # looking at before adding it to the result...
        same_resource_group_name = (resource_group_name is None or
                                    resource_group_name.lower() == nic_resource_group.lower())
        same_vm_name = (vm_name is None or
                        vm_name.lower() == nic_vm_name.lower())
        if same_resource_group_name and same_vm_name:
            network_info = {
                'privateIpAddresses': [],
                'publicIpAddresses': []
            }
            for ip_configuration in nic.ip_configurations:
                network_info['privateIpAddresses'].append(ip_configuration.private_ip_address)
                if ip_configuration.public_ip_address and ip_configuration.public_ip_address.id in ip_address_lookup:
                    public_ip_address = ip_address_lookup[ip_configuration.public_ip_address.id]
                    public_ip_addr_info = {
                        'id': public_ip_address.id,
                        'name': public_ip_address.name,
                        'ipAddress': public_ip_address.ip_address,
                        'ipAllocationMethod': public_ip_address.public_ip_allocation_method
                    }
                    try:
                        # 'zones' may be absent, empty or None depending on API version.
                        public_ip_addr_info['zone'] = public_ip_address.zones[0]
                    except (AttributeError, IndexError, TypeError):
                        pass
                    network_info['publicIpAddresses'].append(public_ip_addr_info)
            result.append({
                'virtualMachine': {
                    'resourceGroup': nic_resource_group,
                    'name': nic_vm_name,
                    'network': network_info
                }
            })
    return result
def open_vm_port(cmd, resource_group_name, vm_name, port, priority=900, network_security_group_name=None,
                 apply_to_subnet=False):
    """Open an inbound port on the NSG guarding a VM's NIC (or its subnet).

    Finds (or creates) the network security group on the VM's single NIC — or,
    with ``apply_to_subnet``, on the NIC's subnet — and adds an allow-all
    inbound rule for ``port`` ('*' opens all ports).

    :param priority: NSG rule priority for the new rule.
    :param network_security_group_name: name used only when a new NSG must be created.
    :raises CLIError: when the VM has no network profile, no NIC, or multiple NICs.
    :return: the updated network security group.
    """
    from msrestazure.tools import parse_resource_id
    network = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
    vm = get_vm(cmd, resource_group_name, vm_name)
    location = vm.location
    if not vm.network_profile:
        raise CLIError("Network profile not found for VM '{}'".format(vm_name))
    nic_ids = list(vm.network_profile.network_interfaces)
    if len(nic_ids) > 1:
        raise CLIError('Multiple NICs is not supported for this command. Create rules on the NSG '
                       'directly.')
    if not nic_ids:
        raise CLIError("No NIC associated with VM '{}'".format(vm_name))

    # get existing NSG or create a new one
    created_nsg = False
    nic = network.network_interfaces.get(resource_group_name, os.path.split(nic_ids[0].id)[1])
    if not apply_to_subnet:
        nsg = nic.network_security_group
    else:
        subnet_id = parse_resource_id(nic.ip_configurations[0].subnet.id)
        subnet = network.subnets.get(resource_group_name, subnet_id['name'], subnet_id['child_name_1'])
        nsg = subnet.network_security_group
    if not nsg:
        NetworkSecurityGroup = \
            cmd.get_models('NetworkSecurityGroup', resource_type=ResourceType.MGMT_NETWORK)
        nsg = LongRunningOperation(cmd.cli_ctx, 'Creating network security group')(
            network.network_security_groups.create_or_update(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=NetworkSecurityGroup(location=location)
            )
        )
        created_nsg = True

    # update the NSG with the new rule to allow inbound traffic
    SecurityRule = cmd.get_models('SecurityRule', resource_type=ResourceType.MGMT_NETWORK)
    rule_name = 'open-port-all' if port == '*' else 'open-port-{}'.format(port)
    rule = SecurityRule(protocol='*', access='allow', direction='inbound', name=rule_name,
                        source_port_range='*', destination_port_range=port, priority=priority,
                        source_address_prefix='*', destination_address_prefix='*')
    # NSG fetched via the NIC/subnet may expose only an id, not a name.
    nsg_name = nsg.name or os.path.split(nsg.id)[1]
    LongRunningOperation(cmd.cli_ctx, 'Adding security rule')(
        network.security_rules.create_or_update(
            resource_group_name, nsg_name, rule_name, rule)
    )

    # update the NIC or subnet if a new NSG was created
    if created_nsg and not apply_to_subnet:
        nic.network_security_group = nsg
        LongRunningOperation(cmd.cli_ctx, 'Updating NIC')(network.network_interfaces.create_or_update(
            resource_group_name, nic.name, nic))
    elif created_nsg and apply_to_subnet:
        subnet.network_security_group = nsg
        LongRunningOperation(cmd.cli_ctx, 'Updating subnet')(network.subnets.create_or_update(
            resource_group_name=resource_group_name,
            virtual_network_name=subnet_id['name'],
            subnet_name=subnet_id['child_name_1'],
            subnet_parameters=subnet
        ))
    return network.network_security_groups.get(resource_group_name, nsg_name)
def resize_vm(cmd, resource_group_name, vm_name, size, no_wait=False):
    """Resize a VM; no-op (with a warning) when it is already the requested size."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    current_size = vm.hardware_profile.vm_size  # pylint: disable=no-member
    if current_size == size:
        logger.warning("VM is already %s", size)
        return None
    vm.hardware_profile.vm_size = size
    return set_vm(cmd, vm, no_wait=no_wait)
def restart_vm(cmd, resource_group_name, vm_name, no_wait=False, force=False):
    """Restart a VM; with force=True, redeploy it instead of a plain restart."""
    client = _compute_client_factory(cmd.cli_ctx)
    operation = client.virtual_machines.redeploy if force else client.virtual_machines.restart
    return sdk_no_wait(no_wait, operation, resource_group_name, vm_name)
def set_vm(cmd, instance, lro_operation=None, no_wait=False):
    """Persist an updated VM model via create_or_update.

    :param lro_operation: optional callable used to wait on the poller; defaults
        to a plain LongRunningOperation.
    """
    # Child extension resources cannot be round-tripped through create_or_update.
    # Issue: https://github.com/Azure/autorest/issues/934
    instance.resources = None
    client = _compute_client_factory(cmd.cli_ctx)
    parsed_id = _parse_rg_name(instance.id)
    poller = sdk_no_wait(no_wait, client.virtual_machines.create_or_update,
                         resource_group_name=parsed_id[0],
                         vm_name=parsed_id[1],
                         parameters=instance)
    waiter = lro_operation if lro_operation else LongRunningOperation(cmd.cli_ctx)
    return waiter(poller)
def patch_vm(cmd, resource_group_name, vm_name, vm):
    """Apply a partial (PATCH) update to a VM and block until it completes."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    update_poller = compute_client.virtual_machines.update(resource_group_name, vm_name, vm)
    return LongRunningOperation(cmd.cli_ctx)(update_poller)
def show_vm(cmd, resource_group_name, vm_name, show_details=False):
    """Show a VM; with show_details, include derived network/power-state info."""
    if show_details:
        return get_vm_details(cmd, resource_group_name, vm_name)
    return get_vm(cmd, resource_group_name, vm_name)
def update_vm(cmd, resource_group_name, vm_name, os_disk=None, disk_caching=None,
              write_accelerator=None, license_type=None, no_wait=False, ultra_ssd_enabled=None,
              priority=None, max_price=None, proximity_placement_group=None, workspace=None, **kwargs):
    """Apply the requested property changes to a VM model and PUT it back.

    The generic-update machinery passes the current VM model in
    ``kwargs['parameters']``; each non-None argument mutates that model before
    the final create_or_update call.

    :param os_disk: managed OS disk to swap in (resource id or name).
    :param workspace: Log Analytics workspace to connect the VM to.
    """
    from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
    from ._vm_utils import update_write_accelerator_settings, update_disk_caching
    vm = kwargs['parameters']
    if os_disk is not None:
        if is_valid_resource_id(os_disk):
            disk_id, disk_name = os_disk, parse_resource_id(os_disk)['name']
        else:
            # A bare name: build the disk id in the VM's own subscription/resource group.
            res = parse_resource_id(vm.id)
            disk_id = resource_id(subscription=res['subscription'], resource_group=res['resource_group'],
                                  namespace='Microsoft.Compute', type='disks', name=os_disk)
            disk_name = os_disk
        vm.storage_profile.os_disk.managed_disk.id = disk_id
        vm.storage_profile.os_disk.name = disk_name
    if write_accelerator is not None:
        update_write_accelerator_settings(vm.storage_profile, write_accelerator)
    if disk_caching is not None:
        update_disk_caching(vm.storage_profile, disk_caching)
    if license_type is not None:
        vm.license_type = license_type
    if ultra_ssd_enabled is not None:
        if vm.additional_capabilities is None:
            AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
            vm.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled)
        else:
            vm.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
    if priority is not None:
        vm.priority = priority
    if max_price is not None:
        if vm.billing_profile is None:
            BillingProfile = cmd.get_models('BillingProfile')
            vm.billing_profile = BillingProfile(max_price=max_price)
        else:
            vm.billing_profile.max_price = max_price
    if proximity_placement_group is not None:
        vm.proximity_placement_group = {'id': proximity_placement_group}
    if workspace is not None:
        # Install the monitoring agent extension and wire up the workspace data sources.
        workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
        workspace_name = parse_resource_id(workspace_id)['name']
        _set_log_analytics_workspace_extension(cmd=cmd,
                                               resource_group_name=resource_group_name,
                                               vm=vm,
                                               vm_name=vm_name,
                                               workspace_name=workspace_name)
        os_type = vm.storage_profile.os_disk.os_type.value if vm.storage_profile.os_disk.os_type else None
        _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)
    # A cross-subscription image reference requires an aux-subscription client.
    aux_subscriptions = None
    if vm and vm.storage_profile and vm.storage_profile.image_reference and vm.storage_profile.image_reference.id:
        aux_subscriptions = _parse_aux_subscriptions(vm.storage_profile.image_reference.id)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    return sdk_no_wait(no_wait, client.virtual_machines.create_or_update, resource_group_name, vm_name, **kwargs)
# endregion
# region VirtualMachines AvailabilitySets
def _get_availset(cmd, resource_group_name, name):
    # Fetch a single availability set model.
    client = _compute_client_factory(cmd.cli_ctx)
    return client.availability_sets.get(resource_group_name, name)
def _set_availset(cmd, resource_group_name, name, **kwargs):
    # Create or update an availability set; kwargs are forwarded to the SDK call.
    client = _compute_client_factory(cmd.cli_ctx)
    return client.availability_sets.create_or_update(resource_group_name, name, **kwargs)
# pylint: disable=inconsistent-return-statements
def convert_av_set_to_managed_disk(cmd, resource_group_name, availability_set_name):
    """Convert an availability set to the 'Aligned' SKU (managed-disk capable).

    The fault-domain count is clamped down when the region's 'Aligned' SKU
    supports fewer fault domains than the set currently has. Already-aligned
    sets only log a warning and return None.
    """
    av_set = _get_availset(cmd, resource_group_name, availability_set_name)
    if av_set.sku.name != 'Aligned':
        av_set.sku.name = 'Aligned'

        # let us double check whether the existing FD number is supported
        skus = list_skus(cmd, av_set.location)
        av_sku = next((s for s in skus if s.resource_type == 'availabilitySets' and s.name == 'Aligned'), None)
        if av_sku and av_sku.capabilities:
            max_fd = int(next((c.value for c in av_sku.capabilities if c.name == 'MaximumPlatformFaultDomainCount'),
                              '0'))
            if max_fd and max_fd < av_set.platform_fault_domain_count:
                logger.warning("The fault domain count will be adjusted from %s to %s so to stay within region's "
                               "limitation", av_set.platform_fault_domain_count, max_fd)
                av_set.platform_fault_domain_count = max_fd
        return _set_availset(cmd, resource_group_name=resource_group_name, name=availability_set_name,
                             parameters=av_set)
    logger.warning('Availability set %s is already configured for managed disks.', availability_set_name)
def create_av_set(cmd, availability_set_name, resource_group_name, platform_fault_domain_count=2,
                  platform_update_domain_count=None, location=None, proximity_placement_group=None, unmanaged=False,
                  no_wait=False, tags=None, validate=False):
    """Create an availability set by deploying a generated ARM template.

    Builds a single-resource template, deploys (or only validates) it, then
    re-reads and returns the created availability set. The two deployment
    branches exist because the 2019-10-01 resources API changed the
    validate/create payload from DeploymentProperties to Deployment.
    """
    from azure.cli.core.util import random_string
    from azure.cli.core.commands.arm import ArmTemplateBuilder
    from azure.cli.command_modules.vm._template_builder import build_av_set_resource
    tags = tags or {}
    master_template = ArmTemplateBuilder()
    av_set_resource = build_av_set_resource(cmd, availability_set_name, location, tags,
                                            platform_update_domain_count,
                                            platform_fault_domain_count, unmanaged,
                                            proximity_placement_group=proximity_placement_group)
    master_template.add_resource(av_set_resource)
    template = master_template.build()

    # Random suffix keeps concurrent deployments in the same group from colliding.
    deployment_name = 'av_set_deploy_' + random_string(32)
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = client.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, deployment)
        LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.create_or_update,
                                                      resource_group_name, deployment_name, deployment))
    else:
        if validate:
            return client.validate(resource_group_name, deployment_name, properties)
        if no_wait:
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, properties)
        LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.create_or_update,
                                                      resource_group_name, deployment_name, properties))

    compute_client = _compute_client_factory(cmd.cli_ctx)
    return compute_client.availability_sets.get(resource_group_name, availability_set_name)
def update_av_set(instance, resource_group_name, proximity_placement_group=None):
    """Patch an availability set model in place; only the PPG is updatable here."""
    if proximity_placement_group is None:
        return instance
    instance.proximity_placement_group = {'id': proximity_placement_group}
    return instance
def list_av_sets(cmd, resource_group_name=None):
    """List availability sets in one resource group, or subscription-wide."""
    availset_ops = _compute_client_factory(cmd.cli_ctx).availability_sets
    if not resource_group_name:
        # $expand keeps VM references in the payload for the subscription-wide listing.
        return availset_ops.list_by_subscription(expand='virtualMachines/$ref')
    return availset_ops.list(resource_group_name)
def disable_boot_diagnostics(cmd, resource_group_name, vm_name):
    """Turn off boot diagnostics on a VM; silently no-op when already disabled."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    profile = vm.diagnostics_profile
    currently_enabled = bool(profile and profile.boot_diagnostics and profile.boot_diagnostics.enabled)
    if not currently_enabled:
        return
    profile.boot_diagnostics.enabled = False
    profile.boot_diagnostics.storage_uri = None
    set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'disabling boot diagnostics', 'done'))
def enable_boot_diagnostics(cmd, resource_group_name, vm_name, storage):
    """Enable boot diagnostics on a VM, logging to the given storage account.

    Skips the update when diagnostics is already enabled against the same
    storage endpoint (compared case-insensitively).
    """
    from azure.cli.command_modules.vm._vm_utils import get_storage_blob_uri
    vm = get_vm(cmd, resource_group_name, vm_name)
    storage_uri = get_storage_blob_uri(cmd.cli_ctx, storage)
    current = vm.diagnostics_profile and vm.diagnostics_profile.boot_diagnostics
    if (current and current.enabled and current.storage_uri and
            current.storage_uri.lower() == storage_uri.lower()):
        return  # already configured as requested
    DiagnosticsProfile, BootDiagnostics = cmd.get_models('DiagnosticsProfile', 'BootDiagnostics')
    new_boot_diag = BootDiagnostics(enabled=True, storage_uri=storage_uri)
    if vm.diagnostics_profile is None:
        vm.diagnostics_profile = DiagnosticsProfile(boot_diagnostics=new_boot_diag)
    else:
        vm.diagnostics_profile.boot_diagnostics = new_boot_diag
    set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done'))
class BootLogStreamWriter:
    """File-like adapter that forwards boot-log content to a wrapped stream.

    Accepts both text and bytes chunks; when the underlying stream cannot
    encode a chunk, it falls back to an ASCII transliteration and warns.
    """

    def __init__(self, out):
        # Destination stream (e.g. sys.stdout).
        self.out = out

    def write(self, str_or_bytes):
        text = str_or_bytes.decode('utf8') if isinstance(str_or_bytes, bytes) else str_or_bytes
        try:
            self.out.write(text)
        except UnicodeEncodeError:
            # The shell cannot render some characters: strip to ASCII and warn.
            import unicodedata
            ascii_only = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
            self.out.write(ascii_only.decode())
            logger.warning("A few unicode characters have been ignored because the shell is not able to display. "
                           "To see the full log, use a shell with unicode capacity")
def get_boot_log(cmd, resource_group_name, vm_name):
    """Stream a VM's boot-diagnostics serial console log to stdout.

    Resolves the storage account that hosts the boot-diagnostics blob, fetches
    an account key, and streams the blob through BootLogStreamWriter.

    :raises CLIError: when boot diagnostics is disabled, no log blob exists,
        or the backing storage account cannot be found.
    """
    import re
    import sys
    from azure.cli.core.profiles import get_sdk
    # get_sdk uses 'module#attribute' paths to stay profile/API-version aware.
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob.blockblobservice#BlockBlobService')

    client = _compute_client_factory(cmd.cli_ctx)
    virtual_machine = client.virtual_machines.get(resource_group_name, vm_name, expand='instanceView')
    # pylint: disable=no-member
    if (not virtual_machine.instance_view.boot_diagnostics or
            not virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri):
        raise CLIError('Please enable boot diagnostics.')
    blob_uri = virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri

    # Find storage account for diagnostics: match the blob URI against each
    # account's primary blob endpoint.
    storage_mgmt_client = _get_storage_management_client(cmd.cli_ctx)
    if not blob_uri:
        raise CLIError('No console log available')
    try:
        storage_accounts = storage_mgmt_client.storage_accounts.list()
        matching_storage_account = (a for a in list(storage_accounts)
                                    if blob_uri.startswith(a.primary_endpoints.blob))
        storage_account = next(matching_storage_account)
    except StopIteration:
        raise CLIError('Failed to find storage account for console log file')

    # The account's resource group may differ from the VM's; parse it from the id.
    regex = r'/subscriptions/[^/]+/resourceGroups/(?P<rg>[^/]+)/.+'
    match = re.search(regex, storage_account.id, re.I)
    rg = match.group('rg')
    # Get account key
    keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name)

    # Extract container and blob name from url...
    container, blob = urlparse(blob_uri).path.split('/')[-2:]

    storage_client = get_data_service_client(
        cmd.cli_ctx,
        BlockBlobService,
        storage_account.name,
        keys.keys[0].value,
        endpoint_suffix=cmd.cli_ctx.cloud.suffixes.storage_endpoint)  # pylint: disable=no-member

    # our streamwriter not seekable, so no parallel.
    storage_client.get_blob_to_stream(container, blob, BootLogStreamWriter(sys.stdout), max_connections=1)
# endregion
# region VirtualMachines Diagnostics
def set_diagnostics_extension(
        cmd, resource_group_name, vm_name, settings, protected_settings=None, version=None,
        no_auto_upgrade=False):
    """Install/update the OS-appropriate diagnostics extension (LAD or WAD) on a VM.

    On Linux, an installed extension whose major version differs from the one
    in ``extension_mappings`` is deleted first, since in-place upgrade across
    majors is not supported.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    vm = client.virtual_machines.get(resource_group_name, vm_name, 'instanceView')
    # pylint: disable=no-member
    is_linux_os = _is_linux_os(vm)
    vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT
    if is_linux_os:  # check incompatible version
        exts = vm.instance_view.extensions or []
        major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0]
        if next((e for e in exts if e.name == vm_extension_name and
                 not e.type_handler_version.startswith(major_ver + '.')), None):
            logger.warning('There is an incompatible version of diagnostics extension installed. '
                           'We will update it with a new version')
            poller = client.virtual_machine_extensions.delete(resource_group_name, vm_name,
                                                              vm_extension_name)
            LongRunningOperation(cmd.cli_ctx)(poller)

    return set_extension(cmd, resource_group_name, vm_name, vm_extension_name,
                         extension_mappings[vm_extension_name]['publisher'],
                         version or extension_mappings[vm_extension_name]['version'],
                         settings,
                         protected_settings,
                         no_auto_upgrade)
def show_default_diagnostics_configuration(is_windows_os=False):
    """Return default diagnostics public settings and warn about protected settings."""
    public_settings = get_default_diag_config(is_windows_os)
    # pylint: disable=line-too-long
    # LAD and WAD are not consistent on sas token format. Call it out here
    sas_placeholder = "__SAS_TOKEN_{}__".format(
        "WITH_LEADING_QUESTION_MARK" if is_windows_os else "WITHOUT_LEADING_QUESTION_MARK")
    protected_settings_info = json.dumps({
        'storageAccountName': "__STORAGE_ACCOUNT_NAME__",
        "storageAccountSasToken": sas_placeholder
    }, indent=2)
    logger.warning('Protected settings with storage account info is required to work with the default configurations, e.g. \n%s', protected_settings_info)
    return public_settings
# endregion
# region VirtualMachines Disks (Managed)
def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk, new=False, sku=None,
                             size_gb=1023, lun=None, caching=None, enable_write_accelerator=False):
    """Attach a managed data disk to a VM, creating it first when ``new`` is set.

    :param disk: disk resource id (attach) or, with ``new``, an id whose name
        part is used for the disk to create.
    :param lun: target LUN; when None the next free LUN on the VM is used.
    """
    from msrestazure.tools import parse_resource_id
    vm = get_vm(cmd, resource_group_name, vm_name)
    DataDisk, ManagedDiskParameters, DiskCreateOption = cmd.get_models(
        'DataDisk', 'ManagedDiskParameters', 'DiskCreateOptionTypes')
    # pylint: disable=no-member
    if lun is None:
        lun = _get_disk_lun(vm.storage_profile.data_disks)
    if new:
        # 'empty' create option provisions a fresh disk alongside the VM update.
        data_disk = DataDisk(lun=lun, create_option=DiskCreateOption.empty,
                             name=parse_resource_id(disk)['name'],
                             disk_size_gb=size_gb, caching=caching,
                             managed_disk=ManagedDiskParameters(storage_account_type=sku))
    else:
        params = ManagedDiskParameters(id=disk, storage_account_type=sku)
        data_disk = DataDisk(lun=lun, create_option=DiskCreateOption.attach, managed_disk=params, caching=caching)
    if enable_write_accelerator:
        data_disk.write_accelerator_enabled = enable_write_accelerator
    vm.storage_profile.data_disks.append(data_disk)
    set_vm(cmd, vm)
def detach_data_disk(cmd, resource_group_name, vm_name, disk_name):
    """Detach a data disk (managed or unmanaged) from a VM by name."""
    # here we handle both unmanaged or managed disk
    vm = get_vm(cmd, resource_group_name, vm_name)
    # pylint: disable=no-member
    target = disk_name.lower()
    remaining = [disk for disk in vm.storage_profile.data_disks if disk.name.lower() != target]
    if len(remaining) == len(vm.storage_profile.data_disks):
        raise CLIError("No disk with the name '{}' was found".format(disk_name))
    vm.storage_profile.data_disks = remaining
    set_vm(cmd, vm)
# endregion
# region VirtualMachines Extensions
def list_extensions(cmd, resource_group_name, vm_name):
    """List the extension sub-resources attached to a VM."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    extension_type = 'Microsoft.Compute/virtualMachines/extensions'
    return [res for res in (vm.resources or []) if res.type == extension_type]
def set_extension(cmd, resource_group_name, vm_name, vm_extension_name, publisher, version=None, settings=None,
                  protected_settings=None, no_auto_upgrade=False, force_update=False, no_wait=False,
                  extension_instance_name=None):
    """Install or update a VM extension.

    The effective instance name is resolved from the VM's current extensions so
    an already-installed extension of the same publisher/type is updated rather
    than duplicated. ``force_update`` stamps a fresh tag to re-run the handler
    even when settings are unchanged.
    """
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    client = _compute_client_factory(cmd.cli_ctx)

    if not extension_instance_name:
        extension_instance_name = vm_extension_name

    VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
    instance_name = _get_extension_instance_name(vm.instance_view, publisher, vm_extension_name,
                                                 suggested_name=extension_instance_name)
    if instance_name != extension_instance_name:
        msg = "A %s extension with name %s already exists. Updating it with your settings..."
        logger.warning(msg, vm_extension_name, instance_name)

    # Resolve 'latest'/partial versions against the published extension images.
    version = _normalize_extension_version(cmd.cli_ctx, publisher, vm_extension_name, version, vm.location)
    ext = VirtualMachineExtension(location=vm.location,
                                  publisher=publisher,
                                  virtual_machine_extension_type=vm_extension_name,
                                  protected_settings=protected_settings,
                                  type_handler_version=version,
                                  settings=settings,
                                  auto_upgrade_minor_version=(not no_auto_upgrade))
    if force_update:
        ext.force_update_tag = str(_gen_guid())
    return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
                       resource_group_name, vm_name, instance_name, ext)
# endregion
# region VirtualMachines Extension Images
def list_vm_extension_images(
        cmd, image_location=None, publisher_name=None, name=None, version=None, latest=False):
    """List available VM extension images, optionally filtered and limited to latest."""
    return load_extension_images_thru_services(
        cmd.cli_ctx, publisher_name, name, version, image_location, latest)
# endregion
# region VirtualMachines Identity
def _remove_identities(cmd, resource_group_name, name, identities, getter, setter):
    """Remove managed identities from a compute resource and persist the change.

    ``identities`` may mix user-assigned identity ids with the MSI_LOCAL_ID
    sentinel (the system-assigned identity). The resource's identity.type is
    transitioned accordingly (e.g. user_assigned -> none when the last
    user-assigned identity is removed).

    :param getter: callable(cmd, rg, name) -> resource model.
    :param setter: callable(rg, name, resource) -> poller persisting the change.
    :return: the resulting identity, or None when the resource has none.
    :raises CLIError: when a requested identity is not on the resource.
    """
    from ._vm_utils import MSI_LOCAL_ID
    ResourceIdentityType = cmd.get_models('ResourceIdentityType', operation_group='virtual_machines')
    remove_system_assigned_identity = False
    if MSI_LOCAL_ID in identities:
        remove_system_assigned_identity = True
        identities.remove(MSI_LOCAL_ID)
    resource = getter(cmd, resource_group_name, name)
    if resource.identity is None:
        return None
    emsis_to_remove = []
    if identities:
        # Identity ids are compared case-insensitively.
        existing_emsis = {x.lower() for x in list((resource.identity.user_assigned_identities or {}).keys())}
        emsis_to_remove = {x.lower() for x in identities}
        non_existing = emsis_to_remove.difference(existing_emsis)
        if non_existing:
            raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
        if not list(existing_emsis - emsis_to_remove):  # if all emsis are gone, we need to update the type
            if resource.identity.type == ResourceIdentityType.user_assigned:
                resource.identity.type = ResourceIdentityType.none
            elif resource.identity.type == ResourceIdentityType.system_assigned_user_assigned:
                resource.identity.type = ResourceIdentityType.system_assigned
            resource.identity.user_assigned_identities = None
    if remove_system_assigned_identity:
        resource.identity.type = (ResourceIdentityType.none
                                  if resource.identity.type == ResourceIdentityType.system_assigned
                                  else ResourceIdentityType.user_assigned)
    if emsis_to_remove:
        if resource.identity.type not in [ResourceIdentityType.none, ResourceIdentityType.system_assigned]:
            # A None value in the dict tells the service to delete that identity.
            resource.identity.user_assigned_identities = {}
            for identity in emsis_to_remove:
                resource.identity.user_assigned_identities[identity] = None
    result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, name, resource))
    return result.identity
def remove_vm_identity(cmd, resource_group_name, vm_name, identities=None):
    """Remove managed identities from a VM; defaults to the system-assigned one."""
    def _persist(resource_group_name, vm_name, vm):
        # PATCH only the identity portion of the VM.
        VirtualMachineUpdate = cmd.get_models('VirtualMachineUpdate', operation_group='virtual_machines')
        compute_client = _compute_client_factory(cmd.cli_ctx)
        return compute_client.virtual_machines.update(
            resource_group_name, vm_name, VirtualMachineUpdate(identity=vm.identity))

    if identities is None:
        from ._vm_utils import MSI_LOCAL_ID
        identities = [MSI_LOCAL_ID]
    return _remove_identities(cmd, resource_group_name, vm_name, identities, get_vm, _persist)
# endregion
# region VirtualMachines Images
def list_vm_images(cmd, image_location=None, publisher_name=None, offer=None, sku=None,
                   all=False):  # pylint: disable=redefined-builtin
    """List VM images from the live service (--all) or the offline alias document.

    Each returned entry gains a 'urn' key of the form publisher:offer:sku:version.
    """
    if all:
        if not (publisher_name or offer or sku):
            logger.warning("You are retrieving all the images from server which could take more than a minute. "
                           "To shorten the wait, provide '--publisher', '--offer' or '--sku'. Partial name search "
                           "is supported.")
        images = load_images_thru_services(cmd.cli_ctx, publisher_name, offer, sku, image_location)
    else:
        images = load_images_from_aliases_doc(cmd.cli_ctx, publisher_name, offer, sku)
        logger.warning(
            'You are viewing an offline list of images, use --all to retrieve an up-to-date list')
    for image in images:
        image['urn'] = ':'.join([image['publisher'], image['offer'], image['sku'], image['version']])
    return images
def show_vm_image(cmd, urn=None, publisher=None, offer=None, sku=None, version=None, location=None):
    """Show details of a platform VM image, addressed by URN or its four parts.

    :param urn: 'publisher:offer:sku:version' (version may be 'latest');
        mutually exclusive with the individual parts.
    :raises CLIError: on conflicting/missing arguments or a malformed URN.
    """
    from azure.cli.core.commands.parameters import get_one_of_subscription_locations
    usage_err = 'usage error: --plan STRING --offer STRING --publish STRING --version STRING | --urn STRING'
    location = location or get_one_of_subscription_locations(cmd.cli_ctx)
    if urn:
        if any([publisher, offer, sku, version]):
            raise CLIError(usage_err)
        terms = urn.split(":")
        # Validate the part count up front (consistent with the terms commands)
        # instead of letting tuple unpacking raise an opaque ValueError.
        if len(terms) != 4:
            raise CLIError('usage error: urn should be in the format of publisher:offer:sku:version.')
        publisher, offer, sku, version = terms
        if version.lower() == 'latest':
            version = _get_latest_image_version(cmd.cli_ctx, location, publisher, offer, sku)
    elif not publisher or not offer or not sku or not version:
        raise CLIError(usage_err)
    client = _compute_client_factory(cmd.cli_ctx)
    return client.virtual_machine_images.get(location, publisher, offer, sku, version)
def accept_market_ordering_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Accept Azure Marketplace terms for an image so it can be used to create VMs."""
    from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
    usage_err = 'usage error: --plan STRING --offer STRING --publish STRING |--urn STRING'
    if urn:
        if publisher or offer or plan:
            raise CLIError(usage_err)
        publisher, offer, _, _ = urn.split(':')
        image = show_vm_image(cmd, urn)
        if not image.plan:
            # No purchase plan on this image, so there is nothing to accept.
            logger.warning("Image '%s' has no terms to accept.", urn)
            return
        plan = image.plan.name
    elif not (publisher and offer and plan):
        raise CLIError(usage_err)
    market_place_client = get_mgmt_service_client(cmd.cli_ctx, MarketplaceOrderingAgreements)
    term = market_place_client.marketplace_agreements.get(publisher, offer, plan)
    term.accepted = True
    return market_place_client.marketplace_agreements.create(publisher, offer, plan, term)
# endregion
def _terms_prepare(cmd, urn, publisher, offer, plan):
    """Resolve (publisher, offer, plan) from a URN, or validate explicit arguments."""
    if urn:
        if publisher or offer or plan:
            raise CLIError('usage error: If using --urn, do not use any of --plan, --offer, --publisher.')
        terms = urn.split(':')
        if len(terms) != 4:
            raise CLIError('usage error: urn should be in the format of publisher:offer:sku:version.')
        publisher, offer = terms[0], terms[1]
        image = show_vm_image(cmd, urn)
        if not image.plan:
            # An image without a plan has no marketplace terms at all.
            raise CLIError("Image '%s' has no terms to accept." % urn)
        return publisher, offer, image.plan.name
    if not (publisher and offer and plan):
        raise CLIError(
            'usage error: If not using --urn, all of --plan, --offer and --publisher should be provided.')
    return publisher, offer, plan
def _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept):
    """Shared worker for accept_terms/cancel_terms: set the 'accepted' flag and persist it."""
    publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
    term_client = cf_vm_image_term(cmd.cli_ctx, '')
    agreement = term_client.get(publisher, offer, plan)
    agreement.accepted = accept
    return term_client.create(publisher, offer, plan, agreement)
def accept_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Accept image terms so that the image can be used to create VMs."""
    return _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept=True)
def cancel_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Cancel previously accepted image terms."""
    return _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept=False)
def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
    """Return the current marketplace terms of an image."""
    publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
    term_client = cf_vm_image_term(cmd.cli_ctx, '')
    return term_client.get(publisher, offer, plan)
# region VirtualMachines NetworkInterfaces (NICs)
def show_vm_nic(cmd, resource_group_name, vm_name, nic):
    """Show details of a NIC attached to a VM; *nic* is matched by resource id, case-insensitively."""
    from msrestazure.tools import parse_resource_id
    vm = get_vm(cmd, resource_group_name, vm_name)
    wanted = nic.lower()
    # pylint: disable=no-member
    for nic_ref in vm.network_profile.network_interfaces:
        if nic_ref.id.lower() == wanted:
            network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
            nic_name = parse_resource_id(nic_ref.id)['name']
            return network_client.network_interfaces.get(resource_group_name, nic_name)
    raise CLIError("NIC '{}' not found on VM '{}'".format(nic, vm_name))
def list_vm_nics(cmd, resource_group_name, vm_name):
    """List the network interface references attached to a VM."""
    # pylint: disable=no-member
    return get_vm(cmd, resource_group_name, vm_name).network_profile.network_interfaces
def add_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
    """Attach additional NICs to a VM, optionally designating which one is primary."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    added = _build_nic_list(cmd, nics)
    combined = _get_existing_nics(vm) + added
    return _update_vm_nics(cmd, vm, combined, primary_nic)
def remove_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
    """Detach the given NICs from a VM, keeping the rest.

    NICs are matched by resource id, case-insensitively. The deletion set is
    built once so the filter is O(n + m) instead of rescanning the deletion
    list for every existing NIC.
    """
    vm = get_vm(cmd, resource_group_name, vm_name)
    nics_to_delete = _build_nic_list(cmd, nics)
    ids_to_delete = {n.id.lower() for n in nics_to_delete}
    survived = [x for x in _get_existing_nics(vm) if x.id.lower() not in ids_to_delete]
    return _update_vm_nics(cmd, vm, survived, primary_nic)
def set_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
    """Replace the VM's NIC list with exactly the given NICs."""
    vm = get_vm(cmd, resource_group_name, vm_name)
    nic_refs = _build_nic_list(cmd, nics)
    return _update_vm_nics(cmd, vm, nic_refs, primary_nic)
def _build_nic_list(cmd, nic_ids):
    """Resolve NIC resource ids into NetworkInterfaceReference objects (primary=False)."""
    NetworkInterfaceReference = cmd.get_models('NetworkInterfaceReference')
    if not nic_ids:
        return []
    # pylint: disable=no-member
    network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
    references = []
    for nic_id in nic_ids:
        rg, name = _parse_rg_name(nic_id)
        nic = network_client.network_interfaces.get(rg, name)
        references.append(NetworkInterfaceReference(id=nic.id, primary=False))
    return references
def _get_existing_nics(vm):
network_profile = getattr(vm, 'network_profile', None)
nics = []
if network_profile is not None:
nics = network_profile.network_interfaces or []
return nics
def _update_vm_nics(cmd, vm, nics, primary_nic):
    """Write *nics* onto the VM's network profile, fix up primary flags, and persist the VM.

    If *primary_nic* (name or resource id) is given, exactly one matching NIC is
    marked primary and all others demoted; otherwise, if no NIC is flagged
    primary, the first one is promoted. Returns the persisted NIC list.
    Raises CLIError when the requested primary NIC is missing or ambiguous.
    """
    NetworkProfile = cmd.get_models('NetworkProfile')
    if primary_nic:
        # primary_nic may be a full resource id or a bare name; _parse_rg_name
        # raises IndexError for a bare name, in which case we use it as-is.
        try:
            _, primary_nic_name = _parse_rg_name(primary_nic)
        except IndexError:
            primary_nic_name = primary_nic
        matched = [n for n in nics if _parse_rg_name(n.id)[1].lower() == primary_nic_name.lower()]
        if not matched:
            raise CLIError('Primary Nic {} is not found'.format(primary_nic))
        if len(matched) > 1:
            raise CLIError('Duplicate Nic entries with name {}'.format(primary_nic))
        # Demote everything first, then promote the single match.
        for n in nics:
            n.primary = False
        matched[0].primary = True
    elif nics:
        # No explicit primary requested: ensure at least one NIC is primary.
        if not [n for n in nics if n.primary]:
            nics[0].primary = True
    network_profile = getattr(vm, 'network_profile', None)
    if network_profile is None:
        vm.network_profile = NetworkProfile(network_interfaces=nics)
    else:
        network_profile.network_interfaces = nics
    return set_vm(cmd, vm).network_profile.network_interfaces
# endregion
# region VirtualMachines RunCommand
def run_command_invoke(cmd, resource_group_name, vm_vmss_name, command_id, scripts=None, parameters=None, instance_id=None):  # pylint: disable=line-too-long
    """Invoke a run-command on a VM, or on one VMSS instance when *instance_id* is given.

    Parameters of the form 'name=value' keep their name; bare values are given
    auto-generated names arg1, arg2, ... because the API requires named arguments.
    """
    RunCommandInput, RunCommandInputParameter = cmd.get_models('RunCommandInput', 'RunCommandInputParameter')
    run_command_input_parameters = []
    auto_arg_name_num = 0
    for raw in (parameters or []):
        if '=' in raw:
            name, value = raw.split('=', 1)
        else:
            # RunCommand API requires named arguments, which doesn't make lots of sense for bash scripts
            auto_arg_name_num += 1
            name, value = 'arg{}'.format(auto_arg_name_num), raw
        run_command_input_parameters.append(RunCommandInputParameter(name=name, value=value))
    client = _compute_client_factory(cmd.cli_ctx)
    run_command_input = RunCommandInput(command_id=command_id, script=scripts,
                                        parameters=run_command_input_parameters)
    # if instance_id, this is a vmss instance
    if instance_id:
        return client.virtual_machine_scale_set_vms.run_command(resource_group_name, vm_vmss_name, instance_id,
                                                                run_command_input)
    # otherwise this is a regular vm instance
    return client.virtual_machines.run_command(resource_group_name, vm_vmss_name, run_command_input)
def vm_run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts=None, parameters=None):
    """VM-specific wrapper around run_command_invoke (no VMSS instance id)."""
    return run_command_invoke(cmd, resource_group_name, vm_name, command_id,
                              scripts=scripts, parameters=parameters)
# endregion
# region VirtualMachines Secrets
def _get_vault_id_from_name(cli_ctx, client, vault_name):
    """Resolve a key vault name to its full resource id within the current subscription."""
    group_name = _get_resource_group_from_vault_name(cli_ctx, vault_name)
    if not group_name:
        raise CLIError("unable to find vault '{}' in current subscription.".format(vault_name))
    return client.get(group_name, vault_name).id
def get_vm_format_secret(cmd, secrets, certificate_store=None, keyvault=None, resource_group_name=None):
    """Convert key vault secret URLs into the VM osProfile 'secrets' structure.

    Certificates are grouped by their source vault; each group carries the
    vault resource id plus its certificate entries.
    """
    from azure.keyvault import KeyVaultId
    import re
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
    grouped_secrets = {}
    # Each input item may carry several secret URLs, one per line.
    merged_secrets = []
    for s in secrets:
        merged_secrets.extend(s.splitlines())
    # group secrets by source vault
    for secret in merged_secrets:
        parsed = KeyVaultId.parse_secret_id(secret)
        vault_name = re.search('://(.+?)\\.', parsed.vault).group(1)
        group = grouped_secrets.get(vault_name)
        if group is None:
            group = grouped_secrets[vault_name] = {
                'vaultCertificates': [],
                'id': keyvault or _get_vault_id_from_name(cmd.cli_ctx, client, vault_name)
            }
        vault_cert = {'certificateUrl': secret}
        if certificate_store:
            vault_cert['certificateStore'] = certificate_store
        group['vaultCertificates'].append(vault_cert)
    # transform the reduced map to vm format
    return [{'sourceVault': {'id': value['id']},
             'vaultCertificates': value['vaultCertificates']}
            for value in grouped_secrets.values()]
def add_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate, certificate_store=None):
    """Add a key vault certificate to a VM's OS profile secrets and persist the VM.

    *certificate* may be either a full secret URL or a bare certificate name;
    a bare name is resolved against the given *keyvault* via the key vault
    data plane. On Windows VMs the store defaults to 'My'; --certificate-store
    is rejected on Linux. Returns the updated secrets list.
    """
    from msrestazure.tools import parse_resource_id
    from ._vm_utils import create_keyvault_data_plane_client, get_key_vault_base_url
    VaultSecretGroup, SubResource, VaultCertificate = cmd.get_models(
        'VaultSecretGroup', 'SubResource', 'VaultCertificate')
    vm = get_vm(cmd, resource_group_name, vm_name)
    if '://' not in certificate:  # has a cert name rather a full url?
        # Resolve the name to the certificate's secret URL via the data plane.
        keyvault_client = create_keyvault_data_plane_client(cmd.cli_ctx)
        cert_info = keyvault_client.get_certificate(
            get_key_vault_base_url(cmd.cli_ctx, parse_resource_id(keyvault)['name']), certificate, '')
        certificate = cert_info.sid
    if not _is_linux_os(vm):
        certificate_store = certificate_store or 'My'
    elif certificate_store:
        raise CLIError('Usage error: --certificate-store is only applicable on Windows VM')
    vault_cert = VaultCertificate(certificate_url=certificate, certificate_store=certificate_store)
    # Reuse an existing secret group for this vault if present (id match, case-insensitive).
    vault_secret_group = next((x for x in vm.os_profile.secrets
                               if x.source_vault and x.source_vault.id.lower() == keyvault.lower()), None)
    if vault_secret_group:
        vault_secret_group.vault_certificates.append(vault_cert)
    else:
        vault_secret_group = VaultSecretGroup(source_vault=SubResource(id=keyvault), vault_certificates=[vault_cert])
        vm.os_profile.secrets.append(vault_secret_group)
    vm = set_vm(cmd, vm)
    return vm.os_profile.secrets
def list_vm_secrets(cmd, resource_group_name, vm_name):
    """List the key vault secret groups configured on a VM's OS profile."""
    os_profile = get_vm(cmd, resource_group_name, vm_name).os_profile
    return os_profile.secrets if os_profile else []
def remove_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate=None):
    """Remove key vault secrets from a VM's OS profile and persist the VM.

    With only *keyvault*: deletes that vault's entire secret group.
    With *certificate* too: deletes only the matching certificate entries
    (by full URL, or by '/<name>/' substring when a bare name is given),
    then purges any groups left without certificates. Returns the updated list.
    """
    vm = get_vm(cmd, resource_group_name, vm_name)
    # support 2 kinds of filter:
    # a. if only keyvault is supplied, we delete its whole vault group.
    # b. if both keyvault and certificate are supplied, we only delete the specific cert entry.
    to_keep = vm.os_profile.secrets
    keyvault_matched = []
    if keyvault:
        keyvault = keyvault.lower()
        keyvault_matched = [x for x in to_keep if x.source_vault and x.source_vault.id.lower() == keyvault]
    if keyvault and not certificate:
        to_keep = [x for x in to_keep if x not in keyvault_matched]
    elif certificate:
        # When no vault was given, search every group for the certificate.
        temp = keyvault_matched if keyvault else to_keep
        cert_url_pattern = certificate.lower()
        if '://' not in cert_url_pattern:  # just a cert name?
            cert_url_pattern = '/' + cert_url_pattern + '/'
        for x in temp:
            x.vault_certificates = ([v for v in x.vault_certificates
                                     if not(v.certificate_url and cert_url_pattern in v.certificate_url.lower())])
        to_keep = [x for x in to_keep if x.vault_certificates]  # purge all groups w/o any cert entries
    vm.os_profile.secrets = to_keep
    vm = set_vm(cmd, vm)
    return vm.os_profile.secrets
# endregion
# region VirtualMachines UnmanagedDisks
def attach_unmanaged_data_disk(cmd, resource_group_name, vm_name, new=False, vhd_uri=None, lun=None,
                               disk_name=None, size_gb=1023, caching=None):
    """Attach a new or existing unmanaged (VHD-based) data disk to a VM.

    A missing disk name is generated from the VM name plus a timestamp; a
    missing VHD URI is derived from the OS disk's blob container. The disk
    size is only sent when creating a new ('empty') disk. Returns the
    persisted VM. Raises CLIError when attaching without a disk name, or when
    the VM uses managed disks (no OS disk VHD to derive a container from).
    """
    DataDisk, DiskCreateOptionTypes, VirtualHardDisk = cmd.get_models(
        'DataDisk', 'DiskCreateOptionTypes', 'VirtualHardDisk')
    if not new and not disk_name:
        raise CLIError('Please provide the name of the existing disk to attach')
    create_option = DiskCreateOptionTypes.empty if new else DiskCreateOptionTypes.attach
    vm = get_vm(cmd, resource_group_name, vm_name)
    if disk_name is None:
        import datetime
        # Timestamped default keeps repeated attaches from colliding.
        disk_name = vm_name + '-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    # pylint: disable=no-member
    if vhd_uri is None:
        # Derive the blob container from the OS disk's VHD; managed-disk VMs have none.
        if not hasattr(vm.storage_profile.os_disk, 'vhd') or not vm.storage_profile.os_disk.vhd:
            raise CLIError('Adding unmanaged disks to a VM with managed disks is not supported')
        blob_uri = vm.storage_profile.os_disk.vhd.uri
        vhd_uri = blob_uri[0:blob_uri.rindex('/') + 1] + disk_name + '.vhd'
    if lun is None:
        lun = _get_disk_lun(vm.storage_profile.data_disks)
    disk = DataDisk(lun=lun, vhd=VirtualHardDisk(uri=vhd_uri), name=disk_name,
                    create_option=create_option,
                    caching=caching, disk_size_gb=size_gb if new else None)
    if vm.storage_profile.data_disks is None:
        vm.storage_profile.data_disks = []
    vm.storage_profile.data_disks.append(disk)
    return set_vm(cmd, vm)
def list_unmanaged_disks(cmd, resource_group_name, vm_name):
    """List the data disks attached to a VM."""
    # pylint: disable=no-member
    return get_vm(cmd, resource_group_name, vm_name).storage_profile.data_disks
# endregion
# region VirtualMachines Users
def _update_linux_access_extension(cmd, vm_instance, resource_group_name, protected_settings,
                                   no_wait=False):
    """Create or update the Linux VM access extension with the given protected settings.

    *protected_settings* carries the operation payload (e.g. user to set,
    user to remove, or a reset-ssh flag); the public settings are left empty.
    Returns the create_or_update result (a poller, or the raw call when
    no_wait is set).
    """
    client = _compute_client_factory(cmd.cli_ctx)
    VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
    # pylint: disable=no-member
    # Reuse any already-installed access extension's instance name so we
    # update it in place rather than adding a duplicate.
    instance_name = _get_extension_instance_name(vm_instance.instance_view,
                                                 extension_mappings[_LINUX_ACCESS_EXT]['publisher'],
                                                 _LINUX_ACCESS_EXT,
                                                 _ACCESS_EXT_HANDLER_NAME)
    publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
        vm_instance.resources, _LINUX_ACCESS_EXT)
    ext = VirtualMachineExtension(location=vm_instance.location,  # pylint: disable=no-member
                                  publisher=publisher,
                                  virtual_machine_extension_type=_LINUX_ACCESS_EXT,
                                  protected_settings=protected_settings,
                                  type_handler_version=version,
                                  settings={},
                                  auto_upgrade_minor_version=auto_upgrade)
    return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
                       resource_group_name, vm_instance.name, instance_name, ext)
def _set_linux_user(cmd, vm_instance, resource_group_name, username,
                    password=None, ssh_key_value=None, no_wait=False):
    """Create/update a user on a Linux VM via the VM access extension.

    When neither a password nor an SSH key is supplied, falls back to the
    caller's default public key at ~/.ssh/id_rsa.pub. Returns the extension
    operation result (raw when no_wait, otherwise via the LRO wrapper).
    """
    protected_settings = {'username': username}
    if password:
        protected_settings['password'] = password
    elif not ssh_key_value:
        # password is already known falsy on this branch (the original also
        # re-tested 'not password', which was redundant): default to SSH.
        ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
    if ssh_key_value:
        protected_settings['ssh_key'] = read_content_if_is_file(ssh_key_value)
    if no_wait:
        return _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                              protected_settings, no_wait)
    poller = _update_linux_access_extension(cmd, vm_instance, resource_group_name,
                                            protected_settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'setting user', 'done')(poller)
def _reset_windows_admin(cmd, vm_instance, resource_group_name, username, password, no_wait=False):
    """Reset the admin account on a Windows VM via the VM access extension.

    The username goes in the public settings and the password in the protected
    settings. Returns the create_or_update result (raw when no_wait, otherwise
    wrapped in the extension LRO helper).
    """
    client = _compute_client_factory(cmd.cli_ctx)
    VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
    publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
        vm_instance.resources, _WINDOWS_ACCESS_EXT)
    # pylint: disable=no-member
    # Reuse an existing access extension instance name so we update in place.
    instance_name = _get_extension_instance_name(vm_instance.instance_view,
                                                 publisher,
                                                 _WINDOWS_ACCESS_EXT,
                                                 _ACCESS_EXT_HANDLER_NAME)
    ext = VirtualMachineExtension(location=vm_instance.location,  # pylint: disable=no-member
                                  publisher=publisher,
                                  virtual_machine_extension_type=_WINDOWS_ACCESS_EXT,
                                  protected_settings={'Password': password},
                                  type_handler_version=version,
                                  settings={'UserName': username},
                                  auto_upgrade_minor_version=auto_upgrade)
    if no_wait:
        return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
                           resource_group_name, vm_instance.name, instance_name, ext)
    poller = client.virtual_machine_extensions.create_or_update(resource_group_name,
                                                                vm_instance.name,
                                                                instance_name, ext)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting admin', 'done')(poller)
def set_user(cmd, resource_group_name, vm_name, username, password=None, ssh_key_value=None,
             no_wait=False):
    """Create or update a user account on a VM.

    Linux VMs go through the Linux access extension (password or SSH key);
    Windows VMs reset the admin account and reject SSH keys.
    """
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    if _is_linux_os(vm):
        return _set_linux_user(cmd, vm, resource_group_name, username, password, ssh_key_value, no_wait)
    if ssh_key_value:
        # Fixed typo in the user-facing message ('appliable' -> 'applicable').
        raise CLIError('SSH key is not applicable on a Windows VM')
    return _reset_windows_admin(cmd, vm, resource_group_name, username, password, no_wait)
def delete_user(cmd, resource_group_name, vm_name, username, no_wait=False):
    """Remove a user account from a Linux VM via the access extension."""
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    if not _is_linux_os(vm):
        raise CLIError('Deleting a user is not supported on Windows VM')
    settings = {'remove_user': username}
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name, settings, no_wait)
    poller = _update_linux_access_extension(cmd, vm, resource_group_name, settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'deleting user', 'done')(poller)
def reset_linux_ssh(cmd, resource_group_name, vm_name, no_wait=False):
    """Reset the SSH configuration on a Linux VM via the access extension."""
    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
    if not _is_linux_os(vm):
        raise CLIError('Resetting SSH is not supported in Windows VM')
    settings = {'reset_ssh': True}
    if no_wait:
        return _update_linux_access_extension(cmd, vm, resource_group_name, settings, no_wait)
    poller = _update_linux_access_extension(cmd, vm, resource_group_name, settings)
    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting SSH', 'done')(poller)
# endregion
# region VirtualMachineScaleSets
def assign_vmss_identity(cmd, resource_group_name, vmss_name, assign_identity=None, identity_role='Contributor',
                         identity_role_id=None, identity_scope=None):
    """Enable system- and/or user-assigned managed identity on a scale set.

    Optionally creates a role assignment when *identity_scope* is given (via
    the shared assign_identity helper). Returns a summary dict built by
    _construct_identity_info. Warns when the VMSS uses manual upgrade mode,
    since instances must be upgraded for the change to take effect.
    """
    VirtualMachineScaleSetIdentity, UpgradeMode, ResourceIdentityType, VirtualMachineScaleSetUpdate = cmd.get_models(
        'VirtualMachineScaleSetIdentity', 'UpgradeMode', 'ResourceIdentityType', 'VirtualMachineScaleSetUpdate')
    IdentityUserAssignedIdentitiesValue = cmd.get_models('VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue')
    from azure.cli.core.commands.arm import assign_identity as assign_identity_helper
    client = _compute_client_factory(cmd.cli_ctx)
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
    def getter():
        return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    def setter(vmss, external_identities=external_identities):
        # Merge the requested identities with whatever is already on the VMSS:
        # any combination that mixes system- and user-assigned (existing or
        # requested) collapses to system_assigned_user_assigned.
        if vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif vmss.identity and vmss.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        vmss.identity = VirtualMachineScaleSetIdentity(type=identity_types)
        if external_identities:
            vmss.identity.user_assigned_identities = {}
            for identity in external_identities:
                vmss.identity.user_assigned_identities[identity] = IdentityUserAssignedIdentitiesValue()
        # Send a PATCH carrying only the identity to avoid clobbering other properties.
        vmss_patch = VirtualMachineScaleSetUpdate()
        vmss_patch.identity = vmss.identity
        poller = client.virtual_machine_scale_sets.update(resource_group_name, vmss_name, vmss_patch)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    if vmss.upgrade_policy.mode == UpgradeMode.manual:
        logger.warning("With manual upgrade mode, you will need to run 'az vmss update-instances -g %s -n %s "
                       "--instance-ids *' to propagate the change", resource_group_name, vmss_name)
    return _construct_identity_info(identity_scope, identity_role, vmss.identity.principal_id,
                                    vmss.identity.user_assigned_identities)
# pylint: disable=too-many-locals, too-many-statements
def create_vmss(cmd, vmss_name, resource_group_name, image=None,
                disable_overprovision=False, instance_count=2,
                location=None, tags=None, upgrade_policy_mode='manual', validate=False,
                admin_username=None, admin_password=None, authentication_type=None,
                vm_sku=None, no_wait=False,
                ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
                load_balancer=None, load_balancer_sku=None, application_gateway=None,
                app_gateway_subnet_address_prefix=None,
                app_gateway_sku='Standard_Large', app_gateway_capacity=10,
                backend_pool_name=None, nat_pool_name=None, backend_port=None, health_probe=None,
                public_ip_address=None, public_ip_address_allocation=None,
                public_ip_address_dns_name=None, accelerated_networking=None,
                public_ip_per_vm=False, vm_domain_name=None, dns_servers=None, nsg=None,
                os_caching=None, data_caching=None,
                storage_container_name='vhds', storage_sku=None,
                os_type=None, os_disk_name=None,
                use_unmanaged_disk=False, data_disk_sizes_gb=None, disk_info=None,
                vnet_name=None, vnet_address_prefix='10.0.0.0/16',
                subnet=None, subnet_address_prefix=None,
                os_offer=None, os_publisher=None, os_sku=None, os_version=None,
                load_balancer_type=None, app_gateway_type=None, vnet_type=None,
                public_ip_address_type=None, storage_profile=None,
                single_placement_group=None, custom_data=None, secrets=None, platform_fault_domain_count=None,
                plan_name=None, plan_product=None, plan_publisher=None, plan_promotion_code=None, license_type=None,
                assign_identity=None, identity_scope=None, identity_role='Contributor',
                identity_role_id=None, zones=None, priority=None, eviction_policy=None,
                application_security_groups=None, ultra_ssd_enabled=None, ephemeral_os_disk=None,
                proximity_placement_group=None, aux_subscriptions=None, terminate_notification_time=None,
                max_price=None, computer_name_prefix=None, orchestration_mode='ScaleSetVM', scale_in_policy=None,
                os_disk_encryption_set=None, data_disk_encryption_sets=None, data_disk_iops=None, data_disk_mbps=None,
                automatic_repairs_grace_period=None, specialized=None, os_disk_size_gb=None, encryption_at_host=None,
                host_group=None):
    """Create a virtual machine scale set by composing and deploying an ARM template.

    Two orchestration modes are supported: 'ScaleSetVM' builds the full VMSS
    resource plus any requested networking (VNET, public IP, load balancer or
    application gateway, NSG), identity and role assignment; 'VM' emits a
    minimal VMSS shell and requires --platform-fault-domain-count. With
    *validate* the template is only validated; otherwise it is deployed and the
    deployment output (augmented with identity info when requested) is returned.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.cli.core.util import random_string, hash_string
    from azure.cli.core.commands.arm import ArmTemplateBuilder
    from azure.cli.command_modules.vm._template_builder import (StorageProfile, build_vmss_resource,
                                                                build_vnet_resource, build_public_ip_resource,
                                                                build_load_balancer_resource,
                                                                build_vmss_storage_account_pool_resource,
                                                                build_application_gateway_resource,
                                                                build_msi_role_assignment, build_nsg_resource)
    # Build up the ARM template
    master_template = ArmTemplateBuilder()
    scale_set_vm_str = 'ScaleSetVM'
    vm_str = 'VM'
    if orchestration_mode.lower() == scale_set_vm_str.lower():
        from msrestazure.tools import resource_id, is_valid_resource_id
        storage_sku = disk_info['os'].get('storageAccountType')
        subscription_id = get_subscription_id(cmd.cli_ctx)
        # Expand bare disk-encryption-set names into full resource ids.
        if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
            os_disk_encryption_set = resource_id(
                subscription=subscription_id, resource_group=resource_group_name,
                namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)
        if data_disk_encryption_sets is None:
            data_disk_encryption_sets = []
        for i, des in enumerate(data_disk_encryption_sets):
            if des is not None and not is_valid_resource_id(des):
                data_disk_encryption_sets[i] = resource_id(
                    subscription=subscription_id, resource_group=resource_group_name,
                    namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)
        network_id_template = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Network')
        vmss_id = resource_id(
            subscription=subscription_id, resource_group=resource_group_name,
            namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss_name)
        # 9-char computer-name prefix: up to 5 chars of the name + hash filler.
        scrubbed_name = vmss_name.replace('-', '').lower()[:5]
        naming_prefix = '{}{}'.format(scrubbed_name,
                                      hash_string(vmss_id,
                                                  length=(9 - len(scrubbed_name)),
                                                  force_lower=True))
        # determine final defaults and calculated values
        tags = tags or {}
        os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vmss_id, length=10))
                                        if use_unmanaged_disk else None)
        load_balancer = load_balancer or '{}LB'.format(vmss_name)
        app_gateway = application_gateway or '{}AG'.format(vmss_name)
        backend_pool_name = backend_pool_name or '{}BEPool'.format(load_balancer or application_gateway)
        vmss_dependencies = []
        # VNET will always be a dependency
        if vnet_type == 'new':
            vnet_name = vnet_name or '{}VNET'.format(vmss_name)
            subnet = subnet or '{}Subnet'.format(vmss_name)
            vmss_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
            vnet = build_vnet_resource(
                cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix)
            if app_gateway_type:
                # App gateway needs its own dedicated subnet inside the new VNET.
                vnet['properties']['subnets'].append({
                    'name': 'appGwSubnet',
                    'properties': {
                        'addressPrefix': app_gateway_subnet_address_prefix
                    }
                })
            master_template.add_resource(vnet)
        subnet_id = subnet if is_valid_resource_id(subnet) else \
            '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)
        gateway_subnet_id = ('{}/virtualNetworks/{}/subnets/appGwSubnet'.format(network_id_template, vnet_name)
                            if app_gateway_type == 'new' else None)
        # public IP is used by either load balancer/application gateway
        public_ip_address_id = None
        if public_ip_address:
            public_ip_address_id = (public_ip_address if is_valid_resource_id(public_ip_address)
                                    else '{}/publicIPAddresses/{}'.format(network_id_template,
                                                                          public_ip_address))
        def _get_public_ip_address_allocation(value, sku):
            # Standard-SKU public IPs default to static allocation; otherwise dynamic.
            IPAllocationMethod = cmd.get_models('IPAllocationMethod', resource_type=ResourceType.MGMT_NETWORK)
            if not value:
                value = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
                    else IPAllocationMethod.dynamic.value
            return value
        # Handle load balancer creation
        if load_balancer_type == 'new':
            vmss_dependencies.append('Microsoft.Network/loadBalancers/{}'.format(load_balancer))
            lb_dependencies = []
            if vnet_type == 'new':
                lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
            if public_ip_address_type == 'new':
                public_ip_address = public_ip_address or '{}PublicIP'.format(load_balancer)
                lb_dependencies.append(
                    'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
                master_template.add_resource(build_public_ip_resource(
                    cmd, public_ip_address, location, tags,
                    _get_public_ip_address_allocation(public_ip_address_allocation, load_balancer_sku),
                    public_ip_address_dns_name, load_balancer_sku, zones))
                public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template,
                                                                        public_ip_address)
            # calculate default names if not provided
            nat_pool_name = nat_pool_name or '{}NatPool'.format(load_balancer)
            if not backend_port:
                backend_port = 3389 if os_type == 'windows' else 22
            lb_resource = build_load_balancer_resource(
                cmd, load_balancer, location, tags, backend_pool_name, nat_pool_name, backend_port,
                'loadBalancerFrontEnd', public_ip_address_id, subnet_id, private_ip_address='',
                private_ip_allocation='Dynamic', sku=load_balancer_sku, instance_count=instance_count,
                disable_overprovision=disable_overprovision)
            lb_resource['dependsOn'] = lb_dependencies
            master_template.add_resource(lb_resource)
            # Per https://docs.microsoft.com/azure/load-balancer/load-balancer-standard-overview#nsg
            if load_balancer_sku and load_balancer_sku.lower() == 'standard' and nsg is None:
                nsg_name = '{}NSG'.format(vmss_name)
                master_template.add_resource(build_nsg_resource(
                    None, nsg_name, location, tags, 'rdp' if os_type.lower() == 'windows' else 'ssh'))
                nsg = "[resourceId('Microsoft.Network/networkSecurityGroups', '{}')]".format(nsg_name)
                vmss_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg_name))
        # Or handle application gateway creation
        if app_gateway_type == 'new':
            vmss_dependencies.append('Microsoft.Network/applicationGateways/{}'.format(app_gateway))
            ag_dependencies = []
            if vnet_type == 'new':
                ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
            if public_ip_address_type == 'new':
                public_ip_address = public_ip_address or '{}PublicIP'.format(app_gateway)
                ag_dependencies.append(
                    'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
                master_template.add_resource(build_public_ip_resource(
                    cmd, public_ip_address, location, tags,
                    _get_public_ip_address_allocation(public_ip_address_allocation, None), public_ip_address_dns_name,
                    None, zones))
                public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template,
                                                                        public_ip_address)
            # calculate default names if not provided
            backend_port = backend_port or 80
            ag_resource = build_application_gateway_resource(
                cmd, app_gateway, location, tags, backend_pool_name, backend_port, 'appGwFrontendIP',
                public_ip_address_id, subnet_id, gateway_subnet_id, private_ip_address='',
                private_ip_allocation='Dynamic', sku=app_gateway_sku, capacity=app_gateway_capacity)
            ag_resource['dependsOn'] = ag_dependencies
            master_template.add_variable(
                'appGwID',
                "[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(app_gateway))
            master_template.add_resource(ag_resource)
        # create storage accounts if needed for unmanaged disk storage
        if storage_profile == StorageProfile.SAPirImage:
            master_template.add_resource(build_vmss_storage_account_pool_resource(
                cmd, 'storageLoop', location, tags, storage_sku))
            master_template.add_variable('storageAccountNames', [
                '{}{}'.format(naming_prefix, x) for x in range(5)
            ])
            master_template.add_variable('vhdContainers', [
                "[concat('https://', variables('storageAccountNames')[{}], '.blob.{}/{}')]".format(
                    x, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name) for x in range(5)
            ])
            vmss_dependencies.append('storageLoop')
        backend_address_pool_id = None
        inbound_nat_pool_id = None
        if load_balancer_type or app_gateway_type:
            network_balancer = load_balancer if load_balancer_type else app_gateway
            balancer_type = 'loadBalancers' if load_balancer_type else 'applicationGateways'
            if is_valid_resource_id(network_balancer):
                # backend address pool needed by load balancer or app gateway
                backend_address_pool_id = '{}/backendAddressPools/{}'.format(network_balancer, backend_pool_name)
                if nat_pool_name:
                    inbound_nat_pool_id = '{}/inboundNatPools/{}'.format(network_balancer, nat_pool_name)
            else:
                # backend address pool needed by load balancer or app gateway
                backend_address_pool_id = '{}/{}/{}/backendAddressPools/{}'.format(
                    network_id_template, balancer_type, network_balancer, backend_pool_name)
                if nat_pool_name:
                    inbound_nat_pool_id = '{}/{}/{}/inboundNatPools/{}'.format(
                        network_id_template, balancer_type, network_balancer, nat_pool_name)
            if health_probe and not is_valid_resource_id(health_probe):
                health_probe = '{}/loadBalancers/{}/probes/{}'.format(network_id_template, load_balancer, health_probe)
        ip_config_name = '{}IPConfig'.format(naming_prefix)
        nic_name = '{}Nic'.format(naming_prefix)
        if custom_data:
            custom_data = read_content_if_is_file(custom_data)
        if secrets:
            secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])
        if computer_name_prefix is not None and isinstance(computer_name_prefix, str):
            naming_prefix = computer_name_prefix
        if os_version and os_version != 'latest':
            logger.warning('You are deploying VMSS pinned to a specific image version from Azure Marketplace. '
                           'Consider using "latest" as the image version.')
        vmss_resource = build_vmss_resource(
            cmd=cmd, name=vmss_name, naming_prefix=naming_prefix, location=location, tags=tags,
            overprovision=not disable_overprovision, upgrade_policy_mode=upgrade_policy_mode, vm_sku=vm_sku,
            instance_count=instance_count, ip_config_name=ip_config_name, nic_name=nic_name, subnet_id=subnet_id,
            public_ip_per_vm=public_ip_per_vm, vm_domain_name=vm_domain_name, dns_servers=dns_servers, nsg=nsg,
            accelerated_networking=accelerated_networking, admin_username=admin_username,
            authentication_type=authentication_type, storage_profile=storage_profile, os_disk_name=os_disk_name,
            disk_info=disk_info, os_type=os_type, image=image, admin_password=admin_password,
            ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, os_publisher=os_publisher, os_offer=os_offer,
            os_sku=os_sku, os_version=os_version, backend_address_pool_id=backend_address_pool_id,
            inbound_nat_pool_id=inbound_nat_pool_id, health_probe=health_probe,
            single_placement_group=single_placement_group, platform_fault_domain_count=platform_fault_domain_count,
            custom_data=custom_data, secrets=secrets, license_type=license_type, zones=zones, priority=priority,
            eviction_policy=eviction_policy, application_security_groups=application_security_groups,
            ultra_ssd_enabled=ultra_ssd_enabled, proximity_placement_group=proximity_placement_group,
            terminate_notification_time=terminate_notification_time, max_price=max_price,
            scale_in_policy=scale_in_policy, os_disk_encryption_set=os_disk_encryption_set,
            data_disk_encryption_sets=data_disk_encryption_sets, data_disk_iops=data_disk_iops,
            data_disk_mbps=data_disk_mbps, automatic_repairs_grace_period=automatic_repairs_grace_period,
            specialized=specialized, os_disk_size_gb=os_disk_size_gb, encryption_at_host=encryption_at_host,
            host_group=host_group)
        vmss_resource['dependsOn'] = vmss_dependencies
        if plan_name:
            vmss_resource['plan'] = {
                'name': plan_name,
                'publisher': plan_publisher,
                'product': plan_product,
                'promotionCode': plan_promotion_code
            }
        enable_local_identity = None
        if assign_identity is not None:
            vmss_resource['identity'], _, _, enable_local_identity = _build_identities_info(
                assign_identity)
            if identity_scope:
                role_assignment_guid = str(_gen_guid())
                master_template.add_resource(build_msi_role_assignment(vmss_name, vmss_id, identity_role_id,
                                                                       role_assignment_guid, identity_scope, False))
    elif orchestration_mode.lower() == vm_str.lower():
        if platform_fault_domain_count is None:
            raise CLIError("usage error: --platform-fault-domain-count is required in VM mode")
        # 'VM' mode emits only a minimal VMSS shell resource.
        vmss_resource = {
            'type': 'Microsoft.Compute/virtualMachineScaleSets',
            'name': vmss_name,
            'location': location,
            'tags': tags,
            'apiVersion': cmd.get_api_version(ResourceType.MGMT_COMPUTE, operation_group='virtual_machine_scale_sets'),
            'properties': {
                'singlePlacementGroup': single_placement_group,
                'provisioningState': 0,
                'platformFaultDomainCount': platform_fault_domain_count
            }
        }
        if zones is not None:
            vmss_resource['zones'] = zones
        if proximity_placement_group is not None:
            vmss_resource['properties']['proximityPlacementGroup'] = {
                'id': proximity_placement_group
            }
    else:
        raise CLIError('usage error: --orchestration-mode (ScaleSet | VM)')
    master_template.add_resource(vmss_resource)
    master_template.add_output('VMSS', vmss_name, 'Microsoft.Compute', 'virtualMachineScaleSets',
                               output_type='object')
    if orchestration_mode.lower() == scale_set_vm_str.lower() and admin_password:
        master_template.add_secure_parameter('adminPassword', admin_password)
    template = master_template.build()
    parameters = master_template.build_parameters()
    # deploy ARM template
    deployment_name = 'vmss_deploy_' + random_string(32)
    client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                     aux_subscriptions=aux_subscriptions).deployments
    DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    if validate:
        from azure.cli.command_modules.vm._vm_utils import log_pprint_template
        log_pprint_template(template)
        log_pprint_template(parameters)
    # Newer resource API versions wrap the properties in a Deployment model.
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)
        if validate:
            validation_poller = client.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        # creates the VMSS deployment
        deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)(
            sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, deployment))
    else:
        if validate:
            return client.validate(resource_group_name, deployment_name, properties)
        # creates the VMSS deployment
        deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)(
            sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, properties))
    if orchestration_mode.lower() == scale_set_vm_str.lower() and assign_identity is not None:
        vmss_info = get_vmss(cmd, resource_group_name, vmss_name)
        if enable_local_identity and not identity_scope:
            _show_missing_access_warning(resource_group_name, vmss_name, 'vmss')
        deployment_result['vmss']['identity'] = _construct_identity_info(identity_scope, identity_role,
                                                                         vmss_info.identity.principal_id,
                                                                         vmss_info.identity.user_assigned_identities)
    return deployment_result
def _build_identities_info(identities):
    """Normalize a list of identity ids into the ARM identity payload.

    :param identities: identity resource ids; MSI_LOCAL_ID denotes the
        system-assigned identity. None/empty implies system-assigned only.
    :return: tuple of (identity payload dict, comma-joined type string,
        external identity ids, whether the local identity is enabled).
    """
    from ._vm_utils import MSI_LOCAL_ID
    identities = identities or []
    types = []
    # No explicit ids, or the local-id sentinel present, means system-assigned.
    if not identities or MSI_LOCAL_ID in identities:
        types.append('SystemAssigned')
    external = [ident for ident in identities if ident != MSI_LOCAL_ID]
    if external:
        types.append('UserAssigned')
    joined_types = ','.join(types)
    payload = {'type': joined_types}
    if external:
        payload['userAssignedIdentities'] = {ext_id: {} for ext_id in external}
    return payload, joined_types, external, 'SystemAssigned' in joined_types
def deallocate_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
    """Deallocate a whole scale set, or a single instance when exactly one id is given."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    # Exactly one id routes through the per-VM API; anything else is set-level.
    if instance_ids and len(instance_ids) == 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.deallocate,
                           resource_group_name, vm_scale_set_name, instance_ids[0])
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.deallocate,
                       resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def delete_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
    """Delete specific VM instances from a scale set."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    # A single id uses the per-VM delete; multiple ids go through the batch API.
    if len(instance_ids) == 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.delete,
                           resource_group_name, vm_scale_set_name, instance_ids[0])
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.delete_instances,
                       resource_group_name, vm_scale_set_name, instance_ids)
def get_vmss(cmd, resource_group_name, name, instance_id=None):
    """Fetch a scale set, or one of its VM instances when instance_id is given."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if instance_id is None:
        return compute_client.virtual_machine_scale_sets.get(resource_group_name, name)
    return compute_client.virtual_machine_scale_set_vms.get(resource_group_name, name, instance_id)
def get_vmss_instance_view(cmd, resource_group_name, vm_scale_set_name, instance_id=None):
    """Show the instance view of a scale set, a single instance, or all instances ('*')."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if not instance_id:
        return compute_client.virtual_machine_scale_sets.get_instance_view(resource_group_name, vm_scale_set_name)
    if instance_id == '*':
        # Expand instanceView for every VM in the set and return just the views.
        vms = compute_client.virtual_machine_scale_set_vms.list(
            resource_group_name, vm_scale_set_name, select='instanceView', expand='instanceView')
        return [vm.instance_view for vm in vms]
    return compute_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name, vm_scale_set_name,
                                                                          instance_id)
def list_vmss(cmd, resource_group_name=None):
    """List scale sets in a resource group, or across the whole subscription."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if not resource_group_name:
        return compute_client.virtual_machine_scale_sets.list_all()
    return compute_client.virtual_machine_scale_sets.list(resource_group_name)
def list_vmss_instance_connection_info(cmd, resource_group_name, vm_scale_set_name):
    """Return {'instance <id>': '<public-ip>:<frontend-port>'} for each instance.

    Resolves the scale set's primary NIC -> inbound NAT pool -> load balancer ->
    public IP, then pairs each inbound NAT rule's frontend port with the backing
    instance id.

    :raises CLIError: when any link in that chain is missing (no primary NIC,
        no NAT-pool-backed ip configuration, or an internal-only load balancer).
    """
    from msrestazure.tools import parse_resource_id
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
    # find the load balancer via the primary NIC's inbound NAT pool reference
    nic_configs = vmss.virtual_machine_profile.network_profile.network_interface_configurations
    primary_nic_config = next((n for n in nic_configs if n.primary), None)
    if primary_nic_config is None:
        raise CLIError('could not find a primary NIC which is needed to search to load balancer')
    ip_configs = primary_nic_config.ip_configurations
    ip_config = next((ip for ip in ip_configs if ip.load_balancer_inbound_nat_pools), None)
    if not ip_config:
        raise CLIError('No load balancer exists to retrieve public IP address')
    # the NAT pool id embeds the load balancer's name and resource group
    res_id = ip_config.load_balancer_inbound_nat_pools[0].id
    lb_info = parse_resource_id(res_id)
    lb_name = lb_info['name']
    lb_rg = lb_info['resource_group']
    # get public ip of the load balancer's first frontend, if it has one
    network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
    lb = network_client.load_balancers.get(lb_rg, lb_name)
    if getattr(lb.frontend_ip_configurations[0], 'public_ip_address', None):
        res_id = lb.frontend_ip_configurations[0].public_ip_address.id
        public_ip_info = parse_resource_id(res_id)
        public_ip_name = public_ip_info['name']
        public_ip_rg = public_ip_info['resource_group']
        public_ip = network_client.public_ip_addresses.get(public_ip_rg, public_ip_name)
        public_ip_address = public_ip.ip_address
        # loop over the inbound NAT rules: each maps a frontend port to one instance
        instance_addresses = {}
        for rule in lb.inbound_nat_rules:
            # 'child_name_1' is the instance-id segment of the backend ip configuration id
            instance_id = parse_resource_id(rule.backend_ip_configuration.id)['child_name_1']
            instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address,
                                                                           rule.frontend_port)
        return instance_addresses
    raise CLIError('The VM scale-set uses an internal load balancer, hence no connection information')
def list_vmss_instance_public_ips(cmd, resource_group_name, vm_scale_set_name):
    """List instance-level public IP addresses of a scale set."""
    addresses = cf_public_ip_addresses(cmd.cli_ctx).list_virtual_machine_scale_set_public_ip_addresses(
        resource_group_name, vm_scale_set_name)
    # Over-provisioned instances are deleted after 'create/update' returns but can
    # still be reported without an ip_address; drop those entries.
    return [addr for addr in addresses if addr.ip_address]
def reimage_vmss(cmd, resource_group_name, vm_scale_set_name, instance_id=None, no_wait=False):
    """Reimage the whole scale set, or a single instance when instance_id is given."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if not instance_id:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.reimage,
                           resource_group_name, vm_scale_set_name)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.reimage,
                       resource_group_name, vm_scale_set_name, instance_id)
def restart_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
    """Restart a scale set, or a single instance when exactly one id is supplied."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    # Exactly one id routes through the per-VM API; anything else is set-level.
    if instance_ids and len(instance_ids) == 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.restart,
                           resource_group_name, vm_scale_set_name, instance_ids[0])
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.restart,
                       resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
# pylint: disable=inconsistent-return-statements
def scale_vmss(cmd, resource_group_name, vm_scale_set_name, new_capacity, no_wait=False):
    """Change a scale set's instance count; no-op when already at new_capacity."""
    VirtualMachineScaleSet = cmd.get_models('VirtualMachineScaleSet')
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
    # pylint: disable=no-member
    if vmss.sku.capacity == new_capacity:
        return
    vmss.sku.capacity = new_capacity
    # Send a minimal payload carrying only location and the updated sku.
    patched_vmss = VirtualMachineScaleSet(location=vmss.location, sku=vmss.sku)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.create_or_update,
                       resource_group_name, vm_scale_set_name, patched_vmss)
def start_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
    """Start a scale set, or a single instance when exactly one id is supplied."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    # Exactly one id routes through the per-VM API; anything else is set-level.
    if instance_ids and len(instance_ids) == 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.start,
                           resource_group_name, vm_scale_set_name, instance_ids[0])
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.start,
                       resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def stop_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, skip_shutdown=False):
    """Power off a scale set, or one instance when exactly one id is supplied.

    skip_shutdown=True bypasses the graceful OS shutdown request.
    """
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if instance_ids and len(instance_ids) == 1:
        return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_set_vms.power_off, resource_group_name,
                           vm_scale_set_name, instance_id=instance_ids[0], skip_shutdown=skip_shutdown)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.power_off, resource_group_name,
                       vm_scale_set_name, instance_ids=instance_ids, skip_shutdown=skip_shutdown)
def update_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
    """Upgrade the given instances to the scale set's latest model."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, compute_client.virtual_machine_scale_sets.update_instances,
                       resource_group_name, vm_scale_set_name, instance_ids)
def update_vmss(cmd, resource_group_name, name, license_type=None, no_wait=False, instance_id=None,
                protect_from_scale_in=None, protect_from_scale_set_actions=None,
                enable_terminate_notification=None, terminate_notification_time=None, ultra_ssd_enabled=None,
                scale_in_policy=None, priority=None, max_price=None, proximity_placement_group=None,
                enable_automatic_repairs=None, automatic_repairs_grace_period=None, **kwargs):
    """Update a scale set, or a single instance when instance_id is given.

    kwargs['parameters'] carries the pre-fetched VMSS (or instance) object from
    the generic-update pipeline; each non-None flag below patches that object
    before it is PUT back via create_or_update/update.
    """
    vmss = kwargs['parameters']
    aux_subscriptions = None
    # pylint: disable=too-many-boolean-expressions
    # A cross-subscription image reference requires an aux-subscription token on the client.
    if vmss and hasattr(vmss, 'virtual_machine_profile') and vmss.virtual_machine_profile and \
        vmss.virtual_machine_profile.storage_profile and \
        vmss.virtual_machine_profile.storage_profile.image_reference and \
        vmss.virtual_machine_profile.storage_profile.image_reference.id:
        aux_subscriptions = _parse_aux_subscriptions(vmss.virtual_machine_profile.storage_profile.image_reference.id)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    VMProtectionPolicy = cmd.get_models('VirtualMachineScaleSetVMProtectionPolicy')
    # handle vmss instance update: license + protection policy only
    if instance_id is not None:
        if license_type is not None:
            vmss.license_type = license_type
        if not vmss.protection_policy:
            vmss.protection_policy = VMProtectionPolicy()
        if protect_from_scale_in is not None:
            vmss.protection_policy.protect_from_scale_in = protect_from_scale_in
        if protect_from_scale_set_actions is not None:
            vmss.protection_policy.protect_from_scale_set_actions = protect_from_scale_set_actions
        return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.update,
                           resource_group_name, name, instance_id, **kwargs)
    # else handle vmss (model-level) update
    if license_type is not None:
        vmss.virtual_machine_profile.license_type = license_type
    if enable_terminate_notification is not None or terminate_notification_time is not None:
        if vmss.virtual_machine_profile.scheduled_events_profile is None:
            ScheduledEventsProfile = cmd.get_models('ScheduledEventsProfile')
            vmss.virtual_machine_profile.scheduled_events_profile = ScheduledEventsProfile()
        TerminateNotificationProfile = cmd.get_models('TerminateNotificationProfile')
        vmss.virtual_machine_profile.scheduled_events_profile.terminate_notification_profile =\
            TerminateNotificationProfile(not_before_timeout=terminate_notification_time,
                                         enable=enable_terminate_notification)
    if enable_automatic_repairs is not None or automatic_repairs_grace_period is not None:
        AutomaticRepairsPolicy = cmd.get_models('AutomaticRepairsPolicy')
        vmss.automatic_repairs_policy = \
            AutomaticRepairsPolicy(enabled="true", grace_period=automatic_repairs_grace_period)
    if ultra_ssd_enabled is not None:
        # Newer API versions expose additional_capabilities at the VMSS root;
        # older ones keep it under the virtual machine profile.
        if cmd.supported_api_version(min_api='2019-03-01', operation_group='virtual_machine_scale_sets'):
            if vmss.additional_capabilities is None:
                AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
                vmss.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled)
            else:
                vmss.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
        else:
            if vmss.virtual_machine_profile.additional_capabilities is None:
                AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
                vmss.virtual_machine_profile.additional_capabilities = AdditionalCapabilities(
                    ultra_ssd_enabled=ultra_ssd_enabled)
            else:
                vmss.virtual_machine_profile.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
    if scale_in_policy is not None:
        ScaleInPolicy = cmd.get_models('ScaleInPolicy')
        vmss.scale_in_policy = ScaleInPolicy(rules=scale_in_policy)
    if priority is not None:
        vmss.virtual_machine_profile.priority = priority
    if max_price is not None:
        if vmss.virtual_machine_profile.billing_profile is None:
            BillingProfile = cmd.get_models('BillingProfile')
            vmss.virtual_machine_profile.billing_profile = BillingProfile(max_price=max_price)
        else:
            vmss.virtual_machine_profile.billing_profile.max_price = max_price
    if proximity_placement_group is not None:
        vmss.proximity_placement_group = {'id': proximity_placement_group}
    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.create_or_update,
                       resource_group_name, name, **kwargs)
# endregion
# region VirtualMachineScaleSets Diagnostics
def set_vmss_diagnostics_extension(
        cmd, resource_group_name, vmss_name, settings, protected_settings=None, version=None,
        no_auto_upgrade=False):
    """Install or update the OS-appropriate diagnostics extension on a scale set.

    Picks the Linux or Windows diagnostics extension based on the scale set's OS,
    refuses to proceed when an incompatible major version of the Linux extension
    is already installed, and — when the upgrade policy is manual — rolls the
    change out to all instances afterwards.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    is_linux_os = _is_linux_os(vmss.virtual_machine_profile)
    vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT
    if is_linux_os and vmss.virtual_machine_profile.extension_profile:  # check incompatibles
        exts = vmss.virtual_machine_profile.extension_profile.extensions or []
        major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0]
        # For VMSS, we don't do auto-removal like VM because there is no reliable API to wait for
        if next((e for e in exts if e.name == _LINUX_DIAG_EXT and
                 not e.type_handler_version.startswith(major_ver + '.')), None):
            delete_cmd = 'az vmss extension delete -g {} --vmss-name {} -n {}'.format(
                resource_group_name, vmss_name, vm_extension_name)
            raise CLIError("There is an incompatible version of diagnostics extension installed. "
                           "Please remove it by running '{}', and retry. 'az vmss update-instances'"
                           " might be needed if with manual upgrade policy".format(delete_cmd))
    poller = set_vmss_extension(cmd, resource_group_name, vmss_name, vm_extension_name,
                                extension_mappings[vm_extension_name]['publisher'],
                                version or extension_mappings[vm_extension_name]['version'],
                                settings,
                                protected_settings,
                                no_auto_upgrade)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    UpgradeMode = cmd.get_models('UpgradeMode')
    # Manual upgrade policy: the new model must be applied to instances explicitly.
    if vmss.upgrade_policy.mode == UpgradeMode.manual:
        poller2 = update_vmss_instances(cmd, resource_group_name, vmss_name, ['*'])
        LongRunningOperation(cmd.cli_ctx)(poller2)
    return result
def attach_managed_data_disk_to_vmss(cmd, resource_group_name, vmss_name, size_gb=None, instance_id=None, lun=None,
                                     caching=None, disk=None, sku=None):
    """Attach a managed data disk to a scale set model or to a single instance.

    Without instance_id, a new empty-disk definition is appended to the scale
    set's storage profile. With instance_id, the disk identified by ``disk`` is
    attached to that one VM. When lun is None, the next free lun is chosen.
    """
    def _init_data_disk(storage_profile, lun, existing_disk=None):
        # Append either a new empty disk or an attach-reference to an existing one.
        data_disks = storage_profile.data_disks or []
        if lun is None:
            lun = _get_disk_lun(data_disks)
        if existing_disk is None:
            data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.empty, disk_size_gb=size_gb,
                                 caching=caching, managed_disk=ManagedDiskParameters(storage_account_type=sku))
        else:
            data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.attach, caching=caching,
                                 managed_disk=ManagedDiskParameters(id=existing_disk, storage_account_type=sku))
        data_disks.append(data_disk)
        storage_profile.data_disks = data_disks
    DiskCreateOptionTypes, ManagedDiskParameters = cmd.get_models(
        'DiskCreateOptionTypes', 'ManagedDiskParameters')
    # The scale-set model and a single VM use different data-disk model classes;
    # _init_data_disk above picks up whichever DataDisk is bound here.
    if disk is None:
        DataDisk = cmd.get_models('VirtualMachineScaleSetDataDisk')
    else:
        DataDisk = cmd.get_models('DataDisk')
    client = _compute_client_factory(cmd.cli_ctx)
    if instance_id is None:
        vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
        _init_data_disk(vmss.virtual_machine_profile.storage_profile, lun)
        return client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id)
    _init_data_disk(vmss_vm.storage_profile, lun, disk)
    return client.virtual_machine_scale_set_vms.update(resource_group_name, vmss_name, instance_id, vmss_vm)
def detach_disk_from_vmss(cmd, resource_group_name, vmss_name, lun, instance_id=None):
    """Detach the data disk at the given lun from the scale set model, or from one instance."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if instance_id is None:
        vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
        data_disks = vmss.virtual_machine_profile.storage_profile.data_disks
    else:
        vmss_vm = compute_client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id)
        data_disks = vmss_vm.storage_profile.data_disks
    if not data_disks:
        raise CLIError("Data disk doesn't exist")
    remaining = [disk for disk in data_disks if disk.lun != lun]
    # Unchanged length means no disk matched the requested lun.
    if len(remaining) == len(data_disks):
        raise CLIError("Could not find the data disk with lun '{}'".format(lun))
    if instance_id is None:
        vmss.virtual_machine_profile.storage_profile.data_disks = remaining
        return compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
    vmss_vm.storage_profile.data_disks = remaining
    return compute_client.virtual_machine_scale_set_vms.update(resource_group_name, vmss_name, instance_id, vmss_vm)
# endregion
# region VirtualMachineScaleSets Extensions
def delete_vmss_extension(cmd, resource_group_name, vmss_name, extension_name):
    """Remove the named extension from a scale set's model and push the update."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    profile = vmss.virtual_machine_profile.extension_profile
    if not profile:
        raise CLIError('Scale set has no extensions to delete')
    remaining = [ext for ext in profile.extensions if ext.name != extension_name]
    # Unchanged length means nothing matched the requested name.
    if len(remaining) == len(profile.extensions):
        raise CLIError('Extension {} not found'.format(extension_name))
    profile.extensions = remaining
    return compute_client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
# pylint: disable=inconsistent-return-statements
def get_vmss_extension(cmd, resource_group_name, vmss_name, extension_name):
    """Return the named extension from a scale set's model, or None when absent."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    profile = vmss.virtual_machine_profile.extension_profile
    if not profile:
        return
    matches = (ext for ext in profile.extensions if ext.name == extension_name)
    return next(matches, None)
def list_vmss_extensions(cmd, resource_group_name, vmss_name):
    """Return a scale set's extension list, or None when no extension profile exists."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    # pylint: disable=no-member
    profile = vmss.virtual_machine_profile
    if profile and profile.extension_profile:
        return profile.extension_profile.extensions
    return None
def set_vmss_extension(cmd, resource_group_name, vmss_name, extension_name, publisher, version=None,
                       settings=None, protected_settings=None, no_auto_upgrade=False, force_update=False,
                       no_wait=False, extension_instance_name=None, provision_after_extensions=None):
    """Add or replace an extension on a scale set's model.

    Any existing extension with the same type and publisher is removed first,
    then the new definition is appended and the full model is PUT back.
    """
    if not extension_instance_name:
        extension_instance_name = extension_name
    client = _compute_client_factory(cmd.cli_ctx)
    vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
        'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
    # pylint: disable=no-member
    version = _normalize_extension_version(cmd.cli_ctx, publisher, extension_name, version, vmss.location)
    extension_profile = vmss.virtual_machine_profile.extension_profile
    if extension_profile:
        extensions = extension_profile.extensions
        if extensions:
            # Drop any existing extension of the same type/publisher.
            # NOTE: the SDK model exposes the extension type via the 'type1' attribute.
            extension_profile.extensions = [x for x in extensions if
                                            x.type1.lower() != extension_name.lower() or x.publisher.lower() != publisher.lower()]  # pylint: disable=line-too-long
    ext = VirtualMachineScaleSetExtension(name=extension_instance_name,
                                          publisher=publisher,
                                          type1=extension_name,
                                          protected_settings=protected_settings,
                                          type_handler_version=version,
                                          settings=settings,
                                          auto_upgrade_minor_version=(not no_auto_upgrade),
                                          provision_after_extensions=provision_after_extensions)
    if force_update:
        # A fresh tag forces re-execution even when the definition is unchanged.
        ext.force_update_tag = str(_gen_guid())
    if not vmss.virtual_machine_profile.extension_profile:
        vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[])
    vmss.virtual_machine_profile.extension_profile.extensions.append(ext)
    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.create_or_update,
                       resource_group_name, vmss_name, vmss)
def set_orchestration_service_state(cmd, resource_group_name, vm_scale_set_name, service_name, action, no_wait=False):
    """Suspend or resume an orchestration service on a scale set.

    service_name is accepted as a required parameter but not forwarded: the SDK
    currently hard-codes the only supported service, "AutomaticRepairs", in its
    own logic. Keeping the parameter now avoids a breaking change when more
    service names are supported later.
    """
    client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.set_orchestration_service_state,
                       resource_group_name, vm_scale_set_name, action)
# endregion
# region VirtualMachineScaleSets RunCommand
def vmss_run_command_invoke(cmd, resource_group_name, vmss_name, command_id, instance_id, scripts=None, parameters=None):  # pylint: disable=line-too-long
    """Run a command on one VM instance of a scale set via the shared run-command helper."""
    return run_command_invoke(cmd, resource_group_name, vmss_name, command_id,
                              scripts, parameters, instance_id)
# endregion
# region VirtualMachineScaleSets Identity
def remove_vmss_identity(cmd, resource_group_name, vmss_name, identities=None):
    """Remove user-assigned identities (or, by default, the system-assigned one) from a scale set."""
    client = _compute_client_factory(cmd.cli_ctx)
    def _getter(_, resource_group_name, vmss_name):
        # Fetch the current scale set model for the shared removal helper.
        return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
    def _setter(resource_group_name, name, vmss_instance):
        # PATCH back only the identity portion of the model.
        VirtualMachineScaleSetUpdate = cmd.get_models('VirtualMachineScaleSetUpdate',
                                                      operation_group='virtual_machine_scale_sets')
        update_payload = VirtualMachineScaleSetUpdate(identity=vmss_instance.identity)
        return client.virtual_machine_scale_sets.update(resource_group_name, vmss_name, update_payload)
    if identities is None:
        from ._vm_utils import MSI_LOCAL_ID
        identities = [MSI_LOCAL_ID]
    return _remove_identities(cmd, resource_group_name, vmss_name, identities,
                              _getter,
                              _setter)
# endregion
# region image galleries
def list_image_galleries(cmd, resource_group_name=None):
    """List shared image galleries in a resource group, or across the subscription."""
    compute_client = _compute_client_factory(cmd.cli_ctx)
    if not resource_group_name:
        return compute_client.galleries.list()
    return compute_client.galleries.list_by_resource_group(resource_group_name)
def create_image_gallery(cmd, resource_group_name, gallery_name, description=None,
                         location=None, no_wait=False, tags=None):
    """Create a shared image gallery.

    :param description: optional free-text description stored on the gallery.
    :param location: defaults to the resource group's location.
    :param no_wait: do not wait for the long-running operation to finish.
    :param tags: resource tags; defaults to an empty dict.
    :return: poller (or raw response with no_wait) for the create operation.
    """
    Gallery = cmd.get_models('Gallery')
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    gallery = Gallery(description=description, location=location, tags=(tags or {}))
    # Build the compute client once (the previous version constructed it twice).
    client = _compute_client_factory(cmd.cli_ctx)
    return sdk_no_wait(no_wait, client.galleries.create_or_update, resource_group_name, gallery_name, gallery)
def create_gallery_image(cmd, resource_group_name, gallery_name, gallery_image_name, os_type, publisher, offer, sku,
                         os_state='Generalized', end_of_life_date=None, privacy_statement_uri=None,
                         release_note_uri=None, eula=None, description=None, location=None,
                         minimum_cpu_core=None, maximum_cpu_core=None, minimum_memory=None, maximum_memory=None,
                         disallowed_disk_types=None, plan_name=None, plan_publisher=None, plan_product=None, tags=None,
                         hyper_v_generation='V1'):
    """Create a gallery image definition.

    Builds optional recommended-machine-configuration and purchase-plan blocks
    only when at least one of their constituent values is supplied, then PUTs
    the image definition. end_of_life_date gets a default time component when
    only a date was given.
    """
    # pylint: disable=line-too-long
    GalleryImage, GalleryImageIdentifier, RecommendedMachineConfiguration, ResourceRange, Disallowed, ImagePurchasePlan = cmd.get_models(
        'GalleryImage', 'GalleryImageIdentifier', 'RecommendedMachineConfiguration', 'ResourceRange', 'Disallowed', 'ImagePurchasePlan')
    client = _compute_client_factory(cmd.cli_ctx)
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    end_of_life_date = fix_gallery_image_date_info(end_of_life_date)
    recommendation = None
    if any([minimum_cpu_core, maximum_cpu_core, minimum_memory, maximum_memory]):
        cpu_recommendation, memory_recommendation = None, None
        if any([minimum_cpu_core, maximum_cpu_core]):
            cpu_recommendation = ResourceRange(min=minimum_cpu_core, max=maximum_cpu_core)
        if any([minimum_memory, maximum_memory]):
            memory_recommendation = ResourceRange(min=minimum_memory, max=maximum_memory)
        recommendation = RecommendedMachineConfiguration(v_cp_us=cpu_recommendation, memory=memory_recommendation)
    purchase_plan = None
    if any([plan_name, plan_publisher, plan_product]):
        purchase_plan = ImagePurchasePlan(name=plan_name, publisher=plan_publisher, product=plan_product)
    # NOTE: the model's keyword is spelled 'hyper_vgeneration' in this SDK version.
    image = GalleryImage(identifier=GalleryImageIdentifier(publisher=publisher, offer=offer, sku=sku),
                         os_type=os_type, os_state=os_state, end_of_life_date=end_of_life_date,
                         recommended=recommendation, disallowed=Disallowed(disk_types=disallowed_disk_types),
                         purchase_plan=purchase_plan, location=location, eula=eula, tags=(tags or {}),
                         hyper_vgeneration=hyper_v_generation)
    return client.gallery_images.create_or_update(resource_group_name, gallery_name, gallery_image_name, image)
def create_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version,
                         location=None, target_regions=None, storage_account_type=None,
                         end_of_life_date=None, exclude_from_latest=None, replica_count=None, tags=None,
                         os_snapshot=None, data_snapshots=None, managed_image=None, data_snapshot_luns=None,
                         target_region_encryption=None):
    """Create a gallery image version from a managed image and/or snapshots.

    Bare names for managed_image/os_snapshot/data_snapshots are expanded to full
    resource ids within the current subscription and resource group. On API
    2019-07-01+ a storage profile (managed-image source, OS disk image, data
    disk images with luns) is built; on older APIs only a managed image source
    is supported.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    ImageVersionPublishingProfile, GalleryArtifactSource, ManagedArtifact, ImageVersion, TargetRegion = cmd.get_models(
        'GalleryImageVersionPublishingProfile', 'GalleryArtifactSource', 'ManagedArtifact', 'GalleryImageVersion',
        'TargetRegion')
    aux_subscriptions = None
    # A cross-subscription managed image requires an aux-subscription token.
    if managed_image:
        aux_subscriptions = _parse_aux_subscriptions(managed_image)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    end_of_life_date = fix_gallery_image_date_info(end_of_life_date)
    # Expand bare names to full resource ids in the current subscription/RG.
    if managed_image and not is_valid_resource_id(managed_image):
        managed_image = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
                                    namespace='Microsoft.Compute', type='images', name=managed_image)
    if os_snapshot and not is_valid_resource_id(os_snapshot):
        os_snapshot = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
                                  namespace='Microsoft.Compute', type='snapshots', name=os_snapshot)
    if data_snapshots:
        for i, s in enumerate(data_snapshots):
            if not is_valid_resource_id(data_snapshots[i]):
                data_snapshots[i] = resource_id(
                    subscription=client.config.subscription_id, resource_group=resource_group_name,
                    namespace='Microsoft.Compute', type='snapshots', name=s)
    source = GalleryArtifactSource(managed_image=ManagedArtifact(id=managed_image))
    profile = ImageVersionPublishingProfile(exclude_from_latest=exclude_from_latest, end_of_life_date=end_of_life_date,
                                            target_regions=target_regions or [TargetRegion(name=location)],
                                            source=source, replica_count=replica_count,
                                            storage_account_type=storage_account_type)
    if cmd.supported_api_version(min_api='2019-07-01', operation_group='gallery_image_versions'):
        if managed_image is None and os_snapshot is None:
            raise CLIError('usage error: Please provide --managed-image or --os-snapshot')
        GalleryImageVersionStorageProfile = cmd.get_models('GalleryImageVersionStorageProfile')
        GalleryArtifactVersionSource = cmd.get_models('GalleryArtifactVersionSource')
        GalleryOSDiskImage = cmd.get_models('GalleryOSDiskImage')
        GalleryDataDiskImage = cmd.get_models('GalleryDataDiskImage')
        source = os_disk_image = data_disk_images = None
        if managed_image is not None:
            source = GalleryArtifactVersionSource(id=managed_image)
        if os_snapshot is not None:
            os_disk_image = GalleryOSDiskImage(source=GalleryArtifactVersionSource(id=os_snapshot))
        if data_snapshot_luns and not data_snapshots:
            raise CLIError('usage error: --data-snapshot-luns must be used together with --data-snapshots')
        if data_snapshots:
            if data_snapshot_luns and len(data_snapshots) != len(data_snapshot_luns):
                raise CLIError('usage error: Length of --data-snapshots and --data-snapshot-luns should be equal.')
            # Default to sequential luns when none were supplied.
            if not data_snapshot_luns:
                data_snapshot_luns = [i for i in range(len(data_snapshots))]
            data_disk_images = []
            for i, s in enumerate(data_snapshots):
                data_disk_images.append(GalleryDataDiskImage(source=GalleryArtifactVersionSource(id=s),
                                                             lun=data_snapshot_luns[i]))
        storage_profile = GalleryImageVersionStorageProfile(source=source, os_disk_image=os_disk_image,
                                                            data_disk_images=data_disk_images)
        image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {}),
                                     storage_profile=storage_profile)
    else:
        # Older API versions only support a managed-image source in the publishing profile.
        if managed_image is None:
            raise CLIError('usage error: Please provide --managed-image')
        image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {}))
    return client.gallery_image_versions.create_or_update(resource_group_name=resource_group_name,
                                                          gallery_name=gallery_name,
                                                          gallery_image_name=gallery_image_name,
                                                          gallery_image_version_name=gallery_image_version,
                                                          gallery_image_version=image_version)
def fix_gallery_image_date_info(date_info):
    """Append a default time component when only a date was supplied.

    The service end rejects a bare date, so 'YYYY-MM-DD' becomes
    'YYYY-MM-DDT12:59:59Z'. Values already containing a time marker ('T')
    and falsy values pass through unchanged.
    """
    if not date_info:
        return date_info
    has_time_component = 't' in date_info.lower()
    return date_info if has_time_component else date_info + 'T12:59:59Z'
def update_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name,
                         target_regions=None, replica_count=None, no_wait=False, **kwargs):
    """Update a gallery image version's target regions and/or replica count.

    kwargs['gallery_image_version'] carries the pre-fetched version object from
    the generic-update pipeline; it is patched in place and PUT back.
    """
    image_version = kwargs['gallery_image_version']
    if target_regions:
        image_version.publishing_profile.target_regions = target_regions
    if replica_count:
        image_version.publishing_profile.replica_count = replica_count
    # When a source is set, clear the per-disk images so they are re-derived.
    if image_version.storage_profile.source is not None:
        image_version.storage_profile.os_disk_image = image_version.storage_profile.data_disk_images = None
    aux_subscriptions = None
    # A cross-subscription source requires an aux-subscription token on the client.
    if image_version.storage_profile and image_version.storage_profile.source and \
            image_version.storage_profile.source.id:
        aux_subscriptions = _parse_aux_subscriptions(image_version.storage_profile.source.id)
    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
    return sdk_no_wait(no_wait, client.gallery_image_versions.create_or_update, resource_group_name, gallery_name,
                       gallery_image_name, gallery_image_version_name, **kwargs)
# endregion
# region proximity placement groups
def create_proximity_placement_group(cmd, client, proximity_placement_group_name, resource_group_name,
                                     ppg_type=None, location=None, tags=None):
    """Create a proximity placement group, validating --type against the SDK enum."""
    from knack.arguments import CaseInsensitiveList
    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    ProximityPlacementGroup, PPGType = cmd.get_models('ProximityPlacementGroup', 'ProximityPlacementGroupType')
    # Validate the requested type case-insensitively against the enum's values.
    valid_types = CaseInsensitiveList([member.value for member in PPGType])
    if ppg_type and ppg_type not in valid_types:
        logger.info("Valid choices: %s", str(valid_types))
        raise CLIError("Usage error: invalid value for --type/-t")
    ppg_params = ProximityPlacementGroup(name=proximity_placement_group_name,
                                         proximity_placement_group_type=ppg_type,
                                         location=location, tags=(tags or {}))
    return client.create_or_update(resource_group_name=resource_group_name,
                                   proximity_placement_group_name=proximity_placement_group_name,
                                   parameters=ppg_params)
def list_proximity_placement_groups(client, resource_group_name=None):
    """List PPGs in one resource group, or across the subscription when none is given."""
    if not resource_group_name:
        return client.list_by_subscription()
    return client.list_by_resource_group(resource_group_name=resource_group_name)
# endregion
# region dedicated host
def create_dedicated_host_group(cmd, client, host_group_name, resource_group_name, platform_fault_domain_count=None,
                                automatic_placement=None, location=None, zones=None, tags=None):
    """Create a dedicated host group, defaulting location to the resource group's."""
    DedicatedHostGroup = cmd.get_models('DedicatedHostGroup')
    if not location:
        location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    group = DedicatedHostGroup(location=location,
                               platform_fault_domain_count=platform_fault_domain_count,
                               support_automatic_placement=automatic_placement,
                               zones=zones,
                               tags=tags)
    return client.create_or_update(resource_group_name, host_group_name, parameters=group)
def list_dedicated_host_groups(cmd, client, resource_group_name=None):
    """List dedicated host groups, scoped to a resource group when one is given."""
    if not resource_group_name:
        return client.list_by_subscription()
    return client.list_by_resource_group(resource_group_name)
def get_dedicated_host_group_instance_view(client, host_group_name, resource_group_name):
    """Fetch a dedicated host group with its instance view expanded."""
    expand_option = "instanceView"
    return client.get(resource_group_name, host_group_name, expand=expand_option)
def create_dedicated_host(cmd, client, host_group_name, host_name, resource_group_name, sku, platform_fault_domain=None,
                          auto_replace_on_failure=None, license_type=None, location=None, tags=None):
    """Provision a dedicated host inside an existing host group."""
    DedicatedHostType = cmd.get_models('DedicatedHost')
    SkuType = cmd.get_models('Sku')
    if not location:
        location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    host = DedicatedHostType(location=location,
                             platform_fault_domain=platform_fault_domain,
                             auto_replace_on_failure=auto_replace_on_failure,
                             license_type=license_type,
                             sku=SkuType(name=sku),
                             tags=tags)
    return client.create_or_update(resource_group_name, host_group_name, host_name, parameters=host)
def get_dedicated_host_instance_view(client, host_group_name, host_name, resource_group_name):
    """Fetch a dedicated host with its instance view expanded."""
    expand_option = "instanceView"
    return client.get(resource_group_name, host_group_name, host_name, expand=expand_option)
# endregion
# region VMMonitor
def _get_log_analytics_client(cmd):
    """Build a Log Analytics management client bound to the current subscription."""
    from ._client_factory import cf_log_analytics
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    return cf_log_analytics(cmd.cli_ctx, subscription_id)
def _prepare_workspace(cmd, resource_group_name, workspace):
    """Resolve ``workspace`` to a Log Analytics workspace resource id.

    ``workspace`` may be a full resource id (returned unchanged) or a
    workspace name.  A name is looked up in ``resource_group_name``; if the
    lookup fails, the workspace is created with the per-GB-2018 SKU and a
    30-day retention, and the new workspace's id is returned.
    """
    from msrestazure.tools import is_valid_resource_id
    from msrestazure.azure_exceptions import CloudError
    workspace_id = None
    if not is_valid_resource_id(workspace):
        workspace_name = workspace
        log_client = _get_log_analytics_client(cmd)
        workspace_result = None
        try:
            workspace_result = log_client.workspaces.get(resource_group_name, workspace_name)
        except CloudError:
            # GET failed (typically: workspace does not exist) — create it
            # with default settings in the resource group's location.
            from azure.mgmt.loganalytics.models import Workspace, WorkspaceSku, WorkspaceSkuNameEnum
            sku = WorkspaceSku(name=WorkspaceSkuNameEnum.per_gb2018.value)
            retention_time = 30  # default retention, in days
            location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
            workspace_instance = Workspace(location=location,
                                           sku=sku,
                                           retention_in_days=retention_time)
            workspace_result = LongRunningOperation(cmd.cli_ctx)(log_client.workspaces.create_or_update(
                resource_group_name,
                workspace_name,
                workspace_instance))
        workspace_id = workspace_result.id
    else:
        workspace_id = workspace
    return workspace_id
def _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name):
    """Install the default OS-specific data sources on a Log Analytics workspace.

    Per-data-source failures are logged and skipped rather than aborting the
    command; an unsupported ``os_type`` only produces a warning.
    """
    from ._client_factory import cf_log_analytics_data_sources
    from azure.cli.core.commands.client_factory import get_subscription_id
    from azure.mgmt.loganalytics.models import DataSource
    from msrestazure.azure_exceptions import CloudError
    subscription_id = get_subscription_id(cmd.cli_ctx)
    data_sources_client = cf_log_analytics_data_sources(cmd.cli_ctx, subscription_id)
    data_source_name_template = "DataSource_{}_{}"
    default_data_sources = None
    # Pick the default data-source bundle for the VM's OS.
    if os_type.lower() == 'linux':
        from ._workspace_data_source_settings import default_linux_data_sources
        default_data_sources = default_linux_data_sources
    elif os_type.lower() == 'windows':
        from ._workspace_data_source_settings import default_windows_data_sources
        default_data_sources = default_windows_data_sources
    if default_data_sources is not None:
        for data_source_kind, data_source_settings in default_data_sources.items():
            for data_source_setting in data_source_settings:
                data_source = DataSource(kind=data_source_kind,
                                         properties=data_source_setting)
                # Unique name per data source: kind plus a random GUID suffix.
                data_source_name = data_source_name_template.format(data_source_kind, _gen_guid())
                try:
                    data_sources_client.create_or_update(resource_group_name,
                                                         workspace_name,
                                                         data_source_name,
                                                         data_source)
                except CloudError as ex:
                    # Best-effort: warn and continue with the remaining sources.
                    logger.warning("Failed to set data source due to %s. "
                                   "Skip this step and need manual work later.", ex.message)
    else:
        logger.warning("Unsupported OS type. Skip the default settings for log analytics workspace.")
def execute_query_for_vm(cmd, client, resource_group_name, vm_name, analytics_query, timespan=None):
    """Run a Log Analytics query against the workspace a VM reports to.

    The workspace id is read from the public settings of the monitoring-agent
    VM extension (Windows 'MicrosoftMonitoringAgent' or Linux
    'OmsAgentForLinux').  Raises CLIError when no such extension/setting is
    found.  If several agent extensions are present, the last one wins
    (matching the original behavior).
    """
    from azure.loganalytics.models import QueryBody
    vm = get_vm(cmd, resource_group_name, vm_name)
    workspace = None
    extension_resources = vm.resources or []
    for resource in extension_resources:
        if resource.name in ("MicrosoftMonitoringAgent", "OmsAgentForLinux"):
            workspace = resource.settings.get('workspaceId', None)
    if workspace is None:
        # BUGFIX: corrected "workpsace" typo in the user-facing message.
        raise CLIError('Cannot find the corresponding log analytics workspace. '
                       'Please check the status of log analytics workspace.')
    return client.query(workspace, QueryBody(query=analytics_query, timespan=timespan))
def _set_log_analytics_workspace_extension(cmd, resource_group_name, vm, vm_name, workspace_name):
    """Attach the OMS/monitoring-agent VM extension wired to a workspace.

    Public settings carry the workspace (customer) id; the workspace's
    primary shared key goes into protected settings so it is not stored in
    clear text on the VM model.
    """
    is_linux_os = _is_linux_os(vm)
    # Linux and Windows use differently-named OMS agent extensions.
    vm_extension_name = _LINUX_OMS_AGENT_EXT if is_linux_os else _WINDOWS_OMS_AGENT_EXT
    log_client = _get_log_analytics_client(cmd)
    customer_id = log_client.workspaces.get(resource_group_name, workspace_name).customer_id
    settings = {
        'workspaceId': customer_id,
        'stopOnMultipleConnections': 'true'
    }
    primary_shared_key = log_client.shared_keys.get_shared_keys(resource_group_name, workspace_name).primary_shared_key
    protected_settings = {
        'workspaceKey': primary_shared_key,
    }
    return set_extension(cmd, resource_group_name, vm_name, vm_extension_name,
                         extension_mappings[vm_extension_name]['publisher'],
                         extension_mappings[vm_extension_name]['version'],
                         settings,
                         protected_settings)
# endregion
# disk encryption set
def create_disk_encryption_set(cmd, client, resource_group_name, disk_encryption_set_name,
                               key_url, source_vault, encryption_type=None, location=None, tags=None, no_wait=False):
    """Create a disk encryption set backed by a key vault key.

    ``source_vault`` may be a bare vault name (expanded to a full resource id
    in the current subscription/resource group) or a full resource id.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    DiskEncryptionSet, EncryptionSetIdentity, KeyVaultAndKeyReference, SourceVault = cmd.get_models(
        'DiskEncryptionSet', 'EncryptionSetIdentity', 'KeyVaultAndKeyReference', 'SourceVault')

    identity = EncryptionSetIdentity(type='SystemAssigned')
    if not is_valid_resource_id(source_vault):
        source_vault = resource_id(subscription=client.config.subscription_id,
                                   resource_group=resource_group_name,
                                   namespace='Microsoft.KeyVault', type='vaults', name=source_vault)
    active_key = KeyVaultAndKeyReference(source_vault=SourceVault(id=source_vault), key_url=key_url)
    encryption_set = DiskEncryptionSet(location=location, tags=tags, identity=identity,
                                       active_key=active_key, encryption_type=encryption_type)
    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, disk_encryption_set_name,
                       encryption_set)
def list_disk_encryption_sets(cmd, client, resource_group_name=None):
    """List disk encryption sets, scoped to a resource group when one is given."""
    if not resource_group_name:
        return client.list()
    return client.list_by_resource_group(resource_group_name)
def update_disk_encryption_set(instance, client, resource_group_name, key_url=None, source_vault=None):
    """Patch a disk encryption set's active key and/or source vault.

    Only parameters the caller provides are touched.

    BUGFIX: the original expanded ``source_vault`` via ``resource_id`` even
    when it was None, producing a bogus-but-truthy id string with
    ``name=None`` that then overwrote ``instance.active_key.source_vault.id``
    on key_url-only updates.  Expansion/assignment now happens only when a
    source vault was actually supplied.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    if source_vault:
        # Accept either a bare vault name or a full resource id.
        if not is_valid_resource_id(source_vault):
            source_vault = resource_id(subscription=client.config.subscription_id,
                                       resource_group=resource_group_name,
                                       namespace='Microsoft.KeyVault', type='vaults', name=source_vault)
        instance.active_key.source_vault.id = source_vault
    if key_url:
        instance.active_key.key_url = key_url
    return instance
# endregion
# region Disk Access
def create_disk_access(cmd, client, resource_group_name, disk_access_name, location=None, tags=None, no_wait=False):
    """Create a disk access resource, optionally without waiting for completion."""
    create_op = client.create_or_update
    return sdk_no_wait(no_wait, create_op,
                       resource_group_name, disk_access_name,
                       location=location, tags=tags)
def list_disk_accesses(cmd, client, resource_group_name=None):
    """List disk access resources, scoped to a resource group when one is given."""
    if not resource_group_name:
        return client.list()
    return client.list_by_resource_group(resource_group_name)
def set_disk_access(cmd, client, parameters, resource_group_name, disk_access_name, tags=None, no_wait=False):
    """Update (re-PUT) a disk access resource in its resource group's location.

    NOTE(review): ``parameters`` (the existing resource supplied by the
    generic-update flow) is currently unused — only location and tags are
    sent.  Confirm whether other properties should be round-tripped.
    """
    location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, disk_access_name,
                       location=location, tags=tags)
# endregion
| true | true |
f72d89f927ae8b3502ffbda7f60181ea25a41dcd | 9,240 | py | Python | run_align.py | gitlost-murali/awesome-align | 39fb45ca85a98e005447bddb52c48e65ce7d399b | [
"BSD-3-Clause"
] | null | null | null | run_align.py | gitlost-murali/awesome-align | 39fb45ca85a98e005447bddb52c48e65ce7d399b | [
"BSD-3-Clause"
] | null | null | null | run_align.py | gitlost-murali/awesome-align | 39fb45ca85a98e005447bddb52c48e65ce7d399b | [
"BSD-3-Clause"
] | null | null | null | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Modifications copyright (C) 2020 Zi-Yi Dou
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
import itertools
import os
import numpy as np
import torch
from tqdm import trange
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, SequentialSampler
import modeling
from configuration_bert import BertConfig
from modeling import BertForMaskedLM
from tokenization_bert import BertTokenizer
from tokenization_utils import PreTrainedTokenizer
from modeling_utils import PreTrainedModel
def set_seed(args):
    """Seed the python, NumPy and (all-device) torch RNGs from ``args.seed``.

    A negative seed disables seeding, leaving runs non-deterministic.
    """
    seed = args.seed
    if seed < 0:
        return
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
class LineByLineTextDataset(Dataset):
    """Parallel-corpus dataset: one ``src ||| tgt`` sentence pair per line.

    Each example is a tuple ``(ids_src, ids_tgt, bpe2word_map_src,
    bpe2word_map_tgt)``: the id tensors are the subword ids of one sentence
    (with the tokenizer's special tokens added), and each map gives, for
    every subword position, the index of the word it came from.
    """
    def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path):
        """Read and pre-tokenize ``file_path``; raises ValueError on malformed lines."""
        assert os.path.isfile(file_path)
        print('Loading the dataset...')
        self.examples = []
        with open(file_path, encoding="utf-8") as f:
            for idx, line in enumerate(f.readlines()):
                # Every line must contain exactly one ' ||| ' separator with
                # non-empty text on both sides.
                if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:
                    raise ValueError(f'Line {idx+1} is not in the correct format!')
                src, tgt = line.split(' ||| ')
                if src.rstrip() == '' or tgt.rstrip() == '':
                    raise ValueError(f'Line {idx+1} is not in the correct format!')
                sent_src, sent_tgt = src.strip().split(), tgt.strip().split()
                # Tokenize word-by-word so the subword->word mapping is known.
                token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]
                wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]
                # Flatten the per-word ids and let the tokenizer add special
                # tokens / apply the max-length limit.
                ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids']
                # Subword index -> word index maps (one entry per subword).
                bpe2word_map_src = []
                for i, word_list in enumerate(token_src):
                    bpe2word_map_src += [i for x in word_list]
                bpe2word_map_tgt = []
                for i, word_list in enumerate(token_tgt):
                    bpe2word_map_tgt += [i for x in word_list]
                self.examples.append( (ids_src[0], ids_tgt[0], bpe2word_map_src, bpe2word_map_tgt) )

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
def word_align(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, output_word_alignments = False):
    """Extract word alignments for every sentence pair in ``args.data_file``.

    Writes one line of Pharaoh-style ``i-j`` index pairs per input pair to
    ``args.output_file``.  When ``output_word_alignments`` is true, an extra
    ``<output_file>.outtxt`` file with ``srcword-tgtword`` pairs is produced.
    """
    def collate(examples):
        # Pad source/target id tensors to the batch max length; the
        # subword->word maps stay as plain python lists.
        ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = zip(*examples)
        ids_src = pad_sequence(ids_src, batch_first=True, padding_value=tokenizer.pad_token_id)
        ids_tgt = pad_sequence(ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id)
        return ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt

    dataset = LineByLineTextDataset(tokenizer, args, file_path=args.data_file)
    sampler = SequentialSampler(dataset)
    dataloader = DataLoader(
        dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate
    )

    model.to(args.device)
    model.eval()
    tqdm_iterator = trange(dataset.__len__(), desc="Extracting")
    with open(args.output_file, 'w') as writer:
        for batch in dataloader:
            with torch.no_grad():
                ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = batch
                word_aligns_list = model.get_aligned_word(ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt, args.device, 0, 0, align_layer=args.align_layer, extraction=args.extraction, softmax_threshold=args.softmax_threshold, test=True)
                for word_aligns in word_aligns_list:
                    output_str = []
                    for word_align in word_aligns:
                        # Each alignment is a (src_word_idx, tgt_word_idx) pair.
                        output_str.append(f'{word_align[0]}-{word_align[1]}')
                    writer.write(' '.join(output_str)+'\n')
                tqdm_iterator.update(len(ids_src))

    if output_word_alignments:
        # Second pass: map index pairs back to the surface words.
        with open(args.output_file, 'r') as fh:
            outputf = (fh.read()).split("\n")
        with open(args.data_file, 'r') as fh:
            datalines = (fh.read()).split("\n")
        with open(args.output_file+".outtxt", 'w') as fwriter:
            for indices, line in zip(outputf, datalines):
                srcline, tgtline = line.split(' ||| ')
                indices = indices.split()
                srcwrds = srcline.split()
                tgtwrds = tgtline.split()
                output_wrds = []
                for wrd in indices:
                    srcix,tgtix = wrd.split("-")
                    srcix, tgtix = int(srcix), int(tgtix)
                    output_wrds.append(f"{srcwrds[srcix]}-{tgtwrds[tgtix]}")
                fwriter.write(' '.join(output_wrds)+'\n')
def main():
    """Command-line entry point: parse args, load model/tokenizer, run alignment."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
    )
    parser.add_argument(
        "--output_file",
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--align_layer", type=int, default=8, help="layer for alignment extraction")
    parser.add_argument(
        "--extraction", default='softmax', type=str, help='softmax or entmax15'
    )
    parser.add_argument(
        "--softmax_threshold", type=float, default=0.001
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
    )
    parser.add_argument(
        "--config_name",
        default=None,
        type=str,
        help="Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--batch_size", default=32, type=int)
    parser.add_argument(
        "--cache_dir",
        default='cache_dir',
        type=str,
        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.device = device

    # Set seed
    set_seed(args)

    # Config resolution: explicit --config_name wins, then the model path,
    # otherwise a freshly initialized config.
    config_class, model_class, tokenizer_class = BertConfig, BertForMaskedLM, BertTokenizer
    if args.config_name:
        config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)
    elif args.model_name_or_path:
        config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
    else:
        config = config_class()

    if args.tokenizer_name:
        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
    elif args.model_name_or_path:
        tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,"
            "and load it from here, using --tokenizer_name".format(tokenizer_class.__name__)
        )

    # Tell the modeling module which special-token ids this tokenizer uses.
    modeling.PAD_ID = tokenizer.pad_token_id
    modeling.CLS_ID = tokenizer.cls_token_id
    modeling.SEP_ID = tokenizer.sep_token_id

    if args.model_name_or_path:
        model = model_class.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            cache_dir=args.cache_dir,
        )
    else:
        # No checkpoint given: start from randomly initialized weights.
        model = model_class(config=config)

    word_align(args, model, tokenizer)


if __name__ == "__main__":
    main()
| 42.580645 | 285 | 0.660281 |
import argparse
import random
import itertools
import os
import numpy as np
import torch
from tqdm import trange
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, SequentialSampler
import modeling
from configuration_bert import BertConfig
from modeling import BertForMaskedLM
from tokenization_bert import BertTokenizer
from tokenization_utils import PreTrainedTokenizer
from modeling_utils import PreTrainedModel
def set_seed(args):
    """Seed python/NumPy/torch RNGs from ``args.seed``; a negative seed is a no-op."""
    if args.seed >= 0:
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
class LineByLineTextDataset(Dataset):
    """Parallel-corpus dataset: one ``src ||| tgt`` sentence pair per line.

    Each example is ``(ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt)``
    — subword-id tensors plus, for each subword position, the index of the
    word it came from.
    """
    def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path):
        """Read and pre-tokenize ``file_path``; raises ValueError on malformed lines."""
        assert os.path.isfile(file_path)
        print('Loading the dataset...')
        self.examples = []
        with open(file_path, encoding="utf-8") as f:
            for idx, line in enumerate(f.readlines()):
                if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:
                    raise ValueError(f'Line {idx+1} is not in the correct format!')
                src, tgt = line.split(' ||| ')
                if src.rstrip() == '' or tgt.rstrip() == '':
                    raise ValueError(f'Line {idx+1} is not in the correct format!')
                sent_src, sent_tgt = src.strip().split(), tgt.strip().split()
                # Tokenize word-by-word so the subword->word mapping is known.
                token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]
                wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]
                ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids']
                bpe2word_map_src = []
                for i, word_list in enumerate(token_src):
                    bpe2word_map_src += [i for x in word_list]
                bpe2word_map_tgt = []
                for i, word_list in enumerate(token_tgt):
                    bpe2word_map_tgt += [i for x in word_list]
                self.examples.append( (ids_src[0], ids_tgt[0], bpe2word_map_src, bpe2word_map_tgt) )

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return self.examples[i]
def word_align(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, output_word_alignments = False):
    """Extract word alignments for every sentence pair in ``args.data_file``.

    Writes one line of ``i-j`` index pairs per input pair to
    ``args.output_file``; with ``output_word_alignments`` set, also writes a
    ``<output_file>.outtxt`` file with ``srcword-tgtword`` pairs.
    """
    def collate(examples):
        # Pad id tensors to the batch max length; maps stay as lists.
        ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = zip(*examples)
        ids_src = pad_sequence(ids_src, batch_first=True, padding_value=tokenizer.pad_token_id)
        ids_tgt = pad_sequence(ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id)
        return ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt

    dataset = LineByLineTextDataset(tokenizer, args, file_path=args.data_file)
    sampler = SequentialSampler(dataset)
    dataloader = DataLoader(
        dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate
    )

    model.to(args.device)
    model.eval()
    tqdm_iterator = trange(dataset.__len__(), desc="Extracting")
    with open(args.output_file, 'w') as writer:
        for batch in dataloader:
            with torch.no_grad():
                ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = batch
                word_aligns_list = model.get_aligned_word(ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt, args.device, 0, 0, align_layer=args.align_layer, extraction=args.extraction, softmax_threshold=args.softmax_threshold, test=True)
                for word_aligns in word_aligns_list:
                    output_str = []
                    for word_align in word_aligns:
                        output_str.append(f'{word_align[0]}-{word_align[1]}')
                    writer.write(' '.join(output_str)+'\n')
                tqdm_iterator.update(len(ids_src))

    if output_word_alignments:
        # Second pass: map index pairs back to the surface words.
        with open(args.output_file, 'r') as fh:
            outputf = (fh.read()).split("\n")
        with open(args.data_file, 'r') as fh:
            datalines = (fh.read()).split("\n")
        with open(args.output_file+".outtxt", 'w') as fwriter:
            for indices, line in zip(outputf, datalines):
                srcline, tgtline = line.split(' ||| ')
                indices = indices.split()
                srcwrds = srcline.split()
                tgtwrds = tgtline.split()
                output_wrds = []
                for wrd in indices:
                    srcix,tgtix = wrd.split("-")
                    srcix, tgtix = int(srcix), int(tgtix)
                    output_wrds.append(f"{srcwrds[srcix]}-{tgtwrds[tgtix]}")
                fwriter.write(' '.join(output_wrds)+'\n')
def main():
    """Command-line entry point: parse args, load model/tokenizer, run alignment."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
    )
    parser.add_argument(
        "--output_file",
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--align_layer", type=int, default=8, help="layer for alignment extraction")
    parser.add_argument(
        "--extraction", default='softmax', type=str, help='softmax or entmax15'
    )
    parser.add_argument(
        "--softmax_threshold", type=float, default=0.001
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
    )
    parser.add_argument(
        "--config_name",
        default=None,
        type=str,
        help="Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--batch_size", default=32, type=int)
    parser.add_argument(
        "--cache_dir",
        default='cache_dir',
        type=str,
        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.device = device
    set_seed(args)
    # Config/tokenizer resolution: explicit name wins, then the model path.
    config_class, model_class, tokenizer_class = BertConfig, BertForMaskedLM, BertTokenizer
    if args.config_name:
        config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)
    elif args.model_name_or_path:
        config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
    else:
        config = config_class()
    if args.tokenizer_name:
        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
    elif args.model_name_or_path:
        tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,"
            "and load it from here, using --tokenizer_name".format(tokenizer_class.__name__)
        )
    # Tell the modeling module which special-token ids this tokenizer uses.
    modeling.PAD_ID = tokenizer.pad_token_id
    modeling.CLS_ID = tokenizer.cls_token_id
    modeling.SEP_ID = tokenizer.sep_token_id
    if args.model_name_or_path:
        model = model_class.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            cache_dir=args.cache_dir,
        )
    else:
        # No checkpoint given: start from randomly initialized weights.
        model = model_class(config=config)
    word_align(args, model, tokenizer)

if __name__ == "__main__":
    main()
| true | true |
f72d8a16b6feb1fb6bd072d2c7ac219ebc166969 | 1,867 | py | Python | JianShuSpider/spiders/search_spider.py | xiaoshicae/Others | a5df75f1da527f94c1c79870a8f5ac7c9a7353c2 | [
"Apache-1.1"
] | null | null | null | JianShuSpider/spiders/search_spider.py | xiaoshicae/Others | a5df75f1da527f94c1c79870a8f5ac7c9a7353c2 | [
"Apache-1.1"
] | null | null | null | JianShuSpider/spiders/search_spider.py | xiaoshicae/Others | a5df75f1da527f94c1c79870a8f5ac7c9a7353c2 | [
"Apache-1.1"
] | null | null | null | # --*-- coding: utf-8 --*--
# --------------------------------------------------------------------------------
# Description:
#       search_spider pushes search-result entries into a redis queue, for page_spider to consume.
#       Splitting the crawl into two stages enables distributed, elastically scalable crawling.
# DATE:
# 2018/02/01
# BY:
# xiaoshicae
# --------------------------------------------------------------------------------
import re
import json
from scrapy.http import Request
from jianshu.scrapy_redis.spiders import RedisSpider
from jianshu.items import PageItem
class SearchSpider(RedisSpider):
    """Consume search URLs from redis and yield one PageItem slug per result.

    Flow: redis start_urls -> start_requests -> next_requests ->
    make_requests_from_url -> parse.  Overriding start_requests would break
    next_requests (which pulls work from redis), so only
    make_requests_from_url is customized.
    """
    name = "SearchSpider"
    redis_key = "%s:start_urls" % name

    def make_requests_from_url(self, url):
        # The search endpoint only answers POST; dont_filter=True bypasses
        # the dupefilter so re-queued searches are always re-crawled.
        return Request(url, method='POST', dont_filter=True)

    def parse(self, response):
        """Parse page 1 of a search result and schedule the remaining pages."""
        # BUGFIX: parse the response body once (it was decoded twice).
        json_resp = json.loads(response.text)
        total_page = json_resp.get('total_count', 1)
        search_word = json_resp.get('q', '')
        # BUGFIX: create a fresh PageItem per entry instead of mutating and
        # re-yielding one shared instance — every consumer previously held a
        # reference to the same object.
        for entry in json_resp.get('entries', []):
            item = PageItem()
            item['slug'] = entry.get('slug', '')
            yield item
        for page in range(2, total_page + 1):
            url = 'https://www.jianshu.com/search/do?q=%s&type=note&page=%d&order_by=default' % (search_word, page)
            yield Request(url, method='POST', callback=self.parse_entries)

    def parse_entries(self, response):
        """Parse a subsequent search-result page into PageItems."""
        json_resp = json.loads(response.text)
        for entry in json_resp.get('entries', []):
            item = PageItem()
            item['slug'] = entry.get('slug', '')
            yield item
| 33.945455 | 112 | 0.587574 |
import re
import json
from scrapy.http import Request
from jianshu.scrapy_redis.spiders import RedisSpider
from jianshu.items import PageItem
class SearchSpider(RedisSpider):
    """Consume search URLs from redis and yield one PageItem slug per result.

    NOTE(review): ``parse`` decodes the response body twice, and a single
    PageItem instance is mutated and re-yielded across entries — every
    consumer receives a reference to the same object.  Worth cleaning up.
    """
    name = "SearchSpider"
    redis_key = "%s:start_urls" % name
    def make_requests_from_url(self, url):
        # POST-only endpoint; dont_filter=True bypasses the dupefilter.
        return Request(url, method='POST', dont_filter=True)
    def parse(self, response):
        """Parse page 1 of a search result and schedule the remaining pages."""
        item = PageItem()
        json_resp = json.loads(response.text)
        total_page = json_resp.get('total_count', 1)
        search_word = json_resp.get('q', '')
        json_resp = json.loads(response.text)
        entries = json_resp.get('entries', [])
        for entry in entries:
            item['slug'] = entry.get('slug', '')
            yield item
        for i in range(2, total_page + 1):
            url = 'https://www.jianshu.com/search/do?q=%s&type=note&page=%d&order_by=default' % (search_word, i)
            yield Request(url, method='POST', callback=self.parse_entries)
    def parse_entries(self, response):
        """Parse a subsequent search-result page into PageItems."""
        item = PageItem()
        json_resp = json.loads(response.text)
        entries = json_resp.get('entries', [])
        for entry in entries:
            item['slug'] = entry.get('slug', '')
            yield item
| true | true |
f72d8a69e94886b1bc1362136e57364ff82afbb0 | 2,924 | gyp | Python | build/re2.gyp | nodenative/nodenative | cf988c9399e0793b1b8c29a8ffd09e910d1a0cb3 | [
"MIT"
] | 16 | 2016-03-16T22:16:18.000Z | 2021-04-05T04:46:38.000Z | build/re2.gyp | nodenative/nodenative | cf988c9399e0793b1b8c29a8ffd09e910d1a0cb3 | [
"MIT"
] | 11 | 2016-03-16T22:02:26.000Z | 2021-04-04T02:20:51.000Z | build/re2.gyp | nodenative/nodenative | cf988c9399e0793b1b8c29a8ffd09e910d1a0cb3 | [
"MIT"
] | 5 | 2016-03-22T14:03:34.000Z | 2021-01-06T18:08:46.000Z | {
'targets': [
{
'target_name': 're2',
'type': 'static_library',
'include_dirs': [
'../deps/re2',
],
'direct_dependent_settings': {
'include_dirs': [
'../deps/re2',
],
},
'sources': [
'../deps/re2/re2/bitmap256.h',
'../deps/re2/re2/bitstate.cc',
'../deps/re2/re2/compile.cc',
'../deps/re2/re2/dfa.cc',
'../deps/re2/re2/filtered_re2.cc',
'../deps/re2/re2/mimics_pcre.cc',
'../deps/re2/re2/nfa.cc',
'../deps/re2/re2/onepass.cc',
'../deps/re2/re2/parse.cc',
'../deps/re2/re2/perl_groups.cc',
'../deps/re2/re2/prefilter.cc',
'../deps/re2/re2/prefilter.h',
'../deps/re2/re2/prefilter_tree.cc',
'../deps/re2/re2/prefilter_tree.h',
'../deps/re2/re2/prog.cc',
'../deps/re2/re2/prog.h',
'../deps/re2/re2/re2.cc',
'../deps/re2/re2/regexp.cc',
'../deps/re2/re2/regexp.h',
'../deps/re2/re2/set.cc',
'../deps/re2/re2/simplify.cc',
'../deps/re2/re2/stringpiece.cc',
'../deps/re2/re2/tostring.cc',
'../deps/re2/re2/unicode_casefold.cc',
'../deps/re2/re2/unicode_casefold.h',
'../deps/re2/re2/unicode_groups.cc',
'../deps/re2/re2/unicode_groups.h',
'../deps/re2/re2/walker-inl.h',
'../deps/re2/util/flags.h',
'../deps/re2/util/logging.h',
'../deps/re2/util/mix.h',
'../deps/re2/util/mutex.h',
'../deps/re2/util/rune.cc',
'../deps/re2/util/sparse_array.h',
'../deps/re2/util/sparse_set.h',
'../deps/re2/util/strutil.cc',
'../deps/re2/util/strutil.h',
'../deps/re2/util/utf.h',
'../deps/re2/util/util.h',
],
'all_dependent_settings' : {
'cflags':[
'-std=c++14'
]
},
'cflags':[
'-std=c++14'
],
'conditions' : [
['OS=="mac"', {
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS' : ['-std=c++14', '-stdlib=libc++'],
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreServices.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework'
],
},
'cflags': [
'-stdlib=libc++'
],
'all_dependent_settings': {
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS' : ['-std=c++14', '-stdlib=libc++'],
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreServices.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework'
],
},
'cflags': [
'-stdlib=libc++'
],
},
},
]],
}
]
}
| 30.458333 | 79 | 0.461354 | {
'targets': [
{
'target_name': 're2',
'type': 'static_library',
'include_dirs': [
'../deps/re2',
],
'direct_dependent_settings': {
'include_dirs': [
'../deps/re2',
],
},
'sources': [
'../deps/re2/re2/bitmap256.h',
'../deps/re2/re2/bitstate.cc',
'../deps/re2/re2/compile.cc',
'../deps/re2/re2/dfa.cc',
'../deps/re2/re2/filtered_re2.cc',
'../deps/re2/re2/mimics_pcre.cc',
'../deps/re2/re2/nfa.cc',
'../deps/re2/re2/onepass.cc',
'../deps/re2/re2/parse.cc',
'../deps/re2/re2/perl_groups.cc',
'../deps/re2/re2/prefilter.cc',
'../deps/re2/re2/prefilter.h',
'../deps/re2/re2/prefilter_tree.cc',
'../deps/re2/re2/prefilter_tree.h',
'../deps/re2/re2/prog.cc',
'../deps/re2/re2/prog.h',
'../deps/re2/re2/re2.cc',
'../deps/re2/re2/regexp.cc',
'../deps/re2/re2/regexp.h',
'../deps/re2/re2/set.cc',
'../deps/re2/re2/simplify.cc',
'../deps/re2/re2/stringpiece.cc',
'../deps/re2/re2/tostring.cc',
'../deps/re2/re2/unicode_casefold.cc',
'../deps/re2/re2/unicode_casefold.h',
'../deps/re2/re2/unicode_groups.cc',
'../deps/re2/re2/unicode_groups.h',
'../deps/re2/re2/walker-inl.h',
'../deps/re2/util/flags.h',
'../deps/re2/util/logging.h',
'../deps/re2/util/mix.h',
'../deps/re2/util/mutex.h',
'../deps/re2/util/rune.cc',
'../deps/re2/util/sparse_array.h',
'../deps/re2/util/sparse_set.h',
'../deps/re2/util/strutil.cc',
'../deps/re2/util/strutil.h',
'../deps/re2/util/utf.h',
'../deps/re2/util/util.h',
],
'all_dependent_settings' : {
'cflags':[
'-std=c++14'
]
},
'cflags':[
'-std=c++14'
],
'conditions' : [
['OS=="mac"', {
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS' : ['-std=c++14', '-stdlib=libc++'],
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreServices.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework'
],
},
'cflags': [
'-stdlib=libc++'
],
'all_dependent_settings': {
'xcode_settings': {
'OTHER_CPLUSPLUSFLAGS' : ['-std=c++14', '-stdlib=libc++'],
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreServices.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework'
],
},
'cflags': [
'-stdlib=libc++'
],
},
},
]],
}
]
}
| true | true |
f72d8aaa5262c683925ab0a07528a4305056d570 | 15,149 | py | Python | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/core/application.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 26 | 2018-02-14T23:52:58.000Z | 2021-08-16T13:50:03.000Z | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/core/application.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/ipython-1.2.1-py27_0/lib/python2.7/site-packages/IPython/core/application.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 10 | 2018-08-13T19:38:39.000Z | 2020-04-19T03:02:00.000Z | # encoding: utf-8
"""
An application for IPython.
All top-level applications should use the classes in this module for
handling configuration and creating componenets.
The job of an :class:`Application` is to create the master configuration
object and then create the configurable objects, passing the config to them.
Authors:
* Brian Granger
* Fernando Perez
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import atexit
import glob
import logging
import os
import shutil
import sys
from IPython.config.application import Application, catch_config_error
from IPython.config.loader import ConfigFileNotFound
from IPython.core import release, crashhandler
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils import py3compat
from IPython.utils.path import get_ipython_dir, get_ipython_package_dir
from IPython.utils.traitlets import List, Unicode, Type, Bool, Dict, Set, Instance
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Base Application Class
#-----------------------------------------------------------------------------
# aliases and flags
base_aliases = {
'profile-dir' : 'ProfileDir.location',
'profile' : 'BaseIPythonApplication.profile',
'ipython-dir' : 'BaseIPythonApplication.ipython_dir',
'log-level' : 'Application.log_level',
'config' : 'BaseIPythonApplication.extra_config_file',
}
base_flags = dict(
debug = ({'Application' : {'log_level' : logging.DEBUG}},
"set log level to logging.DEBUG (maximize logging output)"),
quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
"set log level to logging.CRITICAL (minimize logging output)"),
init = ({'BaseIPythonApplication' : {
'copy_config_files' : True,
'auto_create' : True}
}, """Initialize profile with default config files. This is equivalent
to running `ipython profile create <profile>` prior to startup.
""")
)
class BaseIPythonApplication(Application):
name = Unicode(u'ipython')
description = Unicode(u'IPython: an enhanced interactive Python shell.')
version = Unicode(release.version)
aliases = Dict(base_aliases)
flags = Dict(base_flags)
classes = List([ProfileDir])
# Track whether the config_file has changed,
# because some logic happens only if we aren't using the default.
config_file_specified = Set()
config_file_name = Unicode()
def _config_file_name_default(self):
return self.name.replace('-','_') + u'_config.py'
def _config_file_name_changed(self, name, old, new):
if new != old:
self.config_file_specified.add(new)
# The directory that contains IPython's builtin profiles.
builtin_profile_dir = Unicode(
os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
)
config_file_paths = List(Unicode)
def _config_file_paths_default(self):
return [os.getcwdu()]
extra_config_file = Unicode(config=True,
help="""Path to an extra config file to load.
If specified, load this config file in addition to any other IPython config.
""")
def _extra_config_file_changed(self, name, old, new):
try:
self.config_files.remove(old)
except ValueError:
pass
self.config_file_specified.add(new)
self.config_files.append(new)
profile = Unicode(u'default', config=True,
help="""The IPython profile to use."""
)
def _profile_changed(self, name, old, new):
self.builtin_profile_dir = os.path.join(
get_ipython_package_dir(), u'config', u'profile', new
)
ipython_dir = Unicode(get_ipython_dir(), config=True,
help="""
The name of the IPython directory. This directory is used for logging
configuration (through profiles), history storage, etc. The default
is usually $HOME/.ipython. This options can also be specified through
the environment variable IPYTHONDIR.
"""
)
_in_init_profile_dir = False
profile_dir = Instance(ProfileDir)
def _profile_dir_default(self):
# avoid recursion
if self._in_init_profile_dir:
return
# profile_dir requested early, force initialization
self.init_profile_dir()
return self.profile_dir
overwrite = Bool(False, config=True,
help="""Whether to overwrite existing config files when copying""")
auto_create = Bool(False, config=True,
help="""Whether to create profile dir if it doesn't exist""")
config_files = List(Unicode)
def _config_files_default(self):
return [self.config_file_name]
copy_config_files = Bool(False, config=True,
help="""Whether to install the default config files into the profile dir.
If a new profile is being created, and IPython contains config files for that
profile, then they will be staged into the new directory. Otherwise,
default config files will be automatically generated.
""")
verbose_crash = Bool(False, config=True,
help="""Create a massive crash report when IPython encounters what may be an
internal error. The default is to append a short message to the
usual traceback""")
# The class to use as the crash handler.
crash_handler_class = Type(crashhandler.CrashHandler)
@catch_config_error
def __init__(self, **kwargs):
super(BaseIPythonApplication, self).__init__(**kwargs)
# ensure current working directory exists
try:
directory = os.getcwdu()
except:
# raise exception
self.log.error("Current working directory doesn't exist.")
raise
# ensure even default IPYTHONDIR exists
if not os.path.exists(self.ipython_dir):
self._ipython_dir_changed('ipython_dir', self.ipython_dir, self.ipython_dir)
#-------------------------------------------------------------------------
# Various stages of Application creation
#-------------------------------------------------------------------------
def init_crash_handler(self):
"""Create a crash handler, typically setting sys.excepthook to it."""
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
def unset_crashhandler():
sys.excepthook = sys.__excepthook__
atexit.register(unset_crashhandler)
def excepthook(self, etype, evalue, tb):
"""this is sys.excepthook after init_crashhandler
set self.verbose_crash=True to use our full crashhandler, instead of
a regular traceback with a short message (crash_handler_lite)
"""
if self.verbose_crash:
return self.crash_handler(etype, evalue, tb)
else:
return crashhandler.crash_handler_lite(etype, evalue, tb)
def _ipython_dir_changed(self, name, old, new):
str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
sys.getfilesystemencoding()
)
if str_old in sys.path:
sys.path.remove(str_old)
str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
sys.getfilesystemencoding()
)
sys.path.append(str_path)
if not os.path.isdir(new):
os.makedirs(new, mode=0o777)
readme = os.path.join(new, 'README')
if not os.path.exists(readme):
path = os.path.join(get_ipython_package_dir(), u'config', u'profile')
shutil.copy(os.path.join(path, 'README'), readme)
self.log.debug("IPYTHONDIR set to: %s" % new)
def load_config_file(self, suppress_errors=True):
"""Load the config file.
By default, errors in loading config are handled, and a warning
printed on screen. For testing, the suppress_errors option is set
to False, so errors will make tests fail.
"""
self.log.debug("Searching path %s for config files", self.config_file_paths)
base_config = 'ipython_config.py'
self.log.debug("Attempting to load config file: %s" %
base_config)
try:
Application.load_config_file(
self,
base_config,
path=self.config_file_paths
)
except ConfigFileNotFound:
# ignore errors loading parent
self.log.debug("Config file %s not found", base_config)
pass
for config_file_name in self.config_files:
if not config_file_name or config_file_name == base_config:
continue
self.log.debug("Attempting to load config file: %s" %
self.config_file_name)
try:
Application.load_config_file(
self,
config_file_name,
path=self.config_file_paths
)
except ConfigFileNotFound:
# Only warn if the default config file was NOT being used.
if config_file_name in self.config_file_specified:
msg = self.log.warn
else:
msg = self.log.debug
msg("Config file not found, skipping: %s", config_file_name)
except:
# For testing purposes.
if not suppress_errors:
raise
self.log.warn("Error loading config file: %s" %
self.config_file_name, exc_info=True)
def init_profile_dir(self):
"""initialize the profile dir"""
self._in_init_profile_dir = True
if self.profile_dir is not None:
# already ran
return
try:
# location explicitly specified:
location = self.config.ProfileDir.location
except AttributeError:
# location not specified, find by profile name
try:
p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
# not found, maybe create it (always create default profile)
if self.auto_create or self.profile == 'default':
try:
p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile: %r"%self.profile)
self.exit(1)
else:
self.log.info("Created profile dir: %r"%p.location)
else:
self.log.fatal("Profile %r not found."%self.profile)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%p.location)
else:
# location is fully specified
try:
p = ProfileDir.find_profile_dir(location, self.config)
except ProfileDirError:
# not found, maybe create it
if self.auto_create:
try:
p = ProfileDir.create_profile_dir(location, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile directory: %r"%location)
self.exit(1)
else:
self.log.info("Creating new profile dir: %r"%location)
else:
self.log.fatal("Profile directory %r not found."%location)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%location)
# if profile_dir is specified explicitly, set profile name
dir_name = os.path.basename(p.location)
if dir_name.startswith('profile_'):
self.profile = dir_name[8:]
self.profile_dir = p
self.config_file_paths.append(p.location)
self._in_init_profile_dir = False
def init_config_files(self):
"""[optionally] copy default config files into profile dir."""
# copy config files
path = self.builtin_profile_dir
if self.copy_config_files:
src = self.profile
cfg = self.config_file_name
if path and os.path.exists(os.path.join(path, cfg)):
self.log.warn("Staging %r from %s into %r [overwrite=%s]"%(
cfg, src, self.profile_dir.location, self.overwrite)
)
self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
else:
self.stage_default_config_file()
else:
# Still stage *bundled* config files, but not generated ones
# This is necessary for `ipython profile=sympy` to load the profile
# on the first go
files = glob.glob(os.path.join(path, '*.py'))
for fullpath in files:
cfg = os.path.basename(fullpath)
if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
# file was copied
self.log.warn("Staging bundled %s from %s into %r"%(
cfg, self.profile, self.profile_dir.location)
)
def stage_default_config_file(self):
"""auto generate default config file, and stage it into the profile."""
s = self.generate_config_file()
fname = os.path.join(self.profile_dir.location, self.config_file_name)
if self.overwrite or not os.path.exists(fname):
self.log.warn("Generating default config file: %r"%(fname))
with open(fname, 'w') as f:
f.write(s)
@catch_config_error
def initialize(self, argv=None):
# don't hook up crash handler before parsing command-line
self.parse_command_line(argv)
self.init_crash_handler()
if self.subapp is not None:
# stop here if subapp is taking over
return
cl_config = self.config
self.init_profile_dir()
self.init_config_files()
self.load_config_file()
# enforce cl-opts override configfile opts:
self.update_config(cl_config)
| 39.348052 | 110 | 0.582481 |
import atexit
import glob
import logging
import os
import shutil
import sys
from IPython.config.application import Application, catch_config_error
from IPython.config.loader import ConfigFileNotFound
from IPython.core import release, crashhandler
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils import py3compat
from IPython.utils.path import get_ipython_dir, get_ipython_package_dir
from IPython.utils.traitlets import List, Unicode, Type, Bool, Dict, Set, Instance
base_aliases = {
'profile-dir' : 'ProfileDir.location',
'profile' : 'BaseIPythonApplication.profile',
'ipython-dir' : 'BaseIPythonApplication.ipython_dir',
'log-level' : 'Application.log_level',
'config' : 'BaseIPythonApplication.extra_config_file',
}
base_flags = dict(
debug = ({'Application' : {'log_level' : logging.DEBUG}},
"set log level to logging.DEBUG (maximize logging output)"),
quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
"set log level to logging.CRITICAL (minimize logging output)"),
init = ({'BaseIPythonApplication' : {
'copy_config_files' : True,
'auto_create' : True}
}, """Initialize profile with default config files. This is equivalent
to running `ipython profile create <profile>` prior to startup.
""")
)
class BaseIPythonApplication(Application):
name = Unicode(u'ipython')
description = Unicode(u'IPython: an enhanced interactive Python shell.')
version = Unicode(release.version)
aliases = Dict(base_aliases)
flags = Dict(base_flags)
classes = List([ProfileDir])
config_file_specified = Set()
config_file_name = Unicode()
def _config_file_name_default(self):
return self.name.replace('-','_') + u'_config.py'
def _config_file_name_changed(self, name, old, new):
if new != old:
self.config_file_specified.add(new)
# The directory that contains IPython's builtin profiles.
builtin_profile_dir = Unicode(
os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
)
config_file_paths = List(Unicode)
def _config_file_paths_default(self):
return [os.getcwdu()]
extra_config_file = Unicode(config=True,
help="""Path to an extra config file to load.
If specified, load this config file in addition to any other IPython config.
""")
def _extra_config_file_changed(self, name, old, new):
try:
self.config_files.remove(old)
except ValueError:
pass
self.config_file_specified.add(new)
self.config_files.append(new)
profile = Unicode(u'default', config=True,
help="""The IPython profile to use."""
)
def _profile_changed(self, name, old, new):
self.builtin_profile_dir = os.path.join(
get_ipython_package_dir(), u'config', u'profile', new
)
ipython_dir = Unicode(get_ipython_dir(), config=True,
help="""
The name of the IPython directory. This directory is used for logging
configuration (through profiles), history storage, etc. The default
is usually $HOME/.ipython. This options can also be specified through
the environment variable IPYTHONDIR.
"""
)
_in_init_profile_dir = False
profile_dir = Instance(ProfileDir)
def _profile_dir_default(self):
if self._in_init_profile_dir:
return
self.init_profile_dir()
return self.profile_dir
overwrite = Bool(False, config=True,
help="""Whether to overwrite existing config files when copying""")
auto_create = Bool(False, config=True,
help="""Whether to create profile dir if it doesn't exist""")
config_files = List(Unicode)
def _config_files_default(self):
return [self.config_file_name]
copy_config_files = Bool(False, config=True,
help="""Whether to install the default config files into the profile dir.
If a new profile is being created, and IPython contains config files for that
profile, then they will be staged into the new directory. Otherwise,
default config files will be automatically generated.
""")
verbose_crash = Bool(False, config=True,
help="""Create a massive crash report when IPython encounters what may be an
internal error. The default is to append a short message to the
usual traceback""")
# The class to use as the crash handler.
crash_handler_class = Type(crashhandler.CrashHandler)
@catch_config_error
def __init__(self, **kwargs):
super(BaseIPythonApplication, self).__init__(**kwargs)
# ensure current working directory exists
try:
directory = os.getcwdu()
except:
# raise exception
self.log.error("Current working directory doesn't exist.")
raise
if not os.path.exists(self.ipython_dir):
self._ipython_dir_changed('ipython_dir', self.ipython_dir, self.ipython_dir)
def init_crash_handler(self):
self.crash_handler = self.crash_handler_class(self)
sys.excepthook = self.excepthook
def unset_crashhandler():
sys.excepthook = sys.__excepthook__
atexit.register(unset_crashhandler)
def excepthook(self, etype, evalue, tb):
if self.verbose_crash:
return self.crash_handler(etype, evalue, tb)
else:
return crashhandler.crash_handler_lite(etype, evalue, tb)
def _ipython_dir_changed(self, name, old, new):
str_old = py3compat.cast_bytes_py2(os.path.abspath(old),
sys.getfilesystemencoding()
)
if str_old in sys.path:
sys.path.remove(str_old)
str_path = py3compat.cast_bytes_py2(os.path.abspath(new),
sys.getfilesystemencoding()
)
sys.path.append(str_path)
if not os.path.isdir(new):
os.makedirs(new, mode=0o777)
readme = os.path.join(new, 'README')
if not os.path.exists(readme):
path = os.path.join(get_ipython_package_dir(), u'config', u'profile')
shutil.copy(os.path.join(path, 'README'), readme)
self.log.debug("IPYTHONDIR set to: %s" % new)
def load_config_file(self, suppress_errors=True):
self.log.debug("Searching path %s for config files", self.config_file_paths)
base_config = 'ipython_config.py'
self.log.debug("Attempting to load config file: %s" %
base_config)
try:
Application.load_config_file(
self,
base_config,
path=self.config_file_paths
)
except ConfigFileNotFound:
self.log.debug("Config file %s not found", base_config)
pass
for config_file_name in self.config_files:
if not config_file_name or config_file_name == base_config:
continue
self.log.debug("Attempting to load config file: %s" %
self.config_file_name)
try:
Application.load_config_file(
self,
config_file_name,
path=self.config_file_paths
)
except ConfigFileNotFound:
if config_file_name in self.config_file_specified:
msg = self.log.warn
else:
msg = self.log.debug
msg("Config file not found, skipping: %s", config_file_name)
except:
if not suppress_errors:
raise
self.log.warn("Error loading config file: %s" %
self.config_file_name, exc_info=True)
def init_profile_dir(self):
self._in_init_profile_dir = True
if self.profile_dir is not None:
return
try:
location = self.config.ProfileDir.location
except AttributeError:
try:
p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
if self.auto_create or self.profile == 'default':
try:
p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile: %r"%self.profile)
self.exit(1)
else:
self.log.info("Created profile dir: %r"%p.location)
else:
self.log.fatal("Profile %r not found."%self.profile)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%p.location)
else:
try:
p = ProfileDir.find_profile_dir(location, self.config)
except ProfileDirError:
if self.auto_create:
try:
p = ProfileDir.create_profile_dir(location, self.config)
except ProfileDirError:
self.log.fatal("Could not create profile directory: %r"%location)
self.exit(1)
else:
self.log.info("Creating new profile dir: %r"%location)
else:
self.log.fatal("Profile directory %r not found."%location)
self.exit(1)
else:
self.log.info("Using existing profile dir: %r"%location)
dir_name = os.path.basename(p.location)
if dir_name.startswith('profile_'):
self.profile = dir_name[8:]
self.profile_dir = p
self.config_file_paths.append(p.location)
self._in_init_profile_dir = False
def init_config_files(self):
path = self.builtin_profile_dir
if self.copy_config_files:
src = self.profile
cfg = self.config_file_name
if path and os.path.exists(os.path.join(path, cfg)):
self.log.warn("Staging %r from %s into %r [overwrite=%s]"%(
cfg, src, self.profile_dir.location, self.overwrite)
)
self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
else:
self.stage_default_config_file()
else:
files = glob.glob(os.path.join(path, '*.py'))
for fullpath in files:
cfg = os.path.basename(fullpath)
if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
self.log.warn("Staging bundled %s from %s into %r"%(
cfg, self.profile, self.profile_dir.location)
)
def stage_default_config_file(self):
s = self.generate_config_file()
fname = os.path.join(self.profile_dir.location, self.config_file_name)
if self.overwrite or not os.path.exists(fname):
self.log.warn("Generating default config file: %r"%(fname))
with open(fname, 'w') as f:
f.write(s)
@catch_config_error
def initialize(self, argv=None):
self.parse_command_line(argv)
self.init_crash_handler()
if self.subapp is not None:
# stop here if subapp is taking over
return
cl_config = self.config
self.init_profile_dir()
self.init_config_files()
self.load_config_file()
# enforce cl-opts override configfile opts:
self.update_config(cl_config)
| true | true |
f72d8c40013d261c62b6339a86bfa224ef3dfb81 | 141,438 | py | Python | conans/client/migrations_settings.py | VitaliiOsykovSC/conan | cc7d529e2c91b78490619482e867301e5fd78daa | [
"MIT"
] | null | null | null | conans/client/migrations_settings.py | VitaliiOsykovSC/conan | cc7d529e2c91b78490619482e867301e5fd78daa | [
"MIT"
] | null | null | null | conans/client/migrations_settings.py | VitaliiOsykovSC/conan | cc7d529e2c91b78490619482e867301e5fd78daa | [
"MIT"
] | null | null | null | settings_1_9_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0"]
watchOS:
version: ["4.0"]
tvOS:
version: ["11.0"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_9_1 = settings_1_9_0
settings_1_9_2 = settings_1_9_1
settings_1_10_0 = settings_1_9_2
settings_1_10_1 = settings_1_10_0
settings_1_10_2 = settings_1_10_1
settings_1_11_0 = settings_1_10_2
settings_1_11_1 = settings_1_11_0
settings_1_11_2 = settings_1_11_1
settings_1_11_3 = settings_1_11_2
settings_1_12_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_12_1 = settings_1_12_0
settings_1_12_2 = settings_1_12_1
settings_1_12_3 = settings_1_12_2
settings_1_12_4 = settings_1_12_3
settings_1_13_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_13_1 = settings_1_13_0
settings_1_13_2 = settings_1_13_1
settings_1_13_3 = settings_1_13_2
settings_1_13_4 = settings_1_13_3
settings_1_14_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
# Conan 1.14 patch releases shipped an unchanged settings.yml, so each patch
# level is aliased to the previous constant instead of duplicating the YAML.
settings_1_14_1 = settings_1_14_0
settings_1_14_2 = settings_1_14_1
settings_1_14_3 = settings_1_14_2
settings_1_14_4 = settings_1_14_3
settings_1_14_5 = settings_1_14_4
settings_1_14_6 = settings_1_14_5
settings_1_15_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
Emscripten:
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2",
"9"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# Conan 1.15 patch releases shipped an unchanged settings.yml; alias each
# patch level to the previous constant.
settings_1_15_1 = settings_1_15_0
settings_1_15_2 = settings_1_15_1
settings_1_15_3 = settings_1_15_2
settings_1_15_4 = settings_1_15_3
settings_1_15_5 = settings_1_15_4
settings_1_16_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_16_1 = settings_1_16_0
settings_1_17_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_17_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# settings.yml was unchanged from 1.17.1 through the whole 1.18 series, so
# every release here is an alias of the previous one (1.18.0 picks up 1.17.2).
settings_1_17_2 = settings_1_17_1
settings_1_18_0 = settings_1_17_2
settings_1_18_1 = settings_1_18_0
settings_1_18_2 = settings_1_18_1
settings_1_18_3 = settings_1_18_2
settings_1_18_4 = settings_1_18_3
settings_1_18_5 = settings_1_18_4
settings_1_19_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# Conan 1.19 patch releases shipped an unchanged settings.yml; alias each
# patch level to the previous constant.
settings_1_19_1 = settings_1_19_0
settings_1_19_2 = settings_1_19_1
settings_1_19_3 = settings_1_19_2
settings_1_20_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# Conan 1.20 patch releases shipped an unchanged settings.yml; alias each
# patch level to the previous constant.
settings_1_20_1 = settings_1_20_0
settings_1_20_2 = settings_1_20_1
settings_1_20_3 = settings_1_20_2
settings_1_20_4 = settings_1_20_3
settings_1_20_5 = settings_1_20_4
settings_1_21_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# 1.21 patch releases and the whole 1.22 series shipped an unchanged
# settings.yml.
# NOTE(review): 1.22.0 is mapped to 1.21.2, NOT 1.21.3 - presumably because
# 1.21.3 was released after 1.22.0 so 1.22.0 could not contain its text;
# confirm against the Conan release history before "fixing" this.
settings_1_21_1 = settings_1_21_0
settings_1_21_2 = settings_1_21_1
settings_1_21_3 = settings_1_21_2
settings_1_22_0 = settings_1_21_2
settings_1_22_1 = settings_1_22_0
settings_1_22_2 = settings_1_22_1
settings_1_22_3 = settings_1_22_2
settings_1_23_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_24_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_24_1 = settings_1_24_0
settings_1_25_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2", "9.3",
"10"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_25_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_25_2 = settings_1_25_1
settings_1_26_0 = settings_1_25_2
settings_1_26_1 = settings_1_26_0
settings_1_27_0 = settings_1_26_1
settings_1_27_1 = settings_1_27_0
settings_1_28_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_28_1 = settings_1_28_0
settings_1_28_2 = settings_1_28_1
settings_1_29_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_29_1 = settings_1_29_0
settings_1_29_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_30_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_30_1 = settings_1_30_0
settings_1_30_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_31_0 = settings_1_30_2
settings_1_31_1 = settings_1_31_0
settings_1_31_2 = settings_1_31_1
settings_1_31_3 = settings_1_31_2
settings_1_31_4 = settings_1_31_3
settings_1_32_0 = settings_1_31_4
settings_1_32_1 = settings_1_32_0
settings_1_33_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
sdk: [None, "macosx"]
subsystem: [None, "Catalyst"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_33_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# Releases 1.34.0 through 1.35.2 carry settings content identical to
# 1.33.1's, so each per-release constant aliases the previous one.
settings_1_34_0 = settings_1_33_1
settings_1_34_1 = settings_1_34_0
settings_1_35_0 = settings_1_34_1
settings_1_35_1 = settings_1_35_0
settings_1_35_2 = settings_1_35_1
settings_1_36_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_37_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
# Releases 1.37.1 through 1.39.0 carry settings content identical to
# 1.37.0's, so each per-release constant aliases the previous one.
settings_1_37_1 = settings_1_37_0
settings_1_37_2 = settings_1_37_1
settings_1_38_0 = settings_1_37_2
settings_1_39_0 = settings_1_38_0
settings_1_40_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
# Release 1.40.1 carries settings content identical to 1.40.0's.
settings_1_40_1 = settings_1_40_0
settings_1_40_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
# Releases 1.40.3 and 1.40.4 carry settings content identical to 1.40.2's.
settings_1_40_3 = settings_1_40_2
settings_1_40_4 = settings_1_40_3
settings_1_41_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
intel-cc:
version: ["2021.1", "2021.2", "2021.3"]
update: [None, ANY]
mode: ["icx", "classic", "dpcpp"]
libcxx: [None, libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, static, dynamic]
runtime_type: [None, Debug, Release]
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_42_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3",
"11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
"14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
"7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
"15.0", "15.1"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
intel-cc:
version: ["2021.1", "2021.2", "2021.3"]
update: [None, ANY]
mode: ["icx", "classic", "dpcpp"]
libcxx: [None, libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, static, dynamic]
runtime_type: [None, Debug, Release]
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_42_1 = settings_1_42_0
settings_1_43_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3",
"11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
"14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
"7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
"15.0", "15.1"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1", "11.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20, 23]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
intel-cc:
version: ["2021.1", "2021.2", "2021.3"]
update: [None, ANY]
mode: ["icx", "classic", "dpcpp"]
libcxx: [None, libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, static, dynamic]
runtime_type: [None, Debug, Release]
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
| 47.718623 | 290 | 0.527977 | settings_1_9_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0"]
watchOS:
version: ["4.0"]
tvOS:
version: ["11.0"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_9_1 = settings_1_9_0
settings_1_9_2 = settings_1_9_1
settings_1_10_0 = settings_1_9_2
settings_1_10_1 = settings_1_10_0
settings_1_10_2 = settings_1_10_1
settings_1_11_0 = settings_1_10_2
settings_1_11_1 = settings_1_11_0
settings_1_11_2 = settings_1_11_1
settings_1_11_3 = settings_1_11_2
settings_1_12_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_12_1 = settings_1_12_0
settings_1_12_2 = settings_1_12_1
settings_1_12_3 = settings_1_12_2
settings_1_12_4 = settings_1_12_3
settings_1_13_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_13_1 = settings_1_13_0
settings_1_13_2 = settings_1_13_1
settings_1_13_3 = settings_1_13_2
settings_1_13_4 = settings_1_13_3
settings_1_14_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
"""
settings_1_14_1 = settings_1_14_0
settings_1_14_2 = settings_1_14_1
settings_1_14_3 = settings_1_14_2
settings_1_14_4 = settings_1_14_3
settings_1_14_5 = settings_1_14_4
settings_1_14_6 = settings_1_14_5
settings_1_15_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
Arduino:
board: ANY
Emscripten:
arch: [x86, x86_64, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2",
"9"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_15_1 = settings_1_15_0
settings_1_15_2 = settings_1_15_1
settings_1_15_3 = settings_1_15_2
settings_1_15_4 = settings_1_15_3
settings_1_15_5 = settings_1_15_4
settings_1_16_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_16_1 = settings_1_16_0
settings_1_17_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_17_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_17_2 = settings_1_17_1
settings_1_18_0 = settings_1_17_2
settings_1_18_1 = settings_1_18_0
settings_1_18_2 = settings_1_18_1
settings_1_18_3 = settings_1_18_2
settings_1_18_4 = settings_1_18_3
settings_1_18_5 = settings_1_18_4
settings_1_19_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_19_1 = settings_1_19_0
settings_1_19_2 = settings_1_19_1
settings_1_19_3 = settings_1_19_2
settings_1_20_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_20_1 = settings_1_20_0
settings_1_20_2 = settings_1_20_1
settings_1_20_3 = settings_1_20_2
settings_1_20_4 = settings_1_20_3
settings_1_20_5 = settings_1_20_4
settings_1_21_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_21_1 = settings_1_21_0
settings_1_21_2 = settings_1_21_1
settings_1_21_3 = settings_1_21_2
settings_1_22_0 = settings_1_21_2
settings_1_22_1 = settings_1_22_0
settings_1_22_2 = settings_1_22_1
settings_1_22_3 = settings_1_22_2
settings_1_23_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_24_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_24_1 = settings_1_24_0
settings_1_25_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2", "7.3", "7.4",
"8", "8.1", "8.2", "8.3",
"9", "9.1", "9.2", "9.3",
"10"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_25_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_25_2 = settings_1_25_1
settings_1_26_0 = settings_1_25_2
settings_1_26_1 = settings_1_26_0
settings_1_27_0 = settings_1_26_1
settings_1_27_1 = settings_1_27_0
settings_1_28_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_28_1 = settings_1_28_0
settings_1_28_2 = settings_1_28_1
settings_1_29_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_29_1 = settings_1_29_0
settings_1_29_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_30_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_30_1 = settings_1_30_0
settings_1_30_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_31_0 = settings_1_30_2
settings_1_31_1 = settings_1_31_0
settings_1_31_2 = settings_1_31_1
settings_1_31_3 = settings_1_31_2
settings_1_31_4 = settings_1_31_3
settings_1_32_0 = settings_1_31_4
settings_1_32_1 = settings_1_32_0
settings_1_33_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0"]
sdk: [None, "macosx"]
subsystem: [None, "Catalyst"]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
settings_1_33_1 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20] # Deprecated, use compiler.cppstd
"""
# Releases 1.34.0 through 1.35.2 shipped no changes to the default settings.yml,
# so each one simply aliases the previous snapshot (chaining back to 1.33.1).
settings_1_34_0 = settings_1_33_1
settings_1_34_1 = settings_1_34_0
settings_1_35_0 = settings_1_34_1
settings_1_35_1 = settings_1_35_0
settings_1_35_2 = settings_1_35_1
settings_1_36_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_37_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
# Releases 1.37.1 through 1.39.0 shipped no settings changes; each aliases the
# previous snapshot (chaining back to 1.37.0).
settings_1_37_1 = settings_1_37_0
settings_1_37_2 = settings_1_37_1
settings_1_38_0 = settings_1_37_2
settings_1_39_0 = settings_1_38_0
settings_1_40_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_40_1 = settings_1_40_0
settings_1_40_2 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
# 1.40.3 and 1.40.4: no settings changes relative to 1.40.2.
settings_1_40_3 = settings_1_40_2
settings_1_40_4 = settings_1_40_3
settings_1_41_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4", "13.0"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
intel-cc:
version: ["2021.1", "2021.2", "2021.3"]
update: [None, ANY]
mode: ["icx", "classic", "dpcpp"]
libcxx: [None, libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, static, dynamic]
runtime_type: [None, Debug, Release]
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
settings_1_42_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3",
"11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
"14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
"7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
"15.0", "15.1"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
intel-cc:
version: ["2021.1", "2021.2", "2021.3"]
update: [None, ANY]
mode: ["icx", "classic", "dpcpp"]
libcxx: [None, libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, static, dynamic]
runtime_type: [None, Debug, Release]
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
# Conan 1.42.1 shipped with the same default settings.yml as 1.42.0, so reuse it.
settings_1_42_1 = settings_1_42_0
settings_1_43_0 = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS, AIX]
arch_build: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, AIX, Arduino, Neutrino]
arch_target: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
WindowsCE:
platform: ANY
version: ["5.0", "6.0", "7.0", "8.0"]
Linux:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
subsystem: [None, catalyst]
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3",
"11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
"14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
sdk: [None, "iphoneos", "iphonesimulator"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
"7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
sdk: [None, "watchos", "watchsimulator"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
"15.0", "15.1"]
sdk: [None, "appletvos", "appletvsimulator"]
FreeBSD:
SunOS:
AIX:
Arduino:
board: ANY
Emscripten:
Neutrino:
version: ["6.4", "6.5", "6.6", "7.0", "7.1"]
arch: [x86, x86_64, ppc32be, ppc32, ppc64le, ppc64, armv4, armv4i, armv5el, armv5hf, armv6, armv7, armv7hf, armv7s, armv7k, armv8, armv8_32, armv8.3, sparc, sparcv9, mips, mips64, avr, s390, s390x, asm.js, wasm, sh4le, e2k-v2, e2k-v3, e2k-v4, e2k-v5, e2k-v6, e2k-v7, xtensalx6, xtensalx106]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14", "5.15"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc: &gcc
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4", "6.5",
"7", "7.1", "7.2", "7.3", "7.4", "7.5",
"8", "8.1", "8.2", "8.3", "8.4",
"9", "9.1", "9.2", "9.3",
"10", "10.1", "10.2", "10.3",
"11", "11.1", "11.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
Visual Studio: &visual_studio
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15", "16", "17"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp,
v140, v140_xp, v140_clang_c2, LLVM-vs2012, LLVM-vs2012_xp,
LLVM-vs2013, LLVM-vs2013_xp, LLVM-vs2014, LLVM-vs2014_xp,
LLVM-vs2017, LLVM-vs2017_xp, v141, v141_xp, v141_clang_c2, v142,
llvm, ClangCL, v143]
cppstd: [None, 14, 17, 20, 23]
msvc:
version: ["19.0",
"19.1", "19.10", "19.11", "19.12", "19.13", "19.14", "19.15", "19.16",
"19.2", "19.20", "19.21", "19.22", "19.23", "19.24", "19.25", "19.26", "19.27", "19.28", "19.29",
"19.3", "19.30"]
runtime: [static, dynamic]
runtime_type: [Debug, Release]
cppstd: [14, 17, 20, 23]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0",
"5.0", "6.0", "7.0", "7.1",
"8", "9", "10", "11", "12", "13"]
libcxx: [None, libstdc++, libstdc++11, libc++, c++_shared, c++_static]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
version: ["11", "12", "13", "14", "15", "16", "17", "18", "19", "19.1"]
update: [None, ANY]
base:
gcc:
<<: *gcc
threads: [None]
exception: [None]
Visual Studio:
<<: *visual_studio
apple-clang:
<<: *apple_clang
intel-cc:
version: ["2021.1", "2021.2", "2021.3"]
update: [None, ANY]
mode: ["icx", "classic", "dpcpp"]
libcxx: [None, libstdc++, libstdc++11, libc++]
cppstd: [None, 98, gnu98, 03, gnu03, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, static, dynamic]
runtime_type: [None, Debug, Release]
qcc:
version: ["4.4", "5.4", "8.3"]
libcxx: [cxx, gpp, cpp, cpp-ne, accp, acpp-ne, ecpp, ecpp-ne]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
mcst-lcc:
version: ["1.19", "1.20", "1.21", "1.22", "1.23", "1.24", "1.25"]
base:
gcc:
<<: *gcc
threads: [None]
exceptions: [None]
build_type: [None, Debug, Release, RelWithDebInfo, MinSizeRel]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23] # Deprecated, use compiler.cppstd
"""
| true | true |
f72d8ce087b0b710f68d7f8b8da4bd9a2dd64c10 | 13,199 | py | Python | python/qpid_dispatch_internal/policy/policy_util.py | bartoval/skupper-router | b62f8376f2e2d4fb78a92bd8916b43b857ab48cc | [
"Apache-2.0"
] | null | null | null | python/qpid_dispatch_internal/policy/policy_util.py | bartoval/skupper-router | b62f8376f2e2d4fb78a92bd8916b43b857ab48cc | [
"Apache-2.0"
] | null | null | null | python/qpid_dispatch_internal/policy/policy_util.py | bartoval/skupper-router | b62f8376f2e2d4fb78a92bd8916b43b857ab48cc | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
import socket
import binascii
#
#
class PolicyError(Exception):
    """Raised when a policy setting or host specification is invalid."""

    def __init__(self, value):
        # Keep the offending value so callers can inspect the raw object.
        self.value = value

    def __str__(self):
        return str(self.value)
def is_ipv6_enabled():
    """
    Return True if IPv6 is usable on this host, False otherwise.

    Usability is probed by binding a TCP socket to the IPv6 loopback
    address; any failure (no IPv6 stack, unsupported address family,
    loopback not configured, ...) means IPv6 is treated as disabled.
    """
    try:
        sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    except Exception:
        # Address family not available at all.
        return False
    try:
        sock.bind(('::1', 0))
        return True
    except Exception:
        return False
    finally:
        # BUG FIX: the original code leaked the socket descriptor when
        # bind() raised; always close it, success or failure.
        try:
            sock.close()
        except Exception:
            pass
class HostStruct:
    """
    HostStruct represents a single, binary socket address from getaddrinfo
    - name : name given to constructor; numeric IP or host name
    - saddr : net name resolved by getaddrinfo; numeric IP
    - family : saddr.family; int
    - binary : saddr packed binary address; binary string
    """
    # Address families accepted by policy host specs.  IPv6 is included
    # only when the host network stack supports it.
    families = [socket.AF_INET]
    famnames = ["IPv4"]
    if is_ipv6_enabled():
        families.append(socket.AF_INET6)
        famnames.append("IPv6")

    def __init__(self, hostname):
        """
        Given a host name text string, resolve and store its socket info.
        @param[in] hostname host IP address or name to parse
        Raises PolicyError when the name does not resolve, resolves to
        more than one distinct address, or resolves only to an
        unsupported address family.
        """
        try:
            res = socket.getaddrinfo(hostname, 0, socket.AF_UNSPEC,
                                     socket.SOCK_STREAM)
            if len(res) == 0:
                raise PolicyError("HostStruct: '%s' did not resolve to an IP address" % hostname)
            saddr = ""
            sfamily = socket.AF_UNSPEC
            for family, _socktype, _proto, _canonname, sockaddr in res:
                if family not in self.families:
                    continue
                if sfamily == socket.AF_UNSPEC:
                    # First usable result defines the canonical address.
                    saddr = sockaddr[0]
                    sfamily = family
                elif saddr != sockaddr[0] or sfamily != family:
                    raise PolicyError("HostStruct: '%s' resolves to multiple IP addresses" %
                                      hostname)
            if sfamily == socket.AF_UNSPEC:
                raise PolicyError("HostStruct: '%s' did not resolve to one of the supported address family" %
                                  hostname)
            self.name = hostname
            self.saddr = saddr
            self.family = sfamily
            # BUG FIX: pack using the *selected* family (sfamily).  The
            # original code used the loop variable left over from the last
            # getaddrinfo result, which can differ from the chosen family
            # when mixed-family results are returned.
            self.binary = socket.inet_pton(sfamily, saddr)
        except PolicyError:
            # BUG FIX: re-raise our own diagnostics unchanged instead of
            # double-wrapping them in a generic "failed to resolve" error.
            raise
        except Exception as e:
            raise PolicyError("HostStruct: '%s' failed to resolve: '%s'" %
                              (hostname, e))

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.__str__()

    def dump(self):
        """Return a diagnostic string showing all resolved fields."""
        return ("(%s, %s, %s, %s)" %
                (self.name,
                 self.saddr,
                 "AF_INET" if self.family == socket.AF_INET else "AF_INET6",
                 binascii.hexlify(self.binary)))
#
#
class HostAddr:
    """
    Provide HostIP address ranges and comparison functions.
    A HostIP may be:
    - a single address:     10.10.1.1
    - a pair of addresses:  10.10.0.0,10.10.255.255
    - a wildcard:           *
    Only IPv4 and IPv6 are supported; no unix sockets.
    Host names must resolve to a single IP address.
    An address pair defines an inclusive range: both addresses must be in
    the same family (IPv4 or IPv6) and the numerically lower address must
    come first.  The wildcard '*' matches every IPv4 or IPv6 address.
    IPv6 support is conditional on the underlying OS network options.
    Raises PolicyError on any validation error in the constructor.
    """

    def __init__(self, hostspec, separator=","):
        """
        Parse a host spec into binary structures used for comparisons,
        enforcing the usage rules documented on the class.
        """
        self.hoststructs = []
        self.wildcard = (hostspec == "*")
        if self.wildcard:
            return
        names = [part.strip() for part in hostspec.split(separator)]
        # A spec is either a single host or a low,high pair.
        if len(names) not in (1, 2):
            raise PolicyError("hostspec must contain 1 or 2 host names")
        self.hoststructs.append(HostStruct(names[0]))
        if len(names) == 2:
            self.hoststructs.append(HostStruct(names[1]))
            lo, hi = self.hoststructs
            if lo.family != hi.family:
                raise PolicyError("mixed IPv4 and IPv6 host specs in range not allowed")
            if self.memcmp(lo.binary, hi.binary) > 0:
                raise PolicyError("host specs in range must have lower numeric address first")

    def __str__(self):
        if self.wildcard:
            return "*"
        return ",".join(hs.name for hs in self.hoststructs)

    def __repr__(self):
        return self.__str__()

    def dump(self):
        """Return a diagnostic string for this spec."""
        if self.wildcard:
            return "(*)"
        return "(" + ",".join(hs.dump() for hs in self.hoststructs) + ")"

    def memcmp(self, a, b):
        """Byte-wise three-way comparison: -1 if a < b, 1 if a > b, else 0."""
        for i in range(len(a)):
            if a[i] < b[i]:
                return -1
            if a[i] > b[i]:
                return 1
        return 0

    def match_bin(self, candidate):
        """
        Does the candidate hoststruct match the IP or range of IP addresses represented by this?
        @param[in] candidate the IP address to be tested
        @return candidate matches this or not
        """
        if self.wildcard:
            return True
        try:
            low = self.hoststructs[0]
            if candidate.family != low.family:
                # A different address family can never match.
                return False
            cmp_low = self.memcmp(candidate.binary, low.binary)
            if len(self.hoststructs) == 1:
                return cmp_low == 0
            cmp_high = self.memcmp(candidate.binary, self.hoststructs[1].binary)
            return cmp_low >= 0 and cmp_high <= 0
        except PolicyError:
            return False
        except Exception as e:
            assert isinstance(candidate, HostStruct), \
                ("Wrong type. Expected HostStruct but received %s" % candidate.__class__.__name__)
            return False

    def match_str(self, candidate):
        """
        Does the candidate string match the IP or range represented by this?
        @param[in] candidate the IP address to be tested
        @return candidate matches this or not
        """
        try:
            hs = HostStruct(candidate)
        except PolicyError:
            return False
        return self.match_bin(hs)
#
#
class PolicyAppConnectionMgr:
    """
    Track policy user/host connection limits and statistics for one app.
    # limits - set at creation and by update()
    max_total : 20
    max_per_user : 5
    max_per_host : 10
    # statistics - maintained for the lifetime of corresponding application
    connections_approved : N
    connections_denied : N
    # live state - maintained for the lifetime of corresponding application
    connections_active : 5
    per_host_state : { 'host1' : [conn1, conn2, conn3],
                       'host2' : [conn4, conn5] }
    per_user_state : { 'user1' : [conn1, conn2, conn3],
                       'user2' : [conn4, conn5] }
    """

    def __init__(self, maxconn, maxconnperuser, maxconnperhost):
        """
        The object is constructed with the policy limits and zeroed counts.
        @param[in] maxconn maximum total concurrent connections
        @param[in] maxconnperuser maximum total concurrent connections for each user
        @param[in] maxconnperhost maximum total concurrent connections for each host
        Raises PolicyError when any limit is negative.
        """
        self._validate_limits(maxconn, maxconnperuser, maxconnperhost)
        self.max_total = maxconn
        self.max_per_user = maxconnperuser
        self.max_per_host = maxconnperhost
        self.connections_approved = 0
        self.connections_denied = 0
        self.connections_active = 0
        self.per_host_state = {}
        self.per_user_state = {}

    @staticmethod
    def _validate_limits(maxconn, maxconnperuser, maxconnperhost):
        """Reject negative limits (shared by __init__ and update)."""
        if maxconn < 0 or maxconnperuser < 0 or maxconnperhost < 0:
            raise PolicyError("PolicyAppConnectionMgr settings must be >= 0")

    def __str__(self):
        # BUG FIX: the statistics and current-state lines were missing
        # their trailing newlines and ran together in the output.
        res = ("Connection Limits: total: %s, per user: %s, per host: %s\n" %
               (self.max_total, self.max_per_user, self.max_per_host))
        res += ("Connections Statistics: total approved: %s, total denied: %s\n" %
                (self.connections_approved, self.connections_denied))
        res += ("Connection State: total current: %s\n" % self.connections_active)
        res += ("User state: %s\n" % self.per_user_state)
        res += ("Host state: %s" % self.per_host_state)
        return res

    def __repr__(self):
        return self.__str__()

    def update(self, maxconn, maxconnperuser, maxconnperhost):
        """
        Reset connection limits
        @param[in] maxconn maximum total concurrent connections
        @param[in] maxconnperuser maximum total concurrent connections for each user
        @param[in] maxconnperhost maximum total concurrent connections for each host
        Raises PolicyError when any limit is negative.
        """
        self._validate_limits(maxconn, maxconnperuser, maxconnperhost)
        self.max_total = maxconn
        self.max_per_user = maxconnperuser
        self.max_per_host = maxconnperhost

    def can_connect(self, conn_id, user, host, diags, grp_max_user, grp_max_host):
        """
        Register a connection attempt.
        If all the connection limit rules pass then add the
        user/host to the connection tables.
        @param[in] conn_id unique ID for connection, usually IP:port
        @param[in] user authenticated user ID
        @param[in] host IP address of host
        @param[out] diags on failure holds 1, 2, or 3 error strings
        @param[in] grp_max_user per-user limit override (None = use app limit)
        @param[in] grp_max_host per-host limit override (None = use app limit)
        @return connection is allowed and tracked in state tables
        """
        n_user = len(self.per_user_state.get(user, ()))
        n_host = len(self.per_host_state.get(host, ()))
        # Group-level limits, when supplied, override the application limits.
        max_per_user = grp_max_user if grp_max_user is not None else self.max_per_user
        max_per_host = grp_max_host if grp_max_host is not None else self.max_per_host
        allowbytotal = self.connections_active < self.max_total
        allowbyuser = n_user < max_per_user
        allowbyhost = n_host < max_per_host
        if allowbytotal and allowbyuser and allowbyhost:
            self.per_user_state.setdefault(user, []).append(conn_id)
            self.per_host_state.setdefault(host, []).append(conn_id)
            self.connections_active += 1
            self.connections_approved += 1
            return True
        if not allowbytotal:
            diags.append("Connection denied by application connection limit")
        if not allowbyuser:
            diags.append("Connection denied by application per user limit")
        if not allowbyhost:
            diags.append("Connection denied by application per host limit")
        self.connections_denied += 1
        return False

    def disconnect(self, conn_id, user, host):
        """
        Unregister a connection.
        BUG FIX: prune per-user/per-host entries once their last connection
        goes away; the old code left empty lists behind forever, so the
        state tables grew without bound over the application's lifetime.
        """
        assert self.connections_active > 0
        assert user in self.per_user_state
        assert conn_id in self.per_user_state[user]
        assert conn_id in self.per_host_state[host]
        self.connections_active -= 1
        self.per_user_state[user].remove(conn_id)
        if not self.per_user_state[user]:
            del self.per_user_state[user]
        self.per_host_state[host].remove(conn_id)
        if not self.per_host_state[host]:
            del self.per_host_state[host]

    def count_other_denial(self):
        """
        Record the statistic for a connection denied by some other process
        @return:
        """
        self.connections_denied += 1
| 36.971989 | 109 | 0.598303 |
import socket
import binascii
class PolicyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def is_ipv6_enabled():
ipv6_enabled = True
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
sock.close()
except Exception as e:
ipv6_enabled = False
return ipv6_enabled
class HostStruct:
families = [socket.AF_INET]
famnames = ["IPv4"]
if is_ipv6_enabled():
families.append(socket.AF_INET6)
famnames.append("IPv6")
def __init__(self, hostname):
try:
res = socket.getaddrinfo(hostname, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
if len(res) == 0:
raise PolicyError("HostStruct: '%s' did not resolve to an IP address" % hostname)
foundFirst = False
saddr = ""
sfamily = socket.AF_UNSPEC
for i0 in range(0, len(res)):
family, dum0, dum1, dum2, sockaddr = res[i0]
if not foundFirst:
if family in self.families:
saddr = sockaddr[0]
sfamily = family
foundFirst = True
else:
if family in self.families:
if not saddr == sockaddr[0] or not sfamily == family:
raise PolicyError("HostStruct: '%s' resolves to multiple IP addresses" %
hostname)
if not foundFirst:
raise PolicyError("HostStruct: '%s' did not resolve to one of the supported address family" %
hostname)
self.name = hostname
self.saddr = saddr
self.family = sfamily
self.binary = socket.inet_pton(family, saddr)
return
except Exception as e:
raise PolicyError("HostStruct: '%s' failed to resolve: '%s'" %
(hostname, e))
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
def dump(self):
return ("(%s, %s, %s, %s)" %
(self.name,
self.saddr,
"AF_INET" if self.family == socket.AF_INET else "AF_INET6",
binascii.hexlify(self.binary)))
class HostAddr:
def __init__(self, hostspec, separator=","):
self.hoststructs = []
if hostspec == "*":
self.wildcard = True
else:
self.wildcard = False
hosts = [x.strip() for x in hostspec.split(separator)]
if len(hosts) not in [1, 2]:
raise PolicyError("hostspec must contain 1 or 2 host names")
self.hoststructs.append(HostStruct(hosts[0]))
if len(hosts) > 1:
self.hoststructs.append(HostStruct(hosts[1]))
if not self.hoststructs[0].family == self.hoststructs[1].family:
raise PolicyError("mixed IPv4 and IPv6 host specs in range not allowed")
c0 = self.memcmp(self.hoststructs[0].binary, self.hoststructs[1].binary)
if c0 > 0:
raise PolicyError("host specs in range must have lower numeric address first")
def __str__(self):
if self.wildcard:
return "*"
res = self.hoststructs[0].name
if len(self.hoststructs) > 1:
res += "," + self.hoststructs[1].name
return res
def __repr__(self):
return self.__str__()
def dump(self):
if self.wildcard:
return "(*)"
res = "(" + self.hoststructs[0].dump()
if len(self.hoststructs) > 1:
res += "," + self.hoststructs[1].dump()
res += ")"
return res
def memcmp(self, a, b):
res = 0
for i in range(0, len(a)):
if a[i] > b[i]:
res = 1
break
elif a[i] < b[i]:
res = -1
break
return res
def match_bin(self, candidate):
if self.wildcard:
return True
try:
if not candidate.family == self.hoststructs[0].family:
return False
c0 = self.memcmp(candidate.binary, self.hoststructs[0].binary)
if len(self.hoststructs) == 1:
return c0 == 0
c1 = self.memcmp(candidate.binary, self.hoststructs[1].binary)
return c0 >= 0 and c1 <= 0
except PolicyError:
return False
except Exception as e:
assert isinstance(candidate, HostStruct), \
("Wrong type. Expected HostStruct but received %s" % candidate.__class__.__name__)
return False
def match_str(self, candidate):
try:
hoststruct = HostStruct(candidate)
except PolicyError:
return False
return self.match_bin(hoststruct)
class PolicyAppConnectionMgr:
def __init__(self, maxconn, maxconnperuser, maxconnperhost):
if maxconn < 0 or maxconnperuser < 0 or maxconnperhost < 0:
raise PolicyError("PolicyAppConnectionMgr settings must be >= 0")
self.max_total = maxconn
self.max_per_user = maxconnperuser
self.max_per_host = maxconnperhost
self.connections_approved = 0
self.connections_denied = 0
self.connections_active = 0
self.per_host_state = {}
self.per_user_state = {}
def __str__(self):
res = ("Connection Limits: total: %s, per user: %s, per host: %s\n" %
(self.max_total, self.max_per_user, self.max_per_host))
res += ("Connections Statistics: total approved: %s, total denied: %s" %
(self.connections_approved, self.connections_denied))
res += ("Connection State: total current: %s" % self.connections_active)
res += ("User state: %s\n" % self.per_user_state)
res += ("Host state: %s" % self.per_host_state)
return res
def __repr__(self):
return self.__str__()
def update(self, maxconn, maxconnperuser, maxconnperhost):
if maxconn < 0 or maxconnperuser < 0 or maxconnperhost < 0:
raise PolicyError("PolicyAppConnectionMgr settings must be >= 0")
self.max_total = maxconn
self.max_per_user = maxconnperuser
self.max_per_host = maxconnperhost
def can_connect(self, conn_id, user, host, diags, grp_max_user, grp_max_host):
n_user = 0
if user in self.per_user_state:
n_user = len(self.per_user_state[user])
n_host = 0
if host in self.per_host_state:
n_host = len(self.per_host_state[host])
max_per_user = grp_max_user if grp_max_user is not None else self.max_per_user
max_per_host = grp_max_host if grp_max_host is not None else self.max_per_host
allowbytotal = self.connections_active < self.max_total
allowbyuser = n_user < max_per_user
allowbyhost = n_host < max_per_host
if allowbytotal and allowbyuser and allowbyhost:
if user not in self.per_user_state:
self.per_user_state[user] = []
self.per_user_state[user].append(conn_id)
if host not in self.per_host_state:
self.per_host_state[host] = []
self.per_host_state[host].append(conn_id)
self.connections_active += 1
self.connections_approved += 1
return True
else:
if not allowbytotal:
diags.append("Connection denied by application connection limit")
if not allowbyuser:
diags.append("Connection denied by application per user limit")
if not allowbyhost:
diags.append("Connection denied by application per host limit")
self.connections_denied += 1
return False
def disconnect(self, conn_id, user, host):
assert self.connections_active > 0
assert user in self.per_user_state
assert conn_id in self.per_user_state[user]
assert conn_id in self.per_host_state[host]
self.connections_active -= 1
self.per_user_state[user].remove(conn_id)
self.per_host_state[host].remove(conn_id)
def count_other_denial(self):
self.connections_denied += 1
| true | true |
f72d8cf23cc5609bf9167436c3ec295515e7bddb | 4,477 | py | Python | src/lightextclassification/imdb.py | duoan/light-text-classification | 6c96c9fb6b52abd42e4b4358cb85c44473731668 | [
"MIT"
] | 1 | 2021-03-20T20:59:57.000Z | 2021-03-20T20:59:57.000Z | src/lightextclassification/imdb.py | classtag/light-text-classification | 6c96c9fb6b52abd42e4b4358cb85c44473731668 | [
"MIT"
] | 244 | 2018-11-22T13:37:48.000Z | 2021-07-14T18:40:29.000Z | src/lightextclassification/imdb.py | duoan/light-text-classification | 6c96c9fb6b52abd42e4b4358cb85c44473731668 | [
"MIT"
] | 1 | 2018-11-22T12:03:13.000Z | 2018-11-22T12:03:13.000Z | # _*_ coding: utf-8 _*_
from argparse import ArgumentParser
import torch
from torchtext import data, datasets
from vocab import LocalVectors
from models import *
from torch.optim import SGD
from torch.utils.data import DataLoader
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from tqdm import tqdm
def get_data_loaders(batch_size=32,
                     vectors_path='/Users/duoan/nbs/quora-insincere-questions-classification/input/embeddings/glove.840B.300d/glove.840B.300d.txt'):
    """
    Build the IMDB train/validation/test iterators and embedding matrix.

    @param batch_size    number of examples per batch
    @param vectors_path  path to a local GloVe embedding file.  The default
                         is the original hard-coded location, kept for
                         backward compatibility; pass your own path to run
                         on another machine.
    @return (vocab_size, word_embeddings, train_iter, valid_iter, test_iter)
    """
    def tokenize(text):
        # Simple whitespace tokenizer.
        return text.split()

    TEXT = data.Field(
        sequential=True,
        tokenize=tokenize,
        lower=True,
        include_lengths=True,
        batch_first=True,
        fix_length=200)
    LABEL = data.LabelField(dtype=torch.float)
    print('Load IMDB dataset')
    train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
    print('TEXT build vocab')
    # Attach pre-trained word vectors loaded from the local embedding file.
    TEXT.build_vocab(train_data, vectors=LocalVectors(vectors_path))
    print('LABEL build vocab')
    LABEL.build_vocab(train_data)
    word_embeddings = TEXT.vocab.vectors
    print('Length of TEXT Vocabulary: {}'.format(len(TEXT.vocab)))
    print('Vector size of TEXT Vocabulary: {}'.format(TEXT.vocab.vectors.size()))
    print('LABEL Length: {}'.format(len(LABEL.vocab)))
    # Carve a validation split out of the training data.
    train_data, valid_data = train_data.split()
    train_iter, valid_iter, test_iter = data.BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=batch_size,
        sort_key=lambda x: len(x.text),
        repeat=False,
        shuffle=True)
    vocab_size = len(TEXT.vocab)
    print('finished get data loaders')
    return vocab_size, word_embeddings, train_iter, valid_iter, test_iter
def run(batch_size, epochs, lr, momentum, log_interval):
    """
    Train and evaluate an LSTM classifier on IMDB using pytorch-ignite.

    @param batch_size    examples per batch for the data iterators
    @param epochs        number of training epochs
    @param lr            SGD learning rate
    @param momentum      SGD momentum
    @param log_interval  refresh the progress bar every N iterations
    """
    vocab_size, word_embeddings, train_iter, valid_iter, test_iter = get_data_loaders(
        batch_size)
    # Positional args look like (batch_size, output_size, hidden_size,
    # vocab_size, embedding_length, weights) -- TODO confirm against the
    # LSTMClassifier definition in models.
    model = LSTMClassifier(32, 2, 256, vocab_size, 300, word_embeddings)
    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    # F comes in via the wildcard `models` import (presumably
    # torch.nn.functional -- verify).
    trainer = create_supervised_trainer(
        model, optimizer, F.nll_loss, device=device)
    evaluator = create_supervised_evaluator(
        model,
        metrics={
            'accuracy': Accuracy(),
            'nll': Loss(F.nll_loss)
        },
        device=device)
    desc = "ITERATION - loss: {:.2f}"
    # One shared progress bar, reset at the end of each epoch by the
    # validation handler below.
    pbar = tqdm(
        initial=0, leave=False, total=len(train_iter), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        # 1-based iteration index within the current epoch.
        # NOTE(review): `iter` shadows the builtin of the same name.
        iter = (engine.state.iteration - 1) % len(train_iter) + 1
        if iter % log_interval == 0:
            # engine.state.output is the loss returned by the trainer.
            pbar.desc = desc.format(engine.state.output)
            pbar.update(log_interval)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        # Re-evaluate on the full training set to report epoch metrics.
        pbar.refresh()
        evaluator.run(train_iter)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll))

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        # Evaluate on the held-out validation split.
        evaluator.run(valid_iter)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll))
        # Rewind the progress bar for the next epoch.
        pbar.n = pbar.last_print_n = 0

    trainer.run(train_iter, max_epochs=epochs)
    pbar.close()
if __name__ == "__main__":
    # Command-line entry point: parse hyper-parameters and launch training.
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--batch_size', type=int, default=64,
                            help='input batch size for training (default: 64)')
    arg_parser.add_argument('--val_batch_size', type=int, default=1000,
                            help='input batch size for validation (default: 1000)')
    arg_parser.add_argument('--epochs', type=int, default=10,
                            help='number of epochs to train (default: 10)')
    arg_parser.add_argument('--lr', type=float, default=0.01,
                            help='learning rate (default: 0.01)')
    arg_parser.add_argument('--momentum', type=float, default=0.5,
                            help='SGD momentum (default: 0.5)')
    arg_parser.add_argument('--log_interval', type=int, default=10,
                            help='how many batches to wait before logging training status')
    parsed = arg_parser.parse_args()
    run(parsed.batch_size, parsed.epochs, parsed.lr, parsed.momentum,
        parsed.log_interval)
| 30.875862 | 122 | 0.688184 |
from argparse import ArgumentParser
import torch
from torchtext import data, datasets
from vocab import LocalVectors
from models import *
from torch.optim import SGD
from torch.utils.data import DataLoader
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from tqdm import tqdm
def get_data_loaders(batch_size=32):
tokenize = lambda x: x.split()
TEXT = data.Field(
sequential=True,
tokenize=tokenize,
lower=True,
include_lengths=True,
batch_first=True,
fix_length=200)
LABEL = data.LabelField(dtype=torch.float)
print('Load IMDB dataset')
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
print('TEXT build vocab')
TEXT.build_vocab(
train_data,
vectors=LocalVectors(
'/Users/duoan/nbs/quora-insincere-questions-classification/input/embeddings/glove.840B.300d/glove.840B.300d.txt'
))
print('LABEL build vocab')
LABEL.build_vocab(train_data)
word_embeddings = TEXT.vocab.vectors
print('Length of TEXT Vocabulary: {}'.format(len(TEXT.vocab)))
print('Vector size of TEXT Vocabulary: {}'.format(TEXT.vocab.vectors.size()))
print('LABEL Length: {}'.format(len(LABEL.vocab)))
train_data, valid_data = train_data.split()
train_iter, valid_iter, test_iter = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size=batch_size,
sort_key=lambda x: len(x.text),
repeat=False,
shuffle=True)
vocab_size = len(TEXT.vocab)
print('finished get data loaders')
return vocab_size, word_embeddings, train_iter, valid_iter, test_iter
def run(batch_size, epochs, lr, momentum, log_interval):
    """Train the LSTM classifier with pytorch-ignite, logging via tqdm.

    Parameters
    ----------
    batch_size : int
        Batch size forwarded to ``get_data_loaders``.
    epochs : int
        Number of training epochs.
    lr : float
        SGD learning rate.
    momentum : float
        SGD momentum.
    log_interval : int
        Refresh the progress bar every this many iterations.
    """
    # test_iter is obtained but not used here (no test-set evaluation below).
    vocab_size, word_embeddings, train_iter, valid_iter, test_iter = get_data_loaders(
        batch_size)
    # NOTE(review): positional args assumed to be (batch_size, output_size,
    # hidden_size, vocab_size, embedding_length, weights) -- confirm against
    # the LSTMClassifier definition.
    model = LSTMClassifier(32, 2, 256, vocab_size, 300, word_embeddings)
    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    trainer = create_supervised_trainer(
        model, optimizer, F.nll_loss, device=device)
    evaluator = create_supervised_evaluator(
        model,
        metrics={
            'accuracy': Accuracy(),
            'nll': Loss(F.nll_loss)
        },
        device=device)
    desc = "ITERATION - loss: {:.2f}"
    pbar = tqdm(
        initial=0, leave=False, total=len(train_iter), desc=desc.format(0))

    @trainer.on(Events.ITERATION_COMPLETED)
    def log_training_loss(engine):
        # 1-based iteration index within the current epoch.
        # NOTE(review): `iter` shadows the builtin; harmless here but
        # worth renaming.
        iter = (engine.state.iteration - 1) % len(train_iter) + 1
        if iter % log_interval == 0:
            pbar.desc = desc.format(engine.state.output)
            pbar.update(log_interval)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        # Re-runs the evaluator over the full training set each epoch.
        pbar.refresh()
        evaluator.run(train_iter)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll))

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(valid_iter)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll))
        # Reset the bar counters so the next epoch starts from zero.
        pbar.n = pbar.last_print_n = 0

    trainer.run(train_iter, max_epochs=epochs)
    pbar.close()
if __name__ == "__main__":
    # Command-line entry point: parse hyperparameters and start training.
    # NOTE(review): --val_batch_size is accepted but never passed to run().
    arg_parser = ArgumentParser()
    for flag, kind, default, text in (
            ('--batch_size', int, 64,
             'input batch size for training (default: 64)'),
            ('--val_batch_size', int, 1000,
             'input batch size for validation (default: 1000)'),
            ('--epochs', int, 10,
             'number of epochs to train (default: 10)'),
            ('--lr', float, 0.01, 'learning rate (default: 0.01)'),
            ('--momentum', float, 0.5, 'SGD momentum (default: 0.5)'),
            ('--log_interval', int, 10,
             'how many batches to wait before logging training status')):
        arg_parser.add_argument(flag, type=kind, default=default, help=text)
    cli = arg_parser.parse_args()
    run(cli.batch_size, cli.epochs, cli.lr, cli.momentum, cli.log_interval)
| true | true |
f72d8d7b7cf9155185aabc026bb4c4602f0f19fc | 66,826 | py | Python | sympy/core/basic.py | MartinThoma/sympy | 009d0031bec7222ffa472e52148a2b4e441cd3a5 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/basic.py | MartinThoma/sympy | 009d0031bec7222ffa472e52148a2b4e441cd3a5 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/basic.py | MartinThoma/sympy | 009d0031bec7222ffa472e52148a2b4e441cd3a5 | [
"BSD-3-Clause"
] | null | null | null | """Base class for all the objects in SymPy"""
from collections import defaultdict
from itertools import chain, zip_longest
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import iterable, ordered, Mapping
from .singleton import S
from inspect import getmro
def as_Basic(expr):
    """Return ``expr`` as a Basic instance using strict sympify.

    A thin wrapper around ``_sympify`` that raises a TypeError instead
    of a SympifyError when conversion fails.
    """
    from sympy.utilities.misc import func_name
    try:
        result = _sympify(expr)
    except SympifyError:
        raise TypeError(
            'Argument must be a Basic object, not `%s`' % func_name(expr))
    return result
class Basic(metaclass=ManagedProperties):
"""
Base class for all objects in SymPy.
Conventions:
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ('_mhash', # hash value
'_args', # arguments
'_assumptions'
)
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_symbol = False
is_Indexed = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
is_MatAdd = False
is_MatMul = False
    def __new__(cls, *args):
        # Allocate directly; assumptions come from the class, the hash is
        # computed lazily, and args are stored as given.
        obj = object.__new__(cls)
        obj._assumptions = cls.default_assumptions
        obj._mhash = None  # will be set by __hash__ method.
        obj._args = args  # all items in args must be Basic objects
        return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
    @property
    def assumptions0(self):
        """
        Return object `type` assumptions.

        For example:

        Symbol('x', real=True)
        Symbol('x', integer=True)

        are different objects. In other words, besides Python type (Symbol in
        this case), the initial assumptions are also forming their typeinfo.

        Examples
        ========

        >>> from sympy import Symbol
        >>> from sympy.abc import x
        >>> x.assumptions0
        {'commutative': True}
        >>> x = Symbol("x", positive=True)
        >>> x.assumptions0
        {'commutative': True, 'complex': True, 'extended_negative': False,
         'extended_nonnegative': True, 'extended_nonpositive': False,
         'extended_nonzero': True, 'extended_positive': True, 'extended_real':
         True, 'finite': True, 'hermitian': True, 'imaginary': False,
         'infinite': False, 'negative': False, 'nonnegative': True,
         'nonpositive': False, 'nonzero': True, 'positive': True, 'real':
         True, 'zero': False}
        """
        # Basic itself has no type assumptions; subclasses override.
        return {}
    def compare(self, other):
        """
        Return -1, 0, 1 if the object is smaller, equal, or greater than other.

        Not in the mathematical sense. If the object is of a different type
        from the "other" then their classes are ordered according to
        the sorted_classes list.

        Examples
        ========

        >>> from sympy.abc import x, y
        >>> x.compare(y)
        -1
        >>> x.compare(x)
        0
        >>> y.compare(x)
        1
        """
        # all redefinitions of __cmp__ method should start with the
        # following lines:
        if self is other:
            return 0
        # first discriminant: the classes themselves
        n1 = self.__class__
        n2 = other.__class__
        c = (n1 > n2) - (n1 < n2)
        if c:
            return c
        # second discriminant: length of the hashable content
        st = self._hashable_content()
        ot = other._hashable_content()
        c = (len(st) > len(ot)) - (len(st) < len(ot))
        if c:
            return c
        # finally compare element-wise; frozensets are wrapped in Basic
        # so they compare deterministically
        for l, r in zip(st, ot):
            l = Basic(*l) if isinstance(l, frozenset) else l
            r = Basic(*r) if isinstance(r, frozenset) else r
            if isinstance(l, Basic):
                c = l.compare(r)
            else:
                c = (l > r) - (l < r)
            if c:
                return c
        return 0
    @staticmethod
    def _compare_pretty(a, b):
        """Return -1, 0, 1 ordering ``a`` and ``b`` for display purposes.

        Order terms always sort after non-Order terms; two Rationals are
        compared by cross-multiplication; otherwise, if both expressions
        match ``p1*p2**p3``, their exponents are compared first, falling
        back to ``Basic.compare``.
        """
        from sympy.series.order import Order
        if isinstance(a, Order) and not isinstance(b, Order):
            return 1
        if not isinstance(a, Order) and isinstance(b, Order):
            return -1

        if a.is_Rational and b.is_Rational:
            # compare a.p/a.q with b.p/b.q without division
            l = a.p * b.q
            r = b.p * a.q
            return (l > r) - (l < r)
        else:
            from sympy.core.symbol import Wild
            p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
            r_a = a.match(p1 * p2**p3)
            if r_a and p3 in r_a:
                a3 = r_a[p3]
                r_b = b.match(p1 * p2**p3)
                if r_b and p3 in r_b:
                    b3 = r_b[p3]
                    # order by exponent when both match the pattern
                    c = Basic.compare(a3, b3)
                    if c != 0:
                        return c

        return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
    @cacheit
    def sort_key(self, order=None):
        """
        Return a sort key.

        Examples
        ========

        >>> from sympy.core import S, I

        >>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
        [1/2, -I, I]

        >>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
        [x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
        >>> sorted(_, key=lambda x: x.sort_key())
        [x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
        """

        # XXX: remove this when issue 5169 is fixed
        def inner_key(arg):
            if isinstance(arg, Basic):
                return arg.sort_key(order)
            else:
                return arg

        # key is (class key, (nargs, arg keys), coefficient key, coefficient)
        args = self._sorted_args
        args = len(args), tuple([inner_key(arg) for arg in args])
        return self.class_key(), args, S.One.sort_key(), S.One
    def __eq__(self, other):
        """Return a boolean indicating whether a == b on the basis of
        their symbolic trees.

        This is the same as a.compare(b) == 0 but faster.

        Notes
        =====

        If a class that overrides __eq__() needs to retain the
        implementation of __hash__() from a parent class, the
        interpreter must be told this explicitly by setting __hash__ =
        <ParentClass>.__hash__. Otherwise the inheritance of __hash__()
        will be blocked, just as if __hash__ had been explicitly set to
        None.

        References
        ==========

        from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
        """
        if self is other:
            return True

        tself = type(self)
        tother = type(other)
        if tself is not tother:
            # try to coerce the other operand into a SymPy object
            try:
                other = _sympify(other)
                tother = type(other)
            except SympifyError:
                return NotImplemented

            # As long as we have the ordering of classes (sympy.core),
            # comparing types will be slow in Python 2, because it uses
            # __cmp__. Until we can remove it
            # (https://github.com/sympy/sympy/issues/4269), we only compare
            # types in Python 2 directly if they actually have __ne__.
            if type(tself).__ne__ is not type.__ne__:
                if tself != tother:
                    return False
            elif tself is not tother:
                return False

        return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""``a != b`` -> Compare two symbolic trees and see whether they are different
this is the same as:
``a.compare(b) != 0``
but faster
"""
return not self == other
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
s = self.as_dummy()
o = _sympify(other)
o = o.as_dummy()
dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]
if len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
return s == o
if symbol is None:
symbols = o.free_symbols
if len(symbols) == 1:
symbol = symbols.pop()
else:
return s == o
tmp = dummy.__class__()
return s.subs(dummy, tmp) == o.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
"""Method to return the string representation.
Return the expression as a string.
"""
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
    # We don't define _repr_png_ here because it would add a large amount of
    # data to any notebook containing SymPy expressions, without adding
    # anything useful to the notebook. It can still enabled manually, e.g.,
    # for the qtconsole, with init_printing().
    def _repr_latex_(self):
        """
        IPython/Jupyter LaTeX printing

        To change the behavior of this (e.g., pass in some settings to LaTeX),
        use init_printing(). init_printing() will also enable LaTeX printing
        for built in numeric types like ints and container types that contain
        SymPy objects, like lists and dictionaries of expressions.
        """
        from sympy.printing.latex import latex
        s = latex(self, mode='plain')
        return "$\\displaystyle %s$" % s

    # keep a handle on the original so init_printing can restore it
    _repr_latex_orig = _repr_latex_
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
{1, 2, I, pi, x, y}
If one or more types are given, the results will contain only
those types of atoms.
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
{x, y}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
{1, 2}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
{1, 2, pi}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
{1, 2, I, pi}
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
{x, y}
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
{1}
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
{1, 2}
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
{f(x), sin(y + I*pi)}
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
{f(x)}
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
{I*pi, 2*sin(y + I*pi)}
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
nodes = preorder_traversal(self)
if types:
result = {node for node in nodes if isinstance(node, types)}
else:
result = {node for node in nodes if not node.args}
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all
symbols except those. Derivative keeps track of symbols with respect
to which it will perform a derivative; those are
bound variables, too, so it has its own free_symbols method.
Any other method that uses bound variables should implement a
free_symbols method."""
return set().union(*[a.free_symbols for a in self.args])
    @property
    def expr_free_symbols(self):
        # A plain Basic contributes no expression-level free symbols;
        # subclasses override as needed.
        return set()
def as_dummy(self):
"""Return the expression with any objects having structurally
bound symbols replaced with unique, canonical symbols within
the object in which they appear and having only the default
assumption for commutativity being True.
Examples
========
>>> from sympy import Integral, Symbol
>>> from sympy.abc import x, y
>>> r = Symbol('r', real=True)
>>> Integral(r, (r, x)).as_dummy()
Integral(_0, (_0, x))
>>> _.variables[0].is_real is None
True
Notes
=====
Any object that has structural dummy variables should have
a property, `bound_symbols` that returns a list of structural
dummy symbols of the object itself.
Lambda and Subs have bound symbols, but because of how they
are cached, they already compare the same regardless of their
bound symbols:
>>> from sympy import Lambda
>>> Lambda(x, x + 1) == Lambda(y, y + 1)
True
"""
def can(x):
d = {i: i.as_dummy() for i in x.bound_symbols}
# mask free that shadow bound
x = x.subs(d)
c = x.canonical_variables
# replace bound
x = x.xreplace(c)
# undo masking
x = x.xreplace({v: k for k, v in d.items()})
return x
return self.replace(
lambda x: hasattr(x, 'bound_symbols'),
lambda x: can(x))
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.bound_symbols`` to Symbols that do not clash
with any existing symbol in the expression.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: _0}
"""
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import numbered_symbols
if not hasattr(self, 'bound_symbols'):
return {}
dums = numbered_symbols('_')
reps = {}
v = self.bound_symbols
# this free will include bound symbols that are not part of
# self's bound symbols
free = {i.name for i in self.atoms(Symbol) - set(v)}
for v in v:
d = next(dums)
if v.is_Symbol:
while v.name == d.name or d.name in free:
d = next(dums)
reps[v] = d
return reps
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
operators. For instance in SymPy the the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x, y, z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
"""Helper for rcall method."""
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
    @property
    def is_comparable(self):
        """Return True if self can be computed to a real number
        (or already is a real number) with precision, else False.

        Examples
        ========

        >>> from sympy import exp_polar, pi, I
        >>> (I*exp_polar(I*pi/2)).is_comparable
        True
        >>> (I*exp_polar(I*pi*2)).is_comparable
        False

        A False result does not mean that `self` cannot be rewritten
        into a form that would be comparable. For example, the
        difference computed below is zero but without simplification
        it does not evaluate to a zero with precision:

        >>> e = 2**pi*(1 + 2**pi)
        >>> dif = e - e.expand()
        >>> dif.is_comparable
        False
        >>> dif.n(2)._prec
        1
        """
        is_extended_real = self.is_extended_real
        if is_extended_real is False:
            return False
        if not self.is_number:
            return False
        # don't re-eval numbers that are already evaluated since
        # this will create spurious precision
        n, i = [p.evalf(2) if not p.is_Number else p
                for p in self.as_real_imag()]
        if not (i.is_Number and n.is_Number):
            return False
        if i:
            # if _prec = 1 we can't decide and if not,
            # the answer is False because numbers with
            # imaginary parts can't be compared
            # so return False
            return False
        else:
            # _prec == 1 marks an inexact zero-like value; anything else
            # is a real number known with precision
            return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
def as_content_primitive(self, radical=False, clear=True):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See Also
========
sympy.core.expr.Expr.as_content_primitive
"""
return S.One, self
    def subs(self, *args, **kwargs):
        """
        Substitutes old for new in an expression after sympifying args.

        `args` is either:
          - two arguments, e.g. foo.subs(old, new)
          - one iterable argument, e.g. foo.subs(iterable). The iterable may be
             o an iterable container with (old, new) pairs. In this case the
               replacements are processed in the order given with successive
               patterns possibly affecting replacements already made.
             o a dict or set whose key/value items correspond to old/new pairs.
               In this case the old/new pairs will be sorted by op count and in
               case of a tie, by number of args and the default_sort_key. The
               resulting sorted list is then processed as an iterable container
               (see previous).

        If the keyword ``simultaneous`` is True, the subexpressions will not be
        evaluated until all the substitutions have been made.

        Examples
        ========

        >>> from sympy import pi, exp, limit, oo
        >>> from sympy.abc import x, y
        >>> (1 + x*y).subs(x, pi)
        pi*y + 1
        >>> (1 + x*y).subs({x:pi, y:2})
        1 + 2*pi
        >>> (1 + x*y).subs([(x, pi), (y, 2)])
        1 + 2*pi
        >>> reps = [(y, x**2), (x, 2)]
        >>> (x + y).subs(reps)
        6
        >>> (x + y).subs(reversed(reps))
        x**2 + 2

        >>> (x**2 + x**4).subs(x**2, y)
        y**2 + y

        To replace only the x**2 but not the x**4, use xreplace:

        >>> (x**2 + x**4).xreplace({x**2: y})
        x**4 + y

        To delay evaluation until all substitutions have been made,
        set the keyword ``simultaneous`` to True:

        >>> (x/y).subs([(x, 0), (y, 0)])
        0
        >>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
        nan

        This has the added feature of not allowing subsequent substitutions
        to affect those already made:

        >>> ((x + y)/y).subs({x + y: y, y: x + y})
        1
        >>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
        y/(x + y)

        In order to obtain a canonical result, unordered iterables are
        sorted by count_op length, number of arguments and by the
        default_sort_key to break any ties. All other iterables are left
        unsorted.

        >>> from sympy import sqrt, sin, cos
        >>> from sympy.abc import a, b, c, d, e

        >>> A = (sqrt(sin(2*x)), a)
        >>> B = (sin(2*x), b)
        >>> C = (cos(2*x), c)
        >>> D = (x, d)
        >>> E = (exp(x), e)

        >>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)

        >>> expr.subs(dict([A, B, C, D, E]))
        a*c*sin(d*e) + b

        The resulting expression represents a literal replacement of the
        old arguments with the new arguments. This may not reflect the
        limiting behavior of the expression:

        >>> (x**3 - 3*x).subs({x: oo})
        nan

        >>> limit(x**3 - 3*x, x, oo)
        oo

        If the substitution will be followed by numerical
        evaluation, it is better to pass the substitution to
        evalf as

        >>> (1/x).evalf(subs={x: 3.0}, n=21)
        0.333333333333333333333

        rather than

        >>> (1/x).subs({x: 3.0}).evalf(21)
        0.333333333333333314830

        as the former will ensure that the desired level of precision is
        obtained.

        See Also
        ========

        replace: replacement capable of doing wildcard-like matching,
            parsing of match, and conditional replacements
        xreplace: exact node replacement in expr tree; also capable of
            using matching rules
        sympy.core.evalf.EvalfMixin.evalf: calculates the given formula to a desired level of precision
        """
        from sympy.core.containers import Dict
        from sympy.utilities.iterables import sift
        from sympy import Dummy, Symbol

        # normalize the arguments into a sequence of (old, new) pairs
        unordered = False
        if len(args) == 1:
            sequence = args[0]
            if isinstance(sequence, set):
                unordered = True
            elif isinstance(sequence, (Dict, Mapping)):
                unordered = True
                sequence = sequence.items()
            elif not iterable(sequence):
                from sympy.utilities.misc import filldedent
                raise ValueError(filldedent("""
                   When a single argument is passed to subs
                   it should be a dictionary of old: new pairs or an iterable
                   of (old, new) tuples."""))
        elif len(args) == 2:
            sequence = [args]
        else:
            raise ValueError("subs accepts either 1 or 2 arguments")

        sequence = list(sequence)
        for i, s in enumerate(sequence):
            if isinstance(s[0], str):
                # when old is a string we prefer Symbol
                s = Symbol(s[0]), s[1]
            try:
                s = [sympify(_, strict=not isinstance(_, str))
                     for _ in s]
            except SympifyError:
                # if it can't be sympified, skip it
                sequence[i] = None
                continue
            # skip if there is no change
            sequence[i] = None if _aresame(*s) else tuple(s)
        sequence = list(filter(None, sequence))

        if unordered:
            # canonical order: non-atoms (largest first), then atoms
            sequence = dict(sequence)
            atoms, nonatoms = sift(list(sequence),
                lambda x: x.is_Atom, binary=True)
            sequence = [(k, sequence[k]) for k in
                list(reversed(list(ordered(nonatoms)))) + list(ordered(atoms))]

        if kwargs.pop('simultaneous', False):  # XXX should this be the default for dict subs?
            reps = {}
            rv = self
            kwargs['hack2'] = True
            m = Dummy('subs_m')
            for old, new in sequence:
                com = new.is_commutative
                if com is None:
                    com = True
                d = Dummy('subs_d', commutative=com)
                # using d*m so Subs will be used on dummy variables
                # in things like Derivative(f(x, y), x) in which x
                # is both free and bound
                rv = rv._subs(old, d*m, **kwargs)
                if not isinstance(rv, Basic):
                    break
                reps[d] = new
            reps[m] = S.One  # get rid of m
            return rv.xreplace(reps)
        else:
            rv = self
            for old, new in sequence:
                rv = rv._subs(old, new, **kwargs)
                if not isinstance(rv, Basic):
                    break
            return rv
    @cacheit
    def _subs(self, old, new, **hints):
        """Substitutes an expression old -> new.

        If self is not equal to old then _eval_subs is called.
        If _eval_subs doesn't want to make any special replacement
        then a None is received which indicates that the fallback
        should be applied wherein a search for replacements is made
        amongst the arguments of self.

        >>> from sympy import Add
        >>> from sympy.abc import x, y, z

        Examples
        ========

        Add's _eval_subs knows how to target x + y in the following
        so it makes the change:

        >>> (x + y + z).subs(x + y, 1)
        z + 1

        Add's _eval_subs doesn't need to know how to find x + y in
        the following:

        >>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
        True

        The returned None will cause the fallback routine to traverse the args and
        pass the z*(x + y) arg to Mul where the change will take place and the
        substitution will succeed:

        >>> (z*(x + y) + 3).subs(x + y, 1)
        z + 3

        ** Developers Notes **

        An _eval_subs routine for a class should be written if:

            1) any arguments are not instances of Basic (e.g. bool, tuple);

            2) some arguments should not be targeted (as in integration
               variables);

            3) if there is something other than a literal replacement
               that should be attempted (as in Piecewise where the condition
               may be updated without doing a replacement).

        If it is overridden, here are some special cases that might arise:

            1) If it turns out that no special change was made and all
               the original sub-arguments should be checked for
               replacements then None should be returned.

            2) If it is necessary to do substitutions on a portion of
               the expression then _subs should be called. _subs will
               handle the case of any sub-expression being equal to old
               (which usually would not be the case) while its fallback
               will handle the recursion into the sub-arguments. For
               example, after Add's _eval_subs removes some matching terms
               it must process the remaining terms so it calls _subs
               on each of the un-matched terms and then adds them
               onto the terms previously obtained.

            3) If the initial expression should remain unchanged then
               the original expression should be returned. (Whenever an
               expression is returned, modified or not, no further
               substitution of old -> new is attempted.) Sum's _eval_subs
               routine uses this strategy when a substitution is attempted
               on any of its summation variables.
        """

        def fallback(self, old, new):
            """
            Try to replace old with new in any of self's arguments.
            """
            hit = False
            args = list(self.args)
            for i, arg in enumerate(args):
                if not hasattr(arg, '_eval_subs'):
                    continue
                arg = arg._subs(old, new, **hints)
                if not _aresame(arg, args[i]):
                    hit = True
                    args[i] = arg
            if hit:
                rv = self.func(*args)
                hack2 = hints.get('hack2', False)
                if hack2 and self.is_Mul and not rv.is_Mul:  # 2-arg hack
                    # rebuild as Mul(coeff, rest) without evaluation so the
                    # simultaneous-subs dummies survive
                    coeff = S.One
                    nonnumber = []
                    for i in args:
                        if i.is_Number:
                            coeff *= i
                        else:
                            nonnumber.append(i)
                    nonnumber = self.func(*nonnumber)
                    if coeff is S.One:
                        return nonnumber
                    else:
                        return self.func(coeff, nonnumber, evaluate=False)
                return rv
            return self

        if _aresame(self, old):
            return new

        rv = self._eval_subs(old, new)
        if rv is None:
            rv = fallback(self, old, new)
        return rv
    def _eval_subs(self, old, new):
        """Override this stub if you want to do anything more than
        attempt a replacement of old with new in the arguments of self.

        Returning None tells _subs to fall back to searching the
        arguments of self for replacements.

        See also
        ========

        _subs
        """
        return None
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x: pi, y: 2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) # doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
"""
Helper for xreplace. Tracks whether a replacement actually occurred.
"""
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
_xreplace = getattr(a, '_xreplace', None)
if _xreplace is not None:
a_xr = _xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
else:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note ``has`` is a structural algorithm with no knowledge of
mathematics. Consider the following half-open interval:
>>> from sympy.sets import Interval
>>> i = Interval.Lopen(0, 5); i
Interval.Lopen(0, 5)
>>> i.args
(0, 5, True, False)
>>> i.has(4) # there is no "4" in the arguments
False
>>> i.has(0) # there *is* a "0" in the arguments
True
Instead, use ``contains`` to determine whether a number is in the
interval or not:
>>> i.contains(4)
True
>>> i.contains(0)
False
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
    def _has(self, pattern):
        """Helper for .has(): dispatch on the kind of pattern given."""
        from sympy.core.function import UndefinedFunction, Function
        if isinstance(pattern, UndefinedFunction):
            # match an undefined function by head or exactly
            return any(f.func == pattern or f == pattern
                       for f in self.atoms(Function, UndefinedFunction))

        pattern = sympify(pattern)
        if isinstance(pattern, BasicMeta):
            # a class was given: look for instances of it
            return any(isinstance(arg, pattern)
                       for arg in preorder_traversal(self))

        # delegate to the pattern's own matcher when it provides one
        _has_matcher = getattr(pattern, '_has_matcher', None)
        if _has_matcher is not None:
            match = _has_matcher()
            return any(match(arg) for arg in preorder_traversal(self))
        else:
            return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return lambda other: self == other
    def replace(self, query, value, map=False, simultaneous=True, exact=None):
        """
        Replace matching subexpressions of ``self`` with ``value``.

        If ``map = True`` then also return the mapping {old: new} where ``old``
        was a sub-expression found with query and ``new`` is the replacement
        value for it. If the expression itself doesn't match the query, then
        the returned value will be ``self.xreplace(map)`` otherwise it should
        be ``self.subs(ordered(map.items()))``.

        Traverses an expression tree and performs replacement of matching
        subexpressions from the bottom to the top of the tree. The default
        approach is to do the replacement in a simultaneous fashion so
        changes made are targeted only once. If this is not desired or causes
        problems, ``simultaneous`` can be set to False.

        In addition, if an expression containing more than one Wild symbol
        is being used to match subexpressions and the ``exact`` flag is None
        it will be set to True so the match will only succeed if all non-zero
        values are received for each Wild that appears in the match pattern.
        Setting this to False accepts a match of 0; while setting it True
        accepts all matches that have a 0 in them. See example below for
        cautions.

        The list of possible combinations of queries and replacement values
        is listed below:

        Examples
        ========

        Initial setup

        >>> from sympy import log, sin, cos, tan, Wild, Mul, Add
        >>> from sympy.abc import x, y
        >>> f = log(sin(x)) + tan(sin(x**2))

        1.1. type -> type
            obj.replace(type, newtype)

            When object of type ``type`` is found, replace it with the
            result of passing its argument(s) to ``newtype``.

            >>> f.replace(sin, cos)
            log(cos(x)) + tan(cos(x**2))
            >>> sin(x).replace(sin, cos, map=True)
            (cos(x), {sin(x): cos(x)})
            >>> (x*y).replace(Mul, Add)
            x + y

        1.2. type -> func
            obj.replace(type, func)

            When object of type ``type`` is found, apply ``func`` to its
            argument(s). ``func`` must be written to handle the number
            of arguments of ``type``.

            >>> f.replace(sin, lambda arg: sin(2*arg))
            log(sin(2*x)) + tan(sin(2*x**2))
            >>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
            sin(2*x*y)

        2.1. pattern -> expr
            obj.replace(pattern(wild), expr(wild))

            Replace subexpressions matching ``pattern`` with the expression
            written in terms of the Wild symbols in ``pattern``.

            >>> a, b = map(Wild, 'ab')
            >>> f.replace(sin(a), tan(a))
            log(tan(x)) + tan(tan(x**2))
            >>> f.replace(sin(a), tan(a/2))
            log(tan(x/2)) + tan(tan(x**2/2))
            >>> f.replace(sin(a), a)
            log(x) + tan(x**2)
            >>> (x*y).replace(a*x, a)
            y

            Matching is exact by default when more than one Wild symbol
            is used: matching fails unless the match gives non-zero
            values for all Wild symbols:

            >>> (2*x + y).replace(a*x + b, b - a)
            y - 2
            >>> (2*x).replace(a*x + b, b - a)
            2*x

            When set to False, the results may be non-intuitive:

            >>> (2*x).replace(a*x + b, b - a, exact=False)
            2/x

        2.2. pattern -> func
            obj.replace(pattern(wild), lambda wild: expr(wild))

            All behavior is the same as in 2.1 but now a function in terms of
            pattern variables is used rather than an expression:

            >>> f.replace(sin(a), lambda a: sin(2*a))
            log(sin(2*x)) + tan(sin(2*x**2))

        3.1. func -> func
            obj.replace(filter, func)

            Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
            is True.

            >>> g = 2*sin(x**3)
            >>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
            4*sin(x**9)

        The expression itself is also targeted by the query but is done in
        such a fashion that changes are not made twice.

        >>> e = x*(x*y + 1)
        >>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
        2*x*(2*x*y + 1)

        When matching a single symbol, `exact` will default to True, but
        this may or may not be the behavior that is desired:

        Here, we want `exact=False`:

        >>> from sympy import Function
        >>> f = Function('f')
        >>> e = f(1) + f(0)
        >>> q = f(a), lambda a: f(a + 1)
        >>> e.replace(*q, exact=False)
        f(1) + f(2)
        >>> e.replace(*q, exact=True)
        f(0) + f(2)

        But here, the nature of matching makes selecting
        the right setting tricky:

        >>> e = x**(1 + y)
        >>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=False)
        1
        >>> (x**(1 + y)).replace(x**(1 + a), lambda a: x**-a, exact=True)
        x**(-x - y + 1)
        >>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=False)
        1
        >>> (x**y).replace(x**(1 + a), lambda a: x**-a, exact=True)
        x**(1 - y)

        It is probably better to use a different form of the query
        that describes the target expression more precisely:

        >>> (1 + x**(1 + y)).replace(
        ... lambda x: x.is_Pow and x.exp.is_Add and x.exp.args[0] == 1,
        ... lambda x: x.base**(1 - (x.exp - 1)))
        ...
        x**(1 - y) + 1

        See Also
        ========

        subs: substitution of subexpressions as defined by the objects
              themselves.
        xreplace: exact node replacement in expr tree; also capable of
                  using matching rules

        """
        from sympy.core.symbol import Dummy, Wild
        from sympy.simplify.simplify import bottom_up
        # sympify what can be sympified; types and callables are left as-is
        # and dispatched on below.
        try:
            query = _sympify(query)
        except SympifyError:
            pass
        try:
            value = _sympify(value)
        except SympifyError:
            pass
        # Normalize (query, value) into a pair of closures: _query tests a
        # node, _value produces its replacement given the match result.
        if isinstance(query, type):
            _query = lambda expr: isinstance(expr, query)
            if isinstance(value, type):
                _value = lambda expr, result: value(*expr.args)
            elif callable(value):
                _value = lambda expr, result: value(*expr.args)
            else:
                raise TypeError(
                    "given a type, replace() expects another "
                    "type or a callable")
        elif isinstance(query, Basic):
            _query = lambda expr: expr.match(query)
            if exact is None:
                # with multiple Wilds, reject matches containing a 0 value
                # (see the docstring's ``exact`` discussion)
                exact = (len(query.atoms(Wild)) > 1)
            if isinstance(value, Basic):
                if exact:
                    _value = lambda expr, result: (value.subs(result)
                        if all(result.values()) else expr)
                else:
                    _value = lambda expr, result: value.subs(result)
            elif callable(value):
                # match dictionary keys get the trailing underscore stripped
                # from them and are then passed as keywords to the callable;
                # if ``exact`` is True, only accept match if there are no null
                # values amongst those matched.
                if exact:
                    _value = lambda expr, result: (value(**
                        {str(k)[:-1]: v for k, v in result.items()})
                        if all(val for val in result.values()) else expr)
                else:
                    _value = lambda expr, result: value(**
                        {str(k)[:-1]: v for k, v in result.items()})
            else:
                raise TypeError(
                    "given an expression, replace() expects "
                    "another expression or a callable")
        elif callable(query):
            _query = query
            if callable(value):
                _value = lambda expr, result: value(expr)
            else:
                raise TypeError(
                    "given a callable, replace() expects "
                    "another callable")
        else:
            raise TypeError(
                "first argument to replace() must be a "
                "type, an expression or a callable")
        mapping = {}  # changes that took place
        mask = []  # the dummies that were used as change placeholders
        def rec_replace(expr):
            result = _query(expr)
            if result or result == {}:
                new = _value(expr, result)
                if new is not None and new != expr:
                    mapping[expr] = new
                    if simultaneous:
                        # don't let this change during rebuilding;
                        # XXX this may fail if the object being replaced
                        # cannot be represented as a Dummy in the expression
                        # tree, e.g. an ExprConditionPair in Piecewise
                        # cannot be represented with a Dummy
                        com = getattr(new, 'is_commutative', True)
                        if com is None:
                            com = True
                        d = Dummy('rec_replace', commutative=com)
                        mask.append((d, new))
                        expr = d
                    else:
                        expr = new
            return expr
        rv = bottom_up(self, rec_replace, atoms=True)
        # restore original expressions for Dummy symbols
        if simultaneous:
            mask = list(reversed(mask))
            for o, n in mask:
                r = {o: n}
                # if a sub-expression could not be replaced with
                # a Dummy then this will fail; either filter
                # against such sub-expressions or figure out a
                # way to carry out simultaneous replacement
                # in this situation.
                rv = rv.xreplace(r)  # if this fails, see above
        if not map:
            return rv
        else:
            if simultaneous:
                # restore subexpressions in mapping
                for o, n in mask:
                    r = {o: n}
                    mapping = {k.xreplace(r): v.xreplace(r)
                        for k, v in mapping.items()}
            return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
pattern = sympify(pattern)
return pattern.matches(self, old=old)
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep=False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
def simplify(self, **kwargs):
"""See the simplify function in sympy.simplify"""
from sympy.simplify import simplify
return simplify(self, **kwargs)
    def _eval_rewrite(self, pattern, rule, **hints):
        """Recursive worker behind rewrite().

        ``rule`` is the name of a ``_eval_rewrite_as_*`` method; nodes
        matching ``pattern`` (or all nodes when pattern is None) that define
        that method are rewritten with it.
        """
        if self.is_Atom:
            # Atoms have no args; apply the rule directly if available.
            if hasattr(self, rule):
                return getattr(self, rule)()
            return self
        if hints.get('deep', True):
            # rewrite the children first (bottom-up)
            args = [a._eval_rewrite(pattern, rule, **hints)
                    if isinstance(a, Basic) else a
                    for a in self.args]
        else:
            args = self.args
        if pattern is None or isinstance(self, pattern):
            if hasattr(self, rule):
                rewritten = getattr(self, rule)(*args, **hints)
                # a rule may decline by returning None
                if rewritten is not None:
                    return rewritten
        return self.func(*args) if hints.get('evaluate', True) else self
    def _accept_eval_derivative(self, s):
        """Double-dispatch entry point for derivative evaluation.

        Treats self as a scalar by default.
        """
        # This method needs to be overridden by array-like objects
        return s._visit_eval_derivative_scalar(self)
    def _visit_eval_derivative_scalar(self, base):
        """Visitor: differentiate scalar ``base`` by scalar ``self``."""
        # Base is a scalar
        # Types are (base: scalar, self: scalar)
        return base._eval_derivative(self)
    def _visit_eval_derivative_array(self, base):
        """Visitor: differentiate array/matrix ``base`` by scalar ``self``."""
        # Types are (base: array/matrix, self: scalar)
        # Base is some kind of array/matrix,
        # it should have `.applyfunc(lambda x: x.diff(self)` implemented:
        return base._eval_derivative_array(self)
def _eval_derivative_n_times(self, s, n):
# This is the default evaluator for derivatives (as called by `diff`
# and `Derivative`), it will attempt a loop to derive the expression
# `n` times by calling the corresponding `_eval_derivative` method,
# while leaving the derivative unevaluated if `n` is symbolic. This
# method should be overridden if the object has a closed form for its
# symbolic n-th derivative.
from sympy import Integer
if isinstance(n, (int, Integer)):
obj = self
for i in range(n):
obj2 = obj._accept_eval_derivative(s)
if obj == obj2 or obj2 is None:
break
obj = obj2
return obj2
else:
return None
    def rewrite(self, *args, **hints):
        """ Rewrite functions in terms of other functions.

        Rewrites expression containing applications of functions
        of one kind in terms of functions of different kind. For
        example you can rewrite trigonometric functions as complex
        exponentials or combinatorial functions as gamma function.

        As a pattern this function accepts a list of functions to
        to rewrite (instances of DefinedFunction class). As rule
        you can use string or a destination function instance (in
        this case rewrite() will use the str() function).

        There is also the possibility to pass hints on how to rewrite
        the given expressions. For now there is only one such hint
        defined called 'deep'. When 'deep' is set to False it will
        forbid functions to rewrite their contents.

        Examples
        ========

        >>> from sympy import sin, exp
        >>> from sympy.abc import x

        Unspecified pattern:

        >>> sin(x).rewrite(exp)
        -I*(exp(I*x) - exp(-I*x))/2

        Pattern as a single function:

        >>> sin(x).rewrite(sin, exp)
        -I*(exp(I*x) - exp(-I*x))/2

        Pattern as a list of functions:

        >>> sin(x).rewrite([sin, ], exp)
        -I*(exp(I*x) - exp(-I*x))/2

        """
        if not args:
            return self
        else:
            # the last positional argument names the rewrite target; any
            # preceding arguments restrict which nodes get rewritten
            pattern = args[:-1]
            if isinstance(args[-1], str):
                rule = '_eval_rewrite_as_' + args[-1]
            else:
                # rewrite arg is usually a class but can also be a
                # singleton (e.g. GoldenRatio) so we check
                # __name__ or __class__.__name__
                clsname = getattr(args[-1], "__name__", None)
                if clsname is None:
                    clsname = args[-1].__class__.__name__
                rule = '_eval_rewrite_as_' + clsname
            if not pattern:
                return self._eval_rewrite(None, rule, **hints)
            else:
                if iterable(pattern[0]):
                    pattern = pattern[0]
                # only keep patterns that actually occur in self
                pattern = [p for p in pattern if self.has(p)]
                if pattern:
                    return self._eval_rewrite(tuple(pattern), rule, **hints)
                else:
                    return self
    # registry: {class: {node-name: [postprocessor functions]}}
    _constructor_postprocessor_mapping = {}  # type: ignore
    @classmethod
    def _exec_constructor_postprocessors(cls, obj):
        """Run registered constructor postprocessors on a freshly-built obj.

        Postprocessors are collected from the MROs of obj's arguments and
        applied in order when registered under obj's class name.
        """
        # WARNING: This API is experimental.
        # This is an experimental API that introduces constructor
        # postprocessors for SymPy Core elements. If an argument of a SymPy
        # expression has a `_constructor_postprocessor_mapping` attribute, it will
        # be interpreted as a dictionary containing lists of postprocessing
        # functions for matching expression node names.
        clsname = obj.__class__.__name__
        postprocessors = defaultdict(list)
        for i in obj.args:
            try:
                # gather postprocessors registered on any class in this
                # argument's MRO
                postprocessor_mappings = (
                    Basic._constructor_postprocessor_mapping[cls].items()
                    for cls in type(i).mro()
                    if cls in Basic._constructor_postprocessor_mapping
                )
                for k, v in chain.from_iterable(postprocessor_mappings):
                    # extend while preserving order and avoiding duplicates
                    postprocessors[k].extend([j for j in v if j not in postprocessors[k]])
            except TypeError:
                # unhashable class entries -- skip this argument
                pass
        for f in postprocessors.get(clsname, []):
            obj = f(obj)
        return obj
class Atom(Basic):
    """
    A parent class for atomic things. An atom is an expression with no subexpressions.

    Examples
    ========

    Symbol, Number, Rational, Integer, ...
    But not: Add, Mul, Pow, ...
    """
    is_Atom = True
    __slots__ = ()
    def matches(self, expr, repl_dict=None, old=False):
        # An atom has no args to destructure: it matches only itself.
        # ``repl_dict=None`` (rather than a ``{}`` default) ensures each call
        # returns a fresh dict -- the old shared default could be mutated by
        # a caller, corrupting every subsequent match.
        if self == expr:
            if repl_dict is None:
                return {}
            return repl_dict
    def xreplace(self, rule, hack2=False):
        # No args to recurse into: either the rule maps self or self stays.
        return rule.get(self, self)
    def doit(self, **hints):
        # Atoms are already fully evaluated.
        return self
    @classmethod
    def class_key(cls):
        return 2, 0, cls.__name__
    @cacheit
    def sort_key(self, order=None):
        return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
    def _eval_simplify(self, **kwargs):
        return self
    @property
    def _sorted_args(self):
        # this is here as a safeguard against accidentally using _sorted_args
        # on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
        # since there are no args. So the calling routine should be checking
        # to see that this property is not called for Atoms.
        raise AttributeError('Atoms have no args. It might be necessary'
                             ' to make a check for Atoms in the calling code.')
def _aresame(a, b):
    """Return True if a and b are structurally the same, else False.

    Examples
    ========

    In SymPy (as in Python) two numbers compare the same if they
    have the same underlying base-2 representation even though
    they may not be the same type:

    >>> from sympy import S
    >>> 2.0 == S(2)
    True
    >>> 0.5 == S.Half
    True

    This routine was written to provide a query for such cases that
    would give false when the types do not match:

    >>> from sympy.core.basic import _aresame
    >>> _aresame(S(2.0), S(2))
    False

    """
    from .numbers import Number
    from .function import AppliedUndef, UndefinedFunction as UndefFunc
    if isinstance(a, Number) and isinstance(b, Number):
        # for Numbers, equality AND exact class identity are required
        return a == b and a.__class__ == b.__class__
    # zip_longest pads the shorter traversal with None, so trees of
    # different size necessarily fail the comparison below
    for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
        if i != j or type(i) != type(j):
            # undefined functions compare by class_key rather than by type,
            # since each UndefinedFunction is its own dynamically-made class
            if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
                (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
                if i.class_key() != j.class_key():
                    return False
            else:
                return False
    return True
def _atomic(e, recursive=False):
    """Return atom-like quantities as far as substitution is
    concerned: Derivatives, Functions and Symbols. Don't
    return any 'atoms' that are inside such quantities unless
    they also appear outside, too, unless `recursive` is True.

    Examples
    ========

    >>> from sympy import Derivative, Function, cos
    >>> from sympy.abc import x, y
    >>> from sympy.core.basic import _atomic
    >>> f = Function('f')
    >>> _atomic(x + y)
    {x, y}
    >>> _atomic(x + f(y))
    {x, f(y)}
    >>> _atomic(Derivative(f(x), x) + cos(x) + y)
    {y, cos(x), Derivative(f(x), x)}

    """
    from sympy import Derivative, Function, Symbol
    pot = preorder_traversal(e)
    seen = set()
    if isinstance(e, Basic):
        free = getattr(e, "free_symbols", None)
        if free is None:
            # Basic without free_symbols: treat the whole object as atomic
            return {e}
    else:
        return set()
    atoms = set()
    for p in pot:
        if p in seen:
            # already visited this subtree; don't descend again
            pot.skip()
            continue
        seen.add(p)
        if isinstance(p, Symbol) and p in free:
            atoms.add(p)
        elif isinstance(p, (Derivative, Function)):
            if not recursive:
                # do not collect atoms inside Derivatives/Functions
                pot.skip()
            atoms.add(p)
    return atoms
class preorder_traversal:
    """
    Do a pre-order traversal of a tree.

    This iterator recursively yields nodes that it has visited in a pre-order
    fashion. That is, it yields the current node then descends through the
    tree breadth-first to yield all of a node's children's pre-order
    traversal.

    For an expression, the order of the traversal depends on the order of
    .args, which in many cases can be arbitrary.

    Parameters
    ==========
    node : sympy expression
        The expression to traverse.
    keys : (default None) sort key(s)
        The key(s) used to sort args of Basic objects. When None, args of Basic
        objects are processed in arbitrary order. If key is defined, it will
        be passed along to ordered() as the only key(s) to use to sort the
        arguments; if ``key`` is simply True then the default keys of ordered
        will be used.

    Yields
    ======
    subtree : sympy expression
        All of the subtrees in the tree.

    Examples
    ========

    >>> from sympy import symbols
    >>> from sympy.core.basic import preorder_traversal
    >>> x, y, z = symbols('x y z')

    The nodes are returned in the order that they are encountered unless key
    is given; simply passing key=True will guarantee that the traversal is
    unique.

    >>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
    [z*(x + y), z, x + y, y, x]
    >>> list(preorder_traversal((x + y)*z, keys=True))
    [z*(x + y), z, x + y, x, y]

    """
    def __init__(self, node, keys=None):
        # _skip_flag is set by skip() after a node is yielded and consumed
        # exactly once by the generator to suppress that node's subtree
        self._skip_flag = False
        self._pt = self._preorder_traversal(node, keys)
    def _preorder_traversal(self, node, keys):
        # Generator doing the actual work: yield the node itself first,
        # then (unless skip() was called) recurse into its children.
        yield node
        if self._skip_flag:
            self._skip_flag = False
            return
        if isinstance(node, Basic):
            if not keys and hasattr(node, '_argset'):
                # LatticeOp keeps args as a set. We should use this if we
                # don't care about the order, to prevent unnecessary sorting.
                args = node._argset
            else:
                args = node.args
            if keys:
                if keys != True:
                    args = ordered(args, keys, default=False)
                else:
                    args = ordered(args)
            for arg in args:
                yield from self._preorder_traversal(arg, keys)
        elif iterable(node):
            # plain iterables (lists, tuples, ...) are traversed element-wise
            for item in node:
                yield from self._preorder_traversal(item, keys)
    def skip(self):
        """
        Skip yielding current node's (last yielded node's) subtrees.

        Examples
        ========

        >>> from sympy.core import symbols
        >>> from sympy.core.basic import preorder_traversal
        >>> x, y, z = symbols('x y z')
        >>> pt = preorder_traversal((x+y*z)*z)
        >>> for i in pt:
        ...     print(i)
        ...     if i == x+y*z:
        ...             pt.skip()
        z*(x + y*z)
        z
        x + y*z
        """
        self._skip_flag = True
    def __next__(self):
        return next(self._pt)
    def __iter__(self):
        return self
def _make_find_query(query):
    """Convert the argument of Basic.find() into a callable"""
    try:
        query = _sympify(query)
    except SympifyError:
        pass
    if isinstance(query, type):
        # a class: match by isinstance
        def matcher(expr):
            return isinstance(expr, query)
        return matcher
    if isinstance(query, Basic):
        # an expression (possibly with Wilds): match via .match()
        def matcher(expr):
            return expr.match(query) is not None
        return matcher
    # already a callable predicate
    return query
| 32.550414 | 103 | 0.541645 | from collections import defaultdict
from itertools import chain, zip_longest
from .assumptions import BasicMeta, ManagedProperties
from .cache import cacheit
from .sympify import _sympify, sympify, SympifyError
from .compatibility import iterable, ordered, Mapping
from .singleton import S
from inspect import getmro
def as_Basic(expr):
from sympy.utilities.misc import func_name
try:
return _sympify(expr)
except SympifyError:
raise TypeError(
'Argument must be a Basic object, not `%s`' % func_name(
expr))
class Basic(metaclass=ManagedProperties):
__slots__ = ('_mhash',
'_args',
'_assumptions'
)
is_number = False
is_Atom = False
is_Symbol = False
is_symbol = False
is_Indexed = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
is_Point = False
is_MatAdd = False
is_MatMul = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None
obj._args = args
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
return self._args
@property
def assumptions0(self):
return {}
def compare(self, other):
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([inner_key(arg) for arg in args])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
if self is other:
return True
tself = type(self)
tother = type(other)
if tself is not tother:
try:
other = _sympify(other)
tother = type(other)
except SympifyError:
return NotImplemented
if type(tself).__ne__ is not type.__ne__:
if tself != tother:
return False
elif tself is not tother:
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
return not self == other
def dummy_eq(self, other, symbol=None):
s = self.as_dummy()
o = _sympify(other)
o = o.as_dummy()
dummy_symbols = [i for i in s.free_symbols if i.is_Dummy]
if len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
return s == o
if symbol is None:
symbols = o.free_symbols
if len(symbols) == 1:
symbol = symbols.pop()
else:
return s == o
tmp = dummy.__class__()
return s.subs(dummy, tmp) == o.subs(symbol, tmp)
def __repr__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
# data to any notebook containing SymPy expressions, without adding
# anything useful to the notebook. It can still enabled manually, e.g.,
# for the qtconsole, with init_printing().
def _repr_latex_(self):
from sympy.printing.latex import latex
s = latex(self, mode='plain')
return "$\\displaystyle %s$" % s
_repr_latex_orig = _repr_latex_
def atoms(self, *types):
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
nodes = preorder_traversal(self)
if types:
result = {node for node in nodes if isinstance(node, types)}
else:
result = {node for node in nodes if not node.args}
return result
@property
def free_symbols(self):
return set().union(*[a.free_symbols for a in self.args])
@property
def expr_free_symbols(self):
return set()
def as_dummy(self):
def can(x):
d = {i: i.as_dummy() for i in x.bound_symbols}
# mask free that shadow bound
x = x.subs(d)
c = x.canonical_variables
# replace bound
x = x.xreplace(c)
# undo masking
x = x.xreplace({v: k for k, v in d.items()})
return x
return self.replace(
lambda x: hasattr(x, 'bound_symbols'),
lambda x: can(x))
@property
def canonical_variables(self):
from sympy.core.symbol import Symbol
from sympy.utilities.iterables import numbered_symbols
if not hasattr(self, 'bound_symbols'):
return {}
dums = numbered_symbols('_')
reps = {}
v = self.bound_symbols
# this free will include bound symbols that are not part of
# self's bound symbols
free = {i.name for i in self.atoms(Symbol) - set(v)}
for v in v:
d = next(dums)
if v.is_Symbol:
while v.name == d.name or d.name in free:
d = next(dums)
reps[v] = d
return reps
def rcall(self, *args):
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
from sympy import Symbol
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, Symbol):
return expr_to_call
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
@property
def is_comparable(self):
is_extended_real = self.is_extended_real
if is_extended_real is False:
return False
if not self.is_number:
return False
# this will create spurious precision
n, i = [p.evalf(2) if not p.is_Number else p
for p in self.as_real_imag()]
if not (i.is_Number and n.is_Number):
return False
if i:
# if _prec = 1 we can't decide and if not,
# so return False
return False
else:
return n._prec != 1
@property
def func(self):
return self.__class__
@property
def args(self):
return self._args
@property
def _sorted_args(self):
return self.args
def as_content_primitive(self, radical=False, clear=True):
return S.One, self
def subs(self, *args, **kwargs):
from sympy.core.containers import Dict
from sympy.utilities.iterables import sift
from sympy import Dummy, Symbol
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, Mapping)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i, s in enumerate(sequence):
if isinstance(s[0], str):
# when old is a string we prefer Symbol
s = Symbol(s[0]), s[1]
try:
s = [sympify(_, strict=not isinstance(_, str))
for _ in s]
except SympifyError:
# if it can't be sympified, skip it
sequence[i] = None
continue
sequence[i] = None if _aresame(*s) else tuple(s)
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
atoms, nonatoms = sift(list(sequence),
lambda x: x.is_Atom, binary=True)
sequence = [(k, sequence[k]) for k in
list(reversed(list(ordered(nonatoms)))) + list(ordered(atoms))]
if kwargs.pop('simultaneous', False):
reps = {}
rv = self
kwargs['hack2'] = True
m = Dummy('subs_m')
for old, new in sequence:
com = new.is_commutative
if com is None:
com = True
d = Dummy('subs_d', commutative=com)
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
def fallback(self, old, new):
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul:
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
return None
def xreplace(self, rule):
value, _ = self._xreplace(rule)
return value
def _xreplace(self, rule):
if self in rule:
return rule[self], True
elif rule:
args = []
changed = False
for a in self.args:
_xreplace = getattr(a, '_xreplace', None)
if _xreplace is not None:
a_xr = _xreplace(rule)
args.append(a_xr[0])
changed |= a_xr[1]
else:
args.append(a)
args = tuple(args)
if changed:
return self.func(*args), True
return self, False
@cacheit
def has(self, *patterns):
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicMeta):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
_has_matcher = getattr(pattern, '_has_matcher', None)
if _has_matcher is not None:
match = _has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
else:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
return lambda other: self == other
    def replace(self, query, value, map=False, simultaneous=True, exact=None):
        """Replace matching subexpressions of self with ``value``.

        The tree is traversed bottom-up.  Accepted (query, value)
        combinations:

        - type -> type or callable: nodes that are instances of the type
          are rebuilt by calling the replacement on their ``args``;
        - expression -> expression or callable: nodes that ``match`` the
          pattern are replaced by substituting the match dictionary into
          the replacement expression (or calling the callable with the
          Wild bindings as keyword arguments);
        - callable -> callable: nodes for which the predicate is truthy
          are replaced with ``value(node)``.

        With ``simultaneous=True`` (default), replaced subtrees are
        masked with Dummy symbols during traversal so a replacement
        target is not itself rewritten by later matches; the Dummies are
        unmasked at the end.  If ``map=True`` a ``(result, mapping)``
        pair is returned where ``mapping`` records old -> new pairs.
        ``exact`` controls whether Wild bindings must all be non-zero
        for the replacement to fire; when None it defaults to True iff
        the pattern contains more than one Wild.
        """
        from sympy.core.symbol import Dummy, Wild
        from sympy.simplify.simplify import bottom_up
        try:
            query = _sympify(query)
        except SympifyError:
            pass
        try:
            value = _sympify(value)
        except SympifyError:
            pass
        if isinstance(query, type):
            _query = lambda expr: isinstance(expr, query)
            if isinstance(value, type):
                _value = lambda expr, result: value(*expr.args)
            elif callable(value):
                _value = lambda expr, result: value(*expr.args)
            else:
                raise TypeError(
                    "given a type, replace() expects another "
                    "type or a callable")
        elif isinstance(query, Basic):
            _query = lambda expr: expr.match(query)
            if exact is None:
                exact = (len(query.atoms(Wild)) > 1)
            if isinstance(value, Basic):
                if exact:
                    _value = lambda expr, result: (value.subs(result)
                        if all(result.values()) else expr)
                else:
                    _value = lambda expr, result: value.subs(result)
            elif callable(value):
                # The Wild names end with an underscore-like marker that
                # str(k)[:-1] strips before the kwargs call.
                if exact:
                    _value = lambda expr, result: (value(**
                        {str(k)[:-1]: v for k, v in result.items()})
                        if all(val for val in result.values()) else expr)
                else:
                    _value = lambda expr, result: value(**
                        {str(k)[:-1]: v for k, v in result.items()})
            else:
                raise TypeError(
                    "given an expression, replace() expects "
                    "another expression or a callable")
        elif callable(query):
            _query = query
            if callable(value):
                _value = lambda expr, result: value(expr)
            else:
                raise TypeError(
                    "given a callable, replace() expects "
                    "another callable")
        else:
            raise TypeError(
                "first argument to replace() must be a "
                "type, an expression or a callable")
        mapping = {}  # changes that took place
        mask = []  # the dummies that were used as change placeholders
        def rec_replace(expr):
            result = _query(expr)
            if result or result == {}:
                new = _value(expr, result)
                if new is not None and new != expr:
                    mapping[expr] = new
                    if simultaneous:
                        # XXX this may fail if the object being replaced
                        # cannot be represented as a Dummy in the expression
                        # tree, e.g. an ExprConditionPair in Piecewise
                        # cannot be represented with a Dummy
                        com = getattr(new, 'is_commutative', True)
                        if com is None:
                            com = True
                        d = Dummy('rec_replace', commutative=com)
                        mask.append((d, new))
                        expr = d
                    else:
                        expr = new
            return expr
        rv = bottom_up(self, rec_replace, atoms=True)
        # restore original expressions for Dummy symbols
        if simultaneous:
            mask = list(reversed(mask))
            for o, n in mask:
                r = {o: n}
                # if a sub-expression could not be replaced with
                # a Dummy then this will fail; either filter
                # against such sub-expressions or figure out a
                # way to carry out simultaneous replacement
                # in this situation.
                rv = rv.xreplace(r)  # if this fails, see above
        if not map:
            return rv
        else:
            if simultaneous:
                # restore subexpressions in mapping
                for o, n in mask:
                    r = {o: n}
                    mapping = {k.xreplace(r): v.xreplace(r)
                        for k, v in mapping.items()}
            return rv, mapping
def find(self, query, group=False):
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
    def count(self, query):
        """Count the number of subexpressions matching ``query``."""
        query = _make_find_query(query)
        return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict={}, old=False):
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
    def match(self, pattern, old=False):
        """Pattern-match self against ``pattern``.

        Returns a dict of bindings on success (possibly empty), or None
        when the pattern does not match (see ``matches``).
        """
        pattern = sympify(pattern)
        return pattern.matches(self, old=old)
    def count_ops(self, visual=None):
        """Wrapper for the global ``count_ops``: count operations in self."""
        from sympy import count_ops
        return count_ops(self, visual)
def doit(self, **hints):
if hints.get('deep', True):
terms = [term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args]
return self.func(*terms)
else:
return self
    def simplify(self, **kwargs):
        """See the ``simplify`` function in ``sympy.simplify``."""
        from sympy.simplify import simplify
        return simplify(self, **kwargs)
    def _eval_rewrite(self, pattern, rule, **hints):
        """Recursive helper for .rewrite().

        ``rule`` is the name of a method (e.g. '_eval_rewrite_as_X')
        that, when present on a node matching ``pattern`` (or on every
        node if pattern is None), produces the rewritten form.
        """
        if self.is_Atom:
            if hasattr(self, rule):
                return getattr(self, rule)()
            return self
        if hints.get('deep', True):
            # Rewrite children first so the rule sees rewritten args.
            args = [a._eval_rewrite(pattern, rule, **hints)
                        if isinstance(a, Basic) else a
                        for a in self.args]
        else:
            args = self.args
        if pattern is None or isinstance(self, pattern):
            if hasattr(self, rule):
                rewritten = getattr(self, rule)(*args, **hints)
                # A rule returning None means "no rewrite available".
                if rewritten is not None:
                    return rewritten
        return self.func(*args) if hints.get('evaluate', True) else self
    def _accept_eval_derivative(self, s):
        """Double-dispatch entry point for derivative evaluation."""
        # This method needs to be overridden by array-like objects
        return s._visit_eval_derivative_scalar(self)
    def _visit_eval_derivative_scalar(self, base):
        """Differentiate scalar ``base`` with respect to scalar self."""
        # Base is a scalar
        # Types are (base: scalar, self: scalar)
        return base._eval_derivative(self)
    def _visit_eval_derivative_array(self, base):
        """Differentiate array-like ``base`` with respect to scalar self."""
        # Types are (base: array/matrix, self: scalar)
        # Base is some kind of array/matrix,
        # it should have `.applyfunc(lambda x: x.diff(self)` implemented:
        return base._eval_derivative_array(self)
def _eval_derivative_n_times(self, s, n):
# This is the default evaluator for derivatives (as called by `diff`
# and `Derivative`), it will attempt a loop to derive the expression
# `n` times by calling the corresponding `_eval_derivative` method,
# while leaving the derivative unevaluated if `n` is symbolic. This
# method should be overridden if the object has a closed form for its
# symbolic n-th derivative.
from sympy import Integer
if isinstance(n, (int, Integer)):
obj = self
for i in range(n):
obj2 = obj._accept_eval_derivative(s)
if obj == obj2 or obj2 is None:
break
obj = obj2
return obj2
else:
return None
    def rewrite(self, *args, **hints):
        """Rewrite self in terms of a given function or rule.

        The last positional argument names the rewrite target (either a
        string or an object whose ``__name__``/class name is used); any
        preceding arguments restrict the rewrite to nodes of those
        types.  Internally this dispatches to a
        ``_eval_rewrite_as_<name>`` method on each node.
        """
        if not args:
            return self
        else:
            pattern = args[:-1]
            if isinstance(args[-1], str):
                rule = '_eval_rewrite_as_' + args[-1]
            else:
                # rewrite arg is usually a class but can also be a
                # singleton (e.g. GoldenRatio) so we check
                # __name__ or __class__.__name__
                clsname = getattr(args[-1], "__name__", None)
                if clsname is None:
                    clsname = args[-1].__class__.__name__
                rule = '_eval_rewrite_as_' + clsname
            if not pattern:
                return self._eval_rewrite(None, rule, **hints)
            else:
                if iterable(pattern[0]):
                    pattern = pattern[0]
                # Only keep pattern types actually present in self.
                pattern = [p for p in pattern if self.has(p)]
                if pattern:
                    return self._eval_rewrite(tuple(pattern), rule, **hints)
                else:
                    return self
_constructor_postprocessor_mapping = {} # type: ignore
    @classmethod
    def _exec_constructor_postprocessors(cls, obj):
        """Run registered constructor postprocessors on a freshly built ``obj``."""
        # WARNING: This API is experimental.
        # This is an experimental API that introduces constructor
        # postprosessors for SymPy Core elements. If an argument of a SymPy
        # expression has a `_constructor_postprocessor_mapping` attribute, it will
        # be interpreted as a dictionary containing lists of postprocessing
        # functions for matching expression node names.
        clsname = obj.__class__.__name__
        postprocessors = defaultdict(list)
        for i in obj.args:
            try:
                # Collect postprocessors registered for any class in the
                # argument's MRO, de-duplicating while preserving order.
                postprocessor_mappings = (
                    Basic._constructor_postprocessor_mapping[cls].items()
                    for cls in type(i).mro()
                    if cls in Basic._constructor_postprocessor_mapping
                )
                for k, v in chain.from_iterable(postprocessor_mappings):
                    postprocessors[k].extend([j for j in v if j not in postprocessors[k]])
            except TypeError:
                # NOTE(review): TypeError presumably guards against
                # exotic/unhashable metaclass cases — confirm.
                pass
        for f in postprocessors.get(clsname, []):
            obj = f(obj)
        return obj
class Atom(Basic):
    """A parent class for atomic things: expressions with no subexpressions
    (no ``.args``)."""
    is_Atom = True
    __slots__ = ()
    def matches(self, expr, repl_dict={}, old=False):
        """Return repl_dict when expr equals self; None (implicitly) otherwise."""
        if self == expr:
            return repl_dict
    def xreplace(self, rule, hack2=False):
        """Atoms have no args: a direct rule lookup suffices."""
        return rule.get(self, self)
    def doit(self, **hints):
        """Atoms are already fully evaluated."""
        return self
    @classmethod
    def class_key(cls):
        """Nice order of classes (atoms sort after the core numeric types)."""
        return 2, 0, cls.__name__
    @cacheit
    def sort_key(self, order=None):
        """Return a canonical sort key for this atom (cached)."""
        return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
    def _eval_simplify(self, **kwargs):
        """Atoms cannot be simplified further."""
        return self
    @property
    def _sorted_args(self):
        # this is here as a safeguard against accidentally using _sorted_args
        # on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
        # since there are no args. So the calling routine should be checking
        # to see that this property is not called for Atoms.
        raise AttributeError('Atoms have no args. It might be necessary'
            ' to make a check for Atoms in the calling code.')
def _aresame(a, b):
    """Return True if ``a`` and ``b`` are structurally the same, else False.

    Stricter than ``==``: node types are compared too (see the
    ``type(i) != type(j)`` check), so numbers of different classes are
    not considered the same even when mathematically equal.
    """
    from .numbers import Number
    from .function import AppliedUndef, UndefinedFunction as UndefFunc
    if isinstance(a, Number) and isinstance(b, Number):
        # Fast path for plain numbers: equality plus an exact class
        # check, avoiding a full tree traversal.
        return a == b and a.__class__ == b.__class__
    for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
        if i != j or type(i) != type(j):
            # Undefined functions are compared via class_key rather than
            # exact type identity.
            if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
                (isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
                if i.class_key() != j.class_key():
                    return False
            else:
                return False
    return True
def _atomic(e, recursive=False):
    """Return the atom-like objects of ``e``: its free Symbols plus any
    Derivative/Function nodes encountered.

    With ``recursive=False`` (default) Derivative/Function nodes are
    treated as opaque units (their arguments are skipped); with
    ``recursive=True`` their arguments are descended into as well.
    Non-Basic input yields an empty set.
    """
    from sympy import Derivative, Function, Symbol
    pot = preorder_traversal(e)
    seen = set()
    if isinstance(e, Basic):
        free = getattr(e, "free_symbols", None)
        if free is None:
            # No free_symbols attribute: treat e itself as the atom.
            return {e}
    else:
        return set()
    atoms = set()
    for p in pot:
        if p in seen:
            # Already handled this subtree; do not descend again.
            pot.skip()
            continue
        seen.add(p)
        if isinstance(p, Symbol) and p in free:
            atoms.add(p)
        elif isinstance(p, (Derivative, Function)):
            if not recursive:
                pot.skip()
            atoms.add(p)
    return atoms
class preorder_traversal:
    """Iterator that walks a tree (Basic expressions or plain iterables)
    in pre-order, yielding each node before its children.

    ``keys`` controls child ordering for Basic nodes: True sorts with the
    default ``ordered``; any other truthy value is passed to ``ordered``
    as the sort keys.  Call ``skip()`` right after receiving a node to
    avoid descending into its children.
    """
    def __init__(self, node, keys=None):
        # _skip_flag is set by skip() and consumed by the generator.
        self._skip_flag = False
        self._pt = self._preorder_traversal(node, keys)
    def _preorder_traversal(self, node, keys):
        """Recursive generator implementing the traversal."""
        yield node
        if self._skip_flag:
            self._skip_flag = False
            return
        if isinstance(node, Basic):
            if not keys and hasattr(node, '_argset'):
                # LatticeOp keeps args as a set. We should use this if we
                # don't care about the order, to prevent unnecessary sorting.
                args = node._argset
            else:
                args = node.args
            if keys:
                if keys != True:
                    args = ordered(args, keys, default=False)
                else:
                    args = ordered(args)
            for arg in args:
                yield from self._preorder_traversal(arg, keys)
        elif iterable(node):
            for item in node:
                yield from self._preorder_traversal(item, keys)
    def skip(self):
        """Skip the children of the most recently yielded node."""
        self._skip_flag = True
    def __next__(self):
        return next(self._pt)
    def __iter__(self):
        return self
def _make_find_query(query):
    """Convert a query argument for find()/count() into a predicate callable."""
    try:
        query = _sympify(query)
    except SympifyError:
        # Not sympifiable; assume the caller passed a raw callable.
        pass
    if isinstance(query, type):
        # A class matches by instance check.
        def predicate(expr, _cls=query):
            return isinstance(expr, _cls)
        return predicate
    if isinstance(query, Basic):
        # An expression matches by pattern matching.
        def predicate(expr, _pat=query):
            return expr.match(_pat) is not None
        return predicate
    return query
| true | true |
f72d8d821c1671e10a95f3d6047d6a3edb552952 | 1,999 | py | Python | modules/dbnd/test_dbnd/run/test_log_metrics_commands.py | turbaszek/dbnd | 6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0 | [
"Apache-2.0"
] | null | null | null | modules/dbnd/test_dbnd/run/test_log_metrics_commands.py | turbaszek/dbnd | 6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0 | [
"Apache-2.0"
] | null | null | null | modules/dbnd/test_dbnd/run/test_log_metrics_commands.py | turbaszek/dbnd | 6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0 | [
"Apache-2.0"
] | null | null | null | import os
from typing import Dict
from dbnd import as_task, band, task
from dbnd._core.commands import log_artifact, log_metric
from dbnd._core.current import get_databand_run
from dbnd._core.tracking.tracking_store_file import read_task_metrics
from dbnd.testing.helpers_pytest import assert_run_task
from test_dbnd.targets_tests import TargetTestBase
class TestTaskMetricsCommands(TargetTestBase):
    """Tests for dbnd's log_metric/log_artifact commands inside tasks."""
    def test_log_metric(self):
        """A logged metric is persisted into the task's metric target."""
        @task
        def t_f_metric(a=5):
            log_metric("t_f", a)
        t = assert_run_task(t_f_metric.t())
        # The metric target content is whitespace-separated; the logged
        # value is the second token (first is presumably a timestamp —
        # confirm against the tracking store format).
        assert (
            t.ctrl.last_task_run.meta_files.get_metric_target("t_f").read().split()[1]
            == "5"
        )
    def test_log_artifact(self, tmpdir):
        """A logged artifact file appears among the task's meta outputs."""
        lorem = "Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt\n"
        f = tmpdir.join("abcd")
        f.write(lorem)
        @task
        def t_f_artifact(a=5):
            log_artifact("t_a", str(f))
        t = assert_run_task(t_f_artifact.t())
        actual = t._meta_output.list_partitions()
        actual_strings = list(map(str, actual))
        # The artifact name should show up in one of the partition paths.
        assert any(["t_a" in os.path.basename(s) for s in actual_strings])
    def test_log__write_read_metrics(self, tmpdir):
        """Metrics written by one task can be read back by a downstream task."""
        @task
        def write_metrics(a=5):
            log_metric("t_f", a)
        @task
        def read_metrics(metrics_task_id):
            # type: ( str) -> Dict
            source_task_attempt_folder = (
                get_databand_run().get_task_run(metrics_task_id).attempt_folder
            )
            metrics = read_task_metrics(source_task_attempt_folder)
            return metrics
        @band
        def metrics_flow():
            w = write_metrics()
            r = read_metrics(metrics_task_id=w.task.task_id)
            # Explicit dependency: read must run after write.
            as_task(r).set_upstream(w)
            return r
        t = assert_run_task(metrics_flow.t())
        metrics = t.result.load(value_type=Dict)
        assert {"t_f": 5} == metrics
| 30.287879 | 117 | 0.634817 | import os
from typing import Dict
from dbnd import as_task, band, task
from dbnd._core.commands import log_artifact, log_metric
from dbnd._core.current import get_databand_run
from dbnd._core.tracking.tracking_store_file import read_task_metrics
from dbnd.testing.helpers_pytest import assert_run_task
from test_dbnd.targets_tests import TargetTestBase
class TestTaskMetricsCommands(TargetTestBase):
def test_log_metric(self):
@task
def t_f_metric(a=5):
log_metric("t_f", a)
t = assert_run_task(t_f_metric.t())
assert (
t.ctrl.last_task_run.meta_files.get_metric_target("t_f").read().split()[1]
== "5"
)
def test_log_artifact(self, tmpdir):
lorem = "Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt\n"
f = tmpdir.join("abcd")
f.write(lorem)
@task
def t_f_artifact(a=5):
log_artifact("t_a", str(f))
t = assert_run_task(t_f_artifact.t())
actual = t._meta_output.list_partitions()
actual_strings = list(map(str, actual))
assert any(["t_a" in os.path.basename(s) for s in actual_strings])
def test_log__write_read_metrics(self, tmpdir):
@task
def write_metrics(a=5):
log_metric("t_f", a)
@task
def read_metrics(metrics_task_id):
source_task_attempt_folder = (
get_databand_run().get_task_run(metrics_task_id).attempt_folder
)
metrics = read_task_metrics(source_task_attempt_folder)
return metrics
@band
def metrics_flow():
w = write_metrics()
r = read_metrics(metrics_task_id=w.task.task_id)
as_task(r).set_upstream(w)
return r
t = assert_run_task(metrics_flow.t())
metrics = t.result.load(value_type=Dict)
assert {"t_f": 5} == metrics
| true | true |
f72d8ea026a247a21f48ec3e729b5a1627b6ab98 | 2,746 | py | Python | ios/web_view/PRESUBMIT_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | ios/web_view/PRESUBMIT_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | ios/web_view/PRESUBMIT_test.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import PRESUBMIT
# append the path of src/ to sys.path to import PRESUBMIT_test_mocks
SRC_IOS_WEB_VIEW_PATH = os.path.dirname(os.path.abspath(__file__))
SRC_PATH = os.path.dirname(os.path.dirname(SRC_IOS_WEB_VIEW_PATH))
sys.path.append(SRC_PATH)
import PRESUBMIT_test_mocks
class InclusionPathCheckerTest(unittest.TestCase):
  """Test the _CheckAbsolutePathInclusionInPublicHeaders presubmit check."""

  def testInclusionPathChecker(self):
    """Flags absolute-path inclusions only in ios/web_view/public headers."""
    # Each pair is (first line of file content, file path). `bads` should
    # be reported by the check; `goods` should not.
    bads = [
        ('#import "ios/web_view/aaa_imported.h"', 'ios/web_view/public/aaa.h'),
        ('#include "ios/web_view/eee_imported.h"', 'ios/web_view/public/eee.h'),
        ('#include "base/logging.h"', 'ios/web_view/public/fff.h'),
        ('#import "ios/web_view/public/ggg_imported.h"',
         'ios/web_view/public/ggg.h'),
        ('#import "subdirectory/hhh_imported.h"', 'ios/web_view/public/hhh.h'),
    ]
    goods = [
        ('#import "ios/web_view/bbb_imported.h"', 'ios/web_view/shell/bbb.h'),
        ('#import "ccc_imported.h"', 'ios/web_view/public/ccc.h'),
        ('#import <UIKit/UIKit.h>', 'ios/web_view/public/ddd.h'),
    ]
    # Filler appended to every file; the #include inside the comment and
    # the string literal must NOT trigger the check.
    normal_code = '''
    /**
     * Some random comments here.
     * Write #include "base/logging.h" to use logging functions.
     */
    int main() {
      double a = 1.0 / 2.0;
      const char* str = "Hello, World!"; // a string to print
      printf(str);
    }'''
    bads = [((code + normal_code).split('\n'),
             SRC_PATH + '/' + path) for code, path in bads]
    goods = [((code + normal_code).split('\n'),
              SRC_PATH + '/' + path) for code, path in goods]
    mock_input = PRESUBMIT_test_mocks.MockInputApi()
    mock_input.presubmit_local_path = SRC_IOS_WEB_VIEW_PATH
    mock_input.change = PRESUBMIT_test_mocks.MockChange([
        PRESUBMIT_test_mocks.MockFile(file_path, code)
        for code, file_path in (bads + goods)])
    mock_output = PRESUBMIT_test_mocks.MockOutputApi()
    errors = PRESUBMIT._CheckAbsolutePathInclusionInPublicHeaders(mock_input,
                                                                  mock_output)
    # Exactly one aggregated error naming every bad file and no good file.
    self.assertEqual(len(errors), 1)
    self.assertEqual('error', errors[0].type)
    self.assertTrue('with absolute path inclusion' in errors[0].message)
    for _, file_path in bads:
      self.assertTrue(file_path in errors[0].message)
    for _, file_path in goods:
      self.assertFalse(file_path in errors[0].message)
if __name__ == '__main__':
unittest.main()
| 36.613333 | 80 | 0.637291 |
import os
import sys
import unittest
import PRESUBMIT
SRC_IOS_WEB_VIEW_PATH = os.path.dirname(os.path.abspath(__file__))
SRC_PATH = os.path.dirname(os.path.dirname(SRC_IOS_WEB_VIEW_PATH))
sys.path.append(SRC_PATH)
import PRESUBMIT_test_mocks
class InclusionPathCheckerTest(unittest.TestCase):
def testInclusionPathChecker(self):
bads = [
('#import "ios/web_view/aaa_imported.h"', 'ios/web_view/public/aaa.h'),
('#include "ios/web_view/eee_imported.h"', 'ios/web_view/public/eee.h'),
('#include "base/logging.h"', 'ios/web_view/public/fff.h'),
('#import "ios/web_view/public/ggg_imported.h"',
'ios/web_view/public/ggg.h'),
('#import "subdirectory/hhh_imported.h"', 'ios/web_view/public/hhh.h'),
]
goods = [
('#import "ios/web_view/bbb_imported.h"', 'ios/web_view/shell/bbb.h'),
('#import "ccc_imported.h"', 'ios/web_view/public/ccc.h'),
('#import <UIKit/UIKit.h>', 'ios/web_view/public/ddd.h'),
]
normal_code = '''
/**
* Some random comments here.
* Write #include "base/logging.h" to use logging functions.
*/
int main() {
double a = 1.0 / 2.0;
const char* str = "Hello, World!"; // a string to print
printf(str);
}'''
bads = [((code + normal_code).split('\n'),
SRC_PATH + '/' + path) for code, path in bads]
goods = [((code + normal_code).split('\n'),
SRC_PATH + '/' + path) for code, path in goods]
mock_input = PRESUBMIT_test_mocks.MockInputApi()
mock_input.presubmit_local_path = SRC_IOS_WEB_VIEW_PATH
mock_input.change = PRESUBMIT_test_mocks.MockChange([
PRESUBMIT_test_mocks.MockFile(file_path, code)
for code, file_path in (bads + goods)])
mock_output = PRESUBMIT_test_mocks.MockOutputApi()
errors = PRESUBMIT._CheckAbsolutePathInclusionInPublicHeaders(mock_input,
mock_output)
self.assertEqual(len(errors), 1)
self.assertEqual('error', errors[0].type)
self.assertTrue('with absolute path inclusion' in errors[0].message)
for _, file_path in bads:
self.assertTrue(file_path in errors[0].message)
for _, file_path in goods:
self.assertFalse(file_path in errors[0].message)
if __name__ == '__main__':
unittest.main()
| true | true |
f72d8f50dd5091fb3db1affbd4c3e936c1aff93a | 2,332 | py | Python | sqlalchemy_geonames/files.py | dionysio/sqlalchemy-geonames | 0d2542cf53512b14415319f23ad53dc4994691a8 | [
"BSD-2-Clause-FreeBSD"
] | 17 | 2015-02-24T20:20:49.000Z | 2021-07-21T02:32:15.000Z | sqlalchemy_geonames/files.py | dionysio/sqlalchemy-geonames | 0d2542cf53512b14415319f23ad53dc4994691a8 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2016-11-13T17:00:26.000Z | 2020-05-28T13:12:07.000Z | sqlalchemy_geonames/files.py | dionysio/sqlalchemy-geonames | 0d2542cf53512b14415319f23ad53dc4994691a8 | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2015-03-28T12:23:50.000Z | 2020-05-28T08:41:50.000Z | BASE_DOWNLOAD_URL = 'http://download.geonames.org/export/dump/'
def full_url(filename):
    """Return the absolute GeoNames dump URL for *filename*."""
    return '{}{}'.format(BASE_DOWNLOAD_URL, filename)
# Maps each local GeoNames filename to its download metadata:
#   url           -- remote location of the (possibly zipped) file
#   unzip         -- True when the download is a .zip that must be extracted
#   is_primary    -- presumably marks alternative primary data sources
#                    (allCountries vs. the cities* subsets) — confirm with
#                    the importer code
#   language_code -- language of a featureCodes_* translation file
filename_config = {
    'admin1CodesASCII.txt': {
        'url': full_url('admin1CodesASCII.txt'),
    },
    'admin2Codes.txt': {
        'url': full_url('admin2Codes.txt'),
    },
    'allCountries.txt': {
        'url': full_url('allCountries.zip'),
        'unzip': True,
        'is_primary': True,
    },
    'alternateNames.txt': {
        'url': full_url('alternateNames.zip'),
        'unzip': True,
    },
    'cities1000.txt': {
        'url': full_url('cities1000.zip'),
        'unzip': True,
        'is_primary': True,
    },
    'cities15000.txt': {
        'url': full_url('cities15000.zip'),
        'unzip': True,
        'is_primary': True,
    },
    'cities5000.txt': {
        'url': full_url('cities5000.zip'),
        'unzip': True,
        'is_primary': True,
    },
    'countryInfo.txt': {
        'url': full_url('countryInfo.txt'),
    },
    'featureCodes_bg.txt': {
        'url': full_url('featureCodes_bg.txt'),
        'language_code': 'bg',
    },
    'featureCodes_en.txt': {
        'url': full_url('featureCodes_en.txt'),
        'language_code': 'en',
    },
    'featureCodes_nb.txt': {
        'url': full_url('featureCodes_nb.txt'),
        'language_code': 'nb',
    },
    'featureCodes_nn.txt': {
        'url': full_url('featureCodes_nn.txt'),
        'language_code': 'nn',
    },
    'featureCodes_no.txt': {
        'url': full_url('featureCodes_no.txt'),
        'language_code': 'no',
    },
    'featureCodes_ru.txt': {
        'url': full_url('featureCodes_ru.txt'),
        'language_code': 'ru',
    },
    'featureCodes_sv.txt': {
        'url': full_url('featureCodes_sv.txt'),
        'language_code': 'sv',
    },
    'hierarchy.txt': {
        'url': full_url('hierarchy.zip'),
        'unzip': True,
    },
    'iso-languagecodes.txt': {
        'url': full_url('iso-languagecodes.txt'),
    },
    'timeZones.txt': {
        'url': full_url('timeZones.txt'),
    },
    'userTags.txt': {
        'url': full_url('userTags.zip'),
        'unzip': True,
    },
}
# TODO: Support modification files
# alternateNamesDeletes-2013-12-16.txt
# alternateNamesModifications-2013-12-16.txt
# deletes-2013-12-16.txt
# modifications-2013-12-16.txt
| 25.626374 | 63 | 0.551458 | BASE_DOWNLOAD_URL = 'http://download.geonames.org/export/dump/'
def full_url(filename):
return BASE_DOWNLOAD_URL + filename
filename_config = {
'admin1CodesASCII.txt': {
'url': full_url('admin1CodesASCII.txt'),
},
'admin2Codes.txt': {
'url': full_url('admin2Codes.txt'),
},
'allCountries.txt': {
'url': full_url('allCountries.zip'),
'unzip': True,
'is_primary': True,
},
'alternateNames.txt': {
'url': full_url('alternateNames.zip'),
'unzip': True,
},
'cities1000.txt': {
'url': full_url('cities1000.zip'),
'unzip': True,
'is_primary': True,
},
'cities15000.txt': {
'url': full_url('cities15000.zip'),
'unzip': True,
'is_primary': True,
},
'cities5000.txt': {
'url': full_url('cities5000.zip'),
'unzip': True,
'is_primary': True,
},
'countryInfo.txt': {
'url': full_url('countryInfo.txt'),
},
'featureCodes_bg.txt': {
'url': full_url('featureCodes_bg.txt'),
'language_code': 'bg',
},
'featureCodes_en.txt': {
'url': full_url('featureCodes_en.txt'),
'language_code': 'en',
},
'featureCodes_nb.txt': {
'url': full_url('featureCodes_nb.txt'),
'language_code': 'nb',
},
'featureCodes_nn.txt': {
'url': full_url('featureCodes_nn.txt'),
'language_code': 'nn',
},
'featureCodes_no.txt': {
'url': full_url('featureCodes_no.txt'),
'language_code': 'no',
},
'featureCodes_ru.txt': {
'url': full_url('featureCodes_ru.txt'),
'language_code': 'ru',
},
'featureCodes_sv.txt': {
'url': full_url('featureCodes_sv.txt'),
'language_code': 'sv',
},
'hierarchy.txt': {
'url': full_url('hierarchy.zip'),
'unzip': True,
},
'iso-languagecodes.txt': {
'url': full_url('iso-languagecodes.txt'),
},
'timeZones.txt': {
'url': full_url('timeZones.txt'),
},
'userTags.txt': {
'url': full_url('userTags.zip'),
'unzip': True,
},
}
| true | true |
f72d911064ee83731fb01489837fcc983c05458c | 2,670 | py | Python | aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyReplicaModeRequest.py | DataDog/aliyun-openapi-python-sdk | 5cbee29bce6416dd62f61f0c3786b1af6ea0d84f | [
"Apache-2.0"
] | 1 | 2019-12-23T12:36:43.000Z | 2019-12-23T12:36:43.000Z | aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyReplicaModeRequest.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-rds/aliyunsdkrds/request/v20140815/ModifyReplicaModeRequest.py | liusc27/aliyun-openapi-python-sdk | 5e3db3535dd21de987dc5981e71151327d5a884f | [
"Apache-2.0"
] | 1 | 2021-02-23T11:27:54.000Z | 2021-02-23T11:27:54.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifyReplicaModeRequest(RpcRequest):
	"""RPC request object for the Aliyun RDS ``ModifyReplicaMode`` API
	(product 'Rds', version 2014-08-15).

	Auto-generated accessor pairs below map each API query parameter to
	get_/set_ methods backed by the query-param dict on RpcRequest.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyReplicaMode','rds')

	def get_DomainMode(self):
		return self.get_query_params().get('DomainMode')

	def set_DomainMode(self,DomainMode):
		self.add_query_param('DomainMode',DomainMode)

	def get_ResourceOwnerId(self):
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self,ResourceOwnerId):
		self.add_query_param('ResourceOwnerId',ResourceOwnerId)

	def get_PrimaryInstanceId(self):
		return self.get_query_params().get('PrimaryInstanceId')

	def set_PrimaryInstanceId(self,PrimaryInstanceId):
		self.add_query_param('PrimaryInstanceId',PrimaryInstanceId)

	def get_ReplicaMode(self):
		return self.get_query_params().get('ReplicaMode')

	def set_ReplicaMode(self,ReplicaMode):
		self.add_query_param('ReplicaMode',ReplicaMode)

	def get_SecurityToken(self):
		return self.get_query_params().get('SecurityToken')

	def set_SecurityToken(self,SecurityToken):
		self.add_query_param('SecurityToken',SecurityToken)

	def get_ResourceOwnerAccount(self):
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
		self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

	def get_OwnerAccount(self):
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self,OwnerAccount):
		self.add_query_param('OwnerAccount',OwnerAccount)

	def get_ReplicaId(self):
		return self.get_query_params().get('ReplicaId')

	def set_ReplicaId(self,ReplicaId):
		self.add_query_param('ReplicaId',ReplicaId)

	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
from aliyunsdkcore.request import RpcRequest
class ModifyReplicaModeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'ModifyReplicaMode','rds')
def get_DomainMode(self):
return self.get_query_params().get('DomainMode')
def set_DomainMode(self,DomainMode):
self.add_query_param('DomainMode',DomainMode)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_PrimaryInstanceId(self):
return self.get_query_params().get('PrimaryInstanceId')
def set_PrimaryInstanceId(self,PrimaryInstanceId):
self.add_query_param('PrimaryInstanceId',PrimaryInstanceId)
def get_ReplicaMode(self):
return self.get_query_params().get('ReplicaMode')
def set_ReplicaMode(self,ReplicaMode):
self.add_query_param('ReplicaMode',ReplicaMode)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_ReplicaId(self):
return self.get_query_params().get('ReplicaId')
def set_ReplicaId(self,ReplicaId):
self.add_query_param('ReplicaId',ReplicaId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | true | true |
f72d9189a05cf033d697164df047e31ba089901c | 560 | py | Python | test/services_tests/test_game_service.py | rhsu/slackjack | c6ba6ec97fcf669c8f4dddc83a3b03cd829ec792 | [
"MIT"
] | null | null | null | test/services_tests/test_game_service.py | rhsu/slackjack | c6ba6ec97fcf669c8f4dddc83a3b03cd829ec792 | [
"MIT"
] | 8 | 2019-03-25T23:11:54.000Z | 2019-04-09T23:38:23.000Z | test/services_tests/test_game_service.py | rhsu/slackjack | c6ba6ec97fcf669c8f4dddc83a3b03cd829ec792 | [
"MIT"
] | 1 | 2019-04-04T00:12:35.000Z | 2019-04-04T00:12:35.000Z | from services.game_service import GameService
from test.mocks.mock_objects import MockEndgameService
def test_play_works_with_no_hand(default_user_data):
    """Playing with a fresh user yields a response and a two-card hand.

    ``default_user_data`` is a pytest fixture, presumably defined in a
    conftest — confirm.
    """
    service = GameService(default_user_data, MockEndgameService())
    response = service.play()
    assert response is not None
    assert len(service.hand()) == 2
def test_busted(default_user_data, some_busted_hand):
    """Playing a busted hand delegates to the end-game service (mocked here
    to return the marker string "EndGameService")."""
    default_user_data.hand = some_busted_hand
    service = GameService(default_user_data, MockEndgameService())
    response = service.play()
    assert response == "EndGameService"
| 32.941176 | 66 | 0.782143 | from services.game_service import GameService
from test.mocks.mock_objects import MockEndgameService
def test_play_works_with_no_hand(default_user_data):
service = GameService(default_user_data, MockEndgameService())
response = service.play()
assert response is not None
assert len(service.hand()) == 2
def test_busted(default_user_data, some_busted_hand):
default_user_data.hand = some_busted_hand
service = GameService(default_user_data, MockEndgameService())
response = service.play()
assert response == "EndGameService"
| true | true |
f72d91d7965629ca9512adf5a81d10f7c72e8a9b | 21,546 | py | Python | sdk/python/pulumi_aws/lakeformation/outputs.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/lakeformation/outputs.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/lakeformation/outputs.py | aamir-locus/pulumi-aws | 3e234b050129bde35d8e072a88bd608562f02142 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'DataLakeSettingsCreateDatabaseDefaultPermission',
'DataLakeSettingsCreateTableDefaultPermission',
'PermissionsDataLocation',
'PermissionsDatabase',
'PermissionsTable',
'PermissionsTableWithColumns',
'GetDataLakeSettingsCreateDatabaseDefaultPermissionResult',
'GetDataLakeSettingsCreateTableDefaultPermissionResult',
'GetPermissionsDataLocationResult',
'GetPermissionsDatabaseResult',
'GetPermissionsTableResult',
'GetPermissionsTableWithColumnsResult',
]
@pulumi.output_type
class DataLakeSettingsCreateDatabaseDefaultPermission(dict):
    """Principal/permission-list pair used as a create-database default in
    Lake Formation data lake settings.  (tfgen-generated output type; the
    pulumi.output_type decorator backs the properties with dict storage.)"""
    def __init__(__self__, *,
                 permissions: Optional[Sequence[str]] = None,
                 principal: Optional[str] = None):
        """
        :param Sequence[str] permissions: List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, and `DESCRIBE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html).
        :param str principal: Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`.
        """
        if permissions is not None:
            pulumi.set(__self__, "permissions", permissions)
        if principal is not None:
            pulumi.set(__self__, "principal", principal)

    @property
    @pulumi.getter
    def permissions(self) -> Optional[Sequence[str]]:
        """
        List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, and `DESCRIBE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html).
        """
        return pulumi.get(self, "permissions")

    @property
    @pulumi.getter
    def principal(self) -> Optional[str]:
        """
        Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`.
        """
        return pulumi.get(self, "principal")
@pulumi.output_type
class DataLakeSettingsCreateTableDefaultPermission(dict):
def __init__(__self__, *,
permissions: Optional[Sequence[str]] = None,
principal: Optional[str] = None):
"""
:param Sequence[str] permissions: List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, and `DESCRIBE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html).
:param str principal: Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`.
"""
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
if principal is not None:
pulumi.set(__self__, "principal", principal)
@property
@pulumi.getter
def permissions(self) -> Optional[Sequence[str]]:
"""
List of permissions that are granted to the principal. Valid values may include `ALL`, `SELECT`, `ALTER`, `DROP`, `DELETE`, `INSERT`, and `DESCRIBE`. For more details, see [Lake Formation Permissions Reference](https://docs.aws.amazon.com/lake-formation/latest/dg/lf-permissions-reference.html).
"""
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def principal(self) -> Optional[str]:
"""
Principal who is granted permissions. To enforce metadata and underlying data access control only by IAM on new databases and tables set `principal` to `IAM_ALLOWED_PRINCIPALS` and `permissions` to `["ALL"]`.
"""
return pulumi.get(self, "principal")
@pulumi.output_type
class PermissionsDataLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "catalogId":
suggest = "catalog_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsDataLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsDataLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsDataLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
arn: str,
catalog_id: Optional[str] = None):
"""
:param str arn: Amazon Resource Name (ARN) that uniquely identifies the data location resource.
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
pulumi.set(__self__, "arn", arn)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
@property
@pulumi.getter
def arn(self) -> str:
"""
Amazon Resource Name (ARN) that uniquely identifies the data location resource.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@pulumi.output_type
class PermissionsDatabase(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "catalogId":
suggest = "catalog_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsDatabase. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsDatabase.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsDatabase.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
catalog_id: Optional[str] = None):
"""
:param str name: Name of the table resource.
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
pulumi.set(__self__, "name", name)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the table resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@pulumi.output_type
class PermissionsTable(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "databaseName":
suggest = "database_name"
elif key == "catalogId":
suggest = "catalog_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsTable. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsTable.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsTable.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
database_name: str,
catalog_id: Optional[str] = None,
name: Optional[str] = None,
wildcard: Optional[bool] = None):
"""
:param str database_name: Name of the database for the table with columns resource. Unique to the Data Catalog.
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
:param str name: Name of the table resource.
:param bool wildcard: Whether to use a wildcard representing every table under a database. Defaults to `false`.
"""
pulumi.set(__self__, "database_name", database_name)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
if name is not None:
pulumi.set(__self__, "name", name)
if wildcard is not None:
pulumi.set(__self__, "wildcard", wildcard)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
"""
Name of the database for the table with columns resource. Unique to the Data Catalog.
"""
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the table resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def wildcard(self) -> Optional[bool]:
"""
Whether to use a wildcard representing every table under a database. Defaults to `false`.
"""
return pulumi.get(self, "wildcard")
@pulumi.output_type
class PermissionsTableWithColumns(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "databaseName":
suggest = "database_name"
elif key == "catalogId":
suggest = "catalog_id"
elif key == "columnNames":
suggest = "column_names"
elif key == "excludedColumnNames":
suggest = "excluded_column_names"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsTableWithColumns. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsTableWithColumns.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsTableWithColumns.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
database_name: str,
name: str,
catalog_id: Optional[str] = None,
column_names: Optional[Sequence[str]] = None,
excluded_column_names: Optional[Sequence[str]] = None):
"""
:param str database_name: Name of the database for the table with columns resource. Unique to the Data Catalog.
:param str name: Name of the table resource.
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
:param Sequence[str] column_names: List of column names for the table.
:param Sequence[str] excluded_column_names: List of column names for the table to exclude.
"""
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "name", name)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
if column_names is not None:
pulumi.set(__self__, "column_names", column_names)
if excluded_column_names is not None:
pulumi.set(__self__, "excluded_column_names", excluded_column_names)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
"""
Name of the database for the table with columns resource. Unique to the Data Catalog.
"""
return pulumi.get(self, "database_name")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the table resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="columnNames")
def column_names(self) -> Optional[Sequence[str]]:
"""
List of column names for the table.
"""
return pulumi.get(self, "column_names")
@property
@pulumi.getter(name="excludedColumnNames")
def excluded_column_names(self) -> Optional[Sequence[str]]:
"""
List of column names for the table to exclude.
"""
return pulumi.get(self, "excluded_column_names")
@pulumi.output_type
class GetDataLakeSettingsCreateDatabaseDefaultPermissionResult(dict):
def __init__(__self__, *,
permissions: Sequence[str],
principal: str):
"""
:param Sequence[str] permissions: List of permissions granted to the principal.
:param str principal: Principal who is granted permissions.
"""
pulumi.set(__self__, "permissions", permissions)
pulumi.set(__self__, "principal", principal)
@property
@pulumi.getter
def permissions(self) -> Sequence[str]:
"""
List of permissions granted to the principal.
"""
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def principal(self) -> str:
"""
Principal who is granted permissions.
"""
return pulumi.get(self, "principal")
@pulumi.output_type
class GetDataLakeSettingsCreateTableDefaultPermissionResult(dict):
def __init__(__self__, *,
permissions: Sequence[str],
principal: str):
"""
:param Sequence[str] permissions: List of permissions granted to the principal.
:param str principal: Principal who is granted permissions.
"""
pulumi.set(__self__, "permissions", permissions)
pulumi.set(__self__, "principal", principal)
@property
@pulumi.getter
def permissions(self) -> Sequence[str]:
"""
List of permissions granted to the principal.
"""
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def principal(self) -> str:
"""
Principal who is granted permissions.
"""
return pulumi.get(self, "principal")
@pulumi.output_type
class GetPermissionsDataLocationResult(dict):
def __init__(__self__, *,
arn: str,
catalog_id: str):
"""
:param str arn: Amazon Resource Name (ARN) that uniquely identifies the data location resource.
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "catalog_id", catalog_id)
@property
@pulumi.getter
def arn(self) -> str:
"""
Amazon Resource Name (ARN) that uniquely identifies the data location resource.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@pulumi.output_type
class GetPermissionsDatabaseResult(dict):
def __init__(__self__, *,
catalog_id: str,
name: str):
"""
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
:param str name: Name of the table resource.
"""
pulumi.set(__self__, "catalog_id", catalog_id)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the table resource.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class GetPermissionsTableResult(dict):
def __init__(__self__, *,
catalog_id: str,
database_name: str,
name: str,
wildcard: Optional[bool] = None):
"""
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
:param str database_name: Name of the database for the table with columns resource. Unique to the Data Catalog.
:param str name: Name of the table resource.
:param bool wildcard: Whether to use a wildcard representing every table under a database. At least one of `name` or `wildcard` is required. Defaults to `false`.
"""
pulumi.set(__self__, "catalog_id", catalog_id)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "name", name)
if wildcard is not None:
pulumi.set(__self__, "wildcard", wildcard)
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
"""
Name of the database for the table with columns resource. Unique to the Data Catalog.
"""
return pulumi.get(self, "database_name")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the table resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def wildcard(self) -> Optional[bool]:
"""
Whether to use a wildcard representing every table under a database. At least one of `name` or `wildcard` is required. Defaults to `false`.
"""
return pulumi.get(self, "wildcard")
@pulumi.output_type
class GetPermissionsTableWithColumnsResult(dict):
def __init__(__self__, *,
catalog_id: str,
database_name: str,
name: str,
column_names: Optional[Sequence[str]] = None,
excluded_column_names: Optional[Sequence[str]] = None):
"""
:param str catalog_id: Identifier for the Data Catalog. By default, it is the account ID of the caller.
:param str database_name: Name of the database for the table with columns resource. Unique to the Data Catalog.
:param str name: Name of the table resource.
:param Sequence[str] column_names: List of column names for the table. At least one of `column_names` or `excluded_column_names` is required.
:param Sequence[str] excluded_column_names: List of column names for the table to exclude. At least one of `column_names` or `excluded_column_names` is required.
"""
pulumi.set(__self__, "catalog_id", catalog_id)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "name", name)
if column_names is not None:
pulumi.set(__self__, "column_names", column_names)
if excluded_column_names is not None:
pulumi.set(__self__, "excluded_column_names", excluded_column_names)
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
"""
Identifier for the Data Catalog. By default, it is the account ID of the caller.
"""
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
"""
Name of the database for the table with columns resource. Unique to the Data Catalog.
"""
return pulumi.get(self, "database_name")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the table resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="columnNames")
def column_names(self) -> Optional[Sequence[str]]:
"""
List of column names for the table. At least one of `column_names` or `excluded_column_names` is required.
"""
return pulumi.get(self, "column_names")
@property
@pulumi.getter(name="excludedColumnNames")
def excluded_column_names(self) -> Optional[Sequence[str]]:
"""
List of column names for the table to exclude. At least one of `column_names` or `excluded_column_names` is required.
"""
return pulumi.get(self, "excluded_column_names")
| 37.471304 | 337 | 0.633482 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'DataLakeSettingsCreateDatabaseDefaultPermission',
'DataLakeSettingsCreateTableDefaultPermission',
'PermissionsDataLocation',
'PermissionsDatabase',
'PermissionsTable',
'PermissionsTableWithColumns',
'GetDataLakeSettingsCreateDatabaseDefaultPermissionResult',
'GetDataLakeSettingsCreateTableDefaultPermissionResult',
'GetPermissionsDataLocationResult',
'GetPermissionsDatabaseResult',
'GetPermissionsTableResult',
'GetPermissionsTableWithColumnsResult',
]
@pulumi.output_type
class DataLakeSettingsCreateDatabaseDefaultPermission(dict):
def __init__(__self__, *,
permissions: Optional[Sequence[str]] = None,
principal: Optional[str] = None):
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
if principal is not None:
pulumi.set(__self__, "principal", principal)
@property
@pulumi.getter
def permissions(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def principal(self) -> Optional[str]:
return pulumi.get(self, "principal")
@pulumi.output_type
class DataLakeSettingsCreateTableDefaultPermission(dict):
def __init__(__self__, *,
permissions: Optional[Sequence[str]] = None,
principal: Optional[str] = None):
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
if principal is not None:
pulumi.set(__self__, "principal", principal)
@property
@pulumi.getter
def permissions(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def principal(self) -> Optional[str]:
return pulumi.get(self, "principal")
@pulumi.output_type
class PermissionsDataLocation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "catalogId":
suggest = "catalog_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsDataLocation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsDataLocation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsDataLocation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
arn: str,
catalog_id: Optional[str] = None):
pulumi.set(__self__, "arn", arn)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
return pulumi.get(self, "catalog_id")
@pulumi.output_type
class PermissionsDatabase(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "catalogId":
suggest = "catalog_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsDatabase. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsDatabase.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsDatabase.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
catalog_id: Optional[str] = None):
pulumi.set(__self__, "name", name)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
return pulumi.get(self, "catalog_id")
@pulumi.output_type
class PermissionsTable(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "databaseName":
suggest = "database_name"
elif key == "catalogId":
suggest = "catalog_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsTable. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsTable.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsTable.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
database_name: str,
catalog_id: Optional[str] = None,
name: Optional[str] = None,
wildcard: Optional[bool] = None):
pulumi.set(__self__, "database_name", database_name)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
if name is not None:
pulumi.set(__self__, "name", name)
if wildcard is not None:
pulumi.set(__self__, "wildcard", wildcard)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def wildcard(self) -> Optional[bool]:
return pulumi.get(self, "wildcard")
@pulumi.output_type
class PermissionsTableWithColumns(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "databaseName":
suggest = "database_name"
elif key == "catalogId":
suggest = "catalog_id"
elif key == "columnNames":
suggest = "column_names"
elif key == "excludedColumnNames":
suggest = "excluded_column_names"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PermissionsTableWithColumns. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PermissionsTableWithColumns.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PermissionsTableWithColumns.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
database_name: str,
name: str,
catalog_id: Optional[str] = None,
column_names: Optional[Sequence[str]] = None,
excluded_column_names: Optional[Sequence[str]] = None):
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "name", name)
if catalog_id is not None:
pulumi.set(__self__, "catalog_id", catalog_id)
if column_names is not None:
pulumi.set(__self__, "column_names", column_names)
if excluded_column_names is not None:
pulumi.set(__self__, "excluded_column_names", excluded_column_names)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
return pulumi.get(self, "database_name")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> Optional[str]:
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="columnNames")
def column_names(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "column_names")
@property
@pulumi.getter(name="excludedColumnNames")
def excluded_column_names(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "excluded_column_names")
@pulumi.output_type
class GetDataLakeSettingsCreateDatabaseDefaultPermissionResult(dict):
def __init__(__self__, *,
permissions: Sequence[str],
principal: str):
pulumi.set(__self__, "permissions", permissions)
pulumi.set(__self__, "principal", principal)
@property
@pulumi.getter
def permissions(self) -> Sequence[str]:
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def principal(self) -> str:
return pulumi.get(self, "principal")
@pulumi.output_type
class GetDataLakeSettingsCreateTableDefaultPermissionResult(dict):
def __init__(__self__, *,
permissions: Sequence[str],
principal: str):
pulumi.set(__self__, "permissions", permissions)
pulumi.set(__self__, "principal", principal)
@property
@pulumi.getter
def permissions(self) -> Sequence[str]:
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def principal(self) -> str:
return pulumi.get(self, "principal")
@pulumi.output_type
class GetPermissionsDataLocationResult(dict):
def __init__(__self__, *,
arn: str,
catalog_id: str):
pulumi.set(__self__, "arn", arn)
pulumi.set(__self__, "catalog_id", catalog_id)
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
return pulumi.get(self, "catalog_id")
@pulumi.output_type
class GetPermissionsDatabaseResult(dict):
def __init__(__self__, *,
catalog_id: str,
name: str):
pulumi.set(__self__, "catalog_id", catalog_id)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@pulumi.output_type
class GetPermissionsTableResult(dict):
def __init__(__self__, *,
catalog_id: str,
database_name: str,
name: str,
wildcard: Optional[bool] = None):
pulumi.set(__self__, "catalog_id", catalog_id)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "name", name)
if wildcard is not None:
pulumi.set(__self__, "wildcard", wildcard)
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
return pulumi.get(self, "database_name")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def wildcard(self) -> Optional[bool]:
return pulumi.get(self, "wildcard")
@pulumi.output_type
class GetPermissionsTableWithColumnsResult(dict):
def __init__(__self__, *,
catalog_id: str,
database_name: str,
name: str,
column_names: Optional[Sequence[str]] = None,
excluded_column_names: Optional[Sequence[str]] = None):
pulumi.set(__self__, "catalog_id", catalog_id)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "name", name)
if column_names is not None:
pulumi.set(__self__, "column_names", column_names)
if excluded_column_names is not None:
pulumi.set(__self__, "excluded_column_names", excluded_column_names)
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> str:
return pulumi.get(self, "database_name")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="columnNames")
def column_names(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "column_names")
@property
@pulumi.getter(name="excludedColumnNames")
def excluded_column_names(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "excluded_column_names")
| true | true |
f72d9252e9bb0a32ba37dc42ebe896887438d7a8 | 2,930 | py | Python | plugins/dbnd-docker/src/dbnd_docker/container_engine_config.py | busunkim96/dbnd | 0191fdcd4c4fbd35006f1026d1a55b2abab9097b | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | plugins/dbnd-docker/src/dbnd_docker/container_engine_config.py | busunkim96/dbnd | 0191fdcd4c4fbd35006f1026d1a55b2abab9097b | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | plugins/dbnd-docker/src/dbnd_docker/container_engine_config.py | busunkim96/dbnd | 0191fdcd4c4fbd35006f1026d1a55b2abab9097b | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | import subprocess
from dbnd import parameter
from dbnd._core.parameter.validators import NonEmptyString
from dbnd._core.run.databand_run import DatabandRun
from dbnd._core.settings import EngineConfig
from targets.values.version_value import VersionStr
class ContainerEngineConfig(EngineConfig):
require_submit = True
dbnd_executable = ["dbnd"] # we should have 'dbnd' command installed in container
container_repository = parameter(validator=NonEmptyString()).help(
"Docker container registry"
)[str]
container_tag = parameter.none().help("Docker container tag")[VersionStr]
container_tag_gpu = parameter.none().help("Docker container tag for GPU tasks")[
VersionStr
]
docker_build_tag_base = parameter.help("Auto build docker container tag").value(
"dbnd_build"
)
docker_build_tag = parameter.help(
"Docker build tag for the docker image dbnd will build"
).default(None)[str]
docker_build = parameter(default=True).help(
"Automatically build docker image. "
"If container_repository is unset it will be taken (along with the tag) from the docker build settings"
)[bool]
docker_build_push = parameter(default=True).help(
"If docker_build is enabled, controls whether the image is automatically pushed or not"
)
def get_docker_ctrl(self, task_run):
pass
@property
def full_image(self):
return "{}:{}".format(self.container_repository, self.container_tag)
def prepare_for_run(self, run):
# type: (DatabandRun) -> None
super(ContainerEngineConfig, self).prepare_for_run(run)
from dbnd_docker.submit_ctrl import prepare_docker_for_executor
# when we run at submitter - we need to update driver_engine - this one will be used to send job
# when we run at driver - we update task config, it will be used by task
# inside pod submission the fallback is always on task_engine
prepare_docker_for_executor(run, self)
def submit_to_engine_task(self, env, task_name, args, interactive=True):
from dbnd_docker.docker.docker_task import DockerRunTask
submit_task = DockerRunTask(
task_name=task_name,
command=subprocess.list2cmdline(args),
image=self.full_image,
docker_engine=self,
task_is_system=True,
)
return submit_task
def _should_wrap_with_submit_task(self, task_run):
"""
We don't want to resubmit if it's dockerized run and we running with the same engine
"""
from dbnd_docker.docker.docker_task import DockerRunTask
if isinstance(task_run.task, DockerRunTask):
if task_run.task.docker_engine.task_name == self.task_name:
return False
return super(ContainerEngineConfig, self)._should_wrap_with_submit_task(
task_run
)
| 37.564103 | 111 | 0.696928 | import subprocess
from dbnd import parameter
from dbnd._core.parameter.validators import NonEmptyString
from dbnd._core.run.databand_run import DatabandRun
from dbnd._core.settings import EngineConfig
from targets.values.version_value import VersionStr
class ContainerEngineConfig(EngineConfig):
    """Base engine config for running dbnd tasks inside Docker containers.

    Declares the container image coordinates (repository/tag) and the
    auto-build behavior; subclasses/callers use it to wrap task runs in a
    Docker submit task.
    """

    # Runs through a submit task rather than executing in-process.
    require_submit = True
    dbnd_executable = ["dbnd"]
    container_repository = parameter(validator=NonEmptyString()).help(
        "Docker container registry"
    )[str]
    container_tag = parameter.none().help("Docker container tag")[VersionStr]
    container_tag_gpu = parameter.none().help("Docker container tag for GPU tasks")[
        VersionStr
    ]
    docker_build_tag_base = parameter.help("Auto build docker container tag").value(
        "dbnd_build"
    )
    docker_build_tag = parameter.help(
        "Docker build tag for the docker image dbnd will build"
    ).default(None)[str]
    docker_build = parameter(default=True).help(
        "Automatically build docker image. "
        "If container_repository is unset it will be taken (along with the tag) from the docker build settings"
    )[bool]
    docker_build_push = parameter(default=True).help(
        "If docker_build is enabled, controls whether the image is automatically pushed or not"
    )

    def get_docker_ctrl(self, task_run):
        """Hook for subclasses to return a docker controller; no-op here."""
        pass

    @property
    def full_image(self):
        """Full image reference: '<container_repository>:<container_tag>'."""
        return "{}:{}".format(self.container_repository, self.container_tag)

    def prepare_for_run(self, run):
        """Run base preparation, then build/push the docker image if configured."""
        super(ContainerEngineConfig, self).prepare_for_run(run)
        # Imported lazily to avoid a hard dependency at module import time.
        from dbnd_docker.submit_ctrl import prepare_docker_for_executor
        prepare_docker_for_executor(run, self)

    def submit_to_engine_task(self, env, task_name, args, interactive=True):
        """Wrap the given command-line args in a DockerRunTask using this engine."""
        from dbnd_docker.docker.docker_task import DockerRunTask
        submit_task = DockerRunTask(
            task_name=task_name,
            # Re-quote the arg list into a single shell-safe command string.
            command=subprocess.list2cmdline(args),
            image=self.full_image,
            docker_engine=self,
            task_is_system=True,
        )
        return submit_task

    def _should_wrap_with_submit_task(self, task_run):
        """Avoid double-wrapping a DockerRunTask that already uses this engine."""
        from dbnd_docker.docker.docker_task import DockerRunTask
        if isinstance(task_run.task, DockerRunTask):
            if task_run.task.docker_engine.task_name == self.task_name:
                return False
        return super(ContainerEngineConfig, self)._should_wrap_with_submit_task(
            task_run
        )
| true | true |
f72d929e9c08be5c45b8e9ee81bea161824633b0 | 661 | py | Python | Sorting/Selection Sort/selection_sort.py | Lashuk1729/PyAlgo-Tree | c8546ba45161a8c9ac87dc2710b5fb944568f5b1 | [
"MIT"
] | 24 | 2021-07-06T10:08:46.000Z | 2021-10-17T20:18:41.000Z | Sorting/Selection Sort/selection_sort.py | Lashuk1729/PyAlgo-Tree | c8546ba45161a8c9ac87dc2710b5fb944568f5b1 | [
"MIT"
] | 159 | 2021-06-06T12:44:09.000Z | 2021-10-31T14:25:28.000Z | Sorting/Selection Sort/selection_sort.py | Lashuk1729/PyAlgo-Tree | c8546ba45161a8c9ac87dc2710b5fb944568f5b1 | [
"MIT"
] | 47 | 2021-07-05T16:32:14.000Z | 2021-11-01T13:59:16.000Z | def selectionSort(array, n):
for i in range(n):
minimum = i
for j in range(i + 1, n):
# to sort in descending order, change > to < in this line
# select the minimum element in each loop
if array[j] < array[minimum]:
minimum = j
# put min at the correct position
(array[i], array[minimum]) = (array[minimum], array[i])
# Read the array interactively: first its length, then one element per line.
data = [ ]
size = int(input("Enter size of array : "))
print("Enter array elements: ")
for i in range(size):
    e=int(input())
    data.append(e)
# Sort in place and print the result.
selectionSort(data, size)
print('Sorted Array in Ascending Order:')
print(data) | 26.44 | 69 | 0.562784 | def selectionSort(array, n):
for i in range(n):
minimum = i
for j in range(i + 1, n):
if array[j] < array[minimum]:
minimum = j
(array[i], array[minimum]) = (array[minimum], array[i])
data = [ ]
size = int(input("Enter size of array : "))
print("Enter array elements: ")
for i in range(size):
e=int(input())
data.append(e)
selectionSort(data, size)
print('Sorted Array in Ascending Order:')
print(data) | true | true |
f72d9323fed3cb0d5783a901014f576253fb4d86 | 1,028 | py | Python | django/solution/untitled/ksiazkaadresowa/management/commands/clean.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | 1 | 2019-01-02T15:04:08.000Z | 2019-01-02T15:04:08.000Z | django/solution/untitled/ksiazkaadresowa/management/commands/clean.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | django/solution/untitled/ksiazkaadresowa/management/commands/clean.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand
from ksiazkaadresowa.models import Person
class Command(BaseCommand):
    """Management command: read a log file, upper-case each line, print it."""

    help = 'Moj tekst pomocy'

    def add_arguments(self, parser):
        """Register the optional --file and --format command-line options."""
        parser.add_argument(
            '--file',
            dest='file',
            nargs='?',
            help='Log File',
        )
        parser.add_argument(
            '--format',
            nargs='?',
            dest='format',
            help='Log File Format',
        )

    def handle(self, *args, **options):
        """Entry point: transform each line of the given file and print them."""
        filename = options['file']
        # NOTE(review): `format` shadows the builtin and is never used below.
        format = options['format']
        content = []
        with open(filename) as file:
            for line in file:
                line = self.parse_line(line)
                content.append(line)
        print('\n'.join(content))
        return
        # NOTE(review): everything below the `return` above is unreachable
        # dead code — presumably leftover from an earlier version that
        # title-cased Person names. Confirm before deleting.
        for p in Person.objects.all():
            p.first_name = p.first_name.title()
            p.last_name = p.last_name.title()
            p.save()

    def parse_line(self, line):
        """Per-line transformation: upper-case the line."""
        return line.upper()
| 23.906977 | 51 | 0.51751 | from django.core.management.base import BaseCommand
from ksiazkaadresowa.models import Person
class Command(BaseCommand):
help = 'Moj tekst pomocy'
def add_arguments(self, parser):
parser.add_argument(
'--file',
dest='file',
nargs='?',
help='Log File',
)
parser.add_argument(
'--format',
nargs='?',
dest='format',
help='Log File Format',
)
def handle(self, *args, **options):
filename = options['file']
format = options['format']
content = []
with open(filename) as file:
for line in file:
line = self.parse_line(line)
content.append(line)
print('\n'.join(content))
return
for p in Person.objects.all():
p.first_name = p.first_name.title()
p.last_name = p.last_name.title()
p.save()
def parse_line(self, line):
return line.upper()
| true | true |
f72d9348637b3cd7722c51c6e5c4d93b8b735f0c | 5,928 | py | Python | baselines/cher/experiment/config.py | krishpop/CHER | 0633a45151b13f23acf20faabc65028c599a3551 | [
"MIT"
] | 38 | 2019-10-21T14:04:33.000Z | 2022-01-18T05:31:26.000Z | baselines/cher/experiment/config.py | krishpop/CHER | 0633a45151b13f23acf20faabc65028c599a3551 | [
"MIT"
] | 3 | 2019-12-12T01:36:12.000Z | 2021-04-21T19:53:55.000Z | baselines/cher/experiment/config.py | krishpop/CHER | 0633a45151b13f23acf20faabc65028c599a3551 | [
"MIT"
] | 12 | 2019-12-06T03:46:02.000Z | 2021-12-01T11:17:07.000Z | from copy import deepcopy
import numpy as np
import json
import os
import gym
from baselines import logger
from baselines.her.ddpg import DDPG
from baselines.cher.her import make_sample_her_transitions
# Per-environment overrides applied on top of DEFAULT_PARAMS.
DEFAULT_ENV_PARAMS = {
    'FetchReach-v0': {
        'n_cycles': 10,
    },
}

# Default hyperparameters for DDPG + HER training.
DEFAULT_PARAMS = {
    # env
    'max_u': 1.,  # max absolute value of actions on different coordinates
    # ddpg
    'layers': 3,  # number of layers in the critic/actor networks
    'hidden': 256,  # number of neurons in each hidden layer
    'network_class': 'baselines.her.actor_critic:ActorCritic',
    'Q_lr': 0.001,  # critic learning rate
    'pi_lr': 0.001,  # actor learning rate
    'buffer_size': int(1E6),  # for experience replay
    'polyak': 0.95,  # polyak averaging coefficient
    'action_l2': 1.0,  # quadratic penalty on actions (before rescaling by max_u)
    'clip_obs': 200.,
    'scope': 'ddpg',  # can be tweaked for testing
    'relative_goals': False,
    # training
    'n_cycles': 50,  # per epoch
    'rollout_batch_size': 2,  # per mpi thread
    'n_batches': 40,  # training batches per cycle
    'batch_size': 64,  # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.
    'n_test_rollouts': 10,  # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
    'test_with_polyak': False,  # run test episodes with the target network
    # exploration
    'random_eps': 0.3,  # percentage of time a random action is taken
    'noise_eps': 0.2,  # std of gaussian noise added to not-completely-random actions as a percentage of max_u
    # HER
    'replay_strategy': 'future',  # supported modes: future, none
    'replay_k': 4,  # number of additional goals used for replay, only used if off_policy_data=future
    # normalization
    'norm_eps': 0.01,  # epsilon used for observation normalization
    'norm_clip': 5,  # normalized observations are cropped to these values
}
# Cache of environments keyed by their factory function.
CACHED_ENVS = {}


def cached_make_env(make_env):
    """Create the environment for *make_env* at most once and reuse it.

    Useful when we only need to probe env properties (observation/action
    spaces) without paying environment construction cost repeatedly.
    """
    if make_env in CACHED_ENVS:
        return CACHED_ENVS[make_env]
    env = make_env()
    CACHED_ENVS[make_env] = env
    return env
def prepare_params(kwargs):
    """Normalize a raw experiment-config dict in place and return it.

    Side effects on *kwargs*:
      * adds 'make_env', 'T' (episode horizon), 'gamma', 'ddpg_params';
      * expands 'lr' into 'pi_lr'/'Q_lr';
      * moves DDPG-specific keys into 'ddpg_params', leaving '_<name>'
        copies behind for logging.
    """
    # DDPG params
    ddpg_params = dict()
    env_name = kwargs['env_name']
    def make_env():
        return gym.make(env_name)
    kwargs['make_env'] = make_env
    # Probe the env once to read its episode horizon.
    tmp_env = cached_make_env(kwargs['make_env'])
    assert hasattr(tmp_env, '_max_episode_steps')
    kwargs['T'] = tmp_env._max_episode_steps
    tmp_env.reset()
    kwargs['max_u'] = np.array(kwargs['max_u']) if type(kwargs['max_u']) == list else kwargs['max_u']
    # Discount chosen so that the horizon T sets the effective return length.
    kwargs['gamma'] = 1. - 1. / kwargs['T']
    if 'lr' in kwargs:
        kwargs['pi_lr'] = kwargs['lr']
        kwargs['Q_lr'] = kwargs['lr']
        del kwargs['lr']
    for name in ['buffer_size', 'hidden', 'layers',
                 'network_class',
                 'polyak',
                 'batch_size', 'Q_lr', 'pi_lr',
                 'norm_eps', 'norm_clip', 'max_u',
                 'action_l2', 'clip_obs', 'scope', 'relative_goals']:
        ddpg_params[name] = kwargs[name]
        # Keep an underscore-prefixed copy so the value still shows in logs.
        kwargs['_' + name] = kwargs[name]
        del kwargs[name]
    kwargs['ddpg_params'] = ddpg_params
    return kwargs
def log_params(params, logger=logger):
    """Log every entry of *params* as 'key: value', in sorted key order."""
    for name in sorted(params):
        logger.info('{}: {}'.format(name, params[name]))
def configure_her(params):
    """Build the HER transition-sampling function from *params*.

    Pops 'replay_strategy' and 'replay_k' out of *params* (stashing
    underscore-prefixed copies for logging) and wires the environment's
    reward computation into the sampler.
    """
    env = cached_make_env(params['make_env'])
    env.reset()

    def reward_fun(ag_2, g, info):  # vectorized over batches
        return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)

    her_params = {'reward_fun': reward_fun}
    for key in ('replay_strategy', 'replay_k'):
        her_params[key] = params.pop(key)
        params['_' + key] = her_params[key]
    return make_sample_her_transitions(**her_params)
def simple_goal_subtract(a, b):
    """Default goal-difference operator: element-wise a - b (shapes must match)."""
    assert a.shape == b.shape
    return a - b
def configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):
    """Construct the DDPG policy from prepared params and inferred dims.

    Args:
        dims: dict of input dimensions (from configure_dims).
        params: prepared experiment config (from prepare_params).
        reuse: reuse existing TF variables.
        use_mpi: enable MPI-synchronized training.
        clip_return: clip returns to the geometric-series bound 1/(1-gamma).

    Returns:
        A DDPG policy instance.
    """
    sample_her_transitions = configure_her(params)
    # Extract relevant parameters.
    gamma = params['gamma']
    rollout_batch_size = params['rollout_batch_size']
    ddpg_params = params['ddpg_params']
    input_dims = dims.copy()
    # DDPG agent
    env = cached_make_env(params['make_env'])
    env.reset()
    ddpg_params.update({'input_dims': input_dims,  # agent takes an input observations
                        'T': params['T'],
                        'clip_pos_returns': True,  # clip positive returns
                        'clip_return': (1. / (1. - gamma)) if clip_return else np.inf,  # max abs of return
                        'rollout_batch_size': rollout_batch_size,
                        'subtract_goals': simple_goal_subtract,
                        'sample_transitions': sample_her_transitions,
                        'gamma': gamma,
                        })
    ddpg_params['info'] = {
        'env_name': params['env_name'],
    }
    policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)
    return policy
def configure_dims(params):
    """Probe the environment once to infer observation/action/goal sizes.

    Returns a dict with 'o', 'u', 'g' entries plus one 'info_<key>' entry
    per field of the env's step() info dict (scalars count as length 1).
    """
    env = cached_make_env(params['make_env'])
    env.reset()
    obs, _, _, info = env.step(env.action_space.sample())

    dims = {
        'o': obs['observation'].shape[0],
        'u': env.action_space.shape[0],
        'g': obs['desired_goal'].shape[0],
    }
    for key, raw in info.items():
        # Promote scalars to 1-element vectors so shape[0] is well-defined.
        arr = np.atleast_1d(np.array(raw))
        dims['info_{}'.format(key)] = arr.shape[0]
    return dims
| 34.465116 | 110 | 0.63529 | from copy import deepcopy
import numpy as np
import json
import os
import gym
from baselines import logger
from baselines.her.ddpg import DDPG
from baselines.cher.her import make_sample_her_transitions
DEFAULT_ENV_PARAMS = {
'FetchReach-v0': {
'n_cycles': 10,
},
}
DEFAULT_PARAMS = {
'max_u': 1.,
'layers': 3,
'hidden': 256,
'network_class': 'baselines.her.actor_critic:ActorCritic',
'Q_lr': 0.001,
'pi_lr': 0.001,
'buffer_size': int(1E6),
'polyak': 0.95,
'action_l2': 1.0,
'clip_obs': 200.,
'scope': 'ddpg',
'relative_goals': False,
'n_cycles': 50,
'rollout_batch_size': 2,
'n_batches': 40,
'batch_size': 64,
'n_test_rollouts': 10,
'test_with_polyak': False,
'random_eps': 0.3,
'noise_eps': 0.2,
'replay_strategy': 'future',
'replay_k': 4,
'norm_eps': 0.01,
'norm_clip': 5,
}
CACHED_ENVS = {}
def cached_make_env(make_env):
if make_env not in CACHED_ENVS:
env = make_env()
CACHED_ENVS[make_env] = env
return CACHED_ENVS[make_env]
def prepare_params(kwargs):
ddpg_params = dict()
env_name = kwargs['env_name']
def make_env():
return gym.make(env_name)
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
assert hasattr(tmp_env, '_max_episode_steps')
kwargs['T'] = tmp_env._max_episode_steps
tmp_env.reset()
kwargs['max_u'] = np.array(kwargs['max_u']) if type(kwargs['max_u']) == list else kwargs['max_u']
kwargs['gamma'] = 1. - 1. / kwargs['T']
if 'lr' in kwargs:
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers',
'network_class',
'polyak',
'batch_size', 'Q_lr', 'pi_lr',
'norm_eps', 'norm_clip', 'max_u',
'action_l2', 'clip_obs', 'scope', 'relative_goals']:
ddpg_params[name] = kwargs[name]
kwargs['_' + name] = kwargs[name]
del kwargs[name]
kwargs['ddpg_params'] = ddpg_params
return kwargs
def log_params(params, logger=logger):
for key in sorted(params.keys()):
logger.info('{}: {}'.format(key, params[key]))
def configure_her(params):
env = cached_make_env(params['make_env'])
env.reset()
def reward_fun(ag_2, g, info):
return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)
her_params = {
'reward_fun': reward_fun,
}
for name in ['replay_strategy', 'replay_k']:
her_params[name] = params[name]
params['_' + name] = her_params[name]
del params[name]
sample_her_transitions = make_sample_her_transitions(**her_params)
return sample_her_transitions
def simple_goal_subtract(a, b):
assert a.shape == b.shape
return a - b
def configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):
sample_her_transitions = configure_her(params)
gamma = params['gamma']
rollout_batch_size = params['rollout_batch_size']
ddpg_params = params['ddpg_params']
input_dims = dims.copy()
env = cached_make_env(params['make_env'])
env.reset()
ddpg_params.update({'input_dims': input_dims,
'T': params['T'],
'clip_pos_returns': True,
'clip_return': (1. / (1. - gamma)) if clip_return else np.inf,
'rollout_batch_size': rollout_batch_size,
'subtract_goals': simple_goal_subtract,
'sample_transitions': sample_her_transitions,
'gamma': gamma,
})
ddpg_params['info'] = {
'env_name': params['env_name'],
}
policy = DDPG(reuse=reuse, **ddpg_params, use_mpi=use_mpi)
return policy
def configure_dims(params):
env = cached_make_env(params['make_env'])
env.reset()
obs, _, _, info = env.step(env.action_space.sample())
dims = {
'o': obs['observation'].shape[0],
'u': env.action_space.shape[0],
'g': obs['desired_goal'].shape[0],
}
for key, value in info.items():
value = np.array(value)
if value.ndim == 0:
value = value.reshape(1)
dims['info_{}'.format(key)] = value.shape[0]
return dims
| true | true |
f72d93705daed63829c2d128d4788b910c9a36be | 2,748 | py | Python | var/spack/repos/builtin/packages/w3m/package.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-10T13:47:48.000Z | 2019-04-17T13:05:17.000Z | var/spack/repos/builtin/packages/w3m/package.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 32 | 2020-12-15T17:29:20.000Z | 2022-03-21T15:08:31.000Z | var/spack/repos/builtin/packages/w3m/package.py | Kerilk/spack | e027942b55407a4a5fe323b93d8e57200c873a43 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2018-04-06T09:04:11.000Z | 2020-01-24T12:52:12.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class W3m(AutotoolsPackage):
    """
    w3m is a text-based web browser as well as a pager like `more' or `less'.
    With w3m you can browse web pages through a terminal emulator window (xterm,
    rxvt or something like that). Moreover, w3m can be used as a text formatting
    tool which typesets HTML into plain text.
    """

    homepage = "http://w3m.sourceforge.net/index.en.html"
    url = "https://downloads.sourceforge.net/project/w3m/w3m/w3m-0.5.3/w3m-0.5.3.tar.gz"
    maintainers = ['ronin_gw']
    version('0.5.3', sha256='e994d263f2fd2c22febfbe45103526e00145a7674a0fda79c822b97c2770a9e3')

    # mandatory dependency
    depends_on('bdw-gc')

    # termlib: which terminal library to link against
    variant('termlib', default='ncurses', description='select termlib',
            values=('ncurses', 'termcap', 'none'), multi=False)
    depends_on('termcap', when='termlib=termcap')
    depends_on('ncurses+termlib', when='termlib=ncurses')

    # https support
    variant('https', default=True, description='support https protocol')
    depends_on('openssl@:1.0.2u', when='+https')

    # X11 support
    variant('image', default=True, description='enable image')
    depends_on('libx11', when='+image')

    # inline image support (only meaningful together with +image)
    variant('imagelib', default='imlib2', description='select imagelib',
            values=('gdk-pixbuf', 'imlib2'), multi=False)
    depends_on('gdk-pixbuf@2:+x11', when='imagelib=gdk-pixbuf +image')
    depends_on('imlib2@1.0.5:', when='imagelib=imlib2 +image')

    # fix for modern libraries
    patch('fix_redef.patch')
    patch('fix_gc.patch')

    def _add_arg_for_variant(self, args, variant, choices):
        """Append '--with-<variant>=<value>' if the variant's value is in choices."""
        for avail_lib in choices:
            if self.spec.variants[variant].value == avail_lib:
                args.append('--with-{0}={1}'.format(variant, avail_lib))
                return

    def configure_args(self):
        """Translate the selected variants into ./configure arguments."""
        args = []
        self._add_arg_for_variant(args, 'termlib', ('termcap', 'ncurses'))
        if '+image' in self.spec:
            args.append('--enable-image')
            self._add_arg_for_variant(args, 'imagelib', ('gdk-pixbuf', 'imlib2'))
        return args

    def setup_build_environment(self, env):
        """Add the linker flags configure does not pick up on its own."""
        if self.spec.variants['termlib'].value == 'ncurses':
            env.append_flags('LDFLAGS', '-ltinfo')
            env.append_flags('LDFLAGS', '-lncurses')
        if '+image' in self.spec:
            env.append_flags('LDFLAGS', '-lX11')

    # parallel build causes build failure
    def build(self, spec, prefix):
        """Run make serially; the w3m build is not parallel-safe."""
        make(parallel=False)
| 35.688312 | 95 | 0.652838 |
from spack import *
class W3m(AutotoolsPackage):
homepage = "http://w3m.sourceforge.net/index.en.html"
url = "https://downloads.sourceforge.net/project/w3m/w3m/w3m-0.5.3/w3m-0.5.3.tar.gz"
maintainers = ['ronin_gw']
version('0.5.3', sha256='e994d263f2fd2c22febfbe45103526e00145a7674a0fda79c822b97c2770a9e3')
depends_on('bdw-gc')
variant('termlib', default='ncurses', description='select termlib',
values=('ncurses', 'termcap', 'none'), multi=False)
depends_on('termcap', when='termlib=termcap')
depends_on('ncurses+termlib', when='termlib=ncurses')
variant('https', default=True, description='support https protocol')
depends_on('openssl@:1.0.2u', when='+https')
variant('image', default=True, description='enable image')
depends_on('libx11', when='+image')
variant('imagelib', default='imlib2', description='select imagelib',
values=('gdk-pixbuf', 'imlib2'), multi=False)
depends_on('gdk-pixbuf@2:+x11', when='imagelib=gdk-pixbuf +image')
depends_on('imlib2@1.0.5:', when='imagelib=imlib2 +image')
patch('fix_redef.patch')
patch('fix_gc.patch')
def _add_arg_for_variant(self, args, variant, choices):
for avail_lib in choices:
if self.spec.variants[variant].value == avail_lib:
args.append('--with-{0}={1}'.format(variant, avail_lib))
return
def configure_args(self):
args = []
self._add_arg_for_variant(args, 'termlib', ('termcap', 'ncurses'))
if '+image' in self.spec:
args.append('--enable-image')
self._add_arg_for_variant(args, 'imagelib', ('gdk-pixbuf', 'imlib2'))
return args
def setup_build_environment(self, env):
if self.spec.variants['termlib'].value == 'ncurses':
env.append_flags('LDFLAGS', '-ltinfo')
env.append_flags('LDFLAGS', '-lncurses')
if '+image' in self.spec:
env.append_flags('LDFLAGS', '-lX11')
def build(self, spec, prefix):
make(parallel=False)
| true | true |
f72d93713d6d8410399ade77878b0f44c3d290fb | 2,666 | py | Python | netanalysis/dns/data/model.py | Jigsaw-Code/net-analysis | 2b36fe89c3305f4d1c93406725a7f46a74f246f9 | [
"Apache-2.0"
] | 88 | 2018-03-06T16:21:25.000Z | 2022-03-30T20:59:20.000Z | netanalysis/dns/data/model.py | Jigsaw-Code/net-analysis | 2b36fe89c3305f4d1c93406725a7f46a74f246f9 | [
"Apache-2.0"
] | 6 | 2020-03-02T18:06:06.000Z | 2022-03-16T10:35:57.000Z | netanalysis/dns/data/model.py | Jigsaw-Code/net-analysis | 2b36fe89c3305f4d1c93406725a7f46a74f246f9 | [
"Apache-2.0"
] | 23 | 2018-03-21T12:56:53.000Z | 2022-03-25T19:48:30.000Z | # Copyright 2018 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from ipaddress import ip_address, IPv4Address, IPv6Address
from typing import List, Union
class RecordData:
    """Base class for the payload of a DNS resource record."""

    def __repr__(self):
        return "%s(%s)" % (self.__class__, str(self.__dict__))


class IpAddressData(RecordData):
    """Payload of an A (IPv4) or AAAA (IPv6) resource record."""

    def __init__(self, ip_str: str) -> None:
        # Parse eagerly so an invalid address fails at construction time.
        self._ip = ip_address(ip_str)

    @property
    def ip(self):
        """The parsed IPv4Address or IPv6Address."""
        return self._ip


class CnameData(RecordData):
    """Payload of a CNAME resource record."""

    def __init__(self, cname: str) -> None:
        self._cname = cname

    @property
    def cname(self):
        """The canonical name this record points at."""
        return self._cname


class ResourceRecord:
    """A DNS resource record: a name, its data, and an optional TTL.

    Raises:
        ValueError: if *name* is empty, or *ttl* is neither None nor a
            datetime.timedelta.
    """

    def __init__(self, name: str, data: RecordData, ttl: datetime.timedelta = None) -> None:
        if not name:
            raise ValueError("ResourceRecord requires name")
        # Validate *before* assigning so a half-initialized object is never
        # observable (the original assigned self.ttl first, then validated).
        if not isinstance(ttl, (type(None), datetime.timedelta)):
            raise ValueError("ttl must be of type datetime.timedelta. Found type %s, value %s" % (
                type(ttl), repr(ttl)))
        self.name = name
        self.data = data
        self.ttl = ttl

    def __repr__(self):
        return "%s(%s)" % (self.__class__, str(self.__dict__))


class DnsMeasurement:
    """One observed DNS resolution: its records plus client/provenance metadata."""

    def __init__(self,
                 measurement_id: str,
                 time: datetime.datetime,
                 records: List[ResourceRecord],
                 resolver_ip: Union[IPv4Address, IPv6Address] = None,
                 client_asn: int = None,
                 client_country: str = None,
                 provenance: str = None,
                 trust_reason: str = None) -> None:
        self.measurement_id = measurement_id
        self.time = time
        self.records = records
        self.resolver_ip = resolver_ip
        self.client_asn = client_asn
        self.client_country = client_country
        self.provenance = provenance
        self.trust_reason = trust_reason

    def __repr__(self):
        return "DnsMeasurement(%s)" % str(self.__dict__)
| 31.364706 | 98 | 0.643286 |
import datetime
from ipaddress import ip_address, IPv4Address, IPv6Address
from typing import List, Union
class RecordData:
def __repr__(self):
return "%s(%s)" % (self.__class__, str(self.__dict__))
class IpAddressData(RecordData):
def __init__(self, ip_str: str) -> None:
self._ip = ip_address(ip_str)
@property
def ip(self):
return self._ip
class CnameData(RecordData):
def __init__(self, cname: str) -> None:
self._cname = cname
@property
def cname(self):
return self._cname
class ResourceRecord:
def __init__(self, name: str, data: RecordData, ttl: datetime.timedelta = None) -> None:
if not name:
raise ValueError("ResourceRecord requires name")
self.name = name
self.data = data
self.ttl = ttl
if not isinstance(ttl, (type(None), datetime.timedelta)):
raise ValueError("ttl must be of type datetime.timedelta. Found type %s, value %s" % (
type(ttl), repr(ttl)))
def __repr__(self):
return "%s(%s)" % (self.__class__, str(self.__dict__))
class DnsMeasurement:
def __init__(self,
measurement_id: str,
time: datetime.datetime,
records: List[ResourceRecord],
resolver_ip: Union[IPv4Address, IPv6Address] = None,
client_asn: int = None,
client_country: str = None,
provenance: str = None,
trust_reason: str = None) -> None:
self.measurement_id = measurement_id
self.time = time
self.records = records
self.resolver_ip = resolver_ip
self.client_asn = client_asn
self.client_country = client_country
self.provenance = provenance
self.trust_reason = trust_reason
def __repr__(self):
return "DnsMeasurement(%s)" % str(self.__dict__)
| true | true |
f72d946353539b8f82bb86b10544528e8e6c6521 | 3,783 | py | Python | Note7_Learn Python_Dictionaries.py | stanreport/Python-Tutorials | 7aff8ff7c21d4face1afb218ab9679f3d1160e27 | [
"Apache-2.0"
] | null | null | null | Note7_Learn Python_Dictionaries.py | stanreport/Python-Tutorials | 7aff8ff7c21d4face1afb218ab9679f3d1160e27 | [
"Apache-2.0"
] | 1 | 2018-04-14T19:35:14.000Z | 2018-04-14T19:35:14.000Z | Note7_Learn Python_Dictionaries.py | stanreport/Python-Tutorials | 7aff8ff7c21d4face1afb218ab9679f3d1160e27 | [
"Apache-2.0"
] | null | null | null | # ---------- LEARN TO PROGRAM 7 ----------
# ---------- DICTIONARIES ----------
# While lists organize data based on sequential indexes
# Dictionaries instead use key / value pairs.
# A key / value pair could be
# fName : "Derek" where fName is the key and "Derek" is
# the value
# Create a Dictionary about me
derekDict = {"fName": "Derek", "lName": "Banas", "address": "123 Main St"}
# Get a value with the key
print("May name :", derekDict["fName"])
# Change a value with the key
derekDict["address"] = "215 North St"
# Dictionaries may not print out in the order created
# since they are unordered
print(derekDict)
# Add a new key value
derekDict['city'] = 'Pittsburgh'
# Check if a key exists
print("Is there a city :", "city" in derekDict)
# Get the list of values
print(derekDict.values())
# Get the list of keys
print(derekDict.keys())
# Get the key and value with items()
for k, v in derekDict.items():
print(k, v)
# Get gets a value associated with a key or the default
print(derekDict.get("mName", "Not Here"))
# Delete a key value
del derekDict["fName"]
# Loop through the dictionary keys
for i in derekDict:
print(i)
# Delete all entries
derekDict.clear()
# List for holding Dictionaries
employees = []
# Input employee data
fName, lName = input("Enter Employee Name : ").split()
employees.append({'fName': fName, 'lName': lName})
print(employees)
# ---------- PROBLEM : CREATE A CUSTOMER LIST ----------
# Create an array of customer dictionaries
# Output should look like this
'''
Enter Customer (Yes/No) : y
Enter Customer Name : Derek Banas
Enter Customer (Yes/No) : y
Enter Customer Name : Sally Smith
Enter Customer (Yes/No) : n
Derek Banas
Sally Smith
'''
# Create customer array outside the for so it isn't local
# to the while loop
customers = []
while True:
# Cut off the 1st letter to cover if the user
# types a n or y
createEntry = input("Enter Customer (Yes/No) : ")
createEntry = createEntry[0].lower()
if createEntry == "n":
# Leave the while loop when n is entered
break
else:
# Get the customer name by splitting at the space
fName, lName = input("Enter Customer Name : ").split()
# Add the dictionary to the array
customers.append({'fName': fName, 'lName': lName})
# Print out customer list
for cust in customers:
print(cust['fName'], cust['lName'])
# ---------- RECURSIVE FUNCTIONS ----------
# A function that refers to itself is a recursive function
# Calculating factorials is commonly done with a recursive
# function 3! = 3 * 2 * 1
def factorial(num):
    """Recursively compute num! (any num <= 1 yields 1)."""
    # The base case stops the recursion; without it the calls never end.
    if num <= 1:
        return 1
    return num * factorial(num - 1)
print(factorial(4))
# 1st : result = 4 * factorial(3) = 4 * 6 = 24
# 2nd : result = 3 * factorial(2) = 3 * 2 = 6
# 3rd : result = 2 * factorial(1) = 2 * 1 = 2
# ---------- PROBLEM : CALCULATE FIBONACCI NUMBERS ----------
# To calculate Fibonacci numbers we sum the 2 previous
# values to calculate the next item in the list like this
# 1, 1, 2, 3, 5, 8 ...
# The Fibonacci sequence is defined by:
# Fn = Fn-1 + Fn-2
# Where F0 = 0 and F1 = 1
'''
Sample Run Though to Help
print(fib(3))
# 1st : result = fib(2) + fib(1) : 2 + 1
# 2nd : result = (fib(1) + fib(0)) + (fib(0)) : 1 + 0
# 3rd : result = fib(2) + fib(1)
print(fib(4))
# 1st : result = fib(3) + fib(2) : 3 + 2
# 2nd : result = (fib(2) + fib(1)) + (fib(1) + fib(0)) : 2 + 1
# 3rd : result = (fib(1) + fib(0)) + fib(0) : 1 + 0
'''
def fib(n):
    """Return the n-th Fibonacci number, seeded with F0 = 0 and F1 = 1."""
    # Two base cases end the recursion.
    if n == 0:
        return 0
    if n == 1:
        return 1
    # Each value is the sum of the two preceding ones.
    return fib(n - 1) + fib(n - 2)
print(fib(3))
print(fib(4)) | 23.067073 | 74 | 0.626751 |
derekDict = {"fName": "Derek", "lName": "Banas", "address": "123 Main St"}
print("May name :", derekDict["fName"])
derekDict["address"] = "215 North St"
print(derekDict)
derekDict['city'] = 'Pittsburgh'
print("Is there a city :", "city" in derekDict)
print(derekDict.values())
print(derekDict.keys())
for k, v in derekDict.items():
print(k, v)
print(derekDict.get("mName", "Not Here"))
del derekDict["fName"]
for i in derekDict:
print(i)
derekDict.clear()
employees = []
fName, lName = input("Enter Employee Name : ").split()
employees.append({'fName': fName, 'lName': lName})
print(employees)
# to the while loop
customers = []
while True:
# Cut off the 1st letter to cover if the user
# types a n or y
createEntry = input("Enter Customer (Yes/No) : ")
createEntry = createEntry[0].lower()
if createEntry == "n":
# Leave the while loop when n is entered
break
else:
# Get the customer name by splitting at the space
fName, lName = input("Enter Customer Name : ").split()
# Add the dictionary to the array
customers.append({'fName': fName, 'lName': lName})
# Print out customer list
for cust in customers:
print(cust['fName'], cust['lName'])
# ---------- RECURSIVE FUNCTIONS ----------
# A function that refers to itself is a recursive function
# Calculating factorials is commonly done with a recursive
# function 3! = 3 * 2 * 1
def factorial(num):
# Every recursive function must contain a condition
# when it ceases to call itself
if num <= 1:
return 1
else:
result = num * factorial(num - 1)
return result
print(factorial(4))
# 1st : result = 4 * factorial(3) = 4 * 6 = 24
# 2nd : result = 3 * factorial(2) = 3 * 2 = 6
# 3rd : result = 2 * factorial(1) = 2 * 1 = 2
# ---------- PROBLEM : CALCULATE FIBONACCI NUMBERS ----------
# To calculate Fibonacci numbers we sum the 2 previous
# values to calculate the next item in the list like this
# 1, 1, 2, 3, 5, 8 ...
# The Fibonacci sequence is defined by:
# Fn = Fn-1 + Fn-2
# Where F0 = 0 and F1 = 1
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
result = fib(n-1) + fib(n-2)
return result
print(fib(3))
print(fib(4)) | true | true |
f72d949d658d47131c4a502292aadd093d90b245 | 212 | py | Python | test-examples/million_points.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/million_points.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/million_points.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | """Test converting an image to a pyramid.
"""
import numpy as np
import napari
points = np.random.randint(100, size=(50_000, 2))
with napari.gui_qt():
viewer = napari.view_points(points, face_color='red')
| 19.272727 | 57 | 0.712264 |
import numpy as np
import napari
points = np.random.randint(100, size=(50_000, 2))
with napari.gui_qt():
viewer = napari.view_points(points, face_color='red')
| true | true |
f72d96f6423727aab809d5f6a13928f7eb8bc3a9 | 789 | py | Python | starting/10-Functions_variables_scope.py | souzartn/Python2Share | ef22c3b40f82455d40e512e5dd6de1c98e7100bc | [
"MIT"
] | 2 | 2020-01-11T19:58:11.000Z | 2020-01-11T19:58:42.000Z | starting/10-Functions_variables_scope.py | souzartn/Python2Share | ef22c3b40f82455d40e512e5dd6de1c98e7100bc | [
"MIT"
] | null | null | null | starting/10-Functions_variables_scope.py | souzartn/Python2Share | ef22c3b40f82455d40e512e5dd6de1c98e7100bc | [
"MIT"
] | 2 | 2020-01-11T19:58:51.000Z | 2020-01-11T19:58:51.000Z | #Here we define "x" global variable and assign a value to it
x=10
y=200
print('the value of x global variable is {0}'.format(x))
#Here define function "MyFunction", it takes no paraments
def MyFunction():
global y # this is the same global variable "y"
x=2 #This is a local variable, unrelated to "x" global variable
print('the value of x local variable (inside MyFunction) is {0}'.format(x))
y=300
print('the value of y global variable (inside MyFunction) is {0}'.format(y))
print('starting program...')
# Call function "MyFunction" passing no parameters
MyFunction()
print('the value of x global variable before the end of the program is {0}'.format(x))
print('the value of y global variable before the end of the program is {0}'.format(y))
print('The end.') | 39.45 | 87 | 0.709759 |
x=10
y=200
print('the value of x global variable is {0}'.format(x))
def MyFunction():
global y
x=2
print('the value of x local variable (inside MyFunction) is {0}'.format(x))
y=300
print('the value of y global variable (inside MyFunction) is {0}'.format(y))
print('starting program...')
MyFunction()
print('the value of x global variable before the end of the program is {0}'.format(x))
print('the value of y global variable before the end of the program is {0}'.format(y))
print('The end.') | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.